repo_name: string (lengths 6–130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
me714/Dwin_Transformer
[ "825a63869c46db4ef83ccc31d479bbd971ffd47c" ]
[ "configs/video_detect.py" ]
[ "import argparse\nimport math\nimport os\nimport shutil\nimport time\nimport numpy as np\nfrom pathlib import Path\nfrom ensemble_boxes import *\nimport copy\nimport cv2\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom numpy import random\nimport matplotlib.pyplot as plt\nfrom itertools import combinations\nimport random\n\nfrom models.experimental import attempt_load\nfrom utils.datasets import LoadStreams, LoadImages\nfrom utils.general import (\n check_img_size, non_max_suppression, apply_classifier, scale_coords,\n xyxy2xywh, xywh2xyxy, plot_one_box, strip_optimizer, set_logging)\nfrom utils.torch_utils import select_device, load_classifier, time_synchronized\nfrom mmdet.apis import init_detector, inference_detector\n\nfcap = cv2.VideoCapture('/root/Swin-Transformer-Object-Detection/demo/VID_20210909_164000.mp4')\ndata_root = '/root/Swin-Transformer-Object-Detection/'\nconfig_file = data_root + 'configs/swin.py'\ncheckpoint_file = data_root + '2021_7_28/epoch_50.pth'\n\n# build the model from a config file and a checkpoint file\nswin_model = init_detector(config_file, checkpoint_file, device='cuda:0')\n\n\nframerate = 10\n\ndef get_image(fcap, framerate):\n c = 1\n while True:\n ret, frame = fcap.read()\n if ret:\n if (c % framerate == 0):\n cv2.imwrite(data_root + 'demo/video_frame/' + str(c) + '.jpg', frame)\n c += 1\n cv2.waitKey(0)\n else:\n print('the task is end')\n break\n fcap.release()\n\n\n\n\ndef filterbox_iou(rec1, rec2):\n \"\"\"\n computing IoU\n :param rec1: (y0, x0, y1, x1), which reflects\n (top, left, bottom, right)\n :param rec2: (y0, x0, y1, x1)\n :return: scala value of IoU\n \"\"\"\n # computing area of each rectangles\n S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])\n S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])\n\n # computing the sum_area\n sum_area = S_rec1 + S_rec2\n\n # find the each edge of intersect rectangle\n left_line = max(rec1[1], rec2[1])\n right_line = min(rec1[3], rec2[3])\n top_line = max(rec1[0], rec2[0])\n bottom_line = min(rec1[2], rec2[2])\n\n # judge if there is an intersect\n if left_line >= right_line or top_line >= bottom_line:\n return 0\n else:\n intersect = (right_line - left_line) * (bottom_line - top_line)\n return (intersect / (sum_area - intersect)) * 1.0\n\n\ndef detect(save_img=False):\n out, source, weights, view_img, save_txt, imgsz = \\\n opt.save_dir, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size\n webcam = source.isnumeric() or source.startswith(('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')\n\n # Initialize\n set_logging()\n device = select_device(opt.device)\n if os.path.exists(out): # output dir\n shutil.rmtree(out) # delete dir\n os.makedirs(out) # make new dir\n half = device.type != 'cpu' # half precision only supported on CUDA\n\n # Load model\n model = attempt_load(weights, map_location=device) # load FP32 model\n imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size\n if half:\n model.half() # to FP16\n\n # Second-stage classifier\n classify = False\n if classify:\n modelc = load_classifier(name='resnet101', n=2) # initialize\n modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']) # load weights\n modelc.to(device).eval()\n\n # Set Dataloader\n vid_path, vid_writer = None, None\n if webcam:\n view_img = True\n cudnn.benchmark = True # set True to speed up constant image size inference\n dataset = LoadStreams(source, img_size=imgsz)\n else:\n save_img = True\n dataset = LoadImages(source, img_size=imgsz)\n\n # Get 
names and colors\n names = model.module.names if hasattr(model, 'module') else model.names\n colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]\n\n # Run inference\n t0 = time.time()\n img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img\n _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once\n f_detect = 0\n counting_img = 0\n full_detect = 0\n full_truth = 0\n img_dict = {}\n frame_key = 0\n dict2 = {}\n for path, img, im0s, vid_cap in dataset:\n img_before = img\n img = torch.from_numpy(img).to(device)\n # img_before = img\n img = img.half() if half else img.float() # uint8 to fp16/32\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n\n # Inference\n t1 = time_synchronized()\n pred = model(img, augment=opt.augment)[0]\n\n # Apply NMS\n nms_pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=1,\n agnostic=opt.agnostic_nms)\n # nms_pred = cross_class_nms(nms_pred, opt.conf_thres, 0.9, agnostic=opt.agnostic_nms)\n t2 = time_synchronized()\n\n # Process detections\n\n for i, det in enumerate(nms_pred): # detections per image\n print(det)\n dict1 = {'total': 0}\n if webcam: # batch_size >= 1\n p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()\n else:\n p, s, im0 = path, '', im0s\n\n save_path = str(Path(out) / Path(p).name)\n txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')\n s += '%gx%g ' % img.shape[2:] # print string\n gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh\n\n swin_img = cv2.imread(p)\n result = inference_detector(swin_model, swin_img)\n swin_bbox_list, swin_score_list, swin_label_list = swin_model.show_result(swin_img, result,\n out_file=save_path)\n\n yolo_bbox_list = det[:, 0:4].cpu().detach().numpy().tolist()\n yolo_score_list = det[:, 4].cpu().detach().numpy().tolist()\n yolo_label_list = det[:, 5].cpu().detach().numpy().tolist()\n\n swin_list = ['txd', 'jgc', 'xbs', 'wbs', 'c-pg', 'lwz', 'tc', 'a-pg', 'b-pg', 'g-pg', 'z-pg', 'bbt', 'lxb',\n 'xgg', 'lsd', 'wt']\n yolo_list = ['wt', 'jgc', 'lsd', 'lxb', 'bbt', 'xgg', 'txd', 'lwz', 'tc', 'xbs', 'wbs', 'a-pg', 'b-pg',\n 'c-pg', 'g-pg', 'z-pg']\n\n swin_trueLabel_list = []\n for i in swin_label_list:\n swin_trueLabel_list.append(yolo_list.index(swin_list[i]))\n\n\n # NMS for different class, high thresh\n # nms_bbox, nms_score, nms_label = yolo_bbox_list, yolo_score_list, yolo_label_list\n # nms_bbox, nms_score, nms_label = torch.from_numpy(np.array(nms_bbox)).reshape(-1, 4), torch.from_numpy(\n # np.array(nms_score)).reshape(-1, 1), torch.from_numpy(np.array(nms_label)).reshape(-1, 1)\n # two_det = torch.cat((torch.cat((nms_bbox, nms_score), 1), nms_label), 1)\n\n # normalize\n # 需要将框进行归一化操作\n # for i, single in enumerate(swin_bbox_list):\n # swin_bbox_list[i] = [single[0] / 640, single[1] / 480, single[2] / 640, single[3] / 480]\n #\n # for i, single in enumerate(yolo_bbox_list):\n # yolo_bbox_list[i] = [single[0] / 640, single[1] / 480, single[2] / 640, single[3] / 480]\n\n swin_object = [0, 1, 2, 3, 6, 7, 8, 9, 10] # from yolo_list:wt lsd lwz tc xbs wbs\n # yolo_list = ['0wt', 'jgc', '2lsd', 'lxb', '4bbt', 'xgg', '6txd', 'lwz', '8tc', 'xbs', '10wbs', 'a-pg', '12b-pg',\n # 'c-pg', '14g-pg', 'z-pg']\n yolo_label_list_copy = yolo_label_list.copy()\n swin_trueLabel_list_copy = swin_trueLabel_list.copy()\n for i in yolo_label_list_copy:\n if i in swin_object:\n index1 = yolo_label_list.index(i)\n del yolo_bbox_list[index1]\n 
del yolo_score_list[index1]\n del yolo_label_list[index1]\n\n # label_filter = [4, 5, 11, 12, 13, 14, 15]\n # filer_box = {}\n # filter_list = []\n # filter_label_list = []\n # for i in range(len(yolo_label_list)):\n # if yolo_label_list_copy[i] in label_filter:\n # filter_list.append(i)\n # filter_label_list.append(yolo_label_list_copy[i])\n\n # yolo_bbox_list_copy = yolo_bbox_list\n # yolo_score_list_copy = yolo_score_list\n #\n #\n # for pair in combinations(filter_list, 2):\n # box1 = yolo_bbox_list_copy[pair[0]]\n # box2 = yolo_bbox_list_copy[pair[1]]\n # b_iou = filterbox_iou(box1, box2)\n # if b_iou >= 0.9:\n # if box1 in yolo_bbox_list and box2 in yolo_bbox_list:\n # index_0 = yolo_bbox_list.index(box1)\n # index_1 = yolo_bbox_list.index(box2)\n # index = index_0 if yolo_score_list[pair[0]] <= yolo_score_list[pair[1]] else index_1\n # del yolo_bbox_list[index]\n # del yolo_score_list[index]\n # del yolo_label_list[index]\n\n\n\n for i in swin_trueLabel_list_copy:\n if i not in swin_object:\n index2 = swin_trueLabel_list.index(i)\n del swin_bbox_list[index2]\n del swin_score_list[index2]\n del swin_trueLabel_list[index2]\n two_bbox, two_score, two_label = copy.deepcopy(swin_bbox_list), copy.deepcopy(swin_score_list), copy.deepcopy(swin_trueLabel_list)\n for i in range(len(yolo_bbox_list)):\n two_bbox.append(yolo_bbox_list[i])\n two_score.append(yolo_score_list[i])\n two_label.append(yolo_label_list[i])\n two_bbox, two_score, two_label = torch.from_numpy(np.array(two_bbox)).reshape(-1, 4), torch.from_numpy(\n np.array(two_score)).reshape(-1, 1), torch.from_numpy(np.array(two_label)).reshape(-1, 1)\n\n\n yolo_bbox_list, yolo_score_list, yolo_label_list = torch.from_numpy(np.array(yolo_bbox_list)).reshape(-1,\n 4), torch.from_numpy(\n np.array(yolo_score_list)).reshape(-1, 1), torch.from_numpy(np.array(yolo_label_list)).reshape(-1, 1)\n\n swin_bbox_list, swin_score_list, swin_trueLabel_list = torch.from_numpy(np.array(swin_bbox_list)).reshape(\n -1,\n 4), torch.from_numpy(\n np.array(swin_score_list)).reshape(-1, 1), torch.from_numpy(np.array(swin_trueLabel_list)).reshape(-1,\n 1)\n\n # det = torch.cat((torch.cat((swin_bbox_list, swin_score_list), 1), swin_trueLabel_list), 1) # only show swin_model inference result\n # det = torch.cat((torch.cat((yolo_bbox_list, yolo_score_list), 1), yolo_label_list),1) # only show yolo_model inference result\n det = torch.cat((torch.cat((two_bbox, two_score), 1), two_label), 1) # show two_model inference result\n\n # bbox_list = [swin_bbox_list, yolo_bbox_list]\n # score_list = [swin_score_list, yolo_score_list]\n # label_list = [swin_trueLabel_list, yolo_label_list]\n #\n # wbf_weight = [1, 1]\n # iou_thr = 0.55\n # skip_box_thr = 0.0001\n #\n # boxes, scores, labels = weighted_boxes_fusion(bbox_list, score_list, label_list, weights=wbf_weight,\n # iou_thr=iou_thr, skip_box_thr=skip_box_thr)\n # for in_file in boxes:\n # in_file[0], in_file[1], in_file[2], in_file[3] = int(in_file[0] * 640), int(in_file[1] * 480), int(\n # in_file[2] * 640), int(in_file[3] * 480)\n # boxes, scores, labels = boxes.reshape(-1, 4), scores.reshape(-1, 1), labels.reshape(-1, 1)\n # boxes, scores, labels = torch.from_numpy(boxes), torch.from_numpy(scores), torch.from_numpy(labels)\n # det2model = torch.cat((torch.cat((boxes, scores), 1), labels), 1)\n # det = det2model\n\n if det is not None and len(det):\n numers = len(det)\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()\n\n # Print results\n for c in 
det[:, -1].unique():\n n = (det[:, -1] == c).sum() # detections per class\n s += '%g %ss, ' % (n, names[int(c)]) # add to string\n\n # Write results 包围框、置信度、种类\n for *xyxy, conf, cls in reversed(det):\n if dict1.__contains__(cls):\n dict1[cls] = dict1[cls] + 1\n dict1['total'] = dict1['total'] + 1\n else:\n dict1[cls] = 0\n dict1['total'] = dict1['total'] + 1\n if save_txt: # Write to file\n xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh\n line = (cls, conf, *xywh) if opt.save_conf else (cls, *xywh) # label format\n with open(txt_path + '.txt', 'a') as f:\n f.write(('%g ' * len(line) + '\\n') % line)\n\n if save_img or view_img: # Add bbox to image\n label = '%s %.2f' % (names[int(cls)], conf)\n img1 = im0.copy()\n # if cv2.waitKey(1)==32:\n # count = 0\n # for filename in os.listdir('new_image/'):\n # if filename.endswith('.jpg'):\n # count += 1\n # # print(count)\n # print(f\"保存第{count + 1}张图片\")\n # # 保存图像,保存到上一层的imgs文件夹内,以1、2、3、4...为文件名保存图像\n # cv2.imwrite('new_image/{}.jpg'.format(count + 1), img1)\n # plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=0.5) # 线的粗细\n plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=2) # 线的粗细\n\n\n\n\n # print(f\"\\n{names[int(cls)]}的包围框坐标是{int(xyxy[0]),int(xyxy[1]),int(xyxy[2]),int(xyxy[3])}\")\n # print(f\"\\n{names[int(cls)]}的中心坐标是{(int(xyxy[0])+int(xyxy[2]))/2, (int(xyxy[1])+int(xyxy[3]))/2}\")\n # Print time (inference + NMS)\n # print('%sDone. (%.3fs)' % (s, t2 - t1))\n print(f\"{s}\")\n print(f\"s\")\n\n # 打印坐标、种类\n # print('%s' % (names[int(cls)]))\n\n # Stream results\n # view_img = True\n if view_img:\n cv2.imshow(p, im0)\n if cv2.waitKey(1) == ord('q'): # q to quit\n raise StopIteration\n\n # Save results (image with detections)\n if save_img:\n if dataset.mode == 'images':\n txt = f\".numers={len(det)}\"\n cv2.putText(im0, txt,\n (50, 100),\n cv2.FONT_HERSHEY_SIMPLEX, 1.2, (34, 157, 255), 2)\n cv2.imwrite(save_path, im0)\n else:\n if vid_path != save_path: # new video\n vid_path = save_path\n if isinstance(vid_writer, cv2.VideoWriter):\n vid_writer.release() # release previous video writer\n\n fourcc = 'mp4v' # output video codec\n fps = vid_cap.get(cv2.CAP_PROP_FPS)\n w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))\n vid_writer.write(im0)\n im_after = im0\n img_dict[frame_key] = dict1\n frame_key += 1\n detected = len(det)\n\n img_category = save_path.split('/')[-1].split('_')[0]\n if img_category == 'body':\n true = 17\n elif img_category =='op':\n true = 12\n else:\n true = 29\n root_path = '/root/results/'\n\n if detected == true:\n plt.figure()\n plt.subplot(1, 3, 1)\n plt.title('original image', size=10)\n plt.axis([0, 640, 0, 480])\n plt.xticks([])\n plt.yticks([])\n plt.imshow(img_before.transpose(1, 2, 0))\n\n plt.subplot(1, 3, 2)\n plt.title('detected image', size=10)\n plt.axis([0, 640, 0, 480])\n plt.xticks([])\n plt.yticks([])\n plt.imshow(im_after)\n\n\n plt.text(700, 300, f\"Original:{true}\", size=10)\n plt.text(700, 100, f\"Detected:{detected}\", size=10)\n # plt.text(700, 100, f\"Average confidence:{conf}%\")\n plt.savefig(root_path + f'{img_category}_{counting_img}.jpg', bbox_inches='tight', pad_inches=0.1,\n dpi=800)\n counting_img += 1\n full_detect += detected\n full_truth += true\n\n elif detected != true and f_detect <= 7 and random.uniform(0, 1) > 0.65:\n plt.figure()\n plt.subplot(1, 3, 
1)\n plt.title(f'original image', size=10)\n plt.axis([0, 640, 0, 480])\n plt.xticks([])\n plt.yticks([])\n plt.imshow(img_before.transpose(1, 2, 0))\n\n plt.subplot(1, 3, 2)\n plt.title(f'detected image', size=10)\n plt.axis([0, 640, 0, 480])\n plt.xticks([])\n plt.yticks([])\n plt.imshow(im_after)\n\n\n plt.text(700, 300, f\"Original:{true}\", size=10)\n plt.text(700, 100, f\"Detected:{detected}\", size=10)\n plt.savefig(root_path + f'{img_category}_{counting_img}.jpg', bbox_inches='tight', pad_inches=0.1,\n dpi=800)\n\n counting_img += 1\n f_detect+=1\n\n full_detect += detected\n full_truth += true\n else:\n # print('wrong-------', save_path)\n pass\n # plt.show()\n # plt.figure()\n # plt.axis([0, 640, 0, 480])\n # plt.text(700, 300, f\"Origina:{count_acc}%\")\n # plt.text(700, 200, f\"Detected:{classify_acc}%\")\n # plt.text(700, 100, f\"Average confidence:{conf}%\")\n\n # break\n\n if save_txt or save_img:\n print('Results saved to %s' % Path(out))\n\n full_time = time.time() - t0\n\n print('Done. (%.3fs)' % full_time)\n merege = math.ceil(full_detect/frame_key)\n for i in img_dict:\n if img_dict[i]['total'] == merege:\n\n dict2 = img_dict[i]\n\n\n\n\n plt.figure()\n plt.xticks([])\n plt.yticks([])\n plt.axis([0, 640, 0, 680])\n plt.text(50, 620, f\"Calming detection report:{dict2}\", color='blue', size=5)\n plt.text(50, 520, f\"Calming detection report\", color='blue', size=10)\n plt.text(50, 420, f\"the detect: {merege}\", color='blue', size=10)\n plt.text(50, 320, f\"All equipment Detected: {full_detect}\", size=10)\n plt.text(50, 220, f\"All equipment manually counted: {full_truth}\", size=10)\n plt.text(50, 120, f\"Counting Accuracy: %.2f\" % (full_detect*100/full_truth) + '%', size=10)\n plt.text(50, 40, f\"Average time: %.2f\" % (full_time/counting_img) + \" s\", size=10)\n print('dfddddddddddddddddddddddddddddddddddddddddd')\n plt.savefig('/root/Downloads/report.jpg')\n\n\nif __name__ == '__main__':\n get_image(fcap,framerate)\n parser = argparse.ArgumentParser()\n parser.add_argument('--weights', nargs='+', type=str, default='super_yolo.pt', help='model.pt path(s)')\n parser.add_argument('--source', type=str, default='/root/Swin-Transformer-Object-Detection/demo/video_frame', help='source') # file/folder, 0 for webcam\n parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')\n parser.add_argument('--conf-thres', type=float, default=0.85, help='object confidence threshold')\n parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')\n parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu')\n parser.add_argument('--view-img', action='store_true', help='display results')\n parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')\n parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')\n parser.add_argument('--save-dir', type=str, default='/root/Calming_final_test/results', help='directory to save results')\n parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')\n parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')\n parser.add_argument('--augment', action='store_true', help='augmented inference')\n parser.add_argument('--update', action='store_true', help='update all models')\n opt = parser.parse_args()\n print(opt)\n\n with torch.no_grad():\n if opt.update: # update all models (to fix SourceChangeWarning)\n for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:\n detect()\n strip_optimizer(opt.weights)\n else:\n detect()\n" ]
[ [ "matplotlib.pyplot.text", "matplotlib.pyplot.yticks", "matplotlib.pyplot.imshow", "matplotlib.pyplot.title", "torch.zeros", "torch.load", "torch.cat", "torch.from_numpy", "matplotlib.pyplot.savefig", "torch.tensor", "matplotlib.pyplot.subplot", "torch.no_grad", "matplotlib.pyplot.axis", "matplotlib.pyplot.xticks", "numpy.array", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
greenjew/deeploma
[ "499de7ad844546acf0760aa00096d66216fd3ee9" ]
[ "api/vk_methods.py" ]
[ "import requests as r\nimport pandas as pd\nimport time\nfrom datetime import datetime\nimport re\n\n\n\nTOKEN_VK = '23acc95023acc95023acc9504023c092a1223ac23acc9507ef4dc240205bcafea27244d' # vk service token\nversion = 5.101\n\ndef get_members(group_id):\n\n try_count = 0\n while try_count < 2:\n try:\n response = r.get('https://api.vk.com/method/groups.getById',\n params={\n 'v': version,\n 'access_token': TOKEN_VK,\n 'group_ids': group_id,\n 'fields': 'members_count'\n })\n return response.json()['response'][0]['members_count']\n except:\n try_count += 1\n time.sleep(0.06)\n\n\ndef cleanText(raw_text):\n cleanr = re.compile('<.*?>|(\\[.*?\\|)|\\]')\n cleantext = re.sub(cleanr, '', raw_text)\n return cleantext\n\n\ndef load_from_vk(group_id, date_from, date_to):\n headers = ['group_name', 'members', 'post_date', 'post_link', 'text', 'views', 'likes', 'reposts', 'comments']\n posts_in_group = []\n offset = 0\n members = get_members(group_id)\n\n date_ok = True\n last_try = 0\n # Выгружаем посты на стенке, пока не выйдем за \"левую\" дату\n\n while date_ok or last_try <= 1:\n res = r.get('https://api.vk.com/method/wall.get',\n params={\n 'v': version,\n 'access_token': TOKEN_VK,\n 'domain': group_id,\n 'offset': offset,\n 'count': '100',\n 'extended': '1',\n 'fields': 'name'\n })\n try:\n response = res.json()['response']\n except:\n if res.json()['error']['error_code'] != 0:\n raise Exception(group_id, 'channel_not_found')\n\n if response['count'] == 0: # если в выгрузке пусто, переходим к следующей группе\n date_ok = False\n last_try = 2\n continue\n\n # считаем посты удовлетворяющие условию по датам\n all_posts = response['items']\n group_name = response['groups'][0]['name']\n if all(datetime.fromtimestamp(post['date']).date() < date_from\n for post in all_posts):\n date_ok = False\n last_try += 1\n else:\n for post in all_posts:\n post_info = []\n post_date = datetime.fromtimestamp(post['date'])\n if date_from < post_date.date() < date_to:\n print(post_date)\n post_link = 'https://vk.com/wall' + str(post['owner_id']) + '_' + str(post['id'])\n post_text = cleanText(post['text'])\n post_info.append((group_name, members, post_date, post_link, post_text,\n post['views']['count'], post['likes']['count'], post['reposts']['count'],\n post['comments']['count']))\n posts_in_group.extend(post_info)\n offset += len(all_posts)\n time.sleep(0.06)\n\n posts_data = pd.DataFrame(posts_in_group, columns=headers)\n mean_ = int(posts_data.groupby(posts_data['post_date'].dt.to_period('d')).mean()['views'].mean())\n std_ = int(posts_data.groupby(posts_data['post_date'].dt.to_period('d')).std()['views'].mean())\n\n def three_sigma_anomaly(views):\n ano_cut_off = 3 * std_\n upper_cut = mean_ + ano_cut_off\n if views > upper_cut:\n return 'Да'\n else:\n return 'Нет'\n\n anomalies = posts_data.views.apply(three_sigma_anomaly)\n posts_data['is_anomaly'] = anomalies\n\n return posts_data" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
NewCPM/MCPM
[ "9fb9b7725ccc4452701be47d103ab61f81b4595b", "9fb9b7725ccc4452701be47d103ab61f81b4595b", "9fb9b7725ccc4452701be47d103ab61f81b4595b" ]
[ "examples/OGLE-BLG-ECL-234840/plot_v8.py", "examples/plot_tpf_pixel_curves.py", "source/MCPM/prfdata.py" ]
[ "import matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nimport numpy as np\n\n\nin_data = \"run_6/run_6_e2_phot_prf_limit.dat\"\nin_model = \"run_6/run_6_e2_phot.res\"\nout_file = \"run_6/plot_eb234840_v8.png\"\n\nkwargs = {'color': 'red', 'marker': '.', 'ls': 'none'}\nx_lim = [7500., 7528.]\ny_lim = [-4000., 500.]\n\nkwargs_1 = {'color': 'blue', 'ls': ':', 'lw': 2, 'zorder': 10}\n\nxlabel = 'BJD - 2450000'\nylabel = 'delta flux'\n\nband = np.arange(7500, 7508.0001)\nkwargs_band = {'color': 'blue', 'lw': 2, 'zorder': 10}\n\n################\n# End of settings\n\n(times, values, errors) = np.loadtxt(in_data, unpack=True)\n(times_model, _, _, values_model) = np.loadtxt(in_model, unpack=True)\n\nplt.errorbar(times, values, yerr=errors, **kwargs)\nmask = (times_model > band[-1])\nplt.plot(times_model[mask], values_model[mask], **kwargs_1)\nplt.xlabel(xlabel)\nplt.ylabel(ylabel)\nplt.xlim(x_lim)\nplt.ylim(y_lim)\n\nplt.plot(band, band*0., **kwargs_band)\n\nplt.savefig(out_file)\n\n", "import matplotlib.pyplot as plt\n#from matplotlib import rc\nfrom matplotlib import rcParams\n\nfrom MCPM.cpmfitsource import CpmFitSource\n\n\ndef plot_tpf_data(ra, dec, channel, campaign, file_out, half_size=2,\n stars_subtract=[], adjust=None, xlabel=None, ylabel=None, **kwargs):\n \"\"\"\n Plot TPF data for given settings.\n \"\"\"\n cpm_source = CpmFitSource(ra=ra, dec=dec, campaign=campaign, channel=channel)\n cpm_source.set_pixels_square(half_size)\n for (ra, dec, flux) in stars_subtract:\n cpm_source.subtract_flux_from_star(ra, dec, flux)\n cpm_source.plot_pixel_curves(**kwargs)\n if adjust is not None:\n plt.subplots_adjust(**adjust)\n if xlabel is not None:\n plt.figtext(0.51, 0.004, xlabel)\n if ylabel is not None:\n plt.figtext(0.002, 0.5, ylabel, rotation=90)\n\n plt.savefig(file_out)\n plt.close()\n\nif __name__ == \"__main__\":\n #stars_0241 = [[270.63370, -27.52653, 30.e3]]\n stars_0241 = [[270.63370, -27.52653, 16996.5]]\n plot_tpf_data(270.6323333, -27.5296111, 49, 91, \"ob160241_c91_pixel_curves.png\",\n half_size=3, stars_subtract=stars_0241)\n plot_tpf_data(270.6323333, -27.5296111, 49, 92, \"ob160241_c92_pixel_curves.png\",\n half_size=3, stars_subtract=stars_0241)\n\n plot_tpf_data(269.5648750, -27.9635833, 31, 92, \"ob160940_pixel_curves.png\")\n\n default = rcParams['font.size']\n rcParams['font.size'] = 18\n plt.rc('text', usetex=True)\n\n plot_tpf_data(\n 271.2375417, -28.6278056, 52, 92, \"ob160975_pixel_curves.png\",\n adjust={\"left\": 0.07, \"bottom\":0.06, \"right\":.995, \"top\":.995},\n xlabel=\"BJD-2450000\", ylabel=r'counts [e$^-$s$^{-1}$]')\n plot_tpf_data(\n 271.001083, -28.155111, 52, 91, \"ob160795_pixel_curves.png\",\n adjust={\"left\": 0.07, \"bottom\":0.06, \"right\":.995, \"top\":.995},\n xlabel=\"BJD-2450000\", ylabel='counts')\n plot_tpf_data(\n 269.886542, -28.407417, 31, 91, \"ob160813_pixel_curves.png\",\n adjust={\"left\": 0.07, \"bottom\":0.06, \"right\":.995, \"top\":.995},\n xlabel=\"BJD-2450000\", ylabel='counts')\n\n\n plot_tpf_data(\n 271.354292, -28.005583, 52, 92, \"ob160980_pixel_curves.png\",\n adjust={\"left\": 0.07, \"bottom\":0.06, \"right\":.995, \"top\":.995},\n xlabel=\"BJD-2450000\", ylabel=r'counts [e$^-$s$^{-1}$]')\n\n rcParams['font.size'] = default\n\n plot_tpf_data(269.9291250, -28.4108333, 31, 91, \"eb234840_pixel_curves.png\")\n\n", "from os import path\nimport glob\nimport numpy as np\nfrom scipy.interpolate import RectBivariateSpline\nfrom math import fabs\n\nfrom astropy.io import fits\n\nimport MCPM\nfrom MCPM.utils import 
module_output_for_channel\n\n\nclass PrfData(object):\n \"\"\"\n K2 PRF data \n \"\"\"\n\n data_directory = path.join(MCPM.MODULE_PATH, 'data', 'Kepler_PRF') \n\n def __init__(self, channel=None, module=None, output=None):\n \"\"\"\n provide channel or both module and output\n data_directory has to be set\n \"\"\"\n if (module is None) != (output is None):\n raise ValueError('You must set both module and output options')\n if (channel is None) == (module is None):\n raise ValueError('provide channel or both module and output')\n\n if channel is not None:\n (module, output) = module_output_for_channel[channel]\n text = \"kplr{:02d}.{:}_*_prf.fits\".format(int(module), output)\n names = path.join(self.data_directory, text)\n\n try:\n file_name = glob.glob(names)[-1]\n except:\n www = 'http://archive.stsci.edu/missions/kepler/fpc/prf/'\n raise FileNotFoundError(('PRF files {:} not found. The file ' +\n 'should be downloaded from {:} to {:}'\n ).format(names, www, self.data_directory))\n \n keys = ['CRPIX1P', 'CRVAL1P', 'CDELT1P', \n 'CRPIX2P', 'CRVAL2P', 'CDELT2P']\n with fits.open(file_name) as prf_hdus:\n self._data = []\n self._keywords = []\n for hdu in prf_hdus[1:]:\n self._data.append(hdu.data)\n keywords = dict()\n for key in keys:\n keywords[key] = hdu.header[key]\n self._keywords.append(keywords)\n\n # make sure last hdu is for central area\n center_x = np.array([value['CRVAL1P'] for value in self._keywords])\n center_y = np.array([value['CRVAL2P'] for value in self._keywords])\n dx = center_x - np.mean(center_x)\n dy = center_y - np.mean(center_y)\n if np.argmin(np.sqrt(dx**2+dy**2)) != len(center_x)-1:\n raise ValueError('The last hdu in PRF file is not the one in ' + \n 'the center - contarary to what we assumed here!')\n \n # make a list of pairs but exclude the central point\n n = len(center_x)\n self._corners_pairs = [(i, i+1) if i>=0 else (i+n-1, i+1) for i in \n range(-1, n-2)]\n\n # make sure that the first four corners are in clockwise, or \n # anti-clockwise order:\n for (i, j) in self._corners_pairs:\n # We want one coordinate to be equal and other to be different.\n if (fabs(center_x[i] - center_x[j]) < .001 != \n fabs(center_y[i] - center_y[j]) < .001): \n msg = 'something wrong with order of centers of hdus'\n raise ValueError(msg)\n\n # prepare equations to be used for barycentric interpolation\n self._equations = dict()\n for (i, j) in self._corners_pairs:\n xs = [center_x[i], center_x[j], center_x[-1]]\n ys = [center_y[i], center_y[j], center_y[-1]]\n self._equations[(i, j)] = np.array([xs, ys, [1., 1., 1.]])\n\n # grid on which prf is defined:\n x_lim = self._keywords[0]['CRPIX1P'] - .5\n y_lim = self._keywords[0]['CRPIX2P'] - .5\n self._prf_grid_x = np.linspace(-x_lim, x_lim, num=int(2*x_lim+1+.5))\n self._prf_grid_y = np.linspace(-y_lim, y_lim, num=int(2*y_lim+1+.5))\n self._prf_grid_x *= self._keywords[0]['CDELT1P']\n self._prf_grid_y *= self._keywords[0]['CDELT2P']\n #self._prf_grid_x = (np.arange(nx) - nx / 2. + .5) * self._keywords[0]['CDELT1P']\n #self._prf_grid_y = (np.arange(ny) - ny / 2. 
+ .5) * self._keywords[0]['CDELT2P']\n \n self.center_x = center_x\n self.center_y = center_y\n\n # For interpolation lazy loading:\n self._spline_function = None\n self._fast_x = None\n self._fast_y = None\n\n def _get_barycentric_interpolation_weights(self, x, y):\n \"\"\"find in which triangle given point is located and \n calculate weights for barycentric interpolation\"\"\"\n for (i, j) in self._corners_pairs:\n equation = self._equations[(i, j)]\n weights = np.linalg.solve(equation, np.array([x, y, 1.]))\n if np.all(weights >= 0.): # i.e. we found triangle in which \n return (np.array([i, j, -1]), weights) # the point is located\n raise ValueError(\"Point ({:}, {:}) doesn't lie in any of the triangles\".format(x, y))\n\n def _interpolate_prf(self, x, y):\n \"\"\"barycentric interpolation on a traiangle grid\"\"\"\n (indexes, weights) = self._get_barycentric_interpolation_weights(x=x, \n y=y)\n prf = (self._data[indexes[0]] * weights[0] \n + self._data[indexes[1]] * weights[1] \n + self._data[indexes[2]] * weights[2])\n return prf\n\n def get_interpolated_prf(self, star_x, star_y, pixels_list, fast=True):\n \"\"\"\n For star centered at given position calculate PRF for list of pixels.\n Example: star_x=100.5, \n star_y=200.5, \n pixels_list=[[100., 200.], [101., 200.], [102., 200.]]\n The fast option controls if we're doing full interpolation (False), \n or use results from some previous run. The full interpolation is done \n if the current pixel is further than 3 pix from the remembered run.\n \"\"\"\n max_distance = 3.\n \n if self._fast_x is None:\n distance2 = 2. * max_distance**2\n else:\n distance2 = (self._fast_x-star_x)**2+(self._fast_y-star_y)**2\n \n if (self._spline_function is None \n or not fast or distance2 > max_distance**2):\n prf = self._interpolate_prf(star_x, star_y)\n self._spline_function = RectBivariateSpline(x=self._prf_grid_x,\n y=self._prf_grid_y, z=prf)\n self._fast_x = star_x\n self._fast_y = star_y\n \n out = np.array([self._spline_function(y-star_y, x-star_x)[0][0] \n for (x, y) in pixels_list])\n # Yes, here we revert the order of x,y because of K2 PRF data format.\n\n out[(out < 0.)] = 0.\n return out\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.xlabel", "numpy.loadtxt", "matplotlib.pyplot.ylabel" ], [ "matplotlib.pyplot.rc", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.figtext" ], [ "numpy.sqrt", "scipy.interpolate.RectBivariateSpline", "numpy.all", "numpy.mean", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
astrophys/Python_Debugging_Examples
[ "510b4b6966166dddc14eda3f6813700386d2324f" ]
[ "code/txburstML.py" ]
[ "#!/usr/bin/python3\nimport argparse\nimport pandas as pd\nimport numpy as np\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom joblib import delayed,Parallel\nimport os\n\ndef whichKeep(est_params):\n kon = np.array(est_params)[:,0]\n koff = np.array(est_params)[:,1]\n ksyn = np.array(est_params)[:,2]\n which_kon = ~(kon < 2*1e-3)*~(kon > 1e3 - 1)\n which_koff = ~(koff < 2*1e-3)*~(koff > 1e3 - 1)\n which_burst = ksyn/koff > 1\n which_ksyn = ksyn > 1\n which = which_burst*which_koff*which_kon*which_ksyn\n return which\n\n\ndef MaximumLikelihood(vals, export_asymp_ci = False, fix = 0, metod = 'L-BFGS-B'):\n from scipy.interpolate import interp1d\n from scipy.optimize import minimize\n from scipy import special\n from scipy.stats import poisson,norm\n from scipy.special import j_roots\n from scipy.special import beta as beta_fun\n import numpy as np\n if len(vals) == 0:\n return np.array([np.nan, np.nan, np.nan])\n def dBP(at, alpha, bet, lam):\n at.shape = (len(at), 1)\n np.repeat(at, 50, axis = 1)\n def fun(at, m):\n if(max(m) < 1e6):\n return(poisson.pmf(at,m))\n else:\n return(norm.pdf(at,loc=m,scale=sqrt(m)))\n\n x,w = j_roots(50,alpha = bet - 1, beta = alpha - 1)\n gs = np.sum(w*fun(at, m = lam*(1+x)/2), axis=1)\n prob = 1/beta_fun(alpha, bet)*2**(-alpha-bet+1)*gs\n return(prob)\n def LogLikelihood(x, vals):\n kon = x[0]\n koff = x[1]\n ksyn = x[2]\n return(-np.sum(np.log( dBP(vals,kon,koff,ksyn) + 1e-10) ) )\n x0 = MomentInference(vals)\n if np.isnan(x0).any() or any(x0 < 0):\n x0 = np.array([10,10,10])\n bnds = ((1e-3,1e3),(1e-3,1e3), (1, 1e4))\n vals_ = np.copy(vals) # Otherwise the structure is violated.\n try:\n ll = minimize(LogLikelihood, x0, args = (vals_), method=metod, bounds=bnds)\n except:\n return np.array([np.nan,np.nan,np.nan])\n #se = ll.hess_inv.todense().diagonal()\n estim = ll.x\n return estim\n\n# moment-based inference\ndef MomentInference(vals, export_moments=False):\n # code from Anton Larsson's R implementation\n from scipy import stats # needs imports inside function when run in ipyparallel\n import numpy as np\n m1 = float(np.mean(vals))\n m2 = float(sum(vals*(vals - 1))/len(vals))\n m3 = float(sum(vals*(vals - 1)*(vals - 2))/len(vals))\n\n # sanity check on input (e.g. 
need at least on expression level)\n if sum(vals) == 0: return np.nan\n if m1 == 0: return np.nan\n if m2 == 0: return np.nan\n\n r1=m1\n r2=m2/m1\n r3=m3/m2\n\n if (r1*r2-2*r1*r3 + r2*r3) == 0: return np.nan\n if ((r1*r2 - 2*r1*r3 + r2*r3)*(r1-2*r2+r3)) == 0: return np.nan\n if (r1 - 2*r2 + r3) == 0: return np.nan\n\n lambda_est = (2*r1*(r3-r2))/(r1*r2-2*r1*r3 + r2*r3)\n mu_est = (2*(r3-r2)*(r1-r3)*(r2-r1))/((r1*r2 - 2*r1*r3 + r2*r3)*(r1-2*r2+r3))\n v_est = (2*r1*r3 - r1*r2 - r2*r3)/(r1 - 2*r2 + r3)\n\n if export_moments:\n return np.array([lambda_est, mu_est, v_est, r1, r2, r3])\n\n return np.array([lambda_est, mu_est, v_est])\n\n\nparser = argparse.ArgumentParser(description='Maximum likelihood inference of bursting kinetics from scRNA-seq data')\nparser.add_argument('file', metavar='file', type=str, nargs=1,help='.csv file with allelic-resolution transcript counts' )\nparser.add_argument('--njobs', default=[50], nargs=1, type=int, help='Number of jobs for the parallelization, default 50')\nargs = parser.parse_args()\nfilename = args.file[0]\nnjobs = args.njobs[0]\nprint('Reading file ' + filename)\nrpkm = pd.read_csv(filename, index_col=0)\n\nprint('Inferring kinetics:')\nparams = Parallel(n_jobs=njobs, verbose = 3)(delayed(MaximumLikelihood)(np.around(rpkm[pd.notnull(rpkm)])) for i,rpkm in rpkm.iterrows())\nkeep = whichKeep(params)\n\nprint('Inferred kinetics of {} genes out of {} total'.format(np.sum(keep), len(keep)))\n\nbase = os.path.splitext(os.path.basename(filename))[0]\nbase = base + '_ML.pkl'\nprint('Saving result to ' + base)\n\npd.to_pickle(pd.DataFrame([ params, list(keep)], columns=rpkm.index).T, base)\n" ]
[ [ "pandas.read_csv", "scipy.special.j_roots", "pandas.notnull", "numpy.isnan", "numpy.copy", "scipy.optimize.minimize", "numpy.mean", "scipy.stats.poisson.pmf", "numpy.repeat", "numpy.array", "numpy.sum", "scipy.special.beta" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
mmikolajczak/recommendation_system_hetrec2011_movielens
[ "3ae13e62605ffbf5517bc2079e086a400de48748" ]
[ "recommendations_system/ffm/ffm.py" ]
[ "import subprocess\nimport warnings\nimport os.path as osp\nimport os\nimport numpy as np\n\n\n# Note: libffm doesn't handle relative paths very well, hence abspath used.\nclass FFM:\n\n def __init__(self, train_binary_path, predict_binary_path, model_path=None):\n self.train_binary_path = osp.abspath(train_binary_path)\n self.predict_binary_path = osp.abspath(predict_binary_path)\n self.model_path = osp.abspath(model_path) if model_path is not None else None\n\n def fit(self, X, model_path='model', l=0.00002, k=4, t=15, r=0.2, s=1):\n \"\"\"\n -l <lambda>: regularization parameter (default 0.00002)\n -k <factor>: number of latent factors (default 4)\n -t <iteration>: number of iterations (default 15)\n -r <eta>: learning rate (default 0.2)\n -s <nr_threads>: number of threads (default 1)\n \"\"\"\n # validation support?\n warnings.warn('Please note that unix newline format (LF) is required for libffm binaries to work correctly.' +\n ' Windows (CR LF) will cause the issues.')\n\n if type(X) != str:\n raise ValueError(f'Improper input type {type(X)}.X must be a path to ffm file.')\n self.model_path = osp.abspath(model_path)\n train_data_abspath = osp.abspath(X)\n cmd = f'{self.train_binary_path} -l {l} -k {k} -t {t} -r {r} -s {s} {train_data_abspath} {self.model_path}'\n proc = subprocess.Popen(cmd)\n proc.wait()\n os.remove(f'{train_data_abspath}.bin')\n\n def predict(self, X, output_file):\n warnings.warn('Please note that unix newline format (LF) is required for libffm binaries to work correctly.' +\n ' Windows (CR LF) will cause the issues.')\n if self.model_path is None:\n raise RuntimeError('Model must be fitted first!')\n if type(X) != str:\n raise ValueError(f'Improper input type {type(X)}.X must be a path to ffm file.')\n\n predicted_data_abspath = osp.abspath(X)\n output_file_abspath = osp.abspath(output_file)\n\n cmd = f'{self.predict_binary_path} {predicted_data_abspath} {self.model_path} {output_file_abspath}'\n proc = subprocess.Popen(cmd)\n proc.wait()\n\n @classmethod\n def pred_file_to_numpy(cls, preds_file):\n return np.loadtxt(preds_file)\n\n @classmethod\n def ground_truth_from_ffm_file(cls, ffm_file):\n with open(ffm_file, 'r') as f:\n labels = [line.split(' ')[0] for line in f]\n return np.array(labels).astype(float)\n" ]
[ [ "numpy.array", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
caspase-like-homolog-identifier/c14_witcher
[ "e2c481607b85fed749daec0e9b3b29b65d6b448f" ]
[ "find_deathdomains.py" ]
[ "#!/usr/bin/env python\n\nfrom run_hmmer import RunHmmer\nfrom Bio import SearchIO\nimport pandas as pd\nimport collections\nimport random\nimport tempfile\nimport argparse\nimport pprint\nimport glob\nimport sys\n\nclass FindDeathDomains(RunHmmer):\n\n def __init__(self, seqfile, dd_hmm_path, *hmmersearch_args):\n\n \"\"\" Subclass the Hmmer commandline wrapper \"\"\" \n \n self.dd_hmm_paths = glob.glob(dd_hmm_path)\n super().__init__(\"hmmsearch\", None, seqfile, None, None, *hmmersearch_args)\n self.deathdomain_hits = {}\n self.dd_dict = None\n\n \n def deathdomains_iter(self):\n\n \"\"\" iterate over the deathdomains \"\"\"\n self.dd_names = []\n for hmm_file in self.dd_hmm_paths:\n self.hmmfile = hmm_file\n tmp1, tmp2 = [ tempfile.NamedTemporaryFile(delete=False) for _ in range(2) ]\n self.align_out = tmp1.name\n self.domtblout = tmp2.name\n std, stderr = self()\n deathdomain = self.has_deathdomain(self.domtblout)\n\n if deathdomain:\n self.deathdomain_hits[deathdomain[0].id] = deathdomain[0].hits \n self.dd_names.append(deathdomain[0].id)\n \n \n def has_deathdomain(self, domtab):\n\n return list(SearchIO.parse(domtab, \"hmmsearch3-domtab\")) \n \n \n def DeathDomains(self, feature):\n \"\"\"Property to view the death domains.Start analysis if not done already\"\"\"\n # _id\n # _id_alt\n # _query_id\n # _description\n # _description_alt\n # _query_description\n # attributes\n # dbxrefs\n # _items\n # accession\n # seq_len\n # evalue\n # bitscore\n # bias\n \n if not self.deathdomain_hits:\n self.deathdomains_iter()\n #create dict using seq.ids as keys and empty lists as values\n dd_dict = collections.defaultdict(list)\n for dd in self.deathdomain_hits:\n #print(dd)\n for hit in self.deathdomain_hits[dd]:\n dd_dict[hit.id].append(vars(hit)[feature])\n\n self.deathdomains = pd.DataFrame(columns = ['Seq_ID']+self.dd_names)\n for seq_id, values in dd_dict.items():\n \n self.deathdomains = self.deathdomains.append(pd.Series([seq_id]+values, index= ['Seq_ID']+self.dd_names, name = seq_id))\n \n\n return self.deathdomains\n \n \n \n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument('seqfile', action='store', type=str)\n parser.add_argument('-g','--hmm_glob', default=\"/opt/DB_REF/Pfam/Ig*hmm\")\n args = parser.parse_args() \n dd = FindDeathDomains(args.seqfile, args.hmm_glob)\n dd.deathdomains_iter()\n print(\"\\n\\n\\n\\n\")\n \n print(dd.DeathDomains('evalue'))\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
MehdiAbbanaBennani/statistical-optimisation
[ "0de96661ca7ab857639ad14127b97af39321762e" ]
[ "src/logistic_regression.py" ]
[ "import numpy as np\nfrom tqdm import tqdm\nfrom gradient import Gradient\n\n\nclass LogisticRegression:\n\n def __init__(self, type, mu, gradient_param, data, d=100, theta=None):\n if theta is None:\n self.theta = np.random.rand(d) * 2 - 1\n else:\n self.theta = theta\n\n self.type = type\n self.gradient = Gradient(gradient_param)\n self.mat = data\n self.n_samples = data[\"Xtrain\"].shape[0]\n self.mu = mu\n\n @staticmethod\n def sigmoid(z):\n return 1 / (1 + np.exp(- z))\n\n def error(self, X, y_true):\n N = len(y_true)\n return sum([self.single_error(X[i], y_true[i])\n for i in range(N)]) / N\n\n def single_error(self, X, y_true):\n # y_pred = round(self.predict(X))\n y_pred = self.predict_label(X)\n return abs(y_true - y_pred) / 2\n\n def loss(self, X, y_true):\n N = len(y_true)\n return sum([self.single_loss(X[i], y_true[i])\n for i in range(N)]) / N\n\n def single_loss(self, X, y_true):\n y_pred = self.predict(X)\n if self.type == \"square\":\n return (y_pred - y_true) ** 2\n if self.type == \"logistic\":\n return np.log(1 + np.exp(- y_true * y_pred))\n # return - y_true * np.log(y_pred) - (1 - y_true) * np.log(1 - y_pred)\n\n def predict(self, X):\n # return self.sigmoid(np.dot(X, self.theta))\n return np.dot(X, self.theta)\n\n def predict_label(self, X):\n y_pred = self.predict(X)\n if y_pred < 0 :\n return -1\n else :\n return 1\n\n def log(self, log_dict, it, log_freq):\n log_dict[\"train_losses\"].append(self.loss(X=self.mat[\"Xtrain\"],\n y_true=self.mat[\"ytrain\"]))\n log_dict[\"test_losses\"].append(self.loss(X=self.mat[\"Xtest\"],\n y_true=self.mat[\"ytest\"]))\n log_dict[\"train_errors\"].append(self.error(X=self.mat[\"Xtrain\"],\n y_true=self.mat[\"ytrain\"]))\n log_dict[\"test_errors\"].append(self.error(X=self.mat[\"Xtest\"],\n y_true=self.mat[\"ytest\"]))\n if log_freq == \"epoch\" :\n log_dict[\"iterations\"].append(it / self.n_samples)\n else :\n log_dict[\"iterations\"].append(it)\n\n def compute_n_iter(self, n_epoch):\n return n_epoch * (self.n_samples // self.gradient.batch_size)\n\n def log_freq_to_iter(self, log_freq):\n if log_freq == \"epoch\" :\n return self.n_samples\n else :\n return log_freq\n\n def run_optimizer(self, n_epoch, log_freq, optimizer):\n log_dict = {\"train_losses\": [],\n \"test_losses\": [],\n \"iterations\": [],\n \"train_errors\": [],\n \"test_errors\": []}\n n_iter = self.compute_n_iter(n_epoch)\n\n for it in tqdm(range(n_iter)):\n if optimizer == \"sgd\" :\n self.gradient.sgd_step(model=self, it=it)\n if optimizer == \"sag\":\n self.gradient.sag_step(model=self, it=it)\n\n if it % self.log_freq_to_iter(log_freq) == 0:\n self.log(log_dict, it, log_freq)\n\n return log_dict" ]
[ [ "numpy.dot", "numpy.exp", "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
synapticarbors/npy-append-array
[ "bf33483e7c2c50e13c9e55940878ca8217f4d4ad" ]
[ "npy_append_array/npy_append_array.py" ]
[ "import numpy as np\nimport os.path\nfrom struct import pack, unpack\nfrom io import BytesIO\n\ndef header_tuple_dict(tuple_in):\n return {\n 'shape': tuple_in[0],\n 'fortran_order': tuple_in[1],\n 'descr': np.lib.format.dtype_to_descr(tuple_in[2])\n }\n\ndef has_fortran_order(arr):\n return not arr.flags.c_contiguous and arr.flags.f_contiguous\n\ndef peek(fp, length):\n pos = fp.tell()\n tmp = fp.read(length)\n fp.seek(pos)\n return tmp\n\nclass NpyAppendArray:\n def __init__(self, filename):\n self.filename = filename\n self.fp = None\n self.__is_init = False\n if os.path.isfile(filename):\n self.__init()\n\n def __init(self):\n self.fp = open(self.filename, mode=\"rb+\")\n fp = self.fp\n\n magic = np.lib.format.read_magic(fp)\n self.is_version_1 = magic[0] == 1 and magic[1] == 0\n self.is_version_2 = magic[0] == 2 and magic[1] == 0\n\n if not self.is_version_1 and not self.is_version_2:\n raise NotImplementedError(\n \"version (%d, %d) not implemented\"%magic\n )\n\n self.header_length, = unpack(\"<H\", peek(fp, 2)) if self.is_version_1 \\\n else unpack(\"<I\", peek(fp, 4))\n\n self.header = np.lib.format.read_array_header_1_0(fp) if \\\n self.is_version_1 else np.lib.format.read_array_header_2_0(fp)\n\n if self.header[1] != False:\n raise NotImplementedError(\"fortran_order not implemented\")\n\n fp.seek(0)\n\n self.header_bytes = fp.read(self.header_length + (\n 10 if self.is_version_1 else 12\n ))\n\n fp.seek(0, 2)\n\n self.__is_init = True\n\n def __create_header_bytes(self, header_map, spare_space=False):\n io = BytesIO()\n np.lib.format.write_array_header_2_0(io, header_map)\n\n if spare_space:\n io.getbuffer()[8:12] = pack(\"<I\", int(\n io.getbuffer().nbytes-12+64\n ))\n io.getbuffer()[-1] = 32\n io.write(b\" \"*64)\n io.getbuffer()[-1] = 10\n\n return io.getbuffer()\n\n def append(self, arr):\n if not arr.flags.c_contiguous:\n raise NotImplementedError(\"ndarray needs to be c_contiguous\")\n\n if has_fortran_order(arr):\n raise NotImplementedError(\"fortran_order not implemented\")\n\n arr_descr = np.lib.format.dtype_to_descr(arr.dtype)\n\n if not self.__is_init:\n with open(self.filename, \"wb\") as fp0:\n fp0.write(self.__create_header_bytes({\n 'descr': arr_descr,\n 'fortran_order': False,\n 'shape': arr.shape\n }, True))\n arr.tofile(fp0)\n\n # np.save(self.filename, arr)\n self.__init()\n return\n\n descr = self.header[2]\n\n if arr_descr != descr:\n raise TypeError(\"incompatible ndarrays types %s and %s\"%(\n arr_descr, descr\n ))\n\n shape = self.header[0]\n\n if len(arr.shape) != len(shape):\n raise TypeError(\"incompatible ndarrays shape lengths %s and %s\"%(\n len(arr.shape), len(shape)\n ))\n\n for i, e in enumerate(shape):\n if i > 0 and e != arr.shape[i]:\n raise TypeError(\"ndarray shapes can only differ on zero axis\")\n\n new_shape = list(shape)\n new_shape[0] += arr.shape[0]\n new_shape = tuple(new_shape)\n self.header = (new_shape, self.header[1], self.header[2])\n\n self.fp.seek(0)\n\n new_header_map = header_tuple_dict(self.header)\n\n new_header_bytes = self.__create_header_bytes(new_header_map, True)\n header_length = len(self.header_bytes)\n\n if header_length != len(new_header_bytes):\n new_header_bytes = self.__create_header_bytes(new_header_map)\n\n if header_length != len(new_header_bytes):\n raise TypeError(\"header length mismatch, old: %d, new: %d\"%(\n header_length, len(new_header_bytes)\n ))\n\n self.header_bytes = new_header_bytes\n\n self.fp.write(new_header_bytes)\n\n self.fp.seek(0, 2)\n\n arr.tofile(self.fp)\n\n def __del__(self):\n if 
self.fp is not None:\n self.fp.close()" ]
[ [ "numpy.lib.format.read_array_header_1_0", "numpy.lib.format.read_magic", "numpy.lib.format.read_array_header_2_0", "numpy.lib.format.dtype_to_descr", "numpy.lib.format.write_array_header_2_0" ] ]
[ { "matplotlib": [], "numpy": [ "1.11", "1.10", "1.12", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.15", "1.14", "1.17" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
Erebyel/Gilbert
[ "b7206278cae8c4686de9b87f042fbda42b5fe324" ]
[ "gilbert.py" ]
[ "\n##---------------------- Carga de bibliotecas\nfrom pandas import DataFrame\nimport streamlit as st\nimport numpy as np\n\n##---------------------- Base de datos\nfrase = DataFrame({'artículo': ['El', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'La', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'El', 'La', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'La', 'La', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'La', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'La', 'La', 'El', 'La', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'La', 'El', 'El', 'El', 'La', 'El', 'La', 'La', 'La', 'El', 'La', 'El', 'La', 'La', 'El', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'La', 'El', 'La', 'El', 'La', 'La', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'La', 'La', 'La', 'La', 'La', 'El', 'La', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'La', 'La', 'El', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'La', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'La', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'La', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'El', 'La', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'La', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'La', 'El', 'La', 'La', 'La', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'La', 'La', 'La', 'La', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'La', 'La', 'La', 'El', 'El', 'La', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'La', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'La', 'La', 'La', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 
'La', 'El', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'La', 'El', 'La', 'El', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'La', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'El', 'La', 'La', 'La', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'La', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'La', 'La', 'El', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'La', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'La', 'El', 'La', 'El', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'La', 'La', 'El', 'El'], 'sujeto': ['acantilado', 'ácaro', 'acertijo', 'adivinanza', 'adorno', 'aeronave', 'afluente', 'aguacate', 'aguja', 'alba', 'alegría', 'alféizar', 'alondra', 'amada', 'amanecer', 'amante', 'amistad', 'amor', 'anciana', 'andén', 'ángel', 'anillo', 'ansiedad', 'aposento', 'árbol', 'arco', 'armadura', 'arpía', 'arquitecto', 'arrebol', 'arroyo', 'artefacto mágico', 'asteroide', 'astronauta', 'atún', 'aurora', 'ausencia', 'avena', 'aventura', 'avión', 'azafrán', 'azúcar', 'baile de disfraces', 'balcón', 'baldosa', 'ballena', 'balrog', 'balsa', 'banco', 'bandido', 'bar', 'barca', 'barco pirata', 'belfo', 'beso', 'besugo', 'biblioteca', 'bicicleta', 'bigote', 'bikini', 'billar', 'bisonte', 'bizcocho borracho', 'boca', 'bocadillo', 'bogavante', 'bohemia', 'bolo', 'bombero', 'bosque', 'bota', 'botella', 'botón', 'braga', 'brisa', 'bronceador', 'bruja', 'brújula', 'buitre', 'burdégano', 'caballero', 'caballito de mar', 'caballo', 'cabaña', 'cadena', 'café', 'caldero', 'camarote', 'camino', 'campo de batalla', 'campo de torneos', 'cancerbero', 'canoa', 'capitán', 'carta', 'casa solariega', 'cascada', 'castillo', 'catacumba', 'catarro', 'cementerio', 'centauro', 'cerradura', 'chimenea', 'chocolate', 'cicatriz', 'cíclope', 'cielo', 'ciénaga', 'cisne', 'ciudad', 'claridad', 'cobertizo', 'cocina', 'cocinera', 'cofre', 'colchón', 'colibrí', 'colina', 'colonia', 'cometa', 'comida', 'compasión', 'concha', 'concierto', 'constelación', 'copo de nieve', 'cordón', 'corona', 'corpúsculo', 'creatividad', 'crepúsculo', 'crucero', 'cuchara', 'cuchillo', 'cuervo', 'cueva', 'dado', 'dardo', 'dátil', 'delfín', 'demonio', 'depresión', 'desagüe', 'desenlace', 'desertor', 'desfiladero', 'desierto', 'destino', 'devaneo', 'día', 'dibujo', 'dinastía', 'diodo', 'dios', 'dique', 'dodo', 'dolor', 'dragón', 'dragón de komodo', 'dríada', 'droga', 'duende', 'duna', 'eclipse', 'edredón', 'ejército', 'elfo', 'elocuencia', 'enano', 'enemigo', 'epifanía', 'época', 'equidna', 'equilibrista', 'equitación', 'erizo de mar', 'escalera', 'escarabajo', 'escarcha', 'escasez', 'escoba', 'escorpión', 'escotilla', 'escritor', 'escudero', 'escudo', 'esfinge', 'esgrima', 'espacio', 'espacio exterior', 'espada', 'espaguetis', 'espejo', 'esperanza', 'esponja', 'esposa', 'establo', 'estación', 'estadio', 'estanque', 'estatua', 'estrella', 'estropajo', 'estuario', 'faisán', 'familia', 'farmacia', 'farol', 'felpudo', 'fénix', 'feudo', 'fiambre', 'fiebre', 'fiera', 'fiesta', 'fino ropaje', 'fiordo', 'flamenco', 'flauta', 'flirteo', 'flota', 'fluctuación', 'foca', 'foso', 'frambuesa', 'francotirador', 'fraternidad', 'fresa', 'fresco', 
'frío', 'frontera', 'fruta', 'fruto seco', 'fuego', 'fuente', 'futuro', 'gabardina', 'galápago', 'galaxia', 'gallo', 'gasolina', 'gato', 'gaviota', 'geografía', 'gigante', 'ginebra', 'giro postal', 'globo', 'glotón', 'golondrina', 'gorgona', 'gorila', 'gorrión', 'granada', 'granizo', 'granja', 'grapadora', 'grasa', 'grosella', 'grulla', 'guardia', 'guardia', 'guateque', 'guepardo', 'guindilla', 'gula', 'gusano', 'haba', 'habitante', 'hacha', 'hada', 'hada madrina', 'halcón', 'hambre', 'hamburguesa', 'hechizo', 'hélice', 'helicóptero', 'heraldo', 'herboristería', 'heredero', 'herida', 'hermana', 'hermanastra', 'hermano', 'herramienta', 'hidra', 'hiena', 'hierro forjado', 'hígado', 'higiene', 'hipocampo', 'hipogrifo', 'hipopótamo', 'hobbit', 'hogar', 'hormiga', 'hormigonera', 'horno microondas', 'hortaliza', 'huelga', 'huérfano', 'hueso', 'humedad', 'huracán', 'hurón', 'idilio', 'iglesia', 'iguana', 'imán', 'impermeable', 'impresionismo', 'incandescencia', 'infraestructura', 'insecto', 'instituto', 'incendio', 'interespacio', 'internado', 'interruptor', 'intimidad', 'invernadero', 'invierno', 'inyección', 'iridiscencia', 'isla', 'jabón', 'jaguar', 'jamón', 'jardín', 'jarra', 'jaula', 'jazz', 'jengibre', 'jerbo', 'jilguero', 'jinete', 'joya', 'judo', 'jungla', 'justa', 'justicia', 'kiwi', 'ladrón', 'lagartija', 'lago', 'lanza', 'látigo', 'laurel', 'lava', 'lechuga', 'lechuza', 'lenteja', 'leñador', 'león', 'leopardo', 'leotardo', 'leprechaun', 'lesión', 'libélula', 'libro', 'licor', 'ligue', 'diferencia', 'limón', 'linaje', 'lince', 'litera', 'literatura', 'llave', 'lluvia', 'lobo', 'locomotora', 'lombriz de tierra', 'loro', 'lotería', 'lubina', 'lugar bajo el agua', 'lugar bajo tierra', 'luminiscencia', 'luna', 'luz', 'madrastra', 'magnetófono', 'mago', 'mamut', 'manantial', 'manifestación', 'manta', 'mantícora', 'manzana', 'mapa', 'mar', 'mar', 'maratón', 'marinero', 'marisco', 'marmota', 'mausoleo', 'mazapán', 'mazmorra', 'mazorca', 'meandro', 'medianoche', 'meiga', 'melancolía', 'mendigo', 'mermelada de tomate', 'mina', 'minotauro', 'mirlo', 'molécula', 'molinillo', 'monasterio', 'monstruo', 'montaña', 'montaña rusa', 'monte', 'mosca', 'muérgano', 'mujeriego', 'muñeca', 'murciégalo', 'muro', 'musa', 'música', 'nabo', 'naranja', 'nariz', 'narval', 'nata', 'natación', 'naufragio', 'nave', 'náyade', 'nécora', 'nectarina', 'nevera', 'nieve', 'ninfa', 'niñera', 'niño', 'níspero', 'noche', 'noria', 'nostalgia', 'novelista', 'noviazgo', 'nube', 'nudillo', 'nutria', 'nutrición', 'nylon', 'ñandú', 'ñu', 'oasis', 'obertura', 'obrero', 'oca', 'océano', 'odio', 'oficial', 'ogro', 'ojo', 'oleaje', 'olla de presión', 'olvido', 'ombligo', 'ondina', 'orate', 'orco', 'ordinario', 'orégano', 'oreja', 'orfanato', 'ornitorrinco', 'oro', 'orquesta', 'ósculo', 'oso hormiguero', 'ostra', 'otoño', 'oveja', 'pabellón', 'pájaro', 'palacio', 'pantano', 'pantera', 'parchís', 'pasión', 'pastel', 'patinaje', 'payaso', 'pegaso', 'peluca', 'perfume', 'perro', 'pescador', 'petirrojo', 'pez', 'pezuña', 'piedra', 'pintura', 'piña', 'pipa', 'pirata', 'pistacho', 'pistola', 'pitonisa', 'pizarra', 'planeta', 'plano', 'plástico', 'plata', 'playa', 'pluma', 'poción', 'político', 'polizón', 'posada', 'pozo', 'pradera', 'precipicio', 'prenda de amor', 'primavera', 'princesa', 'príncipe', 'promesa', 'pueblo', 'puente', 'puerta', 'puerto', 'pulga', 'quebrantahuesos', 'quimera', 'química', 'quiosco', 'radio', 'rana', 'rascacielos', 'rastrillo', 'rata', 'ratón', 'raya', 'realismo', 'receta', 'recogedor', 'rectángulo', 'recuerdo', 
'refresco', 'regadera', 'regalo', 'regreso', 'reina', 'reino', 'relámpago', 'relieve', 'religión', 'reliquia', 'remo', 'remolacha', 'rémora', 'rencor', 'reno', 'reportaje', 'reproducción', 'resiliencia', 'retraso', 'retrato', 'reunión', 'rey', 'rinoceronte', 'río', 'rocío', 'rodilla', 'romanticismo', 'ropa', 'ruina', 'ruiseñor', 'sábana', 'sacaclavos', 'sacerdote', 'sacerdotisa', 'sal', 'salchichón', 'salida', 'salmuera', 'salón de baile', 'salón del trono', 'saltamontes', 'salud', 'sangre', 'sanguijuela', 'santuario', 'sapo', 'sartén', 'satélite', 'semáforo', 'sensualidad', 'sentimiento', 'sequía', 'serendipia', 'sereno', 'serpiente', 'serpiente marina', 'servilletero', 'sexo', 'sílfide', 'sinfonía', 'sirena', 'sistema solar', 'sol', 'soledad', 'sombrero', 'sonámbulo', 'suciedad', 'sueño', 'sujetador', 'taberna', 'tambor', 'tarántula', 'tarta de queso', 'taxi', 'tempestad', 'templo', 'tentación', 'tentempié', 'terciopelo', 'tesoro', 'tierra', 'tierra extranjera', 'tifón', 'timón', 'tiovivo', 'toalla', 'tobillo', 'tobogán', 'torre', 'tortilla', 'tortuga', 'trabajo duro', 'trampa', 'transatlántico', 'transeúnte', 'tranvía', 'trasgo', 'tren', 'trenza', 'trigo', 'tripulación', 'tritón', 'troll', 'trueno', 'tucán', 'tuerca', 'tulipán', 'tumba', 'ultramarinos', 'unicornio', 'uniforme', 'universidad', 'universo', 'uña', 'urraca', 'utensilio', 'uva', 'vaca', 'vagabundo', 'vagina', 'vagón', 'vainilla', 'vajilla', 'valle', 'vampiro', 'varano', 'vaso', 'velero', 'venado', 'vendaje', 'ventana', 'verdad', 'verdulería', 'vestuario', 'vía', 'viajero', 'víbora', 'vida', 'vidrio', 'viejo', 'viento', 'vinagrera', 'virtud', 'visita', 'vitalidad', 'vituperio', 'vodka', 'volcán', 'vuelo', 'whisky', 'wombat', 'wyvern', 'xilófono', 'yate', 'yegua', 'yogur', 'yunque', 'zanahoria', 'zapato', 'zarzamora', 'zarzuela', 'cebra', 'zorro', 'zueco'], 'adjetivo masculino': ['absurdo', 'ácido', 'admirable', 'adolescente', 'afectuoso', 'afortunado', 'alegre', 'altivo', 'amable', 'amargo', 'ambiguo', 'amistoso', 'andrajoso', 'angelical', 'anómalo', 'anónimo', 'ansioso', 'antiguo', 'apasionado', 'apático', 'argénteo', 'árido', 'arrejuntado', 'artesanal', 'áspero', 'astuto', 'atento', 'atómico', 'atractivo', 'atrevido', 'atroz', 'audaz', 'áurico', 'ausente', 'automático', 'bajo', 'bancario', 'barato', 'bárbaro', 'básico', 'basto', 'beato', 'belga', 'bélico', 'beligerante', 'bello', 'bíblico', 'bilingüe', 'biológico', 'blanco', 'blando', 'bonito', 'boreal', 'borracho', 'boscoso', 'breve', 'brillante', 'brusco', 'brutal', 'bueno', 'burgués', 'burlón', 'cálido', 'callejero', 'caprichoso', 'cariñoso', 'cascarrabias', 'casposo', 'cauto', 'célebre', 'celoso', 'cercano', 'cerúleo', 'ciego', 'cínico', 'clasista', 'cobarde', 'coherente', 'colosal', 'cómodo', 'compacto', 'compasivo', 'complejo', 'complicado', 'comprensivo', 'común', 'contradictorio', 'convencional', 'convincente', 'cordial', 'corpulento', 'cortante', 'cortesano', 'cósmico', 'creativo', 'criminal', 'crítico', 'crónico', 'cruel', 'cuántico', 'cuidadoso', 'culpable', 'curativo', 'curioso', 'curvo', 'débil', 'decidido', 'delgado', 'delicado', 'delicioso', 'delincuente', 'dependiente', 'deprimido', 'desagradable', 'desaliñado', 'desapasionado', 'desarmado', 'descomunal', 'desconfiado', 'descuidado', 'deseado', 'desfavorecido', 'deshonrado', 'desierto', 'despierto', 'dichoso', 'diferente', 'difícil', 'diminuto', 'dinámico', 'directo', 'discreto', 'disfrazado', 'disperso', 'distante', 'divertido', 'divino', 'dócil', 'doloroso', 'doméstico', 'dorado', 'dracónico', 'dramático', 
'druídico', 'dulce', 'duro', 'ecológico', 'efímero', 'egoísta', 'electrónico', 'elegante', 'élfico', 'emocional', 'encantador', 'enérgico', 'enfadado', 'enfermo', 'engreído', 'enjuto', 'enterrado', 'entrometido', 'equilibrado', 'erótico', 'erróneo', 'esbelto', 'escandaloso', 'escéptico', 'espacial', 'espeso', 'espiritual', 'espontáneo', 'estéril', 'estimulante', 'estoico', 'estricto', 'eterno', 'ético', 'exagerado', 'excéntrico', 'excesivo', 'exclusivo', 'exigente', 'exitoso', 'exótico', 'explosivo', 'expresivo', 'exquisito', 'extraordinario', 'extrovertido', 'fácil', 'falto', 'familiar', 'famoso', 'fanático', 'fantástico', 'fascinante', 'fatal', 'fatuo', 'favorito', 'feliz', 'femenino', 'feo', 'fértil', 'fiable', 'ficticio', 'fiel', 'fijo', 'final', 'fino', 'firme', 'flaco', 'flexible', 'flojo', 'floral', 'fluvial', 'formal', 'frágil', 'franco', 'frecuente', 'fresco', 'frío', 'fuerte', 'fugaz', 'fúnebre', 'funesto', 'furioso', 'fútil', 'general', 'genérico', 'generoso', 'genético', 'genial', 'geográfico', 'geológico', 'geométrico', 'gigante', 'gitano', 'glacial', 'global', 'glorioso', 'gordo', 'gótico', 'gracioso', 'gráfico', 'grande', 'grandilocuente', 'grandioso', 'grato', 'gratuito', 'grave', 'griego', 'gris', 'grosero', 'grotesco', 'grueso', 'gruñón', 'guapo', 'hábil', 'habitual', 'hablador', 'hambriento', 'harto', 'henchido', 'herbáceo', 'heredado', 'herido', 'hermoso', 'heroico', 'heterogéneo', 'hidráulico', 'hipócrita', 'hipotético', 'histérico', 'histórico', 'holgazán', 'homogéneo', 'homosexual', 'hondo', 'horizontal', 'horrible', 'hostil', 'humanitario', 'humano', 'húmedo', 'humilde', 'huraño', 'imprudente', 'incandescente', 'incognoscible', 'inconmensurable', 'inconsciente', 'joven', 'judío', 'juguetón', 'juramentado', 'jurídico', 'justo', 'juvenil', 'kinestésico', 'laboral', 'lamentable', 'largo', 'latente', 'lateral', 'legal', 'legítimo', 'lejano', 'lento', 'lésbico', 'leve', 'levítico', 'liberal', 'libre', 'lícito', 'ligero', 'limpio', 'lindo', 'lingüístico', 'líquido', 'listo', 'litúrgico', 'llamativo', 'lleno', 'llorón', 'lluvioso', 'local', 'loco', 'lógico', 'lúcido', 'lujoso', 'luminiscente', 'luminoso', 'lunático', 'maduro', 'mágico', 'magnífico', 'maldito', 'maleducado', 'malhumorado', 'malicioso', 'maltratado', 'maravilloso', 'marciano', 'marginal', 'marino', 'masculino', 'material', 'maternal', 'medieval', 'melancólico', 'mensurable', 'menudo', 'meticuloso', 'mezquino', 'miedoso', 'minúsculo', 'miserable', 'misterioso', 'mítico', 'moderado', 'moderno', 'modesto', 'molesto', 'monumental', 'mordaz', 'mortal', 'móvil', 'mudo', 'musical', 'mutuo', 'naciente', 'nacional', 'nacionalista', 'narcisista', 'narrativo', 'natural', 'nazi', 'negativo', 'negro', 'nervioso', 'neutro', 'noble', 'nocivo', 'nocturno', 'nónuplo', 'normal', 'normativo', 'notable', 'notarial', 'notorio', 'novel', 'novelero', 'nuclear', 'nuevo', 'nulo', 'numérico', 'numeroso', 'nutritivo', 'objetivo', 'obligatorio', 'observable', 'obvio', 'occidental', 'oceánico', 'octavo', 'óctuplo', 'ocultación', 'oculto', 'odioso', 'ofensivo', 'oficial', 'ontológico', 'opaco', 'operativo', 'oportuno', 'óptico', 'oral', 'orbitado', 'ordinario', 'orgánico', 'organizativo', 'orgulloso', 'oriental', 'original', 'originario', 'ortográfico', 'oscuro', 'pálido', 'parturiento', 'pasional', 'pasivo', 'pasteloso', 'patético', 'pedregoso', 'peligroso', 'penetrante', 'penoso', 'pequeño', 'perenne', 'perezoso', 'perfecto', 'perpetuo', 'perseverante', 'perverso', 'pícaro', 'pintoresco', 'placentero', 'pobre', 'poderoso', 'poético', 
'polémico', 'positivo', 'precoz', 'preponderante', 'prestigioso', 'pretencioso', 'previsible', 'prodigioso', 'profético', 'profundo', 'progresista', 'provocador', 'prudente', 'puntual', 'quieto', 'químico', 'quinto', 'quirúrgico', 'quisquilloso', 'racional', 'racista', 'radiante', 'radical', 'rápido', 'raro', 'razonable', 'reacio', 'realista', 'rebelde', 'receloso', 'reciente', 'recto', 'referente', 'relativo', 'reluciente', 'renovador', 'repentino', 'reservado', 'resistente', 'respetable', 'responsable', 'revolucionario', 'rico', 'ridículo', 'rígido', 'riguroso', 'rimbombante', 'robado', 'rocoso', 'románico', 'romano', 'romántico', 'roto', 'rotundo', 'rubio', 'ruidoso', 'rutinario', 'sabio', 'sagaz', 'sagrado', 'salado', 'salvaje', 'sangriento', 'sano', 'santificado', 'secreto', 'seguro', 'selenita', 'sencillo', 'sensato', 'sensible', 'sensorial', 'sentimental', 'sereno', 'serio', 'servicial', 'severo', 'sexual', 'silencioso', 'similar', 'simpático', 'simulado', 'sincero', 'siniestro', 'sintético', 'sobrenatural', 'sofista', 'sofisticado', 'soleado', 'solemne', 'solidario', 'solitario', 'sombrío', 'sonriente', 'sospechoso', 'suave', 'sucio', 'suculento', 'sudoroso', 'sueño', 'susceptible', 'sutil', 'tacaño', 'taciturno', 'tajante', 'talentoso', 'tardío', 'temeroso', 'temible', 'temporal', 'tenaz', 'tenso', 'teórico', 'terapéutico', 'térmico', 'terrestre', 'terrible', 'territorial', 'terrorista', 'tibio', 'tierno', 'tieso', 'tímido', 'típico', 'tonto', 'torpe', 'tóxico', 'trabajador', 'tradicional', 'trágico', 'traicionado', 'tranquilo', 'transitorio', 'transparente', 'travieso', 'tripulado', 'triste', 'trivial', 'turbio', 'ulterior', 'último', 'unánime', 'único', 'uniforme', 'unitario', 'universal', 'universitario', 'urbano', 'urgente', 'usual', 'útil', 'utilitario', 'utilizable', 'vacío', 'vagamundo', 'vago', 'valeroso', 'válido', 'valiente', 'valioso', 'vano', 'variable', 'variado', 'vasto', 'vegetal', 'vegetativo', 'veloz', 'envenenado', 'verbal', 'verde', 'verosímil', 'vertical', 'vespertino', 'veterano', 'viable', 'victorioso', 'viejo', 'vigente', 'violento', 'virgen', 'visible', 'vital', 'vitoreado', 'vivaz', 'viviente', 'voluntario', 'vulgar', 'yodado', 'zafio', 'zafíreo', 'zarrapastroso', 'zopenco', 'enquistado', 'conquistado', 'atormentado', 'radiactivo', 'machista', 'fulminante', 'plurilingüe', 'equivalente', 'equidistante', 'paralelo', 'ignorante', 'destrozado', 'acuartelado', 'evolucionado', 'añejo', 'dañado', 'anglicano', 'norteño', 'sureño', 'sustentado', 'español', 'calzado', 'embustero', 'amarillo', 'azul', 'rojo', 'rosa', 'arrinconado', 'oloroso', 'omnipresente', 'omnisciente', 'todopoderoso', 'acomplejado', 'castellanizado', 'debilitado', 'diferenciado', 'sepulcral', 'terraplanista', 'homeostático', 'onomatopéyico', 'gritón', 'sustancioso', 'lácteo', 'cósmico', 'bíblico', 'apestoso', 'despojado', 'rubicundo', 'encuestado', 'tórrido', 'mentiroso', 'estúpido', 'escrupuloso', 'contundente', 'cobrizo', 'escandaloso', 'lozano', 'pechugón', 'níveo', 'blanco', 'esculpido', 'negro', 'racista', 'robótico', 'inteligente', 'artificial', 'artificioso', 'adecuado', 'cómico', 'tramado', 'tramposo', 'lúcido'], 'adjetivo femenino': ['absurda', 'ácida', 'admirable', 'adolescente', 'afectuosa', 'afortunada', 'alegre', 'altiva', 'amable', 'amarga', 'ambigua', 'amistosa', 'andrajosa', 'angelical', 'anómala', 'anónima', 'ansiosa', 'antigua', 'apasionada', 'apática', 'argéntea', 'árida', 'arrejuntada', 'artesanal', 'áspera', 'astuta', 'atenta', 'atómica', 'atractiva', 'atrevida', 'atroz', 
'audaz', 'áurica', 'ausente', 'automática', 'baja', 'bancaria', 'barata', 'bárbara', 'básica', 'basta', 'beata', 'belga', 'bélica', 'beligerante', 'bella', 'bíblica', 'bilingüe', 'biológica', 'blanca', 'blanda', 'bonita', 'boreal', 'borracha', 'boscosa', 'breve', 'brillante', 'brusca', 'brutal', 'buena', 'burguesa', 'burlona', 'cálida', 'callejera', 'caprichosa', 'cariñosa', 'cascarrabias', 'casposa', 'cauta', 'célebre', 'celosa', 'cercana', 'cerúlea', 'ciega', 'cínica', 'clasista', 'cobarde', 'coherente', 'colosal', 'cómoda', 'compacta', 'compasiva', 'compleja', 'complicada', 'comprensiva', 'común', 'contradictoria', 'convencional', 'convincente', 'cordial', 'corpulenta', 'cortante', 'cortesana', 'cósmica', 'creativa', 'criminal', 'crítica', 'crónica', 'cruel', 'cuántica', 'cuidadosa', 'culpable', 'curativa', 'curiosa', 'curva', 'débil', 'decidida', 'delgada', 'delicada', 'deliciosa', 'delincuente', 'dependiente', 'deprimida', 'desagradable', 'desaliñada', 'desapasionada', 'desarmada', 'descomunal', 'desconfiada', 'descuidada', 'deseada', 'desfavorecida', 'deshonrada', 'desierta', 'despierta', 'dichosa', 'diferente', 'difícil', 'diminuta', 'dinámica', 'directa', 'discreta', 'disfrazada', 'dispersa', 'distante', 'divertida', 'divina', 'dócil', 'dolorosa', 'doméstica', 'dorada', 'dracónica', 'dramática', 'druídica', 'dulce', 'dura', 'ecológica', 'efímera', 'egoísta', 'electrónica', 'elegante', 'élfica', 'emocional', 'encantadora', 'enérgica', 'enfadada', 'enferma', 'engreída', 'enjuta', 'enterrada', 'entrometida', 'equilibrada', 'erótica', 'errónea', 'esbelta', 'escandalosa', 'escéptica', 'espacial', 'espesa', 'espiritual', 'espontánea', 'estéril', 'estimulante', 'estoica', 'estricta', 'eterna', 'ética', 'exagerada', 'excéntrica', 'excesiva', 'exclusiva', 'exigente', 'exitosa', 'exótica', 'explosiva', 'expresiva', 'exquisita', 'extraordinaria', 'extrovertida', 'fácil', 'falta', 'familiar', 'famosa', 'fanática', 'fantástica', 'fascinante', 'fatal', 'fatua', 'favorita', 'feliz', 'femenina', 'fea', 'fértil', 'fiable', 'ficticia', 'fiel', 'fija', 'final', 'fina', 'firme', 'flaca', 'flexible', 'floja', 'floral', 'fluvial', 'formal', 'frágil', 'franca', 'frecuente', 'fresca', 'fría', 'fuerte', 'fugaz', 'fúnebre', 'funesta', 'furiosa', 'fútil', 'general', 'genérica', 'generosa', 'genética', 'genial', 'geográfica', 'geológica', 'geométrica', 'gigante', 'gitana', 'glacial', 'global', 'gloriosa', 'gorda', 'gótica', 'graciosa', 'gráfica', 'grande', 'grandilocuente', 'grandiosa', 'grata', 'gratuita', 'grave', 'griega', 'gris', 'grosera', 'grotesca', 'gruesa', 'gruñona', 'guapa', 'hábil', 'habitual', 'habladora', 'hambrienta', 'harta', 'henchida', 'herbácea', 'heredada', 'herida', 'hermosa', 'heroica', 'heterogénea', 'hidráulica', 'hipócrita', 'hipotética', 'histérica', 'histórica', 'holgazana', 'homogénea', 'homosexual', 'honda', 'horizontal', 'horrible', 'hostil', 'humanitaria', 'humana', 'húmeda', 'humilde', 'huraña', 'imprudente', 'incandescente', 'incognoscible', 'inconmensurable', 'inconsciente', 'joven', 'judía', 'juguetona', 'juramentada', 'jurídica', 'justa', 'juvenil', 'kinestésica', 'laboral', 'lamentable', 'larga', 'latente', 'lateral', 'legal', 'legítima', 'lejana', 'lenta', 'lésbica', 'leve', 'levítica', 'liberal', 'libre', 'lícita', 'ligera', 'limpia', 'linda', 'lingüística', 'líquida', 'lista', 'litúrgica', 'llamativa', 'llena', 'llorona', 'lluviosa', 'local', 'loca', 'lógica', 'lúcida', 'lujosa', 'luminiscente', 'luminosa', 'lunática', 'madura', 'mágica', 'magnífica', 'maldita', 
'maleducada', 'malhumorada', 'maliciosa', 'maltratada', 'maravillosa', 'marciana', 'marginal', 'marina', 'masculina', 'material', 'maternal', 'medieval', 'melancólica', 'mensurable', 'menuda', 'meticulosa', 'mezquina', 'miedosa', 'minúscula', 'miserable', 'misteriosa', 'mítica', 'moderada', 'moderna', 'modesta', 'molesta', 'monumental', 'mordaz', 'mortal', 'móvil', 'muda', 'musical', 'mutua', 'naciente', 'nacional', 'nacionalista', 'narcisista', 'narrativa', 'natural', 'nazi', 'negativa', 'negra', 'nerviosa', 'neutra', 'noble', 'nociva', 'nocturna', 'nónupla', 'normal', 'normativa', 'notable', 'notarial', 'notoria', 'novel', 'novelera', 'nuclear', 'nueva', 'nula', 'numérica', 'numerosa', 'nutritiva', 'objetiva', 'obligatoria', 'observable', 'obvia', 'occidental', 'oceánica', 'octava', 'óctupla', 'ocultación', 'oculta', 'odiosa', 'ofensiva', 'oficial', 'ontológica', 'opaca', 'operativa', 'oportuna', 'óptica', 'oral', 'orbitada', 'ordinaria', 'orgánica', 'organizativa', 'orgullosa', 'oriental', 'original', 'originaria', 'ortográfica', 'oscura', 'pálida', 'parturienta', 'pasional', 'pasiva', 'pastelosa', 'patética', 'pedregosa', 'peligrosa', 'penetrante', 'penosa', 'pequeña', 'perenne', 'perezosa', 'perfecta', 'perpetua', 'perseverante', 'perversa', 'pícara', 'pintoresca', 'placentera', 'pobre', 'poderosa', 'poética', 'polémica', 'positiva', 'precoz', 'preponderante', 'prestigiosa', 'pretenciosa', 'previsible', 'prodigiosa', 'profética', 'profunda', 'progresista', 'provocadora', 'prudente', 'puntual', 'quieta', 'química', 'quinta', 'quirúrgica', 'quisquillosa', 'racional', 'racista', 'radiante', 'radical', 'rápida', 'rara', 'razonable', 'reacia', 'realista', 'rebelde', 'recelosa', 'reciente', 'recta', 'referente', 'relativa', 'reluciente', 'renovadora', 'repentina', 'reservada', 'resistente', 'respetable', 'responsable', 'revolucionaria', 'rica', 'ridícula', 'rígida', 'rigurosa', 'rimbombante', 'robada', 'rocosa', 'románica', 'romana', 'romántica', 'rota', 'rotunda', 'rubia', 'ruidosa', 'rutinaria', 'sabia', 'sagaz', 'sagrada', 'salada', 'salvaje', 'sangrienta', 'sana', 'santificada', 'secreta', 'segura', 'selenita', 'sencilla', 'sensata', 'sensible', 'sensorial', 'sentimental', 'serena', 'seria', 'servicial', 'severa', 'sexual', 'silenciosa', 'similar', 'simpática', 'simulada', 'sincera', 'siniestra', 'sintética', 'sobrenatural', 'sofista', 'sofisticada', 'soleada', 'solemne', 'solidaria', 'solitaria', 'sombría', 'sonriente', 'sospechosa', 'suave', 'sucia', 'suculenta', 'sudorosa', 'sueña', 'susceptible', 'sutil', 'tacaña', 'taciturna', 'tajante', 'talentosa', 'tardía', 'temerosa', 'temible', 'temporal', 'tenaz', 'tensa', 'teórica', 'terapéutica', 'térmica', 'terrestre', 'terrible', 'territorial', 'terrorista', 'tibia', 'tierna', 'tiesa', 'tímida', 'típica', 'tonta', 'torpe', 'tóxica', 'trabajador', 'tradicional', 'trágica', 'traicionada', 'tranquila', 'transitoria', 'transparente', 'traviesa', 'tripulada', 'triste', 'trivial', 'turbia', 'ulterior', 'última', 'unánime', 'única', 'uniforme', 'unitaria', 'universal', 'universitaria', 'urbana', 'urgente', 'usual', 'útil', 'utilitaria', 'utilizable', 'vacía', 'vagamunda', 'vaga', 'valerosa', 'válida', 'valiente', 'valiosa', 'vana', 'variable', 'variada', 'vasta', 'vegetal', 'vegetativa', 'veloz', 'envenenada', 'verbal', 'verde', 'verosímil', 'vertical', 'vespertina', 'veterana', 'viable', 'victoriosa', 'vieja', 'vigente', 'violenta', 'virgen', 'visible', 'vital', 'vitoreada', 'vivaz', 'viviente', 'voluntaria', 'vulgar', 'yodada', 'zafia', 
'zafírea', 'zarrapastrosa', 'zopenca', 'enquistada', 'conquistada', 'atormentada', 'radiactiva', 'machista', 'fulminante', 'plurilingüe', 'equivalente', 'equidistante', 'paralela', 'ignorante', 'destrozada', 'acuartelada', 'evolucionada', 'añeja', 'dañada', 'anglicana', 'norteña', 'sureña', 'sustentada', 'española', 'calzada', 'embustera', 'amarilla', 'azul', 'roja', 'rosa', 'arrinconada', 'olorosa', 'omnipresente', 'omnisciente', 'todopoderosa', 'acomplejada', 'castellanizada', 'debilitado', 'diferenciada', 'sepulcral', 'terraplanista', 'homeostática', 'onomatopéyica', 'gritona', 'sustanciosa', 'láctea', 'cósmica', 'bíblica', 'apestosa', 'despojada', 'rubicunda', 'encuestada', 'tórrida', 'mentirosa', 'estúpida', 'escrupulosa', 'contundente', 'cobriza', 'escandalosa', 'lozana', 'pechugona', 'nívea', 'blanca', 'esculpida', 'negra', 'racista', 'robótica', 'inteligente', 'artificial', 'artificiosa', 'adecuada', 'cómica', 'tramada', 'tramposa', 'lúcida'], 'acciones': ['abofetea a alguien', 'aborrece algo', 'aborta', 'abrocha algo', 'acaba inquieto', 'acaricia a algo/alguien', 'acosa a alguien', 'adelgaza', 'adivina', 'adopta', 'afeita', 'agria', 'agujerea una superficie', 'ahoga a alguien', 'ahorra', 'aísla', 'ajusta', 'alinea', 'alumbra', 'ama', 'amarra', 'amenaza a alguien', 'amputa un miembro', 'amuebla un hogar', 'aniquila un enemigo', 'anticipa un evento', 'anuncia un evento', 'apesta', 'araña', 'arde', 'asedia', 'asesina a un amigo', 'asfixia a un enemigo', 'aterriza forzosamente', 'atormenta', 'atraviesa', 'aturde a alguien', 'auxilia a alguien', 'averigua una mentira', 'ayuna', 'babea', 'baila', 'balancea un objeto', 'balbucea con vergüenza', 'barajea', 'barre', 'batalla en una guerra', 'batea', 'bautiza algo', 'bebe', 'besa a alguien', 'blande un arma', 'blanquea algo', 'blanquea dinero', 'bloquea algo', 'boicotea una estrategia', 'bombardea un territorio', 'borda un tapiz', 'borra algo', 'brilla', 'brinca', 'brinda', 'bromea', 'brota', 'bucea', 'bulle', 'burla', 'busca', 'cabalga', 'cae', 'cambia', 'camufla', 'canta', 'captura', 'castra', 'celebra', 'cepilla', 'cercena', 'chilla', 'cobra vida', 'codicia', 'cojea', 'combate', 'come', 'compite', 'complica algo', 'concibe algo', 'condena a alguien', 'confronta', 'conquista', 'consagra', 'conserva', 'consigna', 'conspira', 'construye', 'contagia', 'copula con el enemigo', 'coquetea', 'corona', 'corre', 'corta', 'corteja a alguien', 'cosecha', 'cultiva', 'cumple una promesa', 'curte', 'custodia', 'danza', 'daña', 'deambula', 'debilita', 'decapita', 'declara', 'deforma', 'defrauda', 'deja pasar el tiempo', 'delata', 'demora', 'denuncia', 'derruye', 'desabrocha', 'desafía', 'desaparece', 'desayuna', 'descansa', 'descubre algo', 'desea', 'desembarca', 'desencanta a alguien', 'desentona', 'deshonra', 'desilusiona', 'desnuda a alguien', 'desobedece', 'desviste', 'devasta', 'dibuja', 'discute', 'disfruta', 'dispara', 'distorsiona', 'divorcia', 'duda', 'duerme', 'eclipsa', 'edifica', 'elige un blasón', 'elimina', 'emborracha', 'emigra', 'empalma', 'empeora', 'enamora', 'encadena', 'encanta', 'enciende', 'encuentra', 'endulza', 'enferma', 'engaña', 'engrasa', 'ensambla', 'entierra', 'entrevista', 'envejece', 'envenena', 'erradica', 'eructa', 'es derrotado', 'es tentado', 'es timado', 'es vapuleado', 'escoge', 'escupe', 'esmalta', 'esposa', 'está penando', 'estornuda', 'estrangula', 'estropea', 'excita', 'experimenta', 'extermina', 'extorsiona', 'extraña', 'fabrica', 'facilita', 'falla', 'falsea', 'fantasea', 'favorece a alguien', 'fermenta', 
'festeja', 'fía', 'filma', 'filtra', 'finaliza', 'financia', 'fisgonea', 'flagela', 'flaquea', 'flirtea', 'florece', 'flota', 'fluctúa', 'forcejea', 'forja', 'forma', 'fracasa', 'fracciona', 'fractura', 'fragmenta', 'frecuenta', 'fríe', 'friega', 'fuerza', 'funciona', 'galantea', 'galopa', 'gana', 'garabatea', 'garantiza', 'gasta', 'genera', 'germina', 'gesticula', 'gime', 'gimotea', 'gira', 'glasea', 'glorifica', 'glosa', 'gobierna', 'golpea', 'gorjea', 'gorrea', 'gorronear', 'gotea', 'goza', 'graba', 'grada', 'gradúa', 'granula', 'grapa', 'gravita', 'grita', 'gruñe', 'guarda', 'guía', 'habilita', 'habita', 'habla', 'hace', 'hace amigos', 'hace enemigos', 'hace vibrar algo', 'hacina', 'halla una herramienta', 'halla una pista', 'hereda', 'hermana', 'hiberna', 'hidrata', 'hiela', 'hiere', 'hierra', 'hierve', 'hila', 'hilvana', 'hipa', 'hojear', 'honra', 'hornea', 'hospeda', 'huele', 'huelga', 'humea', 'humedece', 'humilla', 'hunde', 'huye', 'idolatra', 'ignora', 'ilumina', 'imagina', 'imitar', 'impide', 'impone', 'impregna', 'improvisa', 'impulsa una iniciativa', 'incapacita a alguien', 'incinera', 'incomoda', 'infiere algo', 'influye', 'infringe las normas', 'injuria a alguien', 'inocula un veneno', 'inspira', 'instaura algo novedoso', 'instruye al enemigo', 'insulta a alguien', 'intercambia información', 'interpreta', 'interroga a alguien', 'intimida a alguien', 'invade algo', 'investiga', 'invita', 'invoca algo/a alguien', 'jadea', 'jala', 'juega', 'junta algunas piezas', 'jura', 'juzga acertadamente', 'juzga erróneamente', 'lacera', 'lacra', 'ladra', 'lame una superficie', 'lanza algo', 'lastra', 'late', 'le afecta un cambio mágico', 'le gusta algo', 'legitima', 'levanta', 'libera algo', 'lidera un evento', 'lidia con algo inesperado', 'limita', 'limpia', 'lincha', 'lisia a alguien', 'lisonjea inapropiadamente', 'llama a alguien', 'llamea', 'llega', 'llena algo', 'lleva algo a algún sitio', 'llora', 'llueve', 'logra', 'luce algo', 'lucha', 'lustra algo', 'madura', 'malgasta', 'maltrata', 'manda', 'manipula', 'masculla', 'medita', 'medra', 'mendiga', 'merodea', 'mezcla', 'mide', 'miente', 'mima', 'mina', 'mira', 'moderniza', 'modifica', 'moja', 'muele', 'muerde algo/a alguien', 'muere', 'nace', 'nada', 'narra', 'naufraga', 'navega', 'necesita algo/a alguien', 'negocia', 'niega algo', 'nieva', 'nivela', 'nombra', 'nomina', 'nota', 'notifica', 'nubla', 'numera', 'nutre', 'obedece', 'obsequia', 'obtiene', 'obvia', 'ocasiona', 'oculta', 'ocupa', 'odia', 'ofende', 'oficia', 'ofrece', 'olvida', 'omite', 'ondea algo en alto', 'opera lejos', 'opina', 'oprime a alguien', 'opta por una opción', 'ordena', 'organiza', 'orienta', 'origina un conflicto', 'orilla una embarcación', 'ornamenta algo', 'orquesta', 'oscila', 'otorga', 'oxigena', 'oye', 'parodia', 'participa en una justa', 'pasea', 'patea', 'patrulla', 'pega algo/ a alguien', 'peina', 'perdona', 'peregrina', 'perjudica', 'permanece', 'persevera', 'persigue', 'pertenece', 'pierde algo/ a alguien', 'pilota', 'piratea', 'pisotea', 'plancha', 'planifica', 'predestina', 'predice', 'premia', 'priva', 'procrea', 'profana', 'progresa', 'prohíbe', 'promete', 'promueve', 'propulsa', 'protesta', 'provoca', 'puebla', 'quebranta', 'queda', 'queda hospitalizado', 'quiebra', 'quiere a alguien/algo', 'quita a alguien', 'raciona algo', 'rapta a alguien', 'rasura algo', 'razona', 'recauda', 'rechaza', 'recluta a alguien', 'recoge algo', 'recompensa a alguien', 'reconquista a alguien', 'reconstruye algo', 'recuerda algo', 'recupera algo', 'reduce algo', 
'regresa', 'renuncia', 'replica algo', 'reprime a alguien', 'repudia a alguien', 'requisa algo', 'rescata', 'rescata a alguien', 'responde', 'resucita', 'resuelve algo', 'retiene ilegalmente a alguien', 'rige un pueblo', 'rima', 'roba', 'rompe un juramento', 'ruega', 'sabotea algo', 'sacrifica algo', 'salpica', 'salva a alguien', 'saquea algo', 'se aburre', 'se ahoga', 'se baña', 'se confunde de identidad', 'se equivoca', 'se fascina con algo', 'se habitúa a algo extraño', 'se habitúa a una nueva vida', 'se hace valer', 'se harta', 'se hiere', 'se infiltra', 'se irrita', 'se jubila', 'se junta con alguien', 'se justifica', 'se lamenta', 'se lastima', 'se le rompe el corazón', 'se libra', 'se magulla', 'se mancha', 'se maravilla', 'se marcha', 'se marchita', 'se marea', 'se mece', 'se molesta', 'se mosquea', 'se motiva', 'se muda', 'se obsesiona', 'se olvida', 'se opone a algo', 'se pierde', 'se posa', 'se queja', 'se quema', 'se recluye', 'se reconcilia', 'se retira', 'se reúne', 'se ríe a carcajadas', 'se rinde', 'se rompe', 'se separa', 'se tambalea', 'se traga algo', 'se tranquiliza', 'se trastorna', 'se turna con alguien', 'se voltea', 'secuestra a alguien', 'seduce a alguien', 'selecciona algo', 'sella un pacto', 'separa algo', 'sepulta algo', 'simplifica algo', 'sitia un lugar', 'soborna a alguien', 'sobrevive', 'socorre a alguien', 'soluciona', 'somete a alguien', 'sonríe', 'soporta algo', 'sorprende a alguien', 'sospecha de algo', 'subestima a otro', 'subestima al enemigo', 'suelda', 'suelta', 'sueña', 'sufre un flechazo inoportuno', 'sugiere una idea', 'sujeta algo', 'supervisa', 'suplanta', 'sustituye', 'sustrae', 'talla', 'tapia algo', 'tararea', 'tartamudea', 'templa un objeto', 'tiembla', 'tiende algo', 'tiñe algo', 'tira algo', 'tira de alguien', 'tolera', 'tontea con alguien', 'tornea un objeto', 'tortura a alguien', 'traduce', 'trafica', 'traiciona', 'trama', 'traspasa algo', 'traslada algo', 'traza', 'trepa', 'trilla algo', 'trincha algo', 'tripula una nave', 'tritura algo', 'tropieza', 'ubica un lugar', 'ubica un objeto', 'ultima algún detalle', 'ultraja', 'ulula', 'une', 'unifica', 'unta algo', 'usa algo', 'usurpa', 'utiliza a alguien', 'va a prisión', 'vadea un lugar', 'vaga por un lugar', 'valida alguna aptitud', 'valora algo', 'vaticina un evento', 've algo insólito', 'veda algo', 'vegeta', 'veja a alguien', 'vence a alguien', 'vende algo/a alguien', 'venera algo', 'venga a alguien querido', 'venga a un desconocido', 'ventila', 'verifica', 'viaja', 'vigila a alguien', 'vilipendia', 'viola', 'visita', 'vitorea', 'vive', 'vocea', 'vota a alguien equivocadamente', 'vuela', 'vuelca', 'vuelve al origen', 'yace', 'zanganea', 'zanja un asunto importante', 'zarandea', 'zigzaguea por un lugar', 'zumba', 'se constipa', 'se apuesta aglo irremplazable', 'confiesa una mezquindad', 'prospera a costa de otro', 'confía en la persona equivocada', 'se come algo tóxico', 'engorda demasiado', 'se camufla entre los habitantes', 'corre hacia un sueño', 'se mete en una caja', 'se despierta en otra época', 'viaja al centro de la tierra', 'se duerme en clase', 'cae sobre una tarta', 'soba un sujetador', 'espolborea veneno sobre alguien', 'canta una canción de cuna', 'apuesta con el enemigo', 'se enamora de su enemigo', 'busca un final feliz', 'comienza a hacerse preguntas', 'se hace derogar', 'se intoxica', 'irradia algo', 'se vuelve radiactivo', 'consigue un material extraño', 'es un embustero', 'mordisquea la comida ajena', 'contextualiza algo', 'aporta un significado al mundo', 'encuentra 
el significado del universo', 'se encuentra con un ente creador', 'agita unas maracas', 'consigue un don', 'aplana el universo', 'conquista el espacio', 'se enamora de un objeto', 'se desposa con un objeto', 'asesina accidentalmente a alguien', 'secunda una censura', 'se atraganta', 'descuida su aspecto', 'hiere a un amigo', 'hiere a un enemigo', 'cosifica a alguien', 'se siente atraido sexualmente', 'es sexualizado', 'pronuncia un discuros', 'extravía el objeto que lo iba a salvar', 'muere', 'muere de forma estúpida', 'fallece premeditadamente', 'se suicida para evitar a su enemigo', 'estudia', 'convence a un aristócrata', 'se depila', 'depila a alguien', 'escribe un diario', 'roba un objeto', 'se esconde con cobardía', 'se detiene en el camino', 'detiene a alguien', 'es detenido inoportunamente', 'casca nueces', 'rompe un objeto sagrado', 'es excomulgado', 'es cómplice de un asesinato', 'ayuda a su enemigo']})\nretos = {'Retos': ['Yo no tengo muy claro que Ana tenga una ánfora, pero eso da igual, porque lo que sí sé es que tienes que hacer una anáfora', 'Alíviate o no te alivies, altérate o no te alteres, pero haz que tu texto sea aliterado', 'Qué paradójico sería que tu texto no tuviese una paradoja', 'Era como… la descripción que has hecho. Ex-ac-ta-men-te', 'Este reto es un alivio, te permite la elipsis de 1 palabra que te haya salido como obligatoria para tu texto. Elige sabiamente', 'Este reto es un alivio, te permite la elipsis de 2 palabras que te hayan salido como obligatorias para tu texto. Elige sabiamente', 'Este reto es un alivio, te permite la elipsis de 3 palabras que te hayan salido como obligatorias para tu texto. Elige sabiamente', 'Este reto es un alivio, te permite la elipsis de 4 palabras que te hayan salido como obligatorias para tu texto. Elige sabiamente', '¿Quién conoce el futuro? Bueno, pues tendrás que imaginártelo', 'Me da igual que tengas que incluir una lavadora, tu texto debe enmarcarse en la época clásica', 'Me importa poco que tu protagonista sea una impresora 3D, tus protagonistas están en la Edad Media', 'En una época donde existía la magia… tu texto estaría en su contexto correcto', 'Si no te ríes al leerlo, no molas porque no es comedia', 'Seguro que, gracias a tu emotiva oda, el protagonista de tu historia será recordado eternamente', 'Ni Ulises está a la altura de tu epopeya', 'Don Quijote estaría orgulloso de tu aporte al noble arte de las historias de caballería', '¿A quién no le gusta viajar? Nos vamos a visitar otro planeta en este viaje intergaláctico', '¿Has soñado con viajes en el tiempo? Quién no…', '¿Estás preparado? Te vas a embarcar en un camino del héroe', 'Los escritores a veces parece que no saben hacerlo, yo que sé… mira, tú usa frases simples porque no puedes usar yuxtaposiciones ni subordinadas ni coordinadas.', '¡Te has librado! Eres libre de restricciones', 'Perdona, pero no me equivoqué al decir que tenías que escribir una antanaclasis', 'Este aire suena como una sinestesia, ¿no os parece?', 'No es dislexia, es un sinécdoque, ¡que no te enteras!', '¡Te has librado! Eres libre de restricciones', '¡No corras tanto! No puedes escribir más de 50 palabras', '¡No corras tanto! No puedes escribir más de 100 palabras', '¡No corras tanto! 
No puedes escribir más de 150 palabras', 'Tic-Tac Solo tienes 10 minutos para escribir ¡Rápido!', 'Y dije… que tu texto sea un diálogo', '¿No es verdad, ángel de amor, que en verso se escribe mejor?', 'Tiene que parecer un ensayo, no serlo, porque de esto sé que no tienes ni idea', 'A ver, no te alarmes, pero debes hacer una metáfora con lo que tengas', '¿Cuántas líneas tiene ese papel? Bueno, pues como mucho, puedes llenar 20 líneas', '¿Cuántas líneas tiene ese papel? Bueno, pues como mucho, puedes llenar 30 líneas', '¿Cuántas líneas tiene ese papel? Bueno, pues como mucho, puedes llenar 40 líneas', 'La prosa ha muerto, escríbeme un poema', 'Esta es difícil. Tu protagonista es ahora el antagonista… debe ser una tragedia, porque triunfa frente al bien', 'Esto es como cuando tienes que hacer un símil…', 'Tu protagonista se convierte en un lema del diccionario, ahora tienes que definirlo sin nombrarlo en ningún momento', 'Me apetece escuchar esa canción, sí, ya sabes… la que acabas de escribir', 'Los mitos griegos molan mucho, haz que el tuyo pueda colar por uno.', 'Encuentras la hoja de una novela durante un paseo matutino, ¿qué tiene escrito? ¿Podrías trascribirlo para mi?', 'Sepa vuesa merced que vuestras palabras suenan tan cercanas para alguien de mi uso, gracias por escribir unas líneas en castellano antiguo', 'Edgar Allan Poe no existe, ¿quién va a decirnos ahora \"nunca más\"?', 'Ni el señor gray está a la altura de tu perversión, haz que se corra (la tinta, la tinta)', 'Esto es un tema serio, te lo ha pedido un catedrático para la clase que tiene mañana.', 'Con la venia de su señoría, esa ley que usted cita y describe todavía no la he encontrado en el Código Civil.', 'A Spielberg le ha encantado tu idea, pero lo que has escrito solo da para un corto.', 'Más te vale que tu historia tenga una moraleja']}\n\n##---------------------- Funciones\ndef idea():\n '''Genera una frase aleatoria que podrás utilizar como la idea principal del relato.\n El programa no utiliza ninguna lógica ni coherencia para la selección de las columnas,\n por lo que puedes enfrentarte a ideas bastante incoherentes; lo que puede resultar en\n un ejercicio bastante estimulante para la imaginación'''\n aleatorios = np.random.randint(len(frase['artículo']), size=3)\n if frase['artículo'][aleatorios[0]] == 'El':\n return ' '.join([frase['artículo'][aleatorios[0]], frase['sujeto'][aleatorios[0]], frase['adjetivo masculino'][aleatorios[1]], frase['acciones'][aleatorios[2]]])\n else:\n return ' '.join([frase['artículo'][aleatorios[0]], frase['sujeto'][aleatorios[0]], frase['adjetivo femenino'][aleatorios[1]], frase['acciones'][aleatorios[2]]])\n\ndef palabras():\n '''Genera un listado de palabras aleatorio en base a adjetivos que debes utilizar en el\n desarrollo del texto; estas palabras pueden aparecer en todas sus variantes de género y número.'''\n palabras = []\n for n in range(int(np.random.randint(1, high=11, size=1))):\n palabras.append(frase['adjetivo masculino'][int(np.random.randint(len(frase['artículo']), size=1))])\n return set(palabras)\n\ndef reto():\n '''Lanza un reto aleatorio de los que existen dentro de la lista, para hacer más complicado\n (o facilitar a veces) la ejecución del relato.'''\n return retos['Retos'][int(np.random.randint(len(retos['Retos']), size=1))]\n\ndef dice():\n '''¡Devuelve la respuesta que ha generado Gilbert!'''\n return {'idea': idea(), 'palabras': palabras(), 'reto': reto()}\n\ndef juego(nivel = ''):\n '''Elige el nivel de dificultad que tendrá la tarea de Gilbert: fácil, 
normal o difícil.'''\n\n while nivel not in ['fácil', 'normal', 'difícil']:\n nivel = input('Elige el nivel de dificultad: fácil, normal o difícil: ').lower()\n partida = dice()\n if nivel == 'fácil':\n return idea()\n elif nivel == 'normal':\n return idea(), ', '.join(palabras())\n elif nivel == 'difícil':\n return idea(), ', '.join(palabras()), reto()\n else:\n return 'Parece que ha ocurrido algo inesperado.'\n\n##---------------------- Objetos externos\nwith open('reglas.md', \"r\") as texto:\n reglas = texto.read()\nwith open('sobre_proyecto.md', \"r\") as texto:\n sobre_proyecto = texto.read()\nwith open('desarrollo.md', \"r\") as texto:\n desarrollado = texto.read()\n\n##---------------------- Aplicación Streamlit\n##--- Textos\nst.title('Gilbert.dice')\nst.header('Generador de frases aleatorias')\nst.markdown('### Podrás utilizarlas para inspirarte, trabajar la imaginación y perder el miedo a la página en blanco.')\n\n##--- Menú de la izquierda\nst.sidebar.title(\"Acepta el reto y pincha en comenzar\")\nst.sidebar.write('Elige la dificultad y enfréntate a la página en blanco.')\nfichero = st.sidebar.selectbox(\"Selecciona la dificultad:\",('fácil', 'normal', 'difícil'))\n\n#-- Botones\ncomenzar = st.sidebar.button('Generar')\nsaber_mas = st.sidebar.button('Reglas del juego')\nproyecto = st.sidebar.button('Detalles del proyecto')\ndesarrollo = st.sidebar.button('Desarrollo de Gilbert')\n\n##--- Rutina del programa\nif comenzar:\n gilbert = juego(fichero)\n if fichero == 'fácil':\n st.markdown('La idea para tu próximo relato es:')\n st.markdown('**' + gilbert + '**\\n')\n elif fichero == 'normal':\n st.markdown('La idea para tu próximo relato es:')\n st.markdown('**' + gilbert[0] + '**\\n')\n st.markdown('El texto debe incluir estas palabras:')\n st.markdown('**' + gilbert[1] + '**\\n')\n else:\n st.markdown('La idea para tu próximo relato es:')\n st.markdown('**' + gilbert[0] + '**\\n')\n st.markdown('El texto debe incluir estas palabras:')\n st.markdown('**' + gilbert[1] + '**\\n')\n st.markdown('Además, debes tratar de cumplir con el siguiente reto:')\n st.markdown('**' + gilbert[2] + '**\\n')\n\nif saber_mas:\n st.markdown(reglas)\n\nif proyecto:\n st.markdown(sobre_proyecto)\n\nif desarrollo:\n st.markdown(desarrollado)\n\n##--- Pie del menú de la izquierda\nst.sidebar.markdown('Un proyecto personal de [**Erebyel** (María Reyes Rocío Pérez)](http://www.erebyel.es).')\n" ]
[ [ "pandas.DataFrame", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
GanshengT/mne-python
[ "49253e74308137e14187561a204d784ea28f12a7" ]
[ "mne/viz/misc.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Functions to make simple plots with M/EEG data.\"\"\"\n\n# Authors: Alexandre Gramfort <[email protected]>\n# Denis Engemann <[email protected]>\n# Martin Luessi <[email protected]>\n# Eric Larson <[email protected]>\n# Cathy Nangini <[email protected]>\n# Mainak Jas <[email protected]>\n#\n# License: Simplified BSD\n\nimport base64\nimport copy\nfrom glob import glob\nfrom io import BytesIO\nfrom itertools import cycle\nimport os.path as op\nimport warnings\nfrom distutils.version import LooseVersion\nfrom collections import defaultdict\n\nimport numpy as np\n\nfrom ..defaults import DEFAULTS\nfrom ..fixes import _get_img_fdata\nfrom ..rank import compute_rank\nfrom ..surface import read_surface\nfrom ..io.constants import FIFF\nfrom ..io.proj import make_projector\nfrom ..io.pick import (_DATA_CH_TYPES_SPLIT, pick_types, pick_info,\n pick_channels)\nfrom ..source_space import (read_source_spaces, SourceSpaces,\n _check_mri, _ensure_src)\nfrom ..transforms import invert_transform, apply_trans, _frame_to_str\nfrom ..utils import (logger, verbose, warn, _check_option, get_subjects_dir,\n _mask_to_onsets_offsets, _pl, _on_missing, fill_doc)\nfrom ..io.pick import _picks_by_type\nfrom ..filter import estimate_ringing_samples\nfrom .utils import (tight_layout, _get_color_list, _prepare_trellis, plt_show,\n _figure_agg)\n\n\ndef _index_info_cov(info, cov, exclude):\n if exclude == 'bads':\n exclude = info['bads']\n info = pick_info(info, pick_channels(info['ch_names'], cov['names'],\n exclude))\n del exclude\n picks_list = \\\n _picks_by_type(info, meg_combined=False, ref_meg=False,\n exclude=())\n picks_by_type = dict(picks_list)\n\n ch_names = [n for n in cov.ch_names if n in info['ch_names']]\n ch_idx = [cov.ch_names.index(n) for n in ch_names]\n\n info_ch_names = info['ch_names']\n idx_by_type = defaultdict(list)\n for ch_type, sel in picks_by_type.items():\n idx_by_type[ch_type] = [ch_names.index(info_ch_names[c])\n for c in sel if info_ch_names[c] in ch_names]\n idx_names = [(idx_by_type[key],\n '%s covariance' % DEFAULTS['titles'][key],\n DEFAULTS['units'][key],\n DEFAULTS['scalings'][key],\n key)\n for key in _DATA_CH_TYPES_SPLIT\n if len(idx_by_type[key]) > 0]\n C = cov.data[ch_idx][:, ch_idx]\n return info, C, ch_names, idx_names\n\n\n@verbose\ndef plot_cov(cov, info, exclude=(), colorbar=True, proj=False, show_svd=True,\n show=True, verbose=None):\n \"\"\"Plot Covariance data.\n\n Parameters\n ----------\n cov : instance of Covariance\n The covariance matrix.\n %(info_not_none)s\n exclude : list of str | str\n List of channels to exclude. If empty do not exclude any channel.\n If 'bads', exclude info['bads'].\n colorbar : bool\n Show colorbar or not.\n proj : bool\n Apply projections or not.\n show_svd : bool\n Plot also singular values of the noise covariance for each sensor\n type. We show square roots ie. standard deviations.\n show : bool\n Show figure if True.\n %(verbose)s\n\n Returns\n -------\n fig_cov : instance of matplotlib.figure.Figure\n The covariance plot.\n fig_svd : instance of matplotlib.figure.Figure | None\n The SVD spectra plot of the covariance.\n\n See Also\n --------\n mne.compute_rank\n\n Notes\n -----\n For each channel type, the rank is estimated using\n :func:`mne.compute_rank`.\n\n .. 
versionchanged:: 0.19\n Approximate ranks for each channel type are shown with red dashed lines.\n \"\"\"\n import matplotlib.pyplot as plt\n from matplotlib.colors import Normalize\n from scipy import linalg\n from ..cov import Covariance\n\n info, C, ch_names, idx_names = _index_info_cov(info, cov, exclude)\n del cov, exclude\n\n projs = []\n if proj:\n projs = copy.deepcopy(info['projs'])\n\n # Activate the projection items\n for p in projs:\n p['active'] = True\n\n P, ncomp, _ = make_projector(projs, ch_names)\n if ncomp > 0:\n logger.info(' Created an SSP operator (subspace dimension'\n ' = %d)' % ncomp)\n C = np.dot(P, np.dot(C, P.T))\n else:\n logger.info(' The projection vectors do not apply to these '\n 'channels.')\n\n fig_cov, axes = plt.subplots(1, len(idx_names), squeeze=False,\n figsize=(3.8 * len(idx_names), 3.7))\n for k, (idx, name, _, _, _) in enumerate(idx_names):\n vlim = np.max(np.abs(C[idx][:, idx]))\n im = axes[0, k].imshow(C[idx][:, idx], interpolation=\"nearest\",\n norm=Normalize(vmin=-vlim, vmax=vlim),\n cmap='RdBu_r')\n axes[0, k].set(title=name)\n\n if colorbar:\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n divider = make_axes_locatable(axes[0, k])\n cax = divider.append_axes(\"right\", size=\"5.5%\", pad=0.05)\n plt.colorbar(im, cax=cax, format='%.0e')\n\n fig_cov.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)\n tight_layout(fig=fig_cov)\n\n fig_svd = None\n if show_svd:\n fig_svd, axes = plt.subplots(1, len(idx_names), squeeze=False,\n figsize=(3.8 * len(idx_names), 3.7))\n for k, (idx, name, unit, scaling, key) in enumerate(idx_names):\n this_C = C[idx][:, idx]\n s = linalg.svd(this_C, compute_uv=False)\n this_C = Covariance(this_C, [info['ch_names'][ii] for ii in idx],\n [], [], 0)\n this_info = pick_info(info, idx)\n this_info['projs'] = []\n this_rank = compute_rank(this_C, info=this_info)\n # Protect against true zero singular values\n s[s <= 0] = 1e-10 * s[s > 0].min()\n s = np.sqrt(s) * scaling\n axes[0, k].plot(s, color='k', zorder=3)\n this_rank = this_rank[key]\n axes[0, k].axvline(this_rank - 1, ls='--', color='r',\n alpha=0.5, zorder=4, clip_on=False)\n axes[0, k].text(this_rank - 1, axes[0, k].get_ylim()[1],\n 'rank ≈ %d' % (this_rank,), ha='right', va='top',\n color='r', alpha=0.5, zorder=4)\n axes[0, k].set(ylabel=u'Noise σ (%s)' % unit, yscale='log',\n xlabel='Eigenvalue index', title=name,\n xlim=[0, len(s) - 1])\n tight_layout(fig=fig_svd)\n\n plt_show(show)\n\n return fig_cov, fig_svd\n\n\ndef plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,\n source_index=None, colorbar=False, show=True):\n \"\"\"Plot source power in time-freqency grid.\n\n Parameters\n ----------\n stcs : list of SourceEstimate\n Source power for consecutive time windows, one SourceEstimate object\n should be provided for each frequency bin.\n freq_bins : list of tuples of float\n Start and end points of frequency bins of interest.\n tmin : float\n Minimum time instant to show.\n tmax : float\n Maximum time instant to show.\n source_index : int | None\n Index of source for which the spectrogram will be plotted. 
If None,\n the source with the largest activation will be selected.\n colorbar : bool\n If true, a colorbar will be added to the plot.\n show : bool\n Show figure if True.\n\n Returns\n -------\n fig : instance of Figure\n The figure.\n \"\"\"\n import matplotlib.pyplot as plt\n\n # Input checks\n if len(stcs) == 0:\n raise ValueError('cannot plot spectrogram if len(stcs) == 0')\n\n stc = stcs[0]\n if tmin is not None and tmin < stc.times[0]:\n raise ValueError('tmin cannot be smaller than the first time point '\n 'provided in stcs')\n if tmax is not None and tmax > stc.times[-1] + stc.tstep:\n raise ValueError('tmax cannot be larger than the sum of the last time '\n 'point and the time step, which are provided in stcs')\n\n # Preparing time-frequency cell boundaries for plotting\n if tmin is None:\n tmin = stc.times[0]\n if tmax is None:\n tmax = stc.times[-1] + stc.tstep\n time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)\n freq_bounds = sorted(set(np.ravel(freq_bins)))\n freq_ticks = copy.deepcopy(freq_bounds)\n\n # Reject time points that will not be plotted and gather results\n source_power = []\n for stc in stcs:\n stc = stc.copy() # copy since crop modifies inplace\n stc.crop(tmin, tmax - stc.tstep)\n source_power.append(stc.data)\n source_power = np.array(source_power)\n\n # Finding the source with maximum source power\n if source_index is None:\n source_index = np.unravel_index(source_power.argmax(),\n source_power.shape)[1]\n\n # If there is a gap in the frequency bins record its locations so that it\n # can be covered with a gray horizontal bar\n gap_bounds = []\n for i in range(len(freq_bins) - 1):\n lower_bound = freq_bins[i][1]\n upper_bound = freq_bins[i + 1][0]\n if lower_bound != upper_bound:\n freq_bounds.remove(lower_bound)\n gap_bounds.append((lower_bound, upper_bound))\n\n # Preparing time-frequency grid for plotting\n time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)\n\n # Plotting the results\n fig = plt.figure(figsize=(9, 6))\n plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],\n cmap='Reds')\n ax = plt.gca()\n\n ax.set(title='Source power', xlabel='Time (s)', ylabel='Frequency (Hz)')\n\n time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]\n n_skip = 1 + len(time_bounds) // 10\n for i in range(len(time_bounds)):\n if i % n_skip != 0:\n time_tick_labels[i] = ''\n\n ax.set_xticks(time_bounds)\n ax.set_xticklabels(time_tick_labels)\n plt.xlim(time_bounds[0], time_bounds[-1])\n plt.yscale('log')\n ax.set_yticks(freq_ticks)\n ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])\n plt.ylim(freq_bounds[0], freq_bounds[-1])\n\n plt.grid(True, ls='-')\n if colorbar:\n plt.colorbar()\n tight_layout(fig=fig)\n\n # Covering frequency gaps with horizontal bars\n for lower_bound, upper_bound in gap_bounds:\n plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -\n lower_bound, time_bounds[0], color='#666666')\n\n plt_show(show)\n return fig\n\n\ndef _plot_mri_contours(mri_fname, surfaces, src, orientation='coronal',\n slices=None, show=True, show_indices=False,\n show_orientation=False, img_output=False, width=512):\n \"\"\"Plot BEM contours on anatomical slices.\"\"\"\n import matplotlib.pyplot as plt\n from matplotlib import patheffects\n from .._freesurfer import _mri_orientation, _read_mri_info\n # For ease of plotting, we will do everything in voxel coordinates.\n _check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))\n\n # Load the T1 data\n _, vox_mri_t, _, _, _, nim = 
_read_mri_info(\n mri_fname, units='mm', return_img=True)\n mri_vox_t = invert_transform(vox_mri_t)['trans']\n del vox_mri_t\n\n # plot axes (x, y, z) as data axes\n (x, y, z), (flip_x, flip_y, flip_z), order = _mri_orientation(\n nim, orientation)\n transpose = x < y\n\n data = _get_img_fdata(nim)\n shift_x = data.shape[x] if flip_x < 0 else 0\n shift_y = data.shape[y] if flip_y < 0 else 0\n n_slices = data.shape[z]\n if slices is None:\n slices = np.round(np.linspace(0, n_slices - 1, 14)).astype(int)[1:-1]\n slices = np.atleast_1d(slices).copy()\n slices[slices < 0] += n_slices # allow negative indexing\n if not np.array_equal(np.sort(slices), slices) or slices.ndim != 1 or \\\n slices.size < 1 or slices[0] < 0 or slices[-1] >= n_slices or \\\n slices.dtype.kind not in 'iu':\n raise ValueError('slices must be a sorted 1D array of int with unique '\n 'elements, at least one element, and no elements '\n 'greater than %d, got %s' % (n_slices - 1, slices))\n if flip_z < 0:\n # Proceed in the opposite order to maintain left-to-right / orientation\n slices = slices[::-1]\n\n # create of list of surfaces\n surfs = list()\n for file_name, color in surfaces:\n surf = dict()\n surf['rr'], surf['tris'] = read_surface(file_name)\n # move surface to voxel coordinate system\n surf['rr'] = apply_trans(mri_vox_t, surf['rr'])\n surfs.append((surf, color))\n\n sources = list()\n if src is not None:\n _ensure_src(src, extra=' or None')\n # Eventually we can relax this by allowing ``trans`` if need be\n if src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI:\n raise ValueError(\n 'Source space must be in MRI coordinates, got '\n f'{_frame_to_str[src[0][\"coord_frame\"]]}')\n for src_ in src:\n points = src_['rr'][src_['inuse'].astype(bool)]\n sources.append(apply_trans(mri_vox_t, points * 1e3))\n sources = np.concatenate(sources, axis=0)\n\n if img_output:\n n_col = n_axes = 1\n dpi = 96\n # 2x standard MRI resolution is probably good enough for the\n # traces\n w = width / dpi\n figsize = (w, w / data.shape[x] * data.shape[y])\n fig = _figure_agg(figsize=figsize, dpi=dpi, facecolor='k')\n ax = fig.add_axes([0, 0, 1, 1], frame_on=False, facecolor='k')\n axs = [ax] * len(slices)\n plt.close(fig)\n else:\n n_col = 4\n fig, axs, _, _ = _prepare_trellis(len(slices), n_col)\n fig.set_facecolor('k')\n dpi = fig.get_dpi()\n n_axes = len(axs)\n bounds = np.concatenate(\n [[-np.inf], slices[:-1] + np.diff(slices) / 2., [np.inf]]) # float\n slicer = [slice(None)] * 3\n ori_labels = dict(R='LR', A='PA', S='IS')\n xlabels, ylabels = ori_labels[order[0]], ori_labels[order[1]]\n path_effects = [patheffects.withStroke(linewidth=4, foreground=\"k\",\n alpha=0.75)]\n out = list() if img_output else fig\n for ai, (ax, sl, lower, upper) in enumerate(zip(\n axs, slices, bounds[:-1], bounds[1:])):\n # adjust the orientations for good view\n slicer[z] = sl\n dat = data[tuple(slicer)]\n dat = dat.T if transpose else dat\n dat = dat[::flip_y, ::flip_x]\n\n # First plot the anatomical data\n if img_output:\n ax.clear()\n ax.imshow(dat, cmap=plt.cm.gray, origin='lower')\n ax.set_autoscale_on(False)\n ax.axis('off')\n ax.set_aspect('equal') # XXX eventually could deal with zooms\n\n # and then plot the contours on top\n for surf, color in surfs:\n with warnings.catch_warnings(record=True): # ignore contour warn\n warnings.simplefilter('ignore')\n ax.tricontour(flip_x * surf['rr'][:, x] + shift_x,\n flip_y * surf['rr'][:, y] + shift_y,\n surf['tris'], surf['rr'][:, z],\n levels=[sl], colors=color, linewidths=1.0,\n zorder=1)\n\n if 
len(sources):\n in_slice = (sources[:, z] >= lower) & (sources[:, z] < upper)\n ax.scatter(flip_x * sources[in_slice, x] + shift_x,\n flip_y * sources[in_slice, y] + shift_y,\n marker='.', color='#FF00FF', s=1, zorder=2)\n if show_indices:\n ax.text(dat.shape[1] // 8 + 0.5, 0.5, str(sl),\n color='w', fontsize='x-small', va='bottom', ha='left')\n # label the axes\n kwargs = dict(\n color='#66CCEE', fontsize='medium', path_effects=path_effects,\n family='monospace', clip_on=False, zorder=5, weight='bold')\n if show_orientation:\n if ai % n_col == 0: # left\n ax.text(0, dat.shape[0] / 2., xlabels[0],\n va='center', ha='left', **kwargs)\n if ai % n_col == n_col - 1 or ai == n_axes - 1: # right\n ax.text(dat.shape[1] - 1, dat.shape[0] / 2., xlabels[1],\n va='center', ha='right', **kwargs)\n if ai >= n_axes - n_col: # bottom\n ax.text(dat.shape[1] / 2., 0, ylabels[0],\n ha='center', va='bottom', **kwargs)\n if ai < n_col or n_col == 1: # top\n ax.text(dat.shape[1] / 2., dat.shape[0] - 1, ylabels[1],\n ha='center', va='top', **kwargs)\n if img_output:\n output = BytesIO()\n fig.savefig(output, bbox_inches='tight',\n pad_inches=0, format='png', dpi=dpi)\n out.append(base64.b64encode(output.getvalue()).decode('ascii'))\n\n fig.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,\n hspace=0.)\n plt_show(show, fig=fig)\n return out, flip_z\n\n\ndef plot_bem(subject=None, subjects_dir=None, orientation='coronal',\n slices=None, brain_surfaces=None, src=None, show=True,\n show_indices=True, mri='T1.mgz', show_orientation=True):\n \"\"\"Plot BEM contours on anatomical slices.\n\n Parameters\n ----------\n subject : str\n Subject name.\n subjects_dir : str | None\n Path to the SUBJECTS_DIR. If None, the path is obtained by using\n the environment variable SUBJECTS_DIR.\n orientation : str\n 'coronal' or 'axial' or 'sagittal'.\n slices : list of int\n Slice indices.\n brain_surfaces : None | str | list of str\n One or more brain surface to plot (optional). Entries should correspond\n to files in the subject's ``surf`` directory (e.g. ``\"white\"``).\n src : None | SourceSpaces | str\n SourceSpaces instance or path to a source space to plot individual\n sources as scatter-plot. Sources will be shown on exactly one slice\n (whichever slice is closest to each source in the given orientation\n plane). Path can be absolute or relative to the subject's ``bem``\n folder.\n\n .. versionchanged:: 0.20\n All sources are shown on the nearest slice rather than some\n being omitted.\n show : bool\n Show figure if True.\n show_indices : bool\n Show slice indices if True.\n\n .. versionadded:: 0.20\n mri : str\n The name of the MRI to use. Can be a standard FreeSurfer MRI such as\n ``'T1.mgz'``, or a full path to a custom MRI file.\n\n .. versionadded:: 0.21\n show_orientation : str\n Show the orientation (L/R, P/A, I/S) of the data slices.\n\n .. versionadded:: 0.21\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n The figure.\n\n See Also\n --------\n mne.viz.plot_alignment\n\n Notes\n -----\n Images are plotted in MRI voxel coordinates.\n\n If ``src`` is not None, for a given slice index, all source points are\n shown that are halfway between the previous slice and the given slice,\n and halfway between the given slice and the next slice.\n For large slice decimations, this can\n make some source points appear outside the BEM contour, which is shown\n for the given slice index. 
For example, in the case where the single\n midpoint slice is used ``slices=[128]``, all source points will be shown\n on top of the midpoint MRI slice with the BEM boundary drawn for that\n slice.\n \"\"\"\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n mri_fname = _check_mri(mri, subject, subjects_dir)\n\n # Get the BEM surface filenames\n bem_path = op.join(subjects_dir, subject, 'bem')\n\n if not op.isdir(bem_path):\n raise IOError('Subject bem directory \"%s\" does not exist' % bem_path)\n\n surfaces = _get_bem_plotting_surfaces(bem_path)\n if brain_surfaces is not None:\n if isinstance(brain_surfaces, str):\n brain_surfaces = (brain_surfaces,)\n for surf_name in brain_surfaces:\n for hemi in ('lh', 'rh'):\n surf_fname = op.join(subjects_dir, subject, 'surf',\n hemi + '.' + surf_name)\n if op.exists(surf_fname):\n surfaces.append((surf_fname, '#00DD00'))\n else:\n raise IOError(\"Surface %s does not exist.\" % surf_fname)\n\n if isinstance(src, str):\n if not op.exists(src):\n src_ = op.join(subjects_dir, subject, 'bem', src)\n if op.exists(src_):\n src = src_\n else:\n raise IOError(\"%s does not exist\" % src)\n src = read_source_spaces(src)\n elif src is not None and not isinstance(src, SourceSpaces):\n raise TypeError(\"src needs to be None, str or SourceSpaces instance, \"\n \"not %s\" % repr(src))\n\n if len(surfaces) == 0:\n raise IOError('No surface files found. Surface files must end with '\n 'inner_skull.surf, outer_skull.surf or outer_skin.surf')\n\n # Plot the contours\n return _plot_mri_contours(mri_fname, surfaces, src, orientation, slices,\n show, show_indices, show_orientation)[0]\n\n\ndef _get_bem_plotting_surfaces(bem_path):\n surfaces = []\n for surf_name, color in (('*inner_skull', '#FF0000'),\n ('*outer_skull', '#FFFF00'),\n ('*outer_skin', '#FFAA80')):\n surf_fname = glob(op.join(bem_path, surf_name + '.surf'))\n if len(surf_fname) > 0:\n surf_fname = surf_fname[0]\n logger.info(\"Using surface: %s\" % surf_fname)\n surfaces.append((surf_fname, color))\n return surfaces\n\n\n@verbose\ndef plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,\n axes=None, equal_spacing=True, show=True, on_missing='raise',\n verbose=None):\n \"\"\"Plot events to get a visual display of the paradigm.\n\n Parameters\n ----------\n events : array, shape (n_events, 3)\n The events.\n sfreq : float | None\n The sample frequency. If None, data will be displayed in samples (not\n seconds).\n first_samp : int\n The index of the first sample. Recordings made on Neuromag systems\n number samples relative to the system start (not relative to the\n beginning of the recording). In such cases the ``raw.first_samp``\n attribute can be passed here. Default is 0.\n color : dict | None\n Dictionary of event_id integers as keys and colors as values. If None,\n colors are automatically drawn from a default list (cycled through if\n number of events longer than list of default colors). Color can be any\n valid :doc:`matplotlib color <tutorials/colors/colors>`.\n event_id : dict | None\n Dictionary of event labels (e.g. 'aud_l') as keys and their associated\n event_id values. Labels are used to plot a legend. If None, no legend\n is drawn.\n axes : instance of Axes\n The subplot handle.\n equal_spacing : bool\n Use equal spacing between events in y-axis.\n show : bool\n Show figure if True.\n %(on_missing_events)s\n %(verbose)s\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n The figure object containing the plot.\n\n Notes\n -----\n .. 
versionadded:: 0.9.0\n \"\"\"\n if sfreq is None:\n sfreq = 1.0\n xlabel = 'Samples'\n else:\n xlabel = 'Time (s)'\n\n events = np.asarray(events)\n if len(events) == 0:\n raise ValueError('No events in events array, cannot plot.')\n unique_events = np.unique(events[:, 2])\n\n if event_id is not None:\n # get labels and unique event ids from event_id dict,\n # sorted by value\n event_id_rev = {v: k for k, v in event_id.items()}\n conditions, unique_events_id = zip(*sorted(event_id.items(),\n key=lambda x: x[1]))\n\n keep = np.ones(len(unique_events_id), bool)\n for ii, this_event in enumerate(unique_events_id):\n if this_event not in unique_events:\n msg = f'{this_event} from event_id is not present in events.'\n _on_missing(on_missing, msg)\n keep[ii] = False\n conditions = [cond for cond, k in zip(conditions, keep) if k]\n unique_events_id = [id_ for id_, k in zip(unique_events_id, keep) if k]\n if len(unique_events_id) == 0:\n raise RuntimeError('No usable event IDs found')\n\n for this_event in unique_events:\n if this_event not in unique_events_id:\n warn('event %s missing from event_id will be ignored'\n % this_event)\n\n else:\n unique_events_id = unique_events\n\n color = _handle_event_colors(color, unique_events, event_id)\n import matplotlib.pyplot as plt\n\n fig = None\n if axes is None:\n fig = plt.figure()\n ax = axes if axes else plt.gca()\n\n unique_events_id = np.array(unique_events_id)\n min_event = np.min(unique_events_id)\n max_event = np.max(unique_events_id)\n max_x = (events[np.in1d(events[:, 2], unique_events_id), 0].max() -\n first_samp) / sfreq\n\n handles, labels = list(), list()\n for idx, ev in enumerate(unique_events_id):\n ev_mask = events[:, 2] == ev\n count = ev_mask.sum()\n if count == 0:\n continue\n y = np.full(count, idx + 1 if equal_spacing else events[ev_mask, 2][0])\n if event_id is not None:\n event_label = '%s (%s)' % (event_id_rev[ev], count)\n else:\n event_label = 'N=%d' % (count,)\n labels.append(event_label)\n kwargs = {}\n if ev in color:\n kwargs['color'] = color[ev]\n handles.append(\n ax.plot((events[ev_mask, 0] - first_samp) / sfreq,\n y, '.', clip_on=False, **kwargs)[0])\n\n if equal_spacing:\n ax.set_ylim(0, unique_events_id.size + 1)\n ax.set_yticks(1 + np.arange(unique_events_id.size))\n ax.set_yticklabels(unique_events_id)\n else:\n ax.set_ylim([min_event - 1, max_event + 1])\n\n ax.set(xlabel=xlabel, ylabel='Event id', xlim=[0, max_x])\n\n ax.grid(True)\n\n fig = fig if fig is not None else plt.gcf()\n # reverse order so that the highest numbers are at the top\n # (match plot order)\n handles, labels = handles[::-1], labels[::-1]\n box = ax.get_position()\n factor = 0.8 if event_id is not None else 0.9\n ax.set_position([box.x0, box.y0, box.width * factor, box.height])\n ax.legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5),\n fontsize='small')\n fig.canvas.draw()\n plt_show(show)\n return fig\n\n\ndef _get_presser(fig):\n \"\"\"Get our press callback.\"\"\"\n import matplotlib\n callbacks = fig.canvas.callbacks.callbacks['button_press_event']\n func = None\n for key, val in callbacks.items():\n if LooseVersion(matplotlib.__version__) >= '3':\n func = val()\n else:\n func = val.func\n if func.__class__.__name__ == 'partial':\n break\n else:\n func = None\n assert func is not None\n return func\n\n\ndef plot_dipole_amplitudes(dipoles, colors=None, show=True):\n \"\"\"Plot the amplitude traces of a set of dipoles.\n\n Parameters\n ----------\n dipoles : list of instance of Dipole\n The dipoles whose amplitudes should be 
shown.\n colors : list of color | None\n Color to plot with each dipole. If None default colors are used.\n show : bool\n Show figure if True.\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n The figure object containing the plot.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n import matplotlib.pyplot as plt\n if colors is None:\n colors = cycle(_get_color_list())\n fig, ax = plt.subplots(1, 1)\n xlim = [np.inf, -np.inf]\n for dip, color in zip(dipoles, colors):\n ax.plot(dip.times, dip.amplitude * 1e9, color=color, linewidth=1.5)\n xlim[0] = min(xlim[0], dip.times[0])\n xlim[1] = max(xlim[1], dip.times[-1])\n ax.set(xlim=xlim, xlabel='Time (s)', ylabel='Amplitude (nAm)')\n if show:\n fig.show(warn=False)\n return fig\n\n\ndef adjust_axes(axes, remove_spines=('top', 'right'), grid=True):\n \"\"\"Adjust some properties of axes.\n\n Parameters\n ----------\n axes : list\n List of axes to process.\n remove_spines : list of str\n Which axis spines to remove.\n grid : bool\n Turn grid on (True) or off (False).\n \"\"\"\n axes = [axes] if not isinstance(axes, (list, tuple, np.ndarray)) else axes\n for ax in axes:\n if grid:\n ax.grid(zorder=0)\n for key in remove_spines:\n ax.spines[key].set_visible(False)\n\n\ndef _filter_ticks(lims, fscale):\n \"\"\"Create approximately spaced ticks between lims.\"\"\"\n if fscale == 'linear':\n return None, None # let matplotlib handle it\n lims = np.array(lims)\n ticks = list()\n if lims[1] > 20 * lims[0]:\n base = np.array([1, 2, 4])\n else:\n base = np.arange(1, 11)\n for exp in range(int(np.floor(np.log10(lims[0]))),\n int(np.floor(np.log10(lims[1]))) + 1):\n ticks += (base * (10 ** exp)).tolist()\n ticks = np.array(ticks)\n ticks = ticks[(ticks >= lims[0]) & (ticks <= lims[1])]\n ticklabels = [('%g' if t < 1 else '%d') % t for t in ticks]\n return ticks, ticklabels\n\n\ndef _get_flim(flim, fscale, freq, sfreq=None):\n \"\"\"Get reasonable frequency limits.\"\"\"\n if flim is None:\n if freq is None:\n flim = [0.1 if fscale == 'log' else 0., sfreq / 2.]\n else:\n if fscale == 'linear':\n flim = [freq[0]]\n else:\n flim = [freq[0] if freq[0] > 0 else 0.1 * freq[1]]\n flim += [freq[-1]]\n if fscale == 'log':\n if flim[0] <= 0:\n raise ValueError('flim[0] must be positive, got %s' % flim[0])\n elif flim[0] < 0:\n raise ValueError('flim[0] must be non-negative, got %s' % flim[0])\n return flim\n\n\ndef _check_fscale(fscale):\n \"\"\"Check for valid fscale.\"\"\"\n if not isinstance(fscale, str) or fscale not in ('log', 'linear'):\n raise ValueError('fscale must be \"log\" or \"linear\", got %s'\n % (fscale,))\n\n\n_DEFAULT_ALIM = (-80, 10)\n\n\ndef plot_filter(h, sfreq, freq=None, gain=None, title=None, color='#1f77b4',\n flim=None, fscale='log', alim=_DEFAULT_ALIM, show=True,\n compensate=False, plot=('time', 'magnitude', 'delay'),\n axes=None):\n \"\"\"Plot properties of a filter.\n\n Parameters\n ----------\n h : dict or ndarray\n An IIR dict or 1D ndarray of coefficients (for FIR filter).\n sfreq : float\n Sample rate of the data (Hz).\n freq : array-like or None\n The ideal response frequencies to plot (must be in ascending order).\n If None (default), do not plot the ideal response.\n gain : array-like or None\n The ideal response gains to plot.\n If None (default), do not plot the ideal response.\n title : str | None\n The title to use. 
If None (default), determine the title based\n on the type of the system.\n color : color object\n The color to use (default '#1f77b4').\n flim : tuple or None\n If not None, the x-axis frequency limits (Hz) to use.\n If None, freq will be used. If None (default) and freq is None,\n ``(0.1, sfreq / 2.)`` will be used.\n fscale : str\n Frequency scaling to use, can be \"log\" (default) or \"linear\".\n alim : tuple\n The y-axis amplitude limits (dB) to use (default: (-60, 10)).\n show : bool\n Show figure if True (default).\n compensate : bool\n If True, compensate for the filter delay (phase will not be shown).\n\n - For linear-phase FIR filters, this visualizes the filter coefficients\n assuming that the output will be shifted by ``N // 2``.\n - For IIR filters, this changes the filter coefficient display\n by filtering backward and forward, and the frequency response\n by squaring it.\n\n .. versionadded:: 0.18\n plot : list | tuple | str\n A list of the requested plots from ``time``, ``magnitude`` and\n ``delay``. Default is to plot all three filter properties\n ('time', 'magnitude', 'delay').\n\n .. versionadded:: 0.21.0\n axes : instance of Axes | list | None\n The axes to plot to. If list, the list must be a list of Axes of\n the same length as the number of requested plot types. If instance of\n Axes, there must be only one filter property plotted.\n Defaults to ``None``.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n The figure containing the plots.\n\n See Also\n --------\n mne.filter.create_filter\n plot_ideal_filter\n\n Notes\n -----\n .. versionadded:: 0.14\n \"\"\"\n from scipy.signal import (\n freqz, group_delay, lfilter, filtfilt, sosfilt, sosfiltfilt)\n import matplotlib.pyplot as plt\n\n sfreq = float(sfreq)\n _check_option('fscale', fscale, ['log', 'linear'])\n if isinstance(plot, str):\n plot = [plot]\n for xi, x in enumerate(plot):\n _check_option('plot[%d]' % xi, x, ('magnitude', 'delay', 'time'))\n\n flim = _get_flim(flim, fscale, freq, sfreq)\n if fscale == 'log':\n omega = np.logspace(np.log10(flim[0]), np.log10(flim[1]), 1000)\n else:\n omega = np.linspace(flim[0], flim[1], 1000)\n xticks, xticklabels = _filter_ticks(flim, fscale)\n omega /= sfreq / (2 * np.pi)\n if isinstance(h, dict): # IIR h.ndim == 2: # second-order sections\n if 'sos' in h:\n H = np.ones(len(omega), np.complex128)\n gd = np.zeros(len(omega))\n for section in h['sos']:\n this_H = freqz(section[:3], section[3:], omega)[1]\n H *= this_H\n if compensate:\n H *= this_H.conj() # time reversal is freq conj\n else:\n # Assume the forward-backward delay zeros out, which it\n # mostly should\n with warnings.catch_warnings(record=True): # singular GD\n warnings.simplefilter('ignore')\n gd += group_delay((section[:3], section[3:]), omega)[1]\n n = estimate_ringing_samples(h['sos'])\n delta = np.zeros(n)\n delta[0] = 1\n if compensate:\n delta = np.pad(delta, [(n - 1, 0)], 'constant')\n func = sosfiltfilt\n gd += (len(delta) - 1) // 2\n else:\n func = sosfilt\n h = func(h['sos'], delta)\n else:\n H = freqz(h['b'], h['a'], omega)[1]\n if compensate:\n H *= H.conj()\n with warnings.catch_warnings(record=True): # singular GD\n warnings.simplefilter('ignore')\n gd = group_delay((h['b'], h['a']), omega)[1]\n if compensate:\n gd += group_delay(h['b'].conj(), h['a'].conj(), omega)[1]\n n = estimate_ringing_samples((h['b'], h['a']))\n delta = np.zeros(n)\n delta[0] = 1\n if compensate:\n delta = np.pad(delta, [(n - 1, 0)], 'constant')\n func = filtfilt\n else:\n func = 
lfilter\n h = func(h['b'], h['a'], delta)\n if title is None:\n title = 'SOS (IIR) filter'\n if compensate:\n title += ' (forward-backward)'\n else:\n H = freqz(h, worN=omega)[1]\n with warnings.catch_warnings(record=True): # singular GD\n warnings.simplefilter('ignore')\n gd = group_delay((h, [1.]), omega)[1]\n title = 'FIR filter' if title is None else title\n if compensate:\n title += ' (delay-compensated)'\n\n fig = None\n if axes is None:\n fig, axes = plt.subplots(len(plot), 1)\n if isinstance(axes, plt.Axes):\n axes = [axes]\n elif isinstance(axes, np.ndarray):\n axes = list(axes)\n if fig is None:\n fig = axes[0].get_figure()\n if len(axes) != len(plot):\n raise ValueError('Length of axes (%d) must be the same as number of '\n 'requested filter properties (%d)'\n % (len(axes), len(plot)))\n\n t = np.arange(len(h))\n dlim = np.abs(t).max() / 2.\n dlim = [-dlim, dlim]\n if compensate:\n n_shift = (len(h) - 1) // 2\n t -= n_shift\n assert t[0] == -t[-1]\n gd -= n_shift\n t = t / sfreq\n gd = gd / sfreq\n f = omega * sfreq / (2 * np.pi)\n sl = slice(0 if fscale == 'linear' else 1, None, None)\n mag = 10 * np.log10(np.maximum((H * H.conj()).real, 1e-20))\n\n if 'time' in plot:\n ax_time_idx = np.where([p == 'time' for p in plot])[0][0]\n axes[ax_time_idx].plot(t, h, color=color)\n axes[ax_time_idx].set(xlim=t[[0, -1]], xlabel='Time (s)',\n ylabel='Amplitude', title=title)\n # Magnitude\n if 'magnitude' in plot:\n ax_mag_idx = np.where([p == 'magnitude' for p in plot])[0][0]\n axes[ax_mag_idx].plot(f[sl], mag[sl], color=color,\n linewidth=2, zorder=4)\n if freq is not None and gain is not None:\n plot_ideal_filter(freq, gain, axes[ax_mag_idx],\n fscale=fscale, show=False)\n axes[ax_mag_idx].set(ylabel='Magnitude (dB)', xlabel='', xscale=fscale)\n if xticks is not None:\n axes[ax_mag_idx].set(xticks=xticks)\n axes[ax_mag_idx].set(xticklabels=xticklabels)\n axes[ax_mag_idx].set(xlim=flim, ylim=alim, xlabel='Frequency (Hz)',\n ylabel='Amplitude (dB)')\n # Delay\n if 'delay' in plot:\n ax_delay_idx = np.where([p == 'delay' for p in plot])[0][0]\n axes[ax_delay_idx].plot(f[sl], gd[sl], color=color,\n linewidth=2, zorder=4)\n # shade nulled regions\n for start, stop in zip(*_mask_to_onsets_offsets(mag <= -39.9)):\n axes[ax_delay_idx].axvspan(f[start], f[stop - 1],\n facecolor='k', alpha=0.05,\n zorder=5)\n axes[ax_delay_idx].set(xlim=flim, ylabel='Group delay (s)',\n xlabel='Frequency (Hz)',\n xscale=fscale)\n if xticks is not None:\n axes[ax_delay_idx].set(xticks=xticks)\n axes[ax_delay_idx].set(xticklabels=xticklabels)\n axes[ax_delay_idx].set(xlim=flim, ylim=dlim, xlabel='Frequency (Hz)',\n ylabel='Delay (s)')\n\n adjust_axes(axes)\n tight_layout()\n plt_show(show)\n return fig\n\n\ndef plot_ideal_filter(freq, gain, axes=None, title='', flim=None, fscale='log',\n alim=_DEFAULT_ALIM, color='r', alpha=0.5, linestyle='--',\n show=True):\n \"\"\"Plot an ideal filter response.\n\n Parameters\n ----------\n freq : array-like\n The ideal response frequencies to plot (must be in ascending order).\n gain : array-like or None\n The ideal response gains to plot.\n axes : instance of Axes | None\n The subplot handle. 
With None (default), axes are created.\n title : str\n The title to use, (default: '').\n flim : tuple or None\n If not None, the x-axis frequency limits (Hz) to use.\n If None (default), freq used.\n fscale : str\n Frequency scaling to use, can be \"log\" (default) or \"linear\".\n alim : tuple\n If not None (default), the y-axis limits (dB) to use.\n color : color object\n The color to use (default: 'r').\n alpha : float\n The alpha to use (default: 0.5).\n linestyle : str\n The line style to use (default: '--').\n show : bool\n Show figure if True (default).\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n The figure.\n\n See Also\n --------\n plot_filter\n\n Notes\n -----\n .. versionadded:: 0.14\n\n Examples\n --------\n Plot a simple ideal band-pass filter::\n\n >>> from mne.viz import plot_ideal_filter\n >>> freq = [0, 1, 40, 50]\n >>> gain = [0, 1, 1, 0]\n >>> plot_ideal_filter(freq, gain, flim=(0.1, 100)) #doctest: +ELLIPSIS\n <...Figure...>\n \"\"\"\n import matplotlib.pyplot as plt\n my_freq, my_gain = list(), list()\n if freq[0] != 0:\n raise ValueError('freq should start with DC (zero) and end with '\n 'Nyquist, but got %s for DC' % (freq[0],))\n freq = np.array(freq)\n # deal with semilogx problems @ x=0\n _check_option('fscale', fscale, ['log', 'linear'])\n if fscale == 'log':\n freq[0] = 0.1 * freq[1] if flim is None else min(flim[0], freq[1])\n flim = _get_flim(flim, fscale, freq)\n transitions = list()\n for ii in range(len(freq)):\n if ii < len(freq) - 1 and gain[ii] != gain[ii + 1]:\n transitions += [[freq[ii], freq[ii + 1]]]\n my_freq += np.linspace(freq[ii], freq[ii + 1], 20,\n endpoint=False).tolist()\n my_gain += np.linspace(gain[ii], gain[ii + 1], 20,\n endpoint=False).tolist()\n else:\n my_freq.append(freq[ii])\n my_gain.append(gain[ii])\n my_gain = 10 * np.log10(np.maximum(my_gain, 10 ** (alim[0] / 10.)))\n if axes is None:\n axes = plt.subplots(1)[1]\n for transition in transitions:\n axes.axvspan(*transition, color=color, alpha=0.1)\n axes.plot(my_freq, my_gain, color=color, linestyle=linestyle, alpha=0.5,\n linewidth=4, zorder=3)\n xticks, xticklabels = _filter_ticks(flim, fscale)\n axes.set(ylim=alim, xlabel='Frequency (Hz)', ylabel='Amplitude (dB)',\n xscale=fscale)\n if xticks is not None:\n axes.set(xticks=xticks)\n axes.set(xticklabels=xticklabels)\n axes.set(xlim=flim)\n if title:\n axes.set(title=title)\n adjust_axes(axes)\n tight_layout()\n plt_show(show)\n return axes.figure\n\n\ndef _handle_event_colors(color_dict, unique_events, event_id):\n \"\"\"Create event-integer-to-color mapping, assigning defaults as needed.\"\"\"\n default_colors = dict(zip(sorted(unique_events), cycle(_get_color_list())))\n # warn if not enough colors\n if color_dict is None:\n if len(unique_events) > len(_get_color_list()):\n warn('More events than default colors available. You should pass '\n 'a list of unique colors.')\n else:\n custom_colors = dict()\n for key, color in color_dict.items():\n if key in unique_events: # key was a valid event integer\n custom_colors[key] = color\n elif key in event_id: # key was an event label\n custom_colors[event_id[key]] = color\n else: # key not a valid event, warn and ignore\n warn('Event ID %s is in the color dict but is not '\n 'present in events or event_id.' % str(key))\n # warn if color_dict is missing any entries\n unassigned = sorted(set(unique_events) - set(custom_colors))\n if len(unassigned):\n unassigned_str = ', '.join(str(e) for e in unassigned)\n warn('Color was not assigned for event%s %s. 
Default colors will '\n 'be used.' % (_pl(unassigned), unassigned_str))\n default_colors.update(custom_colors)\n return default_colors\n\n\n@fill_doc\ndef plot_csd(csd, info=None, mode='csd', colorbar=True, cmap=None,\n n_cols=None, show=True):\n \"\"\"Plot CSD matrices.\n\n A sub-plot is created for each frequency. If an info object is passed to\n the function, different channel types are plotted in different figures.\n\n Parameters\n ----------\n csd : instance of CrossSpectralDensity\n The CSD matrix to plot.\n %(info)s\n Used to split the figure by channel-type, if provided.\n By default, the CSD matrix is plotted as a whole.\n mode : 'csd' | 'coh'\n Whether to plot the cross-spectral density ('csd', the default), or\n the coherence ('coh') between the channels.\n colorbar : bool\n Whether to show a colorbar. Defaults to ``True``.\n cmap : str | None\n The matplotlib colormap to use. Defaults to None, which means the\n colormap will default to matplotlib's default.\n n_cols : int | None\n CSD matrices are plotted in a grid. This parameter controls how\n many matrix to plot side by side before starting a new row. By\n default, a number will be chosen to make the grid as square as\n possible.\n show : bool\n Whether to show the figure. Defaults to ``True``.\n\n Returns\n -------\n fig : list of Figure\n The figures created by this function.\n \"\"\"\n import matplotlib.pyplot as plt\n\n if mode not in ['csd', 'coh']:\n raise ValueError('\"mode\" should be either \"csd\" or \"coh\".')\n\n if info is not None:\n info_ch_names = info['ch_names']\n sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,\n exclude=[])\n sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,\n exclude=[])\n sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,\n exclude=[])\n idx_eeg = [csd.ch_names.index(info_ch_names[c])\n for c in sel_eeg if info_ch_names[c] in csd.ch_names]\n idx_mag = [csd.ch_names.index(info_ch_names[c])\n for c in sel_mag if info_ch_names[c] in csd.ch_names]\n idx_grad = [csd.ch_names.index(info_ch_names[c])\n for c in sel_grad if info_ch_names[c] in csd.ch_names]\n indices = [idx_eeg, idx_mag, idx_grad]\n titles = ['EEG', 'Magnetometers', 'Gradiometers']\n\n if mode == 'csd':\n # The units in which to plot the CSD\n units = dict(eeg='µV²', grad='fT²/cm²', mag='fT²')\n scalings = dict(eeg=1e12, grad=1e26, mag=1e30)\n else:\n indices = [np.arange(len(csd.ch_names))]\n if mode == 'csd':\n titles = ['Cross-spectral density']\n # Units and scaling unknown\n units = dict()\n scalings = dict()\n elif mode == 'coh':\n titles = ['Coherence']\n\n n_freqs = len(csd.frequencies)\n\n if n_cols is None:\n n_cols = int(np.ceil(np.sqrt(n_freqs)))\n n_rows = int(np.ceil(n_freqs / float(n_cols)))\n\n figs = []\n for ind, title, ch_type in zip(indices, titles, ['eeg', 'mag', 'grad']):\n if len(ind) == 0:\n continue\n\n fig, axes = plt.subplots(n_rows, n_cols, squeeze=False,\n figsize=(2 * n_cols + 1, 2.2 * n_rows))\n\n csd_mats = []\n for i in range(len(csd.frequencies)):\n cm = csd.get_data(index=i)[ind][:, ind]\n if mode == 'csd':\n cm = np.abs(cm) * scalings.get(ch_type, 1)\n elif mode == 'coh':\n # Compute coherence from the CSD matrix\n psd = np.diag(cm).real\n cm = np.abs(cm) ** 2 / psd[np.newaxis, :] / psd[:, np.newaxis]\n csd_mats.append(cm)\n\n vmax = np.max(csd_mats)\n\n for i, (freq, mat) in enumerate(zip(csd.frequencies, csd_mats)):\n ax = axes[i // n_cols][i % n_cols]\n im = ax.imshow(mat, interpolation='nearest', cmap=cmap, vmin=0,\n vmax=vmax)\n 
ax.set_xticks([])\n ax.set_yticks([])\n if csd._is_sum:\n ax.set_title('%.1f-%.1f Hz.' % (np.min(freq),\n np.max(freq)))\n else:\n ax.set_title('%.1f Hz.' % freq)\n\n plt.suptitle(title)\n plt.subplots_adjust(top=0.8)\n\n if colorbar:\n cb = plt.colorbar(im, ax=[a for ax_ in axes for a in ax_])\n if mode == 'csd':\n label = u'CSD'\n if ch_type in units:\n label += u' (%s)' % units[ch_type]\n cb.set_label(label)\n elif mode == 'coh':\n cb.set_label('Coherence')\n\n figs.append(fig)\n\n plt_show(show)\n return figs\n\n\ndef plot_chpi_snr(snr_dict, axes=None):\n \"\"\"Plot time-varying SNR estimates of the HPI coils.\n\n Parameters\n ----------\n snr_dict : dict\n The dictionary returned by `~mne.chpi.compute_chpi_snr`. Must have keys\n ``times``, ``freqs``, ``TYPE_snr``, ``TYPE_power``, and ``TYPE_resid``\n (where ``TYPE`` can be ``mag`` or ``grad`` or both).\n axes : None | list of matplotlib.axes.Axes\n Figure axes in which to draw the SNR, power, and residual plots. The\n number of axes should be 3× the number of MEG sensor types present in\n ``snr_dict``. If ``None`` (the default), a new\n `~matplotlib.figure.Figure` is created with the required number of\n axes.\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n A figure with subplots for SNR, power, and residual variance,\n separately for magnetometers and/or gradiometers (depending on what is\n present in ``snr_dict``).\n\n Notes\n -----\n If you supply a list of existing `~matplotlib.axes.Axes`, then the figure\n legend will not be drawn automatically. If you still want it, running\n ``fig.legend(loc='right', title='cHPI frequencies')`` will recreate it,\n though you may also need to manually adjust the margin to make room for it\n (e.g., using ``fig.subplots_adjust(right=0.8)``).\n\n .. versionadded:: 0.24\n \"\"\"\n import matplotlib.pyplot as plt\n\n valid_keys = list(snr_dict)[2:]\n titles = dict(snr='SNR', power='cHPI power', resid='Residual variance')\n full_names = dict(mag='magnetometers', grad='gradiometers')\n axes_was_none = axes is None\n if axes_was_none:\n fig, axes = plt.subplots(len(valid_keys), 1, sharex=True)\n else:\n fig = axes[0].get_figure()\n if len(axes) != len(valid_keys):\n raise ValueError(f'axes must be a list of {len(valid_keys)} axes, got '\n f'length {len(axes)} ({axes}).')\n fig.set_size_inches(10, 10)\n legend_labels_exist = False\n for key, ax in zip(valid_keys, axes):\n ch_type, kind = key.split('_')\n scaling = 1 if kind == 'snr' else DEFAULTS['scalings'][ch_type]\n plot_kwargs = dict(color='k') if kind == 'resid' else dict()\n lines = ax.plot(snr_dict['times'], snr_dict[key] * scaling ** 2,\n **plot_kwargs)\n # the freqs should be the same for all sensor types (and for SNR and\n # power subplots), so we only need to label the lines on one axes\n # (otherwise we get duplicate legend entries).\n if not legend_labels_exist:\n for line, freq in zip(lines, snr_dict['freqs']):\n line.set_label(f'{freq} Hz')\n legend_labels_exist = True\n unit = DEFAULTS['units'][ch_type]\n unit = f'({unit})' if '/' in unit else unit\n set_kwargs = dict(title=f'{titles[kind]}, {full_names[ch_type]}',\n ylabel='dB' if kind == 'snr' else f'{unit}²')\n if not axes_was_none:\n set_kwargs.update(xlabel='Time (s)')\n ax.set(**set_kwargs)\n if axes_was_none:\n ax.set(xlabel='Time (s)')\n fig.align_ylabels()\n fig.subplots_adjust(left=0.1, right=0.825, bottom=0.075, top=0.95,\n hspace=0.7)\n fig.legend(loc='right', title='cHPI frequencies')\n return fig\n" ]
[ [ "numpy.diag", "numpy.dot", "scipy.linalg.svd", "numpy.sqrt", "numpy.linspace", "scipy.signal.freqz", "numpy.asarray", "matplotlib.pyplot.barh", "numpy.in1d", "numpy.concatenate", "numpy.max", "numpy.round", "numpy.where", "matplotlib.pyplot.gca", "scipy.signal.group_delay", "numpy.pad", "numpy.unique", "numpy.arange", "numpy.full", "matplotlib.pyplot.gcf", "numpy.atleast_1d", "matplotlib.patheffects.withStroke", "numpy.diff", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots_adjust", "numpy.ravel", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.min", "matplotlib.pyplot.ylim", "numpy.log10", "numpy.meshgrid", "numpy.array", "matplotlib.pyplot.suptitle", "numpy.maximum", "numpy.abs", "matplotlib.pyplot.yscale", "matplotlib.pyplot.subplots", "matplotlib.colors.Normalize", "numpy.sort", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlim", "matplotlib.pyplot.grid", "matplotlib.pyplot.pcolor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bopopescu/smart_contracts7
[ "40a487cb3843e86ab5e4cb50b1aafa2095f648cd" ]
[ "env/lib/python3.6/site-packages/torch/optim/asgd.py" ]
[ "import math\nimport torch\nfrom .optimizer import Optimizer\n\n\nclass ASGD(Optimizer):\n \"\"\"Implements Averaged Stochastic Gradient Descent.\n\n It has been proposed in `Acceleration of stochastic approximation by\n averaging`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-2)\n lambd (float, optional): decay term (default: 1e-4)\n alpha (float, optional): power for eta update (default: 0.75)\n t0 (float, optional): point at which to start averaging (default: 1e6)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n\n .. _Acceleration of stochastic approximation by averaging:\n http://dl.acm.org/citation.cfm?id=131098\n \"\"\"\n\n def __init__(self, params, lr=1e-2, lambd=1e-4, alpha=0.75, t0=1e6, weight_decay=0):\n defaults = dict(lr=lr, lambd=lambd, alpha=alpha, t0=t0,\n weight_decay=weight_decay)\n super(ASGD, self).__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('ASGD does not support sparse gradients')\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n state['eta'] = group['lr']\n state['mu'] = 1\n state['ax'] = torch.zeros_like(p.data)\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n grad = grad.add(group['weight_decay'], p.data)\n\n # decay term\n p.data.mul_(1 - group['lambd'] * state['eta'])\n\n # update parameter\n p.data.add_(-state['eta'], grad)\n\n # averaging\n if state['mu'] != 1:\n state['ax'].add_(p.data.sub(state['ax']).mul(state['mu']))\n else:\n state['ax'].copy_(p.data)\n\n # update eta and mu\n state['eta'] = (group['lr'] /\n math.pow((1 + group['lambd'] * group['lr'] * state['step']), group['alpha']))\n state['mu'] = 1 / max(1, state['step'] - group['t0'])\n\n return loss\n" ]
[ [ "torch.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
luftwurzel/pandas
[ "8980af7ce9d98713b0f8792e38f0fe43088e8780", "8980af7ce9d98713b0f8792e38f0fe43088e8780", "8980af7ce9d98713b0f8792e38f0fe43088e8780", "8980af7ce9d98713b0f8792e38f0fe43088e8780" ]
[ "pandas/tests/io/parser/test_python_parser_only.py", "pandas/tests/extension/test_floating.py", "pandas/plotting/_matplotlib/misc.py", "pandas/tests/plotting/frame/test_frame_color.py" ]
[ "\"\"\"\nTests that apply specifically to the Python parser. Unless specifically\nstated as a Python-specific issue, the goal is to eventually move as many of\nthese tests out of this module as soon as the C parser can accept further\narguments when parsing.\n\"\"\"\nfrom __future__ import annotations\n\nimport csv\nfrom io import (\n BytesIO,\n StringIO,\n)\n\nimport pytest\n\nfrom pandas.errors import (\n ParserError,\n ParserWarning,\n)\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n)\nimport pandas._testing as tm\n\n\ndef test_default_separator(python_parser_only):\n # see gh-17333\n #\n # csv.Sniffer in Python treats \"o\" as separator.\n data = \"aob\\n1o2\\n3o4\"\n parser = python_parser_only\n expected = DataFrame({\"a\": [1, 3], \"b\": [2, 4]})\n\n result = parser.read_csv(StringIO(data), sep=None)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"skipfooter\", [\"foo\", 1.5, True])\ndef test_invalid_skipfooter_non_int(python_parser_only, skipfooter):\n # see gh-15925 (comment)\n data = \"a\\n1\\n2\"\n parser = python_parser_only\n msg = \"skipfooter must be an integer\"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=skipfooter)\n\n\ndef test_invalid_skipfooter_negative(python_parser_only):\n # see gh-15925 (comment)\n data = \"a\\n1\\n2\"\n parser = python_parser_only\n msg = \"skipfooter cannot be negative\"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=-1)\n\n\[email protected](\"kwargs\", [{\"sep\": None}, {\"delimiter\": \"|\"}])\ndef test_sniff_delimiter(python_parser_only, kwargs):\n data = \"\"\"index|A|B|C\nfoo|1|2|3\nbar|4|5|6\nbaz|7|8|9\n\"\"\"\n parser = python_parser_only\n result = parser.read_csv(StringIO(data), index_col=0, **kwargs)\n expected = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n columns=[\"A\", \"B\", \"C\"],\n index=Index([\"foo\", \"bar\", \"baz\"], name=\"index\"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_sniff_delimiter_comment(python_parser_only):\n data = \"\"\"# comment line\nindex|A|B|C\n# comment line\nfoo|1|2|3 # ignore | this\nbar|4|5|6\nbaz|7|8|9\n\"\"\"\n parser = python_parser_only\n result = parser.read_csv(StringIO(data), index_col=0, sep=None, comment=\"#\")\n expected = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n columns=[\"A\", \"B\", \"C\"],\n index=Index([\"foo\", \"bar\", \"baz\"], name=\"index\"),\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"encoding\", [None, \"utf-8\"])\ndef test_sniff_delimiter_encoding(python_parser_only, encoding):\n parser = python_parser_only\n data = \"\"\"ignore this\nignore this too\nindex|A|B|C\nfoo|1|2|3\nbar|4|5|6\nbaz|7|8|9\n\"\"\"\n\n if encoding is not None:\n from io import TextIOWrapper\n\n data = data.encode(encoding)\n data = BytesIO(data)\n data = TextIOWrapper(data, encoding=encoding)\n else:\n data = StringIO(data)\n\n result = parser.read_csv(data, index_col=0, sep=None, skiprows=2, encoding=encoding)\n expected = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n columns=[\"A\", \"B\", \"C\"],\n index=Index([\"foo\", \"bar\", \"baz\"], name=\"index\"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_single_line(python_parser_only):\n # see gh-6607: sniff separator\n parser = python_parser_only\n result = parser.read_csv(StringIO(\"1,2\"), names=[\"a\", \"b\"], header=None, sep=None)\n\n expected = DataFrame({\"a\": [1], \"b\": [2]})\n tm.assert_frame_equal(result, expected)\n\n\[email 
protected](\"kwargs\", [{\"skipfooter\": 2}, {\"nrows\": 3}])\ndef test_skipfooter(python_parser_only, kwargs):\n # see gh-6607\n data = \"\"\"A,B,C\n1,2,3\n4,5,6\n7,8,9\nwant to skip this\nalso also skip this\n\"\"\"\n parser = python_parser_only\n result = parser.read_csv(StringIO(data), **kwargs)\n\n expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=[\"A\", \"B\", \"C\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"compression,klass\", [(\"gzip\", \"GzipFile\"), (\"bz2\", \"BZ2File\")]\n)\ndef test_decompression_regex_sep(python_parser_only, csv1, compression, klass):\n # see gh-6607\n parser = python_parser_only\n\n with open(csv1, \"rb\") as f:\n data = f.read()\n\n data = data.replace(b\",\", b\"::\")\n expected = parser.read_csv(csv1)\n\n module = pytest.importorskip(compression)\n klass = getattr(module, klass)\n\n with tm.ensure_clean() as path:\n with klass(path, mode=\"wb\") as tmp:\n tmp.write(data)\n\n result = parser.read_csv(path, sep=\"::\", compression=compression)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_buglet_4x_multi_index(python_parser_only):\n # see gh-6607\n data = \"\"\" A B C D E\none two three four\na b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640\na q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744\nx q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838\"\"\"\n parser = python_parser_only\n\n expected = DataFrame(\n [\n [-0.5109, -2.3358, -0.4645, 0.05076, 0.3640],\n [0.4473, 1.4152, 0.2834, 1.00661, 0.1744],\n [-0.6662, -0.5243, -0.3580, 0.89145, 2.5838],\n ],\n columns=[\"A\", \"B\", \"C\", \"D\", \"E\"],\n index=MultiIndex.from_tuples(\n [(\"a\", \"b\", 10.0032, 5), (\"a\", \"q\", 20, 4), (\"x\", \"q\", 30, 3)],\n names=[\"one\", \"two\", \"three\", \"four\"],\n ),\n )\n result = parser.read_csv(StringIO(data), sep=r\"\\s+\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_buglet_4x_multi_index2(python_parser_only):\n # see gh-6893\n data = \" A B C\\na b c\\n1 3 7 0 3 6\\n3 1 4 1 5 9\"\n parser = python_parser_only\n\n expected = DataFrame.from_records(\n [(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],\n columns=list(\"abcABC\"),\n index=list(\"abc\"),\n )\n result = parser.read_csv(StringIO(data), sep=r\"\\s+\")\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"add_footer\", [True, False])\ndef test_skipfooter_with_decimal(python_parser_only, add_footer):\n # see gh-6971\n data = \"1#2\\n3#4\"\n parser = python_parser_only\n expected = DataFrame({\"a\": [1.2, 3.4]})\n\n if add_footer:\n # The stray footer line should not mess with the\n # casting of the first two lines if we skip it.\n kwargs = {\"skipfooter\": 1}\n data += \"\\nFooter\"\n else:\n kwargs = {}\n\n result = parser.read_csv(StringIO(data), names=[\"a\"], decimal=\"#\", **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"sep\", [\"::\", \"#####\", \"!!!\", \"123\", \"#1!c5\", \"%!c!d\", \"@@#4:2\", \"_!pd#_\"]\n)\[email protected](\n \"encoding\", [\"utf-16\", \"utf-16-be\", \"utf-16-le\", \"utf-32\", \"cp037\"]\n)\ndef test_encoding_non_utf8_multichar_sep(python_parser_only, sep, encoding):\n # see gh-3404\n expected = DataFrame({\"a\": [1], \"b\": [2]})\n parser = python_parser_only\n\n data = \"1\" + sep + \"2\"\n encoded_data = data.encode(encoding)\n\n result = parser.read_csv(\n BytesIO(encoded_data), sep=sep, names=[\"a\", \"b\"], encoding=encoding\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"quoting\", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE])\ndef 
test_multi_char_sep_quotes(python_parser_only, quoting):\n # see gh-13374\n kwargs = {\"sep\": \",,\"}\n parser = python_parser_only\n\n data = 'a,,b\\n1,,a\\n2,,\"2,,b\"'\n\n if quoting == csv.QUOTE_NONE:\n msg = \"Expected 2 fields in line 3, saw 3\"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), quoting=quoting, **kwargs)\n else:\n msg = \"ignored when a multi-char delimiter is used\"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), quoting=quoting, **kwargs)\n\n\ndef test_none_delimiter(python_parser_only, capsys):\n # see gh-13374 and gh-17465\n parser = python_parser_only\n data = \"a,b,c\\n0,1,2\\n3,4,5,6\\n7,8,9\"\n expected = DataFrame({\"a\": [0, 7], \"b\": [1, 8], \"c\": [2, 9]})\n\n # We expect the third line in the data to be\n # skipped because it is malformed, but we do\n # not expect any errors to occur.\n result = parser.read_csv(StringIO(data), header=0, sep=None, on_bad_lines=\"warn\")\n tm.assert_frame_equal(result, expected)\n\n captured = capsys.readouterr()\n assert \"Skipping line 3\" in captured.err\n\n\[email protected](\"data\", ['a\\n1\\n\"b\"a', 'a,b,c\\ncat,foo,bar\\ndog,foo,\"baz'])\[email protected](\"skipfooter\", [0, 1])\ndef test_skipfooter_bad_row(python_parser_only, data, skipfooter):\n # see gh-13879 and gh-15910\n parser = python_parser_only\n if skipfooter:\n msg = \"parsing errors in the skipped footer rows\"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=skipfooter)\n else:\n msg = \"unexpected end of data|expected after\"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=skipfooter)\n\n\ndef test_malformed_skipfooter(python_parser_only):\n parser = python_parser_only\n data = \"\"\"ignore\nA,B,C\n1,2,3 # comment\n1,2,3,4,5\n2,3,4\nfooter\n\"\"\"\n msg = \"Expected 3 fields in line 4, saw 5\"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), header=1, comment=\"#\", skipfooter=1)\n\n\ndef test_python_engine_file_no_next(python_parser_only):\n parser = python_parser_only\n\n class NoNextBuffer:\n def __init__(self, csv_data) -> None:\n self.data = csv_data\n\n def __iter__(self):\n return self.data.__iter__()\n\n def read(self):\n return self.data\n\n def readline(self):\n return self.data\n\n parser.read_csv(NoNextBuffer(\"a\\n1\"))\n\n\[email protected](\"bad_line_func\", [lambda x: [\"2\", \"3\"], lambda x: x[:2]])\ndef test_on_bad_lines_callable(python_parser_only, bad_line_func):\n # GH 5686\n parser = python_parser_only\n data = \"\"\"a,b\n1,2\n2,3,4,5,6\n3,4\n\"\"\"\n bad_sio = StringIO(data)\n result = parser.read_csv(bad_sio, on_bad_lines=bad_line_func)\n expected = DataFrame({\"a\": [1, 2, 3], \"b\": [2, 3, 4]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_on_bad_lines_callable_write_to_external_list(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = \"\"\"a,b\n1,2\n2,3,4,5,6\n3,4\n\"\"\"\n bad_sio = StringIO(data)\n lst = []\n\n def bad_line_func(bad_line: list[str]) -> list[str]:\n lst.append(bad_line)\n return [\"2\", \"3\"]\n\n result = parser.read_csv(bad_sio, on_bad_lines=bad_line_func)\n expected = DataFrame({\"a\": [1, 2, 3], \"b\": [2, 3, 4]})\n tm.assert_frame_equal(result, expected)\n assert lst == [[\"2\", \"3\", \"4\", \"5\", \"6\"]]\n\n\[email protected](\"bad_line_func\", [lambda x: [\"foo\", \"bar\"], lambda x: x[:2]])\[email protected](\"sep\", [\",\", \"111\"])\ndef 
test_on_bad_lines_callable_iterator_true(python_parser_only, bad_line_func, sep):\n # GH 5686\n # iterator=True has a separate code path than iterator=False\n parser = python_parser_only\n data = f\"\"\"\n0{sep}1\nhi{sep}there\nfoo{sep}bar{sep}baz\ngood{sep}bye\n\"\"\"\n bad_sio = StringIO(data)\n result_iter = parser.read_csv(\n bad_sio, on_bad_lines=bad_line_func, chunksize=1, iterator=True, sep=sep\n )\n expecteds = [\n {\"0\": \"hi\", \"1\": \"there\"},\n {\"0\": \"foo\", \"1\": \"bar\"},\n {\"0\": \"good\", \"1\": \"bye\"},\n ]\n for i, (result, expected) in enumerate(zip(result_iter, expecteds)):\n expected = DataFrame(expected, index=range(i, i + 1))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_on_bad_lines_callable_dont_swallow_errors(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = \"\"\"a,b\n1,2\n2,3,4,5,6\n3,4\n\"\"\"\n bad_sio = StringIO(data)\n msg = \"This function is buggy.\"\n\n def bad_line_func(bad_line):\n raise ValueError(msg)\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(bad_sio, on_bad_lines=bad_line_func)\n\n\ndef test_on_bad_lines_callable_not_expected_length(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = \"\"\"a,b\n1,2\n2,3,4,5,6\n3,4\n\"\"\"\n bad_sio = StringIO(data)\n\n with tm.assert_produces_warning(ParserWarning, match=\"Length of header or names\"):\n result = parser.read_csv(bad_sio, on_bad_lines=lambda x: x)\n expected = DataFrame({\"a\": [1, 2, 3], \"b\": [2, 3, 4]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_on_bad_lines_callable_returns_none(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = \"\"\"a,b\n1,2\n2,3,4,5,6\n3,4\n\"\"\"\n bad_sio = StringIO(data)\n\n result = parser.read_csv(bad_sio, on_bad_lines=lambda x: None)\n expected = DataFrame({\"a\": [1, 3], \"b\": [2, 4]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_on_bad_lines_index_col_inferred(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = \"\"\"a,b\n1,2,3\n4,5,6\n\"\"\"\n bad_sio = StringIO(data)\n\n result = parser.read_csv(bad_sio, on_bad_lines=lambda x: [\"99\", \"99\"])\n expected = DataFrame({\"a\": [2, 5], \"b\": [3, 6]}, index=[1, 4])\n tm.assert_frame_equal(result, expected)\n", "\"\"\"\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full functionality of the array is located in\n`pandas/tests/arrays/`.\n\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\n\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\n\"\"\"\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import is_extension_array_dtype\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.api.types import is_float_dtype\nfrom pandas.core.arrays.floating import (\n Float32Dtype,\n Float64Dtype,\n)\nfrom pandas.tests.extension import base\n\n\ndef make_data():\n return (\n list(np.arange(0.1, 0.9, 0.1))\n + [pd.NA]\n + list(np.arange(1, 9.8, 0.1))\n + [pd.NA]\n + [9.9, 10.0]\n )\n\n\[email protected](params=[Float32Dtype, Float64Dtype])\ndef dtype(request):\n return request.param()\n\n\[email protected]\ndef data(dtype):\n return pd.array(make_data(), 
dtype=dtype)\n\n\[email protected]\ndef data_for_twos(dtype):\n return pd.array(np.ones(100) * 2, dtype=dtype)\n\n\[email protected]\ndef data_missing(dtype):\n return pd.array([pd.NA, 0.1], dtype=dtype)\n\n\[email protected]\ndef data_for_sorting(dtype):\n return pd.array([0.1, 0.2, 0.0], dtype=dtype)\n\n\[email protected]\ndef data_missing_for_sorting(dtype):\n return pd.array([0.1, pd.NA, 0.0], dtype=dtype)\n\n\[email protected]\ndef na_cmp():\n # we are pd.NA\n return lambda x, y: x is pd.NA and y is pd.NA\n\n\[email protected]\ndef na_value():\n return pd.NA\n\n\[email protected]\ndef data_for_grouping(dtype):\n b = 0.1\n a = 0.0\n c = 0.2\n na = pd.NA\n return pd.array([b, b, na, na, a, a, b, c], dtype=dtype)\n\n\nclass TestDtype(base.BaseDtypeTests):\n pass\n\n\nclass TestArithmeticOps(base.BaseArithmeticOpsTests):\n def check_opname(self, s, op_name, other, exc=None):\n # overwriting to indicate ops don't raise an error\n super().check_opname(s, op_name, other, exc=None)\n\n def _check_op(self, s, op, other, op_name, exc=NotImplementedError):\n if exc is None:\n sdtype = tm.get_dtype(s)\n if (\n hasattr(other, \"dtype\")\n and not is_extension_array_dtype(other.dtype)\n and is_float_dtype(other.dtype)\n ):\n # other is np.float64 and would therefore always result in\n # upcasting, so keeping other as same numpy_dtype\n other = other.astype(sdtype.numpy_dtype)\n\n result = op(s, other)\n expected = self._combine(s, other, op)\n\n # combine method result in 'biggest' (float64) dtype\n expected = expected.astype(sdtype)\n\n self.assert_equal(result, expected)\n else:\n with pytest.raises(exc):\n op(s, other)\n\n def _check_divmod_op(self, s, op, other, exc=None):\n super()._check_divmod_op(s, op, other, None)\n\n\nclass TestComparisonOps(base.BaseComparisonOpsTests):\n # TODO: share with IntegerArray?\n def _check_op(self, s, op, other, op_name, exc=NotImplementedError):\n if exc is None:\n result = op(s, other)\n # Override to do the astype to boolean\n expected = s.combine(other, op).astype(\"boolean\")\n self.assert_series_equal(result, expected)\n else:\n with pytest.raises(exc):\n op(s, other)\n\n def check_opname(self, s, op_name, other, exc=None):\n super().check_opname(s, op_name, other, exc=None)\n\n def _compare_other(self, s, data, op, other):\n op_name = f\"__{op.__name__}__\"\n self.check_opname(s, op_name, other)\n\n\nclass TestInterface(base.BaseInterfaceTests):\n pass\n\n\nclass TestConstructors(base.BaseConstructorsTests):\n pass\n\n\nclass TestReshaping(base.BaseReshapingTests):\n pass\n\n\nclass TestGetitem(base.BaseGetitemTests):\n pass\n\n\nclass TestSetitem(base.BaseSetitemTests):\n pass\n\n\nclass TestIndex(base.BaseIndexTests):\n pass\n\n\nclass TestMissing(base.BaseMissingTests):\n pass\n\n\nclass TestMethods(base.BaseMethodsTests):\n pass\n\n\nclass TestCasting(base.BaseCastingTests):\n pass\n\n\nclass TestGroupby(base.BaseGroupbyTests):\n pass\n\n\nclass TestNumericReduce(base.BaseNumericReduceTests):\n def check_reduce(self, s, op_name, skipna):\n # overwrite to ensure pd.NA is tested instead of np.nan\n # https://github.com/pandas-dev/pandas/issues/30958\n result = getattr(s, op_name)(skipna=skipna)\n if not skipna and s.isna().any():\n expected = pd.NA\n else:\n expected = getattr(s.dropna().astype(s.dtype.numpy_dtype), op_name)(\n skipna=skipna\n )\n tm.assert_almost_equal(result, expected)\n\n\[email protected](reason=\"Tested in tests/reductions/test_reductions.py\")\nclass TestBooleanReduce(base.BaseBooleanReduceTests):\n pass\n\n\nclass 
TestPrinting(base.BasePrintingTests):\n pass\n\n\nclass TestParsing(base.BaseParsingTests):\n pass\n\n\nclass Test2DCompat(base.Dim2CompatTests):\n pass\n", "from __future__ import annotations\n\nimport random\nfrom typing import (\n TYPE_CHECKING,\n Hashable,\n)\n\nimport matplotlib.lines as mlines\nimport matplotlib.patches as patches\nimport numpy as np\n\nfrom pandas.core.dtypes.missing import notna\n\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.plotting._matplotlib.style import get_standard_colors\nfrom pandas.plotting._matplotlib.tools import (\n create_subplots,\n do_adjust_figure,\n maybe_adjust_figure,\n set_ticks_props,\n)\n\nif TYPE_CHECKING:\n from matplotlib.axes import Axes\n from matplotlib.figure import Figure\n\n from pandas import (\n DataFrame,\n Index,\n Series,\n )\n\n\ndef scatter_matrix(\n frame: DataFrame,\n alpha=0.5,\n figsize=None,\n ax=None,\n grid=False,\n diagonal=\"hist\",\n marker=\".\",\n density_kwds=None,\n hist_kwds=None,\n range_padding=0.05,\n **kwds,\n):\n df = frame._get_numeric_data()\n n = df.columns.size\n naxes = n * n\n fig, axes = create_subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False)\n\n # no gaps between subplots\n maybe_adjust_figure(fig, wspace=0, hspace=0)\n\n mask = notna(df)\n\n marker = _get_marker_compat(marker)\n\n hist_kwds = hist_kwds or {}\n density_kwds = density_kwds or {}\n\n # GH 14855\n kwds.setdefault(\"edgecolors\", \"none\")\n\n boundaries_list = []\n for a in df.columns:\n values = df[a].values[mask[a].values]\n rmin_, rmax_ = np.min(values), np.max(values)\n rdelta_ext = (rmax_ - rmin_) * range_padding / 2\n boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))\n\n for i, a in enumerate(df.columns):\n for j, b in enumerate(df.columns):\n ax = axes[i, j]\n\n if i == j:\n values = df[a].values[mask[a].values]\n\n # Deal with the diagonal by drawing a histogram there.\n if diagonal == \"hist\":\n ax.hist(values, **hist_kwds)\n\n elif diagonal in (\"kde\", \"density\"):\n from scipy.stats import gaussian_kde\n\n y = values\n gkde = gaussian_kde(y)\n ind = np.linspace(y.min(), y.max(), 1000)\n ax.plot(ind, gkde.evaluate(ind), **density_kwds)\n\n ax.set_xlim(boundaries_list[i])\n\n else:\n common = (mask[a] & mask[b]).values\n\n ax.scatter(\n df[b][common], df[a][common], marker=marker, alpha=alpha, **kwds\n )\n\n ax.set_xlim(boundaries_list[j])\n ax.set_ylim(boundaries_list[i])\n\n ax.set_xlabel(b)\n ax.set_ylabel(a)\n\n if j != 0:\n ax.yaxis.set_visible(False)\n if i != n - 1:\n ax.xaxis.set_visible(False)\n\n if len(df.columns) > 1:\n lim1 = boundaries_list[0]\n locs = axes[0][1].yaxis.get_majorticklocs()\n locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]\n adj = (locs - lim1[0]) / (lim1[1] - lim1[0])\n\n lim0 = axes[0][0].get_ylim()\n adj = adj * (lim0[1] - lim0[0]) + lim0[0]\n axes[0][0].yaxis.set_ticks(adj)\n\n if np.all(locs == locs.astype(int)):\n # if all ticks are int\n locs = locs.astype(int)\n axes[0][0].yaxis.set_ticklabels(locs)\n\n set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)\n\n return axes\n\n\ndef _get_marker_compat(marker):\n if marker not in mlines.lineMarkers:\n return \"o\"\n return marker\n\n\ndef radviz(\n frame: DataFrame,\n class_column,\n ax: Axes | None = None,\n color=None,\n colormap=None,\n **kwds,\n) -> Axes:\n import matplotlib.pyplot as plt\n\n def normalize(series):\n a = min(series)\n b = max(series)\n return (series - a) / (b - a)\n\n n = len(frame)\n classes = frame[class_column].drop_duplicates()\n class_col = 
frame[class_column]\n df = frame.drop(class_column, axis=1).apply(normalize)\n\n if ax is None:\n ax = plt.gca()\n ax.set_xlim(-1, 1)\n ax.set_ylim(-1, 1)\n\n to_plot: dict[Hashable, list[list]] = {}\n colors = get_standard_colors(\n num_colors=len(classes), colormap=colormap, color_type=\"random\", color=color\n )\n\n for kls in classes:\n to_plot[kls] = [[], []]\n\n m = len(frame.columns) - 1\n s = np.array(\n [(np.cos(t), np.sin(t)) for t in [2 * np.pi * (i / m) for i in range(m)]]\n )\n\n for i in range(n):\n row = df.iloc[i].values\n row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)\n y = (s * row_).sum(axis=0) / row.sum()\n kls = class_col.iat[i]\n to_plot[kls][0].append(y[0])\n to_plot[kls][1].append(y[1])\n\n for i, kls in enumerate(classes):\n ax.scatter(\n to_plot[kls][0],\n to_plot[kls][1],\n color=colors[i],\n label=pprint_thing(kls),\n **kwds,\n )\n ax.legend()\n\n ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor=\"none\"))\n\n for xy, name in zip(s, df.columns):\n\n ax.add_patch(patches.Circle(xy, radius=0.025, facecolor=\"gray\"))\n\n if xy[0] < 0.0 and xy[1] < 0.0:\n ax.text(\n xy[0] - 0.025, xy[1] - 0.025, name, ha=\"right\", va=\"top\", size=\"small\"\n )\n elif xy[0] < 0.0 and xy[1] >= 0.0:\n ax.text(\n xy[0] - 0.025,\n xy[1] + 0.025,\n name,\n ha=\"right\",\n va=\"bottom\",\n size=\"small\",\n )\n elif xy[0] >= 0.0 and xy[1] < 0.0:\n ax.text(\n xy[0] + 0.025, xy[1] - 0.025, name, ha=\"left\", va=\"top\", size=\"small\"\n )\n elif xy[0] >= 0.0 and xy[1] >= 0.0:\n ax.text(\n xy[0] + 0.025, xy[1] + 0.025, name, ha=\"left\", va=\"bottom\", size=\"small\"\n )\n\n ax.axis(\"equal\")\n return ax\n\n\ndef andrews_curves(\n frame: DataFrame,\n class_column,\n ax: Axes | None = None,\n samples: int = 200,\n color=None,\n colormap=None,\n **kwds,\n) -> Axes:\n import matplotlib.pyplot as plt\n\n def function(amplitudes):\n def f(t):\n x1 = amplitudes[0]\n result = x1 / np.sqrt(2.0)\n\n # Take the rest of the coefficients and resize them\n # appropriately. 
Take a copy of amplitudes as otherwise numpy\n # deletes the element from amplitudes itself.\n coeffs = np.delete(np.copy(amplitudes), 0)\n coeffs = np.resize(coeffs, (int((coeffs.size + 1) / 2), 2))\n\n # Generate the harmonics and arguments for the sin and cos\n # functions.\n harmonics = np.arange(0, coeffs.shape[0]) + 1\n trig_args = np.outer(harmonics, t)\n\n result += np.sum(\n coeffs[:, 0, np.newaxis] * np.sin(trig_args)\n + coeffs[:, 1, np.newaxis] * np.cos(trig_args),\n axis=0,\n )\n return result\n\n return f\n\n n = len(frame)\n class_col = frame[class_column]\n classes = frame[class_column].drop_duplicates()\n df = frame.drop(class_column, axis=1)\n t = np.linspace(-np.pi, np.pi, samples)\n used_legends: set[str] = set()\n\n color_values = get_standard_colors(\n num_colors=len(classes), colormap=colormap, color_type=\"random\", color=color\n )\n colors = dict(zip(classes, color_values))\n if ax is None:\n ax = plt.gca()\n ax.set_xlim(-np.pi, np.pi)\n for i in range(n):\n row = df.iloc[i].values\n f = function(row)\n y = f(t)\n kls = class_col.iat[i]\n label = pprint_thing(kls)\n if label not in used_legends:\n used_legends.add(label)\n ax.plot(t, y, color=colors[kls], label=label, **kwds)\n else:\n ax.plot(t, y, color=colors[kls], **kwds)\n\n ax.legend(loc=\"upper right\")\n ax.grid()\n return ax\n\n\ndef bootstrap_plot(\n series: Series,\n fig: Figure | None = None,\n size: int = 50,\n samples: int = 500,\n **kwds,\n) -> Figure:\n\n import matplotlib.pyplot as plt\n\n # TODO: is the failure mentioned below still relevant?\n # random.sample(ndarray, int) fails on python 3.3, sigh\n data = list(series.values)\n samplings = [random.sample(data, size) for _ in range(samples)]\n\n means = np.array([np.mean(sampling) for sampling in samplings])\n medians = np.array([np.median(sampling) for sampling in samplings])\n midranges = np.array(\n [(min(sampling) + max(sampling)) * 0.5 for sampling in samplings]\n )\n if fig is None:\n fig = plt.figure()\n x = list(range(samples))\n axes = []\n ax1 = fig.add_subplot(2, 3, 1)\n ax1.set_xlabel(\"Sample\")\n axes.append(ax1)\n ax1.plot(x, means, **kwds)\n ax2 = fig.add_subplot(2, 3, 2)\n ax2.set_xlabel(\"Sample\")\n axes.append(ax2)\n ax2.plot(x, medians, **kwds)\n ax3 = fig.add_subplot(2, 3, 3)\n ax3.set_xlabel(\"Sample\")\n axes.append(ax3)\n ax3.plot(x, midranges, **kwds)\n ax4 = fig.add_subplot(2, 3, 4)\n ax4.set_xlabel(\"Mean\")\n axes.append(ax4)\n ax4.hist(means, **kwds)\n ax5 = fig.add_subplot(2, 3, 5)\n ax5.set_xlabel(\"Median\")\n axes.append(ax5)\n ax5.hist(medians, **kwds)\n ax6 = fig.add_subplot(2, 3, 6)\n ax6.set_xlabel(\"Midrange\")\n axes.append(ax6)\n ax6.hist(midranges, **kwds)\n for axis in axes:\n plt.setp(axis.get_xticklabels(), fontsize=8)\n plt.setp(axis.get_yticklabels(), fontsize=8)\n if do_adjust_figure(fig):\n plt.tight_layout()\n return fig\n\n\ndef parallel_coordinates(\n frame: DataFrame,\n class_column,\n cols=None,\n ax: Axes | None = None,\n color=None,\n use_columns=False,\n xticks=None,\n colormap=None,\n axvlines: bool = True,\n axvlines_kwds=None,\n sort_labels: bool = False,\n **kwds,\n) -> Axes:\n import matplotlib.pyplot as plt\n\n if axvlines_kwds is None:\n axvlines_kwds = {\"linewidth\": 1, \"color\": \"black\"}\n\n n = len(frame)\n classes = frame[class_column].drop_duplicates()\n class_col = frame[class_column]\n\n if cols is None:\n df = frame.drop(class_column, axis=1)\n else:\n df = frame[cols]\n\n used_legends: set[str] = set()\n\n ncols = len(df.columns)\n\n # determine values to use for 
xticks\n x: list[int] | Index\n if use_columns is True:\n if not np.all(np.isreal(list(df.columns))):\n raise ValueError(\"Columns must be numeric to be used as xticks\")\n x = df.columns\n elif xticks is not None:\n if not np.all(np.isreal(xticks)):\n raise ValueError(\"xticks specified must be numeric\")\n elif len(xticks) != ncols:\n raise ValueError(\"Length of xticks must match number of columns\")\n x = xticks\n else:\n x = list(range(ncols))\n\n if ax is None:\n ax = plt.gca()\n\n color_values = get_standard_colors(\n num_colors=len(classes), colormap=colormap, color_type=\"random\", color=color\n )\n\n if sort_labels:\n classes = sorted(classes)\n color_values = sorted(color_values)\n colors = dict(zip(classes, color_values))\n\n for i in range(n):\n y = df.iloc[i].values\n kls = class_col.iat[i]\n label = pprint_thing(kls)\n if label not in used_legends:\n used_legends.add(label)\n ax.plot(x, y, color=colors[kls], label=label, **kwds)\n else:\n ax.plot(x, y, color=colors[kls], **kwds)\n\n if axvlines:\n for i in x:\n ax.axvline(i, **axvlines_kwds)\n\n ax.set_xticks(x)\n ax.set_xticklabels(df.columns)\n ax.set_xlim(x[0], x[-1])\n ax.legend(loc=\"upper right\")\n ax.grid()\n return ax\n\n\ndef lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Axes:\n # workaround because `c='b'` is hardcoded in matplotlib's scatter method\n import matplotlib.pyplot as plt\n\n kwds.setdefault(\"c\", plt.rcParams[\"patch.facecolor\"])\n\n data = series.values\n y1 = data[:-lag]\n y2 = data[lag:]\n if ax is None:\n ax = plt.gca()\n ax.set_xlabel(\"y(t)\")\n ax.set_ylabel(f\"y(t + {lag})\")\n ax.scatter(y1, y2, **kwds)\n return ax\n\n\ndef autocorrelation_plot(series: Series, ax: Axes | None = None, **kwds) -> Axes:\n import matplotlib.pyplot as plt\n\n n = len(series)\n data = np.asarray(series)\n if ax is None:\n ax = plt.gca()\n ax.set_xlim(1, n)\n ax.set_ylim(-1.0, 1.0)\n mean = np.mean(data)\n c0 = np.sum((data - mean) ** 2) / n\n\n def r(h):\n return ((data[: n - h] - mean) * (data[h:] - mean)).sum() / n / c0\n\n x = np.arange(n) + 1\n y = [r(loc) for loc in x]\n z95 = 1.959963984540054\n z99 = 2.5758293035489004\n ax.axhline(y=z99 / np.sqrt(n), linestyle=\"--\", color=\"grey\")\n ax.axhline(y=z95 / np.sqrt(n), color=\"grey\")\n ax.axhline(y=0.0, color=\"black\")\n ax.axhline(y=-z95 / np.sqrt(n), color=\"grey\")\n ax.axhline(y=-z99 / np.sqrt(n), linestyle=\"--\", color=\"grey\")\n ax.set_xlabel(\"Lag\")\n ax.set_ylabel(\"Autocorrelation\")\n ax.plot(x, y, **kwds)\n if \"label\" in kwds:\n ax.legend()\n ax.grid()\n return ax\n", "\"\"\" Test cases for DataFrame.plot \"\"\"\nimport re\nimport warnings\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import DataFrame\nimport pandas._testing as tm\nfrom pandas.tests.plotting.common import (\n TestPlotBase,\n _check_plot_works,\n)\n\n\[email protected]_if_no_mpl\nclass TestDataFrameColor(TestPlotBase):\n def test_mpl2_color_cycle_str(self):\n # GH 15516\n df = DataFrame(np.random.randn(10, 3), columns=[\"a\", \"b\", \"c\"])\n colors = [\"C0\", \"C1\", \"C2\", \"C3\", \"C4\", \"C5\", \"C6\", \"C7\", \"C8\", \"C9\"]\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\", \"MatplotlibDeprecationWarning\")\n\n for color in colors:\n _check_plot_works(df.plot, color=color)\n\n # if warning is raised, check that it is the exact problematic one\n # GH 36972\n if w:\n match = \"Support for uppercase single-letter colors is deprecated\"\n 
warning_message = str(w[0].message)\n msg = \"MatplotlibDeprecationWarning related to CN colors was raised\"\n assert match not in warning_message, msg\n\n def test_color_single_series_list(self):\n # GH 3486\n df = DataFrame({\"A\": [1, 2, 3]})\n _check_plot_works(df.plot, color=[\"red\"])\n\n @pytest.mark.parametrize(\"color\", [(1, 0, 0), (1, 0, 0, 0.5)])\n def test_rgb_tuple_color(self, color):\n # GH 16695\n df = DataFrame({\"x\": [1, 2], \"y\": [3, 4]})\n _check_plot_works(df.plot, x=\"x\", y=\"y\", color=color)\n\n def test_color_empty_string(self):\n df = DataFrame(np.random.randn(10, 2))\n with pytest.raises(ValueError, match=\"Invalid color argument:\"):\n df.plot(color=\"\")\n\n def test_color_and_style_arguments(self):\n df = DataFrame({\"x\": [1, 2], \"y\": [3, 4]})\n # passing both 'color' and 'style' arguments should be allowed\n # if there is no color symbol in the style strings:\n ax = df.plot(color=[\"red\", \"black\"], style=[\"-\", \"--\"])\n # check that the linestyles are correctly set:\n linestyle = [line.get_linestyle() for line in ax.lines]\n assert linestyle == [\"-\", \"--\"]\n # check that the colors are correctly set:\n color = [line.get_color() for line in ax.lines]\n assert color == [\"red\", \"black\"]\n # passing both 'color' and 'style' arguments should not be allowed\n # if there is a color symbol in the style strings:\n msg = (\n \"Cannot pass 'style' string with a color symbol and 'color' keyword \"\n \"argument. Please use one or the other or pass 'style' without a color \"\n \"symbol\"\n )\n with pytest.raises(ValueError, match=msg):\n df.plot(color=[\"red\", \"black\"], style=[\"k-\", \"r--\"])\n\n @pytest.mark.parametrize(\n \"color, expected\",\n [\n (\"green\", [\"green\"] * 4),\n ([\"yellow\", \"red\", \"green\", \"blue\"], [\"yellow\", \"red\", \"green\", \"blue\"]),\n ],\n )\n def test_color_and_marker(self, color, expected):\n # GH 21003\n df = DataFrame(np.random.random((7, 4)))\n ax = df.plot(color=color, style=\"d--\")\n # check colors\n result = [i.get_color() for i in ax.lines]\n assert result == expected\n # check markers and linestyles\n assert all(i.get_linestyle() == \"--\" for i in ax.lines)\n assert all(i.get_marker() == \"d\" for i in ax.lines)\n\n def test_bar_colors(self):\n import matplotlib.pyplot as plt\n\n default_colors = self._unpack_cycler(plt.rcParams)\n\n df = DataFrame(np.random.randn(5, 5))\n ax = df.plot.bar()\n self._check_colors(ax.patches[::5], facecolors=default_colors[:5])\n tm.close()\n\n custom_colors = \"rgcby\"\n ax = df.plot.bar(color=custom_colors)\n self._check_colors(ax.patches[::5], facecolors=custom_colors)\n tm.close()\n\n from matplotlib import cm\n\n # Test str -> colormap functionality\n ax = df.plot.bar(colormap=\"jet\")\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]\n self._check_colors(ax.patches[::5], facecolors=rgba_colors)\n tm.close()\n\n # Test colormap functionality\n ax = df.plot.bar(colormap=cm.jet)\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]\n self._check_colors(ax.patches[::5], facecolors=rgba_colors)\n tm.close()\n\n ax = df.loc[:, [0]].plot.bar(color=\"DodgerBlue\")\n self._check_colors([ax.patches[0]], facecolors=[\"DodgerBlue\"])\n tm.close()\n\n ax = df.plot(kind=\"bar\", color=\"green\")\n self._check_colors(ax.patches[::5], facecolors=[\"green\"] * 5)\n tm.close()\n\n def test_bar_user_colors(self):\n df = DataFrame(\n {\"A\": range(4), \"B\": range(1, 5), \"color\": [\"red\", \"blue\", \"blue\", \"red\"]}\n )\n # This should *only* work when `y` is 
specified, else\n # we use one color per column\n ax = df.plot.bar(y=\"A\", color=df[\"color\"])\n result = [p.get_facecolor() for p in ax.patches]\n expected = [\n (1.0, 0.0, 0.0, 1.0),\n (0.0, 0.0, 1.0, 1.0),\n (0.0, 0.0, 1.0, 1.0),\n (1.0, 0.0, 0.0, 1.0),\n ]\n assert result == expected\n\n def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):\n # addressing issue #10611, to ensure colobar does not\n # interfere with x-axis label and ticklabels with\n # ipython inline backend.\n random_array = np.random.random((1000, 3))\n df = DataFrame(random_array, columns=[\"A label\", \"B label\", \"C label\"])\n\n ax1 = df.plot.scatter(x=\"A label\", y=\"B label\")\n ax2 = df.plot.scatter(x=\"A label\", y=\"B label\", c=\"C label\")\n\n vis1 = [vis.get_visible() for vis in ax1.xaxis.get_minorticklabels()]\n vis2 = [vis.get_visible() for vis in ax2.xaxis.get_minorticklabels()]\n assert vis1 == vis2\n\n vis1 = [vis.get_visible() for vis in ax1.xaxis.get_majorticklabels()]\n vis2 = [vis.get_visible() for vis in ax2.xaxis.get_majorticklabels()]\n assert vis1 == vis2\n\n assert (\n ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible()\n )\n\n def test_if_hexbin_xaxis_label_is_visible(self):\n # addressing issue #10678, to ensure colobar does not\n # interfere with x-axis label and ticklabels with\n # ipython inline backend.\n random_array = np.random.random((1000, 3))\n df = DataFrame(random_array, columns=[\"A label\", \"B label\", \"C label\"])\n\n ax = df.plot.hexbin(\"A label\", \"B label\", gridsize=12)\n assert all(vis.get_visible() for vis in ax.xaxis.get_minorticklabels())\n assert all(vis.get_visible() for vis in ax.xaxis.get_majorticklabels())\n assert ax.xaxis.get_label().get_visible()\n\n def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):\n import matplotlib.pyplot as plt\n\n random_array = np.random.random((1000, 3))\n df = DataFrame(random_array, columns=[\"A label\", \"B label\", \"C label\"])\n\n fig, axes = plt.subplots(1, 2)\n df.plot.scatter(\"A label\", \"B label\", c=\"C label\", ax=axes[0])\n df.plot.scatter(\"A label\", \"B label\", c=\"C label\", ax=axes[1])\n plt.tight_layout()\n\n points = np.array([ax.get_position().get_points() for ax in fig.axes])\n axes_x_coords = points[:, :, 0]\n parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :]\n colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :]\n assert np.isclose(parent_distance, colorbar_distance, atol=1e-7).all()\n\n @pytest.mark.parametrize(\"cmap\", [None, \"Greys\"])\n @pytest.mark.parametrize(\"kw\", [\"c\", \"color\"])\n def test_scatter_with_c_column_name_with_colors(self, cmap, kw):\n # https://github.com/pandas-dev/pandas/issues/34316\n df = DataFrame(\n [[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]],\n columns=[\"length\", \"width\"],\n )\n df[\"species\"] = [\"r\", \"r\", \"g\", \"g\", \"b\"]\n ax = df.plot.scatter(x=0, y=1, cmap=cmap, **{kw: \"species\"})\n assert ax.collections[0].colorbar is None\n\n def test_scatter_colors(self):\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [1, 2, 3], \"c\": [1, 2, 3]})\n with pytest.raises(TypeError, match=\"Specify exactly one of `c` and `color`\"):\n df.plot.scatter(x=\"a\", y=\"b\", c=\"c\", color=\"green\")\n\n default_colors = self._unpack_cycler(self.plt.rcParams)\n\n ax = df.plot.scatter(x=\"a\", y=\"b\", c=\"c\")\n tm.assert_numpy_array_equal(\n ax.collections[0].get_facecolor()[0],\n np.array(self.colorconverter.to_rgba(default_colors[0])),\n )\n\n ax = df.plot.scatter(x=\"a\", y=\"b\", 
color=\"white\")\n tm.assert_numpy_array_equal(\n ax.collections[0].get_facecolor()[0],\n np.array([1, 1, 1, 1], dtype=np.float64),\n )\n\n def test_scatter_colorbar_different_cmap(self):\n # GH 33389\n import matplotlib.pyplot as plt\n\n df = DataFrame({\"x\": [1, 2, 3], \"y\": [1, 3, 2], \"c\": [1, 2, 3]})\n df[\"x2\"] = df[\"x\"] + 1\n\n fig, ax = plt.subplots()\n df.plot(\"x\", \"y\", c=\"c\", kind=\"scatter\", cmap=\"cividis\", ax=ax)\n df.plot(\"x2\", \"y\", c=\"c\", kind=\"scatter\", cmap=\"magma\", ax=ax)\n\n assert ax.collections[0].cmap.name == \"cividis\"\n assert ax.collections[1].cmap.name == \"magma\"\n\n def test_line_colors(self):\n from matplotlib import cm\n\n custom_colors = \"rgcby\"\n df = DataFrame(np.random.randn(5, 5))\n\n ax = df.plot(color=custom_colors)\n self._check_colors(ax.get_lines(), linecolors=custom_colors)\n\n tm.close()\n\n ax2 = df.plot(color=custom_colors)\n lines2 = ax2.get_lines()\n\n for l1, l2 in zip(ax.get_lines(), lines2):\n assert l1.get_color() == l2.get_color()\n\n tm.close()\n\n ax = df.plot(colormap=\"jet\")\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=rgba_colors)\n tm.close()\n\n ax = df.plot(colormap=cm.jet)\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=rgba_colors)\n tm.close()\n\n # make color a list if plotting one column frame\n # handles cases like df.plot(color='DodgerBlue')\n ax = df.loc[:, [0]].plot(color=\"DodgerBlue\")\n self._check_colors(ax.lines, linecolors=[\"DodgerBlue\"])\n\n ax = df.plot(color=\"red\")\n self._check_colors(ax.get_lines(), linecolors=[\"red\"] * 5)\n tm.close()\n\n # GH 10299\n custom_colors = [\"#FF0000\", \"#0000FF\", \"#FFFF00\", \"#000000\", \"#FFFFFF\"]\n ax = df.plot(color=custom_colors)\n self._check_colors(ax.get_lines(), linecolors=custom_colors)\n tm.close()\n\n def test_dont_modify_colors(self):\n colors = [\"r\", \"g\", \"b\"]\n DataFrame(np.random.rand(10, 2)).plot(color=colors)\n assert len(colors) == 3\n\n def test_line_colors_and_styles_subplots(self):\n # GH 9894\n from matplotlib import cm\n\n default_colors = self._unpack_cycler(self.plt.rcParams)\n\n df = DataFrame(np.random.randn(5, 5))\n\n axes = df.plot(subplots=True)\n for ax, c in zip(axes, list(default_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n # single color char\n axes = df.plot(subplots=True, color=\"k\")\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"k\"])\n tm.close()\n\n # single color str\n axes = df.plot(subplots=True, color=\"green\")\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"green\"])\n tm.close()\n\n custom_colors = \"rgcby\"\n axes = df.plot(color=custom_colors, subplots=True)\n for ax, c in zip(axes, list(custom_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n axes = df.plot(color=list(custom_colors), subplots=True)\n for ax, c in zip(axes, list(custom_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n # GH 10299\n custom_colors = [\"#FF0000\", \"#0000FF\", \"#FFFF00\", \"#000000\", \"#FFFFFF\"]\n axes = df.plot(color=custom_colors, subplots=True)\n for ax, c in zip(axes, list(custom_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n for cmap in [\"jet\", cm.jet]:\n axes = df.plot(colormap=cmap, subplots=True)\n for ax, c in zip(axes, rgba_colors):\n 
self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n # make color a list if plotting one column frame\n # handles cases like df.plot(color='DodgerBlue')\n axes = df.loc[:, [0]].plot(color=\"DodgerBlue\", subplots=True)\n self._check_colors(axes[0].lines, linecolors=[\"DodgerBlue\"])\n\n # single character style\n axes = df.plot(style=\"r\", subplots=True)\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"r\"])\n tm.close()\n\n # list of styles\n styles = list(\"rgcby\")\n axes = df.plot(style=styles, subplots=True)\n for ax, c in zip(axes, styles):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n def test_area_colors(self):\n from matplotlib import cm\n from matplotlib.collections import PolyCollection\n\n custom_colors = \"rgcby\"\n df = DataFrame(np.random.rand(5, 5))\n\n ax = df.plot.area(color=custom_colors)\n self._check_colors(ax.get_lines(), linecolors=custom_colors)\n poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]\n self._check_colors(poly, facecolors=custom_colors)\n\n handles, labels = ax.get_legend_handles_labels()\n self._check_colors(handles, facecolors=custom_colors)\n\n for h in handles:\n assert h.get_alpha() is None\n tm.close()\n\n ax = df.plot.area(colormap=\"jet\")\n jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=jet_colors)\n poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]\n self._check_colors(poly, facecolors=jet_colors)\n\n handles, labels = ax.get_legend_handles_labels()\n self._check_colors(handles, facecolors=jet_colors)\n for h in handles:\n assert h.get_alpha() is None\n tm.close()\n\n # When stacked=False, alpha is set to 0.5\n ax = df.plot.area(colormap=cm.jet, stacked=False)\n self._check_colors(ax.get_lines(), linecolors=jet_colors)\n poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]\n jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]\n self._check_colors(poly, facecolors=jet_with_alpha)\n\n handles, labels = ax.get_legend_handles_labels()\n linecolors = jet_with_alpha\n self._check_colors(handles[: len(jet_colors)], linecolors=linecolors)\n for h in handles:\n assert h.get_alpha() == 0.5\n\n def test_hist_colors(self):\n default_colors = self._unpack_cycler(self.plt.rcParams)\n\n df = DataFrame(np.random.randn(5, 5))\n ax = df.plot.hist()\n self._check_colors(ax.patches[::10], facecolors=default_colors[:5])\n tm.close()\n\n custom_colors = \"rgcby\"\n ax = df.plot.hist(color=custom_colors)\n self._check_colors(ax.patches[::10], facecolors=custom_colors)\n tm.close()\n\n from matplotlib import cm\n\n # Test str -> colormap functionality\n ax = df.plot.hist(colormap=\"jet\")\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]\n self._check_colors(ax.patches[::10], facecolors=rgba_colors)\n tm.close()\n\n # Test colormap functionality\n ax = df.plot.hist(colormap=cm.jet)\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]\n self._check_colors(ax.patches[::10], facecolors=rgba_colors)\n tm.close()\n\n ax = df.loc[:, [0]].plot.hist(color=\"DodgerBlue\")\n self._check_colors([ax.patches[0]], facecolors=[\"DodgerBlue\"])\n\n ax = df.plot(kind=\"hist\", color=\"green\")\n self._check_colors(ax.patches[::10], facecolors=[\"green\"] * 5)\n tm.close()\n\n @td.skip_if_no_scipy\n def test_kde_colors(self):\n from matplotlib import cm\n\n custom_colors = \"rgcby\"\n df = DataFrame(np.random.rand(5, 5))\n\n ax = df.plot.kde(color=custom_colors)\n 
self._check_colors(ax.get_lines(), linecolors=custom_colors)\n tm.close()\n\n ax = df.plot.kde(colormap=\"jet\")\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=rgba_colors)\n tm.close()\n\n ax = df.plot.kde(colormap=cm.jet)\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n self._check_colors(ax.get_lines(), linecolors=rgba_colors)\n\n @td.skip_if_no_scipy\n def test_kde_colors_and_styles_subplots(self):\n from matplotlib import cm\n\n default_colors = self._unpack_cycler(self.plt.rcParams)\n\n df = DataFrame(np.random.randn(5, 5))\n\n axes = df.plot(kind=\"kde\", subplots=True)\n for ax, c in zip(axes, list(default_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n # single color char\n axes = df.plot(kind=\"kde\", color=\"k\", subplots=True)\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"k\"])\n tm.close()\n\n # single color str\n axes = df.plot(kind=\"kde\", color=\"red\", subplots=True)\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"red\"])\n tm.close()\n\n custom_colors = \"rgcby\"\n axes = df.plot(kind=\"kde\", color=custom_colors, subplots=True)\n for ax, c in zip(axes, list(custom_colors)):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]\n for cmap in [\"jet\", cm.jet]:\n axes = df.plot(kind=\"kde\", colormap=cmap, subplots=True)\n for ax, c in zip(axes, rgba_colors):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n # make color a list if plotting one column frame\n # handles cases like df.plot(color='DodgerBlue')\n axes = df.loc[:, [0]].plot(kind=\"kde\", color=\"DodgerBlue\", subplots=True)\n self._check_colors(axes[0].lines, linecolors=[\"DodgerBlue\"])\n\n # single character style\n axes = df.plot(kind=\"kde\", style=\"r\", subplots=True)\n for ax in axes:\n self._check_colors(ax.get_lines(), linecolors=[\"r\"])\n tm.close()\n\n # list of styles\n styles = list(\"rgcby\")\n axes = df.plot(kind=\"kde\", style=styles, subplots=True)\n for ax, c in zip(axes, styles):\n self._check_colors(ax.get_lines(), linecolors=[c])\n tm.close()\n\n def test_boxplot_colors(self):\n def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c=\"k\", fliers_c=None):\n # TODO: outside this func?\n if fliers_c is None:\n fliers_c = \"k\"\n self._check_colors(bp[\"boxes\"], linecolors=[box_c] * len(bp[\"boxes\"]))\n self._check_colors(\n bp[\"whiskers\"], linecolors=[whiskers_c] * len(bp[\"whiskers\"])\n )\n self._check_colors(\n bp[\"medians\"], linecolors=[medians_c] * len(bp[\"medians\"])\n )\n self._check_colors(bp[\"fliers\"], linecolors=[fliers_c] * len(bp[\"fliers\"]))\n self._check_colors(bp[\"caps\"], linecolors=[caps_c] * len(bp[\"caps\"]))\n\n default_colors = self._unpack_cycler(self.plt.rcParams)\n\n df = DataFrame(np.random.randn(5, 5))\n bp = df.plot.box(return_type=\"dict\")\n _check_colors(\n bp,\n default_colors[0],\n default_colors[0],\n default_colors[2],\n default_colors[0],\n )\n tm.close()\n\n dict_colors = {\n \"boxes\": \"#572923\",\n \"whiskers\": \"#982042\",\n \"medians\": \"#804823\",\n \"caps\": \"#123456\",\n }\n bp = df.plot.box(color=dict_colors, sym=\"r+\", return_type=\"dict\")\n _check_colors(\n bp,\n dict_colors[\"boxes\"],\n dict_colors[\"whiskers\"],\n dict_colors[\"medians\"],\n dict_colors[\"caps\"],\n \"r\",\n )\n tm.close()\n\n # partial colors\n dict_colors = {\"whiskers\": \"c\", \"medians\": \"m\"}\n bp 
= df.plot.box(color=dict_colors, return_type=\"dict\")\n _check_colors(bp, default_colors[0], \"c\", \"m\", default_colors[0])\n tm.close()\n\n from matplotlib import cm\n\n # Test str -> colormap functionality\n bp = df.plot.box(colormap=\"jet\", return_type=\"dict\")\n jet_colors = [cm.jet(n) for n in np.linspace(0, 1, 3)]\n _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2], jet_colors[0])\n tm.close()\n\n # Test colormap functionality\n bp = df.plot.box(colormap=cm.jet, return_type=\"dict\")\n _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2], jet_colors[0])\n tm.close()\n\n # string color is applied to all artists except fliers\n bp = df.plot.box(color=\"DodgerBlue\", return_type=\"dict\")\n _check_colors(bp, \"DodgerBlue\", \"DodgerBlue\", \"DodgerBlue\", \"DodgerBlue\")\n\n # tuple is also applied to all artists except fliers\n bp = df.plot.box(color=(0, 1, 0), sym=\"#123456\", return_type=\"dict\")\n _check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), \"#123456\")\n\n msg = re.escape(\n \"color dict contains invalid key 'xxxx'. The key must be either \"\n \"['boxes', 'whiskers', 'medians', 'caps']\"\n )\n with pytest.raises(ValueError, match=msg):\n # Color contains invalid key results in ValueError\n df.plot.box(color={\"boxes\": \"red\", \"xxxx\": \"blue\"})\n\n def test_default_color_cycle(self):\n import cycler\n import matplotlib.pyplot as plt\n\n colors = list(\"rgbk\")\n plt.rcParams[\"axes.prop_cycle\"] = cycler.cycler(\"color\", colors)\n\n df = DataFrame(np.random.randn(5, 3))\n ax = df.plot()\n\n expected = self._unpack_cycler(plt.rcParams)[:3]\n self._check_colors(ax.get_lines(), linecolors=expected)\n\n def test_no_color_bar(self):\n df = DataFrame(\n {\n \"A\": np.random.uniform(size=20),\n \"B\": np.random.uniform(size=20),\n \"C\": np.arange(20) + np.random.uniform(size=20),\n }\n )\n ax = df.plot.hexbin(x=\"A\", y=\"B\", colorbar=None)\n assert ax.collections[0].colorbar is None\n\n def test_mixing_cmap_and_colormap_raises(self):\n df = DataFrame(\n {\n \"A\": np.random.uniform(size=20),\n \"B\": np.random.uniform(size=20),\n \"C\": np.arange(20) + np.random.uniform(size=20),\n }\n )\n msg = \"Only specify one of `cmap` and `colormap`\"\n with pytest.raises(TypeError, match=msg):\n df.plot.hexbin(x=\"A\", y=\"B\", cmap=\"YlGn\", colormap=\"BuGn\")\n\n def test_passed_bar_colors(self):\n import matplotlib as mpl\n\n color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]\n colormap = mpl.colors.ListedColormap(color_tuples)\n barplot = DataFrame([[1, 2, 3]]).plot(kind=\"bar\", cmap=colormap)\n assert color_tuples == [c.get_facecolor() for c in barplot.patches]\n\n def test_rcParams_bar_colors(self):\n import matplotlib as mpl\n\n color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]\n with mpl.rc_context(rc={\"axes.prop_cycle\": mpl.cycler(\"color\", color_tuples)}):\n barplot = DataFrame([[1, 2, 3]]).plot(kind=\"bar\")\n assert color_tuples == [c.get_facecolor() for c in barplot.patches]\n\n def test_colors_of_columns_with_same_name(self):\n # ISSUE 11136 -> https://github.com/pandas-dev/pandas/issues/11136\n # Creating a DataFrame with duplicate column labels and testing colors of them.\n df = DataFrame({\"b\": [0, 1, 0], \"a\": [1, 2, 3]})\n df1 = DataFrame({\"a\": [2, 4, 6]})\n df_concat = pd.concat([df, df1], axis=1)\n result = df_concat.plot()\n for legend, line in zip(result.get_legend().legendHandles, result.lines):\n assert legend.get_color() == line.get_color()\n\n def test_invalid_colormap(self):\n df = 
DataFrame(np.random.randn(3, 2), columns=[\"A\", \"B\"])\n msg = \"'invalid_colormap' is not a valid value for name; supported values are \"\n with pytest.raises(ValueError, match=msg):\n df.plot(colormap=\"invalid_colormap\")\n" ]
[ [ "pandas._testing.assert_produces_warning", "pandas.Index", "pandas.DataFrame", "pandas._testing.ensure_clean", "pandas.MultiIndex.from_tuples", "pandas._testing.assert_frame_equal" ], [ "pandas._testing.assert_almost_equal", "pandas.api.types.is_float_dtype", "pandas._testing.get_dtype", "pandas.core.dtypes.common.is_extension_array_dtype", "numpy.arange", "pandas.array", "numpy.ones" ], [ "numpy.expand_dims", "numpy.sqrt", "pandas.plotting._matplotlib.tools.set_ticks_props", "numpy.linspace", "numpy.asarray", "pandas.core.dtypes.missing.notna", "numpy.max", "scipy.stats.gaussian_kde", "numpy.mean", "pandas.plotting._matplotlib.tools.create_subplots", "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "numpy.arange", "numpy.sin", "numpy.copy", "pandas.plotting._matplotlib.tools.do_adjust_figure", "numpy.outer", "matplotlib.pyplot.figure", "numpy.min", "numpy.median", "matplotlib.patches.Circle", "pandas.plotting._matplotlib.tools.maybe_adjust_figure", "numpy.isreal", "numpy.sum", "numpy.cos", "pandas.io.formats.printing.pprint_thing" ], [ "pandas.concat", "matplotlib.pyplot.tight_layout", "numpy.random.random", "numpy.linspace", "numpy.arange", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.colors.ListedColormap", "pandas._testing.close", "numpy.random.randn", "numpy.random.rand", "matplotlib.cycler", "numpy.random.uniform", "numpy.array", "pandas.tests.plotting.common._check_plot_works", "matplotlib.cm.jet", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "0.24", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jkterry1/parameter-sharing-paper
[ "cb26ad195b580006f66fd8a60973408d5657b209" ]
[ "indicator_opt.py" ]
[ "import sys\nimport json\nimport numpy as np\nimport os\nimport pickle as pkl\nimport time\nfrom pprint import pprint\n\nfrom stable_baselines3 import PPO, DQN\nfrom stable_baselines3.common.utils import set_random_seed\n\nfrom pettingzoo.butterfly import cooperative_pong_v3, prospector_v4, knights_archers_zombies_v7\nfrom pettingzoo.atari import entombed_cooperative_v2, pong_v2\nfrom pettingzoo.atari.base_atari_env import BaseAtariEnv, base_env_wrapper_fn, parallel_wrapper_fn\nimport gym\n\nimport supersuit as ss\nfrom stable_baselines3.common.vec_env import VecMonitor, VecTransposeImage, VecNormalize\nfrom stable_baselines3.common.evaluation import evaluate_policy\nfrom stable_baselines3.common.callbacks import EvalCallback\nfrom stable_baselines3.common.preprocessing import is_image_space, is_image_space_channels_first\n\nimport optuna\nfrom optuna.integration.skopt import SkoptSampler\nfrom optuna.pruners import BasePruner, MedianPruner, SuccessiveHalvingPruner\nfrom optuna.samplers import BaseSampler, RandomSampler, TPESampler\nfrom optuna.visualization import plot_optimization_history, plot_param_importances\n\nfrom utils.hyperparams_opt import sample_ppo_params, sample_dqn_params\nfrom utils.callbacks import SaveVecNormalizeCallback, TrialEvalCallback\n\nfrom indicator_util import AgentIndicatorWrapper, InvertColorIndicator, BinaryIndicator, GeometricPatternIndicator\n\nimport argparse\n\nfrom stable_baselines3.common.utils import set_random_seed\n\nif __name__ == \"__main__\": # noqa: C901\n parser = argparse.ArgumentParser()\n \n '''\n Env List\n - Entombed Cooperative (Atari): DQN, PPO\n - Cooperative Pong (Butterfly): DQN, PPO\n - Prospector (Butterfly): PPO\n - KAZ (Butterfly): DQN, PPO\n - Pong (Atari): DQN, PPO\n '''\n butterfly_envs = [\"prospector-v4\", \"knights-archers-zombies-v7\", \"cooperative-pong-v3\"]\n atari_envs = [\"entombed-cooperative-v2\", \"pong-v2\"]\n\n parser.add_argument(\"--algo\", help=\"RL Algorithm\", default=\"ppo\", type=str, required=False, choices=[\"ppo\", \"dqn\"])\n parser.add_argument(\"--env\", type=str, default=\"pong-v2\", help=\"environment ID\", choices=[\n \"prospector-v4\",\n \"knights-archers-zombies-v7\",\n \"cooperative-pong-v3\",\n \"entombed-cooperative-v2\",\n \"pong-v2\"\n ])\n parser.add_argument(\"-n\", \"--n-timesteps\", help=\"Overwrite the number of timesteps\", default=1e6, type=int)\n parser.add_argument(\"--n-trials\", help=\"Number of trials for optimizing hyperparameters\", type=int, default=10)\n parser.add_argument(\n \"--optimization-log-path\",\n help=\"Path to save the evaluation log and optimal policy for each hyperparameter tried during optimization. 
\"\n \"Disabled if no argument is passed.\",\n type=str,\n )\n parser.add_argument(\"--eval-episodes\", help=\"Number of episodes to use for evaluation\", default=5, type=int)\n parser.add_argument(\n \"--sampler\",\n help=\"Sampler to use when optimizing hyperparameters\",\n type=str,\n default=\"tpe\",\n choices=[\"random\", \"tpe\", \"skopt\"],\n )\n parser.add_argument(\n \"--pruner\",\n help=\"Pruner to use when optimizing hyperparameters\",\n type=str,\n default=\"median\",\n choices=[\"halving\", \"median\", \"none\"],\n )\n parser.add_argument(\"--n-startup-trials\", help=\"Number of trials before using optuna sampler\", type=int, default=10)\n parser.add_argument(\n \"--n-evaluations\",\n help=\"Training policies are evaluated every n-timesteps // n-evaluations steps when doing hyperparameter optimization\",\n type=int,\n default=100,\n )\n parser.add_argument(\"-f\", \"--log-folder\", help=\"Log folder\", type=str, default=\"logs\")\n parser.add_argument(\n \"--storage\", help=\"Database storage path if distributed optimization should be used\", type=str, default=None\n )\n parser.add_argument(\"--study-name\", help=\"Study name for distributed optimization\", type=str, default=None)\n parser.add_argument(\"--verbose\", help=\"Verbose mode (0: no output, 1: INFO)\", default=1, type=int)\n args = parser.parse_args()\n\n seed = np.random.randint(2 ** 32 - 1, dtype=\"int64\").item()\n set_random_seed(seed)\n\n print(\"=\" * 10, args.env, \"=\" * 10)\n print(f\"Seed: {seed}\")\n \n # Hyperparameter optimization\n\n # Determine sampler and pruner\n if args.sampler == \"random\":\n sampler = RandomSampler(seed=seed)\n elif args.sampler == \"tpe\":\n sampler = TPESampler(n_startup_trials=args.n_startup_trials, seed=seed)\n elif args.sampler == \"skopt\":\n sampler = SkoptSampler(skopt_kwargs={\"base_estimator\": \"GP\", \"acq_func\": \"gp_hedge\"})\n else:\n raise ValueError(f\"Unknown sampler: {args.sampler}\")\n \n if args.pruner == \"halving\":\n pruner = SuccessiveHalvingPruner(min_resource=1, reduction_factor=4, min_early_stopping_rate=0)\n elif args.pruner == \"median\":\n pruner = MedianPruner(n_startup_trials=args.n_startup_trials, n_warmup_steps=args.n_evaluations // 3)\n elif args.pruner == \"none\":\n # Do not prune\n pruner = MedianPruner(n_startup_trials=args.n_trials, n_warmup_steps=args.n_evaluations)\n else:\n raise ValueError(f\"Unknown pruner: {args.pruner}\")\n\n print(f\"Sampler: {args.sampler} - Pruner: {args.pruner}\")\n\n # Create study\n study = optuna.create_study(\n sampler=sampler,\n pruner=pruner,\n storage=args.storage,\n study_name=args.study_name,\n load_if_exists=True,\n direction=\"maximize\",\n )\n\n hyperparams_sampler = {'ppo': sample_ppo_params, 'dqn': sample_dqn_params}\n hyperparams_algo = {'ppo': PPO, 'dqn': DQN}\n \n muesli_obs_size = 96 \n muesli_frame_size = 4\n\n # Objective function for hyperparameter search\n def objective(trial: optuna.Trial) -> float:\n #kwargs = self._hyperparams.copy()\n kwargs = {\n #'n_envs': 1,\n 'policy': 'CnnPolicy',\n #'n_timesteps': 1e6,\n }\n\n # Sample candidate hyperparameters\n sampled_hyperparams = hyperparams_sampler[args.algo](trial)\n kwargs.update(sampled_hyperparams)\n\n # Create training env\n if args.env == \"prospector-v4\":\n env = prospector_v4.parallel_env()\n agent_type = \"prospector\"\n elif args.env == \"knights-archers-zombies-v7\":\n env = knights_archers_zombies_v7.parallel_env()\n agent_type = \"archer\"\n elif args.env == \"cooperative-pong-v3\":\n env = 
cooperative_pong_v3.parallel_env()\n agent_type = \"paddle_0\"\n elif args.env == \"entombed-cooperative-v2\":\n env = entombed_cooperative_v2.parallel_env()\n agent_type = \"first\"\n elif args.env == \"pong-v2\":\n env = pong_v2.parallel_env()\n agent_type = \"first\"\n env = ss.color_reduction_v0(env)\n env = ss.pad_action_space_v0(env)\n env = ss.pad_observations_v0(env)\n env = ss.resize_v0(env, x_size=muesli_obs_size, y_size=muesli_obs_size, linear_interp=True)\n env = ss.frame_stack_v1(env, stack_size=muesli_frame_size)\n\n # Enable black death\n if args.env == 'knights-archers-zombies-v7':\n env = ss.black_death_v2(env)\n\n # Agent indicator wrapper\n agent_indicator_name = trial.suggest_categorical(\"agent_indicator\", choices=[\"identity\", \"invert\", \"invert-replace\", \"binary\", \"geometric\"])\n if agent_indicator_name == \"invert\":\n agent_indicator = InvertColorIndicator(env, agent_type)\n agent_indicator_wrapper = AgentIndicatorWrapper(agent_indicator)\n elif agent_indicator_name == \"invert-replace\":\n agent_indicator = InvertColorIndicator(env, agent_type)\n agent_indicator_wrapper = AgentIndicatorWrapper(agent_indicator, False)\n elif agent_indicator_name == \"binary\":\n agent_indicator = BinaryIndicator(env, agent_type)\n agent_indicator_wrapper = AgentIndicatorWrapper(agent_indicator)\n elif agent_indicator_name == \"geometric\":\n agent_indicator = GeometricPatternIndicator(env, agent_type)\n agent_indicator_wrapper = AgentIndicatorWrapper(agent_indicator)\n if agent_indicator_name != \"identity\":\n env = ss.observation_lambda_v0(env, agent_indicator_wrapper.apply, agent_indicator_wrapper.apply_space)\n\n env = ss.pettingzoo_env_to_vec_env_v0(env)\n #env = ss.concat_vec_envs_v0(env, num_vec_envs=1, num_cpus=1, base_class='stable_baselines3')\n env = VecMonitor(env)\n\n def image_transpose(env):\n if is_image_space(env.observation_space) and not is_image_space_channels_first(env.observation_space):\n env = VecTransposeImage(env)\n return env\n env = image_transpose(env)\n\n model = hyperparams_algo[args.algo](\n env=env,\n tensorboard_log=None,\n # We do not seed the trial\n seed=None,\n verbose=0,\n **kwargs,\n )\n\n model.trial = trial\n\n # Create eval env\n if args.env == \"prospector-v4\":\n eval_env = prospector_v4.parallel_env()\n agent_type = \"prospector\"\n elif args.env == \"knights-archers-zombies-v7\":\n eval_env = knights_archers_zombies_v7.parallel_env()\n agent_type = \"archer\"\n elif args.env == \"cooperative-pong-v3\":\n eval_env = cooperative_pong_v3.parallel_env()\n agent_type = \"paddle_0\"\n elif args.env == \"entombed-cooperative-v2\":\n eval_env = entombed_cooperative_v2.parallel_env()\n agent_type = \"first\"\n elif args.env == \"pong-v2\":\n def pong_single_raw_env(**kwargs):\n return BaseAtariEnv(game=\"pong\", num_players=1, env_name=os.path.basename(__file__)[:-3], **kwargs)\n pong_single_env = base_env_wrapper_fn(pong_single_raw_env)\n pong_parallel_env = parallel_wrapper_fn(pong_single_env)\n eval_env = pong_parallel_env()\n #eval_env = pong_v2.parallel_env()\n #eval_env = gym.make(\"Pong-v0\", obs_type='image')\n agent_type = \"first\"\n eval_env = ss.color_reduction_v0(eval_env)\n eval_env = ss.pad_action_space_v0(eval_env)\n eval_env = ss.pad_observations_v0(eval_env)\n eval_env = ss.resize_v0(eval_env, x_size=muesli_obs_size, y_size=muesli_obs_size, linear_interp=True)\n eval_env = ss.frame_stack_v1(eval_env, stack_size=muesli_frame_size)\n # Enable black death\n if args.env == 'knights-archers-zombies-v7':\n eval_env = 
ss.black_death_v2(eval_env)\n\n # Agent indicator wrapper\n if agent_indicator_name == \"invert\":\n eval_agent_indicator = InvertColorIndicator(eval_env, agent_type)\n eval_agent_indicator_wrapper = AgentIndicatorWrapper(eval_agent_indicator)\n elif agent_indicator_name == \"invert-replace\":\n eval_agent_indicator = InvertColorIndicator(eval_env, agent_type)\n eval_agent_indicator_wrapper = AgentIndicatorWrapper(eval_agent_indicator, False)\n elif agent_indicator_name == \"binary\":\n eval_agent_indicator = BinaryIndicator(eval_env, agent_type)\n eval_agent_indicator_wrapper = AgentIndicatorWrapper(eval_agent_indicator)\n elif agent_indicator_name == \"geometric\":\n eval_agent_indicator = GeometricPatternIndicator(eval_env, agent_type)\n eval_agent_indicator_wrapper = AgentIndicatorWrapper(eval_agent_indicator)\n if agent_indicator_name != \"identity\":\n eval_env = ss.observation_lambda_v0(eval_env, eval_agent_indicator_wrapper.apply, eval_agent_indicator_wrapper.apply_space)\n\n eval_env = ss.pettingzoo_env_to_vec_env_v0(eval_env)\n #eval_env = ss.concat_vec_envs_v0(eval_env, num_vec_envs=1, num_cpus=1, base_class='stable_baselines3')\n eval_env = VecMonitor(eval_env)\n eval_env = image_transpose(eval_env)\n\n optuna_eval_freq = int(args.n_timesteps / args.n_evaluations)\n # Account for parallel envs\n optuna_eval_freq = max(optuna_eval_freq // model.get_env().num_envs, 1)\n # Use non-deterministic eval for Atari\n path = None\n if args.optimization_log_path is not None:\n path = os.path.join(args.optimization_log_path, f\"trial_{str(trial.number)}\")\n #callbacks = get_callback_list({\"callback\": self.specified_callbacks})\n callbacks = []\n deterministic_eval = args.env not in atari_envs\n eval_callback = TrialEvalCallback(\n eval_env,\n trial,\n best_model_save_path=path,\n log_path=path,\n n_eval_episodes=args.eval_episodes,\n eval_freq=optuna_eval_freq,\n deterministic=deterministic_eval,\n )\n callbacks.append(eval_callback)\n\n try:\n model.learn(args.n_timesteps, callback=callbacks)\n # Free memory\n model.env.close()\n eval_env.close()\n except (AssertionError, ValueError) as e:\n # Sometimes, random hyperparams can generate NaN\n # Free memory\n model.env.close()\n eval_env.close()\n # Prune hyperparams that generate NaNs\n print(e)\n print(\"============\")\n print(\"Sampled hyperparams:\")\n pprint(sampled_hyperparams)\n raise optuna.exceptions.TrialPruned()\n is_pruned = eval_callback.is_pruned\n reward = eval_callback.last_mean_reward\n\n del model.env, eval_env\n del model\n\n if is_pruned:\n raise optuna.exceptions.TrialPruned()\n\n return reward\n\n pass\n\n try:\n study.optimize(objective, n_trials=args.n_trials, n_jobs=1)\n except KeyboardInterrupt:\n pass\n\n print(\"Number of finished trials: \", len(study.trials))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\"Value: \", trial.value)\n\n print(\"Params: \")\n for key, value in trial.params.items():\n print(f\" {key}: {value}\")\n\n report_name = (\n f\"report_{args.env}_{args.n_trials}-trials-{args.n_timesteps}\"\n f\"-{args.sampler}-{args.pruner}_{int(time.time())}\"\n )\n\n log_path = os.path.join(args.log_folder, args.algo, report_name)\n\n if args.verbose:\n print(f\"Writing report to {log_path}\")\n\n # Write report\n os.makedirs(os.path.dirname(log_path), exist_ok=True)\n study.trials_dataframe().to_csv(f\"{log_path}.csv\")\n\n # Save python object to inspect/re-use it later\n with open(f\"{log_path}.pkl\", \"wb+\") as f:\n pkl.dump(study, f)\n\n # Plot optimization result\n try:\n fig1 
= plot_optimization_history(study)\n fig2 = plot_param_importances(study)\n\n fig1.show()\n fig2.show()\n except (ValueError, ImportError, RuntimeError):\n pass" ]
[ [ "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sanket-kamthe/probability
[ "c22b6201155c2e58d08a4ad30641d1aff59fbe7c", "c22b6201155c2e58d08a4ad30641d1aff59fbe7c", "c22b6201155c2e58d08a4ad30641d1aff59fbe7c", "c22b6201155c2e58d08a4ad30641d1aff59fbe7c", "c22b6201155c2e58d08a4ad30641d1aff59fbe7c" ]
[ "tensorflow_probability/python/distributions/beta.py", "tensorflow_probability/python/distributions/beta_test.py", "tensorflow_probability/python/distributions/categorical_test.py", "tensorflow_probability/python/distributions/gamma_test.py", "discussion/fun_mcmc/fun_mcmc_lib.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The Beta distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import kullback_leibler\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.util.seed_stream import SeedStream\nfrom tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import\n\n__all__ = [\n \"Beta\",\n]\n\n\n_beta_sample_note = \"\"\"Note: `x` must have dtype `self.dtype` and be in\n`[0, 1].` It must have a shape compatible with `self.batch_shape()`.\"\"\"\n\n\nclass Beta(distribution.Distribution):\n \"\"\"Beta distribution.\n\n The Beta distribution is defined over the `(0, 1)` interval using parameters\n `concentration1` (aka \"alpha\") and `concentration0` (aka \"beta\").\n\n #### Mathematical Details\n\n The probability density function (pdf) is,\n\n ```none\n pdf(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z\n Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)\n ```\n\n where:\n\n * `concentration1 = alpha`,\n * `concentration0 = beta`,\n * `Z` is the normalization constant, and,\n * `Gamma` is the [gamma function](\n https://en.wikipedia.org/wiki/Gamma_function).\n\n The concentration parameters represent mean total counts of a `1` or a `0`,\n i.e.,\n\n ```none\n concentration1 = alpha = mean * total_concentration\n concentration0 = beta = (1. 
- mean) * total_concentration\n ```\n\n where `mean` in `(0, 1)` and `total_concentration` is a positive real number\n representing a mean `total_count = concentration1 + concentration0`.\n\n Distribution parameters are automatically broadcast in all functions; see\n examples for details.\n\n Warning: The samples can be zero due to finite precision.\n This happens more often when some of the concentrations are very small.\n Make sure to round the samples to `np.finfo(dtype).tiny` before computing the\n density.\n\n Samples of this distribution are reparameterized (pathwise differentiable).\n The derivatives are computed using the approach described in the paper\n\n [Michael Figurnov, Shakir Mohamed, Andriy Mnih.\n Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)\n\n #### Examples\n\n ```python\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n # Create a batch of three Beta distributions.\n alpha = [1, 2, 3]\n beta = [1, 2, 3]\n dist = tfd.Beta(alpha, beta)\n\n dist.sample([4, 5]) # Shape [4, 5, 3]\n\n # `x` has three batch entries, each with two samples.\n x = [[.1, .4, .5],\n [.2, .3, .5]]\n # Calculate the probability of each pair of samples under the corresponding\n # distribution in `dist`.\n dist.prob(x) # Shape [2, 3]\n ```\n\n ```python\n # Create batch_shape=[2, 3] via parameter broadcast:\n alpha = [[1.], [2]] # Shape [2, 1]\n beta = [3., 4, 5] # Shape [3]\n dist = tfd.Beta(alpha, beta)\n\n # alpha broadcast as: [[1., 1, 1,],\n # [2, 2, 2]]\n # beta broadcast as: [[3., 4, 5],\n # [3, 4, 5]]\n # batch_Shape [2, 3]\n dist.sample([4, 5]) # Shape [4, 5, 2, 3]\n\n x = [.2, .3, .5]\n # x will be broadcast as [[.2, .3, .5],\n # [.2, .3, .5]],\n # thus matching batch_shape [2, 3].\n dist.prob(x) # Shape [2, 3]\n ```\n\n Compute the gradients of samples w.r.t. the parameters:\n\n ```python\n alpha = tf.constant(1.0)\n beta = tf.constant(2.0)\n dist = tfd.Beta(alpha, beta)\n samples = dist.sample(5) # Shape [5]\n loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function\n # Unbiased stochastic gradients of the loss function\n grads = tf.gradients(loss, [alpha, beta])\n ```\n\n \"\"\"\n\n def __init__(self,\n concentration1,\n concentration0,\n validate_args=False,\n allow_nan_stats=True,\n name=\"Beta\"):\n \"\"\"Initialize a batch of Beta distributions.\n\n Args:\n concentration1: Positive floating-point `Tensor` indicating mean\n number of successes; aka \"alpha\". Implies `self.dtype` and\n `self.batch_shape`, i.e.,\n `concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.\n concentration0: Positive floating-point `Tensor` indicating mean\n number of failures; aka \"beta\". Otherwise has same semantics as\n `concentration1`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. 
When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype([concentration1, concentration0],\n dtype_hint=tf.float32)\n self._concentration1 = tensor_util.convert_nonref_to_tensor(\n concentration1, dtype=dtype, name=\"concentration1\")\n self._concentration0 = tensor_util.convert_nonref_to_tensor(\n concentration0, dtype=dtype, name=\"concentration0\")\n super(Beta, self).__init__(\n dtype=dtype,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,\n parameters=parameters,\n name=name)\n\n @staticmethod\n def _param_shapes(sample_shape):\n s = tf.convert_to_tensor(sample_shape, dtype=tf.int32)\n return dict(concentration1=s, concentration0=s)\n\n @classmethod\n def _params_event_ndims(cls):\n return dict(concentration1=0, concentration0=0)\n\n @property\n def concentration1(self):\n \"\"\"Concentration parameter associated with a `1` outcome.\"\"\"\n return self._concentration1\n\n @property\n def concentration0(self):\n \"\"\"Concentration parameter associated with a `0` outcome.\"\"\"\n return self._concentration0\n\n @property\n @deprecation.deprecated(\n \"2019-10-01\",\n (\"The `total_concentration` property is deprecated; instead use \"\n \"`dist.concentration1 + dist.concentration0`.\"),\n warn_once=True)\n def total_concentration(self):\n \"\"\"Sum of concentration parameters.\"\"\"\n with self._name_and_control_scope(\"total_concentration\"):\n return self.concentration1 + self.concentration0\n\n def _batch_shape_tensor(self, concentration1=None, concentration0=None):\n return prefer_static.broadcast_shape(\n prefer_static.shape(\n self.concentration1 if concentration1 is None else concentration1),\n prefer_static.shape(\n self.concentration0 if concentration0 is None else concentration0))\n\n def _batch_shape(self):\n return tf.broadcast_static_shape(\n self.concentration1.shape, self.concentration0.shape)\n\n def _event_shape_tensor(self):\n return tf.constant([], dtype=tf.int32)\n\n def _event_shape(self):\n return tf.TensorShape([])\n\n def _sample_n(self, n, seed=None):\n seed = SeedStream(seed, \"beta\")\n concentration1 = tf.convert_to_tensor(self.concentration1)\n concentration0 = tf.convert_to_tensor(self.concentration0)\n shape = self._batch_shape_tensor(concentration1, concentration0)\n expanded_concentration1 = tf.broadcast_to(concentration1, shape)\n expanded_concentration0 = tf.broadcast_to(concentration0, shape)\n gamma1_sample = tf.random.gamma(\n shape=[n], alpha=expanded_concentration1, dtype=self.dtype, seed=seed())\n gamma2_sample = tf.random.gamma(\n shape=[n], alpha=expanded_concentration0, dtype=self.dtype, seed=seed())\n beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)\n return beta_sample\n\n @distribution_util.AppendDocstring(_beta_sample_note)\n def _log_prob(self, x):\n concentration0 = tf.convert_to_tensor(self.concentration0)\n concentration1 = tf.convert_to_tensor(self.concentration1)\n return (self._log_unnormalized_prob(x, concentration1, concentration0) -\n self._log_normalization(concentration1, concentration0))\n\n @distribution_util.AppendDocstring(_beta_sample_note)\n def _prob(self, x):\n return tf.exp(self._log_prob(x))\n\n @distribution_util.AppendDocstring(_beta_sample_note)\n def _log_cdf(self, x):\n return 
tf.math.log(self._cdf(x))\n\n @distribution_util.AppendDocstring(_beta_sample_note)\n def _cdf(self, x):\n with tf.control_dependencies(self._maybe_assert_valid_sample(x)):\n concentration1 = tf.convert_to_tensor(self.concentration1)\n concentration0 = tf.convert_to_tensor(self.concentration0)\n shape = self._batch_shape_tensor(concentration1, concentration0)\n concentration1 = tf.broadcast_to(concentration1, shape)\n concentration0 = tf.broadcast_to(concentration0, shape)\n return tf.math.betainc(concentration1, concentration0, x)\n\n def _log_unnormalized_prob(self, x, concentration1, concentration0):\n with tf.control_dependencies(self._maybe_assert_valid_sample(x)):\n return (tf.math.xlogy(concentration1 - 1., x) +\n (concentration0 - 1.) * tf.math.log1p(-x))\n\n def _log_normalization(self, concentration1, concentration0):\n return (tf.math.lgamma(concentration1) + tf.math.lgamma(concentration0) -\n tf.math.lgamma(concentration1 + concentration0))\n\n def _entropy(self):\n concentration1 = tf.convert_to_tensor(self.concentration1)\n concentration0 = tf.convert_to_tensor(self.concentration0)\n total_concentration = concentration1 + concentration0\n return (self._log_normalization(concentration1, concentration0) -\n (concentration1 - 1.) * tf.math.digamma(concentration1) -\n (concentration0 - 1.) * tf.math.digamma(concentration0) +\n (total_concentration - 2.) * tf.math.digamma(total_concentration))\n\n def _mean(self):\n concentration1 = tf.convert_to_tensor(self.concentration1)\n return concentration1 / (concentration1 + self.concentration0)\n\n def _variance(self):\n concentration1 = tf.convert_to_tensor(self.concentration1)\n concentration0 = tf.convert_to_tensor(self.concentration0)\n total_concentration = concentration1 + concentration0\n return (concentration1 * concentration0 /\n ((total_concentration)**2 * (total_concentration + 1.)))\n\n @distribution_util.AppendDocstring(\n \"\"\"Note: The mode is undefined when `concentration1 <= 1` or\n `concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN`\n is used for undefined modes. If `self.allow_nan_stats` is `False` an\n exception is raised when one or more modes are undefined.\"\"\")\n def _mode(self):\n concentration1 = tf.convert_to_tensor(self.concentration1)\n concentration0 = tf.convert_to_tensor(self.concentration0)\n mode = (concentration1 - 1.) / (concentration1 + concentration0 - 2.)\n with tf.control_dependencies([] if self.allow_nan_stats else [ # pylint: disable=g-long-ternary\n assert_util.assert_less(\n tf.ones([], dtype=self.dtype),\n concentration1,\n message=\"Mode undefined for concentration1 <= 1.\"),\n assert_util.assert_less(\n tf.ones([], dtype=self.dtype),\n concentration0,\n message=\"Mode undefined for concentration0 <= 1.\")\n ]):\n return tf.where(\n (concentration1 > 1.) 
& (concentration0 > 1.),\n mode,\n dtype_util.as_numpy_dtype(self.dtype)(np.nan))\n\n def _maybe_assert_valid_sample(self, x):\n \"\"\"Checks the validity of a sample.\"\"\"\n if not self.validate_args:\n return []\n return [\n assert_util.assert_positive(x, message=\"Sample must be positive.\"),\n assert_util.assert_less(\n x, tf.ones([], x.dtype), message=\"Sample must be less than `1`.\")\n ]\n\n def _parameter_control_dependencies(self, is_init):\n if not self.validate_args:\n return []\n assertions = []\n for concentration in [self.concentration0, self.concentration1]:\n if is_init != tensor_util.is_ref(concentration):\n assertions.append(assert_util.assert_positive(\n concentration,\n message=\"Concentration parameter must be positive.\"))\n return assertions\n\n\n@kullback_leibler.RegisterKL(Beta, Beta)\ndef _kl_beta_beta(d1, d2, name=None):\n \"\"\"Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.\n\n Args:\n d1: instance of a Beta distribution object.\n d2: instance of a Beta distribution object.\n name: (optional) Name to use for created operations.\n default is \"kl_beta_beta\".\n\n Returns:\n Batchwise KL(d1 || d2)\n \"\"\"\n with tf.name_scope(name or \"kl_beta_beta\"):\n d1_concentration1 = tf.convert_to_tensor(d1.concentration1)\n d1_concentration0 = tf.convert_to_tensor(d1.concentration0)\n d2_concentration1 = tf.convert_to_tensor(d2.concentration1)\n d2_concentration0 = tf.convert_to_tensor(d2.concentration0)\n d1_total_concentration = d1_concentration1 + d1_concentration0\n d2_total_concentration = d2_concentration1 + d2_concentration0\n\n d1_log_normalization = d1._log_normalization( # pylint: disable=protected-access\n d1_concentration1, d1_concentration0)\n d2_log_normalization = d2._log_normalization( # pylint: disable=protected-access\n d2_concentration1, d2_concentration0)\n return ((d2_log_normalization - d1_log_normalization) -\n (tf.math.digamma(d1_concentration1) *\n (d2_concentration1 - d1_concentration1)) -\n (tf.math.digamma(d1_concentration0) *\n (d2_concentration0 - d1_concentration0)) +\n (tf.math.digamma(d1_total_concentration) *\n (d2_total_concentration - d1_total_concentration)))\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\n# Dependency imports\nimport numpy as np\nfrom scipy import special as sp_special\nfrom scipy import stats as sp_stats\n\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import test_util\n\ntfd = tfp.distributions\n\n\n@test_util.test_all_tf_execution_regimes\nclass BetaTest(test_util.TestCase):\n\n def testSimpleShapes(self):\n a = np.random.rand(3)\n b = np.random.rand(3)\n dist = tfd.Beta(a, b, validate_args=True)\n self.assertAllEqual([], 
self.evaluate(dist.event_shape_tensor()))\n self.assertAllEqual([3], self.evaluate(dist.batch_shape_tensor()))\n self.assertEqual(tf.TensorShape([]), dist.event_shape)\n self.assertEqual(tf.TensorShape([3]), dist.batch_shape)\n\n def testComplexShapes(self):\n a = np.random.rand(3, 2, 2)\n b = np.random.rand(3, 2, 2)\n dist = tfd.Beta(a, b, validate_args=True)\n self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))\n self.assertAllEqual([3, 2, 2], self.evaluate(dist.batch_shape_tensor()))\n self.assertEqual(tf.TensorShape([]), dist.event_shape)\n self.assertEqual(tf.TensorShape([3, 2, 2]), dist.batch_shape)\n\n def testComplexShapesBroadcast(self):\n a = np.random.rand(3, 2, 2)\n b = np.random.rand(2, 2)\n dist = tfd.Beta(a, b, validate_args=True)\n self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))\n self.assertAllEqual([3, 2, 2], self.evaluate(dist.batch_shape_tensor()))\n self.assertEqual(tf.TensorShape([]), dist.event_shape)\n self.assertEqual(tf.TensorShape([3, 2, 2]), dist.batch_shape)\n\n def testAlphaProperty(self):\n a = [[1., 2, 3]]\n b = [[2., 4, 3]]\n dist = tfd.Beta(a, b, validate_args=True)\n self.assertEqual([1, 3], dist.concentration1.shape)\n self.assertAllClose(a, self.evaluate(dist.concentration1))\n\n def testBetaProperty(self):\n a = [[1., 2, 3]]\n b = [[2., 4, 3]]\n dist = tfd.Beta(a, b, validate_args=True)\n self.assertEqual([1, 3], dist.concentration0.shape)\n self.assertAllClose(b, self.evaluate(dist.concentration0))\n\n def testPdfXProper(self):\n a = [[1., 2, 3]]\n b = [[2., 4, 3]]\n dist = tfd.Beta(a, b, validate_args=True)\n self.evaluate(dist.prob([.1, .3, .6]))\n self.evaluate(dist.prob([.2, .3, .5]))\n # Either condition can trigger.\n with self.assertRaisesOpError(\"Sample must be positive.\"):\n self.evaluate(dist.prob([-1., 0.1, 0.5]))\n with self.assertRaisesOpError(\"Sample must be positive.\"):\n self.evaluate(dist.prob([0., 0.1, 0.5]))\n with self.assertRaisesOpError(\"Sample must be less than `1`.\"):\n self.evaluate(dist.prob([.1, .2, 1.2]))\n with self.assertRaisesOpError(\"Sample must be less than `1`.\"):\n self.evaluate(dist.prob([.1, .2, 1.0]))\n\n def testPdfTwoBatches(self):\n a = [1., 2]\n b = [1., 2]\n x = [.5, .5]\n dist = tfd.Beta(a, b, validate_args=True)\n pdf = dist.prob(x)\n self.assertAllClose([1., 3. / 2], self.evaluate(pdf))\n self.assertEqual((2,), pdf.shape)\n\n def testPdfTwoBatchesNontrivialX(self):\n a = [1., 2]\n b = [1., 2]\n x = [.3, .7]\n dist = tfd.Beta(a, b, validate_args=True)\n pdf = dist.prob(x)\n self.assertAllClose([1, 63. / 50], self.evaluate(pdf))\n self.assertEqual((2,), pdf.shape)\n\n def testPdfUniformZeroBatch(self):\n # This is equivalent to a uniform distribution\n a = 1.\n b = 1.\n x = np.array([.1, .2, .3, .5, .8], dtype=np.float32)\n dist = tfd.Beta(a, b, validate_args=True)\n pdf = dist.prob(x)\n self.assertAllClose([1.] * 5, self.evaluate(pdf))\n self.assertEqual((5,), pdf.shape)\n\n def testPdfAlphaStretchedInBroadcastWhenSameRank(self):\n a = [[1., 2]]\n b = [[1., 2]]\n x = [[.5, .5], [.3, .7]]\n dist = tfd.Beta(a, b, validate_args=True)\n pdf = dist.prob(x)\n self.assertAllClose([[1., 3. / 2], [1., 63. / 50]], self.evaluate(pdf))\n self.assertEqual((2, 2), pdf.shape)\n\n def testPdfAlphaStretchedInBroadcastWhenLowerRank(self):\n a = [1., 2]\n b = [1., 2]\n x = [[.5, .5], [.2, .8]]\n pdf = tfd.Beta(a, b, validate_args=True).prob(x)\n self.assertAllClose([[1., 3. / 2], [1., 24. 
/ 25]], self.evaluate(pdf))\n self.assertEqual((2, 2), pdf.shape)\n\n def testPdfXStretchedInBroadcastWhenSameRank(self):\n a = [[1., 2], [2., 3]]\n b = [[1., 2], [2., 3]]\n x = [[.5, .5]]\n pdf = tfd.Beta(a, b, validate_args=True).prob(x)\n self.assertAllClose([[1., 3. / 2], [3. / 2, 15. / 8]], self.evaluate(pdf))\n self.assertEqual((2, 2), pdf.shape)\n\n def testPdfXStretchedInBroadcastWhenLowerRank(self):\n a = [[1., 2], [2., 3]]\n b = [[1., 2], [2., 3]]\n x = [.5, .5]\n pdf = tfd.Beta(a, b, validate_args=True).prob(x)\n self.assertAllClose([[1., 3. / 2], [3. / 2, 15. / 8]], self.evaluate(pdf))\n self.assertEqual((2, 2), pdf.shape)\n\n def testLogPdfOnBoundaryIsFiniteWhenAlphaIsOne(self):\n b = [[0.01, 0.1, 1., 2], [5., 10., 2., 3]]\n pdf = self.evaluate(tfd.Beta(1., b, validate_args=False).prob(0.))\n self.assertAllEqual(np.ones_like(pdf, dtype=np.bool), np.isfinite(pdf))\n\n def testBetaMean(self):\n a = [1., 2, 3]\n b = [2., 4, 1.2]\n dist = tfd.Beta(a, b, validate_args=True)\n self.assertEqual(dist.mean().shape, (3,))\n expected_mean = sp_stats.beta.mean(a, b)\n self.assertAllClose(expected_mean, self.evaluate(dist.mean()))\n\n def testBetaVariance(self):\n a = [1., 2, 3]\n b = [2., 4, 1.2]\n dist = tfd.Beta(a, b, validate_args=True)\n self.assertEqual(dist.variance().shape, (3,))\n expected_variance = sp_stats.beta.var(a, b)\n self.assertAllClose(expected_variance, self.evaluate(dist.variance()))\n\n def testBetaMode(self):\n a = np.array([1.1, 2, 3])\n b = np.array([2., 4, 1.2])\n expected_mode = (a - 1) / (a + b - 2)\n dist = tfd.Beta(a, b, validate_args=True)\n self.assertEqual(dist.mode().shape, (3,))\n self.assertAllClose(expected_mode, self.evaluate(dist.mode()))\n\n def testBetaModeInvalid(self):\n a = np.array([1., 2, 3])\n b = np.array([2., 4, 1.2])\n dist = tfd.Beta(a, b, allow_nan_stats=False, validate_args=True)\n with self.assertRaisesOpError(\"Condition x < y.*\"):\n self.evaluate(dist.mode())\n\n a = np.array([2., 2, 3])\n b = np.array([1., 4, 1.2])\n dist = tfd.Beta(a, b, allow_nan_stats=False, validate_args=True)\n with self.assertRaisesOpError(\"Condition x < y.*\"):\n self.evaluate(dist.mode())\n\n def testBetaModeEnableAllowNanStats(self):\n a = np.array([1., 2, 3])\n b = np.array([2., 4, 1.2])\n dist = tfd.Beta(a, b, allow_nan_stats=True, validate_args=True)\n\n expected_mode = (a - 1) / (a + b - 2)\n expected_mode[0] = np.nan\n self.assertEqual((3,), dist.mode().shape)\n self.assertAllClose(expected_mode, self.evaluate(dist.mode()))\n\n a = np.array([2., 2, 3])\n b = np.array([1., 4, 1.2])\n dist = tfd.Beta(a, b, allow_nan_stats=True, validate_args=True)\n\n expected_mode = (a - 1) / (a + b - 2)\n expected_mode[0] = np.nan\n self.assertEqual((3,), dist.mode().shape)\n self.assertAllClose(expected_mode, self.evaluate(dist.mode()))\n\n def testBetaEntropy(self):\n a = [1., 2, 3]\n b = [2., 4, 1.2]\n dist = tfd.Beta(a, b, validate_args=True)\n self.assertEqual(dist.entropy().shape, (3,))\n expected_entropy = sp_stats.beta.entropy(a, b)\n self.assertAllClose(expected_entropy, self.evaluate(dist.entropy()))\n\n def testBetaSample(self):\n a = 1.\n b = 2.\n beta = tfd.Beta(a, b, validate_args=True)\n n = tf.constant(100000)\n samples = beta.sample(n)\n sample_values = self.evaluate(samples)\n self.assertEqual(sample_values.shape, (100000,))\n self.assertFalse(np.any(sample_values < 0.0))\n self.assertLess(\n sp_stats.kstest(\n # Beta is a univariate distribution.\n sample_values,\n sp_stats.beta(a=1., b=2.).cdf)[0],\n 0.01)\n # The standard error of the sample mean 
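# Editor's note: illustrative sketch (not from the original test file) of the
# parameter/event broadcasting the surrounding tests exercise: a [2]-batch
# Beta evaluated at a [2, 2] batch of points yields a [2, 2] result.
# Assumes an eager TF2 runtime with tensorflow_probability installed.
import tensorflow_probability as tfp
tfd = tfp.distributions

dist = tfd.Beta([1., 2.], [1., 2.])      # batch_shape == [2]
pdf = dist.prob([[.5, .5], [.3, .7]])    # x broadcasts against the batch
print(pdf.shape)                         # (2, 2)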
is 1 / (sqrt(18 * n))\n self.assertAllClose(\n sample_values.mean(axis=0), sp_stats.beta.mean(a, b), atol=1e-2)\n self.assertAllClose(\n np.cov(sample_values, rowvar=0), sp_stats.beta.var(a, b), atol=1e-1)\n\n def testBetaFullyReparameterized(self):\n a = tf.constant(1.0)\n b = tf.constant(2.0)\n _, [grad_a, grad_b] = tfp.math.value_and_gradient(\n lambda a_, b_: tfd.Beta(a, b, validate_args=True).sample(100), [a, b])\n self.assertIsNotNone(grad_a)\n self.assertIsNotNone(grad_b)\n\n # Test that sampling with the same seed twice gives the same results.\n def testBetaSampleMultipleTimes(self):\n a_val = 1.\n b_val = 2.\n n_val = 100\n seed = test_util.test_seed()\n\n tf1.set_random_seed(seed)\n beta1 = tfd.Beta(\n concentration1=a_val,\n concentration0=b_val,\n name=\"beta1\",\n validate_args=True)\n samples1 = self.evaluate(beta1.sample(n_val, seed=seed))\n\n tf1.set_random_seed(seed)\n beta2 = tfd.Beta(\n concentration1=a_val,\n concentration0=b_val,\n name=\"beta2\",\n validate_args=True)\n samples2 = self.evaluate(beta2.sample(n_val, seed=seed))\n\n self.assertAllClose(samples1, samples2)\n\n def testBetaSampleMultidimensional(self):\n a = np.random.rand(3, 2, 2).astype(np.float32)\n b = np.random.rand(3, 2, 2).astype(np.float32)\n beta = tfd.Beta(a, b, validate_args=True)\n n = tf.constant(100000)\n samples = beta.sample(n)\n sample_values = self.evaluate(samples)\n self.assertEqual(sample_values.shape, (100000, 3, 2, 2))\n self.assertFalse(np.any(sample_values < 0.0))\n self.assertAllClose(\n sample_values[:, 1, :].mean(axis=0),\n sp_stats.beta.mean(a, b)[1, :],\n atol=1e-1)\n\n def testBetaCdf(self):\n shape = (30, 40, 50)\n for dt in (np.float32, np.float64):\n a = 10. * np.random.random(shape).astype(dt)\n b = 10. * np.random.random(shape).astype(dt)\n x = np.random.random(shape).astype(dt)\n actual = self.evaluate(tfd.Beta(a, b, validate_args=True).cdf(x))\n self.assertAllEqual(np.ones(shape, dtype=np.bool), 0. <= x)\n self.assertAllEqual(np.ones(shape, dtype=np.bool), 1. >= x)\n self.assertAllClose(sp_stats.beta.cdf(x, a, b), actual, rtol=1e-4, atol=0)\n\n def testBetaLogCdf(self):\n shape = (30, 40, 50)\n for dt in (np.float32, np.float64):\n a = 10. * np.random.random(shape).astype(dt)\n b = 10. * np.random.random(shape).astype(dt)\n x = np.random.random(shape).astype(dt)\n actual = self.evaluate(\n tf.exp(tfd.Beta(a, b, validate_args=True).log_cdf(x)))\n self.assertAllEqual(np.ones(shape, dtype=np.bool), 0. <= x)\n self.assertAllEqual(np.ones(shape, dtype=np.bool), 1. 
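# Editor's note: hypothetical sketch of the "fully reparameterized" property
# tested above: gradients of Beta samples with respect to the concentrations
# exist. Assumes eager TF2 + TFP; the lambda and variable names are
# illustrative only.
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

a = tf.constant(1.0)
b = tf.constant(2.0)
mean_sample, (grad_a, grad_b) = tfp.math.value_and_gradient(
    lambda a_, b_: tf.reduce_mean(tfd.Beta(a_, b_).sample(100, seed=42)),
    [a, b])
assert grad_a is not None and grad_b is not None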
>= x)\n self.assertAllClose(sp_stats.beta.cdf(x, a, b), actual, rtol=1e-4, atol=0)\n\n def testBetaBetaKL(self):\n for shape in [(10,), (4, 5)]:\n a1 = 6.0 * np.random.random(size=shape) + 1e-4\n b1 = 6.0 * np.random.random(size=shape) + 1e-4\n a2 = 6.0 * np.random.random(size=shape) + 1e-4\n b2 = 6.0 * np.random.random(size=shape) + 1e-4\n\n d1 = tfd.Beta(concentration1=a1, concentration0=b1, validate_args=True)\n d2 = tfd.Beta(concentration1=a2, concentration0=b2, validate_args=True)\n\n kl_expected = (sp_special.betaln(a2, b2) - sp_special.betaln(a1, b1) +\n (a1 - a2) * sp_special.digamma(a1) +\n (b1 - b2) * sp_special.digamma(b1) +\n (a2 - a1 + b2 - b1) * sp_special.digamma(a1 + b1))\n\n kl = tfd.kl_divergence(d1, d2)\n kl_val = self.evaluate(kl)\n self.assertEqual(kl.shape, shape)\n self.assertAllClose(kl_val, kl_expected)\n\n # Make sure KL(d1||d1) is 0\n kl_same = self.evaluate(tfd.kl_divergence(d1, d1))\n self.assertAllClose(kl_same, np.zeros_like(kl_expected))\n\n def testBetaMeanAfterMutation(self):\n concentration1 = tf.Variable(2.)\n concentration0 = tf.Variable(3.)\n self.evaluate(concentration1.initializer)\n self.evaluate(concentration0.initializer)\n dist = tfd.Beta(\n concentration1=concentration1,\n concentration0=concentration0,\n validate_args=True)\n with tf.control_dependencies([concentration0.assign(6.)]):\n mean = self.evaluate(dist.mean())\n self.assertEqual(mean, 0.25)\n\n def testGradientThroughConcentration1(self):\n concentration1 = tf.Variable(3.)\n d = tfd.Beta(\n concentration1=concentration1, concentration0=5., validate_args=True)\n with tf.GradientTape() as tape:\n loss = -d.log_prob([0.1, 0.2, 0.85])\n grad = tape.gradient(loss, d.trainable_variables)\n self.assertLen(grad, 1)\n self.assertAllNotNone(grad)\n\n def testAssertsPositiveConcentration1(self):\n concentration1 = tf.Variable([1., 2., -3.])\n self.evaluate(concentration1.initializer)\n with self.assertRaisesOpError(\"Concentration parameter must be positive.\"):\n d = tfd.Beta(\n concentration1=concentration1, concentration0=[5.],\n validate_args=True)\n self.evaluate(d.sample())\n\n def testAssertsPositiveConcentration1AfterMutation(self):\n concentration1 = tf.Variable([1., 2., 3.])\n self.evaluate(concentration1.initializer)\n d = tfd.Beta(concentration1=concentration1, concentration0=[5.],\n validate_args=True)\n with self.assertRaisesOpError(\"Concentration parameter must be positive.\"):\n with tf.control_dependencies([concentration1.assign([1., 2., -3.])]):\n self.evaluate(d.sample())\n\n def testGradientThroughConcentration0(self):\n concentration0 = tf.Variable(3.)\n d = tfd.Beta(\n concentration0=concentration0, concentration1=5., validate_args=True)\n with tf.GradientTape() as tape:\n loss = -d.log_prob([0.25, 0.5, 0.9])\n grad = tape.gradient(loss, d.trainable_variables)\n self.assertLen(grad, 1)\n self.assertAllNotNone(grad)\n\n def testAssertsPositiveConcentration0(self):\n concentration0 = tf.Variable([1., 2., -3.])\n self.evaluate(concentration0.initializer)\n with self.assertRaisesOpError(\"Concentration parameter must be positive.\"):\n d = tfd.Beta(concentration0=concentration0, concentration1=[5.],\n validate_args=True)\n self.evaluate(d.sample())\n\n def testAssertsPositiveConcentration0AfterMutation(self):\n concentration0 = tf.Variable([1., 2., 3.])\n self.evaluate(concentration0.initializer)\n d = tfd.Beta(concentration0=concentration0, concentration1=[5.],\n validate_args=True)\n with self.assertRaisesOpError(\"Concentration parameter must be positive.\"):\n with 
tf.control_dependencies([concentration0.assign([1., 2., -3.])]):\n self.evaluate(d.sample())\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for Categorical distribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nfrom absl.testing import parameterized\nimport numpy as np\n\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import prefer_static\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.internal import test_util\n\ntfb = tfp.bijectors\ntfd = tfp.distributions\n\n\ndef make_categorical(batch_shape, num_classes, dtype=tf.int32):\n logits = tf.random.uniform(\n list(batch_shape) + [num_classes], -10, 10, dtype=tf.float32) - 50.\n return tfd.Categorical(logits, dtype=dtype, validate_args=True)\n\n\n@test_util.test_all_tf_execution_regimes\nclass CategoricalTest(test_util.TestCase):\n\n def testP(self):\n p = [0.2, 0.8]\n dist = tfd.Categorical(probs=p, validate_args=True)\n self.assertAllClose(p, self.evaluate(dist.probs))\n self.assertAllEqual([2], dist.probs.shape)\n\n def testLogits(self):\n p = np.array([0.2, 0.8], dtype=np.float32)\n logits = np.log(p) - 50.\n dist = tfd.Categorical(logits=logits, validate_args=True)\n self.assertAllEqual([2], dist.logits.shape)\n self.assertAllClose(logits, self.evaluate(dist.logits))\n\n def testShapes(self):\n for batch_shape in ([], [1], [2, 3, 4]):\n dist = make_categorical(batch_shape, 10)\n self.assertAllEqual(batch_shape, dist.batch_shape)\n self.assertAllEqual(batch_shape,\n self.evaluate(dist.batch_shape_tensor()))\n self.assertAllEqual([], dist.event_shape)\n self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))\n num_categories = tf.shape(\n dist.probs if dist.logits is None else dist.logits)[-1]\n self.assertEqual(10, self.evaluate(num_categories))\n # The number of categories is available as a constant because the shape is\n # known at graph build time.\n num_categories = prefer_static.shape(\n dist.probs if dist.logits is None else dist.logits)[-1]\n self.assertEqual(10, tf.get_static_value(num_categories))\n\n for batch_shape in ([], [1], [2, 3, 4]):\n dist = make_categorical(\n batch_shape, tf.constant(\n 10, dtype=tf.int32))\n self.assertAllEqual(\n len(batch_shape), tensorshape_util.rank(dist.batch_shape))\n self.assertAllEqual(batch_shape,\n self.evaluate(dist.batch_shape_tensor()))\n self.assertAllEqual([], dist.event_shape)\n self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))\n num_categories = tf.shape(\n dist.probs if dist.logits is None else dist.logits)[-1]\n self.assertEqual(10, self.evaluate(num_categories))\n\n def testDtype(self):\n dist = 
make_categorical([], 5, dtype=tf.int32)\n self.assertEqual(dist.dtype, tf.int32)\n self.assertEqual(dist.dtype, dist.sample(5).dtype)\n self.assertEqual(dist.dtype, dist.mode().dtype)\n dist = make_categorical([], 5, dtype=tf.int64)\n self.assertEqual(dist.dtype, tf.int64)\n self.assertEqual(dist.dtype, dist.sample(5).dtype)\n self.assertEqual(dist.dtype, dist.mode().dtype)\n self.assertEqual(dist.logits.dtype, tf.float32)\n self.assertEqual(dist.logits.dtype, dist.entropy().dtype)\n self.assertEqual(\n dist.logits.dtype, dist.prob(np.array(\n 0, dtype=np.int64)).dtype)\n self.assertEqual(\n dist.logits.dtype, dist.log_prob(np.array(\n 0, dtype=np.int64)).dtype)\n for dtype in [tf.float16, tf.float32, tf.float64]:\n dist = make_categorical([], 5, dtype=dtype)\n self.assertEqual(dist.dtype, dtype)\n self.assertEqual(dist.dtype, dist.sample(5).dtype)\n\n def testUnknownShape(self):\n logits = lambda l: tf1.placeholder_with_default( # pylint: disable=g-long-lambda\n np.float32(l), shape=None)\n sample = lambda l: tfd.Categorical( # pylint: disable=g-long-lambda\n logits=logits(l), validate_args=True).sample()\n # Will sample class 1.\n sample_value = self.evaluate(sample([-1000.0, 1000.0]))\n self.assertEqual(1, sample_value)\n\n # Batch entry 0 will sample class 1, batch entry 1 will sample class 0.\n sample_value_batch = self.evaluate(\n sample([[-1000.0, 1000.0], [1000.0, -1000.0]]))\n self.assertAllEqual([1, 0], sample_value_batch)\n\n def testPMFWithBatch(self):\n histograms = [[0.2, 0.8], [0.6, 0.4]]\n dist = tfd.Categorical(tf.math.log(histograms) - 50., validate_args=True)\n self.assertAllClose([0.2, 0.4], self.evaluate(dist.prob([0, 1])))\n\n def testPMFNoBatch(self):\n histograms = [0.2, 0.8]\n dist = tfd.Categorical(tf.math.log(histograms) - 50., validate_args=True)\n self.assertAllClose(0.2, self.evaluate(dist.prob(0)))\n\n def testCDFWithDynamicEventShapeKnownNdims(self):\n \"\"\"Test that dynamically-sized events with unknown shape work.\"\"\"\n batch_size = 2\n make_ph = tf1.placeholder_with_default\n histograms = lambda h: make_ph(np.float32(h), shape=(batch_size, None))\n event = lambda e: make_ph(np.float32(e), shape=(batch_size,))\n dist = lambda h: tfd.Categorical(probs=histograms(h), validate_args=True)\n cdf_op = lambda h, e: dist(h).cdf(event(e))\n\n # Feed values in with different shapes...\n # three classes.\n event_feed_one = [0, 1]\n histograms_feed_one = [[0.5, 0.3, 0.2], [1.0, 0.0, 0.0]]\n expected_cdf_one = [0.5, 1.0]\n\n # six classes.\n event_feed_two = [2, 5]\n histograms_feed_two = [[0.9, 0.0, 0.0, 0.0, 0.0, 0.1],\n [0.15, 0.2, 0.05, 0.35, 0.13, 0.12]]\n expected_cdf_two = [0.9, 1.0]\n\n actual_cdf_one = self.evaluate(\n cdf_op(histograms_feed_one, event_feed_one))\n actual_cdf_two = self.evaluate(\n cdf_op(histograms_feed_two, event_feed_two))\n\n self.assertAllClose(expected_cdf_one, actual_cdf_one)\n self.assertAllClose(expected_cdf_two, actual_cdf_two)\n\n @parameterized.named_parameters(\n ('test1', [0, 1], [[0.5, 0.3, 0.2], [1.0, 0.0, 0.0]], [0.5, 1.0]),\n ('test2', [2, 5], [[0.9, 0.0, 0.0, 0.0, 0.0, 0.1],\n [0.15, 0.2, 0.05, 0.35, 0.13, 0.12]], [0.9, 1.0]))\n def testCDFWithDynamicEventShapeUnknownNdims(\n self, events, histograms, expected_cdf):\n \"\"\"Test that dynamically-sized events with unknown shape work.\"\"\"\n event_ph = tf1.placeholder_with_default(events, shape=None)\n histograms_ph = tf1.placeholder_with_default(\n histograms, shape=None)\n dist = tfd.Categorical(probs=histograms_ph, validate_args=True)\n cdf_op = dist.cdf(event_ph)\n\n 
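# Editor's note: small illustrative sketch (not part of the original tests) of
# the dtype behaviour exercised above: samples and the mode follow the `dtype`
# argument, while logits-derived quantities keep the logits' float dtype.
# Assumes eager TF2 + TFP.
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

dist = tfd.Categorical(logits=[0., 1., 2.], dtype=tf.int64)
print(dist.sample(3).dtype)   # tf.int64
print(dist.entropy().dtype)   # tf.float32, follows the logits dtype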
actual_cdf = self.evaluate(cdf_op)\n self.assertAllClose(actual_cdf, expected_cdf)\n\n def testCDFWithBatch(self):\n histograms = [[0.2, 0.1, 0.3, 0.25, 0.15],\n [0.1, 0.2, 0.3, 0.2, 0.2],\n [0.1, 0.2, 0.3, 0.2, 0.2],\n [0.1, 0.2, 0.3, 0.2, 0.2]]\n # Note we're testing events outside [0, K-1].\n event = [0, 3, -1, 10]\n expected_cdf = [0.2, 0.8, 0.0, 1.0]\n dist = tfd.Categorical(probs=histograms, validate_args=True)\n cdf_op = dist.cdf(event)\n\n self.assertAllClose(expected_cdf, self.evaluate(cdf_op))\n\n def testCDFWithBatchAndFloatDtype(self):\n histograms = [[0.1, 0.2, 0.3, 0.2, 0.2],\n [0.1, 0.2, 0.3, 0.2, 0.2],\n [0.1, 0.2, 0.3, 0.2, 0.2],\n [0.1, 0.2, 0.3, 0.2, 0.2]]\n # Note we're testing events outside [0, K-1].\n event = [-1., 10., 2.0, 2.5]\n expected_cdf = [0.0, 1.0, 0.6, 0.6]\n dist = tfd.Categorical(\n probs=histograms, dtype=tf.float32, validate_args=True)\n cdf_op = dist.cdf(event)\n\n self.assertAllClose(expected_cdf, self.evaluate(cdf_op))\n\n def testCDFNoBatch(self):\n histogram = [0.1, 0.2, 0.3, 0.4]\n event = 2\n expected_cdf = 0.6\n dist = tfd.Categorical(probs=histogram, validate_args=True)\n cdf_op = dist.cdf(event)\n\n self.assertAlmostEqual(expected_cdf, self.evaluate(cdf_op))\n\n def testCDFBroadcasting(self):\n # shape: [batch=2, n_bins=3]\n histograms = [[0.2, 0.1, 0.7],\n [0.3, 0.45, 0.25]]\n\n # shape: [batch=3, batch=2]\n devent = [\n [0, 0],\n [1, 1],\n [2, 2]\n ]\n dist = tfd.Categorical(probs=histograms, validate_args=True)\n\n # We test that the probabilities are correctly broadcasted over the\n # additional leading batch dimension of size 3.\n expected_cdf_result = np.zeros((3, 2))\n expected_cdf_result[0, 0] = 0.2\n expected_cdf_result[0, 1] = 0.3\n expected_cdf_result[1, 0] = 0.3\n expected_cdf_result[1, 1] = 0.3 + 0.45\n expected_cdf_result[2, 0] = 1.0\n expected_cdf_result[2, 1] = 1.0\n\n self.assertAllClose(expected_cdf_result, self.evaluate(dist.cdf(devent)))\n\n def testBroadcastWithBatchParamsAndBiggerEvent(self):\n ## The parameters have a single batch dimension, and the event has two.\n\n # param shape is [3 x 4], where 4 is the number of bins (non-batch dim).\n cat_params_py = [\n [0.2, 0.15, 0.35, 0.3],\n [0.1, 0.05, 0.68, 0.17],\n [0.1, 0.05, 0.68, 0.17]\n ]\n\n # event shape = [5, 3], both are \"batch\" dimensions.\n disc_event_py = [\n [0, 1, 2],\n [1, 2, 3],\n [0, 0, 0],\n [1, 1, 1],\n [2, 1, 0]\n ]\n\n # shape is [3]\n normal_params_py = [\n -10.0,\n 120.0,\n 50.0\n ]\n\n # shape is [5, 3]\n real_event_py = [\n [-1.0, 0.0, 1.0],\n [100.0, 101, -50],\n [90, 90, 90],\n [-4, -400, 20.0],\n [0.0, 0.0, 0.0]\n ]\n\n cat_params_tf = tf.constant(cat_params_py)\n disc_event_tf = tf.constant(disc_event_py)\n cat = tfd.Categorical(probs=cat_params_tf, validate_args=True)\n\n normal_params_tf = tf.constant(normal_params_py)\n real_event_tf = tf.constant(real_event_py)\n norm = tfd.Normal(loc=normal_params_tf, scale=1.0)\n\n # Check that normal and categorical have the same broadcasting behaviour.\n to_run = {\n 'cat_prob': cat.prob(disc_event_tf),\n 'cat_log_prob': cat.log_prob(disc_event_tf),\n 'cat_cdf': cat.cdf(disc_event_tf),\n 'cat_log_cdf': cat.log_cdf(disc_event_tf),\n 'norm_prob': norm.prob(real_event_tf),\n 'norm_log_prob': norm.log_prob(real_event_tf),\n 'norm_cdf': norm.cdf(real_event_tf),\n 'norm_log_cdf': norm.log_cdf(real_event_tf),\n }\n\n run_result = self.evaluate(to_run)\n\n self.assertAllEqual(run_result['cat_prob'].shape,\n run_result['norm_prob'].shape)\n self.assertAllEqual(run_result['cat_log_prob'].shape,\n 
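# Editor's note: illustrative sketch of the CDF convention the tests above rely
# on: events below 0 get CDF 0 and events at or beyond the last class get
# CDF 1. Assumes eager TF2 + TFP.
import tensorflow_probability as tfp
tfd = tfp.distributions

dist = tfd.Categorical(probs=[0.1, 0.2, 0.3, 0.4])
print(dist.cdf([-1, 0, 2, 10]).numpy())   # [0.0, 0.1, 0.6, 1.0]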
run_result['norm_log_prob'].shape)\n self.assertAllEqual(run_result['cat_cdf'].shape,\n run_result['norm_cdf'].shape)\n self.assertAllEqual(run_result['cat_log_cdf'].shape,\n run_result['norm_log_cdf'].shape)\n\n def testLogPMF(self):\n logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.\n dist = tfd.Categorical(logits, validate_args=True)\n self.assertAllClose(np.log([0.2, 0.4]),\n self.evaluate(dist.log_prob([0, 1])))\n self.assertAllClose(np.log([0.2, 0.4]),\n self.evaluate(dist.log_prob([0.0, 1.0])))\n\n def testEntropyNoBatch(self):\n logits = np.log([0.2, 0.8]) - 50.\n dist = tfd.Categorical(logits, validate_args=True)\n self.assertAllClose(-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)),\n self.evaluate(dist.entropy()),\n atol=0, rtol=1e-5)\n\n def testEntropyWithBatch(self):\n logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.\n dist = tfd.Categorical(logits, validate_args=True)\n self.assertAllClose([-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)),\n -(0.6 * np.log(0.6) + 0.4 * np.log(0.4))],\n self.evaluate(dist.entropy()),\n atol=0, rtol=1e-5)\n\n def testEntropyGradient(self):\n with tf.GradientTape(persistent=True) as tape:\n logits = tf.constant([[1., 2., 3.], [2., 5., 1.]])\n tape.watch(logits)\n\n probabilities = tf.math.softmax(logits)\n log_probabilities = tf.math.log_softmax(logits)\n true_entropy = -tf.reduce_sum(probabilities * log_probabilities, axis=-1)\n\n categorical_distribution = tfd.Categorical(\n probs=probabilities, validate_args=True)\n categorical_entropy = categorical_distribution.entropy()\n\n # works\n true_entropy_g = tape.gradient(true_entropy, logits)\n categorical_entropy_g = tape.gradient(categorical_entropy, logits)\n\n res = self.evaluate({'true_entropy': true_entropy,\n 'categorical_entropy': categorical_entropy,\n 'true_entropy_g': true_entropy_g,\n 'categorical_entropy_g': categorical_entropy_g})\n self.assertAllClose(res['true_entropy'],\n res['categorical_entropy'])\n self.assertAllClose(res['true_entropy_g'],\n res['categorical_entropy_g'])\n\n def testEntropyWithZeroProbabilities(self):\n probs = [[0, 0.5, 0.5], [0, 1, 0]]\n dist = tfd.Categorical(probs=probs, validate_args=True)\n dist_entropy = dist.entropy()\n\n ans = [-(0.5*np.log(0.5) + 0.5*np.log(0.5)), -(np.log(1))]\n self.assertAllClose(self.evaluate(dist_entropy), ans)\n\n def testEntropyWithNegInfLogits(self):\n probs = [[0, 0.5, 0.5], [0, 1, 0]]\n dist = tfd.Categorical(logits=np.log(probs), validate_args=True)\n dist_entropy = dist.entropy()\n\n ans = [-(0.5*np.log(0.5) + 0.5*np.log(0.5)), -(np.log(1))]\n self.assertAllClose(self.evaluate(dist_entropy), ans)\n\n def testSample(self):\n histograms = [[[0.2, 0.8], [0.4, 0.6]]]\n dist = tfd.Categorical(tf.math.log(histograms) - 50., validate_args=True)\n n = 10000\n samples = dist.sample(n, seed=test_util.test_seed())\n tensorshape_util.set_shape(samples, [n, 1, 2])\n self.assertEqual(samples.dtype, tf.int32)\n sample_values = self.evaluate(samples)\n self.assertFalse(np.any(sample_values < 0))\n self.assertFalse(np.any(sample_values > 1))\n self.assertAllClose(\n [[0.2, 0.4]], np.mean(sample_values == 0, axis=0), atol=1e-2)\n self.assertAllClose(\n [[0.8, 0.6]], np.mean(sample_values == 1, axis=0), atol=1e-2)\n\n def testSampleWithSampleShape(self):\n histograms = [[[0.2, 0.8], [0.4, 0.6]]]\n dist = tfd.Categorical(tf.math.log(histograms) - 50., validate_args=True)\n samples = dist.sample((100, 100), seed=test_util.test_seed())\n prob = dist.prob(samples)\n prob_val = self.evaluate(prob)\n self.assertAllClose(\n [0.2**2 + 0.8**2], [prob_val[:, :, :, 
0].mean()], atol=1e-2)\n self.assertAllClose(\n [0.4**2 + 0.6**2], [prob_val[:, :, :, 1].mean()], atol=1e-2)\n\n def testNotReparameterized(self):\n p = tf.constant([0.3, 0.3, 0.4])\n _, grad_p = tfp.math.value_and_gradient(\n lambda x: tfd.Categorical(x, validate_args=True).sample(100), p)\n self.assertIsNone(grad_p)\n\n def testLogPMFBroadcasting(self):\n # 1 x 2 x 2\n histograms = [[[0.2, 0.8], [0.4, 0.6]]]\n dist = tfd.Categorical(tf.math.log(histograms) - 50., validate_args=True)\n\n prob = dist.prob(1)\n self.assertAllClose([[0.8, 0.6]], self.evaluate(prob))\n\n prob = dist.prob([1])\n self.assertAllClose([[0.8, 0.6]], self.evaluate(prob))\n\n prob = dist.prob([0, 1])\n self.assertAllClose([[0.2, 0.6]], self.evaluate(prob))\n\n prob = dist.prob([[0, 1]])\n self.assertAllClose([[0.2, 0.6]], self.evaluate(prob))\n\n prob = dist.prob([[[0, 1]]])\n self.assertAllClose([[[0.2, 0.6]]], self.evaluate(prob))\n\n prob = dist.prob([[1, 0], [0, 1]])\n self.assertAllClose([[0.8, 0.4], [0.2, 0.6]], self.evaluate(prob))\n\n prob = dist.prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])\n self.assertAllClose([[[0.8, 0.6], [0.8, 0.4]], [[0.8, 0.4], [0.2, 0.6]]],\n self.evaluate(prob))\n\n def testLogPMFShape(self):\n # shape [1, 2, 2]\n histograms = [[[0.2, 0.8], [0.4, 0.6]]]\n dist = tfd.Categorical(tf.math.log(histograms), validate_args=True)\n\n log_prob = dist.log_prob([0, 1])\n self.assertEqual(2, tensorshape_util.rank(log_prob.shape))\n self.assertAllEqual([1, 2], log_prob.shape)\n\n log_prob = dist.log_prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])\n self.assertEqual(3, tensorshape_util.rank(log_prob.shape))\n self.assertAllEqual([2, 2, 2], log_prob.shape)\n\n def testLogPMFShapeNoBatch(self):\n histograms = [0.2, 0.8]\n dist = tfd.Categorical(tf.math.log(histograms), validate_args=True)\n\n log_prob = dist.log_prob(0)\n self.assertEqual(0, tensorshape_util.rank(log_prob.shape))\n self.assertAllEqual([], log_prob.shape)\n\n log_prob = dist.log_prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])\n self.assertEqual(3, tensorshape_util.rank(log_prob.shape))\n self.assertAllEqual([2, 2, 2], log_prob.shape)\n\n def testMode(self):\n histograms = [[[0.2, 0.8], [0.6, 0.4]]]\n dist = tfd.Categorical(tf.math.log(histograms) - 50., validate_args=True)\n self.assertAllEqual([[1, 0]], self.evaluate(dist.mode()))\n\n def testCategoricalCategoricalKL(self):\n\n def np_softmax(logits):\n exp_logits = np.exp(logits)\n return exp_logits / exp_logits.sum(axis=-1, keepdims=True)\n\n for categories in [2, 4]:\n for batch_size in [1, 10]:\n a_logits = np.random.randn(batch_size, categories)\n b_logits = np.random.randn(batch_size, categories)\n\n a = tfd.Categorical(logits=a_logits, validate_args=True)\n b = tfd.Categorical(logits=b_logits, validate_args=True)\n\n kl = tfd.kl_divergence(a, b)\n kl_val = self.evaluate(kl)\n # Make sure KL(a||a) is 0\n kl_same = self.evaluate(tfd.kl_divergence(a, a))\n\n prob_a = np_softmax(a_logits)\n prob_b = np_softmax(b_logits)\n kl_expected = np.sum(prob_a * (np.log(prob_a) - np.log(prob_b)),\n axis=-1)\n\n self.assertEqual(kl.shape, (batch_size,))\n self.assertAllClose(kl_val, kl_expected)\n self.assertAllClose(kl_same, np.zeros_like(kl_expected))\n\n def testParamTensorFromLogits(self):\n x = tf.constant([-1., 0.5, 1.])\n d = tfd.Categorical(logits=x, validate_args=True)\n self.assertAllClose(\n *self.evaluate([x, d.logits_parameter()]),\n atol=0, rtol=1e-4)\n self.assertAllClose(\n *self.evaluate([tf.math.softmax(x),\n d.probs_parameter()]),\n atol=0,\n rtol=1e-4)\n\n def 
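# Editor's note: a hypothetical numpy cross-check of the Categorical KL
# computed above, mirroring the softmax-based formula used in the test.
# Assumes eager TF2 + TFP.
import numpy as np
import tensorflow_probability as tfp
tfd = tfp.distributions

a_logits = np.random.randn(4).astype(np.float32)
b_logits = np.random.randn(4).astype(np.float32)
kl_tf = tfd.kl_divergence(tfd.Categorical(logits=a_logits),
                          tfd.Categorical(logits=b_logits)).numpy()

def softmax(z):
  e = np.exp(z - z.max())
  return e / e.sum()

p, q = softmax(a_logits), softmax(b_logits)
np.testing.assert_allclose(kl_tf, np.sum(p * (np.log(p) - np.log(q))),
                           rtol=1e-4, atol=1e-6)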
testParamTensorFromProbs(self):\n x = tf.constant([0.1, 0.5, 0.4])\n d = tfd.Categorical(probs=x, validate_args=True)\n self.assertAllClose(\n *self.evaluate([tf.math.log(x), d.logits_parameter()]),\n atol=0, rtol=1e-4)\n self.assertAllClose(\n *self.evaluate([x, d.probs_parameter()]),\n atol=0, rtol=1e-4)\n\n\n@test_util.test_all_tf_execution_regimes\nclass CategoricalFromVariableTest(test_util.TestCase):\n\n def testGradientLogits(self):\n x = tf.Variable([-1., 0., 1])\n d = tfd.Categorical(logits=x, validate_args=True)\n with tf.GradientTape() as tape:\n loss = -d.log_prob([0, 2])\n g = tape.gradient(loss, d.trainable_variables)\n self.assertLen(g, 1)\n self.assertAllNotNone(g)\n\n def testGradientProbs(self):\n x = tf.Variable([0.1, 0.7, 0.2])\n d = tfd.Categorical(probs=x, validate_args=True)\n with tf.GradientTape() as tape:\n loss = -d.log_prob([0, 2])\n g = tape.gradient(loss, d.trainable_variables)\n self.assertLen(g, 1)\n self.assertAllNotNone(g)\n\n def testAssertionsProbs(self):\n x = tf.Variable([0.1, 0.7, 0.0])\n with self.assertRaisesOpError('Argument `probs` must sum to 1.'):\n d = tfd.Categorical(probs=x, validate_args=True)\n self.evaluate([v.initializer for v in d.variables])\n self.evaluate(d.entropy())\n\n def testAssertionsLogits(self):\n x = tfp.util.TransformedVariable(0., tfb.Identity(), shape=None)\n with self.assertRaisesRegexp(\n ValueError, 'Argument `logits` must have rank at least 1.'):\n d = tfd.Categorical(logits=x, validate_args=True)\n self.evaluate([v.initializer for v in d.variables])\n self.evaluate(d.entropy())\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\n# Dependency imports\nimport numpy as np\nfrom scipy import special as sp_special\nfrom scipy import stats as sp_stats\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import test_util\n\ntfd = tfp.distributions\n\n\n@test_util.test_all_tf_execution_regimes\nclass GammaTest(test_util.TestCase):\n\n def testGammaShape(self):\n alpha = tf.constant([3.0] * 5)\n beta = tf.constant(11.0)\n gamma = tfd.Gamma(concentration=alpha, rate=beta, validate_args=True)\n\n self.assertEqual(self.evaluate(gamma.batch_shape_tensor()), (5,))\n self.assertEqual(gamma.batch_shape, tf.TensorShape([5]))\n self.assertAllEqual(self.evaluate(gamma.event_shape_tensor()), [])\n self.assertEqual(gamma.event_shape, tf.TensorShape([]))\n\n def testGammaLogPDF(self):\n batch_size = 6\n alpha = tf.constant([2.0] * batch_size)\n beta = tf.constant([3.0] * batch_size)\n alpha_v = 2.0\n beta_v = 3.0\n x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)\n gamma = tfd.Gamma(concentration=alpha, rate=beta, validate_args=True)\n log_pdf = 
gamma.log_prob(x)\n self.assertEqual(log_pdf.shape, (6,))\n pdf = gamma.prob(x)\n self.assertEqual(pdf.shape, (6,))\n expected_log_pdf = sp_stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)\n self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)\n self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))\n\n def testGammaLogPDFBoundary(self):\n # When concentration = 1, we have an exponential distribution. Check that at\n # 0 we have finite log prob.\n rate = np.array([0.1, 0.5, 1., 2., 5., 10.], dtype=np.float32)\n gamma = tfd.Gamma(concentration=1., rate=rate, validate_args=False)\n log_pdf = gamma.log_prob(0.)\n self.assertAllClose(np.log(rate), self.evaluate(log_pdf))\n\n def testGammaLogPDFMultidimensional(self):\n batch_size = 6\n alpha = tf.constant([[2.0, 4.0]] * batch_size)\n beta = tf.constant([[3.0, 4.0]] * batch_size)\n alpha_v = np.array([2.0, 4.0])\n beta_v = np.array([3.0, 4.0])\n x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T\n gamma = tfd.Gamma(concentration=alpha, rate=beta, validate_args=True)\n log_pdf = gamma.log_prob(x)\n log_pdf_values = self.evaluate(log_pdf)\n self.assertEqual(log_pdf.shape, (6, 2))\n pdf = gamma.prob(x)\n pdf_values = self.evaluate(pdf)\n self.assertEqual(pdf.shape, (6, 2))\n expected_log_pdf = sp_stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)\n self.assertAllClose(log_pdf_values, expected_log_pdf)\n self.assertAllClose(pdf_values, np.exp(expected_log_pdf))\n\n def testGammaLogPDFMultidimensionalBroadcasting(self):\n batch_size = 6\n alpha = tf.constant([[2.0, 4.0]] * batch_size)\n beta = tf.constant(3.0)\n alpha_v = np.array([2.0, 4.0])\n beta_v = 3.0\n x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T\n gamma = tfd.Gamma(concentration=alpha, rate=beta, validate_args=True)\n log_pdf = gamma.log_prob(x)\n log_pdf_values = self.evaluate(log_pdf)\n self.assertEqual(log_pdf.shape, (6, 2))\n pdf = gamma.prob(x)\n pdf_values = self.evaluate(pdf)\n self.assertEqual(pdf.shape, (6, 2))\n\n expected_log_pdf = sp_stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)\n self.assertAllClose(log_pdf_values, expected_log_pdf)\n self.assertAllClose(pdf_values, np.exp(expected_log_pdf))\n\n def testGammaCDF(self):\n batch_size = 6\n alpha = tf.constant([2.0] * batch_size)\n beta = tf.constant([3.0] * batch_size)\n alpha_v = 2.0\n beta_v = 3.0\n x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)\n\n gamma = tfd.Gamma(concentration=alpha, rate=beta, validate_args=True)\n cdf = gamma.cdf(x)\n self.assertEqual(cdf.shape, (6,))\n expected_cdf = sp_stats.gamma.cdf(x, alpha_v, scale=1 / beta_v)\n self.assertAllClose(self.evaluate(cdf), expected_cdf)\n\n def testGammaMean(self):\n alpha_v = np.array([1.0, 3.0, 2.5])\n beta_v = np.array([1.0, 4.0, 5.0])\n gamma = tfd.Gamma(concentration=alpha_v, rate=beta_v, validate_args=True)\n self.assertEqual(gamma.mean().shape, (3,))\n expected_means = sp_stats.gamma.mean(alpha_v, scale=1 / beta_v)\n self.assertAllClose(self.evaluate(gamma.mean()), expected_means)\n\n def testGammaModeAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):\n alpha_v = np.array([5.5, 3.0, 2.5])\n beta_v = np.array([1.0, 4.0, 5.0])\n gamma = tfd.Gamma(concentration=alpha_v, rate=beta_v, validate_args=True)\n expected_modes = (alpha_v - 1) / beta_v\n self.assertEqual(gamma.mode().shape, (3,))\n self.assertAllClose(self.evaluate(gamma.mode()), expected_modes)\n\n def testGammaModeAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):\n # Mode will not be defined for the first entry.\n 
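# Editor's note: minimal sketch (not from the original file) of the
# rate-vs-scale convention the tests above depend on: TFP's `rate` is the
# inverse of scipy's `scale`. Assumes eager TF2 + TFP + scipy.
import numpy as np
from scipy import stats as sp_stats
import tensorflow_probability as tfp
tfd = tfp.distributions

concentration, rate = 2.0, 3.0
x = np.array([0.5, 1.0, 2.5], dtype=np.float32)
lp_tf = tfd.Gamma(concentration=concentration, rate=rate).log_prob(x).numpy()
lp_sp = sp_stats.gamma.logpdf(x, concentration, scale=1. / rate)
np.testing.assert_allclose(lp_tf, lp_sp, rtol=1e-4)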
alpha_v = np.array([0.5, 3.0, 2.5])\n beta_v = np.array([1.0, 4.0, 5.0])\n gamma = tfd.Gamma(\n concentration=alpha_v,\n rate=beta_v,\n allow_nan_stats=False,\n validate_args=True)\n with self.assertRaisesOpError(\n \"Mode not defined when any concentration <= 1.\"):\n self.evaluate(gamma.mode())\n\n def testGammaModeAllowNanStatsIsTrueReturnsNaNforUndefinedBatchMembers(self):\n # Mode will not be defined for the first entry.\n alpha_v = np.array([0.5, 3.0, 2.5])\n beta_v = np.array([1.0, 4.0, 5.0])\n gamma = tfd.Gamma(\n concentration=alpha_v,\n rate=beta_v,\n allow_nan_stats=True,\n validate_args=True)\n expected_modes = (alpha_v - 1) / beta_v\n expected_modes[0] = np.nan\n self.assertEqual(gamma.mode().shape, (3,))\n self.assertAllClose(self.evaluate(gamma.mode()), expected_modes)\n\n def testGammaVariance(self):\n alpha_v = np.array([1.0, 3.0, 2.5])\n beta_v = np.array([1.0, 4.0, 5.0])\n gamma = tfd.Gamma(concentration=alpha_v, rate=beta_v, validate_args=True)\n self.assertEqual(gamma.variance().shape, (3,))\n expected_variances = sp_stats.gamma.var(alpha_v, scale=1 / beta_v)\n self.assertAllClose(self.evaluate(gamma.variance()), expected_variances)\n\n def testGammaStd(self):\n alpha_v = np.array([1.0, 3.0, 2.5])\n beta_v = np.array([1.0, 4.0, 5.0])\n gamma = tfd.Gamma(concentration=alpha_v, rate=beta_v, validate_args=True)\n self.assertEqual(gamma.stddev().shape, (3,))\n expected_stddev = sp_stats.gamma.std(alpha_v, scale=1. / beta_v)\n self.assertAllClose(self.evaluate(gamma.stddev()), expected_stddev)\n\n def testGammaEntropy(self):\n alpha_v = np.array([1.0, 3.0, 2.5])\n beta_v = np.array([1.0, 4.0, 5.0])\n gamma = tfd.Gamma(concentration=alpha_v, rate=beta_v, validate_args=True)\n self.assertEqual(gamma.entropy().shape, (3,))\n expected_entropy = sp_stats.gamma.entropy(alpha_v, scale=1 / beta_v)\n self.assertAllClose(self.evaluate(gamma.entropy()), expected_entropy)\n\n def testGammaSampleSmallAlpha(self):\n alpha_v = 0.05\n beta_v = 1.0\n alpha = tf.constant(alpha_v)\n beta = tf.constant(beta_v)\n n = 100000\n gamma = tfd.Gamma(concentration=alpha, rate=beta, validate_args=True)\n samples = gamma.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual(samples.shape, (n,))\n self.assertEqual(sample_values.shape, (n,))\n self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))\n self.assertAllClose(\n sample_values.mean(),\n sp_stats.gamma.mean(alpha_v, scale=1 / beta_v),\n atol=.01)\n self.assertAllClose(\n sample_values.var(),\n sp_stats.gamma.var(alpha_v, scale=1 / beta_v),\n atol=.15)\n\n def testGammaSample(self):\n alpha_v = 4.0\n beta_v = 3.0\n alpha = tf.constant(alpha_v)\n beta = tf.constant(beta_v)\n n = 100000\n gamma = tfd.Gamma(concentration=alpha, rate=beta, validate_args=True)\n samples = gamma.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual(samples.shape, (n,))\n self.assertEqual(sample_values.shape, (n,))\n self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))\n self.assertAllClose(\n sample_values.mean(),\n sp_stats.gamma.mean(alpha_v, scale=1 / beta_v),\n atol=.01)\n self.assertAllClose(\n sample_values.var(),\n sp_stats.gamma.var(alpha_v, scale=1 / beta_v),\n atol=.15)\n\n @test_util.numpy_disable_gradient_test\n def testGammaFullyReparameterized(self):\n alpha = tf.constant(4.0)\n beta = tf.constant(3.0)\n _, [grad_alpha, grad_beta] = tfp.math.value_and_gradient(\n lambda a, b: tfd.Gamma(concentration=a, rate=b, validate_args=True). 
# pylint: disable=g-long-lambda\n sample(100), [alpha, beta])\n self.assertIsNotNone(grad_alpha)\n self.assertIsNotNone(grad_beta)\n\n def testGammaSampleMultiDimensional(self):\n alpha_v = np.array([np.arange(1, 101, dtype=np.float32)]) # 1 x 100\n beta_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1\n gamma = tfd.Gamma(concentration=alpha_v, rate=beta_v, validate_args=True)\n n = 10000\n samples = gamma.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual(samples.shape, (n, 10, 100))\n self.assertEqual(sample_values.shape, (n, 10, 100))\n zeros = np.zeros_like(alpha_v + beta_v) # 10 x 100\n alpha_bc = alpha_v + zeros\n beta_bc = beta_v + zeros\n self.assertAllClose(\n sample_values.mean(axis=0),\n sp_stats.gamma.mean(alpha_bc, scale=1 / beta_bc),\n atol=0.,\n rtol=.05)\n self.assertAllClose(\n sample_values.var(axis=0),\n sp_stats.gamma.var(alpha_bc, scale=1 / beta_bc),\n atol=10.0,\n rtol=0.)\n fails = 0\n trials = 0\n for ai, a in enumerate(np.reshape(alpha_v, [-1])):\n for bi, b in enumerate(np.reshape(beta_v, [-1])):\n s = sample_values[:, bi, ai]\n trials += 1\n fails += 0 if self._kstest(a, b, s) else 1\n self.assertLess(fails, trials * 0.03)\n\n def _kstest(self, alpha, beta, samples):\n # Uses the Kolmogorov-Smirnov test for goodness of fit.\n ks, _ = sp_stats.kstest(samples, sp_stats.gamma(alpha, scale=1 / beta).cdf)\n # Return True when the test passes.\n return ks < 0.02\n\n def testGammaPdfOfSampleMultiDims(self):\n gamma = tfd.Gamma(\n concentration=[7., 11.], rate=[[5.], [6.]], validate_args=True)\n num = 50000\n samples = gamma.sample(num, seed=test_util.test_seed())\n pdfs = gamma.prob(samples)\n sample_vals, pdf_vals = self.evaluate([samples, pdfs])\n self.assertEqual(samples.shape, (num, 2, 2))\n self.assertEqual(pdfs.shape, (num, 2, 2))\n self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)\n self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)\n self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)\n self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)\n self.assertAllClose(\n sp_stats.gamma.mean([[7., 11.], [7., 11.]],\n scale=1 / np.array([[5., 5.], [6., 6.]])),\n sample_vals.mean(axis=0),\n atol=.1)\n self.assertAllClose(\n sp_stats.gamma.var([[7., 11.], [7., 11.]],\n scale=1 / np.array([[5., 5.], [6., 6.]])),\n sample_vals.var(axis=0),\n atol=.1)\n\n def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):\n s_p = zip(sample_vals, pdf_vals)\n prev = (0, 0)\n total = 0\n for k in sorted(s_p, key=lambda x: x[0]):\n pair_pdf = (k[1] + prev[1]) / 2\n total += (k[0] - prev[0]) * pair_pdf\n prev = k\n self.assertNear(1., total, err=err)\n\n def testGammaNonPositiveInitializationParamsRaises(self):\n alpha_v = tf.constant(0.0, name=\"alpha\")\n beta_v = tf.constant(1.0, name=\"beta\")\n with self.assertRaisesOpError(\"Argument `concentration` must be positive.\"):\n gamma = tfd.Gamma(\n concentration=alpha_v, rate=beta_v, validate_args=True)\n self.evaluate(gamma.mean())\n alpha_v = tf.constant(1.0, name=\"alpha\")\n beta_v = tf.constant(0.0, name=\"beta\")\n with self.assertRaisesOpError(\"Argument `rate` must be positive.\"):\n gamma = tfd.Gamma(\n concentration=alpha_v, rate=beta_v, validate_args=True)\n self.evaluate(gamma.mean())\n\n def testGammaGammaKL(self):\n alpha0 = np.array([3.])\n beta0 = np.array([1., 2., 3., 1.5, 2.5, 3.5])\n\n alpha1 = np.array([0.4])\n beta1 = np.array([0.5, 1., 1.5, 2., 2.5, 3.])\n\n # Build graph.\n 
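# Editor's note: illustrative sketch of the Kolmogorov-Smirnov goodness-of-fit
# check that `_kstest` above performs, run against scipy's Gamma CDF.
# Assumes eager TF2 + TFP + scipy; the threshold is the same 0.02 the test uses.
from scipy import stats as sp_stats
import tensorflow_probability as tfp
tfd = tfp.distributions

concentration, rate = 4.0, 3.0
samples = tfd.Gamma(concentration=concentration, rate=rate).sample(
    100000, seed=42).numpy()
ks, _ = sp_stats.kstest(samples,
                        sp_stats.gamma(concentration, scale=1. / rate).cdf)
print(ks < 0.02)   # expected True for a correct sampler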
g0 = tfd.Gamma(concentration=alpha0, rate=beta0, validate_args=True)\n g1 = tfd.Gamma(concentration=alpha1, rate=beta1, validate_args=True)\n x = g0.sample(int(1e4), seed=test_util.test_seed())\n kl_sample = tf.reduce_mean(g0.log_prob(x) - g1.log_prob(x), axis=0)\n kl_actual = tfd.kl_divergence(g0, g1)\n\n # Execute graph.\n [kl_sample_, kl_actual_] = self.evaluate([kl_sample, kl_actual])\n\n self.assertEqual(beta0.shape, kl_actual.shape)\n\n kl_expected = ((alpha0 - alpha1) * sp_special.digamma(alpha0)\n + sp_special.gammaln(alpha1)\n - sp_special.gammaln(alpha0)\n + alpha1 * np.log(beta0)\n - alpha1 * np.log(beta1)\n + alpha0 * (beta1 / beta0 - 1.))\n\n self.assertAllClose(kl_expected, kl_actual_, atol=0., rtol=1e-6)\n self.assertAllClose(kl_sample_, kl_actual_, atol=0., rtol=1e-1)\n\n @test_util.numpy_disable_gradient_test\n @test_util.jax_disable_variable_test\n def testGradientThroughConcentration(self):\n concentration = tf.Variable(3.)\n d = tfd.Gamma(concentration=concentration, rate=5., validate_args=True)\n with tf.GradientTape() as tape:\n loss = -d.log_prob([1., 2., 4.])\n grad = tape.gradient(loss, d.trainable_variables)\n self.assertLen(grad, 1)\n self.assertAllNotNone(grad)\n\n @test_util.jax_disable_variable_test\n def testAssertsPositiveConcentration(self):\n concentration = tf.Variable([1., 2., -3.])\n self.evaluate(concentration.initializer)\n with self.assertRaisesOpError(\"Argument `concentration` must be positive.\"):\n d = tfd.Gamma(concentration=concentration, rate=[5.], validate_args=True)\n self.evaluate(d.sample())\n\n def testAssertsPositiveConcentrationAfterMutation(self):\n concentration = tf.Variable([1., 2., 3.])\n self.evaluate(concentration.initializer)\n d = tfd.Gamma(concentration=concentration, rate=[5.], validate_args=True)\n with self.assertRaisesOpError(\"Argument `concentration` must be positive.\"):\n with tf.control_dependencies([concentration.assign([1., 2., -3.])]):\n self.evaluate(d.sample())\n\n def testGradientThroughRate(self):\n rate = tf.Variable(3.)\n d = tfd.Gamma(concentration=1., rate=rate, validate_args=True)\n with tf.GradientTape() as tape:\n loss = -d.log_prob([1., 2., 4.])\n grad = tape.gradient(loss, d.trainable_variables)\n self.assertLen(grad, 1)\n self.assertAllNotNone(grad)\n\n def testAssertsPositiveRate(self):\n rate = tf.Variable([1., 2., -3.])\n self.evaluate(rate.initializer)\n with self.assertRaisesOpError(\"Argument `rate` must be positive.\"):\n d = tfd.Gamma(concentration=[5.], rate=rate, validate_args=True)\n self.evaluate(d.sample())\n\n def testAssertsPositiveRateAfterMutation(self):\n rate = tf.Variable([1., 2., 3.])\n self.evaluate(rate.initializer)\n d = tfd.Gamma(concentration=[3.], rate=rate, validate_args=True)\n self.evaluate(d.mean())\n with self.assertRaisesOpError(\"Argument `rate` must be positive.\"):\n with tf.control_dependencies([rate.assign([1., 2., -3.])]):\n self.evaluate(d.sample())\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing 
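# Editor's note: hypothetical cross-check of the Gamma-Gamma KL tested above,
# using the same closed form as the test's `kl_expected`. Assumes eager TF2 +
# TFP + scipy.
import numpy as np
from scipy import special as sp_special
import tensorflow_probability as tfp
tfd = tfp.distributions

a0, b0, a1, b1 = 3.0, 2.0, 0.4, 1.5
kl_tf = tfd.kl_divergence(tfd.Gamma(concentration=a0, rate=b0),
                          tfd.Gamma(concentration=a1, rate=b1)).numpy()
kl_closed_form = ((a0 - a1) * sp_special.digamma(a0)
                  + sp_special.gammaln(a1) - sp_special.gammaln(a0)
                  + a1 * np.log(b0) - a1 * np.log(b1)
                  + a0 * (b1 / b0 - 1.))
np.testing.assert_allclose(kl_tf, kl_closed_form, rtol=1e-4)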
permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Functional MCMC: A functional API for creating new Markov Chains.\n\nThe core convention of this API is that transition operators have the following\nform:\n\n```\ntransition_operator(state...) -> (new_state..., extra_outputs)\n```\n\nWhere 'x...', reresents one or more values. This operator can then be called\nrecursively as follows:\n\n```\nstate = ...\nwhile not_done:\n state, extra = transition_operator(*state)\n```\n\n`state` is allowed to be partially specified (i.e. have `None` elements), which\nthe transition operator must impute when it returns the new state. See\n`call_transition_operator` for more details of the calling convention.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# [internal] enable type annotations\nfrom __future__ import print_function\n\nimport collections\n\nimport numpy as np\n\nimport tensorflow_probability as tfp\nfrom discussion.fun_mcmc import backend\nfrom typing import Any, Callable, List, Mapping, Optional, Sequence, Text, Tuple, Union\n\ntf = backend.tf\nutil = backend.util\ntfb = tfp.bijectors\nmcmc_util = tfp.mcmc.internal.util\n\n__all__ = [\n 'adam_init',\n 'adam_step',\n 'AdamExtra',\n 'AdamState',\n 'blanes_3_stage_step',\n 'blanes_4_stage_step',\n 'call_fn',\n 'call_potential_fn',\n 'call_potential_fn_with_grads',\n 'call_transition_operator',\n 'gaussian_momentum_sample',\n 'gradient_descent_step',\n 'GradientDescentExtra',\n 'GradientDescentState',\n 'hamiltonian_integrator',\n 'hamiltonian_monte_carlo',\n 'hamiltonian_monte_carlo_init',\n 'HamiltonianMonteCarloExtra',\n 'HamiltonianMonteCarloState',\n 'IntegratorExtras',\n 'IntegratorState',\n 'IntegratorStep',\n 'IntegratorStepState',\n 'leapfrog_step',\n 'make_gaussian_kinetic_energy_fn',\n 'maybe_broadcast_structure',\n 'mclachlan_optimal_4th_order_step',\n 'metropolis_hastings_step',\n 'MetropolisHastingsExtra',\n 'potential_scale_reduction_extract',\n 'potential_scale_reduction_init',\n 'potential_scale_reduction_step',\n 'PotentialFn',\n 'PotentialScaleReductionState',\n 'random_walk_metropolis',\n 'random_walk_metropolis_init',\n 'RandomWalkMetropolisExtra',\n 'RandomWalkMetropolisState',\n 'running_approximate_auto_covariance_init',\n 'running_approximate_auto_covariance_step',\n 'running_covariance_init',\n 'running_covariance_step',\n 'running_mean_init',\n 'running_mean_step',\n 'running_variance_init',\n 'running_variance_step',\n 'RunningApproximateAutoCovarianceState',\n 'RunningCovarianceState',\n 'RunningMeanState',\n 'RunningVarianceState',\n 'ruth4_step',\n 'sign_adaptation',\n 'spliting_integrator_step',\n 'State',\n 'trace',\n 'transform_log_prob_fn',\n 'transition_kernel_wrapper',\n 'TransitionOperator',\n]\n\n# We quote tf types to avoid unconditionally loading the TF backend.\nAnyTensor = Union['tf.Tensor', np.ndarray, np.generic]\nBooleanTensor = Union[bool, 'tf.Tensor', np.ndarray, np.bool_]\nIntTensor = Union[int, 'tf.Tensor', np.ndarray, np.integer]\nFloatTensor = Union[float, 'tf.Tensor', np.ndarray, np.floating]\n# TODO(b/109648354): Correctly represent the recursive nature of this type.\nTensorNest = Union[AnyTensor, Sequence[AnyTensor], Mapping[Any, AnyTensor]]\nTensorSpecNest = Union['tf.TensorSpec', Sequence['tf.TensorSpec'],\n Mapping[Any, 'tf.TensorSpec']]\nBijectorNest = Union[tfb.Bijector, Sequence[tfb.Bijector],\n Mapping[Any, tfb.Bijector]]\nFloatNest = Union[FloatTensor, 
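# Editor's note: a toy, self-contained illustration (all names made up, no
# fun_mcmc import) of the transition-operator calling convention described in
# the module docstring above: each call returns a new state plus extra
# outputs, and the state threads through a loop.
def counter_operator(count, total):
  """A trivial `TransitionOperator`: state is (count, total)."""
  new_state = (count + 1, total + count)
  extra = {'last_count': count}
  return new_state, extra

state = (0, 0)
for _ in range(5):
  state, extra = counter_operator(*state)
print(state)   # (5, 10)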
Sequence[FloatTensor], Mapping[Any, FloatTensor]]\nIntNest = Union[IntTensor, Sequence[IntTensor], Mapping[Any, IntTensor]]\nDTypeNest = Union['tf.DType', Sequence['tf.DType'], Mapping[Any, 'tf.DType']]\nState = TensorNest # pylint: disable=invalid-name\nTransitionOperator = Callable[..., Tuple[State, TensorNest]]\nPotentialFn = Union[Callable[[TensorNest], Tuple['tf.Tensor', TensorNest]],\n Callable[..., Tuple['tf.Tensor', TensorNest]]]\n\n\ndef trace(\n state: State,\n fn: TransitionOperator,\n num_steps: IntTensor,\n trace_fn: Callable[[State, TensorNest], TensorNest],\n parallel_iterations: int = 10,\n) -> Tuple[State, TensorNest]:\n \"\"\"`TransitionOperator` that runs `fn` repeatedly and traces its outputs.\n\n Args:\n state: A nest of `Tensor`s or None.\n fn: A `TransitionOperator`.\n num_steps: Number of steps to run the function for. Must be greater than 1.\n trace_fn: Callable that the unpacked outputs of `fn` and returns a nest of\n `Tensor`s. These will be stacked and returned.\n parallel_iterations: Number of iterations of the while loop to run in\n parallel.\n\n Returns:\n state: The final state returned by `fn`.\n traces: Stacked outputs of `trace_fn`.\n \"\"\"\n state = util.map_tree(lambda t: (t if t is None else tf.convert_to_tensor(t)),\n state)\n\n def wrapper(state):\n state, extra = util.map_tree(tf.convert_to_tensor,\n call_transition_operator(fn, state))\n trace_element = util.map_tree(tf.convert_to_tensor, trace_fn(state, extra))\n return state, trace_element\n\n # JAX tracing/pre-compilation isn't as stable as TF's, so we won't use it to\n # start.\n if (backend.get_backend() != backend.TENSORFLOW or\n any(e is None for e in util.flatten_tree(state)) or\n tf.executing_eagerly()):\n state, first_trace = wrapper(state)\n trace_arrays = util.map_tree(\n lambda v: util.write_dynamic_array( # pylint: disable=g-long-lambda\n util.make_dynamic_array(\n v.dtype, size=num_steps, element_shape=v.shape), 0, v),\n first_trace)\n start_idx = 1\n else:\n state_spec = util.map_tree(tf.TensorSpec.from_tensor, state)\n # We need the shapes and dtypes of the outputs of `wrapper` function to\n # create the `TensorArray`s, we can get it by pre-compiling the wrapper\n # function.\n wrapper = tf.function(autograph=False)(wrapper)\n concrete_wrapper = wrapper.get_concrete_function(state_spec)\n _, trace_dtypes = concrete_wrapper.output_dtypes\n _, trace_shapes = concrete_wrapper.output_shapes\n trace_arrays = util.map_tree(\n lambda dtype, shape: tf.TensorArray( # pylint: disable=g-long-lambda\n dtype,\n size=num_steps,\n element_shape=shape),\n trace_dtypes,\n trace_shapes)\n wrapper = lambda state: concrete_wrapper(*util.flatten_tree(state))\n start_idx = 0\n\n def body(i, state, trace_arrays):\n state, trace_element = wrapper(state)\n trace_arrays = util.map_tree(lambda a, v: util.write_dynamic_array(a, i, v),\n trace_arrays, trace_element)\n return i + 1, state, trace_arrays\n\n def cond(i, *_):\n return i < num_steps\n\n _, state, trace_arrays = tf.while_loop(\n cond=cond,\n body=body,\n loop_vars=(start_idx, state, trace_arrays),\n parallel_iterations=parallel_iterations)\n\n stacked_trace = util.map_tree(util.snapshot_dynamic_array, trace_arrays)\n\n # TensorFlow often loses the static shape information.\n if backend.get_backend() == backend.TENSORFLOW:\n static_length = tf.get_static_value(num_steps)\n\n def _merge_static_length(x):\n x.set_shape(tf.TensorShape(static_length).concatenate(x.shape[1:]))\n return x\n\n stacked_trace = util.map_tree(_merge_static_length, 
stacked_trace)\n\n return state, stacked_trace\n\n\ndef _tree_repr(tree: Any) -> Text:\n \"\"\"Utility to get a string representation of the the structure of `tree`.\"\"\"\n\n class _LeafSentinel(object):\n\n def __repr__(self):\n return '.'\n\n return str(util.map_tree(lambda _: _LeafSentinel(), tree))\n\n\ndef call_fn(\n fn: TransitionOperator,\n args: Union[Tuple[Any], Mapping[Text, Any], Any],\n) -> Any:\n \"\"\"Calls a function with `args`.\n\n If `args` is a sequence, `fn` is called like `fn(*args)`. If `args` is a\n mapping, `fn` is called like `fn(**args)`. Otherwise, it is called `fn(args)`.\n\n Args:\n fn: A `TransitionOperator`.\n args: Arguments to `fn`\n\n Returns:\n ret: Return value of `fn`.\n \"\"\"\n if isinstance(\n args, collections.Sequence) and not mcmc_util.is_namedtuple_like(args):\n args = args # type: Tuple[Any]\n return fn(*args)\n elif isinstance(args, collections.Mapping):\n args = args # type: Mapping[str, Any]\n return fn(**args)\n else:\n return fn(args)\n\n\ndef call_potential_fn(\n fn: PotentialFn,\n args: Union[Tuple[Any], Mapping[Text, Any], Any],\n) -> Tuple['tf.Tensor', Any]:\n \"\"\"Calls a transition operator with `args`.\n\n `fn` must fulfill the `PotentialFn` contract:\n\n ```python\n potential, extra = call_fn(fn, args)\n ```\n\n Args:\n fn: `PotentialFn`.\n args: Arguments to `fn`.\n\n Returns:\n ret: Return value of `fn`.\n\n Raises:\n TypeError: If `fn` doesn't fulfill the contract.\n \"\"\"\n ret = call_fn(fn, args)\n error_template = ('`{fn:}` must have a signature '\n '`fn(args) -> (tf.Tensor, extra)`'\n ' but when called with `args=`\\n{args:}\\nreturned '\n '`ret=`\\n{ret:}\\ninstead. The structure of '\n '`args=`\\n{args_s:}\\nThe structure of `ret=`\\n{ret_s:}\\n'\n 'A common solution is to adjust the `return`s in `fn` to '\n 'be `return args, ()`.')\n\n if not isinstance(ret, collections.Sequence) or len(ret) != 2:\n args_s = _tree_repr(args)\n ret_s = _tree_repr(ret)\n raise TypeError(\n error_template.format(\n fn=fn, args=args, ret=ret, args_s=args_s, ret_s=ret_s))\n return ret\n\n\ndef call_transition_operator(\n fn: TransitionOperator,\n args: Union[Tuple[Any], Mapping[Text, Any], Any],\n) -> Tuple[Any, Any]:\n \"\"\"Calls a transition operator with `args`.\n\n `fn` must fulfill the `TransitionOperator` contract:\n\n ```python\n args_out, extra = call_fn(fn, args)\n assert_same_shallow_tree(args, args_out)\n ```\n\n Args:\n fn: `TransitionOperator`.\n args: Arguments to `fn`.\n\n Returns:\n ret: Return value of `fn`.\n\n Raises:\n TypeError: If `fn` doesn't fulfill the contract.\n \"\"\"\n ret = call_fn(fn, args)\n error_template = ('`{fn:}` must have a signature '\n '`fn(args) -> (new_args, extra)`'\n ' but when called with `args=`\\n{args:}\\nreturned '\n '`ret=`\\n{ret:}\\ninstead. The structure of '\n '`args=`\\n{args_s:}\\nThe structure of `ret=`\\n{ret_s:}\\n'\n 'A common solution is to adjust the `return`s in `fn` to '\n 'be `return args, ()`.')\n\n if not isinstance(ret, collections.Sequence) or len(ret) != 2:\n args_s = _tree_repr(args)\n ret_s = _tree_repr(ret)\n raise TypeError(\n error_template.format(\n fn=fn, args=args, ret=ret, args_s=args_s, ret_s=ret_s))\n\n error_template = (\n '`{fn:}` must have a signature '\n '`fn(args) -> (new_args, extra)`'\n ' but when called with `args=`\\n{args:}\\nreturned '\n '`new_args=`\\n{new_args:}\\ninstead. 
The structure of '\n '`args=`\\n{args_s:}\\nThe structure of `new_args=`\\n{new_args_s:}\\n')\n new_args, extra = ret\n try:\n util.assert_same_shallow_tree(args, new_args)\n except:\n args_s = _tree_repr(args)\n new_args_s = _tree_repr(new_args)\n raise TypeError(\n error_template.format(\n fn=fn,\n args=args,\n new_args=new_args,\n args_s=args_s,\n new_args_s=new_args_s))\n return new_args, extra\n\n\ndef call_potential_fn_with_grads(\n fn: TransitionOperator, args: Union[Tuple[Any], Mapping[Text, Any], Any]\n) -> Tuple['tf.Tensor', TensorNest, TensorNest]:\n \"\"\"Calls `fn` and returns the gradients with respect to `fn`'s first output.\n\n Args:\n fn: A `TransitionOperator`.\n args: Arguments to `fn`\n\n Returns:\n ret: First output of `fn`.\n extra: Second output of `fn`.\n grads: Gradients of `ret` with respect to `args`.\n \"\"\"\n\n def wrapper(args):\n return call_potential_fn(fn, args)\n\n return util.value_and_grad(wrapper, args)\n\n\ndef maybe_broadcast_structure(from_structure: Any, to_structure: Any) -> Any:\n \"\"\"Maybe broadcasts `from_structure` to `to_structure`.\n\n If `from_structure` is a singleton, it is tiled to match the structure of\n `to_structure`. Note that the elements in `from_structure` are not copied if\n this tiling occurs.\n\n Args:\n from_structure: A structure.\n to_structure: A structure.\n\n Returns:\n new_from_structure: Same structure as `to_structure`.\n \"\"\"\n flat_from = util.flatten_tree(from_structure)\n flat_to = util.flatten_tree(to_structure)\n if len(flat_from) == 1:\n flat_from *= len(flat_to)\n return util.unflatten_tree(to_structure, flat_from)\n\n\ndef transform_log_prob_fn(log_prob_fn: PotentialFn,\n bijector: BijectorNest,\n init_state: State = None) -> Any:\n \"\"\"Transforms a log-prob function using a bijector.\n\n This takes a log-prob function and creates a new log-prob function that now\n takes takes state in the domain of the bijector, forward transforms that state\n and calls the original log-prob function. It then returns the log-probability\n that correctly accounts for this transformation.\n\n The wrapped function has the following signature:\n ```none\n (*args, **kwargs) ->\n transformed_space_state, [original_space_state, original_log_prob_extra]\n ```\n Note that currently it is forbidden to pass both `args` and `kwargs` to the\n wrapper.\n\n For convenience you can also pass the initial state (in the original space),\n and this function will return the inverse transformed state as the 2nd return\n value. You'd use this to initialize MCMC operators that operate in the\n transformed space.\n\n Args:\n log_prob_fn: Log prob fn.\n bijector: Bijector(s), must be of the same structure as the `log_prob_fn`\n inputs.\n init_state: Initial state, in the original space.\n\n Returns:\n transformed_log_prob_fn: Transformed log prob fn.\n transformed_init_state: If `init_state` is provided. Initial state in the\n transformed space.\n \"\"\"\n\n def wrapper(*args, **kwargs):\n \"\"\"Transformed wrapper.\"\"\"\n bijector_ = bijector\n\n if args and kwargs:\n raise ValueError('It is forbidden to pass both `args` and `kwargs` to '\n 'this wrapper.')\n if kwargs:\n args = kwargs\n # Use bijector_ to recover the structure of args that has been lossily\n # transmitted via *args and **kwargs.\n args = util.unflatten_tree(bijector_, util.flatten_tree(args))\n\n args = util.map_tree(lambda x: 0. 
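# Editor's note: a minimal standalone sketch (plain TFP bijectors, not the
# wrapper defined here) of the change of variables `transform_log_prob_fn`
# applies: evaluate log_prob at b.forward(y) and add the forward log-det
# Jacobian. Assumes eager TF2 + TFP; the Gamma target is illustrative.
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tfb, tfd = tfp.bijectors, tfp.distributions

target = tfd.Gamma(concentration=2., rate=3.)   # support is (0, inf)
b = tfb.Softplus()                              # maps R -> (0, inf)

def transformed_log_prob(y):
  x = b.forward(y)
  return target.log_prob(x) + b.forward_log_det_jacobian(y, event_ndims=0)

print(transformed_log_prob(tf.constant(0.5)).numpy())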
+ x, args)\n\n original_space_args = util.map_tree(lambda b, x: b.forward(x), bijector_,\n args)\n original_space_log_prob, extra = call_potential_fn(log_prob_fn,\n original_space_args)\n event_ndims = util.map_tree(\n lambda x: tf.rank(x) - tf.rank(original_space_log_prob), args)\n\n return original_space_log_prob + sum(\n util.flatten_tree(\n util.map_tree(\n lambda b, x, e: b.forward_log_det_jacobian(x, event_ndims=e),\n bijector_, args, event_ndims))), [original_space_args, extra]\n\n if init_state is None:\n return wrapper\n else:\n return wrapper, util.map_tree(lambda b, s: b.inverse(s), bijector,\n init_state)\n\n\nIntegratorStepState = collections.namedtuple('IntegratorStepState',\n 'state, state_grads,momentum')\nIntegratorStepExtras = collections.namedtuple(\n 'IntegratorStepExtras', 'target_log_prob, state_extra, '\n 'kinetic_energy, kinetic_energy_extra')\nIntegratorStep = Callable[[IntegratorStepState], Tuple[IntegratorStepState,\n IntegratorStepExtras]]\n\n\ndef spliting_integrator_step(\n integrator_step_state: IntegratorStepState,\n step_size: FloatTensor,\n target_log_prob_fn: PotentialFn,\n kinetic_energy_fn: PotentialFn,\n coefficients: Sequence[FloatTensor],\n forward: bool = True,\n) -> Tuple[IntegratorStepState, IntegratorStepExtras]:\n \"\"\"Symmetric symplectic integrator `TransitionOperator`.\n\n This implementation is based on Hamiltonian splitting, with the splits\n weighted by coefficients. We update the momentum first, if `forward` argument\n is `True`. See [1] for an overview of the method.\n\n Args:\n integrator_step_state: IntegratorStepState.\n step_size: Step size, structure broadcastable to the `target_log_prob_fn`\n state.\n target_log_prob_fn: Target log prob fn.\n kinetic_energy_fn: Kinetic energy fn.\n coefficients: Integrator coefficients.\n forward: Whether to run the integrator in the forward direction.\n\n Returns:\n integrator_step_state: IntegratorStepState.\n integrator_step_extras: IntegratorStepExtras.\n\n #### References:\n\n [1]: Sergio Blanes, Fernando Casas, J.M. Sanz-Serna. Numerical integrators for\n the Hybrid Monte Carlo method. SIAM J. Sci. Comput., 36(4), 2014.\n https://arxiv.org/pdf/1405.3153.pdf\n \"\"\"\n if len(coefficients) < 2:\n raise ValueError('Too few coefficients. Need at least 2.')\n state = integrator_step_state.state\n state_grads = integrator_step_state.state_grads\n momentum = integrator_step_state.momentum\n # TODO(siege): Consider amortizing this across steps. 
The tricky bit here\n # is that only a few integrators take these grads.\n momentum_grads = None\n step_size = maybe_broadcast_structure(step_size, state)\n\n state = util.map_tree(tf.convert_to_tensor, state)\n momentum = util.map_tree(tf.convert_to_tensor, momentum)\n state = util.map_tree(tf.convert_to_tensor, state)\n\n idx_and_coefficients = enumerate(coefficients)\n if not forward:\n idx_and_coefficients = reversed(list(idx_and_coefficients))\n\n for i, c in idx_and_coefficients:\n # pylint: disable=cell-var-from-loop\n if i % 2 == 0:\n if state_grads is None:\n _, _, state_grads = call_potential_fn_with_grads(\n target_log_prob_fn, state)\n else:\n state_grads = util.map_tree(tf.convert_to_tensor, state_grads)\n\n momentum = util.map_tree(lambda m, sg, s: m + c * sg * s, momentum,\n state_grads, step_size)\n\n kinetic_energy, kinetic_energy_extra, momentum_grads = call_potential_fn_with_grads(\n kinetic_energy_fn, momentum)\n else:\n if momentum_grads is None:\n _, _, momentum_grads = call_potential_fn_with_grads(\n kinetic_energy_fn, momentum)\n\n state = util.map_tree(lambda x, mg, s: x + c * mg * s, state,\n momentum_grads, step_size)\n\n target_log_prob, state_extra, state_grads = call_potential_fn_with_grads(\n target_log_prob_fn, state)\n\n return (IntegratorStepState(state, state_grads, momentum),\n IntegratorStepExtras(target_log_prob, state_extra, kinetic_energy,\n kinetic_energy_extra))\n\n\ndef leapfrog_step(\n integrator_step_state: IntegratorStepState,\n step_size: FloatTensor,\n target_log_prob_fn: PotentialFn,\n kinetic_energy_fn: PotentialFn,\n) -> Tuple[IntegratorStepState, IntegratorStepExtras]:\n \"\"\"Leapfrog integrator `TransitionOperator`.\n\n Args:\n integrator_step_state: IntegratorStepState.\n step_size: Step size, structure broadcastable to the `target_log_prob_fn`\n state.\n target_log_prob_fn: Target log prob fn.\n kinetic_energy_fn: Kinetic energy fn.\n\n Returns:\n integrator_step_state: IntegratorStepState.\n integrator_step_extras: IntegratorStepExtras.\n \"\"\"\n coefficients = [0.5, 1., 0.5]\n return spliting_integrator_step(\n integrator_step_state,\n step_size,\n target_log_prob_fn,\n kinetic_energy_fn,\n coefficients=coefficients)\n\n\ndef ruth4_step(\n integrator_step_state: IntegratorStepState,\n step_size: FloatTensor,\n target_log_prob_fn: PotentialFn,\n kinetic_energy_fn: PotentialFn,\n) -> Tuple[IntegratorStepState, IntegratorStepExtras]:\n \"\"\"Ruth 4th order integrator `TransitionOperator`.\n\n See [1] for details.\n\n Args:\n integrator_step_state: IntegratorStepState.\n step_size: Step size, structure broadcastable to the `target_log_prob_fn`\n state.\n target_log_prob_fn: Target log prob fn.\n kinetic_energy_fn: Kinetic energy fn.\n\n Returns:\n integrator_step_state: IntegratorStepState.\n integrator_step_extras: IntegratorStepExtras.\n\n #### References:\n\n [1]: Ruth, Ronald D. (August 1983). \"A Canonical Integration Technique\".\n Nuclear Science, IEEE Trans. on. NS-30 (4): 2669-2671\n \"\"\"\n c = 2**(1. / 3)\n coefficients = (1. 
/ (2 - c)) * np.array([0.5, 1., 0.5 - 0.5 * c, -c])\n coefficients = list(coefficients) + list(reversed(coefficients))[1:]\n return spliting_integrator_step(\n integrator_step_state,\n step_size,\n target_log_prob_fn,\n kinetic_energy_fn,\n coefficients=coefficients)\n\n\ndef blanes_3_stage_step(\n integrator_step_state: IntegratorStepState,\n step_size: FloatTensor,\n target_log_prob_fn: PotentialFn,\n kinetic_energy_fn: PotentialFn,\n) -> Tuple[IntegratorStepState, IntegratorStepExtras]:\n \"\"\"Blanes 4th order integrator `TransitionOperator`.\n\n See [1] for details.\n\n Args:\n integrator_step_state: IntegratorStepState.\n step_size: Step size, structure broadcastable to the `target_log_prob_fn`\n state.\n target_log_prob_fn: Target log prob fn.\n kinetic_energy_fn: Kinetic energy fn.\n\n Returns:\n integrator_step_state: IntegratorStepState.\n integrator_step_extras: IntegratorStepExtras.\n\n #### References:\n\n [1]: Sergio Blanes, Fernando Casas, J.M. Sanz-Serna. Numerical integrators for\n the Hybrid Monte Carlo method. SIAM J. Sci. Comput., 36(4), 2014.\n https://arxiv.org/pdf/1405.3153.pdf\n \"\"\"\n a1 = 0.11888010966\n b1 = 0.29619504261\n coefficients = [a1, b1, 0.5 - a1, 1. - 2. * b1]\n coefficients = coefficients + list(reversed(coefficients))[1:]\n return spliting_integrator_step(\n integrator_step_state,\n step_size,\n target_log_prob_fn,\n kinetic_energy_fn,\n coefficients=coefficients)\n\n\ndef blanes_4_stage_step(\n integrator_step_state: IntegratorStepState,\n step_size: FloatTensor,\n target_log_prob_fn: PotentialFn,\n kinetic_energy_fn: PotentialFn,\n) -> Tuple[IntegratorStepState, IntegratorStepExtras]:\n \"\"\"Blanes 6th order integrator `TransitionOperator`.\n\n See [1] for details.\n\n Args:\n integrator_step_state: IntegratorStepState.\n step_size: Step size, structure broadcastable to the `target_log_prob_fn`\n state.\n target_log_prob_fn: Target log prob fn.\n kinetic_energy_fn: Kinetic energy fn.\n\n Returns:\n integrator_step_state: IntegratorStepState.\n integrator_step_extras: IntegratorStepExtras.\n\n #### References:\n\n [1]: Sergio Blanes, Fernando Casas, J.M. Sanz-Serna. Numerical integrators for\n the Hybrid Monte Carlo method. SIAM J. Sci. Comput., 36(4), 2014.\n https://arxiv.org/pdf/1405.3153.pdf\n \"\"\"\n a1 = 0.071353913\n a2 = 0.268548791\n b1 = 0.191667800\n coefficients = [a1, b1, a2, 0.5 - b1, 1. - 2. * (a1 + a2)]\n coefficients = coefficients + list(reversed(coefficients))[1:]\n return spliting_integrator_step(\n integrator_step_state,\n step_size,\n target_log_prob_fn,\n kinetic_energy_fn,\n coefficients=coefficients)\n\n\ndef mclachlan_optimal_4th_order_step(\n integrator_step_state: IntegratorStepState,\n step_size: FloatTensor,\n target_log_prob_fn: PotentialFn,\n kinetic_energy_fn: PotentialFn,\n forward: BooleanTensor,\n) -> Tuple[IntegratorStepState, IntegratorStepExtras]:\n \"\"\"4th order integrator for Hamiltonians with a quadratic kinetic energy.\n\n See [1] for details. Note that this integrator step is not reversible, so for\n use in HMC you should randomly reverse the integration direction to preserve\n detailed balance.\n\n Args:\n integrator_step_state: IntegratorStepState.\n step_size: Step size, structure broadcastable to the `target_log_prob_fn`\n state.\n target_log_prob_fn: Target log prob fn.\n kinetic_energy_fn: Kinetic energy fn.\n forward: A scalar `bool` Tensor. Whether to run this integrator in the\n forward direction. 
Note that this is done for the entire state, not\n per-batch.\n\n Returns:\n integrator_step_state: IntegratorStepState.\n integrator_step_extras: IntegratorStepExtras.\n\n #### References:\n\n [1]: McLachlan R. I., & Atela P. (1992). The accuracy of symplectic\n integrators. Nonlinearity, 5, 541-562.\n \"\"\"\n # N.B. a's and b's are used in the opposite sense than the Blanes integrators\n # above.\n a1 = 0.5153528374311229364\n a2 = -0.085782019412973646\n a3 = 0.4415830236164665242\n a4 = 0.1288461583653841854\n\n b1 = 0.1344961992774310892\n b2 = -0.2248198030794208058\n b3 = 0.7563200005156682911\n b4 = 0.3340036032863214255\n coefficients = [b1, a1, b2, a2, b3, a3, b4, a4]\n\n def _step(direction):\n return spliting_integrator_step(\n integrator_step_state,\n step_size,\n target_log_prob_fn,\n kinetic_energy_fn,\n coefficients=coefficients,\n forward=direction)\n\n # In principle we can avoid the cond, and use `tf.where` to select between the\n # coefficients. This would require a superfluous momentum update, but in\n # principle is feasible. We're not doing it because it would complicate the\n # code slightly, and there is limited motivation to do it since reversing the\n # directions for all the chains at once is typically valid as well.\n return tf.cond(forward, lambda: _step(True), lambda: _step(False))\n\n\nMetropolisHastingsExtra = collections.namedtuple('MetropolisHastingsExtra',\n 'is_accepted, log_uniform')\n\n\ndef metropolis_hastings_step(\n current_state: State,\n proposed_state: State,\n energy_change: FloatTensor,\n log_uniform: FloatTensor = None,\n seed=None) -> Tuple[State, MetropolisHastingsExtra]:\n \"\"\"Metropolis-Hastings step.\n\n This probabilistically chooses between `current_state` and `proposed_state`\n based on the `energy_change` so as to preserve detailed balance.\n\n Energy change is the negative of `log_accept_ratio`.\n\n Args:\n current_state: Current state.\n proposed_state: Proposed state.\n energy_change: E(proposed_state) - E(previous_state).\n log_uniform: Optional logarithm of a uniformly distributed random sample in\n [0, 1]. 
It is used to accept/reject the current and proposed state.\n seed: For reproducibility.\n\n Returns:\n new_state: The chosen state.\n mh_extra: MetropolisHastingsExtra.\n \"\"\"\n # Impute the None's in the current state.\n current_state = util.map_tree_up_to(\n current_state,\n lambda c, p: p # pylint: disable=g-long-lambda\n if c is None else c,\n current_state,\n proposed_state)\n\n current_state = util.map_tree(tf.convert_to_tensor, current_state)\n proposed_state = util.map_tree(tf.convert_to_tensor, proposed_state)\n energy_change = tf.convert_to_tensor(energy_change)\n\n log_accept_ratio = -energy_change\n\n if log_uniform is None:\n log_uniform = tf.math.log(\n util.random_uniform(\n shape=tf.shape(log_accept_ratio),\n dtype=log_accept_ratio.dtype,\n seed=seed))\n is_accepted = log_uniform < log_accept_ratio\n\n next_state = _choose(\n is_accepted, proposed_state, current_state, name='choose_next_state')\n return next_state, MetropolisHastingsExtra(\n is_accepted=is_accepted, log_uniform=log_uniform)\n\n\nMomentumSampleFn = Callable[[Any], State]\n\n\ndef gaussian_momentum_sample(state_spec: TensorSpecNest = None,\n state: State = None,\n seed=None) -> State:\n \"\"\"Generates a sample from a Gaussian (Normal) momentum distribution.\n\n One of `state` or `state_spec` need to be specified to obtain the correct\n structure.\n\n Args:\n state_spec: A nest of `TensorSpec`s describing the output shape and dtype.\n state: A nest of `Tensor`s with the shape and dtype being the same as the\n output.\n seed: For reproducibility.\n\n Returns:\n sample: A nest of `Tensor`s with the same structure, shape and dtypes as one\n of the two inputs, distributed with Normal distribution.\n \"\"\"\n if state_spec is None:\n if state is None:\n raise ValueError(\n 'If `state_spec` is `None`, then `state` must be specified.')\n shapes = util.map_tree(tf.shape, state)\n dtypes = util.map_tree(lambda t: t.dtype, state)\n else:\n shapes = util.map_tree(lambda spec: spec.shape, state_spec)\n dtypes = util.map_tree(lambda spec: spec.dtype, state_spec)\n\n num_seeds_needed = len(util.flatten_tree(dtypes))\n seeds = list(util.split_seed(seed, num_seeds_needed))\n seeds = util.unflatten_tree(dtypes, seeds)\n\n def _one_part(dtype, shape, seed):\n return util.random_normal(shape=shape, dtype=dtype, seed=seed)\n\n return util.map_tree_up_to(dtypes, _one_part, dtypes, shapes, seeds)\n\n\ndef make_gaussian_kinetic_energy_fn(\n chain_ndims: IntTensor) -> Callable[..., Tuple['tf.Tensor', TensorNest]]:\n \"\"\"Returns a function that computes the kinetic energy of a state.\n\n Args:\n chain_ndims: How many leading dimensions correspond to independent\n particles.\n\n Returns:\n kinetic_energy_fn: A callable that takes in the expanded state (see\n `call_potential_fn`) and returns the kinetic energy + dummy auxiliary\n output.\n \"\"\"\n\n def kinetic_energy_fn(*args, **kwargs):\n\n def one_component(x):\n return tf.reduce_sum(tf.square(x), axis=tf.range(chain_ndims, tf.rank(x)))\n\n return (tf.add_n(\n [one_component(x) for x in util.flatten_tree([args, kwargs])]) / 2.), ()\n\n return kinetic_energy_fn\n\n\nHamiltonianMonteCarloState = collections.namedtuple(\n 'HamiltonianMonteCarloState',\n 'state, state_grads, target_log_prob, state_extra')\n\nHamiltonianMonteCarloState.__new__.__defaults__ = (None, None, None)\n\n# state_extra is not a true state, but here for convenience.\nHamiltonianMonteCarloExtra = collections.namedtuple(\n 'HamiltonianMonteCarloExtra',\n 'is_accepted, log_accept_ratio, proposed_hmc_state, '\n 
'integrator_state, integrator_extra, initial_momentum')\n\n\ndef hamiltonian_monte_carlo_init(\n state: TensorNest,\n target_log_prob_fn: PotentialFn) -> HamiltonianMonteCarloState:\n \"\"\"Initializes the `HamiltonianMonteCarloState`.\n\n Args:\n state: State of the chain.\n target_log_prob_fn: Target log prob fn.\n\n Returns:\n hmc_state: State of the `hamiltonian_monte_carlo` `TransitionOperator`.\n \"\"\"\n target_log_prob, state_extra, state_grads = call_potential_fn_with_grads(\n target_log_prob_fn, util.map_tree(tf.convert_to_tensor, state))\n return HamiltonianMonteCarloState(state, state_grads, target_log_prob,\n state_extra)\n\n\ndef hamiltonian_monte_carlo(\n hmc_state: HamiltonianMonteCarloState,\n target_log_prob_fn: PotentialFn,\n step_size: Any = None,\n num_integrator_steps: IntTensor = None,\n momentum: State = None,\n kinetic_energy_fn: PotentialFn = None,\n momentum_sample_fn: MomentumSampleFn = None,\n integrator_trace_fn: Callable[[IntegratorStepState, IntegratorStepExtras],\n TensorNest] = lambda *args: (),\n log_uniform: FloatTensor = None,\n integrator_fn=None,\n seed=None,\n) -> Tuple[HamiltonianMonteCarloState, HamiltonianMonteCarloExtra]:\n \"\"\"Hamiltonian Monte Carlo `TransitionOperator`.\n\n #### Example\n\n ```python\n step_size = 0.2\n num_steps = 2000\n num_integrator_steps = 10\n state = tf.ones([16, 2])\n\n base_mean = [1., 0]\n base_cov = [[1, 0.5], [0.5, 1]]\n\n bijector = tfb.Softplus()\n base_dist = tfd.MultivariateNormalFullCovariance(\n loc=base_mean, covariance_matrix=base_cov)\n target_dist = bijector(base_dist)\n\n def orig_target_log_prob_fn(x):\n return target_dist.log_prob(x), ()\n\n target_log_prob_fn, state = fun_mcmc.transform_log_prob_fn(\n orig_target_log_prob_fn, bijector, state)\n\n kernel = tf.function(lambda state: fun_mcmc.hamiltonian_monte_carlo(\n state,\n step_size=step_size,\n num_integrator_steps=num_integrator_steps,\n target_log_prob_fn=target_log_prob_fn,\n seed=tfp_test_util.test_seed()))\n\n _, chain = fun_mcmc.trace(\n state=fun_mcmc.hamiltonian_monte_carlo_init(state, target_log_prob_fn),\n fn=kernel,\n num_steps=num_steps,\n trace_fn=lambda state, extra: state.state_extra[0])\n ```\n\n Args:\n hmc_state: HamiltonianMonteCarloState.\n target_log_prob_fn: Target log prob fn.\n step_size: Step size, structure broadcastable to the `target_log_prob_fn`\n state. Optional if `integrator_fn` is specified.\n num_integrator_steps: Number of integrator steps to take. Optional if\n `integrator_fn` is specified.\n momentum: Initial momentum, by default sampled from a standard gaussian.\n kinetic_energy_fn: Kinetic energy function.\n momentum_sample_fn: Sampler for the momentum.\n integrator_trace_fn: Trace function for the integrator.\n log_uniform: Optional logarithm of a uniformly distributed random sample in\n [0, 1], used for the MH accept/reject step.\n integrator_fn: Integrator to use for the HMC dynamics. 
Uses a\n `hamiltonian_integrator` with `leapfrog_step` by default.\n seed: For reproducibility.\n\n Returns:\n hmc_state: HamiltonianMonteCarloState\n hmc_extra: HamiltonianMonteCarloExtra\n \"\"\"\n if any(e is None for e in util.flatten_tree(hmc_state)):\n hmc_state = hamiltonian_monte_carlo_init(hmc_state.state,\n target_log_prob_fn)\n state = hmc_state.state\n state_grads = hmc_state.state_grads\n target_log_prob = hmc_state.target_log_prob\n state_extra = hmc_state.state_extra\n\n if kinetic_energy_fn is None:\n kinetic_energy_fn = make_gaussian_kinetic_energy_fn(\n len(target_log_prob.shape) if target_log_prob.shape is not None else tf\n .rank(target_log_prob))\n\n if momentum_sample_fn is None:\n momentum_sample_fn = lambda seed: gaussian_momentum_sample( # pylint: disable=g-long-lambda\n state=state, seed=seed)\n\n if integrator_fn is None:\n integrator_fn = lambda state: hamiltonian_integrator( # pylint: disable=g-long-lambda\n state,\n num_steps=num_integrator_steps,\n integrator_step_fn=lambda state: leapfrog_step( # pylint: disable=g-long-lambda\n state,\n step_size=step_size,\n target_log_prob_fn=target_log_prob_fn,\n kinetic_energy_fn=kinetic_energy_fn),\n kinetic_energy_fn=kinetic_energy_fn,\n integrator_trace_fn=integrator_trace_fn)\n\n if momentum is None:\n seed, sample_seed = util.split_seed(seed, 2)\n momentum = momentum_sample_fn(sample_seed)\n\n integrator_state = IntegratorState(\n target_log_prob=target_log_prob,\n momentum=momentum,\n state=state,\n state_grads=state_grads,\n state_extra=state_extra,\n )\n\n integrator_state, integrator_extra = integrator_fn(integrator_state)\n\n proposed_state = HamiltonianMonteCarloState(\n state=integrator_state.state,\n state_grads=integrator_state.state_grads,\n target_log_prob=integrator_state.target_log_prob,\n state_extra=integrator_state.state_extra)\n\n hmc_state, mh_extra = metropolis_hastings_step(\n hmc_state,\n proposed_state,\n integrator_extra.energy_change,\n log_uniform=log_uniform,\n seed=seed)\n\n hmc_state = hmc_state # type: HamiltonianMonteCarloState\n return hmc_state, HamiltonianMonteCarloExtra(\n is_accepted=mh_extra.is_accepted,\n proposed_hmc_state=proposed_state,\n log_accept_ratio=-integrator_extra.energy_change,\n integrator_state=integrator_state,\n integrator_extra=integrator_extra,\n initial_momentum=momentum)\n\n\nIntegratorState = collections.namedtuple(\n 'IntegratorState',\n 'state, state_extra, state_grads, target_log_prob, momentum')\nIntegratorExtras = collections.namedtuple(\n 'IntegratorExtras',\n 'kinetic_energy, kinetic_energy_extra, energy_change, integrator_trace')\n\n\ndef hamiltonian_integrator(\n int_state: IntegratorState,\n num_steps: IntTensor,\n integrator_step_fn: IntegratorStep,\n kinetic_energy_fn: PotentialFn,\n integrator_trace_fn: Callable[[IntegratorStepState, IntegratorStepExtras],\n TensorNest] = lambda *args: (),\n) -> Tuple[IntegratorState, IntegratorExtras]:\n \"\"\"Intergrates a discretized set of Hamiltonian equations.\n\n This function will use the passed `integrator_step_fn` to evolve the system\n for `num_steps`. The `integrator_step_fn` is assumed to be reversible.\n\n Args:\n int_state: Current `IntegratorState`.\n num_steps: Integer scalar or N-D `Tensor`. Number of steps to take. If this\n is not a scalar, then each corresponding independent system will be\n evaluated for that number of steps, followed by copying the final state to\n avoid creating a ragged Tensor. 
Keep this in mind when interpreting the\n `integrator_trace` in the auxiliary output.\n integrator_step_fn: Instance of `IntegratorStep`.\n kinetic_energy_fn: Function to compute the kinetic energy from momentums.\n integrator_trace_fn: Trace function for the integrator.\n\n Returns:\n integrator_state: `IntegratorState`\n integrator_exras: `IntegratorExtras`\n \"\"\"\n target_log_prob = int_state.target_log_prob\n momentum = int_state.momentum\n state = int_state.state\n state_grads = int_state.state_grads\n state_extra = int_state.state_extra\n\n num_steps = tf.convert_to_tensor(num_steps)\n is_ragged = len(num_steps.shape) > 0 # pylint: disable=g-explicit-length-test\n\n kinetic_energy, kinetic_energy_extra = call_potential_fn(\n kinetic_energy_fn, momentum)\n current_energy = -target_log_prob + kinetic_energy\n\n if is_ragged:\n step = 0\n max_num_steps = tf.reduce_max(num_steps)\n else:\n step = []\n max_num_steps = num_steps\n\n # We need to carry around the integrator state extras so we can properly do\n # the ragged computation.\n # TODO(siege): In principle we can condition this on `is_ragged`, but doesn't\n # seem worthwhile at the time.\n integrator_wrapper_state = (step,\n IntegratorStepState(state, state_grads, momentum),\n IntegratorStepExtras(target_log_prob, state_extra,\n kinetic_energy,\n kinetic_energy_extra))\n\n def integrator_wrapper(step, integrator_step_state, integrator_step_extra):\n \"\"\"Integrator wrapper that tracks extra state.\"\"\"\n old_integrator_step_state = integrator_step_state\n old_integrator_step_extra = integrator_step_extra\n integrator_step_state, integrator_step_extra = integrator_step_fn(\n integrator_step_state)\n\n if is_ragged:\n integrator_step_state = _choose(step < num_steps, integrator_step_state,\n old_integrator_step_state)\n integrator_step_extra = _choose(step < num_steps, integrator_step_extra,\n old_integrator_step_extra)\n step = step + 1\n\n return (step, integrator_step_state, integrator_step_extra), []\n\n def integrator_trace_wrapper_fn(args, _):\n return integrator_trace_fn(args[1], args[2])\n\n [_, integrator_step_state, integrator_step_extra], integrator_trace = trace(\n integrator_wrapper_state,\n integrator_wrapper,\n max_num_steps,\n trace_fn=integrator_trace_wrapper_fn)\n\n proposed_energy = (-integrator_step_extra.target_log_prob +\n integrator_step_extra.kinetic_energy)\n\n energy_change = proposed_energy - current_energy\n\n state = IntegratorState(\n state=integrator_step_state.state,\n state_extra=integrator_step_extra.state_extra,\n state_grads=integrator_step_state.state_grads,\n target_log_prob=integrator_step_extra.target_log_prob,\n momentum=integrator_step_state.momentum)\n\n extra = IntegratorExtras(\n kinetic_energy=integrator_step_extra.kinetic_energy,\n kinetic_energy_extra=integrator_step_extra.kinetic_energy_extra,\n energy_change=energy_change,\n integrator_trace=integrator_trace)\n\n return state, extra\n\n\ndef sign_adaptation(control: FloatNest,\n output: FloatTensor,\n set_point: FloatTensor,\n adaptation_rate: FloatTensor = 0.01) -> FloatNest:\n \"\"\"A function to do simple sign-based control of a variable.\n\n ```\n control = control * (1. + adaptation_rate) ** sign(output - set_point)\n ```\n\n Args:\n control: The control variable.\n output: The output variable.\n set_point: The set point for `output`. 
This function will adjust `control`\n so that `output` matches `set_point`.\n adaptation_rate: Adaptation rate.\n\n Returns:\n control: New control.\n \"\"\"\n\n def _get_new_control(control, output, set_point):\n new_control = _choose(output > set_point, control * (1. + adaptation_rate),\n control / (1. + adaptation_rate))\n return new_control\n\n output = maybe_broadcast_structure(output, control)\n set_point = maybe_broadcast_structure(set_point, control)\n\n return util.map_tree(_get_new_control, control, output, set_point)\n\n\ndef transition_kernel_wrapper(\n current_state: FloatNest, kernel_results: Optional[Any],\n kernel: tfp.mcmc.TransitionKernel) -> Tuple[FloatNest, Any]:\n \"\"\"Wraps a `tfp.mcmc.TransitionKernel` as a `TransitionOperator`.\n\n Args:\n current_state: Current state passed to the transition kernel.\n kernel_results: Kernel results passed to the transition kernel. Can be\n `None`.\n kernel: The transition kernel.\n\n Returns:\n state: A tuple of:\n current_state: Current state returned by the transition kernel.\n kernel_results: Kernel results returned by the transition kernel.\n extra: An empty tuple.\n \"\"\"\n flat_current_state = util.flatten_tree(current_state)\n if kernel_results is None:\n kernel_results = kernel.bootstrap_results(flat_current_state)\n flat_current_state, kernel_results = kernel.one_step(flat_current_state,\n kernel_results)\n return (util.unflatten_tree(current_state,\n flat_current_state), kernel_results), ()\n\n\ndef _choose(is_accepted, accepted, rejected, name='choose'):\n \"\"\"Helper which expand_dims `is_accepted` then applies tf.where.\"\"\"\n\n def _choose_base_case(is_accepted, accepted, rejected, name):\n \"\"\"Choose base case for one tensor.\"\"\"\n\n def _expand_is_accepted_like(x):\n \"\"\"Helper to expand `is_accepted` like the shape of some input arg.\"\"\"\n with tf.name_scope('expand_is_accepted_like'):\n if x.shape is not None and is_accepted.shape is not None:\n expand_shape = list(is_accepted.shape) + [1] * (\n len(x.shape) - len(is_accepted.shape))\n else:\n expand_shape = tf.concat([\n tf.shape(is_accepted),\n tf.ones([tf.rank(x) - tf.rank(is_accepted)], dtype=tf.int32),\n ],\n axis=0)\n return tf.reshape(is_accepted, expand_shape)\n\n with tf.name_scope(name):\n if accepted is rejected:\n return accepted\n accepted = tf.convert_to_tensor(accepted, name='accepted')\n rejected = tf.convert_to_tensor(rejected, name='rejected')\n return tf.where(_expand_is_accepted_like(accepted), accepted, rejected)\n\n is_accepted = tf.convert_to_tensor(is_accepted, name='is_accepted')\n return util.map_tree(\n lambda a, r: _choose_base_case(is_accepted, a, r, name=name), accepted,\n rejected)\n\n\nAdamState = collections.namedtuple('AdamState', 'state, m, v, t')\nAdamExtra = collections.namedtuple('AdamExtra', 'loss, loss_extra, grads')\n\n\ndef adam_init(state: FloatNest) -> AdamState:\n state = util.map_tree(tf.convert_to_tensor, state)\n return AdamState(\n state=state,\n m=util.map_tree(tf.zeros_like, state),\n v=util.map_tree(tf.zeros_like, state),\n t=tf.constant(0, dtype=tf.int32))\n\n\ndef adam_step(adam_state: AdamState,\n loss_fn: PotentialFn,\n learning_rate: FloatNest,\n beta_1: FloatNest = 0.9,\n beta_2: FloatNest = 0.999,\n epsilon: FloatNest = 1e-8) -> Tuple[AdamState, AdamExtra]:\n \"\"\"Perform one step of the Adam optimization method.\n\n Args:\n adam_state: Current `AdamState`.\n loss_fn: A function whose output will be minimized.\n learning_rate: Learning rate, broadcastable with the state.\n beta_1: Adaptation 
rate for the first order gradient statistics,\n broadcastable with the state.\n beta_2: Adaptation rate for the second order gradient statistics,\n broadcastable with the state.\n epsilon: Epsilon to stabilize the algorithm, broadcastable with the state.\n Note that the `epsilon` is actually the `epsilon_hat` from introduction to\n Section 2 in [1].\n\n Returns:\n adam_state: `AdamState`\n adam_extra: `AdamExtra`\n\n\n #### References:\n\n [1]: Kingma, D. P., & Ba, J. L. (2015). Adam: a Method for Stochastic\n Optimization. International Conference on Learning Representations\n 2015, 1-15.\n \"\"\"\n if any(e is None for e in util.flatten_tree(adam_state)):\n adam_state = adam_init(adam_state.state)\n state = adam_state.state\n m = adam_state.m\n v = adam_state.v\n learning_rate = maybe_broadcast_structure(learning_rate, state)\n beta_1 = maybe_broadcast_structure(beta_1, state)\n beta_2 = maybe_broadcast_structure(beta_2, state)\n epsilon = maybe_broadcast_structure(epsilon, state)\n t = tf.cast(adam_state.t + 1, tf.float32)\n\n def _one_part(state, g, m, v, learning_rate, beta_1, beta_2, epsilon):\n lr_t = learning_rate * (\n tf.math.sqrt(1. - tf.math.pow(beta_2, t)) /\n (1. - tf.math.pow(beta_1, t)))\n\n m_t = beta_1 * m + (1. - beta_1) * g\n v_t = beta_2 * v + (1. - beta_2) * tf.square(g)\n state = state - lr_t * m_t / (tf.math.sqrt(v_t) + epsilon)\n return state, m_t, v_t\n\n loss, loss_extra, grads = call_potential_fn_with_grads(loss_fn, state)\n\n state_m_v = util.map_tree(_one_part, state, grads, m, v, learning_rate,\n beta_1, beta_2, epsilon)\n\n adam_state = AdamState(\n state=util.map_tree_up_to(state, lambda x: x[0], state_m_v),\n m=util.map_tree_up_to(state, lambda x: x[1], state_m_v),\n v=util.map_tree_up_to(state, lambda x: x[2], state_m_v),\n t=adam_state.t + 1)\n\n return adam_state, AdamExtra(loss_extra=loss_extra, loss=loss, grads=grads)\n\n\nGradientDescentState = collections.namedtuple('GradientDescentState', 'state')\nGradientDescentExtra = collections.namedtuple('GradientDescentExtra',\n 'loss, loss_extra, grads')\n\n\ndef gradient_descent_step(\n gd_state: GradientDescentState, loss_fn: PotentialFn,\n learning_rate: FloatNest\n) -> Tuple[GradientDescentState, GradientDescentExtra]:\n \"\"\"Perform a step of regular gradient descent.\n\n Args:\n gd_state: Current `GradientDescentState`.\n loss_fn: A function whose output will be minimized.\n learning_rate: Learning rate, broadcastable with the state.\n\n Returns:\n gd_state: `GradientDescentState`\n gd_extra: `GradientDescentExtra`\n \"\"\"\n\n state = gd_state.state\n learning_rate = maybe_broadcast_structure(learning_rate, state)\n\n def _one_part(state, g, learning_rate):\n return state - learning_rate * g\n\n loss, loss_extra, grads = call_potential_fn_with_grads(loss_fn, state)\n\n state = util.map_tree(_one_part, state, grads, learning_rate)\n\n gd_state = GradientDescentState(state=state)\n\n return gd_state, GradientDescentExtra(\n loss_extra=loss_extra, loss=loss, grads=grads)\n\n\nRandomWalkMetropolisState = collections.namedtuple(\n 'RandomWalkMetropolisState', 'state, target_log_prob, state_extra')\n\n\nRandomWalkMetropolisExtra = collections.namedtuple(\n 'RandomWalkMetropolisExtra',\n 'is_accepted, log_accept_ratio, proposal_extra, proposed_rwm_state')\n\n\ndef random_walk_metropolis_init(\n state: State, target_log_prob_fn: PotentialFn) -> RandomWalkMetropolisState:\n \"\"\"Initializes the `RandomWalkMetropolisState`.\n\n Args:\n state: State of the chain.\n target_log_prob_fn: Target log prob fn.\n\n 
Returns:\n hmc_state: State of the `random_walk_metropolis_init` `TransitionOperator`.\n \"\"\"\n target_log_prob, state_extra = call_potential_fn(target_log_prob_fn, state)\n return RandomWalkMetropolisState(\n state=state,\n target_log_prob=target_log_prob,\n state_extra=state_extra,\n )\n\n\ndef random_walk_metropolis(\n rwm_state: RandomWalkMetropolisState,\n target_log_prob_fn: PotentialFn,\n proposal_fn: TransitionOperator,\n log_uniform: FloatTensor = None,\n seed=None) -> Tuple[RandomWalkMetropolisState, RandomWalkMetropolisExtra]:\n \"\"\"Random Walk Metropolis Hastings `TransitionOperator`.\n\n The `proposal_fn` takes in the current state, and must return a proposed\n state. It also must return a 2-tuple as its `extra` output, with the first\n element being arbitrary (returned in `rwm_extra`), and the second element\n being the log odds of going from the current state to the proposed state\n instead of reverse. If the proposal is symmetric about the current state, you\n can return `0.`.\n\n Args:\n rwm_state: RandomWalkMetropolisState.\n target_log_prob_fn: Target log prob fn.\n proposal_fn: Proposal fn.\n log_uniform: Optional logarithm of a uniformly distributed random sample in\n [0, 1], used for the MH accept/reject step.\n seed: For reproducibility.\n\n Returns:\n rwm_state: RandomWalkMetropolisState\n rwm_extra: RandomWalkMetropolisExtra\n \"\"\"\n if any(e is None for e in util.flatten_tree(rwm_state)):\n rwm_state = random_walk_metropolis_init(rwm_state.state, target_log_prob_fn)\n\n seed, sample_seed = util.split_seed(seed, 2)\n proposed_state, (proposal_extra,\n log_proposed_bias) = proposal_fn(rwm_state.state,\n sample_seed)\n\n proposed_target_log_prob, proposed_state_extra = call_potential_fn(\n target_log_prob_fn, proposed_state)\n\n # TODO(siege): Is it really a \"log accept ratio\" if we need to clamp it to 0?\n log_accept_ratio = (\n proposed_target_log_prob - rwm_state.target_log_prob - log_proposed_bias)\n\n proposed_rwm_state = RandomWalkMetropolisState(\n state=proposed_state,\n target_log_prob=proposed_target_log_prob,\n state_extra=proposed_state_extra,\n )\n\n rwm_state, mh_extra = metropolis_hastings_step(\n rwm_state,\n proposed_rwm_state,\n -log_accept_ratio,\n log_uniform=log_uniform,\n seed=seed,\n )\n\n rwm_extra = RandomWalkMetropolisExtra(\n proposal_extra=proposal_extra,\n proposed_rwm_state=proposed_rwm_state,\n log_accept_ratio=log_accept_ratio,\n is_accepted=mh_extra.is_accepted,\n )\n\n rwm_state = rwm_state # type: RandomWalkMetropolisState\n return rwm_state, rwm_extra\n\n\nRunningVarianceState = collections.namedtuple('RunningVarianceState',\n 'num_points, mean, variance')\n\n\ndef running_variance_init(shape: IntTensor,\n dtype: DTypeNest) -> RunningVarianceState:\n \"\"\"Initializes the `RunningVarianceState`.\n\n Args:\n shape: Shape of the computed statistics.\n dtype: DType of the computed statistics.\n\n Returns:\n state: `RunningVarianceState`.\n \"\"\"\n return RunningVarianceState(\n num_points=util.map_tree(lambda _: tf.zeros([], tf.int32), dtype),\n mean=util.map_tree_up_to(dtype, tf.zeros, shape, dtype),\n variance=util.map_tree_up_to(dtype, tf.zeros, shape, dtype),\n )\n\n\ndef running_variance_step(\n state: RunningVarianceState,\n vec: FloatNest,\n axis: Union[int,\n List[int]] = None) -> Tuple[RunningVarianceState, Tuple[()]]:\n \"\"\"Updates the `RunningVarianceState`.\n\n As a computational convenience, this allows computing both independent\n variance estimates, as well as aggregating across an axis of `vec`. 
For\n example:\n\n - vec shape: [3, 4], axis=None -> mean/var shape: [3, 4]\n - vec shape: [3, 4], axis=0 -> mean/var shape: [4]\n - vec shape: [3, 4], axis=1 -> mean/var shape: [3]\n - vec shape: [3, 4], axis=[0, 1] -> mean/var shape: []\n\n Note that this produces a biased estimate of variance, for simplicity. If the\n unbiased estimate is required, compute it as follows: `state.variance *\n state.num_points / (state.num_points - 1)`.\n\n Args:\n state: `RunningVarianceState`.\n vec: A Tensor to incorporate into the variance estimate.\n axis: If not `None`, treat these axes as being additional axes to aggregate\n over.\n\n Returns:\n state: `RunningVarianceState`.\n extra: Empty tuple.\n \"\"\"\n\n def _one_part(vec, mean, variance, num_points):\n \"\"\"Updates a single part.\"\"\"\n vec = tf.convert_to_tensor(vec, mean.dtype)\n broadcast_mean = mean\n if axis is not None:\n for a in util.flatten_tree(axis):\n broadcast_mean = tf.expand_dims(broadcast_mean, a)\n centered_vec = vec - broadcast_mean\n num_points_f = tf.cast(num_points, vec.dtype)\n # pyformat: disable\n # These are derived by using the definition of variance for N and N + 1\n # points, and then identifying the previous terms/simplifying.\n if axis is None:\n additional_points = 1\n additional_points_f = 1\n new_variance = (\n num_points_f * (num_points_f + additional_points_f) * variance +\n num_points_f * tf.square(centered_vec)) / (\n tf.square(num_points_f + additional_points_f))\n else:\n vec_shape = tf.shape(vec)\n additional_points = tf.math.reduce_prod(tf.gather(vec_shape, axis))\n additional_points_f = tf.cast(additional_points, vec.dtype)\n new_variance = (\n num_points_f * (num_points_f + additional_points_f) * variance +\n num_points_f * tf.reduce_sum(tf.square(centered_vec), axis) -\n tf.square(tf.reduce_sum(vec, axis)) + additional_points_f *\n tf.reduce_sum(tf.square(vec), axis)) / (\n tf.square(num_points_f + additional_points_f))\n centered_vec = tf.reduce_sum(centered_vec, axis)\n # pyformat: enable\n new_mean = mean + centered_vec / (num_points_f + additional_points_f)\n return new_mean, new_variance, num_points + additional_points\n\n new_mean_variance_num_points = util.map_tree(_one_part, vec, state.mean,\n state.variance, state.num_points)\n\n new_mean = util.map_tree_up_to(state.mean, lambda x: x[0],\n new_mean_variance_num_points)\n new_variance = util.map_tree_up_to(state.mean, lambda x: x[1],\n new_mean_variance_num_points)\n new_num_points = util.map_tree_up_to(state.mean, lambda x: x[2],\n new_mean_variance_num_points)\n return RunningVarianceState(\n num_points=new_num_points, mean=new_mean, variance=new_variance), ()\n\n\nRunningCovarianceState = collections.namedtuple('RunningCovarianceState',\n 'num_points, mean, covariance')\n\n\ndef running_covariance_init(shape: IntTensor,\n dtype: DTypeNest) -> RunningCovarianceState:\n \"\"\"Initializes the `RunningCovarianceState`.\n\n Args:\n shape: Shape of the computed mean.\n dtype: DType of the computed statistics.\n\n Returns:\n state: `RunningCovarianceState`.\n \"\"\"\n return RunningCovarianceState(\n num_points=util.map_tree(lambda _: tf.zeros([], tf.int32), dtype),\n mean=util.map_tree_up_to(dtype, tf.zeros, shape, dtype),\n covariance=util.map_tree_up_to(\n dtype, lambda shape, dtype: tf.zeros( # pylint: disable=g-long-lambda\n tf.concat(\n [\n tf.convert_to_tensor(shape),\n tf.convert_to_tensor(shape[-1:]),\n ],\n axis=0,\n ),\n dtype=dtype), shape, dtype),\n )\n\n\ndef running_covariance_step(\n state: RunningCovarianceState,\n vec: 
FloatTensor,\n axis: Union[int,\n List[int]] = None) -> Tuple[RunningCovarianceState, Tuple[()]]:\n \"\"\"Updates the `RunningCovarianceState`.\n\n As a computational convenience, this allows computing both independent\n covariance estimates, as well as aggregating across an axis of `vec`. For\n example:\n\n - vec shape: [3, 4], axis=None -> mean shape: [3, 4], cov shape [3, 4, 4]\n - vec shape: [3, 4], axis=0 -> mean shape: [4], cov shape [4, 4]\n\n Note that the final unreduced dimension must be the last one (and there must\n be at least one unreduced dimension); thus, the following are illegal:\n\n - vec shape: [3, 4], axis=1 -> Illegal, unreduced dimension is not last.\n - vec shape: [3, 4], axis=[0, 1] -> Illegal, no unreduced dimensions.\n\n Note that this produces a biased estimate of covariance, for simplicity. If\n the unbiased estimate is required, compute it as follows: `state.covariance *\n state.num_points / (state.num_points - 1)`.\n\n Args:\n state: `RunningCovarianceState`.\n vec: A Tensor to incorporate into the variance estimate.\n axis: If not `None`, treat these axes as being additional axes to aggregate\n over.\n\n Returns:\n state: `RunningCovarianceState`.\n extra: Empty tuple.\n \"\"\"\n\n def _outer(x):\n res = tf.einsum('...i,...j->...ij', x, x)\n return res\n\n def _one_part(vec, mean, covariance, num_points):\n \"\"\"Updates a single part.\"\"\"\n vec = tf.convert_to_tensor(vec, mean.dtype)\n broadcast_mean = mean\n if axis is not None:\n for a in util.flatten_tree(axis):\n broadcast_mean = tf.expand_dims(broadcast_mean, a)\n centered_vec = vec - broadcast_mean\n num_points_f = tf.cast(num_points, vec.dtype)\n\n # pyformat: disable\n # These are derived by using the definition of covariance for N and N + 1\n # points, and then identifying the previous terms/simplifying.\n if axis is None:\n additional_points = 1\n additional_points_f = 1\n new_covariance = (\n num_points_f * (num_points_f + additional_points_f) * covariance +\n num_points_f * _outer(centered_vec)) / (\n tf.square(num_points_f + additional_points_f))\n else:\n vec_shape = tf.shape(vec)\n additional_points = tf.math.reduce_prod(tf.gather(vec_shape, axis))\n additional_points_f = tf.cast(additional_points, vec.dtype)\n new_covariance = (\n num_points_f * (num_points_f + additional_points_f) * covariance +\n num_points_f * tf.reduce_sum(_outer(centered_vec), axis) -\n _outer(tf.reduce_sum(vec, axis)) + additional_points_f *\n tf.reduce_sum(_outer(vec), axis)) / (\n tf.square(num_points_f + additional_points_f))\n centered_vec = tf.reduce_sum(centered_vec, axis)\n # pyformat: enable\n new_mean = mean + centered_vec / (num_points_f + additional_points_f)\n return new_mean, new_covariance, num_points + additional_points\n\n new_mean_covariance_num_points = util.map_tree(_one_part, vec, state.mean,\n state.covariance,\n state.num_points)\n\n new_mean = util.map_tree_up_to(state.mean, lambda x: x[0],\n new_mean_covariance_num_points)\n new_covariance = util.map_tree_up_to(state.mean, lambda x: x[1],\n new_mean_covariance_num_points)\n new_num_points = util.map_tree_up_to(state.mean, lambda x: x[2],\n new_mean_covariance_num_points)\n return RunningCovarianceState(\n num_points=new_num_points, mean=new_mean, covariance=new_covariance), ()\n\n\nRunningMeanState = collections.namedtuple('RunningMeanState',\n 'num_points, mean')\n\n\ndef running_mean_init(shape: IntTensor, dtype: DTypeNest) -> RunningMeanState:\n \"\"\"Initializes the `RunningMeanState`.\n\n Args:\n shape: Shape of the computed statistics.\n 
dtype: DType of the computed statistics.\n\n Returns:\n state: `RunningMeanState`.\n \"\"\"\n return RunningMeanState(\n num_points=util.map_tree(lambda _: tf.zeros([], tf.int32), dtype),\n mean=util.map_tree_up_to(dtype, tf.zeros, shape, dtype),\n )\n\n\ndef running_mean_step(\n state: RunningMeanState,\n vec: FloatTensor,\n axis: Union[int, List[int]] = None\n) -> Tuple[RunningMeanState, Tuple[()]]:\n \"\"\"Updates the `RunningMeanState`.\n\n As a computational convenience, this allows computing both independent\n mean estimates, as well as aggregating across an axis of `vec`. For example:\n\n - vec shape: [3, 4], axis=None -> mean shape: [3, 4]\n - vec shape: [3, 4], axis=0 -> mean shape: [4]\n - vec shape: [3, 4], axis=1 -> mean shape: [3]\n - vec shape: [3, 4], axis=[0, 1] -> mean shape: []\n\n Args:\n state: `RunningMeanState`.\n vec: A Tensor to incorporate into the mean.\n axis: If not `None`, treat these axes as being additional axes to aggregate\n over.\n\n Returns:\n state: `RunningMeanState`.\n extra: Empty tuple.\n \"\"\"\n\n def _one_part(vec, mean, num_points):\n \"\"\"Updates a single part.\"\"\"\n vec = tf.convert_to_tensor(vec, mean.dtype)\n broadcast_mean = mean\n if axis is not None:\n for a in util.flatten_tree(axis):\n broadcast_mean = tf.expand_dims(broadcast_mean, a)\n centered_vec = vec - broadcast_mean\n num_points_f = tf.cast(num_points, vec.dtype)\n if axis is None:\n additional_points = 1\n additional_points_f = 1\n else:\n vec_shape = tf.shape(vec)\n additional_points = tf.math.reduce_prod(tf.gather(vec_shape, axis))\n additional_points_f = tf.cast(additional_points, vec.dtype)\n centered_vec = tf.reduce_sum(centered_vec, axis)\n new_mean = state.mean + centered_vec / (num_points_f + additional_points_f)\n return new_mean, num_points + additional_points\n\n new_mean_num_points = util.map_tree(_one_part, vec, state.mean,\n state.num_points)\n\n new_mean = util.map_tree_up_to(state.mean, lambda x: x[0],\n new_mean_num_points)\n new_num_points = util.map_tree_up_to(state.mean, lambda x: x[1],\n new_mean_num_points)\n return RunningMeanState(num_points=new_num_points, mean=new_mean), ()\n\n\nclass PotentialScaleReductionState(RunningVarianceState):\n pass\n\n\ndef potential_scale_reduction_init(shape,\n dtype) -> PotentialScaleReductionState:\n \"\"\"Initializes `PotentialScaleReductionState`.\n\n Args:\n shape: Shape of the MCMC state.\n dtype: DType of the MCMC state.\n\n Returns:\n state: `PotentialScaleReductionState`.\n \"\"\"\n # We are wrapping running variance so that the user doesn't get the chance to\n # set the reduction axis, which would break the assumptions of\n # `potential_scale_reduction_extract`.\n return PotentialScaleReductionState(\n *running_variance_init(shape, dtype))\n\n\ndef potential_scale_reduction_step(\n state: PotentialScaleReductionState,\n sample) -> Tuple[PotentialScaleReductionState, Tuple[()]]:\n \"\"\"Updates `PotentialScaleReductionState`.\n\n This computes the 'potential scale reduction' statistic from [1]. Note that\n this actually refers to the potential *variance* reduction, but the scale\n terminology has stuck. When this is close to 1, the chains are often\n considered converged.\n\n To extract the actual value of the statistic, use\n `potential_scale_reduction_extract`.\n\n Args:\n state: `PotentialScaleReductionState`\n sample: A sample from an MCMC chain. 
The leading dimension must have shape\n of at least 1.\n\n Returns:\n state: `PotentialScaleReductionState`.\n extra: Empty tuple.\n\n #### References\n\n [1]: Rooks, S. P. B., & Elman, A. G. (1998). General Methods for Monitoring\n Convergence of Iterative Simulations, 7(4), 434-455.\n \"\"\"\n # We are wrapping running variance so that the user doesn't get the chance to\n # set the reduction axis, which would break the assumptions of\n # `potential_scale_reduction_extract`.\n return PotentialScaleReductionState(\n *running_variance_step(state, sample)[0]), ()\n\n\ndef potential_scale_reduction_extract(\n state: PotentialScaleReductionState,\n independent_chain_ndims: IntNest = 1) -> FloatNest:\n \"\"\"Extracts the 'potential scale reduction' statistic.\n\n Args:\n state: `PotentialScaleReductionState`.\n independent_chain_ndims: Number of initial dimensions that are treated as\n indexing independent chains. Must be at least 1.\n\n Returns:\n rhat: Potential scale reduction.\n \"\"\"\n independent_chain_ndims = maybe_broadcast_structure(independent_chain_ndims,\n state.mean)\n dtype = state.mean.dtype\n\n def _psr_part(num_points, mean, variance, independent_chain_ndims):\n \"\"\"Compute PSR for a single part.\"\"\"\n # TODO(siege): Keeping these per-component points is mildly wasteful because\n # unlike general running variance estimation, these are always the same\n # across parts.\n num_points = tf.cast(num_points, dtype)\n num_chains = tf.cast(\n tf.math.reduce_prod(tf.shape(mean)[:independent_chain_ndims]), dtype)\n\n independent_dims = list(range(independent_chain_ndims))\n # Within chain variance.\n var_w = tf.reduce_mean(variance, independent_dims)\n # Between chain variance.\n var_b = num_chains / (num_chains - 1) * tf.math.reduce_variance(\n state.mean, independent_dims)\n # Estimate of the true variance of the target distribution.\n sigma2p = var_w + var_b\n return ((num_chains + 1) / num_chains * sigma2p / var_w - (num_points - 1) /\n (num_chains * num_points))\n\n return util.map_tree(_psr_part, state.num_points, state.mean, state.variance,\n independent_chain_ndims)\n\n\nRunningApproximateAutoCovarianceState = collections.namedtuple(\n 'RunningApproximateAutoCovarianceState', 'buffer, num_steps, '\n 'mean, auto_covariance')\n\n\ndef running_approximate_auto_covariance_init(\n max_lags: int,\n state_shape: IntTensor,\n dtype: DTypeNest,\n axis: Union[int, List[int]] = None,\n) -> RunningApproximateAutoCovarianceState:\n \"\"\"Initializes `RunningApproximateAutoCovarianceState`.\n\n Args:\n max_lags: Maximum lag for the computed auto-covariance.\n state_shape: Shape of the sequence elements that the auto-covariance is\n computed over. Note that this is before the averaging by the `axis`\n argument.\n dtype: DType of the state.\n axis: Axes to average over. 
See `running_approximate_auto_covariance_step`\n for details.\n\n Returns:\n state: `RunningApproximateAutoCovarianceState`.\n \"\"\"\n if axis is None:\n mean_shape = state_shape\n else:\n # TODO(siege): Can this be done without doing the surrogate computation?\n mean_shape = util.map_tree_up_to(\n dtype, lambda s: tf.shape(tf.reduce_sum(tf.zeros(s), axis)),\n state_shape)\n\n def _shape_with_lags(shape):\n if isinstance(shape, (tuple, list)):\n return [max_lags + 1] + list(shape)\n else:\n return tf.concat([[max_lags + 1],\n tf.convert_to_tensor(shape, tf.int32)],\n axis=0)\n\n return RunningApproximateAutoCovarianceState(\n buffer=util.map_tree_up_to(\n dtype, lambda d, s: tf.zeros(_shape_with_lags(s), dtype=d), dtype,\n state_shape),\n num_steps=tf.zeros([], dtype=tf.int32),\n mean=util.map_tree_up_to(dtype, lambda d, s: tf.zeros(s, dtype=d), dtype,\n mean_shape),\n auto_covariance=util.map_tree_up_to(\n dtype, lambda d, s: tf.zeros(_shape_with_lags(s), dtype=d), dtype,\n mean_shape),\n )\n\n\ndef running_approximate_auto_covariance_step(\n state: RunningApproximateAutoCovarianceState,\n vec: TensorNest,\n axis: Union[int, List[int]] = None,\n) -> Tuple[RunningApproximateAutoCovarianceState, Tuple[()]]:\n \"\"\"Updates `RunningApproximateAutoCovarianceState`.\n\n This computes a running auto-covariance of a sequence using a biased\n approximation. The algorithm effectively performs `max_lags + 1` separate\n covariance estimates, except with the running mean terms replaced by a shared\n mean computed at lag 0. This is not mathematically correct for lag > 0, but\n empirically the bias is manageable. The bias is large when the `max_lags` is\n large compared to the sequence length: a factor of about 3x is often adequate.\n\n This used a very naive algorithm based on keeping the last `max_lags + 1`\n elements of the sequence as part of the state. The time complexity is\n `O(max_lags * sequence_length)`, so this should only be used instead of the\n versions based on FFT when the memory requrements for materializing the whole\n sequence are excessive.\n\n For convenience, this function supports computing the average auto-correlation\n across dimensions of the elements by specifying the `axis` argument. This must\n either be `None` or refer to the leading dimensions of `vec`. For example:\n\n - vec shape: [3, 4], axis=None -> auto_covariance shape: [max_lags + 1, 3, 4]\n - vec shape: [3, 4], axis=0 -> auto_covariance shape: [max_lags + 1, 4]\n - vec shape: [3, 4], axis=[0, 1] -> auto_covariance shape: [max_lags + 1]\n\n Args:\n state: `RunningApproximateAutoCovarianceState`\n vec: An element of a sequence. 
This must have the same shape as was passed\n to `running_approximate_auto_covariance_init`.\n axis: If not `None`, treat these axes as being axes to average over.\n\n Returns:\n state: `RunningApproximateAutoCovarianceState`.\n extra: Empty tuple.\n \"\"\"\n\n def _one_part(vec, buf, mean, auto_cov):\n \"\"\"Compute the auto-covariance for one part.\"\"\"\n buf_size = tf.shape(buf)[0]\n tail_idx = tf.range(0, buf_size - 1)\n num_steps = state.num_steps - tf.range(buf_size)\n num_steps = tf.maximum(0, num_steps)\n\n buf = tf.gather(buf, tail_idx)\n buf = tf.concat([vec[tf.newaxis], buf], 0)\n centered_buf = buf - mean\n centered_vec = vec - mean\n\n num_steps_0 = num_steps[0]\n # Need to broadcast on the right with autocov.\n if isinstance(auto_cov.shape, tuple) and isinstance(num_steps.shape, tuple):\n steps_shape = ([-1] + [1] * (len(auto_cov.shape) - len(num_steps.shape)))\n else:\n steps_shape = tf.concat(\n [[-1],\n tf.ones(\n [tf.rank(auto_cov) - tf.rank(num_steps)],\n dtype=tf.int32,\n )],\n axis=0,\n )\n num_steps = tf.reshape(num_steps, steps_shape)\n\n # pyformat: disable\n if axis is None:\n additional_points = 1\n additional_points_f = 1\n # This assumes `additional_points` is the same for every step,\n # verified by the buf update logic above.\n num_points_f = additional_points_f * tf.cast(num_steps, mean.dtype)\n\n auto_cov = ((\n num_points_f * (num_points_f + additional_points_f) * auto_cov +\n num_points_f * centered_vec * centered_buf) /\n tf.square(num_points_f + additional_points_f))\n else:\n vec_shape = tf.shape(vec)\n additional_points = tf.math.reduce_prod(tf.gather(vec_shape, axis))\n additional_points_f = tf.cast(additional_points, vec.dtype)\n num_points_f = additional_points_f * tf.cast(num_steps, mean.dtype)\n buf_axis = util.map_tree(lambda a: a + 1, axis)\n\n auto_cov = (\n num_points_f * (num_points_f + additional_points_f) * auto_cov +\n num_points_f * tf.reduce_sum(centered_vec * centered_buf, buf_axis) -\n tf.reduce_sum(vec, axis) * tf.reduce_sum(buf, buf_axis) +\n additional_points_f * tf.reduce_sum(vec * buf, buf_axis)) / (\n tf.square(num_points_f + additional_points_f))\n centered_vec = tf.reduce_sum(centered_vec, axis)\n # pyformat: enable\n num_points_0_f = additional_points_f * tf.cast(num_steps_0, mean.dtype)\n mean = mean + centered_vec / (num_points_0_f + additional_points_f)\n return buf, auto_cov, mean\n\n new_buffer_auto_cov_mean = util.map_tree(_one_part, vec, state.buffer,\n state.mean, state.auto_covariance)\n\n new_buffer = util.map_tree_up_to(state.buffer, lambda x: x[0],\n new_buffer_auto_cov_mean)\n new_auto_cov = util.map_tree_up_to(state.buffer, lambda x: x[1],\n new_buffer_auto_cov_mean)\n new_mean = util.map_tree_up_to(state.buffer, lambda x: x[2],\n new_buffer_auto_cov_mean)\n\n state = RunningApproximateAutoCovarianceState(\n num_steps=state.num_steps + 1,\n buffer=new_buffer,\n auto_covariance=new_auto_cov,\n mean=new_mean,\n )\n return state, ()\n" ]
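The code field above ends with the source of a functional MCMC module. As a minimal, self-contained illustration of the Hamiltonian Monte Carlo scheme it documents (a leapfrog integrator using the [0.5, 1., 0.5] splitting coefficients, followed by a Metropolis-Hastings accept/reject on the energy change), here is a plain-NumPy sketch. It does not import the module listed above; the standard-normal target, step size, and trajectory length are illustrative assumptions.

import numpy as np

def target_log_prob(x):
    # Standard-normal target, up to an additive constant.
    return -0.5 * np.sum(x ** 2)

def grad_target_log_prob(x):
    return -x

def leapfrog(x, p, step_size, num_steps):
    # Half momentum step, alternating full position/momentum steps,
    # closing half momentum step: the [0.5, 1., 0.5] splitting.
    p = p + 0.5 * step_size * grad_target_log_prob(x)
    for _ in range(num_steps - 1):
        x = x + step_size * p
        p = p + step_size * grad_target_log_prob(x)
    x = x + step_size * p
    p = p + 0.5 * step_size * grad_target_log_prob(x)
    return x, p

def hmc_step(x, rng, step_size=0.2, num_integrator_steps=10):
    p = rng.standard_normal(x.shape)
    current_energy = -target_log_prob(x) + 0.5 * np.sum(p ** 2)
    new_x, new_p = leapfrog(x, p, step_size, num_integrator_steps)
    proposed_energy = -target_log_prob(new_x) + 0.5 * np.sum(new_p ** 2)
    energy_change = proposed_energy - current_energy
    # Accept when log(u) < -energy_change, mirroring the convention in
    # metropolis_hastings_step (log accept ratio = negative energy change).
    if np.log(rng.uniform()) < -energy_change:
        return new_x, True
    return x, False

rng = np.random.default_rng(0)
x = np.zeros(2)
accepted = 0
for _ in range(1000):
    x, ok = hmc_step(x, rng)
    accepted += ok
print(accepted / 1000.0)  # acceptance rate; high for this easy target

The leapfrog update is symmetric and volume-preserving, which is what makes the simple accept test on the energy change sufficient to preserve detailed balance.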
[ [ "tensorflow.compat.v2.math.xlogy", "tensorflow.compat.v2.math.digamma", "tensorflow.compat.v2.broadcast_to", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.math.betainc", "tensorflow.compat.v2.math.lgamma", "tensorflow.compat.v2.math.log1p", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.broadcast_static_shape", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.TensorShape", "tensorflow.compat.v2.constant", "tensorflow.python.util.deprecation.deprecated" ], [ "numpy.zeros_like", "numpy.any", "tensorflow.compat.v2.TensorShape", "numpy.ones_like", "scipy.special.digamma", "scipy.stats.beta.cdf", "tensorflow.compat.v1.set_random_seed", "tensorflow.compat.v2.Variable", "tensorflow.compat.v2.test.main", "scipy.stats.beta", "numpy.cov", "scipy.special.betaln", "numpy.random.rand", "tensorflow.compat.v2.constant", "numpy.array", "numpy.random.random", "numpy.isfinite", "tensorflow.compat.v2.GradientTape", "scipy.stats.beta.var", "scipy.stats.beta.mean", "numpy.ones", "scipy.stats.beta.entropy" ], [ "tensorflow.compat.v2.math.softmax", "tensorflow.compat.v2.shape", "numpy.mean", "numpy.any", "tensorflow.compat.v2.math.log_softmax", "tensorflow.compat.v2.reduce_sum", "numpy.random.randn", "numpy.zeros_like", "numpy.exp", "tensorflow.compat.v1.placeholder_with_default", "numpy.float32", "tensorflow.compat.v2.math.log", "numpy.zeros", "tensorflow.compat.v2.Variable", "numpy.log", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.constant", "numpy.array", "tensorflow.compat.v2.get_static_value", "tensorflow.compat.v2.GradientTape" ], [ "scipy.stats.gamma", "numpy.zeros_like", "scipy.stats.gamma.cdf", "tensorflow.compat.v2.TensorShape", "numpy.exp", "scipy.stats.gamma.logpdf", "scipy.stats.gamma.std", "numpy.reshape", "numpy.arange", "scipy.stats.gamma.var", "scipy.special.digamma", "tensorflow.compat.v2.Variable", "numpy.log", "tensorflow.compat.v2.test.main", "scipy.special.gammaln", "tensorflow.compat.v2.constant", "numpy.array", "scipy.stats.gamma.entropy", "tensorflow.compat.v2.GradientTape", "scipy.stats.gamma.mean" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
whiskie14142/spktype21
[ "7ed22365fe92cdb74c416d27634df96a45712953" ]
[ "source/spktype21.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"A supporting module for jplephem to handle data type 21 (Version 0.1.0)\n\nThis module computes position and velocity of a celestial small body, from a \nNASA SPICE SPK ephemeris kernel file of data type 21 (Extended Modified \nDifference Arrays).\nhttp://naif.jpl.nasa.gov/pub/naif/toolkit_docs/FORTRAN/req/spk.html\n\nYou can get SPK files for many solar system small bodies from HORIZONS \nsystem of NASA/JPL. See https://ssd.jpl.nasa.gov/?horizons\n\nThis module reads SPK files of data type 21, one of the types of binary SPK \nfile. \n\nAt the point of Oct. 2018, HORIZONS system provides files of type 21 for \nbinary SPK files by default. You can get type 21 binary SPK file for celestial \nsmall bodies through TELNET interface by answering back 'Binary' for \n'SPK file format'. Also you can get type 21 binary SPK file from:\nhttps://ssd.jpl.nasa.gov/x/spk.html\n\nModules required:\n jplephem (version 2.6 or later)\n numpy\n\nUsage:\n from spktype21 import SPKType21\n kernel = SPKType21.open('path')\n position, velocity = kernel.compute_type21(center, target, jd)\n \n where:\n path - path to the SPK file\n center - SPKID of central body (0 for SSB, 10 for Sun, etc.)\n target - SPKID of target body\n jd - time for computation (Julian date)\n\nExceptions:\n RuntimeError will be raised when:\n invalid data_type of SPK file, or\n SPK file contains too large table in EMDA record(s)\n ValueError will be raised when:\n invalid parameter(s) of compute_type21 function\n\nAuthor: Shushi Uetsuki (whiskie14142)\nThis module has been developed based on jplephem and FORTRAN source \nof the SPICE Toolkit of NASA/JPL/NAIF.\njplephem : https://pypi.org/project/jplephem/\nSPICE Toolkit : http://naif.jpl.nasa.gov/naif/toolkit.html\n\"\"\"\n\nfrom numpy import array, zeros, reshape\nfrom jplephem.daf import DAF\nfrom jplephem.names import target_names\n\nT0 = 2451545.0\nS_PER_DAY = 86400.0\n\n# Included from 'spk21.inc' on the FORTRAN source 'spke21.f'\nMAXTRM = 25\n\ndef jd(seconds):\n \"\"\"Convert a number of seconds since J2000 to a Julian Date.\n \"\"\"\n return T0 + seconds / S_PER_DAY\n\nclass SPKType21(object):\n \"\"\"Class for SPK kernel to handle data type 21 (Extended Modified Difference Arrays)\n \"\"\"\n def __init__(self, daf):\n self.daf = daf\n self.segments = [Segment(self.daf, *t) for t in self.daf.summaries()]\n ssec = lambda s : s.start_second\n self.segments.sort(key=ssec)\n \n # initialize arrays for spke21\n self.G = zeros(MAXTRM)\n \n self.REFPOS = zeros(3)\n self.REFVEL = zeros(3)\n \n self.KQ = array([0, 0, 0])\n self.FC = zeros(MAXTRM)\n self.FC[0] = 1.0\n self.WC = zeros(MAXTRM - 1)\n self.W = zeros(MAXTRM + 2)\n \n # initialize for compute_type21\n self.mda_record_exist = False\n self.current_segment_exist = False\n \n @classmethod\n def open(cls, path):\n \"\"\"Open the file at `path` and return an SPK instance.\n \"\"\"\n return cls(DAF(open(path, 'rb')))\n\n def close(self):\n \"\"\"Close this SPK file.\"\"\"\n self.daf.file.close()\n\n def __str__(self):\n daf = self.daf\n d = lambda b: b.decode('latin-1')\n lines = (str(segment) for segment in self.segments)\n return 'File type {0} and format {1} with {2} segments:\\n{3}'.format(\n d(daf.locidw), d(daf.locfmt), len(self.segments), '\\n'.join(lines))\n \n def comments(self):\n return self.daf.comments()\n\n def compute_type21(self, center, target, jd1, jd2=0.0):\n \"\"\"Compute position and velocity of target from SPK data (data type 21).\n Inputs:\n center - SPKID of the coordinate center (0 
for Solar System Barycenter, \n 10 for Sun, etc)\n target - SPKID of the target\n jd1, jd2 - Julian date of epoch for computation. (jd1 + jd2) will \n be used for computation. If you want precise definition of \n epoch, jd1 should be an integer or a half integer, and jd2 should\n be a relatively small floating point number.\n Returns:\n Position (X, Y, Z) and velocity (XD, YD, ZD) of the target at \n epoch. Position and velocity are provided as Numpy arrays \n respectively.\n \"\"\"\n eval_sec = (jd1 - T0)\n eval_sec = (eval_sec + jd2) * S_PER_DAY\n \n if self.mda_record_exist:\n if eval_sec >= self.mda_lb and eval_sec < self.mda_ub:\n result = self.spke21(eval_sec, self.mda_record)\n return result[0:3], result[3:]\n \n self.mda_record, self.mda_lb, self.mda_ub = self.get_MDA_record(eval_sec, target, center)\n self.mda_record_exists = True\n \n result = self.spke21(eval_sec, self.mda_record)\n return result[0:3], result[3:]\n \n def get_MDA_record(self, eval_sec, target, center):\n \"\"\"Return a EMDA record for defined epoch.\n Inputs:\n eval_sec - epoch for computation, seconds from J2000\n target - body ID of the target\n center - body ID of coordinate center\n Returns:\n EMDA record - a Numpy array of DLSIZE floating point numbers\n Exception:\n ValueError will be raised when:\n eval_sed is outside of SPK data\n target and center are not in SPK data\n RuntimeError will be raised when:\n invalid data type of SPK data\n \"\"\"\n \n # chech last segment can be used\n if self.current_segment_exist:\n if eval_sec >= self.current_segment.start_second \\\n and eval_sec < self.current_segment.end_second \\\n and target == self.current_segment.target \\\n and center == self.current_segment.center:\n \n return self.current_segment.get_MDA_record(eval_sec)\n\n # select segments with matched 'target' and 'center'\n matched = []\n for segment in self.segments:\n if segment.target == target and segment.center == center:\n matched.append(segment)\n if len(matched) == 0:\n raise ValueError('Invalid Target and/or Center')\n if eval_sec < matched[0].start_second or eval_sec >= matched[-1].end_second:\n raise ValueError('Invalid Time to evaluate')\n \n # selet a segment based on eval_sec\n found = False\n for segment in matched:\n if eval_sec < segment.end_second:\n found = True\n self.current_segment = segment\n break\n if not found:\n self.current_segment = matched[-1]\n self.current_segment_exist = True\n \n # get the MDA record from selected segment\n if self.current_segment.data_type != 21:\n raise RuntimeError('Invalid data. Data Type must be 21')\n \n return self.current_segment.get_MDA_record(eval_sec)\n \n\n\n# left this module only 2018/10/12\n\n def spke21(self, ET, RECORD):\n \"\"\"Compute position and velocity from a Modified Difference Array record\n \n Inputs:\n ET: Epoch time to evaluate position and velocity (seconds since J2000)\n RECORD: A record of Extended Modified Difference Array\n Returns: STATE\n STATE: A numpy array which contains position and velocity\n \"\"\"\n \n# This method was translated from FORTRAN source code ‘spke21.f’ of SPICE \n# Toolkit and modified by Shushi Uetsuki.\n# \n# SPICE Toolkit for FORTRAN : http://naif.jpl.nasa.gov/naif/toolkit_FORTRAN.html\n# SPK Required Reading : http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/req/spk.html\n# \n# Unfortunately, I found some discrepancies between FORTRAN source code \n# and actual data contained in SPK files. 
So, I tried to compose a \n# method that compute positions and velocities correctly by referencing \n# code of spktype01.\n\n# Following comments start with #C were copied from original FORTRAN code.\n\n#C$ Abstract\n#C\n#C Evaluate a single SPK data record from a segment of type 21\n#C (Extended Difference Lines).\n#C\n#C$ Disclaimer\n#C\n#C THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE\n#C CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S.\n#C GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE\n#C PUBLICLY AVAILABLE UNDER U.S. EXPORT LAWS AND IS PROVIDED \"AS-IS\"\n#C TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY\n#C WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A\n#C PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC\n#C SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE\n#C SOFTWARE AND RELATED MATERIALS, HOWEVER USED.\n#C\n#C IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA\n#C BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT\n#C LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND,\n#C INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS,\n#C REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE\n#C REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY.\n#C\n#C RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF\n#C THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY\n#C CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE\n#C ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE.\n#C\n#C$ Required_Reading\n#C\n#C SPK\n#C TIME\n#C\n#C$ Keywords\n#C\n#C EPHEMERIS\n#C\n#C$ Declarations\n\n STATE = zeros(6)\n\n#C$ Brief_I/O\n#C\n#C Variable I/O Description\n#C -------- --- --------------------------------------------------\n#C ET I Evaluation epoch.\n#C RECORD I Data record.\n#C STATE O State (position and velocity).\n#C MAXTRM P Maximum number of terms per difference table\n#C component.\n#C\n#C$ Detailed_Input\n#C\n#C ET is an epoch at which a state vector is to be\n#C computed. The epoch is represented as seconds past\n#C J2000 TDB.\n#C\n#C RECORD is a data record which, when evaluated at epoch ET,\n#C will give the state (position and velocity) of an\n#C ephemeris object, relative to its center of motion,\n#C in an inertial reference frame.\n#C\n#C The contents of RECORD are as follows:\n#C\n#C RECORD(1): The difference table size per\n#C Cartesian component. Call this\n#C size MAXDIM; then the difference\n#C line (MDA) size DLSIZE is\n#C\n#C ( 4 * MAXDIM ) + 11\n#C \n#C RECORD(2)\n#C ...\n#C RECORD(1+DLSIZE): An extended difference line.\n#C The contents are:\n#C\n#C Dimension Description\n#C --------- ----------------------------------\n#C 1 Reference epoch of difference line\n#C MAXDIM Stepsize function vector\n#C 1 Reference position vector, x\n#C 1 Reference velocity vector, x\n#C 1 Reference position vector, y\n#C 1 Reference velocity vector, y\n#C 1 Reference position vector, z\n#C 1 Reference velocity vector, z\n#C MAXDIM,3 Modified divided difference\n#C arrays (MDAs)\n#C 1 Maximum integration order plus 1\n#C 3 Integration order array\n#C\n#C$ Detailed_Output\n#C\n#C STATE is the state resulting from evaluation of the input\n#C record at ET. 
Units are km and km/sec.\n#C\n#C$ Parameters\n#C\n#C MAXTRM is the maximum number of terms allowed in\n#C each component of the difference table \n#C contained in the input argument RECORD.\n#C See the INCLUDE file spk21.inc for the value\n#C of MAXTRM.\n#C \n#C$ Exceptions\n#C\n#C 1) If the maximum table size of the input record exceeds \n#C MAXTRM, the error SPICE(DIFFLINETOOLARGE) is signaled.\n#C\n#C$ Files\n#C\n#C None.\n#C\n#C$ Particulars\n#C\n#C The exact format and structure of type 21 (difference lines)\n#C segments are described in the SPK Required Reading file.\n#C\n#C SPKE21 is a modified version of SPKE01. The routine has been\n#C generalized to support variable size difference lines.\n#C\n#C$ Examples\n#C\n#C None.\n#C\n#C$ Restrictions\n#C\n#C Unknown.\n#C\n#C$ Literature_References\n#C\n#C NAIF Document 168.0, \"S- and P- Kernel (SPK) Specification and\n#C User's Guide\"\n#C\n#C$ Author_and_Institution\n#C\n#C N.J. Bachman (JPL)\n#C F.T. Krogh (JPL)\n#C W.L. Taber (JPL)\n#C I.M. Underwood (JPL)\n#C\n#C$ Version\n#C\n#C- SPICELIB Version 1.0.0, 03-FEB-2014 (NJB) (FTK) (WLT) (IMU)\n#C\n#C-&\n# \n#C$ Index_Entries\n#C\n#C evaluate type_21 spk segment\n#C\n#C-&\n\n#C\n#C The first element of the input record is the dimension\n#C of the difference table MAXDIM. \n#C\n\n# The FORTRAN source code indicates that RECORD[0] contains MAXDIM, but actual \n# data record does not contain it. MAXDIM is contained in each segment.\n\n MAXDIM = self.current_segment.MAXDIM\n\n \n if MAXDIM > MAXTRM:\n mes = ('SPKE21 \\nThe input record has a maximum table dimension ' +\n 'of {0}, while the maximum supported by this routine is {1}. ' +\n 'It is possible that this problem is due to your software ' +\n 'beeing out of date.').format(MAXDIM, MAXTRM)\n raise RuntimeError(mes)\n return STATE\n \n#C\n#C Unpack the contents of the MDA array.\n#C\n#C Name Dimension Description\n#C ------ --------- -------------------------------\n#C TL 1 Reference epoch of record\n#C G MAXDIM Stepsize function vector\n#C REFPOS 3 Reference position vector\n#C REFVEL 3 Reference velocity vector\n#C DT MAXDIM,NTE Modified divided difference arrays\n#C KQMAX1 1 Maximum integration order plus 1\n#C KQ NTE Integration order array\n#C\n#C For our purposes, NTE is always 3.\n#C\n\n# The FORTRAN source code indicates that RECORD[1] contains TL, but on the \n# actual data RECORD[0] contains it, and all addresses for following data are \n# shifted forward by one.\n\n self.TL = RECORD[0]\n self.G = RECORD[1:MAXDIM + 1]\n\n#C \n#C Collect the reference position and velocity.\n#C \n self.REFPOS[0] = RECORD[MAXDIM + 1]\n self.REFVEL[0] = RECORD[MAXDIM + 2]\n \n self.REFPOS[1] = RECORD[MAXDIM + 3]\n self.REFVEL[1] = RECORD[MAXDIM + 4]\n \n self.REFPOS[2] = RECORD[MAXDIM + 5]\n self.REFVEL[2] = RECORD[MAXDIM + 6]\n \n#C\n#C Initializing the difference table is one aspect of this routine\n#C that's a bit different from SPKE01. Here the first dimension of\n#C the table in the input record can be smaller than MAXTRM. 
So, we\n#C must transfer separately the portions of the table corresponding\n#C to each component.\n#C\n self.DT = reshape(RECORD[MAXDIM + 7:MAXDIM * 4 + 7], (MAXDIM, 3), \n order='F')\n \n self.KQMAX1 = int(RECORD[4 * MAXDIM + 7])\n self.KQ[0] = int(RECORD[4 * MAXDIM + 8])\n self.KQ[1] = int(RECORD[4 * MAXDIM + 9])\n self.KQ[2] = int(RECORD[4 * MAXDIM + 10])\n#C \n#C Next we set up for the computation of the various differences\n#C \n self.DELTA = ET - self.TL\n self.TP = self.DELTA\n self.MQ2 = self.KQMAX1 - 2\n self.KS = self.KQMAX1 - 1\n\n#C\n#C This is clearly collecting some kind of coefficients. \n#C The problem is that we have no idea what they are...\n#C \n#C The G coefficients are supposed to be some kind of step size \n#C vector. \n#C \n#C TP starts out as the delta t between the request time and the\n#C difference line's reference epoch. We then change it from DELTA\n#C by the components of the stepsize vector G.\n#C\n for J in range(1, self.MQ2 + 1):\n#C\n#C Make sure we're not about to attempt division by zero.\n#C\n if self.G[J-1] == 0.0:\n mes = ('SPKE21\\nA value of zero was found at index {0} ' + \n 'of the step size vector.').format(J)\n raise RuntimeError(mes)\n return STATE\n \n self.FC[J] = self.TP / self.G[J-1]\n self.WC[J-1] = self.DELTA / self.G[J-1]\n self.TP = self.DELTA + self.G[J-1]\n\n#C\n#C Collect KQMAX1 reciprocals. \n#C \n for J in range(1, self.KQMAX1 + 1):\n self.W[J-1] = 1.0 / float(J)\n\n#C\n#C Compute the W(K) terms needed for the position interpolation\n#C (Note, it is assumed throughout this routine that KS, which \n#C starts out as KQMAX1-1 (the ``maximum integration'') \n#C is at least 2.\n#C\n self.JX = 0\n self.KS1 = self.KS - 1\n \n while self.KS >= 2:\n \n self.JX = self.JX + 1\n \n for J in range(1, self.JX + 1):\n self.W[J+self.KS-1] = self.FC[J] * self.W[J+self.KS1-1] - self.WC[J-1] * self.W[J+self.KS-1]\n \n self.KS = self.KS1\n self.KS1 = self.KS1 - 1\n\n#C\n#C Perform position interpolation: (Note that KS = 1 right now.\n#C We don't know much more than that.)\n#C\n for I in range(1, 3 + 1):\n \n self.KQQ = self.KQ[I-1]\n self.SUM = 0.0\n \n for J in range(self.KQQ, 0, -1):\n self.SUM = self.SUM + self.DT[J-1, I-1] * self.W[J+self.KS-1]\n \n STATE[I-1] = self.REFPOS[I-1] + self.DELTA * (self.REFVEL[I-1] + self.DELTA * self.SUM)\n\n#C\n#C Again we need to compute the W(K) coefficients that are \n#C going to be used in the velocity interpolation. 
\n#C (Note, at this point, KS = 1, KS1 = 0.)\n#C \n for J in range(1, self.JX + 1):\n self.W[J+self.KS-1] = self.FC[J] * self.W[J+self.KS1-1] - self.WC[J-1] * self.W[J+self.KS-1]\n \n self.KS = self.KS - 1\n \n#C\n#C Perform velocity interpolation:\n#C\n for I in range(1, 3 + 1):\n self.KQQ = self.KQ[I-1]\n self.SUM = 0.0\n \n for J in range(self.KQQ, 0, -1):\n self.SUM = self.SUM + self.DT[J-1, I-1] * self.W[J+self.KS-1]\n \n STATE[I+3-1] = self.REFVEL[I-1] + self.DELTA * self.SUM\n \n return STATE\n \n \n\nclass Segment(object):\n \"\"\"A single segment of a SPK file.\n\n There are several items of information about each segment that are\n loaded from the underlying SPK file, and made available as object\n attributes:\n\n segment.source - official ephemeris name, like 'DE-0430LE-0430'\n segment.start_second - initial epoch, as seconds from J2000\n segment.end_second - final epoch, as seconds from J2000\n segment.start_jd - start_second, converted to a Julian Date\n segment.end_jd - end_second, converted to a Julian Date\n segment.center - integer center identifier\n segment.target - integer target identifier\n segment.frame - integer frame identifier\n segment.data_type - integer data type identifier\n segment.start_i - index where segment starts\n segment.end_i - index where segment ends\n \"\"\"\n def __init__(self, daf, source, descriptor):\n self.daf = daf\n self.source = source\n (self.start_second, self.end_second, self.target, self.center,\n self.frame, self.data_type, self.start_i, self.end_i) = descriptor\n self.start_jd = jd(self.start_second)\n self.end_jd = jd(self.end_second)\n \n# 'SPK Required Reading' indicates that the penultimate element of the segment \n# is the difference line size (DLSIZE), but actual data contains there a MAXDIM.\n \n self.MAXDIM = int(self.daf.map_array(self.end_i - 1, self.end_i - 1))\n self.DLSIZE = 4 * self.MAXDIM + 11\n\n def __str__(self):\n return self.describe(verbose=False)\n\n def describe(self, verbose=True):\n \"\"\"Return a textual description of the segment.\n \"\"\"\n center = titlecase(target_names.get(self.center, 'Unknown center'))\n target = titlecase(target_names.get(self.target, 'Unknown target'))\n text = ('{0.start_jd:.2f}..{0.end_jd:.2f} {1} ({0.center})'\n ' -> {2} ({0.target})'\n ' data_type={0.data_type}'.format(self, center, target))\n if verbose:\n text += ('\\n frame={0.frame} data_type={0.data_type} source={1}'\n .format(self, self.source.decode('ascii')))\n return text\n \n def get_MDA_record(self, time_sec):\n \"\"\"Return a Modified Difference Array(MDA) record for the time to \n evaluate with its effective time boundaries (lower and upper).\n Inputs:\n time_sec - epoch for computation, seconds from J2000\n Returns: mda_record, lower_boundary, upper_boundary\n mda_record: A Modified Difference Array record\n lower_boundary: lower boundary of the record, seconds since J2000\n upper_boundary: upper boundary of the record, seconds since J2000\n \"\"\"\n\n # Number of records in this segment\n entry_count = int(self.daf.map_array(self.end_i, self.end_i))\n \n # Number of entries in epoch directory \n epoch_dir_count = entry_count // 100\n \n # serch target epoch in epoch directory to narrow serching aria\n if epoch_dir_count >= 1:\n epoch_dir = self.daf.map_array(self.end_i - epoch_dir_count - 1,\n self.end_i - 2)\n found = False\n for i in range(1, epoch_dir_count + 1):\n if epoch_dir[i-1] > time_sec:\n found = True\n break\n if found:\n serch_last_index = i * 100\n serch_start_index = (i - 1) * 100 + 1\n else:\n 
serch_last_index = entry_count\n serch_start_index = epoch_dir_count * 100 + 1\n else:\n serch_last_index = entry_count\n serch_start_index = 1\n\n # epoch_table contains epochs for all records in this segment \n epoch_table = self.daf.map_array(self.start_i + (entry_count * self.DLSIZE),\n self.start_i + (entry_count * self.DLSIZE) + entry_count - 1)\n\n # serch target epoch in epoch_table\n found = False\n for i in range(serch_start_index, serch_last_index + 1):\n if epoch_table[i-1] > time_sec:\n found = True\n break\n if not found:\n i = serch_last_index\n record_index = i\n upper_boundary = epoch_table[i-1]\n if i != 1:\n lower_boundary = epoch_table[i-2]\n else:\n lower_boundary = self.start_second\n \n mda_record = self.daf.map_array(self.start_i + ((record_index - 1) * self.DLSIZE),\n self.start_i + (record_index * self.DLSIZE) - 1)\n\n # mda_record : one record of MDA\n # lower_boundary : lower boundary of epoch in this MDA record\n # upper_boundary : upper boundary of epoch in this MDA record\n return mda_record, lower_boundary, upper_boundary\n\ndef titlecase(name):\n \"\"\"Title-case target `name` if it looks safe to do so.\n \"\"\"\n return name if name.startswith(('1', 'C/', 'DSS-')) else name.title()\n\n\n\n\n\n\n\n" ]
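The record above ends the embedded spktype21 source. For orientation, a minimal usage sketch of that module, following its own docstring; the kernel file name, SPKIDs, and epoch below are placeholders for illustration, not values taken from this record:

```python
# Minimal sketch: evaluate a small body's state from a type-21 SPK kernel,
# following the spktype21 docstring above. The file name, SPKIDs and epoch
# are hypothetical placeholders.
from spktype21 import SPKType21

kernel = SPKType21.open('2000001.bsp')      # path to a type-21 SPK file (placeholder)
position, velocity = kernel.compute_type21(
    10,          # center: SPKID of the Sun (0 would be the Solar System Barycenter)
    2000001,     # target: SPKID of the small body (placeholder)
    2458600.5)   # epoch as a Julian date
print(position)  # numpy array, km relative to the center
print(velocity)  # numpy array, km/s
kernel.close()
```

As the code above shows, compute_type21 caches the last MDA record and the last matching segment, so repeated calls with nearby epochs, targets and centers skip the segment and epoch-directory search.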
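The two interpolation loops in spke21 can be restated per Cartesian component i; this is only a paraphrase of what the code computes (with Δ = ET − TL, difference table DT, integration orders KQ, and the weight table W built by the preceding recurrences), not an independent derivation. W' and KS' denote the weight table and index after the extra pass performed before the velocity sum:

\[
S_i = \sum_{j=1}^{KQ_i} \mathrm{DT}_{j,i}\, W_{j+KS}, \qquad
\mathrm{STATE}_i = \mathrm{REFPOS}_i + \Delta\left(\mathrm{REFVEL}_i + \Delta\, S_i\right),
\]
\[
S'_i = \sum_{j=1}^{KQ_i} \mathrm{DT}_{j,i}\, W'_{j+KS'}, \qquad
\mathrm{STATE}_{i+3} = \mathrm{REFVEL}_i + \Delta\, S'_i .
\]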
[ [ "numpy.reshape", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nvogtvincent/parcels
[ "6f6dbadacaae54949ade9acd4e4a57dd8b5af398", "6f6dbadacaae54949ade9acd4e4a57dd8b5af398" ]
[ "parcels/kernel/basekernel.py", "parcels/fieldset.py" ]
[ "import re\nimport _ctypes\nimport inspect\nimport numpy.ctypeslib as npct\nfrom time import time as ostime\nfrom os import path\nfrom os import remove\nfrom sys import platform\nfrom sys import version_info\nfrom ast import FunctionDef\nfrom hashlib import md5\nfrom parcels.tools.loggers import logger\nimport numpy as np\nfrom numpy import ndarray\n\ntry:\n from mpi4py import MPI\nexcept:\n MPI = None\n\nfrom parcels.tools.global_statics import get_cache_dir\n\n# === import just necessary field classes to perform setup checks === #\nfrom parcels.field import Field\nfrom parcels.field import VectorField\nfrom parcels.field import NestedField\nfrom parcels.field import SummedField\nfrom parcels.grid import GridCode\nfrom parcels.field import FieldOutOfBoundError\nfrom parcels.field import FieldOutOfBoundSurfaceError\nfrom parcels.field import TimeExtrapolationError\nfrom parcels.tools.statuscodes import StateCode, OperationCode, ErrorCode\nfrom parcels.application_kernels.advection import AdvectionRK4_3D\nfrom parcels.application_kernels.advection import AdvectionAnalytical\n\n__all__ = ['BaseKernel']\n\n\nre_indent = re.compile(r\"^(\\s+)\")\n\n\nclass BaseKernel(object):\n \"\"\"Base super class for base Kernel objects that encapsulates auto-generated code.\n\n :arg fieldset: FieldSet object providing the field information (possibly None)\n :arg ptype: PType object for the kernel particle\n :arg pyfunc: (aggregated) Kernel function\n :arg funcname: function name\n :param delete_cfiles: Boolean whether to delete the C-files after compilation in JIT mode (default is True)\n\n Note: A Kernel is either created from a compiled <function ...> object\n or the necessary information (funcname, funccode, funcvars) is provided.\n The py_ast argument may be derived from the code string, but for\n concatenation, the merged AST plus the new header definition is required.\n \"\"\"\n _pyfunc = None\n _fieldset = None\n _ptype = None\n funcname = None\n\n def __init__(self, fieldset, ptype, pyfunc=None, funcname=None, funccode=None, py_ast=None, funcvars=None,\n c_include=\"\", delete_cfiles=True):\n self._fieldset = fieldset\n self.field_args = None\n self.const_args = None\n self._ptype = ptype\n self._lib = None\n self.delete_cfiles = delete_cfiles\n self._cleanup_files = None\n self._cleanup_lib = None\n self._c_include = c_include\n\n # Derive meta information from pyfunc, if not given\n self._pyfunc = None\n self.funcname = funcname or pyfunc.__name__\n self.name = \"%s%s\" % (ptype.name, self.funcname)\n self.ccode = \"\"\n self.funcvars = funcvars\n self.funccode = funccode\n self.py_ast = py_ast\n self.dyn_srcs = []\n self.static_srcs = []\n self.src_file = None\n self.lib_file = None\n self.log_file = None\n\n # Generate the kernel function and add the outer loop\n if self._ptype.uses_jit:\n src_file_or_files, self.lib_file, self.log_file = self.get_kernel_compile_files()\n if type(src_file_or_files) in (list, dict, tuple, ndarray):\n self.dyn_srcs = src_file_or_files\n else:\n self.src_file = src_file_or_files\n\n def __del__(self):\n # Clean-up the in-memory dynamic linked libraries.\n # This is not really necessary, as these programs are not that large, but with the new random\n # naming scheme which is required on Windows OS'es to deal with updates to a Parcels' kernel.\n try:\n self.remove_lib()\n except:\n pass\n self._fieldset = None\n self.field_args = None\n self.const_args = None\n self.funcvars = None\n self.funccode = None\n\n @property\n def ptype(self):\n return self._ptype\n\n 
@property\n def pyfunc(self):\n return self._pyfunc\n\n @property\n def fieldset(self):\n return self._fieldset\n\n @property\n def c_include(self):\n return self._c_include\n\n @property\n def _cache_key(self):\n field_keys = \"\"\n if self.field_args is not None:\n field_keys = \"-\".join(\n [\"%s:%s\" % (name, field.units.__class__.__name__) for name, field in self.field_args.items()])\n key = self.name + self.ptype._cache_key + field_keys + ('TIME:%f' % ostime())\n return md5(key.encode('utf-8')).hexdigest()\n\n @staticmethod\n def fix_indentation(string):\n \"\"\"Fix indentation to allow in-lined kernel definitions\"\"\"\n lines = string.split('\\n')\n indent = re_indent.match(lines[0])\n if indent:\n lines = [line.replace(indent.groups()[0], '', 1) for line in lines]\n return \"\\n\".join(lines)\n\n def check_fieldsets_in_kernels(self, pyfunc):\n \"\"\"\n function checks the integrity of the fieldset with the kernels.\n This function is to be called from the derived class when setting up the 'pyfunc'.\n \"\"\"\n if self.fieldset is not None:\n if pyfunc is AdvectionRK4_3D:\n warning = False\n if isinstance(self._fieldset.W, Field) and self._fieldset.W.creation_log != 'from_nemo' and \\\n self._fieldset.W._scaling_factor is not None and self._fieldset.W._scaling_factor > 0:\n warning = True\n if type(self._fieldset.W) in [SummedField, NestedField]:\n for f in self._fieldset.W:\n if f.creation_log != 'from_nemo' and f._scaling_factor is not None and f._scaling_factor > 0:\n warning = True\n if warning:\n logger.warning_once('Note that in AdvectionRK4_3D, vertical velocity is assumed positive towards increasing z.\\n'\n ' If z increases downward and w is positive upward you can re-orient it downwards by setting fieldset.W.set_scaling_factor(-1.)')\n elif pyfunc is AdvectionAnalytical:\n if self._ptype.uses_jit:\n raise NotImplementedError('Analytical Advection only works in Scipy mode')\n if self._fieldset.U.interp_method != 'cgrid_velocity':\n raise NotImplementedError('Analytical Advection only works with C-grids')\n if self._fieldset.U.grid.gtype not in [GridCode.CurvilinearZGrid, GridCode.RectilinearZGrid]:\n raise NotImplementedError('Analytical Advection only works with Z-grids in the vertical')\n\n def check_kernel_signature_on_version(self):\n \"\"\"\n returns numkernelargs\n \"\"\"\n numkernelargs = 0\n if self._pyfunc is not None:\n if version_info[0] < 3:\n numkernelargs = len(inspect.getargspec(self._pyfunc).args)\n else:\n numkernelargs = len(inspect.getfullargspec(self._pyfunc).args)\n return numkernelargs\n\n def remove_lib(self):\n if self._lib is not None:\n BaseKernel.cleanup_unload_lib(self._lib)\n del self._lib\n self._lib = None\n\n all_files_array = []\n if self.src_file is None:\n if self.dyn_srcs is not None:\n [all_files_array.append(fpath) for fpath in self.dyn_srcs]\n else:\n if self.src_file is not None:\n all_files_array.append(self.src_file)\n if self.log_file is not None:\n all_files_array.append(self.log_file)\n if self.lib_file is not None and all_files_array is not None and self.delete_cfiles is not None:\n BaseKernel.cleanup_remove_files(self.lib_file, all_files_array, self.delete_cfiles)\n\n # If file already exists, pull new names. 
This is necessary on a Windows machine, because\n # Python's ctype does not deal in any sort of manner well with dynamic linked libraries on this OS.\n if self._ptype.uses_jit:\n src_file_or_files, self.lib_file, self.log_file = self.get_kernel_compile_files()\n if type(src_file_or_files) in (list, dict, tuple, ndarray):\n self.dyn_srcs = src_file_or_files\n else:\n self.src_file = src_file_or_files\n\n def get_kernel_compile_files(self):\n \"\"\"\n Returns the correct src_file, lib_file, log_file for this kernel\n \"\"\"\n if MPI:\n mpi_comm = MPI.COMM_WORLD\n mpi_rank = mpi_comm.Get_rank()\n cache_name = self._cache_key # only required here because loading is done by Kernel class instead of Compiler class\n dyn_dir = get_cache_dir() if mpi_rank == 0 else None\n dyn_dir = mpi_comm.bcast(dyn_dir, root=0)\n basename = cache_name if mpi_rank == 0 else None\n basename = mpi_comm.bcast(basename, root=0)\n basename = basename + \"_%d\" % mpi_rank\n else:\n cache_name = self._cache_key # only required here because loading is done by Kernel class instead of Compiler class\n dyn_dir = get_cache_dir()\n basename = \"%s_0\" % cache_name\n lib_path = \"lib\" + basename\n src_file_or_files = None\n if type(basename) in (list, dict, tuple, ndarray):\n src_file_or_files = [\"\", ] * len(basename)\n for i, src_file in enumerate(basename):\n src_file_or_files[i] = \"%s.c\" % path.join(dyn_dir, src_file)\n else:\n src_file_or_files = \"%s.c\" % path.join(dyn_dir, basename)\n lib_file = \"%s.%s\" % (path.join(dyn_dir, lib_path), 'dll' if platform == 'win32' else 'so')\n log_file = \"%s.log\" % path.join(dyn_dir, basename)\n return src_file_or_files, lib_file, log_file\n\n def compile(self, compiler):\n \"\"\" Writes kernel code to file and compiles it.\"\"\"\n all_files_array = []\n if self.src_file is None:\n if self.dyn_srcs is not None:\n for dyn_src in self.dyn_srcs:\n with open(dyn_src, 'w') as f:\n f.write(self.ccode)\n all_files_array.append(dyn_src)\n compiler.compile(self.dyn_srcs, self.lib_file, self.log_file)\n else:\n if self.src_file is not None:\n with open(self.src_file, 'w') as f:\n f.write(self.ccode)\n if self.src_file is not None:\n all_files_array.append(self.src_file)\n compiler.compile(self.src_file, self.lib_file, self.log_file)\n if len(all_files_array) > 0:\n logger.info(\"Compiled %s ==> %s\" % (self.name, self.lib_file))\n if self.log_file is not None:\n all_files_array.append(self.log_file)\n\n def load_lib(self):\n self._lib = npct.load_library(self.lib_file, '.')\n self._function = self._lib.particle_loop\n\n def merge(self, kernel, kclass):\n funcname = self.funcname + kernel.funcname\n func_ast = None\n if self.py_ast is not None:\n func_ast = FunctionDef(name=funcname, args=self.py_ast.args, body=self.py_ast.body + kernel.py_ast.body,\n decorator_list=[], lineno=1, col_offset=0)\n delete_cfiles = self.delete_cfiles and kernel.delete_cfiles\n return kclass(self.fieldset, self.ptype, pyfunc=None,\n funcname=funcname, funccode=self.funccode + kernel.funccode,\n py_ast=func_ast, funcvars=self.funcvars + kernel.funcvars,\n c_include=self._c_include + kernel.c_include,\n delete_cfiles=delete_cfiles)\n\n def __add__(self, kernel):\n if not isinstance(kernel, BaseKernel):\n kernel = BaseKernel(self.fieldset, self.ptype, pyfunc=kernel)\n return self.merge(kernel, BaseKernel)\n\n def __radd__(self, kernel):\n if not isinstance(kernel, BaseKernel):\n kernel = BaseKernel(self.fieldset, self.ptype, pyfunc=kernel)\n return kernel.merge(self, BaseKernel)\n\n @staticmethod\n def 
cleanup_remove_files(lib_file, all_files_array, delete_cfiles):\n if lib_file is not None:\n if path.isfile(lib_file): # and delete_cfiles\n [remove(s) for s in [lib_file, ] if path is not None and path.exists(s)]\n if delete_cfiles and len(all_files_array) > 0:\n [remove(s) for s in all_files_array if path is not None and path.exists(s)]\n\n @staticmethod\n def cleanup_unload_lib(lib):\n # Clean-up the in-memory dynamic linked libraries.\n # This is not really necessary, as these programs are not that large, but with the new random\n # naming scheme which is required on Windows OS'es to deal with updates to a Parcels' kernel.\n if lib is not None:\n try:\n _ctypes.FreeLibrary(lib._handle) if platform == 'win32' else _ctypes.dlclose(lib._handle)\n except:\n pass\n\n def remove_deleted(self, pset, output_file, endtime):\n \"\"\"\n Utility to remove all particles that signalled deletion.\n\n This version is generally applicable to all structures and collections\n \"\"\"\n indices = [i for i, p in enumerate(pset) if p.state == OperationCode.Delete]\n if len(indices) > 0 and output_file is not None:\n output_file.write(pset, endtime, deleted_only=indices)\n pset.remove_indices(indices)\n\n def load_fieldset_jit(self, pset):\n \"\"\"\n Updates the loaded fields of pset's fieldset according to the chunk information within their grids\n \"\"\"\n if pset.fieldset is not None:\n for g in pset.fieldset.gridset.grids:\n g.cstruct = None # This force to point newly the grids from Python to C\n # Make a copy of the transposed array to enforce\n # C-contiguous memory layout for JIT mode.\n for f in pset.fieldset.get_fields():\n if type(f) in [VectorField, NestedField, SummedField]:\n continue\n if f.data.dtype != np.float32:\n raise RuntimeError('Field %s data needs to be float32 in JIT mode' % f.name)\n if f in self.field_args.values():\n f.chunk_data()\n else:\n for block_id in range(len(f.data_chunks)):\n f.data_chunks[block_id] = None\n f.c_data_chunks[block_id] = None\n\n for g in pset.fieldset.gridset.grids:\n g.load_chunk = np.where(g.load_chunk == g.chunk_loading_requested,\n g.chunk_loaded_touched, g.load_chunk)\n if len(g.load_chunk) > g.chunk_not_loaded: # not the case if a field in not called in the kernel\n if not g.load_chunk.flags.c_contiguous:\n g.load_chunk = g.load_chunk.copy()\n if not g.depth.flags.c_contiguous:\n g.depth = g.depth.copy()\n if not g.lon.flags.c_contiguous:\n g.lon = g.lon.copy()\n if not g.lat.flags.c_contiguous:\n g.lat = g.lat.copy()\n\n def evaluate_particle(self, p, endtime, sign_dt, dt, analytical=False):\n \"\"\"\n Execute the kernel evaluation of for an individual particle.\n :arg p: object of (sub-)type (ScipyParticle, JITParticle) or (sub-)type of BaseParticleAccessor\n :arg fieldset: fieldset of the containing ParticleSet (e.g. pset.fieldset)\n :arg analytical: flag indicating the analytical advector or an iterative advection\n :arg endtime: endtime of this overall kernel evaluation step\n :arg dt: computational integration timestep\n \"\"\"\n variables = self._ptype.variables\n # back up variables in case of OperationCode.Repeat\n p_var_back = {}\n pdt_prekernels = .0\n # Don't execute particles that aren't started yet\n sign_end_part = np.sign(endtime - p.time)\n # Compute min/max dt for first timestep. 
Only use endtime-p.time for one timestep\n reset_dt = False\n if abs(endtime - p.time) < abs(p.dt):\n dt_pos = abs(endtime - p.time)\n reset_dt = True\n else:\n dt_pos = abs(p.dt)\n reset_dt = False\n\n # ==== numerically stable; also making sure that continuously-recovered particles do end successfully,\n # as they fulfil the condition here on entering at the final calculation here. ==== #\n if ((sign_end_part != sign_dt) or np.isclose(dt_pos, 0)) and not np.isclose(dt, 0):\n if abs(p.time) >= abs(endtime):\n p.set_state(StateCode.Success)\n return p\n\n while p.state in [StateCode.Evaluate, OperationCode.Repeat] or np.isclose(dt, 0):\n for var in variables:\n p_var_back[var.name] = getattr(p, var.name)\n try:\n pdt_prekernels = sign_dt * dt_pos\n p.dt = pdt_prekernels\n state_prev = p.state\n res = self._pyfunc(p, self._fieldset, p.time)\n if res is None:\n res = StateCode.Success\n\n if res is StateCode.Success and p.state != state_prev:\n res = p.state\n\n if not analytical and res == StateCode.Success and not np.isclose(p.dt, pdt_prekernels):\n res = OperationCode.Repeat\n\n except FieldOutOfBoundError as fse_xy:\n res = ErrorCode.ErrorOutOfBounds\n p.exception = fse_xy\n except FieldOutOfBoundSurfaceError as fse_z:\n res = ErrorCode.ErrorThroughSurface\n p.exception = fse_z\n except TimeExtrapolationError as fse_t:\n res = ErrorCode.ErrorTimeExtrapolation\n p.exception = fse_t\n\n except Exception as e:\n res = ErrorCode.Error\n p.exception = e\n\n # Handle particle time and time loop\n if res in [StateCode.Success, OperationCode.Delete]:\n # Update time and repeat\n p.time += p.dt\n if reset_dt and p.dt == pdt_prekernels:\n p.dt = dt\n p.update_next_dt()\n if analytical:\n p.dt = np.inf\n if abs(endtime - p.time) < abs(p.dt):\n dt_pos = abs(endtime - p.time)\n reset_dt = True\n else:\n dt_pos = abs(p.dt)\n reset_dt = False\n\n sign_end_part = np.sign(endtime - p.time)\n if res != OperationCode.Delete and not np.isclose(dt_pos, 0) and (sign_end_part == sign_dt):\n res = StateCode.Evaluate\n if sign_end_part != sign_dt:\n dt_pos = 0\n\n p.set_state(res)\n if np.isclose(dt, 0):\n break\n else:\n p.set_state(res)\n # Try again without time update\n for var in variables:\n if var.name not in ['dt', 'state']:\n setattr(p, var.name, p_var_back[var.name])\n if abs(endtime - p.time) < abs(p.dt):\n dt_pos = abs(endtime - p.time)\n reset_dt = True\n else:\n dt_pos = abs(p.dt)\n reset_dt = False\n\n sign_end_part = np.sign(endtime - p.time)\n if sign_end_part != sign_dt:\n dt_pos = 0\n break\n return p\n\n def execute_jit(self, pset, endtime, dt):\n pass\n\n def execute_python(self, pset, endtime, dt):\n pass\n\n def execute(self, pset, endtime, dt, recovery=None, output_file=None, execute_once=False):\n pass\n", "from copy import deepcopy\nfrom glob import glob\nfrom os import path\n\nimport dask.array as da\nimport numpy as np\nimport warnings\n\nfrom parcels.field import Field, DeferredArray\nfrom parcels.field import NestedField\nfrom parcels.field import SummedField\nfrom parcels.field import VectorField\nfrom parcels.grid import Grid\nfrom parcels.gridset import GridSet\nfrom parcels.grid import GridCode\nfrom parcels.tools.converters import TimeConverter, convert_xarray_time_units\nfrom parcels.tools.statuscodes import TimeExtrapolationError\nfrom parcels.tools.loggers import logger\ntry:\n from mpi4py import MPI\nexcept:\n MPI = None\n\n\n__all__ = ['FieldSet']\n\n\nclass FieldSet(object):\n \"\"\"FieldSet class that holds hydrodynamic data needed to execute particles\n\n :param U: 
:class:`parcels.field.Field` object for zonal velocity component\n :param V: :class:`parcels.field.Field` object for meridional velocity component\n :param fields: Dictionary of additional :class:`parcels.field.Field` objects\n \"\"\"\n def __init__(self, U, V, fields=None):\n self.gridset = GridSet()\n self.completed = False\n if U:\n self.add_field(U, 'U')\n self.time_origin = self.U.grid.time_origin if isinstance(self.U, Field) else self.U[0].grid.time_origin\n if V:\n self.add_field(V, 'V')\n\n # Add additional fields as attributes\n if fields:\n for name, field in fields.items():\n self.add_field(field, name)\n\n self.compute_on_defer = None\n\n @staticmethod\n def checkvaliddimensionsdict(dims):\n for d in dims:\n if d not in ['lon', 'lat', 'depth', 'time']:\n raise NameError('%s is not a valid key in the dimensions dictionary' % d)\n\n @classmethod\n def from_data(cls, data, dimensions, transpose=False, mesh='spherical',\n allow_time_extrapolation=None, time_periodic=False, **kwargs):\n \"\"\"Initialise FieldSet object from raw data\n\n :param data: Dictionary mapping field names to numpy arrays.\n Note that at least a 'U' and 'V' numpy array need to be given, and that\n the built-in Advection kernels assume that U and V are in m/s\n\n 1. If data shape is [xdim, ydim], [xdim, ydim, zdim], [xdim, ydim, tdim] or [xdim, ydim, zdim, tdim],\n whichever is relevant for the dataset, use the flag transpose=True\n 2. If data shape is [ydim, xdim], [zdim, ydim, xdim], [tdim, ydim, xdim] or [tdim, zdim, ydim, xdim],\n use the flag transpose=False (default value)\n 3. If data has any other shape, you first need to reorder it\n :param dimensions: Dictionary mapping field dimensions (lon,\n lat, depth, time) to numpy arrays.\n Note that dimensions can also be a dictionary of dictionaries if\n dimension names are different for each variable\n (e.g. dimensions['U'], dimensions['V'], etc).\n :param transpose: Boolean whether to transpose data on read-in\n :param mesh: String indicating the type of mesh coordinates and\n units used during velocity interpolation, see also `this tutorial <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb>`_:\n\n 1. spherical (default): Lat and lon in degree, with a\n correction for zonal velocity U near the poles.\n 2. flat: No conversion, lat/lon are assumed to be in m.\n :param allow_time_extrapolation: boolean whether to allow for extrapolation\n (i.e. beyond the last available time snapshot)\n Default is False if dimensions includes time, else True\n :param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). 
(Default: False)\n This flag overrides the allow_time_interpolation and sets it to False\n\n Usage examples\n ==============\n\n * `Analytical advection <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_analyticaladvection.ipynb>`_\n\n * `Diffusion <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_diffusion.ipynb>`_\n\n * `Interpolation <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_interpolation.ipynb>`_\n\n * `Unit converters <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb>`_\n\n \"\"\"\n\n fields = {}\n for name, datafld in data.items():\n # Use dimensions[name] if dimensions is a dict of dicts\n dims = dimensions[name] if name in dimensions else dimensions\n cls.checkvaliddimensionsdict(dims)\n\n if allow_time_extrapolation is None:\n allow_time_extrapolation = False if 'time' in dims else True\n\n lon = dims['lon']\n lat = dims['lat']\n depth = np.zeros(1, dtype=np.float32) if 'depth' not in dims else dims['depth']\n time = np.zeros(1, dtype=np.float64) if 'time' not in dims else dims['time']\n time = np.array(time) if not isinstance(time, np.ndarray) else time\n if isinstance(time[0], np.datetime64):\n time_origin = TimeConverter(time[0])\n time = np.array([time_origin.reltime(t) for t in time])\n else:\n time_origin = TimeConverter(0)\n grid = Grid.create_grid(lon, lat, depth, time, time_origin=time_origin, mesh=mesh)\n if 'creation_log' not in kwargs.keys():\n kwargs['creation_log'] = 'from_data'\n\n fields[name] = Field(name, datafld, grid=grid, transpose=transpose,\n allow_time_extrapolation=allow_time_extrapolation, time_periodic=time_periodic, **kwargs)\n u = fields.pop('U', None)\n v = fields.pop('V', None)\n return cls(u, v, fields=fields)\n\n def add_field(self, field, name=None):\n \"\"\"Add a :class:`parcels.field.Field` object to the FieldSet\n\n :param field: :class:`parcels.field.Field` object to be added\n :param name: Name of the :class:`parcels.field.Field` object to be added\n\n For usage examples see the following tutorials:\n\n * `Nested Fields <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_NestedFields.ipynb>`_\n\n * `Unit converters <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb>`_\n\n \"\"\"\n if self.completed:\n raise RuntimeError(\"FieldSet has already been completed. Are you trying to add a Field after you've created the ParticleSet?\")\n name = field.name if name is None else name\n if hasattr(self, name): # check if Field with same name already exists when adding new Field\n raise RuntimeError(\"FieldSet already has a Field with name '%s'\" % name)\n if isinstance(field, SummedField):\n setattr(self, name, field)\n field.name = name\n for fld in field:\n self.gridset.add_grid(fld)\n fld.fieldset = self\n elif isinstance(field, NestedField):\n setattr(self, name, field)\n for fld in field:\n self.gridset.add_grid(fld)\n fld.fieldset = self\n elif isinstance(field, list):\n raise NotImplementedError('FieldLists have been replaced by SummedFields. Use the + operator instead of []')\n else:\n setattr(self, name, field)\n self.gridset.add_grid(field)\n field.fieldset = self\n\n def add_constant_field(self, name, value, mesh='flat'):\n \"\"\"Wrapper function to add a Field that is constant in space,\n useful e.g. 
when using constant horizontal diffusivity\n\n :param name: Name of the :class:`parcels.field.Field` object to be added\n :param value: Value of the constant field (stored as 32-bit float)\n :param units: Optional UnitConverter object, to convert units\n (e.g. for Horizontal diffusivity from m2/s to degree2/s)\n \"\"\"\n self.add_field(Field(name, value, lon=0, lat=0, mesh=mesh))\n\n def add_vector_field(self, vfield):\n \"\"\"Add a :class:`parcels.field.VectorField` object to the FieldSet\n\n :param vfield: :class:`parcels.field.VectorField` object to be added\n \"\"\"\n setattr(self, vfield.name, vfield)\n for v in vfield.__dict__.values():\n if isinstance(v, Field) and (v not in self.get_fields()):\n self.add_field(v)\n vfield.fieldset = self\n if isinstance(vfield, NestedField):\n for f in vfield:\n f.fieldset = self\n\n def check_complete(self):\n assert self.U, 'FieldSet does not have a Field named \"U\"'\n assert self.V, 'FieldSet does not have a Field named \"V\"'\n for attr, value in vars(self).items():\n if type(value) is Field:\n assert value.name == attr, 'Field %s.name (%s) is not consistent' % (value.name, attr)\n\n def check_velocityfields(U, V, W):\n if (U.interp_method == 'cgrid_velocity' and V.interp_method != 'cgrid_velocity') or \\\n (U.interp_method != 'cgrid_velocity' and V.interp_method == 'cgrid_velocity'):\n raise ValueError(\"If one of U,V.interp_method='cgrid_velocity', the other should be too\")\n\n if 'linear_invdist_land_tracer' in [U.interp_method, V.interp_method]:\n raise NotImplementedError(\"interp_method='linear_invdist_land_tracer' is not implemented for U and V Fields\")\n\n if U.interp_method == 'cgrid_velocity':\n if U.grid.xdim == 1 or U.grid.ydim == 1 or V.grid.xdim == 1 or V.grid.ydim == 1:\n raise NotImplementedError('C-grid velocities require longitude and latitude dimensions at least length 2')\n\n if U.gridindexingtype not in ['nemo', 'mitgcm', 'mom5', 'pop']:\n raise ValueError(\"Field.gridindexing has to be one of 'nemo', 'mitgcm', 'mom5' or 'pop'\")\n\n if U.gridindexingtype == 'mitgcm' and U.grid.gtype in [GridCode.CurvilinearZGrid, GridCode.CurvilinearZGrid]:\n raise NotImplementedError('Curvilinear Grids are not implemented for mitgcm-style grid indexing.'\n 'If you have a use-case for this, please let us know by filing an Issue on github')\n\n if V.gridindexingtype != U.gridindexingtype or (W and W.gridindexingtype != U.gridindexingtype):\n raise ValueError('Not all velocity Fields have the same gridindexingtype')\n\n if U.cast_data_dtype != V.cast_data_dtype or (W and W.cast_data_dtype != U.cast_data_dtype):\n raise ValueError('Not all velocity Fields have the same dtype')\n\n if isinstance(self.U, (SummedField, NestedField)):\n w = self.W if hasattr(self, 'W') else [None]*len(self.U)\n for U, V, W in zip(self.U, self.V, w):\n check_velocityfields(U, V, W)\n else:\n W = self.W if hasattr(self, 'W') else None\n check_velocityfields(self.U, self.V, W)\n\n for fld in [self.U, self.V]:\n if isinstance(fld, SummedField) and fld[0].interp_method in ['partialslip', 'freeslip'] and np.any([fld[0].grid is not f.grid for f in fld]):\n warnings.warn('Slip boundary conditions may not work well with SummedFields. 
Be careful', UserWarning)\n\n for g in self.gridset.grids:\n g.check_zonal_periodic()\n if len(g.time) == 1:\n continue\n assert isinstance(g.time_origin.time_origin, type(self.time_origin.time_origin)), 'time origins of different grids must be have the same type'\n g.time = g.time + self.time_origin.reltime(g.time_origin)\n if g.defer_load:\n g.time_full = g.time_full + self.time_origin.reltime(g.time_origin)\n g.time_origin = self.time_origin\n if not hasattr(self, 'UV'):\n if isinstance(self.U, SummedField):\n self.add_vector_field(SummedField('UV', self.U, self.V))\n elif isinstance(self.U, NestedField):\n self.add_vector_field(NestedField('UV', self.U, self.V))\n else:\n self.add_vector_field(VectorField('UV', self.U, self.V))\n if not hasattr(self, 'UVW') and hasattr(self, 'W'):\n if isinstance(self.U, SummedField):\n self.add_vector_field(SummedField('UVW', self.U, self.V, self.W))\n elif isinstance(self.U, NestedField):\n self.add_vector_field(NestedField('UVW', self.U, self.V, self.W))\n else:\n self.add_vector_field(VectorField('UVW', self.U, self.V, self.W))\n\n ccode_fieldnames = []\n counter = 1\n for fld in self.get_fields():\n if fld.name not in ccode_fieldnames:\n fld.ccode_name = fld.name\n else:\n fld.ccode_name = fld.name + str(counter)\n counter += 1\n ccode_fieldnames.append(fld.ccode_name)\n\n for f in self.get_fields():\n if type(f) in [VectorField, NestedField, SummedField] or f.dataFiles is None:\n continue\n if f.grid.depth_field is not None:\n if f.grid.depth_field == 'not_yet_set':\n raise ValueError(\"If depth dimension is set at 'not_yet_set', it must be added later using Field.set_depth_from_field(field)\")\n if not f.grid.defer_load:\n depth_data = f.grid.depth_field.data\n f.grid.depth = depth_data if isinstance(depth_data, np.ndarray) else np.array(depth_data)\n self.completed = True\n\n @classmethod\n def parse_wildcards(cls, paths, filenames, var):\n if not isinstance(paths, list):\n paths = sorted(glob(str(paths)))\n if len(paths) == 0:\n notfound_paths = filenames[var] if isinstance(filenames, dict) and var in filenames else filenames\n raise IOError(\"FieldSet files not found for variable %s: %s\" % (var, str(notfound_paths)))\n for fp in paths:\n if not path.exists(fp):\n raise IOError(\"FieldSet file not found: %s\" % str(fp))\n return paths\n\n @classmethod\n def from_netcdf(cls, filenames, variables, dimensions, indices=None, fieldtype=None,\n mesh='spherical', timestamps=None, allow_time_extrapolation=None, time_periodic=False,\n deferred_load=True, chunksize=None, **kwargs):\n \"\"\"Initialises FieldSet object from NetCDF files\n\n :param filenames: Dictionary mapping variables to file(s). The\n filepath may contain wildcards to indicate multiple files\n or be a list of file.\n filenames can be a list [files], a dictionary {var:[files]},\n a dictionary {dim:[files]} (if lon, lat, depth and/or data not stored in same files as data),\n or a dictionary of dictionaries {var:{dim:[files]}}.\n time values are in filenames[data]\n :param variables: Dictionary mapping variables to variable names in the netCDF file(s).\n Note that the built-in Advection kernels assume that U and V are in m/s\n :param dimensions: Dictionary mapping data dimensions (lon,\n lat, depth, time, data) to dimensions in the netCF file(s).\n Note that dimensions can also be a dictionary of dictionaries if\n dimension names are different for each variable\n (e.g. 
dimensions['U'], dimensions['V'], etc).\n :param indices: Optional dictionary of indices for each dimension\n to read from file(s), to allow for reading of subset of data.\n Default is to read the full extent of each dimension.\n Note that negative indices are not allowed.\n :param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.\n (either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)\n :param mesh: String indicating the type of mesh coordinates and\n units used during velocity interpolation, see also `this tuturial <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb>`_:\n\n 1. spherical (default): Lat and lon in degree, with a\n correction for zonal velocity U near the poles.\n 2. flat: No conversion, lat/lon are assumed to be in m.\n :param timestamps: list of lists or array of arrays containing the timestamps for\n each of the files in filenames. Outer list/array corresponds to files, inner\n array corresponds to indices within files.\n Default is None if dimensions includes time.\n :param allow_time_extrapolation: boolean whether to allow for extrapolation\n (i.e. beyond the last available time snapshot)\n Default is False if dimensions includes time, else True\n :param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)\n This flag overrides the allow_time_interpolation and sets it to False\n :param deferred_load: boolean whether to only pre-load data (in deferred mode) or\n fully load them (default: True). It is advised to deferred load the data, since in\n that case Parcels deals with a better memory management during particle set execution.\n deferred_load=False is however sometimes necessary for plotting the fields.\n :param interp_method: Method for interpolation. Options are 'linear' (default), 'nearest',\n 'linear_invdist_land_tracer', 'cgrid_velocity', 'cgrid_tracer' and 'bgrid_velocity'\n :param gridindexingtype: The type of gridindexing. Either 'nemo' (default) or 'mitgcm' are supported.\n See also the Grid indexing documentation on oceanparcels.org\n :param chunksize: size of the chunks in dask loading. Default is None (no chunking). Can be None or False (no chunking),\n 'auto' (chunking is done in the background, but results in one grid per field individually), or a dict in the format\n '{parcels_varname: {netcdf_dimname : (parcels_dimname, chunksize_as_int)}, ...}', where 'parcels_dimname' is one of ('time', 'depth', 'lat', 'lon')\n :param netcdf_engine: engine to use for netcdf reading in xarray. 
Default is 'netcdf',\n but in cases where this doesn't work, setting netcdf_engine='scipy' could help\n\n For usage examples see the following tutorials:\n\n * `Basic Parcels setup <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/parcels_tutorial.ipynb>`_\n\n * `Argo floats <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_Argofloats.ipynb>`_\n\n * `Timestamps <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_timestamps.ipynb>`_\n\n * `Time-evolving depth dimensions <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_timevaryingdepthdimensions.ipynb>`_\n\n \"\"\"\n # Ensure that times are not provided both in netcdf file and in 'timestamps'.\n if timestamps is not None and 'time' in dimensions:\n logger.warning_once(\"Time already provided, defaulting to dimensions['time'] over timestamps.\")\n timestamps = None\n\n fields = {}\n if 'creation_log' not in kwargs.keys():\n kwargs['creation_log'] = 'from_netcdf'\n for var, name in variables.items():\n # Resolve all matching paths for the current variable\n paths = filenames[var] if type(filenames) is dict and var in filenames else filenames\n if type(paths) is not dict:\n paths = cls.parse_wildcards(paths, filenames, var)\n else:\n for dim, p in paths.items():\n paths[dim] = cls.parse_wildcards(p, filenames, var)\n\n # Use dimensions[var] and indices[var] if either of them is a dict of dicts\n dims = dimensions[var] if var in dimensions else dimensions\n cls.checkvaliddimensionsdict(dims)\n inds = indices[var] if (indices and var in indices) else indices\n fieldtype = fieldtype[var] if (fieldtype and var in fieldtype) else fieldtype\n varchunksize = chunksize[var] if (chunksize and var in chunksize) else chunksize # <varname> -> {<netcdf_dimname>: (<parcels_dimname>, <chunksize_as_int_numeral>) }\n\n grid = None\n dFiles = None\n # check if grid has already been processed (i.e. 
if other fields have same filenames, dimensions and indices)\n for procvar, _ in fields.items():\n procdims = dimensions[procvar] if procvar in dimensions else dimensions\n procinds = indices[procvar] if (indices and procvar in indices) else indices\n procpaths = filenames[procvar] if isinstance(filenames, dict) and procvar in filenames else filenames\n procchunk = chunksize[procvar] if (chunksize and procvar in chunksize) else chunksize\n nowpaths = filenames[var] if isinstance(filenames, dict) and var in filenames else filenames\n if procdims == dims and procinds == inds:\n possibly_samegrid = True\n if procchunk != varchunksize:\n for dim in varchunksize:\n if varchunksize[dim][1] != procchunk[dim][1]:\n possibly_samegrid &= False\n if not possibly_samegrid:\n break\n if varchunksize == 'auto':\n break\n if 'depth' in dims and dims['depth'] == 'not_yet_set':\n break\n processedGrid = False\n if ((not isinstance(filenames, dict)) or filenames[procvar] == filenames[var]):\n processedGrid = True\n elif isinstance(filenames[procvar], dict):\n processedGrid = True\n for dim in ['lon', 'lat', 'depth']:\n if dim in dimensions:\n processedGrid *= filenames[procvar][dim] == filenames[var][dim]\n if processedGrid:\n grid = fields[procvar].grid\n if procpaths == nowpaths:\n dFiles = fields[procvar].dataFiles\n break\n fields[var] = Field.from_netcdf(paths, (var, name), dims, inds, grid=grid, mesh=mesh, timestamps=timestamps,\n allow_time_extrapolation=allow_time_extrapolation,\n time_periodic=time_periodic, deferred_load=deferred_load,\n fieldtype=fieldtype, chunksize=varchunksize, dataFiles=dFiles, **kwargs)\n\n u = fields.pop('U', None)\n v = fields.pop('V', None)\n return cls(u, v, fields=fields)\n\n @classmethod\n def from_nemo(cls, filenames, variables, dimensions, indices=None, mesh='spherical',\n allow_time_extrapolation=None, time_periodic=False,\n tracer_interp_method='cgrid_tracer', chunksize=None, **kwargs):\n \"\"\"Initialises FieldSet object from NetCDF files of Curvilinear NEMO fields.\n\n See `here <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_nemo_curvilinear.ipynb>`_\n for a detailed tutorial on the setup for 2D NEMO fields and `here <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_nemo_3D.ipynb>`_\n for the tutorial on the setup for 3D NEMO fields.\n\n See `here <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/documentation_indexing.ipynb>`_\n for a more detailed explanation of the different methods that can be used for c-grid datasets.\n\n :param filenames: Dictionary mapping variables to file(s). 
The\n filepath may contain wildcards to indicate multiple files,\n or be a list of file.\n filenames can be a list [files], a dictionary {var:[files]},\n a dictionary {dim:[files]} (if lon, lat, depth and/or data not stored in same files as data),\n or a dictionary of dictionaries {var:{dim:[files]}}\n time values are in filenames[data]\n :param variables: Dictionary mapping variables to variable names in the netCDF file(s).\n Note that the built-in Advection kernels assume that U and V are in m/s\n :param dimensions: Dictionary mapping data dimensions (lon,\n lat, depth, time, data) to dimensions in the netCF file(s).\n Note that dimensions can also be a dictionary of dictionaries if\n dimension names are different for each variable.\n Watch out: NEMO is discretised on a C-grid:\n U and V velocities are not located on the same nodes (see https://www.nemo-ocean.eu/doc/node19.html ).\n\n +-----------------------------+-----------------------------+-----------------------------+\n | | V[k,j+1,i+1] | |\n +-----------------------------+-----------------------------+-----------------------------+\n |U[k,j+1,i] |W[k:k+2,j+1,i+1],T[k,j+1,i+1]|U[k,j+1,i+1] |\n +-----------------------------+-----------------------------+-----------------------------+\n | | V[k,j,i+1] + |\n +-----------------------------+-----------------------------+-----------------------------+\n\n To interpolate U, V velocities on the C-grid, Parcels needs to read the f-nodes,\n which are located on the corners of the cells.\n (for indexing details: https://www.nemo-ocean.eu/doc/img360.png )\n In 3D, the depth is the one corresponding to W nodes\n The gridindexingtype is set to 'nemo'. See also the Grid indexing documentation on oceanparcels.org\n :param indices: Optional dictionary of indices for each dimension\n to read from file(s), to allow for reading of subset of data.\n Default is to read the full extent of each dimension.\n Note that negative indices are not allowed.\n :param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.\n (either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)\n :param mesh: String indicating the type of mesh coordinates and\n units used during velocity interpolation, see also `this tutorial <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb>`_:\n\n 1. spherical (default): Lat and lon in degree, with a\n correction for zonal velocity U near the poles.\n 2. flat: No conversion, lat/lon are assumed to be in m.\n :param allow_time_extrapolation: boolean whether to allow for extrapolation\n (i.e. beyond the last available time snapshot)\n Default is False if dimensions includes time, else True\n :param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)\n This flag overrides the allow_time_interpolation and sets it to False\n :param tracer_interp_method: Method for interpolation of tracer fields. It is recommended to use 'cgrid_tracer' (default)\n Note that in the case of from_nemo() and from_cgrid(), the velocity fields are default to 'cgrid_velocity'\n :param chunksize: size of the chunks in dask loading. 
Default is None (no chunking)\n\n \"\"\"\n\n if 'creation_log' not in kwargs.keys():\n kwargs['creation_log'] = 'from_nemo'\n if kwargs.pop('gridindexingtype', 'nemo') != 'nemo':\n raise ValueError(\"gridindexingtype must be 'nemo' in FieldSet.from_nemo(). Use FieldSet.from_c_grid_dataset otherwise\")\n fieldset = cls.from_c_grid_dataset(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic,\n allow_time_extrapolation=allow_time_extrapolation, tracer_interp_method=tracer_interp_method,\n chunksize=chunksize, gridindexingtype='nemo', **kwargs)\n if hasattr(fieldset, 'W'):\n fieldset.W.set_scaling_factor(-1.)\n return fieldset\n\n @classmethod\n def from_mitgcm(cls, filenames, variables, dimensions, indices=None, mesh='spherical',\n allow_time_extrapolation=None, time_periodic=False,\n tracer_interp_method='cgrid_tracer', chunksize=None, **kwargs):\n \"\"\"Initialises FieldSet object from NetCDF files of MITgcm fields.\n All parameters and keywords are exactly the same as for FieldSet.from_nemo(), except that\n gridindexing is set to 'mitgcm' for grids that have the shape\n\n +-----------------------------+-----------------------------+-----------------------------+\n | | V[k,j+1,i] | |\n +-----------------------------+-----------------------------+-----------------------------+\n |U[k,j,i] | W[k-1:k,j,i], T[k,j,i] |U[k,j,i+1] |\n +-----------------------------+-----------------------------+-----------------------------+\n | | V[k,j,i] + |\n +-----------------------------+-----------------------------+-----------------------------+\n\n For indexing details: https://mitgcm.readthedocs.io/en/latest/algorithm/algorithm.html#spatial-discretization-of-the-dynamical-equations\n Note that vertical velocity (W) is assumed postive in the positive z direction (which is upward in MITgcm)\n \"\"\"\n if 'creation_log' not in kwargs.keys():\n kwargs['creation_log'] = 'from_mitgcm'\n if kwargs.pop('gridindexingtype', 'mitgcm') != 'mitgcm':\n raise ValueError(\"gridindexingtype must be 'mitgcm' in FieldSet.from_mitgcm(). Use FieldSet.from_c_grid_dataset otherwise\")\n fieldset = cls.from_c_grid_dataset(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic,\n allow_time_extrapolation=allow_time_extrapolation, tracer_interp_method=tracer_interp_method,\n chunksize=chunksize, gridindexingtype='mitgcm', **kwargs)\n return fieldset\n\n @classmethod\n def from_c_grid_dataset(cls, filenames, variables, dimensions, indices=None, mesh='spherical',\n allow_time_extrapolation=None, time_periodic=False,\n tracer_interp_method='cgrid_tracer', gridindexingtype='nemo', chunksize=None, **kwargs):\n \"\"\"Initialises FieldSet object from NetCDF files of Curvilinear NEMO fields.\n\n See `here <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/documentation_indexing.ipynb>`_\n for a more detailed explanation of the different methods that can be used for c-grid datasets.\n\n :param filenames: Dictionary mapping variables to file(s). 
The\n filepath may contain wildcards to indicate multiple files,\n or be a list of file.\n filenames can be a list [files], a dictionary {var:[files]},\n a dictionary {dim:[files]} (if lon, lat, depth and/or data not stored in same files as data),\n or a dictionary of dictionaries {var:{dim:[files]}}\n time values are in filenames[data]\n :param variables: Dictionary mapping variables to variable\n names in the netCDF file(s).\n :param dimensions: Dictionary mapping data dimensions (lon,\n lat, depth, time, data) to dimensions in the netCF file(s).\n Note that dimensions can also be a dictionary of dictionaries if\n dimension names are different for each variable.\n Watch out: NEMO is discretised on a C-grid:\n U and V velocities are not located on the same nodes (see https://www.nemo-ocean.eu/doc/node19.html ).\n\n +-----------------------------+-----------------------------+-----------------------------+\n | | V[k,j+1,i+1] | |\n +-----------------------------+-----------------------------+-----------------------------+\n |U[k,j+1,i] |W[k:k+2,j+1,i+1],T[k,j+1,i+1]|U[k,j+1,i+1] |\n +-----------------------------+-----------------------------+-----------------------------+\n | | V[k,j,i+1] + |\n +-----------------------------+-----------------------------+-----------------------------+\n\n To interpolate U, V velocities on the C-grid, Parcels needs to read the f-nodes,\n which are located on the corners of the cells.\n (for indexing details: https://www.nemo-ocean.eu/doc/img360.png )\n In 3D, the depth is the one corresponding to W nodes.\n :param indices: Optional dictionary of indices for each dimension\n to read from file(s), to allow for reading of subset of data.\n Default is to read the full extent of each dimension.\n Note that negative indices are not allowed.\n :param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.\n (either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)\n :param mesh: String indicating the type of mesh coordinates and\n units used during velocity interpolation:\n\n 1. spherical (default): Lat and lon in degree, with a\n correction for zonal velocity U near the poles.\n 2. flat: No conversion, lat/lon are assumed to be in m.\n :param allow_time_extrapolation: boolean whether to allow for extrapolation\n (i.e. beyond the last available time snapshot)\n Default is False if dimensions includes time, else True\n :param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)\n This flag overrides the allow_time_interpolation and sets it to False\n :param tracer_interp_method: Method for interpolation of tracer fields. It is recommended to use 'cgrid_tracer' (default)\n Note that in the case of from_nemo() and from_cgrid(), the velocity fields are default to 'cgrid_velocity'\n :param gridindexingtype: The type of gridindexing. Set to 'nemo' in FieldSet.from_nemo()\n See also the Grid indexing documentation on oceanparcels.org\n :param chunksize: size of the chunks in dask loading\n\n \"\"\"\n\n if 'U' in dimensions and 'V' in dimensions and dimensions['U'] != dimensions['V']:\n raise ValueError(\"On a C-grid, the dimensions of velocities should be the corners (f-points) of the cells, so the same for U and V. 
\"\n \"See also https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/documentation_indexing.ipynb\")\n if 'U' in dimensions and 'W' in dimensions and dimensions['U'] != dimensions['W']:\n raise ValueError(\"On a C-grid, the dimensions of velocities should be the corners (f-points) of the cells, so the same for U, V and W. \"\n \"See also https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/documentation_indexing.ipynb\")\n if 'interp_method' in kwargs.keys():\n raise TypeError(\"On a C-grid, the interpolation method for velocities should not be overridden\")\n\n interp_method = {}\n for v in variables:\n if v in ['U', 'V', 'W']:\n interp_method[v] = 'cgrid_velocity'\n else:\n interp_method[v] = tracer_interp_method\n if 'creation_log' not in kwargs.keys():\n kwargs['creation_log'] = 'from_c_grid_dataset'\n\n return cls.from_netcdf(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic,\n allow_time_extrapolation=allow_time_extrapolation, interp_method=interp_method,\n chunksize=chunksize, gridindexingtype=gridindexingtype, **kwargs)\n\n @classmethod\n def from_pop(cls, filenames, variables, dimensions, indices=None, mesh='spherical',\n allow_time_extrapolation=None, time_periodic=False,\n tracer_interp_method='bgrid_tracer', chunksize=None, depth_units='m', **kwargs):\n \"\"\"Initialises FieldSet object from NetCDF files of POP fields.\n It is assumed that the velocities in the POP fields is in cm/s.\n\n :param filenames: Dictionary mapping variables to file(s). The\n filepath may contain wildcards to indicate multiple files,\n or be a list of file.\n filenames can be a list [files], a dictionary {var:[files]},\n a dictionary {dim:[files]} (if lon, lat, depth and/or data not stored in same files as data),\n or a dictionary of dictionaries {var:{dim:[files]}}\n time values are in filenames[data]\n :param variables: Dictionary mapping variables to variable names in the netCDF file(s).\n Note that the built-in Advection kernels assume that U and V are in m/s\n :param dimensions: Dictionary mapping data dimensions (lon,\n lat, depth, time, data) to dimensions in the netCF file(s).\n Note that dimensions can also be a dictionary of dictionaries if\n dimension names are different for each variable.\n Watch out: POP is discretised on a B-grid:\n U and V velocity nodes are not located as W velocity and T tracer nodes (see http://www.cesm.ucar.edu/models/cesm1.0/pop2/doc/sci/POPRefManual.pdf ).\n\n +-----------------------------+-----------------------------+-----------------------------+\n |U[k,j+1,i],V[k,j+1,i] | |U[k,j+1,i+1],V[k,j+1,i+1] |\n +-----------------------------+-----------------------------+-----------------------------+\n | |W[k:k+2,j+1,i+1],T[k,j+1,i+1]| |\n +-----------------------------+-----------------------------+-----------------------------+\n |U[k,j,i],V[k,j,i] | +U[k,j,i+1],V[k,j,i+1] |\n +-----------------------------+-----------------------------+-----------------------------+\n\n In 2D: U and V nodes are on the cell vertices and interpolated bilinearly as a A-grid.\n T node is at the cell centre and interpolated constant per cell as a C-grid.\n In 3D: U and V nodes are at the middle of the cell vertical edges,\n They are interpolated bilinearly (independently of z) in the cell.\n W nodes are at the centre of the horizontal interfaces.\n They are interpolated linearly (as a function of z) in the cell.\n T node is at the cell centre, and constant per cell.\n Note that 
Parcels assumes that the length of the depth dimension (at the W-points)\n is one larger than the size of the velocity and tracer fields in the depth dimension.\n :param indices: Optional dictionary of indices for each dimension\n to read from file(s), to allow for reading of subset of data.\n Default is to read the full extent of each dimension.\n Note that negative indices are not allowed.\n :param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.\n (either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)\n :param mesh: String indicating the type of mesh coordinates and\n units used during velocity interpolation, see also `this tutorial <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb>`_:\n\n 1. spherical (default): Lat and lon in degree, with a\n correction for zonal velocity U near the poles.\n 2. flat: No conversion, lat/lon are assumed to be in m.\n :param allow_time_extrapolation: boolean whether to allow for extrapolation\n (i.e. beyond the last available time snapshot)\n Default is False if dimensions includes time, else True\n :param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)\n This flag overrides the allow_time_interpolation and sets it to False\n :param tracer_interp_method: Method for interpolation of tracer fields. It is recommended to use 'bgrid_tracer' (default)\n Note that in the case of from_pop() and from_bgrid(), the velocity fields are default to 'bgrid_velocity'\n :param chunksize: size of the chunks in dask loading\n :param depth_units: The units of the vertical dimension. Default in Parcels is 'm',\n but many POP outputs are in 'cm'\n\n \"\"\"\n\n if 'creation_log' not in kwargs.keys():\n kwargs['creation_log'] = 'from_pop'\n fieldset = cls.from_b_grid_dataset(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic,\n allow_time_extrapolation=allow_time_extrapolation, tracer_interp_method=tracer_interp_method,\n chunksize=chunksize, gridindexingtype='pop', **kwargs)\n if hasattr(fieldset, 'U'):\n fieldset.U.set_scaling_factor(0.01) # cm/s to m/s\n if hasattr(fieldset, 'V'):\n fieldset.V.set_scaling_factor(0.01) # cm/s to m/s\n if hasattr(fieldset, 'W'):\n if depth_units == 'm':\n fieldset.W.set_scaling_factor(-0.01) # cm/s to m/s and change the W direction\n logger.warning_once(\"Parcels assumes depth in POP output to be in 'm'. Use depth_units='cm' if the output depth is in 'cm'.\")\n elif depth_units == 'cm':\n fieldset.W.set_scaling_factor(-1.) # change the W direction but keep W in cm/s because depth is in cm\n else:\n raise SyntaxError(\"'depth_units' has to be 'm' or 'cm'\")\n return fieldset\n\n @classmethod\n def from_mom5(cls, filenames, variables, dimensions, indices=None, mesh='spherical',\n allow_time_extrapolation=None, time_periodic=False,\n tracer_interp_method='bgrid_tracer', chunksize=None, **kwargs):\n \"\"\"Initialises FieldSet object from NetCDF files of MOM5 fields.\n\n :param filenames: Dictionary mapping variables to file(s). 
The\n filepath may contain wildcards to indicate multiple files,\n or be a list of file.\n filenames can be a list [files], a dictionary {var:[files]},\n a dictionary {dim:[files]} (if lon, lat, depth and/or data not stored in same files as data),\n or a dictionary of dictionaries {var:{dim:[files]}}\n time values are in filenames[data]\n :param variables: Dictionary mapping variables to variable names in the netCDF file(s).\n Note that the built-in Advection kernels assume that U and V are in m/s\n :param dimensions: Dictionary mapping data dimensions (lon,\n lat, depth, time, data) to dimensions in the netCF file(s).\n Note that dimensions can also be a dictionary of dictionaries if\n dimension names are different for each variable.\n\n +-------------------------------+-------------------------------+-------------------------------+\n |U[k,j+1,i],V[k,j+1,i] | |U[k,j+1,i+1],V[k,j+1,i+1] |\n +-------------------------------+-------------------------------+-------------------------------+\n | |W[k-1:k+1,j+1,i+1],T[k,j+1,i+1]| |\n +-------------------------------+-------------------------------+-------------------------------+\n |U[k,j,i],V[k,j,i] | +U[k,j,i+1],V[k,j,i+1] |\n +-------------------------------+-------------------------------+-------------------------------+\n\n In 2D: U and V nodes are on the cell vertices and interpolated bilinearly as a A-grid.\n T node is at the cell centre and interpolated constant per cell as a C-grid.\n In 3D: U and V nodes are at the midlle of the cell vertical edges,\n They are interpolated bilinearly (independently of z) in the cell.\n W nodes are at the centre of the horizontal interfaces, but below the U and V.\n They are interpolated linearly (as a function of z) in the cell.\n Note that W is normally directed upward in MOM5, but Parcels requires W\n in the positive z-direction (downward) so W is multiplied by -1.\n T node is at the cell centre, and constant per cell.\n :param indices: Optional dictionary of indices for each dimension\n to read from file(s), to allow for reading of subset of data.\n Default is to read the full extent of each dimension.\n Note that negative indices are not allowed.\n :param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.\n (either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)\n :param mesh: String indicating the type of mesh coordinates and\n units used during velocity interpolation, see also https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb:\n\n 1. spherical (default): Lat and lon in degree, with a\n correction for zonal velocity U near the poles.\n 2. flat: No conversion, lat/lon are assumed to be in m.\n :param allow_time_extrapolation: boolean whether to allow for extrapolation\n (i.e. beyond the last available time snapshot)\n Default is False if dimensions includes time, else True\n :param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)\n This flag overrides the allow_time_interpolation and sets it to False\n :param tracer_interp_method: Method for interpolation of tracer fields. 
It is recommended to use 'bgrid_tracer' (default)\n Note that in the case of from_mom5() and from_bgrid(), the velocity fields are default to 'bgrid_velocity'\n :param chunksize: size of the chunks in dask loading\n\n\n \"\"\"\n\n if 'creation_log' not in kwargs.keys():\n kwargs['creation_log'] = 'from_mom5'\n fieldset = cls.from_b_grid_dataset(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic,\n allow_time_extrapolation=allow_time_extrapolation, tracer_interp_method=tracer_interp_method,\n chunksize=chunksize, gridindexingtype='mom5', **kwargs)\n if hasattr(fieldset, 'W'):\n fieldset.W.set_scaling_factor(-1)\n return fieldset\n\n @classmethod\n def from_b_grid_dataset(cls, filenames, variables, dimensions, indices=None, mesh='spherical',\n allow_time_extrapolation=None, time_periodic=False,\n tracer_interp_method='bgrid_tracer', chunksize=None, **kwargs):\n \"\"\"Initialises FieldSet object from NetCDF files of Bgrid fields.\n\n :param filenames: Dictionary mapping variables to file(s). The\n filepath may contain wildcards to indicate multiple files,\n or be a list of file.\n filenames can be a list [files], a dictionary {var:[files]},\n a dictionary {dim:[files]} (if lon, lat, depth and/or data not stored in same files as data),\n or a dictionary of dictionaries {var:{dim:[files]}}\n time values are in filenames[data]\n :param variables: Dictionary mapping variables to variable\n names in the netCDF file(s).\n :param dimensions: Dictionary mapping data dimensions (lon,\n lat, depth, time, data) to dimensions in the netCF file(s).\n Note that dimensions can also be a dictionary of dictionaries if\n dimension names are different for each variable.\n U and V velocity nodes are not located as W velocity and T tracer nodes (see http://www.cesm.ucar.edu/models/cesm1.0/pop2/doc/sci/POPRefManual.pdf ).\n\n +-----------------------------+-----------------------------+-----------------------------+\n |U[k,j+1,i],V[k,j+1,i] | |U[k,j+1,i+1],V[k,j+1,i+1] |\n +-----------------------------+-----------------------------+-----------------------------+\n | |W[k:k+2,j+1,i+1],T[k,j+1,i+1]| |\n +-----------------------------+-----------------------------+-----------------------------+\n |U[k,j,i],V[k,j,i] | +U[k,j,i+1],V[k,j,i+1] |\n +-----------------------------+-----------------------------+-----------------------------+\n\n In 2D: U and V nodes are on the cell vertices and interpolated bilinearly as a A-grid.\n T node is at the cell centre and interpolated constant per cell as a C-grid.\n In 3D: U and V nodes are at the midlle of the cell vertical edges,\n They are interpolated bilinearly (independently of z) in the cell.\n W nodes are at the centre of the horizontal interfaces.\n They are interpolated linearly (as a function of z) in the cell.\n T node is at the cell centre, and constant per cell.\n :param indices: Optional dictionary of indices for each dimension\n to read from file(s), to allow for reading of subset of data.\n Default is to read the full extent of each dimension.\n Note that negative indices are not allowed.\n :param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.\n (either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)\n :param mesh: String indicating the type of mesh coordinates and\n units used during velocity interpolation:\n\n 1. spherical (default): Lat and lon in degree, with a\n correction for zonal velocity U near the poles.\n 2. 
flat: No conversion, lat/lon are assumed to be in m.\n :param allow_time_extrapolation: boolean whether to allow for extrapolation\n (i.e. beyond the last available time snapshot)\n Default is False if dimensions includes time, else True\n :param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)\n This flag overrides the allow_time_interpolation and sets it to False\n :param tracer_interp_method: Method for interpolation of tracer fields. It is recommended to use 'bgrid_tracer' (default)\n Note that in the case of from_pop() and from_bgrid(), the velocity fields are default to 'bgrid_velocity'\n :param chunksize: size of the chunks in dask loading\n\n \"\"\"\n\n if 'U' in dimensions and 'V' in dimensions and dimensions['U'] != dimensions['V']:\n raise ValueError(\"On a B-grid, the dimensions of velocities should be the (top) corners of the grid cells, so the same for U and V. \"\n \"See also https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/documentation_indexing.ipynb\")\n if 'U' in dimensions and 'W' in dimensions and dimensions['U'] != dimensions['W']:\n raise ValueError(\"On a B-grid, the dimensions of velocities should be the (top) corners of the grid cells, so the same for U, V and W. \"\n \"See also https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/documentation_indexing.ipynb\")\n\n interp_method = {}\n for v in variables:\n if v in ['U', 'V']:\n interp_method[v] = 'bgrid_velocity'\n elif v in ['W']:\n interp_method[v] = 'bgrid_w_velocity'\n else:\n interp_method[v] = tracer_interp_method\n if 'creation_log' not in kwargs.keys():\n kwargs['creation_log'] = 'from_b_grid_dataset'\n\n return cls.from_netcdf(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic,\n allow_time_extrapolation=allow_time_extrapolation, interp_method=interp_method,\n chunksize=chunksize, **kwargs)\n\n @classmethod\n def from_parcels(cls, basename, uvar='vozocrtx', vvar='vomecrty', indices=None, extra_fields=None,\n allow_time_extrapolation=None, time_periodic=False, deferred_load=True,\n chunksize=None, **kwargs):\n \"\"\"Initialises FieldSet data from NetCDF files using the Parcels FieldSet.write() conventions.\n\n :param basename: Base name of the file(s); may contain\n wildcards to indicate multiple files.\n :param indices: Optional dictionary of indices for each dimension\n to read from file(s), to allow for reading of subset of data.\n Default is to read the full extent of each dimension.\n Note that negative indices are not allowed.\n :param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.\n (either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)\n :param extra_fields: Extra fields to read beyond U and V\n :param allow_time_extrapolation: boolean whether to allow for extrapolation\n (i.e. beyond the last available time snapshot)\n Default is False if dimensions includes time, else True\n :param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). (Default: False)\n This flag overrides the allow_time_interpolation and sets it to False\n :param deferred_load: boolean whether to only pre-load data (in deferred mode) or\n fully load them (default: True). 
It is advised to deferred load the data, since in\n that case Parcels deals with a better memory management during particle set execution.\n deferred_load=False is however sometimes necessary for plotting the fields.\n :param chunksize: size of the chunks in dask loading\n\n \"\"\"\n\n if extra_fields is None:\n extra_fields = {}\n if 'creation_log' not in kwargs.keys():\n kwargs['creation_log'] = 'from_parcels'\n\n dimensions = {}\n default_dims = {'lon': 'nav_lon', 'lat': 'nav_lat',\n 'depth': 'depth', 'time': 'time_counter'}\n extra_fields.update({'U': uvar, 'V': vvar})\n for vars in extra_fields:\n dimensions[vars] = deepcopy(default_dims)\n dimensions[vars]['depth'] = 'depth%s' % vars.lower()\n filenames = dict([(v, str(\"%s%s.nc\" % (basename, v)))\n for v in extra_fields.keys()])\n return cls.from_netcdf(filenames, indices=indices, variables=extra_fields,\n dimensions=dimensions, allow_time_extrapolation=allow_time_extrapolation,\n time_periodic=time_periodic, deferred_load=deferred_load,\n chunksize=chunksize, **kwargs)\n\n @classmethod\n def from_xarray_dataset(cls, ds, variables, dimensions, mesh='spherical', allow_time_extrapolation=None,\n time_periodic=False, **kwargs):\n \"\"\"Initialises FieldSet data from xarray Datasets.\n\n :param ds: xarray Dataset.\n Note that the built-in Advection kernels assume that U and V are in m/s\n :param variables: Dictionary mapping parcels variable names to data variables in the xarray Dataset.\n :param dimensions: Dictionary mapping data dimensions (lon,\n lat, depth, time, data) to dimensions in the xarray Dataset.\n Note that dimensions can also be a dictionary of dictionaries if\n dimension names are different for each variable\n (e.g. dimensions['U'], dimensions['V'], etc).\n :param fieldtype: Optional dictionary mapping fields to fieldtypes to be used for UnitConverter.\n (either 'U', 'V', 'Kh_zonal', 'Kh_meridional' or None)\n :param mesh: String indicating the type of mesh coordinates and\n units used during velocity interpolation, see also `this tutorial <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_unitconverters.ipynb>`_:\n\n 1. spherical (default): Lat and lon in degree, with a\n correction for zonal velocity U near the poles.\n 2. flat: No conversion, lat/lon are assumed to be in m.\n :param allow_time_extrapolation: boolean whether to allow for extrapolation\n (i.e. beyond the last available time snapshot)\n Default is False if dimensions includes time, else True\n :param time_periodic: To loop periodically over the time component of the Field. It is set to either False or the length of the period (either float in seconds or datetime.timedelta object). 
(Default: False)\n This flag overrides the allow_time_interpolation and sets it to False\n \"\"\"\n\n fields = {}\n if 'creation_log' not in kwargs.keys():\n kwargs['creation_log'] = 'from_xarray_dataset'\n if 'time' in dimensions:\n if 'units' not in ds[dimensions['time']].attrs and 'Unit' in ds[dimensions['time']].attrs:\n # Fix DataArrays that have time.Unit instead of expected time.units\n convert_xarray_time_units(ds, dimensions['time'])\n\n for var, name in variables.items():\n dims = dimensions[var] if var in dimensions else dimensions\n cls.checkvaliddimensionsdict(dims)\n\n fields[var] = Field.from_xarray(ds[name], var, dims, mesh=mesh, allow_time_extrapolation=allow_time_extrapolation,\n time_periodic=time_periodic, **kwargs)\n u = fields.pop('U', None)\n v = fields.pop('V', None)\n return cls(u, v, fields=fields)\n\n def get_fields(self):\n \"\"\"Returns a list of all the :class:`parcels.field.Field` and :class:`parcels.field.VectorField`\n objects associated with this FieldSet\"\"\"\n fields = []\n for v in self.__dict__.values():\n if type(v) in [Field, VectorField]:\n if v not in fields:\n fields.append(v)\n elif type(v) in [NestedField, SummedField]:\n if v not in fields:\n fields.append(v)\n for v2 in v:\n if v2 not in fields:\n fields.append(v2)\n return fields\n\n def add_constant(self, name, value):\n \"\"\"Add a constant to the FieldSet. Note that all constants are\n stored as 32-bit floats. While constants can be updated during\n execution in SciPy mode, they can not be updated in JIT mode.\n\n Tutorials using fieldset.add_constant:\n `Analytical advection <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_analyticaladvection.ipynb>`_\n `Diffusion <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_diffusion.ipynb>`_\n `Periodic boundaries <https://nbviewer.jupyter.org/github/OceanParcels/parcels/blob/master/parcels/examples/tutorial_periodic_boundaries.ipynb>`_\n\n :param name: Name of the constant\n :param value: Value of the constant (stored as 32-bit float)\n \"\"\"\n setattr(self, name, value)\n\n def add_periodic_halo(self, zonal=False, meridional=False, halosize=5):\n \"\"\"Add a 'halo' to all :class:`parcels.field.Field` objects in a FieldSet,\n through extending the Field (and lon/lat) by copying a small portion\n of the field on one side of the domain to the other.\n\n :param zonal: Create a halo in zonal direction (boolean)\n :param meridional: Create a halo in meridional direction (boolean)\n :param halosize: size of the halo (in grid points). 
Default is 5 grid points\n \"\"\"\n\n for grid in self.gridset.grids:\n grid.add_periodic_halo(zonal, meridional, halosize)\n for attr, value in iter(self.__dict__.items()):\n if isinstance(value, Field):\n value.add_periodic_halo(zonal, meridional, halosize)\n\n def write(self, filename):\n \"\"\"Write FieldSet to NetCDF file using NEMO convention\n\n :param filename: Basename of the output fileset\"\"\"\n\n if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:\n logger.info(\"Generating FieldSet output with basename: %s\" % filename)\n\n if hasattr(self, 'U'):\n self.U.write(filename, varname='vozocrtx')\n if hasattr(self, 'V'):\n self.V.write(filename, varname='vomecrty')\n\n for v in self.get_fields():\n if (v.name != 'U') and (v.name != 'V'):\n v.write(filename)\n\n def advancetime(self, fieldset_new):\n \"\"\"Replace oldest time on FieldSet with new FieldSet\n\n :param fieldset_new: FieldSet snapshot with which the oldest time has to be replaced\"\"\"\n\n logger.warning_once(\"Fieldset.advancetime() is deprecated.\\n \\\n Parcels deals automatically with loading only 2 time steps simultaneously\\\n such that the total allocated memory remains limited.\")\n\n advance = 0\n for gnew in fieldset_new.gridset.grids:\n gnew.advanced = False\n\n for fnew in fieldset_new.get_fields():\n if isinstance(fnew, VectorField):\n continue\n f = getattr(self, fnew.name)\n gnew = fnew.grid\n if not gnew.advanced:\n g = f.grid\n advance2 = g.advancetime(gnew)\n if advance2*advance < 0:\n raise RuntimeError(\"Some Fields of the Fieldset are advanced forward and other backward\")\n advance = advance2\n gnew.advanced = True\n f.advancetime(fnew, advance == 1)\n\n def computeTimeChunk(self, time, dt):\n \"\"\"Load a chunk of three data time steps into the FieldSet.\n This is used when FieldSet uses data imported from netcdf,\n with default option deferred_load. The loaded time steps are at or immediatly before time\n and the two time steps immediately following time if dt is positive (and inversely for negative dt)\n\n :param time: Time around which the FieldSet chunks are to be loaded. 
Time is provided as a double, relatively to Fieldset.time_origin\n :param dt: time step of the integration scheme\n \"\"\"\n signdt = np.sign(dt)\n nextTime = np.infty if dt > 0 else -np.infty\n\n for g in self.gridset.grids:\n g.update_status = 'not_updated'\n for f in self.get_fields():\n if type(f) in [VectorField, NestedField, SummedField] or not f.grid.defer_load:\n continue\n if f.grid.update_status == 'not_updated':\n nextTime_loc = f.grid.computeTimeChunk(f, time, signdt)\n if time == nextTime_loc and signdt != 0:\n raise TimeExtrapolationError(time, field=f, msg='In fset.computeTimeChunk')\n nextTime = min(nextTime, nextTime_loc) if signdt >= 0 else max(nextTime, nextTime_loc)\n\n for f in self.get_fields():\n if type(f) in [VectorField, NestedField, SummedField] or not f.grid.defer_load or f.dataFiles is None:\n continue\n g = f.grid\n if g.update_status == 'first_updated': # First load of data\n if f.data is not None and not isinstance(f.data, DeferredArray):\n if not isinstance(f.data, list):\n f.data = None\n else:\n for i in range(len(f.data)):\n del f.data[i, :]\n\n lib = np if f.chunksize in [False, None] else da\n if f.gridindexingtype == 'pop' and g.zdim > 1:\n zd = g.zdim - 1\n else:\n zd = g.zdim\n data = lib.empty((g.tdim, zd, g.ydim-2*g.meridional_halo, g.xdim-2*g.zonal_halo), dtype=np.float32)\n f.loaded_time_indices = range(2)\n for tind in f.loaded_time_indices:\n for fb in f.filebuffers:\n if fb is not None:\n fb.close()\n fb = None\n data = f.computeTimeChunk(data, tind)\n data = f.rescale_and_set_minmax(data)\n\n if(isinstance(f.data, DeferredArray)):\n f.data = DeferredArray()\n f.data = f.reshape(data)\n if not f.chunk_set:\n f.chunk_setup()\n if len(g.load_chunk) > g.chunk_not_loaded:\n g.load_chunk = np.where(g.load_chunk == g.chunk_loaded_touched,\n g.chunk_loading_requested, g.load_chunk)\n g.load_chunk = np.where(g.load_chunk == g.chunk_deprecated,\n g.chunk_not_loaded, g.load_chunk)\n\n elif g.update_status == 'updated':\n lib = np if isinstance(f.data, np.ndarray) else da\n if f.gridindexingtype == 'pop' and g.zdim > 1:\n zd = g.zdim - 1\n else:\n zd = g.zdim\n data = lib.empty((g.tdim, zd, g.ydim-2*g.meridional_halo, g.xdim-2*g.zonal_halo), dtype=np.float32)\n if signdt >= 0:\n f.loaded_time_indices = [1]\n if f.filebuffers[0] is not None:\n f.filebuffers[0].close()\n f.filebuffers[0] = None\n f.filebuffers[0] = f.filebuffers[1]\n data = f.computeTimeChunk(data, 1)\n else:\n f.loaded_time_indices = [0]\n if f.filebuffers[1] is not None:\n f.filebuffers[1].close()\n f.filebuffers[1] = None\n f.filebuffers[1] = f.filebuffers[0]\n data = f.computeTimeChunk(data, 0)\n data = f.rescale_and_set_minmax(data)\n if signdt >= 0:\n data = f.reshape(data)[1, :]\n if lib is da:\n f.data = lib.stack([f.data[1, :], data], axis=0)\n else:\n if not isinstance(f.data, DeferredArray):\n if isinstance(f.data, list):\n del f.data[0, :]\n else:\n f.data[0, :] = None\n f.data[0, :] = f.data[1, :]\n f.data[1, :] = data\n else:\n data = f.reshape(data)[0, :]\n if lib is da:\n f.data = lib.stack([data, f.data[0, :]], axis=0)\n else:\n if not isinstance(f.data, DeferredArray):\n if isinstance(f.data, list):\n del f.data[1, :]\n else:\n f.data[1, :] = None\n f.data[1, :] = f.data[0, :]\n f.data[0, :] = data\n g.load_chunk = np.where(g.load_chunk == g.chunk_loaded_touched,\n g.chunk_loading_requested, g.load_chunk)\n g.load_chunk = np.where(g.load_chunk == g.chunk_deprecated,\n g.chunk_not_loaded, g.load_chunk)\n if isinstance(f.data, da.core.Array) and len(g.load_chunk) > 0:\n if 
signdt >= 0:\n for block_id in range(len(g.load_chunk)):\n if g.load_chunk[block_id] == g.chunk_loaded_touched:\n if f.data_chunks[block_id] is None:\n # file chunks were never loaded.\n # happens when field not called by kernel, but shares a grid with another field called by kernel\n break\n block = f.get_block(block_id)\n f.data_chunks[block_id][0] = None\n f.data_chunks[block_id][1] = np.array(f.data.blocks[(slice(2),)+block][1])\n else:\n for block_id in range(len(g.load_chunk)):\n if g.load_chunk[block_id] == g.chunk_loaded_touched:\n if f.data_chunks[block_id] is None:\n # file chunks were never loaded.\n # happens when field not called by kernel, but shares a grid with another field called by kernel\n break\n block = f.get_block(block_id)\n f.data_chunks[block_id][1] = None\n f.data_chunks[block_id][0] = np.array(f.data.blocks[(slice(2),)+block][0])\n # do user-defined computations on fieldset data\n if self.compute_on_defer:\n self.compute_on_defer(self)\n\n # update time varying grid depth\n for f in self.get_fields():\n if type(f) in [VectorField, NestedField, SummedField] or not f.grid.defer_load or f.dataFiles is None:\n continue\n if f.grid.depth_field is not None:\n depth_data = f.grid.depth_field.data\n f.grid.depth = depth_data if isinstance(depth_data, np.ndarray) else np.array(depth_data)\n\n if abs(nextTime) == np.infty or np.isnan(nextTime): # Second happens when dt=0\n return nextTime\n else:\n nSteps = int((nextTime - time) / dt)\n if nSteps == 0:\n return nextTime\n else:\n return time + nSteps * dt\n" ]
[ [ "numpy.sign", "numpy.isclose", "numpy.where", "numpy.ctypeslib.load_library" ], [ "numpy.isnan", "numpy.sign", "numpy.any", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joaopdss/aXeleRate
[ "791c8b29056ed11bd0ed306e620664577ec9724c", "791c8b29056ed11bd0ed306e620664577ec9724c", "791c8b29056ed11bd0ed306e620664577ec9724c" ]
[ "axelerate/networks/common_utils/callbacks.py", "tests_training_and_inference.py", "axelerate/networks/segnet/metrics.py" ]
[ "import numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras import backend as K\n\ndef cosine_decay_with_warmup(global_step,\n learning_rate_base,\n total_steps,\n warmup_learning_rate=0.0,\n warmup_steps=0,\n hold_base_rate_steps=0):\n \"\"\"Cosine decay schedule with warm up period.\n Cosine annealing learning rate as described in:\n Loshchilov and Hutter, SGDR: Stochastic Gradient Descent with Warm Restarts.\n ICLR 2017. https://arxiv.org/abs/1608.03983\n In this schedule, the learning rate grows linearly from warmup_learning_rate\n to learning_rate_base for warmup_steps, then transitions to a cosine decay\n schedule.\n Arguments:\n global_step {int} -- global step.\n learning_rate_base {float} -- base learning rate.\n total_steps {int} -- total number of training steps.\n Keyword Arguments:\n warmup_learning_rate {float} -- initial learning rate for warm up. (default: {0.0})\n warmup_steps {int} -- number of warmup steps. (default: {0})\n hold_base_rate_steps {int} -- Optional number of steps to hold base learning rate\n before decaying. (default: {0})\n Returns:\n a float representing learning rate.\n Raises:\n ValueError: if warmup_learning_rate is larger than learning_rate_base,\n or if warmup_steps is larger than total_steps.\n \"\"\"\n\n if total_steps < warmup_steps:\n raise ValueError('total_steps must be larger or equal to '\n 'warmup_steps.')\n learning_rate = 0.5 * learning_rate_base * (1 + np.cos(\n np.pi *\n (global_step - warmup_steps - hold_base_rate_steps\n ) / float(total_steps - warmup_steps - hold_base_rate_steps)))\n if hold_base_rate_steps > 0:\n learning_rate = np.where(global_step > warmup_steps + hold_base_rate_steps,\n learning_rate, learning_rate_base)\n if warmup_steps > 0:\n if learning_rate_base < warmup_learning_rate:\n raise ValueError('learning_rate_base must be larger or equal to '\n 'warmup_learning_rate.')\n slope = (learning_rate_base - warmup_learning_rate) / warmup_steps\n warmup_rate = slope * global_step + warmup_learning_rate\n learning_rate = np.where(global_step < warmup_steps, warmup_rate,\n learning_rate)\n return np.where(global_step > total_steps, 0.0, learning_rate)\n\n\nclass WarmUpCosineDecayScheduler(keras.callbacks.Callback):\n \"\"\"Cosine decay with warmup learning rate scheduler\n \"\"\"\n\n def __init__(self,\n learning_rate_base,\n total_steps,\n global_step_init=0,\n warmup_learning_rate=0.0,\n warmup_steps=0,\n hold_base_rate_steps=0,\n verbose=0):\n \"\"\"Constructor for cosine decay with warmup learning rate scheduler.\n Arguments:\n learning_rate_base {float} -- base learning rate.\n total_steps {int} -- total number of training steps.\n Keyword Arguments:\n global_step_init {int} -- initial global step, e.g. from previous checkpoint.\n warmup_learning_rate {float} -- initial learning rate for warm up. (default: {0.0})\n warmup_steps {int} -- number of warmup steps. (default: {0})\n hold_base_rate_steps {int} -- Optional number of steps to hold base learning rate\n before decaying. (default: {0})\n verbose {int} -- 0: quiet, 1: update messages. 
(default: {0})\n \"\"\"\n\n super(WarmUpCosineDecayScheduler, self).__init__()\n self.learning_rate_base = learning_rate_base\n self.total_steps = total_steps\n self.global_step = global_step_init\n self.warmup_learning_rate = warmup_learning_rate\n self.warmup_steps = warmup_steps\n self.hold_base_rate_steps = hold_base_rate_steps\n self.verbose = verbose\n self.learning_rates = []\n self.current_lr = 0.0\n \n def on_epoch_end(self, epoch, logs={}):\n if self.verbose == 1:\n print('Epoch %05d: Learning rate is %s.\\n' % (epoch, self.current_lr)) \n\n def on_batch_end(self, batch, logs=None):\n self.global_step = self.global_step + 1\n lr = K.get_value(self.model.optimizer.lr)\n self.learning_rates.append(lr)\n\n def on_batch_begin(self, batch, logs=None):\n self.current_lr = cosine_decay_with_warmup(global_step=self.global_step,\n learning_rate_base=self.learning_rate_base,\n total_steps=self.total_steps,\n warmup_learning_rate=self.warmup_learning_rate,\n warmup_steps=self.warmup_steps,\n hold_base_rate_steps=self.hold_base_rate_steps)\n K.set_value(self.model.optimizer.lr, self.current_lr)\n if self.verbose ==2:\n print('\\nBatch %05d: setting learning rate to %s.' % (self.global_step + 1, self.current_lr))\n\n", "import argparse\r\nimport json\r\nfrom axelerate import setup_training, setup_evaluation\r\nimport tensorflow.keras.backend as K\r\nfrom termcolor import colored\r\nimport traceback\r\nimport time \r\n\r\ndef configs(network_type):\r\n\r\n classifier = {\r\n \"model\" : {\r\n \"type\": \"Classifier\",\r\n \"architecture\": \"Tiny Yolo\",\r\n \"input_size\": [224,224],\r\n \"fully-connected\": [],\r\n \"labels\": [],\r\n \"dropout\" : \t\t 0.5,\r\n \"activation_func\": \"\"\r\n },\r\n \"weights\" : {\r\n \"full\": \t\t\t\t\"\",\r\n \"backend\": \t\t None,\r\n \"save_bottleneck\": True\r\n \r\n },\r\n \"train\" : {\r\n \"actual_epoch\": 5,\r\n \"train_image_folder\": \"sample_datasets/classifier/imgs\",\r\n \"train_times\": 1,\r\n \"valid_image_folder\": \"sample_datasets/classifier/imgs_validation\",\r\n \"valid_times\": 1,\r\n \"valid_metric\": \"val_accuracy\",\r\n \"batch_size\": 2,\r\n \"learning_rate\": 1e-4,\r\n \"saved_folder\": \t\t\"classifier\",\r\n \"first_trainable_layer\": \"\",\r\n \"augumentation\":\t\tTrue,\r\n \"loss_func\": \"\"\r\n },\r\n \"converter\" : {\r\n \"type\": \t\t\t\t[]\r\n }\r\n }\r\n\r\n\r\n detector = {\r\n \"model\":{\r\n \"type\": \"Detector\",\r\n \"architecture\": \"MobileNet7_5\",\r\n \"input_size\": 224,\r\n \"anchors\": [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828],\r\n \"labels\": [\"aeroplane\",\"person\",\"diningtable\",\"bottle\",\"bird\",\"bus\",\"boat\",\"cow\",\"sheep\",\"train\"],\r\n \"coord_scale\" : \t\t1.0,\r\n \"class_scale\" : \t\t1.0,\r\n \"object_scale\" : \t\t5.0,\r\n \"no_object_scale\" : \t1.0\r\n },\r\n \"weights\" : {\r\n \"full\": \t\t\t\t\"\",\r\n \"backend\": \t\t None\r\n },\r\n \"train\" : {\r\n \"actual_epoch\": 5,\r\n \"train_image_folder\": \"sample_datasets/detector/imgs\",\r\n \"train_annot_folder\": \"sample_datasets/detector/anns\",\r\n \"train_times\": 1,\r\n \"valid_image_folder\": \"sample_datasets/detector/imgs_validation\",\r\n \"valid_annot_folder\": \"sample_datasets/detector/anns_validation\",\r\n \"valid_times\": 1,\r\n \"valid_metric\": \"mAP\",\r\n \"batch_size\": 2,\r\n \"learning_rate\": 1e-4,\r\n \"saved_folder\": \t\t\"detector\",\r\n \"first_trainable_layer\": \"\",\r\n \"augumentation\":\t\tTrue,\r\n \"is_only_detect\" : \t\tFalse\r\n },\r\n 
\"converter\" : {\r\n \"type\": \t\t\t\t[]\r\n }\r\n }\r\n\r\n segnet = {\r\n \"model\" : {\r\n \"type\": \"SegNet\",\r\n \"architecture\": \"MobileNet5_0\",\r\n \"input_size\": [224,224],\r\n \"n_classes\" : \t\t20\r\n },\r\n \"weights\" : {\r\n \"full\": \t\t\t\t\"\",\r\n \"backend\": \t\t None\r\n },\r\n \"train\" : {\r\n \"actual_epoch\": 5,\r\n \"train_image_folder\": \"sample_datasets/segmentation/imgs\",\r\n \"train_annot_folder\": \"sample_datasets/segmentation/anns\",\r\n \"train_times\": 4,\r\n \"valid_image_folder\": \"sample_datasets/segmentation/imgs_validation\",\r\n \"valid_annot_folder\": \"sample_datasets/segmentation/anns_validation\",\r\n \"valid_times\": 4,\r\n \"valid_metric\": \"val_loss\",\r\n \"batch_size\": 2,\r\n \"learning_rate\": 1e-4,\r\n \"saved_folder\": \t\t\"segment\",\r\n \"first_trainable_layer\": \"\",\r\n \"ignore_zero_class\": False,\r\n \"augumentation\":\t\tTrue\r\n },\r\n \"converter\" : {\r\n \"type\": \t\t\t\t[]\r\n }\r\n }\r\n\r\n dict = {'all':[classifier,detector,segnet],'classifier':[classifier],'detector':[detector],'segnet':[segnet]}\r\n\r\n return dict[network_type]\r\n\r\n\r\nargparser = argparse.ArgumentParser(description='Test axelerate on sample datasets')\r\n\r\nargparser.add_argument(\r\n '-t',\r\n '--type',\r\n default=\"all\",\r\n help='type of network to test:classifier,detector,segnet or all')\r\n \r\nargparser.add_argument(\r\n '-a',\r\n '--arch',\r\n type=bool,\r\n default=False,\r\n help='test all architectures?')\r\n\r\nargparser.add_argument(\r\n '-c',\r\n '--conv',\r\n type=bool,\r\n default=False,\r\n help='test all converters?')\r\n\r\nargs = argparser.parse_args()\r\n\r\narchs = ['MobileNet7_5']\r\nconverters = [\"\"]\r\nerrors = []\r\n\r\nif args.arch:\r\n archs = ['Full Yolo', 'Tiny Yolo', 'MobileNet1_0', 'MobileNet7_5', 'MobileNet5_0', 'MobileNet2_5', 'SqueezeNet', 'NASNetMobile', 'ResNet50', 'DenseNet121']\r\nif args.conv:\r\n converters = ['k210', 'tflite_fullint', 'tflite_dynamic', 'edgetpu', 'openvino', 'onnx']\r\n\r\nfor item in configs(args.type):\r\n for arch in archs:\r\n for converter in converters:\r\n try:\r\n item['model']['architecture'] = arch\r\n item['converter']['type'] = converter\r\n print(json.dumps(item, indent=4, sort_keys=False))\r\n model_path = setup_training(config_dict=item)\r\n K.clear_session()\r\n setup_evaluation(item, model_path)\r\n except Exception as e:\r\n traceback.print_exc()\r\n print(colored(str(e), 'red'))\r\n time.sleep(2)\r\n errors.append(item['model']['type'] + \" \" + arch + \" \" + converter + \" \" + str(e))\r\n\r\nfor error in errors:\r\n print(error)\r\n\r\n\r\n\r\n", "import numpy as np\n\nEPS = 1e-12\n\ndef get_iou(gt, pr, n_classes):\n class_wise = np.zeros(n_classes)\n for cl in range(n_classes):\n intersection = np.sum((gt == cl)*(pr == cl))\n union = np.sum(np.maximum((gt == cl), (pr == cl)))\n iou = float(intersection)/(union + EPS)\n class_wise[cl] = iou\n return class_wise\n" ]
[ [ "tensorflow.keras.backend.get_value", "tensorflow.keras.backend.set_value", "numpy.where" ], [ "tensorflow.keras.backend.clear_session" ], [ "numpy.maximum", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
monroid/openvino
[ "031e998a15ec738c64cc2379d7f30fb73087c272", "8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6", "031e998a15ec738c64cc2379d7f30fb73087c272", "031e998a15ec738c64cc2379d7f30fb73087c272", "031e998a15ec738c64cc2379d7f30fb73087c272", "031e998a15ec738c64cc2379d7f30fb73087c272", "031e998a15ec738c64cc2379d7f30fb73087c272", "8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6", "031e998a15ec738c64cc2379d7f30fb73087c272", "031e998a15ec738c64cc2379d7f30fb73087c272", "031e998a15ec738c64cc2379d7f30fb73087c272", "031e998a15ec738c64cc2379d7f30fb73087c272", "031e998a15ec738c64cc2379d7f30fb73087c272", "031e998a15ec738c64cc2379d7f30fb73087c272" ]
[ "model-optimizer/unit_tests/extensions/front/div_test.py", "runtime/bindings/python/tests/test_frontend/test_frontend_onnx.py", "tests/layer_tests/tensorflow_tests/test_tf_BatchToSpace.py", "tests/layer_tests/common/utils/tf_utils.py", "model-optimizer/unit_tests/extensions/front/mxnet/conv_ext_test.py", "ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_transpose.py", "model-optimizer/mo/ops/power.py", "runtime/bindings/python/tests/test_inference_engine/test_infer_request.py", "model-optimizer/extensions/middle/EltwiseChecker.py", "model-optimizer/extensions/ops/reorgyolo.py", "model-optimizer/unit_tests/extensions/front/AttributedClampNormalizer_test.py", "ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_matmul_v2.py", "model-optimizer/unit_tests/extensions/ops/split_test.py", "model-optimizer/mo/middle/passes/fusing/fuse_linear_ops.py" ]
[ "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport unittest\n\nimport numpy as np\n\nfrom extensions.front.div import Div\nfrom mo.utils.ir_engine.compare_graphs import compare_graphs\nfrom unit_tests.utils.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, \\\n connect_data\n\nnodes = {\n **regular_op_with_shaped_data('placeholder_1', [1, 227, 227, 3], {'type': 'Parameter'}),\n **regular_op_with_shaped_data('placeholder_2', [1, 227, 227, 3], {'type': 'Parameter'}),\n **regular_op_with_shaped_data('div', None, {'op': 'Div', 'type': 'Divide', 'name': 'my_div'}),\n\n **regular_op_with_shaped_data('reciprocal', [1, 227, 227, 3], {'type': 'Power'}),\n **valued_const_with_data('minus_one', np.array(-1.)),\n **regular_op_with_shaped_data('mul', None, {'type': 'Multiply'}),\n\n **result(),\n}\n\n\nclass TestDiv(unittest.TestCase):\n def test_div_test_1(self):\n # Test with two different inputs from two placeholders\n graph = build_graph(nodes, [\n *connect('placeholder_1', '0:div'),\n *connect('placeholder_2', '1:div'),\n *connect('div', 'output'),\n ], nodes_with_edges_only=True)\n Div().find_and_replace_pattern(graph)\n\n graph_ref = build_graph(nodes, [\n *connect('placeholder_1', '0:mul'),\n *connect('placeholder_2', '0:reciprocal'),\n *connect('minus_one', '1:reciprocal'),\n *connect('reciprocal', '1:mul'),\n *connect('mul', 'output'),\n ], nodes_with_edges_only=True)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)\n self.assertTrue(flag, resp)\n self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='Multiply')[0]]['name'] == 'my_div')\n\n def test_div_test_2(self):\n # Test with two same inputs from one placeholder\n graph = build_graph(nodes, [\n *connect('placeholder_1:0', '0:div'),\n *connect_data('placeholder_1:0', '1:div'),\n *connect('div', 'output'),\n ], nodes_with_edges_only=True)\n Div().find_and_replace_pattern(graph)\n\n graph_ref = build_graph(nodes, [\n *connect('placeholder_1:0', '0:mul'),\n *connect_data('placeholder_1:0', '0:reciprocal'),\n *connect('minus_one', '1:reciprocal'),\n *connect('reciprocal', '1:mul'),\n *connect('mul', 'output'),\n ], nodes_with_edges_only=True)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)\n self.assertTrue(flag, resp)\n self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='Multiply')[0]]['name'] == 'my_div')\n\n def test_div_with_integer(self):\n # Test where transformation should not be applied because the divisor is integer\n graph = build_graph({\n **regular_op_with_shaped_data('parameter', [1, 227, 227, 3], {'type': 'Parameter', 'data_type': np.int32}),\n **valued_const_with_data('const', np.array([-1.], dtype=np.int32)),\n **regular_op_with_shaped_data('div', None, {'op': 'Div', 'type': 'Divide', 'name': 'my_div'}),\n **result()},\n [\n *connect('parameter:0', '0:div'),\n *connect_data('const:0', '1:div'),\n *connect('div', 'output'),\n ])\n graph_ref = graph.copy()\n Div().find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)\n self.assertTrue(flag, resp)\n", "# Copyright (C) 2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport onnx\nimport numpy as np\nfrom onnx.helper import make_graph, make_model, make_tensor_value_info\nimport pytest\n\nfrom ngraph.frontend import FrontEndManager\nfrom tests.runtime import get_runtime\n\n\ndef create_onnx_model():\n add = 
onnx.helper.make_node(\"Add\", inputs=[\"x\", \"y\"], outputs=[\"z\"])\n const_tensor = onnx.helper.make_tensor(\"const_tensor\", onnx.TensorProto.FLOAT, (2, 2), [0.5, 1, 1.5, 2.0])\n const_node = onnx.helper.make_node(\"Constant\", [], outputs=[\"const_node\"],\n value=const_tensor, name=\"const_node\")\n mul = onnx.helper.make_node(\"Mul\", inputs=[\"z\", \"const_node\"], outputs=[\"out\"])\n input_tensors = [\n make_tensor_value_info(\"x\", onnx.TensorProto.FLOAT, (2, 2)),\n make_tensor_value_info(\"y\", onnx.TensorProto.FLOAT, (2, 2)),\n ]\n output_tensors = [make_tensor_value_info(\"out\", onnx.TensorProto.FLOAT, (2, 2))]\n graph = make_graph([add, const_node, mul], \"graph\", input_tensors, output_tensors)\n return make_model(graph, producer_name=\"ngraph ONNX Importer\")\n\n\ndef run_function(function, *inputs, expected):\n runtime = get_runtime()\n computation = runtime.computation(function)\n actual = computation(*inputs)\n assert len(actual) == len(expected)\n for i in range(len(actual)):\n np.testing.assert_allclose(expected[i], actual[i], rtol=1e-3, atol=1e-6)\n\n\nfem = FrontEndManager()\nonnx_model_filename = \"model.onnx\"\nONNX_FRONTEND_NAME = \"onnx\"\n\n\ndef setup_module():\n onnx.save_model(create_onnx_model(), onnx_model_filename)\n\n\ndef teardown_module():\n os.remove(onnx_model_filename)\n\n\ndef skip_if_onnx_frontend_is_disabled():\n front_ends = fem.get_available_front_ends()\n if ONNX_FRONTEND_NAME not in front_ends:\n pytest.skip()\n\n\ndef test_convert():\n skip_if_onnx_frontend_is_disabled()\n\n fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME)\n assert fe\n\n model = fe.load(onnx_model_filename)\n assert model\n\n function = fe.convert(model)\n assert function\n\n a = np.array([[1, 2], [3, 4]], dtype=np.float32)\n b = np.array([[2, 3], [4, 5]], dtype=np.float32)\n expected = np.array([[1.5, 5], [10.5, 18]], dtype=np.float32)\n run_function(function, a, b, expected=[expected])\n\n\ndef test_decode_and_convert():\n skip_if_onnx_frontend_is_disabled()\n\n fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME)\n assert fe\n\n model = fe.load(onnx_model_filename)\n assert model\n\n decoded_function = fe.decode(model)\n assert decoded_function\n for op in decoded_function.get_ordered_ops():\n assert op.get_type_name() in [\"Parameter\", \"Constant\", \"ONNXFrameworkNode\",\n \"ONNXSubgraphFrameworkNode\", \"Result\"]\n\n fe.convert(decoded_function)\n assert decoded_function\n for op in decoded_function.get_ordered_ops():\n assert op.get_type_name() not in [\"ONNXFrameworkNode\", \"ONNXSubgraphFrameworkNode\"]\n\n a = np.array([[1, 2], [3, 4]], dtype=np.float32)\n b = np.array([[2, 3], [4, 5]], dtype=np.float32)\n expected = np.array([[1.5, 5], [10.5, 18]], dtype=np.float32)\n run_function(decoded_function, a, b, expected=[expected])\n\n\ndef test_load_by_model():\n skip_if_onnx_frontend_is_disabled()\n\n fe = fem.load_by_model(onnx_model_filename)\n assert fe\n assert fe.get_name() == \"onnx\"\n model = fe.load(onnx_model_filename)\n assert model\n decoded_function = fe.decode(model)\n assert decoded_function\n\n assert not fem.load_by_model(\"test.xx\")\n assert not fem.load_by_model(\"onnx.yy\")\n", "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport pytest\n\nfrom common.tf_layer_test_class import CommonTFLayerTest\n\n\nclass TestBatchToSpace(CommonTFLayerTest):\n def create_batch_to_space_net(self, in_shape, crops_value, block_shape_value, out_shape, ir_version):\n \"\"\"\n Tensorflow net IR net\n\n 
Input->BatchToSpace => Input->BatchToSpace\n\n \"\"\"\n\n #\n # Create Tensorflow model\n #\n\n import tensorflow as tf\n\n tf.compat.v1.reset_default_graph()\n\n # Create the graph and model\n with tf.compat.v1.Session() as sess:\n x = tf.compat.v1.placeholder(tf.float32, in_shape, 'Input')\n crops = tf.constant(crops_value)\n block_shape = tf.constant(block_shape_value)\n tf.batch_to_space(x, block_shape, crops, name='Operation')\n\n tf.compat.v1.global_variables_initializer()\n tf_net = sess.graph_def\n\n #\n # Create reference IR net\n # Please, specify 'type': 'Input' for input node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n\n ref_net = None\n\n return tf_net, ref_net\n\n test_data_4D = [\n dict(in_shape=[4, 1, 1, 3], block_shape_value=[1], crops_value=[[0, 0]],\n out_shape=[4, 1, 1, 3]),\n dict(in_shape=[4, 1, 1, 3], block_shape_value=[2, 2], crops_value=[[0, 0], [0, 0]],\n out_shape=[1, 2, 2, 3]),\n dict(in_shape=[60, 100, 30, 30], block_shape_value=[3, 2], crops_value=[[1, 5], [4, 1]],\n out_shape=[2, 2, 1, 1]),\n # todo: enable these tests after supporting the general case on CPU\n # dict(in_shape=[4, 1, 1, 1], block_shape_value=[2, 1, 2], crops_value=[[0, 0], [0, 0], [0, 0]],\n # out_shape=[]),\n # dict(in_shape=[12, 1, 1, 3], block_shape_value=[3, 2, 2], crops_value=[[1, 0], [0, 1], [1, 1]],\n # out_shape=[1, 2, 1, 4]),\n # dict(in_shape=[36, 2, 2, 3], block_shape_value=[2, 3, 3], crops_value=[[1, 0], [0, 0], [2, 2]],\n # out_shape=[2, 3, 6, 5])\n ]\n\n @pytest.mark.parametrize(\"params\", test_data_4D)\n @pytest.mark.nightly\n def test_batch_to_space_4D(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_batch_to_space_net(**params, ir_version=ir_version),\n ie_device, precision, ir_version, temp_dir=temp_dir)\n\n test_data_5D = [\n dict(in_shape=[72, 2, 1, 4, 2], block_shape_value=[3, 4, 2], crops_value=[[1, 2], [0, 0], [3, 0]],\n out_shape=[3, 3, 4, 5, 2]),\n # todo: enable these tests after supporting the general case on CPU\n # dict(in_shape=[144, 2, 1, 4, 1], block_shape_value=[3, 4, 2, 2],\n # crops_value=[[1, 2], [0, 0], [3, 0], [0, 0]], out_shape=[3, 3, 4, 5, 2]),\n ]\n\n @pytest.mark.parametrize(\"params\", test_data_5D)\n @pytest.mark.nightly\n def test_batch_to_space_5D(self, params, ie_device, precision, ir_version, temp_dir):\n self._test(*self.create_batch_to_space_net(**params, ir_version=ir_version),\n ie_device, precision, ir_version, temp_dir=temp_dir)\n", "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport re\n\nimport tensorflow as tf\nimport numpy as np\n\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\ndef load_graph(model_file, output_nodes_for_freeze=None):\n is_meta = os.path.splitext(model_file)[-1] == \".meta\"\n\n tf.compat.v1.reset_default_graph()\n graph = tf.Graph()\n graph_def = tf.compat.v1.GraphDef() if not is_meta else tf.compat.v1.MetaGraphDef()\n\n with open(model_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n\n nodes_to_clear_device = graph_def.node if isinstance(graph_def, tf.compat.v1.GraphDef) else graph_def.graph_def.node\n for node in nodes_to_clear_device:\n node.device = \"\"\n\n if is_meta:\n with tf.compat.v1.Session() as sess:\n restorer = tf.compat.v1.train.import_meta_graph(graph_def)\n restorer.restore(sess, re.sub('\\.meta$', '', model_file))\n graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(sess, graph_def.graph_def, output_nodes_for_freeze)\n\n with graph.as_default():\n 
tf.import_graph_def(graph_def, name='')\n\n return graph\n\n\ndef collect_tf_references(model_path, feed_dict, out_layer, output_nodes_for_freeze=None):\n _feed_dict = dict()\n\n graph = load_graph(model_path, output_nodes_for_freeze)\n output_tensors_list = list()\n outputs_list = list()\n for input in feed_dict:\n input_node = [node for node in graph.as_graph_def().node if node.name == input][0]\n if input_node.op == \"Placeholder\":\n tensor = graph.get_tensor_by_name(input + \":0\")\n _feed_dict[tensor] = feed_dict[input]\n else:\n for parrent_input in input_node.input:\n in_node = [node for node in graph.as_graph_def().node if node.name == parrent_input][0]\n if in_node.op in ['Const', 'Assign', 'NoOp', 'Assert']:\n continue\n else:\n tensor = graph.get_tensor_by_name(parrent_input + \":0\")\n _feed_dict[tensor] = feed_dict[input]\n\n for output in out_layer:\n tensor = graph.get_tensor_by_name(output + \":0\")\n output_tensors_list.append(tensor)\n outputs_list.append(output)\n with graph.as_default():\n with tf.compat.v1.Session(graph=graph) as sess:\n outputs = sess.run(output_tensors_list, feed_dict=_feed_dict)\n out_dict = dict(zip(outputs_list, outputs))\n return out_dict\n\n\ndef children(op, graph):\n op = graph.get_operation_by_name(op)\n return set(op for out in op.outputs for op in out.consumers())\n\n\ndef summarize_graph(model_path, output_nodes_for_freeze=None, reshape_net=None):\n placeholders = dict()\n variables = list()\n outputs = list()\n graph = load_graph(model_path, output_nodes_for_freeze)\n unlikely_output_types = ['Const', 'Assign', 'NoOp', 'Placeholder', 'Assert', 'switch_t', 'switch_f']\n for node in graph.as_graph_def().node:\n if node.op == 'Placeholder':\n node_dict = dict()\n node_dict['type'] = tf.DType(node.attr['dtype'].type).name\n node_dict['shape'] = str(node.attr['shape'].shape.dim).replace('\\n', '').replace(' ', '').replace(\n 'size:', '').replace('[', '').replace(']', '')\n node_dict['shape'] = tuple(map(lambda x: int(x), node_dict['shape'].split(',')))\n placeholders[node.name] = node_dict\n if node.op == \"Variable\" or node.op == \"VariableV2\":\n variables.append(node.name)\n if len(children(node.name, graph)) == 0:\n if node.op not in unlikely_output_types and node.name.split('/')[-1] not in unlikely_output_types:\n outputs.append(node.name)\n result = dict()\n result['inputs'] = placeholders\n result['outputs'] = outputs\n\n if reshape_net:\n out_layer = list(result['inputs'].keys()) + result['outputs']\n feed_dict = {}\n for inputl in reshape_net:\n feed_dict.update({inputl: np.ones(shape=reshape_net[inputl])})\n scoring_res = collect_tf_references(model_path=model_path, feed_dict=feed_dict, out_layer=out_layer)\n for layer in scoring_res:\n if layer in result['inputs']:\n result['inputs'][layer]['shape'] = scoring_res[layer].shape\n\n return result\n", "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport unittest\n\nimport numpy as np\n\nfrom extensions.front.mxnet.conv_ext import DeconvFrontExtractor\nfrom unit_tests.utils.extractors import PB\n\n\nclass TestDeconvShapesParsing(unittest.TestCase):\n def test_conv_ext_ideal_numbers(self):\n params = {'attrs': {\n \"kernel\": \"(4, 4)\",\n \"no_bias\": \"True\",\n \"num_filter\": \"21\",\n \"num_group\": \"14\",\n \"pad\": \"(4, 4)\",\n \"stride\": \"(2, 2)\",\n \"dilate\": \"(3, 3)\",\n \"workspace\": \"1536\"\n }}\n node = PB({'symbol_dict': params})\n DeconvFrontExtractor.extract(node)\n exp_res = {\n 'op': 'Deconvolution',\n 'pad': 
np.array([[0, 0], [0, 0], [4, 4], [4, 4]]),\n 'pad_spatial_shape': np.array([[4, 4], [4, 4]]),\n 'stride': np.array([1, 1, 2, 2]),\n 'kernel_spatial': np.array([4, 4]),\n 'dilation': np.array([1, 1, 3, 3]),\n 'group': 14,\n 'output': 21,\n 'bias_addable': True,\n 'bias_term': False,\n }\n for key in exp_res.keys():\n if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):\n np.testing.assert_equal(node[key], exp_res[key])\n else:\n self.assertEqual(node[key], exp_res[key])\n\n\n def test_conv_ext_no_bias(self):\n params = { 'attrs':{\n \"kernel\": \"(4, 4)\",\n \"num_filter\": \"21\",\n \"num_group\": \"14\",\n \"pad\": \"(4, 4)\",\n \"stride\": \"(2, 2)\",\n \"dilate\": \"(3, 3)\",\n \"workspace\": \"1536\"\n }}\n node = PB({'symbol_dict': params})\n DeconvFrontExtractor.extract(node)\n exp_res = {\n 'op': 'Deconvolution',\n 'pad': np.array([[0, 0], [0, 0], [4, 4], [4, 4]]),\n 'pad_spatial_shape': np.array([[4, 4], [4, 4]]),\n 'stride': np.array([1, 1, 2, 2]),\n 'kernel_spatial': np.array([4, 4]),\n 'dilation': np.array([1, 1, 3, 3]),\n 'group': 14,\n 'output': 21,\n 'bias_addable': True,\n 'bias_term': False,\n }\n for key in exp_res.keys():\n if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):\n np.testing.assert_equal(node[key], exp_res[key])\n else:\n self.assertEqual(node[key], exp_res[key])\n\n\n def test_conv_ext_with_bias(self):\n params = { 'attrs':{\n \"kernel\": \"(4, 4)\",\n \"no_bias\": \"False\",\n \"num_filter\": \"21\",\n \"num_group\": \"14\",\n \"pad\": \"(4, 4)\",\n \"stride\": \"(2, 2)\",\n \"dilate\": \"(3, 3)\",\n \"workspace\": \"1536\"\n }}\n node = PB({'symbol_dict': params})\n DeconvFrontExtractor.extract(node)\n exp_res = {\n 'op': 'Deconvolution',\n 'pad': np.array([[0, 0], [0, 0], [4, 4], [4, 4]]),\n 'pad_spatial_shape': np.array([[4, 4], [4, 4]]),\n 'stride': np.array([1, 1, 2, 2]),\n 'kernel_spatial': np.array([4, 4]),\n 'dilation': np.array([1, 1, 3, 3]),\n 'group': 14,\n 'output': 21,\n 'bias_addable': True,\n 'bias_term': True,\n }\n for key in exp_res.keys():\n if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):\n np.testing.assert_equal(node[key], exp_res[key])\n else:\n self.assertEqual(node[key], exp_res[key])\n\n\n def test_deconv_ext_target_shape(self):\n params = {'attrs': {\n \"kernel\": \"(4, 4)\",\n \"no_bias\": \"True\",\n \"num_filter\": \"21\",\n \"num_group\": \"14\",\n \"pad\": \"(4, 4)\",\n \"stride\": \"(2, 2)\",\n \"dilate\": \"(3, 3)\",\n \"workspace\": \"1536\",\n \"target_shape\": \"(120, 120)\"\n }}\n node = PB({'symbol_dict': params})\n DeconvFrontExtractor.extract(node)\n exp_res = {\n 'op': 'Deconvolution',\n 'pad': np.array([[0, 0], [0, 0], [4, 4], [4, 4]]),\n 'pad_spatial_shape': np.array([[4, 4], [4, 4]]),\n 'stride': np.array([1, 1, 2, 2]),\n 'kernel_spatial': np.array([4, 4]),\n 'dilation': np.array([1, 1, 3, 3]),\n 'group': 14,\n 'output': 21,\n 'bias_addable': True,\n 'bias_term': False,\n 'output_spatial_shape': np.array([120, 120]),\n }\n for key in exp_res.keys():\n if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation', 'output_spatial_shape'):\n np.testing.assert_equal(node[key], exp_res[key])\n else:\n self.assertEqual(node[key], exp_res[key])\n\n def test_deconv_ext_output_pad(self):\n params = {'attrs': {\n \"kernel\": \"(4, 4)\",\n \"no_bias\": \"True\",\n \"num_filter\": \"21\",\n \"num_group\": \"14\",\n \"pad\": \"(4, 4)\",\n \"stride\": \"(2, 2)\",\n \"dilate\": \"(3, 3)\",\n \"workspace\": \"1536\",\n 
\"adj\": \"(1, 1)\"\n }}\n node = PB({'symbol_dict': params})\n DeconvFrontExtractor.extract(node)\n exp_res = {\n 'op': 'Deconvolution',\n 'pad': np.array([[0, 0], [0, 0], [4, 4], [4, 4]]),\n 'pad_spatial_shape': np.array([[4, 4], [4, 4]]),\n 'stride': np.array([1, 1, 2, 2]),\n 'kernel_spatial': np.array([4, 4]),\n 'dilation': np.array([1, 1, 3, 3]),\n 'group': 14,\n 'output': 21,\n 'bias_addable': True,\n 'bias_term': False,\n 'output_padding': np.array([0, 0, 1, 1]),\n }\n for key in exp_res.keys():\n if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation', 'output_spatial_shape', 'output_padding'):\n np.testing.assert_equal(node[key], exp_res[key])\n else:\n self.assertEqual(node[key], exp_res[key])\n\n def test_deconv_ext_target_shape_with_output_pad(self):\n params = {'attrs': {\n \"kernel\": \"(4, 4)\",\n \"no_bias\": \"True\",\n \"num_filter\": \"21\",\n \"num_group\": \"14\",\n \"pad\": \"(4, 4)\",\n \"stride\": \"(2, 2)\",\n \"dilate\": \"(3, 3)\",\n \"workspace\": \"1536\",\n \"target_shape\": \"(120, 120)\",\n \"adj\": \"(1, 1)\"\n }}\n node = PB({'symbol_dict': params})\n DeconvFrontExtractor.extract(node)\n exp_res = {\n 'op': 'Deconvolution',\n 'pad': np.array([[0, 0], [0, 0], [4, 4], [4, 4]]),\n 'pad_spatial_shape': np.array([[4, 4], [4, 4]]),\n 'stride': np.array([1, 1, 2, 2]),\n 'kernel_spatial': np.array([4, 4]),\n 'dilation': np.array([1, 1, 3, 3]),\n 'group': 14,\n 'output': 21,\n 'bias_addable': True,\n 'bias_term': False,\n 'output_spatial_shape': np.array([120, 120]),\n }\n for key in exp_res.keys():\n if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation', 'output_spatial_shape'):\n np.testing.assert_equal(node[key], exp_res[key])\n else:\n self.assertEqual(node[key], exp_res[key])\n", "import numpy as np\nimport paddle as pdpd\npdpd.enable_static()\nfrom save_model import saveModel\nimport sys\n\n\ndef run_and_save_model(input_x, name, feed, fetch_list, main_prog, start_prog):\n cpu = pdpd.static.cpu_places(1)\n exe = pdpd.static.Executor(cpu[0])\n exe.run(start_prog)\n outs = exe.run(\n feed={'x': input_x},\n fetch_list=fetch_list,\n program=main_prog)\n with pdpd.static.program_guard(main_prog, start_prog):\n saveModel(name, exe, feedkeys=['x'], fetchlist=fetch_list, inputs=[input_x],\n outputs=[outs[0]], target_dir=sys.argv[1])\n\n\ndef pdpd_conv2d_transpose(input_x, name, input_shape, kernel, dilation, padding, stride, groups=1, use_cudnn=True):\n main_program = pdpd.static.Program()\n startup_program = pdpd.static.Program()\n with pdpd.static.program_guard(main_program, startup_program):\n data = pdpd.static.data(name='x', shape=input_shape, dtype='float32')\n weight_attr = pdpd.ParamAttr(name=\"conv2d_weight\", initializer=pdpd.nn.initializer.Assign(kernel))\n conv2d = pdpd.static.nn.conv2d_transpose(input=data, num_filters=kernel.shape[0], filter_size=kernel.shape[2:4],\n padding=padding, param_attr=weight_attr, dilation=dilation, stride=stride, groups=groups, use_cudnn=use_cudnn)\n run_and_save_model(input_x, name, data, conv2d, main_program, startup_program)\n\n\nif __name__ == \"__main__\":\n\n test_cases =[\n {\n \"input_x\": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor\n [5., 6., 7., 8., 9.],\n [10., 11., 12., 13., 14.],\n [15., 16., 17., 18., 19.],\n [20., 21., 22., 23., 24.],\n [25., 26., 27., 28., 29.],\n [30., 31., 32., 33., 34.,]]]]).astype(np.float32),\n \"name\": \"conv2d_transpose_SAME_padding\",\n \"input_shape\": [1, 1, 7, 5],\n \"kernel\": np.array([[[[1., 1., 1.],[1., 1., 
1.],[1., 1., 1.]]]]).astype(np.float32),\n \"dilation\": 1,\n \"padding\": \"SAME\",\n \"stride\" : 2,\n },\n {\n \"input_x\": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor\n [5., 6., 7., 8., 9.],\n [10., 11., 12., 13., 14.],\n [15., 16., 17., 18., 19.],\n [20., 21., 22., 23., 24.],\n [25., 26., 27., 28., 29.],\n [30., 31., 32., 33., 34.,]]]]).astype(np.float32),\n \"name\": \"conv2d_transpose_VALID_padding\",\n \"input_shape\": [1, 1, 7, 5],\n \"kernel\": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32),\n \"dilation\": 1,\n \"padding\": \"VALID\",\n \"stride\" : 2,\n },\n {\n \"input_x\": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor\n [5., 6., 7., 8., 9.],\n [10., 11., 12., 13., 14.],\n [15., 16., 17., 18., 19.],\n [20., 21., 22., 23., 24.],\n [25., 26., 27., 28., 29.],\n [30., 31., 32., 33., 34.,]]]]).astype(np.float32),\n \"name\": \"conv2d_transpose_strides_padding\",\n \"input_shape\": [1, 1, 7, 5],\n \"kernel\": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32),\n \"dilation\": 1,\n \"padding\": 1,\n \"stride\" : 2,\n },\n { \"input_x\": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor\n [5., 6., 7., 8., 9.],\n [10., 11., 12., 13., 14.],\n [15., 16., 17., 18., 19.],\n [20., 21., 22., 23., 24.],\n [25., 26., 27., 28., 29.],\n [30., 31., 32., 33., 34.,]]]]).astype(np.float32),\n \"name\": \"conv2d_transpose_strides_no_padding\",\n \"input_shape\": [1, 1, 7, 5],\n \"kernel\": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32),\n \"dilation\": 1,\n \"padding\": 0,\n \"stride\" : 2,\n },\n { \"input_x\": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor\n [5., 6., 7., 8., 9.],\n [10., 11., 12., 13., 14.],\n [15., 16., 17., 18., 19.],\n [20., 21., 22., 23., 24.],\n [25., 26., 27., 28., 29.],\n [30., 31., 32., 33., 34.,]]]]).astype(np.float32),\n \"name\": \"conv2d_transpose_strides_assymetric_padding\",\n \"input_shape\": [1, 1, 7, 5],\n \"kernel\": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32),\n \"dilation\": 1,\n \"padding\": [1,1,0,1],\n \"stride\" : 2,\n },\n {\n \"input_x\": np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor\n [5., 6., 7., 8., 9.],\n [10., 11., 12., 13., 14.],\n [15., 16., 17., 18., 19.],\n [20., 21., 22., 23., 24.],\n [25., 26., 27., 28., 29.],\n [30., 31., 32., 33., 34.,]]]]).astype(np.float32),\n \"name\": \"conv2d_transpose_dilation_assymetric_pads_strides\",\n \"input_shape\": [1, 1, 7, 5],\n \"kernel\": np.array([[[[1., 1., 1.],[1., 1., 1.],[1., 1., 1.]]]]).astype(np.float32),\n \"dilation\": 1,\n \"padding\": [1, 1, 1, 2],\n \"stride\" : [3, 1],\n },\n {\n \"input_x\": np.arange(27).astype(np.float32).reshape([1, 3, 3, 3]),\n \"name\": \"depthwise_conv2d_transpose_convolution\",\n \"input_shape\": [1, 3, 3, 3],\n \"kernel\": np.ones([3, 1, 3, 3]).astype(np.float32),\n \"dilation\": 1,\n \"padding\": 1,\n \"stride\": 1,\n \"groups\": 3,\n \"use_cudnn\": False\n }\n ]\n for test in test_cases:\n\n pdpd_conv2d_transpose(test['input_x'], test['name'], test[\"input_shape\"],\n test['kernel'], test['dilation'],\n test['padding'],\n test['stride'],\n 1 if \"groups\" not in test else test['groups'],\n True if \"use_cudnn\" not in test else test['use_cudnn'])\n\n\n", "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom mo.front.common.partial_infer.eltwise import eltwise_infer\nfrom mo.graph.graph import Graph, Node\nfrom 
mo.middle.passes.convert_data_type import data_type_str_to_np\nfrom mo.ops.op import Op\n\n\nclass AttributedPower(Op):\n op = 'AttributedPower'\n enabled = False\n\n def __init__(self, graph: Graph, attrs: dict):\n super().__init__(graph, {\n 'op': self.op,\n 'type': 'Power',\n\n 'power': 1,\n 'scale': 1,\n 'shift': 0,\n\n 'infer': self.infer,\n 'type_infer': self.type_infer,\n\n 'in_ports_count': 1,\n 'out_ports_count': 1,\n }, attrs)\n\n def supported_attrs(self):\n return ['power', 'scale', 'shift']\n\n @staticmethod\n def type_infer(node: Node):\n node.out_port(0).set_data_type(data_type_str_to_np(node.graph.graph['cmd_params'].data_type))\n\n @staticmethod\n def infer(node: Node):\n name = node.soft_get('name', node.id)\n connected_inputs = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()}\n assert len(connected_inputs) == 1 and 0 in connected_inputs, \\\n \"AttributedPower should have 1 connected input port, but it doesn't for node: `{}`. Ports: {}\" \\\n \"\".format(name, connected_inputs)\n\n assert node.has_valid('scale'), \\\n 'AttributedPower operation should have `scale` parameter set, but it doesn`t for node {}'.format(name)\n assert node.has_valid('shift'), \\\n 'AttributedPower operation should have `shift` parameter set, but it doesn`t for node {}'.format(name)\n assert node.has_valid('power'), \\\n 'AttributedPower operation should have `power` parameter set, but it doesn`t for node {}'.format(name)\n\n eltwise_infer(node, lambda a: np.power(a * node.scale + node.shift, node.power))\n", "# Copyright (C) 2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\nimport os\nimport pytest\n\nfrom openvino import Core, Blob, TensorDesc, StatusCode\n\n\ndef image_path():\n path_to_repo = os.environ[\"DATA_PATH\"]\n path_to_img = os.path.join(path_to_repo, \"validation_set\", \"224x224\", \"dog.bmp\")\n return path_to_img\n\n\ndef model_path(is_myriad=False):\n path_to_repo = os.environ[\"MODELS_PATH\"]\n if not is_myriad:\n test_xml = os.path.join(path_to_repo, \"models\", \"test_model\", \"test_model_fp32.xml\")\n test_bin = os.path.join(path_to_repo, \"models\", \"test_model\", \"test_model_fp32.bin\")\n else:\n test_xml = os.path.join(path_to_repo, \"models\", \"test_model\", \"test_model_fp16.xml\")\n test_bin = os.path.join(path_to_repo, \"models\", \"test_model\", \"test_model_fp16.bin\")\n return (test_xml, test_bin)\n\n\ndef read_image():\n import cv2\n n, c, h, w = (1, 3, 32, 32)\n image = cv2.imread(path_to_img)\n if image is None:\n raise FileNotFoundError(\"Input image not found\")\n\n image = cv2.resize(image, (h, w)) / 255\n image = image.transpose((2, 0, 1)).astype(np.float32)\n image = image.reshape((n, c, h, w))\n return image\n\n\nis_myriad = os.environ.get(\"TEST_DEVICE\") == \"MYRIAD\"\ntest_net_xml, test_net_bin = model_path(is_myriad)\npath_to_img = image_path()\n\n\ndef test_get_perf_counts(device):\n ie_core = Core()\n net = ie_core.read_network(test_net_xml, test_net_bin)\n ie_core.set_config({\"PERF_COUNT\": \"YES\"}, device)\n exec_net = ie_core.load_network(net, device)\n img = read_image()\n request = exec_net.create_infer_request()\n td = TensorDesc(\"FP32\", [1, 3, 32, 32], \"NCHW\")\n input_blob = Blob(td, img)\n request.set_input({\"data\": input_blob})\n request.infer()\n pc = request.get_perf_counts()\n assert pc[\"29\"][\"status\"] == \"EXECUTED\"\n assert pc[\"29\"][\"layer_type\"] == \"FullyConnected\"\n del exec_net\n del ie_core\n del net\n\n\[email 
protected](os.environ.get(\"TEST_DEVICE\", \"CPU\") != \"CPU\",\n reason=f\"Can't run test on device {os.environ.get('TEST_DEVICE', 'CPU')}, \"\n \"Dynamic batch fully supported only on CPU\")\[email protected](reason=\"Fix\")\ndef test_set_batch_size(device):\n ie_core = Core()\n ie_core.set_config({\"DYN_BATCH_ENABLED\": \"YES\"}, device)\n net = ie_core.read_network(test_net_xml, test_net_bin)\n net.batch_size = 10\n data = np.ones(shape=net.input_info[\"data\"].input_data.shape)\n exec_net = ie_core.load_network(net, device)\n data[0] = read_image()[0]\n request = exec_net.create_infer_request()\n request.set_batch(1)\n td = TensorDesc(\"FP32\", [1, 3, 32, 32], \"NCHW\")\n input_blob = Blob(td, data)\n request.set_input({\"data\": input_blob})\n request.infer()\n assert np.allclose(int(round(request.output_blobs[\"fc_out\"].buffer[0][2])), 1), \\\n \"Incorrect data for 1st batch\"\n del exec_net\n del ie_core\n del net\n\n\[email protected](reason=\"Fix\")\ndef test_set_zero_batch_size(device):\n ie_core = Core()\n net = ie_core.read_network(test_net_xml, test_net_bin)\n exec_net = ie_core.load_network(net, device)\n request = exec_net.create_infer_request()\n with pytest.raises(ValueError) as e:\n request.set_batch(0)\n assert \"Batch size should be positive integer number but 0 specified\" in str(e.value)\n del exec_net\n del ie_core\n del net\n\n\[email protected](reason=\"Fix\")\ndef test_set_negative_batch_size(device):\n ie_core = Core()\n net = ie_core.read_network(test_net_xml, test_net_bin)\n exec_net = ie_core.load_network(net, device)\n request = exec_net.create_infer_request()\n with pytest.raises(ValueError) as e:\n request.set_batch(-1)\n assert \"Batch size should be positive integer number but -1 specified\" in str(e.value)\n del exec_net\n del ie_core\n del net\n\n\ndef test_blob_setter(device):\n ie_core = Core()\n net = ie_core.read_network(test_net_xml, test_net_bin)\n exec_net_1 = ie_core.load_network(network=net, device_name=device)\n\n net.input_info[\"data\"].layout = \"NHWC\"\n exec_net_2 = ie_core.load_network(network=net, device_name=device)\n\n img = read_image()\n\n request1 = exec_net_1.create_infer_request()\n tensor_desc = TensorDesc(\"FP32\", [1, 3, img.shape[2], img.shape[3]], \"NCHW\")\n img_blob1 = Blob(tensor_desc, img)\n request1.set_input({\"data\": img_blob1})\n request1.infer()\n res_1 = np.sort(request1.get_blob(\"fc_out\").buffer)\n\n img = np.transpose(img, axes=(0, 2, 3, 1)).astype(np.float32)\n tensor_desc = TensorDesc(\"FP32\", [1, 3, 32, 32], \"NHWC\")\n img_blob = Blob(tensor_desc, img)\n request = exec_net_2.create_infer_request()\n request.set_blob(\"data\", img_blob)\n request.infer()\n res_2 = np.sort(request.get_blob(\"fc_out\").buffer)\n assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2)\n\n\ndef test_cancel(device):\n ie_core = Core()\n net = ie_core.read_network(test_net_xml, test_net_bin)\n exec_net = ie_core.load_network(net, device)\n img = read_image()\n td = TensorDesc(\"FP32\", [1, 3, 32, 32], \"NCHW\")\n input_blob = Blob(td, img)\n request = exec_net.create_infer_request()\n\n def callback(req, code, array):\n array.append(42)\n\n data = []\n request.set_completion_callback(callback, data)\n request.set_input({\"data\": input_blob})\n request.async_infer()\n request.cancel()\n with pytest.raises(RuntimeError) as e:\n request.wait()\n assert \"[ INFER_CANCELLED ]\" in str(e.value)\n # check if callback has executed\n assert data == [42]\n\n request.async_infer()\n status = request.wait()\n assert status == 
StatusCode.OK\n assert data == [42, 42]\n", "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom mo.front.common.partial_infer.utils import shape_insert\nfrom mo.graph.graph import Node, Graph\nfrom mo.middle.passes.fusing.helpers import get_tensor_in_port, get_value_in_port\nfrom mo.middle.replacement import MiddleReplacementPattern\n\n\nclass EltwiseChecker(MiddleReplacementPattern):\n \"\"\"\n Checks if element-wise operation can be converted to ScaleShift or not:\n decision gets made by verifying constant input value shape is like 1,N,1,1\n \"\"\"\n enabled = True\n\n def run_after(self):\n from extensions.middle.EltwiseInputReshape import Eltwise1DInputReshape\n return [Eltwise1DInputReshape]\n\n def run_before(self):\n from extensions.middle.pass_separator import MiddleFinish\n return [MiddleFinish]\n\n @staticmethod\n def set_flags_to_false(node: Node, flags: list):\n for flag in flags:\n node[flag] = False\n\n def mark_eltwise_node(self, node, feature_channel=None):\n tensor_port, value_port = get_tensor_in_port(node), get_value_in_port(node)\n if tensor_port is None or value_port is None:\n self.set_flags_to_false(node, ['can_be_fused', 'can_be_scaleshift'])\n return\n\n connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()}\n if len(connected_in_ports) != 2:\n return\n\n tensor_shape = tensor_port.data.get_shape()\n out_shape = node.out_port(0).data.get_shape()\n assert tensor_shape is not None and out_shape is not None\n if not np.array_equal(tensor_shape, out_shape):\n # ScaleShift operation doesn't support broadcasting\n self.set_flags_to_false(node, ['can_be_fused', 'can_be_scaleshift'])\n return\n\n value_shape = value_port.data.get_shape()\n assert value_shape is not None\n assert len(value_shape) <= len(tensor_shape), \\\n \"No broadcasting was done for elementwise node {} due to previous checks in EltwiseChecker class. 
\" \\\n \"But constant input rank is larger than tensor input rank, that is inconsistent\".format(node.name)\n\n # if both tensors are 0D they cannot be converted to scaleshift\n if len(tensor_shape) == 0 and len(value_shape) == 0:\n self.set_flags_to_false(node, ['can_be_scaleshift'])\n return\n\n broadcasted_value_shape = shape_insert(value_shape, 0, [1] * (len(tensor_shape) - len(value_shape)))\n\n feature_dim = min(1, tensor_shape.size - 1) if node.graph.graph['layout'] == 'NCHW' else -1\n if feature_channel is not None:\n feature_dim = feature_channel\n ones = np.ones(len(tensor_shape))\n possible_shape = ones.copy()\n np.put(possible_shape, feature_dim, tensor_shape.item(feature_dim))\n\n if not np.array_equal(broadcasted_value_shape, ones) and \\\n not np.array_equal(broadcasted_value_shape, possible_shape):\n # ScaleShift weights should have [1,C,1,1]-like or [1,1,1,1]-like shape\n self.set_flags_to_false(node, ['can_be_fused', 'can_be_scaleshift'])\n return\n\n if len(tensor_shape) not in [2, 4, 5]:\n # ScaleShift operation is supported for 2D, 4D or 5D tensor inputs\n self.set_flags_to_false(node, ['can_be_scaleshift'])\n return\n\n def find_and_replace_pattern(self, graph: Graph, feature_channel=None):\n for node in graph.get_op_nodes(is_eltwise=True):\n self.mark_eltwise_node(node)\n", "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom mo.graph.graph import Node, Graph\nfrom mo.ops.op import Op, PermuteAttrs\nfrom mo.utils.error import Error\n\n\nclass ReorgYoloOp(Op):\n op = 'ReorgYolo'\n\n def __init__(self, graph: Graph, attrs: dict):\n mandatory_props = {\n 'type': self.op,\n 'op': self.op,\n 'version': 'opset2',\n 'infer': ReorgYoloOp.reorgyolo_infer\n }\n super().__init__(graph, mandatory_props, attrs)\n\n def supported_attrs(self):\n return [\n 'stride'\n ]\n\n @staticmethod\n def reorgyolo_infer(node: Node):\n input_shape = node.in_node(0).shape\n if input_shape is None:\n raise Error('Input shape for operation \"{}\" is None'.format(node.soft_get('name', node.id)))\n\n stride = node.stride\n\n output_shape = input_shape.copy()\n output_shape[node.batch_dims] = input_shape[node.batch_dims] # pylint: disable=unsupported-assignment-operation\n output_shape[node.channel_dims] = input_shape[node.channel_dims] * stride ** 2 # pylint: disable=unsupported-assignment-operation\n # Round as in caffe\n output_shape[node.spatial_dims] = np.ma.round(input_shape[node.spatial_dims] / stride) # pylint: disable=unsupported-assignment-operation\n\n node.out_port(0).data.set_shape(output_shape)\n PermuteAttrs.create_permute_attrs(node, attrs=[('channel_dims', 'input:0'), ('spatial_dims', 'input:0')])\n", "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport unittest\n\nimport numpy as np\n\nfrom extensions.front.AttributedClampNormalizer import AttributedClampNormalizer\nfrom mo.utils.ir_engine.compare_graphs import compare_graphs\nfrom unit_tests.utils.graph import build_graph, const\n\nnodes_attributes = {\n 'placeholder': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'attr_clamp': {'type': 'Clamp', 'kind': 'op', 'op': 'AttributedClamp', 'name': 'attr_clamp',\n 'min': np.array(-3.5, dtype=np.float32), 'max': np.array(3.5, dtype=np.float32)},\n 'result': {'type': 'Result', 'value': None, 'kind': 'op', 'op': 'Result'},\n\n # new Clamp layer and inputs\n 'clamp': {'type': None, 'kind': 'op', 'op': 'Clamp'},\n **const('min', np.array(-3.5, dtype=np.float32)),\n 
**const('max', np.array(3.5, dtype=np.float32)),\n}\n\n\nclass AttributedClampNormalizerTest(unittest.TestCase):\n def test_1(self):\n graph = build_graph(nodes_attributes,\n [('placeholder', 'attr_clamp', {'in': 0, 'out': 0}),\n ('attr_clamp', 'result', {'in': 0, 'out': 0}),\n ],\n {}, nodes_with_edges_only=True)\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder', 'clamp', {'in': 0, 'out': 0}),\n ('min', 'clamp', {'in': 1, 'out': 0}),\n ('max', 'clamp', {'in': 2, 'out': 0}),\n ('clamp', 'result')\n ],\n {}, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NCHW'\n graph.stage = 'front'\n\n replacer = AttributedClampNormalizer()\n replacer.find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True)\n self.assertTrue(flag, resp)\n self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Clamp')[0]]['name'] == 'attr_clamp')\n", "import numpy as np\nfrom save_model import saveModel\nimport sys\n\ndef matmul(name, x1, x2, x_transpose=False, y_transpose=False):\n import paddle as pdpd\n\n pdpd.enable_static()\n with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):\n node_x1 = pdpd.static.data(name='x1', shape=x1.shape, dtype=x1.dtype)\n node_x2 = pdpd.static.data(name='x2', shape=x2.shape, dtype=x2.dtype)\n result = pdpd.matmul(node_x1, node_x2, x_transpose, y_transpose)\n #result = pdpd.static.nn.batch_norm(mul_node, use_global_stats=True)\n\n cpu = pdpd.static.cpu_places(1)\n exe = pdpd.static.Executor(cpu[0])\n # startup program will call initializer to initialize the parameters.\n exe.run(pdpd.static.default_startup_program())\n\n outs = exe.run(\n feed={'x1': x1, 'x2': x2},\n fetch_list=[result])\n saveModel(name, exe, feedkeys=['x1', 'x2'], fetchlist=[result], inputs=[x1, x2], outputs=[outs[0]], target_dir=sys.argv[1])\n\n return outs[0]\n\n\nif __name__ == \"__main__\":\n input_2x5 = np.array([[1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10]]).astype(np.float32)\n\n input_5x3 = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n [10, 11, 12],\n [13, 14, 15]]).astype(np.float32)\n\n input_5x2 = np.array([[1, 2],\n [4, 5],\n [7, 8],\n [10, 11],\n [13, 14]]).astype(np.float32)\n\n input_2x3 = np.array([[1, 2, 3],\n [4, 5, 6]]).astype(np.float32)\n\n input_1d = np.array([2, 3]).astype(np.float32)\n\n input_nd = np.random.rand(2, 1, 10, 3).astype(np.float32)\n input_md = np.random.rand(3, 3, 4).astype(np.float32)\n\n matmul(\"matmul_v2_1dx1d\", input_1d, input_1d)\n matmul(\"matmul_v2_1dx2d\", input_1d, input_2x3)\n matmul(\"matmul_v2_2dx1d\", input_5x2, input_1d)\n matmul(\"matmul_v2_ndxmd\", input_nd, input_md)\n\n matmul(\"matmul_v2_xt\", input_2x5, input_2x3, x_transpose=True, y_transpose=False)\n matmul(\"matmul_v2_yt\", input_2x3, input_5x3, x_transpose=False, y_transpose=True)\n matmul(\"matmul_v2_xt_yt\", input_2x5, input_5x2, x_transpose=True, y_transpose=True)\n", "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport unittest\n\nimport numpy as np\n\nfrom extensions.ops.split import AttributedSplit, AttributedVariadicSplit, VariadicSplit\nfrom mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value, strict_compare_tensors\nfrom mo.graph.graph import Node\nfrom mo.utils.ir_engine.compare_graphs import compare_graphs\nfrom unit_tests.utils.graph import build_graph\n\nfrom generator import generator, generate\n\n\nclass TestSplitOp(unittest.TestCase):\n nodes = {\n 'input': {'kind': 'op'},\n 'split_input_data': {'kind': 
'data', 'shape': None, 'value': None},\n 'split_op': {'kind': 'op', 'axis': None, 'num_splits': None, 'op': 'AttributedSplit'},\n 'split_output_0_data': {'kind': 'data', 'shape': None, 'value': None},\n 'output_0': {'kind': 'op'},\n 'split_output_1_data': {'kind': 'data', 'shape': None, 'value': None},\n 'output_1': {'kind': 'op'},\n }\n edges = [\n ('input', 'split_input_data'),\n ('split_input_data', 'split_op'),\n ('split_op', 'split_output_0_data'),\n ('split_output_0_data', 'output_0'),\n ('split_op', 'split_output_1_data'),\n ('split_output_1_data', 'output_1'),\n ]\n\n def test_split_shape_infer(self):\n # test configuration\n input_shape = [2, 10]\n input_value = None\n axis = 1\n num_splits = 2\n output_shape = [2, 5]\n output_value = [None, None]\n\n # action\n graph = build_graph(self.nodes, self.edges,\n {\n 'split_input_data': {'shape': int64_array(input_shape),\n 'value': input_value},\n 'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)},\n }\n )\n\n split_op = Node(graph, 'split_op')\n AttributedSplit.infer(split_op)\n\n # reference\n graph_ref = build_graph(self.nodes, self.edges,\n {\n 'split_input_data': {'shape': int64_array(input_shape),\n 'value': input_value},\n 'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)},\n 'split_output_0_data': {'shape': int64_array(output_shape),\n 'value': output_value[0]},\n 'split_output_1_data': {'shape': int64_array(output_shape),\n 'value': output_value[1]},\n }\n )\n\n # check\n (flag, resp) = compare_graphs(graph, graph_ref, 'split_input_data')\n self.assertTrue(flag, resp)\n\n def test_split_dynamic_shape_infer(self):\n # test configuration\n input_shape = [2, dynamic_dimension_value]\n input_value = None\n axis = 1\n num_splits = 2\n output_shape = [2, dynamic_dimension_value]\n output_value = [None, None]\n\n # action\n graph = build_graph(self.nodes, self.edges,\n {\n 'split_input_data': {'shape': shape_array(input_shape),\n 'value': input_value},\n 'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)},\n }\n )\n\n split_op = Node(graph, 'split_op')\n AttributedSplit.infer(split_op)\n\n # reference\n graph_ref = build_graph(self.nodes, self.edges,\n {\n 'split_input_data': {'shape': shape_array(input_shape),\n 'value': input_value},\n 'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)},\n 'split_output_0_data': {'shape': shape_array(output_shape),\n 'value': output_value[0]},\n 'split_output_1_data': {'shape': shape_array(output_shape),\n 'value': output_value[1]},\n }\n )\n\n # check\n (flag, resp) = compare_graphs(graph, graph_ref, 'split_input_data')\n self.assertTrue(flag, resp)\n self.assertTrue(strict_compare_tensors(Node(graph, 'split_output_0_data').shape, shape_array(output_shape)))\n\n def test_split_value_infer(self):\n # test configuration\n input_shape = [2, 10]\n input_value = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]]\n axis = 1\n num_splits = 2\n output_shape = [2, 5]\n output_value = [[[0, 1, 2, 3, 4], [10, 11, 12, 13, 14]], [[5, 6, 7, 8, 9], [15, 16, 17, 18, 19]]]\n\n # action\n graph = build_graph(self.nodes, self.edges,\n {\n 'split_input_data': {'shape': int64_array(input_shape),\n 'value': int64_array(input_value)},\n 'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)},\n }\n )\n\n split_op = Node(graph, 'split_op')\n AttributedSplit.infer(split_op)\n\n # reference\n graph_ref = build_graph(self.nodes, self.edges,\n {\n 'split_input_data': {'shape': 
int64_array(input_shape),\n 'value': int64_array(input_value)},\n 'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)},\n 'split_output_0_data': {'shape': int64_array(output_shape),\n 'value': int64_array(output_value[0])},\n 'split_output_1_data': {'shape': int64_array(output_shape),\n 'value': int64_array(output_value[1])},\n }\n )\n\n # check\n (flag, resp) = compare_graphs(graph, graph_ref, 'split_input_data')\n self.assertTrue(flag, resp)\n\n\nclass TestAttributedVariadicSplitOp(unittest.TestCase):\n nodes = {\n 'input': {'kind': 'op'},\n 'split_input_data': {'kind': 'data', 'shape': None, 'value': None},\n 'split_op': {'kind': 'op', 'axis': None, 'split_lengths': None, 'op': 'AttributedVariadicSplit'},\n 'split_output_0_data': {'kind': 'data', 'shape': None, 'value': None},\n 'output_0': {'kind': 'op'},\n 'split_output_1_data': {'kind': 'data', 'shape': None, 'value': None},\n 'output_1': {'kind': 'op'},\n 'split_output_2_data': {'kind': 'data', 'shape': None, 'value': None},\n 'output_2': {'kind': 'op'},\n }\n edges = [\n ('input', 'split_input_data'),\n ('split_input_data', 'split_op'),\n ('split_op', 'split_output_0_data'),\n ('split_output_0_data', 'output_0'),\n ('split_op', 'split_output_1_data'),\n ('split_output_1_data', 'output_1'),\n ('split_op', 'split_output_2_data'),\n ('split_output_2_data', 'output_2'),\n ]\n\n def test_splitv_zero(self):\n graph = build_graph(self.nodes, self.edges,\n {\n 'split_input_data': {'shape': int64_array([2, 12, 25, 30])},\n 'split_op': {'axis': np.array(2), 'split_lengths': np.array([2, 13, 10, 0]),\n 'out_ports_count': 4},\n }\n )\n node = Node(graph, 'split_op')\n for p in range(len(node.out_edges()), node.out_ports_count):\n node.add_output_port(p)\n\n AttributedVariadicSplit.infer(node)\n\n self.assertTrue(len(node.out_edges()) == 3)\n self.assertTrue(np.all(node.split_lengths == np.array([2, 13, 10])))\n\n def test_splitv_dynamic_input(self):\n graph = build_graph(self.nodes, self.edges,\n {\n 'split_input_data': {'shape': shape_array([2, 12, dynamic_dimension_value, 30])},\n 'split_op': {'axis': np.array(2), 'split_lengths': np.array([2, 13, 10]),\n 'out_ports_count': 4},\n }\n )\n node = Node(graph, 'split_op')\n for p in range(len(node.out_edges()), node.out_ports_count):\n node.add_output_port(p)\n\n AttributedVariadicSplit.infer(node)\n\n self.assertTrue(len(node.out_edges()) == 3)\n self.assertTrue(np.all(node.split_lengths == np.array([2, 13, 10])))\n\n def test_splitv_zero_not_last(self):\n graph = build_graph(self.nodes, self.edges,\n {\n 'split_input_data': {'shape': int64_array([2, 12, 25, 30])},\n 'split_op': {'axis': np.array(2), 'split_lengths': np.array([2, 13, 0, 10]),\n 'out_ports_count': 4},\n }\n )\n node = Node(graph, 'split_op')\n\n # extractor should do it\n for p in range(len(node.out_edges()), node.out_ports_count):\n node.add_output_port(p)\n node.out_port(2).get_connection().set_source(node.out_port(3))\n\n AttributedVariadicSplit.infer(node)\n\n self.assertTrue(node.out_port(3).disconnected())\n self.assertTrue(np.all(node.split_lengths == np.array([2, 13, 10])))\n\n def test_splitv_2_zero_not_last(self):\n graph = build_graph(self.nodes, self.edges,\n {\n 'split_input_data': {'shape': int64_array([2, 12, 25, 30])},\n 'split_op': {'axis': np.array(2), 'split_lengths': np.array([2, 13, 0, 0, 10]),\n 'out_ports_count': 5},\n }\n )\n node = Node(graph, 'split_op')\n\n # extractor should do it\n for p in range(len(node.out_edges()), node.out_ports_count):\n node.add_output_port(p)\n 
node.out_port(2).get_connection().set_source(node.out_port(4))\n\n AttributedVariadicSplit.infer(node)\n\n self.assertTrue(node.out_port(4).disconnected())\n self.assertTrue(node.out_port(3).disconnected())\n self.assertTrue(np.all(node.split_lengths == np.array([2, 13, 10])))\n\n\n@generator\nclass TestVariadicSplitOp(unittest.TestCase):\n nodes = {\n 'input': {'kind': 'op'},\n 'split_input_data': {'kind': 'data', 'shape': None, 'value': None},\n 'split_axis': {'kind': 'op', 'op': 'Const'},\n 'split_axis_data': {'kind': 'data', 'shape': None, 'value': None},\n 'split_lengths': {'kind': 'op', 'op': 'Const'},\n 'split_lengths_data': {'kind': 'data', 'shape': None, 'value': None},\n 'split_op': {'kind': 'op', 'op': 'VariadicSplit'},\n 'split_output_0_data': {'kind': 'data', 'shape': None, 'value': None},\n 'output_0': {'kind': 'op'},\n 'split_output_1_data': {'kind': 'data', 'shape': None, 'value': None},\n 'output_1': {'kind': 'op'},\n 'split_output_2_data': {'kind': 'data', 'shape': None, 'value': None},\n 'output_2': {'kind': 'op'},\n }\n edges = [\n ('input', 'split_input_data'),\n ('split_input_data', 'split_op'),\n ('split_axis', 'split_axis_data'),\n ('split_axis_data', 'split_op'),\n ('split_lengths', 'split_lengths_data'),\n ('split_lengths_data', 'split_op'),\n ('split_op', 'split_output_0_data'),\n ('split_output_0_data', 'output_0'),\n ('split_op', 'split_output_1_data'),\n ('split_output_1_data', 'output_1'),\n ('split_op', 'split_output_2_data'),\n ('split_output_2_data', 'output_2'),\n ]\n\n @generate(*[int64_array(2),\n int64_array([2])])\n def test_variadic_split_axis(self, axis):\n lengths = int64_array([2, 13, 10])\n graph = build_graph(self.nodes, self.edges,\n {\n 'split_input_data': {'shape': int64_array([2, 12, 25, 30])},\n 'split_axis_data': {'value': axis},\n 'split_lengths_data': {'value': lengths},\n 'split_op': {'out_ports_count': 4},\n }\n )\n node = Node(graph, 'split_op')\n for p in range(len(node.out_edges()), node.out_ports_count):\n node.add_output_port(p)\n\n VariadicSplit.infer(node)\n\n ont_nodes_count = len(node.out_edges())\n self.assertTrue(ont_nodes_count == 3)\n for out in range(ont_nodes_count):\n self.assertTrue(np.all(node.out_node(out).shape == int64_array([2, 12, lengths[out], 30])))\n\n @generate(*[int64_array([[2], [2]]),\n int64_array([2, 2])])\n def test_negative_variadic_split_axis(self, axis):\n lengths = int64_array([2, 13, 10])\n graph = build_graph(self.nodes, self.edges,\n {\n 'split_input_data': {'shape': int64_array([2, 12, 25, 30])},\n 'split_axis_data': {'value': axis},\n 'split_lengths_data': {'value': lengths},\n 'split_op': {'out_ports_count': 4},\n }\n )\n node = Node(graph, 'split_op')\n for p in range(len(node.out_edges()), node.out_ports_count):\n node.add_output_port(p)\n\n try:\n VariadicSplit.infer(node)\n except AssertionError as e:\n self.assertTrue(e.args[0] == 'VariadicSplit `axis` should be scalar or tensor with shape [1], '\n 'but it`s not for node split_op')\n", "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport logging as log\n\nimport numpy as np\n\nfrom mo.graph.graph import Node, Graph\nfrom mo.middle.passes.fusing.helpers import backward_bfs, forward_bfs, get_value_in_port, \\\n get_tensor_in_port\nfrom mo.ops.const import Const\n\n\ndef _fuse_mul(graph: Graph, node: Node, fuse_nodes: list, backward: bool = True):\n \"\"\"\n This function takes Mul node and array of convolution/fc nodes for further fusion\n Parameters\n ----------\n x : bool\n If backward is False, 
that means that Convolution/FC goes after Mul node\n else means that Mul goes after Convolutions/FC\n :param backward:\n :param fuse_nodes:\n :param node:\n :param graph:\n \"\"\"\n is_fused = False\n const_port, tensor_port = get_value_in_port(node), get_tensor_in_port(node)\n\n if const_port is None or tensor_port is None:\n log.warning('Cannot do fuse_mul for node {} because this node has wrong inputs'.format(node.id))\n return False\n\n for fuse_node in fuse_nodes:\n if fuse_node.soft_get('can_be_fused') is False:\n log.warning('Node {} can\\'t be used in fusing because attr can_be_fused = False'.format(fuse_node.name))\n return False\n\n if len(fuse_node.in_ports()) < 2:\n log.warning('Node {} has no weights node'.format(fuse_node.name))\n return False\n\n if not backward and not fuse_node.has_valid('layout'):\n log.warning('Node {} has no layout attr'.format(fuse_node.name))\n return False\n\n weights_port = fuse_node.in_port(1)\n if not weights_port.data.has_valid('output_channel_dim') or \\\n not weights_port.data.has_valid('input_channel_dim'):\n log.warning(\n 'Cannot do fuse_mul for node {} because there is no field ' +\n 'output_channel_dim and/or input_channel_dim in weights.'\n .format(fuse_node.soft_get('name'))\n )\n return False\n\n inp_ch = weights_port.data.get_attr('input_channel_dim')\n out_ch = weights_port.data.get_attr('output_channel_dim')\n if max(inp_ch, out_ch) >= len(weights_port.data.get_shape()):\n log.warning('Node {} has wrong weights shape'.format(fuse_node.name))\n return False\n\n for fuse_node in fuse_nodes:\n weights_port = fuse_node.in_port(1)\n value = np.array(const_port.data.get_value())\n\n value = np.squeeze(value)\n\n # TODO : ch_dim should be equal to node.in_node(1).value.shape\n # We will multiply weights according output/input channel dimension\n ch_dim = weights_port.data.get_attr('output_channel_dim' if backward else 'input_channel_dim')\n shape = np.array([weights_port.data.get_shape()[ch_dim]])\n\n # Scalar broadcast\n if value.size == 1:\n value = np.full(shape, value.item())\n\n # Common broadcast for forward fusion\n if not backward:\n cnt = shape[-1] / value.shape[0]\n if fuse_node.layout == 'NCHW':\n tmp = []\n for val in value:\n tmp = np.concatenate((tmp, np.repeat(val, cnt)))\n value = np.array(tmp)\n else:\n value = np.tile(value, int(cnt))\n\n # Expand dims for multiplication (ex. [38] to [38, 1, 1])\n wdims_number = weights_port.data.get_attr('dims_number')\n for x in range(wdims_number - ch_dim - 1):\n shape = np.append(shape, 1)\n\n mul_val = np.array(value)\n # If the value fails to reshape to the provided shape, skip fusing.\n # This can happen in case of group != 1 of the convolution.\n try:\n value = np.reshape(value, shape)\n except ValueError:\n log.error(\"Cannot fuse const from {} to {}. Reshape failed. 
Skipping.\".format(\n node.soft_get('name', node.id),fuse_node.soft_get('name', fuse_node.id)), extra={'is_warning': True})\n return False\n\n # Weights multiplication\n mul_name = node.name + '_copy'\n mul_const = Const(graph, {'value': value, 'name': mul_name + '/const'}).create_node()\n w_mul = node.copy_node({'name': mul_name, 'in_ports_count': len(node.in_ports()),\n 'out_ports_count': len(node.out_ports()), 'can_be_fused': False})\n w_mul.in_port(const_port.idx).connect(mul_const.out_port(0))\n w_const = weights_port.get_source()\n weights_port.get_connection().set_source(w_mul.out_port(0))\n w_const.connect(w_mul.in_port(tensor_port.idx))\n\n fuse_node_in_data = fuse_node.in_node(weights_port.idx)\n w_const_out_data = w_const.node.out_node(w_const.idx)\n\n # During this reconnection new data node name is copied from the data node\n # outgoing from w_const port. Duplicate names of data nodes lead to appearing\n # of duplicate op node names after constant folding. So we should manually\n # set a unique name for the new data node.\n if fuse_node_in_data.soft_get('name') == w_const_out_data.soft_get('name') and \\\n fuse_node_in_data.soft_get('name', None) is not None:\n fuse_node.in_node(weights_port.idx)['name'] = graph.unique_id(mul_name)\n\n # If we fuse in backward direction we should multiply biases if they exists\n if backward and len(fuse_node.in_ports()) == 3 and not fuse_node.in_port(2).disconnected() and \\\n not fuse_node.has_and_set('shape_input'):\n conv_bias = fuse_node.in_port(2)\n conv_bias.data.set_value(conv_bias.data.get_value() * np.squeeze(mul_val))\n\n mul_const.infer(mul_const)\n w_mul.infer(w_mul)\n\n log.debug('Fused: {} to {}'.format(node.name, fuse_node.name))\n is_fused = True\n\n if is_fused:\n # Delete Mul node\n producer_port = tensor_port.get_source()\n tensor_port.disconnect()\n const_port.disconnect()\n # as Mul node is added before convolution, output tensor from Convolution node\n # corresponds to original Mul node\n node.out_port(0).get_connection().set_source(producer_port, \"dest\")\n\n return is_fused\n\n\ndef fuse_linear_ops(graph: Graph):\n \"\"\"\n This function makes fusing of linear operations (Mul,Add) to Convolution/FC.\n \"\"\"\n fuse_count = 0\n\n # Fusion in backward direction\n nodes = graph.pseudo_topological_sort()\n for node in nodes:\n is_fused = False\n\n # Fuse Mul to Convolution/FC\n if node.soft_get('op') == 'Mul' and get_value_in_port(node) is not None and node.has_and_set('can_be_fused'):\n fuse_nodes = backward_bfs(node, [], ['Convolution', 'Deconvolution', 'MatMul'])\n is_fused = _fuse_mul(graph, node, fuse_nodes)\n\n fuse_count += is_fused\n\n # Fusion in forward direction\n nodes = graph.pseudo_topological_sort(reverse=True)\n for node in nodes:\n is_fused = False\n\n # Fuse Mul to Convolution/FC\n if node.soft_get('op') == 'Mul' and get_value_in_port(node) is not None and node.has_and_set('can_be_fused'):\n fuse_nodes = forward_bfs(node, [], ['Convolution', 'Deconvolution', 'MatMul'])\n is_fused = _fuse_mul(graph, node, fuse_nodes, False)\n\n fuse_count += is_fused\n\n log.debug(\"Fused {} nodes\".format(fuse_count))\n" ]
[ [ "numpy.array" ], [ "numpy.array", "numpy.testing.assert_allclose" ], [ "tensorflow.constant", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.placeholder", "tensorflow.batch_to_space", "tensorflow.compat.v1.reset_default_graph" ], [ "tensorflow.Graph", "tensorflow.import_graph_def", "tensorflow.compat.v1.train.import_meta_graph", "tensorflow.compat.v1.MetaGraphDef", "tensorflow.compat.v1.graph_util.convert_variables_to_constants", "tensorflow.DType", "numpy.ones", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.GraphDef", "tensorflow.compat.v1.reset_default_graph" ], [ "numpy.testing.assert_equal", "numpy.array" ], [ "numpy.arange", "numpy.array", "numpy.ones" ], [ "numpy.power" ], [ "numpy.allclose", "numpy.transpose", "numpy.ones" ], [ "numpy.array_equal" ], [ "numpy.ma.round" ], [ "numpy.array" ], [ "numpy.array", "numpy.random.rand" ], [ "numpy.array" ], [ "numpy.reshape", "numpy.squeeze", "numpy.append", "numpy.repeat", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.13", "1.16", "1.9", "1.18", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SyneRBI/SIRF-Contribs
[ "130223d9bc11991eadcd11f9b715aea34c4842fd" ]
[ "src/Python/sirf/contrib/kcl/Prior.py" ]
[ "\r\n\r\nimport numpy as np\r\n\r\n\r\nclass Prior(object):\r\n \r\n def __init__(self,imageSize, sWindowSize=3, imageCropFactor=[0]):\r\n \r\n self.imageSize = imageSize if len(imageSize)==3 else imageSize.append(1)\r\n self.imageCropFactor = imageCropFactor\r\n if np.mod(sWindowSize,2):\r\n self.sWindowSize = sWindowSize\r\n else:\r\n raise ValueError(\"search window size must be odd\")\r\n self.is3D = 1 if imageSize[2]>1 else 0 \r\n self.nS = sWindowSize**3 if self.is3D else sWindowSize**2\r\n _,self.imageSizeCrop= self.imCrop() \r\n self.SearchWindow, self.Wd = self.__neighborhood(self.sWindowSize)\r\n\r\n def __neighborhood(self,w):\r\n \r\n n = self.imageSizeCrop[0]\r\n m = self.imageSizeCrop[1]\r\n h = self.imageSizeCrop[2]\r\n wlen = 2*np.floor(w/2)\r\n widx = xidx = yidx = np.arange(-wlen/2,wlen/2+1)\r\n\r\n if h==1:\r\n zidx = [0]\r\n nN = w*w\r\n else:\r\n zidx = widx\r\n nN = w*w*w\r\n \r\n Y,X,Z = np.meshgrid(np.arange(0,m), np.arange(0,n), np.arange(0,h)) \r\n N = np.zeros([n*m*h, nN],dtype='int32')\r\n D = np.zeros([n*m*h, nN],dtype='float')\r\n l = 0\r\n for x in xidx:\r\n Xnew = self.__setBoundary(X + x, n)\r\n for y in yidx:\r\n Ynew = self.__setBoundary(Y + y, m)\r\n for z in zidx:\r\n Znew = self.__setBoundary(Z + z, h)\r\n N[:,l] = (Xnew + (Ynew)*n + (Znew)*n*m).reshape(-1,1).flatten('F')\r\n D[:,l] = np.sqrt(x**2+y**2+z**2)\r\n l += 1\r\n D = 1/D\r\n D[np.isinf(D)]= 0\r\n D = D/np.sum(D,axis=1).reshape(-1,1)\r\n return N, D\r\n \r\n def __setBoundary(self,X,n):\r\n idx = X<0\r\n X[idx] = X[idx]+n\r\n idx = X>n-1\r\n X[idx] = X[idx]-n\r\n return X.flatten('F')\r\n\r\n def imCrop(self,img=None):\r\n if np.any(self.imageCropFactor):\r\n if len(self.imageCropFactor)==1:\r\n self.imageCropFactor = self.imageCropFactor*3\r\n I = 0\r\n if self.imageCropFactor[0]:\r\n self.imageCropFactor[0] = np.max([2.5, self.imageCropFactor[0]])\r\n I = np.floor(self.imageSize[0]/self.imageCropFactor[0]).astype('int')\r\n J = 0\r\n if self.imageCropFactor[1]:\r\n self.imageCropFactor[1] = np.max([2.5, self.imageCropFactor[1]])\r\n J = np.floor(self.imageSize[1]/self.imageCropFactor[1]).astype('int')\r\n K = 0\r\n if self.imageCropFactor[2] and self.is3D:\r\n self.imageCropFactor[2] = np.max([2.5, self.imageCropFactor[2]])\r\n K = np.floor(self.imageSize[2]/self.imageCropFactor[2]).astype('int') \r\n imageSizeCrop = [np.arange(I,self.imageSize[0]-I).shape[0],\r\n np.arange(J,self.imageSize[1]-J).shape[0],\r\n np.arange(K,self.imageSize[2]-K).shape[0]]\r\n if img is not None:\r\n if self.is3D:\r\n img = img[I:self.imageSize[0]-I, J:self.imageSize[1]-J, K:self.imageSize[2]-K] \r\n else:\r\n img = img[I:self.imageSize[0]-I, J:self.imageSize[1]-J] \r\n else:\r\n imageSizeCrop = self.imageSize\r\n return img,imageSizeCrop \r\n\r\n def imCropUndo(self,img):\r\n if np.any(self.imageCropFactor):\r\n tmp = img\r\n img = np.zeros(self.imageSize,tmp.dtype)\r\n I = (self.imageSize[0] - self.imageSizeCrop[0])//2\r\n J = (self.imageSize[1] - self.imageSizeCrop[1])//2\r\n K = (self.imageSize[2] - self.imageSizeCrop[2])//2\r\n if self.is3D:\r\n img[I:self.imageSize[0]-I, J:self.imageSize[1]-J, K:self.imageSize[2]-K] = tmp \r\n else:\r\n img[I:self.imageSize[0]-I, J:self.imageSize[1]-J] = tmp \r\n return img\r\n \r\n def Grad(self,img):\r\n img,_ = self.imCrop(img)\r\n img = img.flatten('F')\r\n imgGrad = img[self.SearchWindow] - img.reshape(-1,1)\r\n imgGrad[np.isnan(imgGrad)] = 0\r\n return imgGrad\r\n \r\n def GradT(self,imgGrad):\r\n dP = -2*np.sum(self.Wd*imgGrad,axis=1)\r\n dP = 
dP.reshape(self.imageSizeCrop,order='F')\r\n dP = self.imCropUndo(dP)\r\n dP[np.isnan(dP)] = 0\r\n return dP\r\n \r\n def Div(self,img):\r\n img,_ = self.imCrop(img)\r\n img = img.flatten('F')\r\n imgDiv = img[self.SearchWindow] + img.reshape(-1,1)\r\n imgDiv[np.isnan(imgDiv)] = 0\r\n return imgDiv\r\n \r\n def gaussianWeights(self,img,sigma):\r\n return 1/np.sqrt(2*np.pi*sigma**2)*np.exp(-0.5*self.Grad(img)**2/sigma**2)\r\n \r\n def BowshserWeights(self,img,b):\r\n if b>self.nS:\r\n raise ValueError(\"Number of most similar voxels must be smaller than number of voxels per neighbourhood\")\r\n imgGradAbs = np.abs(self.Grad(img))\r\n Wb = 0*imgGradAbs\r\n for i in range(imgGradAbs.shape[0]):\r\n idx = np.argsort(imgGradAbs[i,:])\r\n Wb[i,idx[0:b]]=1\r\n return Wb\r\n \r\n \r\n \r\n \r\n \r\n \r\n " ]
[ [ "numpy.isinf", "numpy.sqrt", "numpy.isnan", "numpy.arange", "numpy.max", "numpy.any", "numpy.floor", "numpy.mod", "numpy.argsort", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jimmy-academia/Deeper-Learnings
[ "ac363efe5450dd2751c0c1bea0ee7af457f7ac24" ]
[ "codestosort/NaturalLanguage/module/bestmodel.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nclass BestNet(torch.nn.Module):\n def __init__(self, embedding_dim):\n super(BestNet, self).__init__()\n\n self.embedding_dim = embedding_dim\n self.hidden_dim = 256\n self.embedding_dropout=0.6\n self.desc_rnn_size = 100\n\n self.rnn = nn.GRU(\n input_size=self.embedding_dim, hidden_size=self.hidden_dim,\n num_layers=1, batch_first=True, bidirectional=True\n )\n\n self.rnn_desc = nn.GRU(\n input_size=self.embedding_dim, hidden_size=self.desc_rnn_size,\n num_layers=1, batch_first=True, bidirectional=True\n )\n\n self.emb_drop = nn.Dropout(self.embedding_dropout)\n self.M = nn.Parameter(torch.FloatTensor(2*self.hidden_dim, 2*self.hidden_dim))\n self.b = nn.Parameter(torch.FloatTensor([0]))\n self.Wc = nn.Parameter(torch.FloatTensor(2*self.hidden_dim, self.embedding_dim))\n self.We = nn.Parameter(torch.FloatTensor(self.embedding_dim, self.embedding_dim))\n self.attn = nn.Linear(2*self.hidden_dim, 2*self.hidden_dim)\n self.init_params_()\n self.tech_w = 0.0\n\n def init_params_(self):\n #Initializing parameters\n nn.init.xavier_normal_(self.M)\n\n # Set forget gate bias to 2\n size = self.rnn.bias_hh_l0.size(0)\n self.rnn.bias_hh_l0.data[size//4:size//2] = 2\n\n size = self.rnn.bias_ih_l0.size(0)\n self.rnn.bias_ih_l0.data[size//4:size//2] = 2\n\n size = self.rnn_desc.bias_hh_l0.size(0)\n self.rnn_desc.bias_hh_l0.data[size//4:size//2] = 2\n\n size = self.rnn_desc.bias_ih_l0.size(0)\n self.rnn_desc.bias_ih_l0.data[size//4:size//2] = 2\n\n # def forward(self, context, options):\n # logits = []\n # for i, option in enumerate(options.transpose(1, 0)):\n # gits = []\n # for context in context.transpose(1,0):\n # git = self.forward_one_option(context, option)\n # gits.append(logit)\n # logit = torch.stack(gits).mean(0)\n # logits = torch.stack(logits, 1)\n\n # return logits.squeeze()\n\n # def forward(self, context, options):\n # logits = []\n # for i, option in enumerate(options.transpose(1, 0)):\n # logit = self.forward_one_option(context, option)\n # logits.append(logit)\n # logits = torch.stack(logits, 1)\n\n # return logits.squeeze()\n def forward(self, context, options):\n logits = []\n for i, option in enumerate(options.transpose(1, 0)):\n logit_ = []\n for utter in context.transpose(1,0):\n logit = self.forward_one_option(utter, option) # 10,1,1\n logit_.append(logit)\n logits.append(torch.stack(logit_,1).mean(1))\n logits = torch.stack(logits, 1)\n\n return logits.squeeze()\n\n def forward_one_option(self, context, option):\n context, c_h, option, o_h = self.forward_crosspath(context, option)\n context_attn = self.forward_attn(context, o_h)\n option_attn = self.forward_attn(option, c_h)\n final = self.forward_fc(context_attn, option_attn)\n return final\n\n def forward_crosspath(self, context, option):\n context, c_h = self.rnn(self.emb_drop(context))\n c_h = torch.cat([i for i in c_h], dim=-1)\n option, o_h = self.rnn(self.emb_drop(option))\n o_h = torch.cat([i for i in o_h], dim=-1)\n return context, c_h.squeeze(), option, o_h.squeeze()\n\n def forward_attn(self, output, hidden):\n max_len = output.size(1)\n b_size = output.size(0)\n\n hidden = hidden.squeeze(0).unsqueeze(2)\n attn = self.attn(output.contiguous().view(b_size*max_len, -1))\n attn = attn.view(b_size, max_len, -1)\n attn_energies = (attn.bmm(hidden).transpose(1,2))\n alpha = F.softmax(attn_energies.squeeze(1), dim=-1)\n alpha = alpha.unsqueeze(1)\n weighted_attn = alpha.bmm(output)\n\n return 
weighted_attn.squeeze()\n\n def forward_fc(self, context, option):\n out = torch.mm(context, self.M).unsqueeze(1)\n out = torch.bmm(out, option.unsqueeze(2))\n out = out + self.b\n return out\n\n def save(self, filepath):\n torch.save(self.state_dict(), filepath)\n\n" ]
[ [ "torch.nn.Dropout", "torch.mm", "torch.cat", "torch.nn.GRU", "torch.nn.init.xavier_normal_", "torch.nn.Linear", "torch.FloatTensor", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ameisner/legacypipe
[ "5ffe6fb2458618b68653580badc4a94e1ecb4f04" ]
[ "py/legacypipe/unwise.py" ]
[ "import os\nimport numpy as np\nimport fitsio\nfrom astrometry.util.fits import fits_table\nfrom astrometry.util.ttime import Time\n\nfrom wise.unwise import get_unwise_tractor_image\n\nimport logging\nlogger = logging.getLogger('legacypipe.unwise')\ndef info(*args):\n from legacypipe.utils import log_info\n log_info(logger, args)\ndef debug(*args):\n from legacypipe.utils import log_debug\n log_debug(logger, args)\n\n'''\nThis function was imported whole from the tractor repo:\nwise/forcedphot.py because I figured we were doing enough\nLegacySurvey-specific stuff in it that it was time to just import it\nand edit it rather than build elaborate options.\n'''\ndef unwise_forcedphot(cat, tiles, band=1, roiradecbox=None,\n use_ceres=True, ceres_block=8,\n save_fits=False, get_models=False, ps=None,\n psf_broadening=None,\n pixelized_psf=False,\n get_masks=None,\n move_crpix=False,\n modelsky_dir=None):\n '''\n Given a list of tractor sources *cat*\n and a list of unWISE tiles *tiles* (a fits_table with RA,Dec,coadd_id)\n runs forced photometry, returning a FITS table the same length as *cat*.\n\n *get_masks*: the WCS to resample mask bits into.\n '''\n from tractor import PointSource, Tractor, ExpGalaxy, DevGalaxy\n from tractor.sersic import SersicGalaxy\n\n if not pixelized_psf and psf_broadening is None:\n # PSF broadening in post-reactivation data, by band.\n # Newer version from Aaron's email to decam-chatter, 2018-06-14.\n broadening = { 1: 1.0405, 2: 1.0346, 3: None, 4: None }\n psf_broadening = broadening[band]\n\n if False:\n from astrometry.util.plotutils import PlotSequence\n ps = PlotSequence('wise-forced-w%i' % band)\n plots = (ps is not None)\n if plots:\n import pylab as plt\n\n wantims = (plots or save_fits or get_models)\n wanyband = 'w'\n if get_models:\n models = []\n\n wband = 'w%i' % band\n\n Nsrcs = len(cat)\n phot = fits_table()\n # Filled in based on unique tile overlap\n phot.wise_coadd_id = np.array([' '] * Nsrcs, dtype='U8')\n phot.wise_x = np.zeros(Nsrcs, np.float32)\n phot.wise_y = np.zeros(Nsrcs, np.float32)\n phot.set('psfdepth_%s' % wband, np.zeros(Nsrcs, np.float32))\n nexp = np.zeros(Nsrcs, np.int16)\n mjd = np.zeros(Nsrcs, np.float64)\n central_flux = np.zeros(Nsrcs, np.float32)\n\n ra = np.array([src.getPosition().ra for src in cat])\n dec = np.array([src.getPosition().dec for src in cat])\n\n fskeys = ['prochi2', 'profracflux']\n fitstats = {}\n\n if get_masks:\n mh,mw = get_masks.shape\n maskmap = np.zeros((mh,mw), np.uint32)\n\n tims = []\n for tile in tiles:\n info('Reading WISE tile', tile.coadd_id, 'band', band)\n tim = get_unwise_tractor_image(tile.unwise_dir, tile.coadd_id, band,\n bandname=wanyband, roiradecbox=roiradecbox)\n if tim is None:\n debug('Actually, no overlap with WISE coadd tile', tile.coadd_id)\n continue\n\n if plots:\n sig1 = tim.sig1\n plt.clf()\n plt.imshow(tim.getImage(), interpolation='nearest', origin='lower',\n cmap='gray', vmin=-3 * sig1, vmax=10 * sig1)\n plt.colorbar()\n tag = '%s W%i' % (tile.coadd_id, band)\n plt.title('%s: tim data' % tag)\n ps.savefig()\n plt.clf()\n plt.hist((tim.getImage() * tim.inverr)[tim.inverr > 0].ravel(),\n range=(-5,10), bins=100)\n plt.xlabel('Per-pixel intensity (Sigma)')\n plt.title(tag)\n ps.savefig()\n\n if move_crpix and band in [1, 2]:\n realwcs = tim.wcs.wcs\n x,y = realwcs.crpix\n tile_crpix = tile.get('crpix_w%i' % band)\n dx = tile_crpix[0] - 1024.5\n dy = tile_crpix[1] - 1024.5\n realwcs.set_crpix(x+dx, y+dy)\n debug('unWISE', tile.coadd_id, 'band', band, 'CRPIX', x,y,\n 'shift 
by', dx,dy, 'to', realwcs.crpix)\n\n if modelsky_dir and band in [1, 2]:\n fn = os.path.join(modelsky_dir, '%s.%i.mod.fits' % (tile.coadd_id, band))\n if not os.path.exists(fn):\n raise RuntimeError('WARNING: does not exist:', fn)\n x0,x1,y0,y1 = tim.roi\n bg = fitsio.FITS(fn)[2][y0:y1, x0:x1]\n assert(bg.shape == tim.shape)\n\n if plots:\n plt.clf()\n plt.subplot(1,2,1)\n plt.imshow(tim.getImage(), interpolation='nearest', origin='lower',\n cmap='gray', vmin=-3 * sig1, vmax=5 * sig1)\n plt.subplot(1,2,2)\n plt.imshow(bg, interpolation='nearest', origin='lower',\n cmap='gray', vmin=-3 * sig1, vmax=5 * sig1)\n tag = '%s W%i' % (tile.coadd_id, band)\n plt.suptitle(tag)\n ps.savefig()\n plt.clf()\n ha = dict(range=(-5,10), bins=100, histtype='step')\n plt.hist((tim.getImage() * tim.inverr)[tim.inverr > 0].ravel(),\n color='b', label='Original', **ha)\n plt.hist(((tim.getImage()-bg) * tim.inverr)[tim.inverr > 0].ravel(),\n color='g', label='Minus Background', **ha)\n plt.axvline(0, color='k', alpha=0.5)\n plt.xlabel('Per-pixel intensity (Sigma)')\n plt.legend()\n plt.title(tag + ': background')\n ps.savefig()\n\n # Actually subtract the background!\n tim.data -= bg\n\n # Floor the per-pixel variances,\n # and add Poisson contribution from sources\n if band in [1,2]:\n # in Vega nanomaggies per pixel\n floor_sigma = {1: 0.5, 2: 2.0}\n poissons = {1: 0.15, 2: 0.3}\n with np.errstate(divide='ignore'):\n new_ie = 1. / np.sqrt(\n (1./tim.inverr)**2 +\n floor_sigma[band]**2 +\n poissons[band]**2 * np.maximum(0., tim.data))\n new_ie[tim.inverr == 0] = 0.\n\n if plots:\n plt.clf()\n plt.plot((1. / tim.inverr[tim.inverr>0]).ravel(),\n (1./new_ie[tim.inverr>0]).ravel(), 'b.')\n plt.title('unWISE per-pixel error: %s band %i' %\n (tile.coadd_id, band))\n plt.xlabel('original')\n plt.ylabel('floored')\n ps.savefig()\n\n assert(np.all(np.isfinite(new_ie)))\n assert(np.all(new_ie >= 0.))\n tim.inverr = new_ie\n\n # Expand a 3-pixel radius around weight=0 (saturated) pixels\n # from Eddie via crowdsource\n # https://github.com/schlafly/crowdsource/blob/7069da3e7d9d3124be1cbbe1d21ffeb63fc36dcc/python/wise_proc.py#L74\n ## FIXME -- W3/W4 ??\n satlimit = 85000\n msat = ((tim.data > satlimit) | ((tim.nims == 0) & (tim.nuims > 1)))\n from scipy.ndimage.morphology import binary_dilation\n xx, yy = np.mgrid[-3:3+1, -3:3+1]\n dilate = xx**2+yy**2 <= 3**2\n msat = binary_dilation(msat, dilate)\n nbefore = np.sum(tim.inverr == 0)\n tim.inverr[msat] = 0\n nafter = np.sum(tim.inverr == 0)\n debug('Masking an additional', (nafter-nbefore), 'near-saturated pixels in unWISE',\n tile.coadd_id, 'band', band)\n\n # Read mask file?\n if get_masks:\n from astrometry.util.resample import resample_with_wcs, OverlapError\n # unwise_dir can be a colon-separated list of paths\n tilemask = None\n for d in tile.unwise_dir.split(':'):\n fn = os.path.join(d, tile.coadd_id[:3], tile.coadd_id,\n 'unwise-%s-msk.fits.gz' % tile.coadd_id)\n if os.path.exists(fn):\n debug('Reading unWISE mask file', fn)\n x0,x1,y0,y1 = tim.roi\n tilemask = fitsio.FITS(fn)[0][y0:y1,x0:x1]\n break\n if tilemask is None:\n info('unWISE mask file for tile', tile.coadd_id, 'does not exist')\n else:\n try:\n tanwcs = tim.wcs.wcs\n assert(tanwcs.shape == tilemask.shape)\n Yo,Xo,Yi,Xi,_ = resample_with_wcs(get_masks, tanwcs,\n intType=np.int16)\n # Only deal with mask pixels that are set.\n I, = np.nonzero(tilemask[Yi,Xi] > 0)\n # Trim to unique area for this tile\n rr,dd = get_masks.pixelxy2radec(Xo[I]+1, Yo[I]+1)\n good = radec_in_unique_area(rr, dd, tile.ra1, 
tile.ra2,\n tile.dec1, tile.dec2)\n I = I[good]\n maskmap[Yo[I],Xo[I]] = tilemask[Yi[I], Xi[I]]\n except OverlapError:\n # Shouldn't happen by this point\n print('Warning: no overlap between WISE tile', tile.coadd_id, 'and brick')\n\n if plots:\n plt.clf()\n plt.imshow(tilemask, interpolation='nearest', origin='lower')\n plt.title('Tile %s: mask' % tile.coadd_id)\n ps.savefig()\n plt.clf()\n plt.imshow(maskmap, interpolation='nearest', origin='lower')\n plt.title('Tile %s: accumulated maskmap' % tile.coadd_id)\n ps.savefig()\n\n # The tiles have some overlap, so zero out pixels outside the\n # tile's unique area.\n th,tw = tim.shape\n xx,yy = np.meshgrid(np.arange(tw), np.arange(th))\n rr,dd = tim.wcs.wcs.pixelxy2radec(xx+1, yy+1)\n unique = radec_in_unique_area(rr, dd, tile.ra1, tile.ra2,\n tile.dec1, tile.dec2)\n debug('Tile', tile.coadd_id, '- total of', np.sum(unique),\n 'unique pixels out of', len(unique.flat), 'total pixels')\n if get_models:\n # Save the inverr before blanking out non-unique pixels, for making coadds with no gaps!\n # (actually, slightly more subtly, expand unique area by 1 pixel)\n from scipy.ndimage.morphology import binary_dilation\n du = binary_dilation(unique)\n tim.coadd_inverr = tim.inverr * du\n tim.inverr[unique == False] = 0.\n del xx,yy,rr,dd,unique\n\n if plots:\n sig1 = tim.sig1\n plt.clf()\n plt.imshow(tim.getImage() * (tim.inverr > 0),\n interpolation='nearest', origin='lower',\n cmap='gray', vmin=-3 * sig1, vmax=10 * sig1)\n plt.colorbar()\n tag = '%s W%i' % (tile.coadd_id, band)\n plt.title('%s: tim data (unique)' % tag)\n ps.savefig()\n\n if pixelized_psf:\n from unwise_psf import unwise_psf\n if (band == 1) or (band == 2):\n # we only have updated PSFs for W1 and W2\n psfimg = unwise_psf.get_unwise_psf(band, tile.coadd_id,\n modelname='neo6_unwisecat')\n else:\n psfimg = unwise_psf.get_unwise_psf(band, tile.coadd_id)\n\n if band == 4:\n # oversample (the unwise_psf models are at native W4 5.5\"/pix,\n # while the unWISE coadds are made at 2.75\"/pix.\n ph,pw = psfimg.shape\n subpsf = np.zeros((ph*2-1, pw*2-1), np.float32)\n from astrometry.util.util import lanczos3_interpolate\n xx,yy = np.meshgrid(np.arange(0., pw-0.51, 0.5, dtype=np.float32),\n np.arange(0., ph-0.51, 0.5, dtype=np.float32))\n xx = xx.ravel()\n yy = yy.ravel()\n ix = xx.astype(np.int32)\n iy = yy.astype(np.int32)\n dx = (xx - ix).astype(np.float32)\n dy = (yy - iy).astype(np.float32)\n psfimg = psfimg.astype(np.float32)\n rtn = lanczos3_interpolate(ix, iy, dx, dy, [subpsf.flat], [psfimg])\n\n if plots:\n plt.clf()\n plt.imshow(psfimg, interpolation='nearest', origin='lower')\n plt.title('Original PSF model')\n ps.savefig()\n plt.clf()\n plt.imshow(subpsf, interpolation='nearest', origin='lower')\n plt.title('Subsampled PSF model')\n ps.savefig()\n\n psfimg = subpsf\n del xx, yy, ix, iy, dx, dy\n\n from tractor.psf import PixelizedPSF\n psfimg /= psfimg.sum()\n fluxrescales = {1: 1.04, 2: 1.005, 3: 1.0, 4: 1.0}\n psfimg *= fluxrescales[band]\n tim.psf = PixelizedPSF(psfimg)\n\n if psf_broadening is not None and not pixelized_psf:\n # psf_broadening is a factor by which the PSF FWHMs\n # should be scaled; the PSF is a little wider\n # post-reactivation.\n psf = tim.getPsf()\n from tractor import GaussianMixturePSF\n if isinstance(psf, GaussianMixturePSF):\n debug('Broadening PSF: from', psf)\n p0 = psf.getParams()\n pnames = psf.getParamNames()\n p1 = [p * psf_broadening**2 if 'var' in name else p\n for (p, name) in zip(p0, pnames)]\n psf.setParams(p1)\n debug('Broadened PSF:', 
psf)\n else:\n print('WARNING: cannot apply psf_broadening to WISE PSF of type', type(psf))\n\n wcs = tim.wcs.wcs\n _,fx,fy = wcs.radec2pixelxy(ra, dec)\n x = np.round(fx - 1.).astype(int)\n y = np.round(fy - 1.).astype(int)\n good = (x >= 0) * (x < tw) * (y >= 0) * (y < th)\n # Which sources are in this brick's unique area?\n usrc = radec_in_unique_area(ra, dec, tile.ra1, tile.ra2, tile.dec1, tile.dec2)\n I, = np.nonzero(good * usrc)\n\n nexp[I] = tim.nuims[y[I], x[I]]\n if hasattr(tim, 'mjdmin') and hasattr(tim, 'mjdmax'):\n mjd[I] = (tim.mjdmin + tim.mjdmax) / 2.\n phot.wise_coadd_id[I] = tile.coadd_id\n phot.wise_x[I] = fx[I] - 1.\n phot.wise_y[I] = fy[I] - 1.\n\n central_flux[I] = tim.getImage()[y[I], x[I]]\n del x,y,good,usrc\n\n # PSF norm for depth\n psf = tim.getPsf()\n h,w = tim.shape\n patch = psf.getPointSourcePatch(h//2, w//2).patch\n psfnorm = np.sqrt(np.sum(patch**2))\n # To handle zero-depth, we return 1/nanomaggies^2 units rather than mags.\n # In the small empty patches of the sky (eg W4 in 0922p702), we get sig1 = NaN\n if np.isfinite(tim.sig1):\n phot.get('psfdepth_%s' % wband)[I] = 1. / (tim.sig1 / psfnorm)**2\n\n tim.tile = tile\n tims.append(tim)\n\n if plots:\n plt.clf()\n mn,mx = 0.1, 20000\n plt.hist(np.log10(np.clip(central_flux, mn, mx)), bins=100,\n range=(np.log10(mn), np.log10(mx)))\n logt = np.arange(0, 5)\n plt.xticks(logt, ['%i' % i for i in 10.**logt])\n plt.title('Central fluxes (W%i)' % band)\n plt.axvline(np.log10(20000), color='k')\n plt.axvline(np.log10(1000), color='k')\n ps.savefig()\n\n # Eddie's non-secret recipe:\n #- central pixel <= 1000: 19x19 pix box size\n #- central pixel in 1000 - 20000: 59x59 box size\n #- central pixel > 20000 or saturated: 149x149 box size\n #- object near \"bright star\": 299x299 box size\n nbig = nmedium = nsmall = 0\n for src,cflux in zip(cat, central_flux):\n if cflux > 20000:\n R = 100\n nbig += 1\n elif cflux > 1000:\n R = 30\n nmedium += 1\n else:\n R = 15\n nsmall += 1\n if isinstance(src, PointSource):\n src.fixedRadius = R\n else:\n ### FIXME -- sizes for galaxies..... can we set PSF size separately?\n galrad = 0\n # RexGalaxy is a subclass of ExpGalaxy\n if isinstance(src, (ExpGalaxy, DevGalaxy, SersicGalaxy)):\n galrad = src.shape.re\n pixscale = 2.75\n src.halfsize = int(np.hypot(R, galrad * 5 / pixscale))\n debug('Set WISE source sizes:', nbig, 'big', nmedium, 'medium', nsmall, 'small')\n\n tractor = Tractor(tims, cat)\n if use_ceres:\n from tractor.ceres_optimizer import CeresOptimizer\n tractor.optimizer = CeresOptimizer(BW=ceres_block, BH=ceres_block)\n tractor.freezeParamsRecursive('*')\n tractor.thawPathsTo(wanyband)\n\n t0 = Time()\n R = tractor.optimize_forced_photometry(\n fitstats=True, variance=True, shared_params=False, wantims=wantims)\n info('unWISE forced photometry took', Time() - t0)\n\n if use_ceres:\n term = R.ceres_status['termination']\n # Running out of memory can cause failure to converge and term\n # status = 2. 
Fail completely in this case.\n if term != 0:\n info('Ceres termination status:', term)\n raise RuntimeError('Ceres terminated with status %i' % term)\n\n if wantims:\n ims1 = R.ims1\n # can happen if empty source list (we still want to generate coadds)\n if ims1 is None:\n ims1 = R.ims0\n\n flux_invvars = R.IV\n if R.fitstats is not None:\n for k in fskeys:\n x = getattr(R.fitstats, k)\n fitstats[k] = np.array(x).astype(np.float32)\n\n if save_fits:\n for i,tim in enumerate(tims):\n tile = tim.tile\n (dat, mod, _, chi, _) = ims1[i]\n wcshdr = fitsio.FITSHDR()\n tim.wcs.wcs.add_to_header(wcshdr)\n tag = 'fit-%s-w%i' % (tile.coadd_id, band)\n fitsio.write('%s-data.fits' %\n tag, dat, clobber=True, header=wcshdr)\n fitsio.write('%s-mod.fits' % tag, mod,\n clobber=True, header=wcshdr)\n fitsio.write('%s-chi.fits' % tag, chi,\n clobber=True, header=wcshdr)\n\n if plots:\n # Create models for just the brightest sources\n bright_cat = [src for src in cat\n if src.getBrightness().getBand(wanyband) > 1000]\n debug('Bright soures:', len(bright_cat))\n btr = Tractor(tims, bright_cat)\n for tim in tims:\n mod = btr.getModelImage(tim)\n tile = tim.tile\n tag = '%s W%i' % (tile.coadd_id, band)\n sig1 = tim.sig1\n plt.clf()\n plt.imshow(mod, interpolation='nearest', origin='lower',\n cmap='gray', vmin=-3 * sig1, vmax=25 * sig1)\n plt.colorbar()\n plt.title('%s: bright-star models' % tag)\n ps.savefig()\n\n if get_models:\n for i,tim in enumerate(tims):\n tile = tim.tile\n (dat, mod, _, _, _) = ims1[i]\n models.append((tile.coadd_id, band, tim.wcs.wcs, dat, mod,\n tim.coadd_inverr))\n\n if plots:\n for i,tim in enumerate(tims):\n tile = tim.tile\n tag = '%s W%i' % (tile.coadd_id, band)\n (dat, mod, _, chi, _) = ims1[i]\n sig1 = tim.sig1\n plt.clf()\n plt.imshow(dat, interpolation='nearest', origin='lower',\n cmap='gray', vmin=-3 * sig1, vmax=25 * sig1)\n plt.colorbar()\n plt.title('%s: data' % tag)\n ps.savefig()\n plt.clf()\n plt.imshow(mod, interpolation='nearest', origin='lower',\n cmap='gray', vmin=-3 * sig1, vmax=25 * sig1)\n plt.colorbar()\n plt.title('%s: model' % tag)\n ps.savefig()\n\n plt.clf()\n plt.imshow(chi, interpolation='nearest', origin='lower',\n cmap='gray', vmin=-5, vmax=+5)\n plt.colorbar()\n plt.title('%s: chi' % tag)\n ps.savefig()\n\n nm = np.array([src.getBrightness().getBand(wanyband) for src in cat])\n nm_ivar = flux_invvars\n # Sources out of bounds, eg, never change from their initial\n # fluxes. 
Zero them out instead.\n nm[nm_ivar == 0] = 0.\n\n phot.set('flux_%s' % wband, nm.astype(np.float32))\n phot.set('flux_ivar_%s' % wband, nm_ivar.astype(np.float32))\n for k in fskeys:\n phot.set(k + '_' + wband, fitstats.get(k, np.zeros(len(phot), np.float32)))\n phot.set('nobs_%s' % wband, nexp)\n phot.set('mjd_%s' % wband, mjd)\n\n rtn = wphotduck()\n rtn.phot = phot\n rtn.models = None\n rtn.maskmap = None\n if get_models:\n rtn.models = models\n if get_masks:\n rtn.maskmap = maskmap\n return rtn\n\nclass wphotduck(object):\n pass\n\ndef radec_in_unique_area(rr, dd, ra1, ra2, dec1, dec2):\n '''Are the given points within the given RA,Dec rectangle?\n\n Returns a boolean array.'''\n unique = (dd >= dec1) * (dd < dec2)\n if ra1 < ra2:\n # normal RA\n unique *= (rr >= ra1) * (rr < ra2)\n else:\n # RA wrap-around\n unique[rr > 180] *= (rr[rr > 180] >= ra1)\n unique[rr < 180] *= (rr[rr < 180] < ra2)\n return unique\n\ndef unwise_phot(X):\n '''\n This is the entry-point from runbrick.py, called via mp.map()\n '''\n (key, (wcat, tiles, band, roiradec, wise_ceres, pixelized_psf, get_mods, get_masks, ps,\n move_crpix, modelsky_dir)) = X\n kwargs = dict(roiradecbox=roiradec, band=band, pixelized_psf=pixelized_psf,\n get_masks=get_masks, ps=ps, move_crpix=move_crpix,\n modelsky_dir=modelsky_dir)\n if get_mods:\n kwargs.update(get_models=get_mods)\n\n if wise_ceres and len(wcat) == 0:\n wise_ceres = False\n\n # DEBUG\n #kwargs.update(save_fits=True)\n W = None\n try:\n W = unwise_forcedphot(wcat, tiles, use_ceres=wise_ceres, **kwargs)\n except:\n import traceback\n print('unwise_forcedphot failed:')\n traceback.print_exc()\n if wise_ceres:\n print('Trying without Ceres...')\n try:\n W = unwise_forcedphot(wcat, tiles, use_ceres=False, **kwargs)\n except:\n print('unwise_forcedphot failed (2):')\n traceback.print_exc()\n return key,W\n\ndef collapse_unwise_bitmask(bitmask, band):\n '''\n Converts WISE mask bits (in the unWISE data products) into the\n more compact codes reported in the tractor files as\n WISEMASK_W[12], and the \"maskbits\" WISE extensions.\n\n output bits :\n # 2^0 = bright star core and wings\n # 2^1 = PSF-based diffraction spike\n # 2^2 = optical ghost\n # 2^3 = first latent\n # 2^4 = second latent\n # 2^5 = AllWISE-like circular halo\n # 2^6 = bright star saturation\n # 2^7 = geometric diffraction spike\n '''\n assert((band == 1) or (band == 2))\n from collections import OrderedDict\n\n bits_w1 = OrderedDict([('core_wings', 2**0 + 2**1),\n ('psf_spike', 2**27),\n ('ghost', 2**25 + 2**26),\n ('first_latent', 2**13 + 2**14),\n ('second_latent', 2**17 + 2**18),\n ('circular_halo', 2**23),\n ('saturation', 2**4),\n ('geom_spike', 2**29)])\n\n bits_w2 = OrderedDict([('core_wings', 2**2 + 2**3),\n ('psf_spike', 2**28),\n ('ghost', 2**11 + 2**12),\n ('first_latent', 2**15 + 2**16),\n ('second_latent', 2**19 + 2**20),\n ('circular_halo', 2**24),\n ('saturation', 2**5),\n ('geom_spike', 2**30)])\n\n bits = (bits_w1 if (band == 1) else bits_w2)\n\n # hack to handle both scalar and array inputs\n result = 0*bitmask\n for i, feat in enumerate(bits.keys()):\n result += ((2**i)*(np.bitwise_and(bitmask, bits[feat]) != 0)).astype(np.uint8)\n return result.astype('uint8')\n\n###\n# This is taken directly from tractor/wise.py, replacing only the filename.\n###\ndef unwise_tiles_touching_wcs(wcs, polygons=True):\n '''\n Returns a FITS table (with RA,Dec,coadd_id) of unWISE tiles\n '''\n from astrometry.util.miscutils import polygons_intersect\n from astrometry.util.starutil_numpy import 
degrees_between\n\n from pkg_resources import resource_filename\n atlasfn = resource_filename('legacypipe', 'data/wise-tiles.fits')\n\n T = fits_table(atlasfn)\n trad = wcs.radius()\n wrad = np.sqrt(2.) / 2. * 2048 * 2.75 / 3600.\n rad = trad + wrad\n r, d = wcs.radec_center()\n I, = np.nonzero(np.abs(T.dec - d) < rad)\n I = I[degrees_between(T.ra[I], T.dec[I], r, d) < rad]\n\n if not polygons:\n return T[I]\n # now check actual polygon intersection\n tw, th = wcs.imagew, wcs.imageh\n targetpoly = [(0.5, 0.5), (tw + 0.5, 0.5),\n (tw + 0.5, th + 0.5), (0.5, th + 0.5)]\n cd = wcs.get_cd()\n tdet = cd[0] * cd[3] - cd[1] * cd[2]\n if tdet > 0:\n targetpoly = list(reversed(targetpoly))\n targetpoly = np.array(targetpoly)\n keep = []\n for i in I:\n wwcs = unwise_tile_wcs(T.ra[i], T.dec[i])\n cd = wwcs.get_cd()\n wdet = cd[0] * cd[3] - cd[1] * cd[2]\n H, W = wwcs.shape\n poly = []\n for x, y in [(0.5, 0.5), (W + 0.5, 0.5), (W + 0.5, H + 0.5), (0.5, H + 0.5)]:\n rr,dd = wwcs.pixelxy2radec(x, y)\n _,xx,yy = wcs.radec2pixelxy(rr, dd)\n poly.append((xx, yy))\n if wdet > 0:\n poly = list(reversed(poly))\n poly = np.array(poly)\n if polygons_intersect(targetpoly, poly):\n keep.append(i)\n I = np.array(keep)\n return T[I]\n\n### Also direct from tractor/wise.py\ndef unwise_tile_wcs(ra, dec, W=2048, H=2048, pixscale=2.75):\n from astrometry.util.util import Tan\n '''\n Returns a Tan WCS object at the given RA,Dec center, axis aligned, with the\n given pixel W,H and pixel scale in arcsec/pixel.\n '''\n cowcs = Tan(ra, dec, (W + 1) / 2., (H + 1) / 2.,\n -pixscale / 3600., 0., 0., pixscale / 3600., W, H)\n return cowcs\n" ]
[ [ "scipy.ndimage.morphology.binary_dilation", "numpy.maximum", "numpy.abs", "numpy.isfinite", "numpy.nonzero", "numpy.clip", "numpy.arange", "numpy.sqrt", "numpy.all", "numpy.round", "numpy.bitwise_and", "numpy.log10", "numpy.errstate", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.hypot" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] } ]
GT-SALT/Disfluency-Generation-and-Detection
[ "72126172b466aa74277f3cf0f73b915e5dbeefbb", "72126172b466aa74277f3cf0f73b915e5dbeefbb" ]
[ "disf_gen_coarse2fine/table/Loss.py", "disf_gen_coarse2fine/evaluate.py" ]
[ "\"\"\"\nThis file handles the details of the loss function during training.\n\nThis includes: LossComputeBase and the standard NMTLossCompute, and\n sharded loss compute stuff.\n\"\"\"\nfrom __future__ import division\nfrom itertools import count\nimport torch\nimport torch.nn as nn\nimport random as rnd\n\nimport table\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass CopyGeneratorLoss(nn.Module):\n \"\"\"Copy generator criterion.\"\"\"\n def __init__(self, vocab_size, force_copy, only_disf_loss, unk_index=0,\n ignore_index=-100, eps=1e-20):\n super(CopyGeneratorLoss, self).__init__()\n self.force_copy = force_copy\n self.eps = eps\n self.vocab_size = vocab_size\n self.ignore_index = ignore_index\n self.unk_index = unk_index\n self.only_disf_loss=only_disf_loss\n\n def forward(self, scores, tgt):\n \"\"\"\n Args:\n scores (FloatTensor): ``(batch_size*tgt_len)`` x dynamic vocab size\n whose sum along dim 1 is less than or equal to 1, i.e. cols\n softmaxed.\n tgt tuple (target, align)\n align (LongTensor): ``(tgt_len, batch_size)``\n target (LongTensor): ``(tgt_len, batch_size)``\n tgt_loss_mask (LongTensor): ``(tgt_len, batch_size)``\n \"\"\"\n # probabilities assigned by the model to the gold targets\n align=tgt[1]\n target=tgt[0]\n tgt_loss_mask=tgt[2]\n #print(scores, target)\n #print(scores.size(), target.size())\n target = target.view(-1)\n align = align.view(-1)\n tgt_loss_mask = tgt_loss_mask.view(-1)\n\n\n\n vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze(1)\n\n # probability of tokens copied from source\n copy_ix = align.unsqueeze(1) + self.vocab_size\n copy_tok_probs = scores.gather(1, copy_ix).squeeze(1) # Set scores for unk to 0 and add eps\n copy_tok_probs[align == self.unk_index] = 0\n copy_tok_probs += self.eps # to avoid -inf logs\n\n # find the indices in which you do not use the copy mechanism\n non_copy = align == self.unk_index\n if not self.force_copy:\n non_copy = non_copy | (target != self.unk_index)\n\n probs = torch.where(\n non_copy, copy_tok_probs + vocab_probs, copy_tok_probs\n )\n\n loss = - probs.log() # just NLLLoss; can the module be incorporated?\n\n # Drop padding.\n if self.only_disf_loss:\n loss[tgt_loss_mask == 1] = 0\n else:\n loss[tgt == self.ignore_index] = 0\n\n '''if self.normalize_by_length:\n # Compute Loss as NLL divided by seq length\n tgt_lens = batch.tgt[:, :, 0].ne(self.padding_idx).sum(0).float()\n # Compute Total Loss per sequence in batch\n loss = loss.view(-1, batch.batch_size).sum(0)\n # Divide by length of each sequence and sum\n loss = torch.div(loss, tgt_lens).sum()\n else:'''\n loss = loss.sum()\n return loss\n\nclass LossCompute(nn.Module):\n def __init__(self, vocab, opt, fields,unk_index=0,\n ignore_index=-100,smooth_eps=0):\n super(LossCompute, self).__init__()\n self.criterion = {}\n self.label_weights=torch.ones(len(fields['src_label'].vocab),dtype=torch.float,requires_grad=False,device=device)\n self.label_weights[fields['src_label'].vocab.stoi[table.IO.BOD_LABEL]]=opt.disf_label_weight\n self.label_weights[fields['src_label'].vocab.stoi[table.IO.UNK_WORD]] = 0\n self.label_weights[fields['src_label'].vocab.stoi[table.IO.PAD_WORD]] = 0\n self.criterion['lay'] = nn.NLLLoss( weight=self.label_weights,\n reduction='sum', ignore_index=ignore_index)\n if opt.no_attention:\n self.criterion['tgt'] = nn.NLLLoss(\n reduction='sum', ignore_index=ignore_index)\n else:\n if opt.no_copy:\n self.criterion['tgt'] = nn.NLLLoss(\n reduction='sum', ignore_index=ignore_index)\n else:\n 
self.criterion['tgt'] = CopyGeneratorLoss(len(vocab),\n opt.copy_attn_force, opt.only_disf_loss, unk_index=unk_index,\n ignore_index=ignore_index)\n\n def compute_loss(self, pred, gold):\n loss_list = []\n for loss_name in ('lay', 'tgt'):\n if loss_name not in gold:\n continue\n '''print(loss_name)\n print(pred[loss_name].size())\n print(gold[loss_name].size())'''\n loss = self.criterion[loss_name](pred[loss_name], gold[loss_name])\n loss_list.append(loss)\n # sum up the loss functions\n return loss_list, self.label_weights[gold['lay']].sum()#sum(loss_list)\n\nclass SegLossCompute(nn.Module):\n def __init__(self, vocab, opt, fields,unk_index=0,\n ignore_index=-100,smooth_eps=0):\n super(SegLossCompute, self).__init__()\n self.criterion= nn.NLLLoss(\n reduction='sum', ignore_index=ignore_index)\n\n def compute_loss(self, pred, gold):\n loss = self.criterion(pred, gold)\n\n return loss\n", "from __future__ import division\nimport os\nimport argparse\nimport torch\nimport codecs\nimport glob\nfrom nltk.translate.bleu_score import corpus_bleu\n\nimport table\nimport table.IO\nimport opts\n\nparser = argparse.ArgumentParser(description='evaluate.py')\nopts.translate_opts(parser)\nopt = parser.parse_args()\n#torch.cuda.set_device(opt.gpu)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nopt.dataset = opt.dataset + opt.tag_type\nopt.anno = os.path.join(opt.root_dir, opt.dataset, '{}.txt'.format(opt.split))\n\nif opt.beam_size > 0:\n opt.batch_size = 1\n\nprint('trans_opt:',vars(opt))\n\ndef main():\n dummy_parser = argparse.ArgumentParser(description='train.py')\n opts.model_opts(dummy_parser)\n opts.train_opts(dummy_parser)\n dummy_opt = dummy_parser.parse_known_args([])[0]\n\n js_list = table.IO.read_anno(opt.anno, opt)\n print('data_len',len(js_list))\n\n metric_name_list = ['lay','tgt']\n with open(os.path.join(opt.root_dir, opt.dataset, opt.output_file), 'a', encoding='utf-8') as writer:\n #writer.write('{}\\n'.format(vars(translator.model_opt)))\n writer.write('trans_opt: {}\\n'.format(vars(opt)))\n prev_best = (None, None, None, None)\n for fn_model in glob.glob(opt.model_path):\n opt.model = fn_model\n print(fn_model)\n #print(opt.anno)\n\n translator = table.Translator(opt, dummy_opt.__dict__)\n data = table.IO.TableDataset(\n js_list, translator.fields, translator.model_opt, test=True)\n\n\n test_data = table.IO.OrderedIterator(\n dataset=data, device=device, batch_size=opt.batch_size, train=False, sort=True, sort_within_batch=False)\n\n # inference\n r_list = []\n with torch.no_grad():\n for batch in test_data:\n r = translator.translate(batch,js_list)\n r_list += r\n\n r_list.sort(key=lambda x: x.idx)\n assert len(r_list) == len(js_list), 'len(r_list) != len(js_list): {} != {}'.format(\n len(r_list), len(js_list))\n\n # evaluation\n for pred, gold in zip(r_list, js_list):\n pred.eval(gold, gold_diversity=opt.gold_diversity if 'gold_diversity' in opt.__dict__ else False)\n with open(os.path.join(opt.root_dir, opt.dataset, opt.output_file), 'a', encoding='utf-8') as writer:\n writer.write('{}\\n'.format(fn_model))\n print('Results:\\n')\n for metric_name in metric_name_list:\n c_correct = sum((x.correct[metric_name] for x in r_list))\n acc = c_correct / len(r_list)\n print('{}: {} / {} = {:.2%}\\n'.format(metric_name,\n c_correct, len(r_list), acc))\n writer.write('{}: {} / {} = {:.2%}\\n'.format(metric_name,\n c_correct, len(r_list), acc))\n if metric_name == 'tgt' and (prev_best[0] is None or acc > prev_best[1]):\n prev_best = (fn_model, acc, r_list, 
translator.model_opt)\n print('disf_less_than_one:', sum((x.disflen_lessthanone for x in r_list)))\n onegrams=[]\n twograms=[]\n for x in r_list:\n onegrams.extend(x.one_grams)\n twograms.extend(x.two_grams)\n c_correct = sum(onegrams)\n acc = c_correct / len(onegrams)\n print('{}: {} / {} = {:.2%}'.format('onegram',\n c_correct, len(onegrams), acc))\n writer.write('{}: {} / {} = {:.2%}\\n'.format('onegram',\n c_correct, len(onegrams), acc))\n c_correct = sum(twograms)\n acc = c_correct / len(twograms)\n print('{}: {} / {} = {:.2%}'.format('twogram',\n c_correct, len(twograms), acc))\n writer.write('{}: {} / {} = {:.2%}\\n'.format('twogram',\n c_correct, len(twograms), acc))\n references=[]\n candidates=[]\n for x in r_list:\n references.append([x.gold_tgt])\n candidates.append(x.tgt)\n blue_score = corpus_bleu(references, candidates)\n print('BLUE:',blue_score)\n writer.write('{} = {:.5}\\n'.format('BLUE', blue_score))\n\n if (prev_best[0] is not None):\n print(\"Writing to \"+os.path.join(opt.root_dir, opt.dataset, opt.output_file))\n disf_generated=0\n with open(os.path.join(opt.root_dir, opt.dataset, opt.output_file+'_generated'), 'w', encoding='utf-8') as writer:\n for x in prev_best[2]:\n if 'I' in x.tgt_tags:\n disf_generated+=1\n assert (len(x.tgt) == len(x.tgt_tags))\n writer.write('\\t'.join(x.tgt) + '\\n')\n writer.write('\\t'.join(['P'] * len(x.tgt_tags)) + '\\n')\n writer.write('\\t'.join(x.tgt_tags) + '\\n')\n writer.write('\\n')\n with open(os.path.join(opt.root_dir, opt.dataset, opt.output_file+'_out'), 'w', encoding='utf-8') as writer:\n for x in prev_best[2]:\n writer.write(' '.join(x.tgt) + '\\n')\n print('Disf_sents/Total_sents',disf_generated,len(prev_best[2]))\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.nn.NLLLoss", "torch.cuda.is_available", "torch.where" ], [ "torch.no_grad", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vrsub/openconcept
[ "459aa24269cf54122ee4cfb3edf173c79c880be9", "459aa24269cf54122ee4cfb3edf173c79c880be9", "459aa24269cf54122ee4cfb3edf173c79c880be9" ]
[ "openconcept/components/splitter.py", "openconcept/utilities/tests/test_selector.py", "examples/B738_aerostructural.py" ]
[ "from __future__ import division\nimport numpy as np\nfrom openmdao.api import ExplicitComponent\nfrom openmdao.api import Group\n\n\nclass PowerSplit(ExplicitComponent):\n \"\"\"\n A power split mechanism for mechanical or electrical power.\n\n Inputs\n ------\n power_in : float\n Power fed to the splitter. (vector, W)\n power_rating : float\n Maximum rated power of the split mechanism. (scalar, W)\n power_split_fraction:\n If ``'rule'`` is set to ``'fraction'``, sets percentage of input power directed\n to Output A (minus losses). (vector, dimensionless)\n power_split_amount:\n If ``'rule'`` is set to ``'fixed'``, sets amount of input power to Output A (minus\n losses). (vector, W)\n\n Outputs\n -------\n power_out_A : float\n Power sent to first output (vector, W)\n power_out_B : float\n Power sent to second output (vector, W)\n heat_out : float\n Waste heat produced (vector, W)\n component_cost : float\n Nonrecurring cost of the component (scalar, USD)\n component_weight : float\n Weight of the component (scalar, kg)\n component_sizing_margin : float\n Equal to 1 when fed full rated power (vector, dimensionless)\n\n Options\n -------\n num_nodes : int\n Number of analysis points to run (sets vec length; default 1)\n rule : str\n Power split control rule to use; either ``'fixed'`` where a set\n amount of power is sent to Output A or ``'fraction'`` where a\n fraction of the total power is sent to Output A\n efficiency : float\n Component efficiency (default 1)\n weight_inc : float\n Weight per unit rated power\n (default 0, kg/W)\n weight_base : float\n Base weight\n (default 0, kg)\n cost_inc : float\n Nonrecurring cost per unit power\n (default 0, USD/W)\n cost_base : float\n Base cost\n (default 0 USD)\n \"\"\"\n def initialize(self):\n # define control rules\n self.options.declare('num_nodes', default=1, desc='Number of flight/control conditions')\n self.options.declare('rule', default='fraction',\n desc='Control strategy - fraction or fixed power')\n\n self.options.declare('efficiency', default=1., desc='Efficiency (dimensionless)')\n self.options.declare('weight_inc', default=0., desc='kg per input watt')\n self.options.declare('weight_base', default=0., desc='kg base weight')\n self.options.declare('cost_inc', default=0., desc='$ cost per input watt')\n self.options.declare('cost_base', default=0., desc='$ cost base')\n\n def setup(self):\n nn = self.options['num_nodes']\n self.add_input('power_in', units='W',\n desc='Input shaft power or incoming electrical load', shape=(nn,))\n self.add_input('power_rating', val=99999999, units='W', desc='Split mechanism power rating')\n\n rule = self.options['rule']\n if rule == 'fraction':\n self.add_input('power_split_fraction', val=0.5,\n desc='Fraction of power to output A', shape=(nn,))\n elif rule == 'fixed':\n self.add_input('power_split_amount', units='W',\n desc='Raw amount of power to output A', shape=(nn,))\n else:\n msg = 'Specify either \"fraction\" or \"fixed\" as power split control rule'\n raise ValueError(msg)\n\n eta = self.options['efficiency']\n weight_inc = self.options['weight_inc']\n weight_base = self.options['weight_base']\n cost_inc = self.options['cost_inc']\n cost_base = self.options['cost_base']\n\n self.add_output('power_out_A', units='W', desc='Output power or load to A', shape=(nn,))\n self.add_output('power_out_B', units='W', desc='Output power or load to B', shape=(nn,))\n self.add_output('heat_out', units='W', desc='Waste heat out', shape=(nn,))\n self.add_output('component_cost', units='USD', desc='Splitter 
component cost')\n self.add_output('component_weight', units='kg', desc='Splitter component weight')\n self.add_output('component_sizing_margin', desc='Fraction of rated power', shape=(nn,))\n\n if rule == 'fraction':\n self.declare_partials(['power_out_A', 'power_out_B'],\n ['power_in', 'power_split_fraction'],\n rows=range(nn), cols=range(nn))\n elif rule == 'fixed':\n self.declare_partials(['power_out_A', 'power_out_B'],\n ['power_in', 'power_split_amount'],\n rows=range(nn), cols=range(nn))\n self.declare_partials('heat_out', 'power_in', val=(1 - eta) * np.ones(nn),\n rows=range(nn), cols=range(nn))\n self.declare_partials('component_cost', 'power_rating', val=cost_inc)\n self.declare_partials('component_weight', 'power_rating', val=weight_inc)\n self.declare_partials('component_sizing_margin', 'power_in',\n rows=range(nn), cols=range(nn))\n self.declare_partials('component_sizing_margin', 'power_rating')\n\n def compute(self, inputs, outputs):\n nn = self.options['num_nodes']\n rule = self.options['rule']\n eta = self.options['efficiency']\n weight_inc = self.options['weight_inc']\n weight_base = self.options['weight_base']\n cost_inc = self.options['cost_inc']\n cost_base = self.options['cost_base']\n\n if rule == 'fraction':\n outputs['power_out_A'] = inputs['power_in'] * inputs['power_split_fraction'] * eta\n outputs['power_out_B'] = inputs['power_in'] * (1 - inputs['power_split_fraction']) * eta\n elif rule == 'fixed':\n # check to make sure enough power is available\n # if inputs['power_in'] < inputs['power_split_amount']:\n not_enough_idx = np.where(inputs['power_in'] < inputs['power_split_amount'])\n po_A = np.zeros(nn)\n po_B = np.zeros(nn)\n po_A[not_enough_idx] = inputs['power_in'][not_enough_idx] * eta\n po_B[not_enough_idx] = np.zeros(nn)[not_enough_idx]\n # else:\n enough_idx = np.where(inputs['power_in'] >= inputs['power_split_amount'])\n po_A[enough_idx] = inputs['power_split_amount'][enough_idx] * eta\n po_B[enough_idx] = (inputs['power_in'][enough_idx] -\n inputs['power_split_amount'][enough_idx]) * eta\n outputs['power_out_A'] = po_A\n outputs['power_out_B'] = po_B\n outputs['heat_out'] = inputs['power_in'] * (1 - eta)\n outputs['component_cost'] = inputs['power_rating'] * cost_inc + cost_base\n outputs['component_weight'] = inputs['power_rating'] * weight_inc + weight_base\n outputs['component_sizing_margin'] = inputs['power_in'] / inputs['power_rating']\n\n def compute_partials(self, inputs, J):\n nn = self.options['num_nodes']\n rule = self.options['rule']\n eta = self.options['efficiency']\n if rule == 'fraction':\n J['power_out_A', 'power_in'] = inputs['power_split_fraction'] * eta\n J['power_out_A', 'power_split_fraction'] = inputs['power_in'] * eta\n J['power_out_B', 'power_in'] = (1 - inputs['power_split_fraction']) * eta\n J['power_out_B', 'power_split_fraction'] = -inputs['power_in'] * eta\n elif rule == 'fixed':\n not_enough_idx = np.where(inputs['power_in'] < inputs['power_split_amount'])\n enough_idx = np.where(inputs['power_in'] >= inputs['power_split_amount'])\n # if inputs['power_in'] < inputs['power_split_amount']:\n Jpo_A_pi = np.zeros(nn)\n Jpo_A_ps = np.zeros(nn)\n Jpo_B_pi = np.zeros(nn)\n Jpo_B_ps = np.zeros(nn)\n Jpo_A_pi[not_enough_idx] = eta * np.ones(nn)[not_enough_idx]\n Jpo_A_ps[not_enough_idx] = np.zeros(nn)[not_enough_idx]\n Jpo_B_pi[not_enough_idx] = np.zeros(nn)[not_enough_idx]\n Jpo_B_ps[not_enough_idx] = np.zeros(nn)[not_enough_idx]\n # else:\n Jpo_A_ps[enough_idx] = eta * np.ones(nn)[enough_idx]\n Jpo_A_pi[enough_idx] = 
np.zeros(nn)[enough_idx]\n Jpo_B_ps[enough_idx] = -eta * np.ones(nn)[enough_idx]\n Jpo_B_pi[enough_idx] = eta * np.ones(nn)[enough_idx]\n J['power_out_A', 'power_in'] = Jpo_A_pi\n J['power_out_A', 'power_split_amount'] = Jpo_A_ps\n J['power_out_B', 'power_in'] = Jpo_B_pi\n J['power_out_B', 'power_split_amount'] = Jpo_B_ps\n J['component_sizing_margin', 'power_in'] = 1 / inputs['power_rating']\n J['component_sizing_margin', 'power_rating'] = - (inputs['power_in'] /\n inputs['power_rating'] ** 2)\n\n\nclass FlowSplit(ExplicitComponent):\n \"\"\"\n Split incoming flow from one inlet into two outlets at a fractional ratio.\n\n Inputs\n ------\n mdot_in : float\n Mass flow rate of incoming fluid (vector, kg/s)\n mdot_split_fraction : float\n Fraction of incoming mass flow directed to output A, must be in\n range 0-1 inclusive (vector, dimensionless)\n \n Outputs\n -------\n mdot_out_A : float\n Mass flow rate directed to first output (vector, kg/s)\n mdot_out_B : float\n Mass flow rate directed to second output (vector, kg/s)\n \n Options\n -------\n num_nodes : int\n Number of analysis points to run (sets vec length; default 1)\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes', default=1, desc='Number of analysis points')\n \n def setup(self):\n nn = self.options['num_nodes']\n rng = np.arange(0, nn)\n\n self.add_input('mdot_in', units='kg/s', shape=(nn,))\n self.add_input('mdot_split_fraction', units=None, shape=(nn,), val=0.5)\n\n self.add_output('mdot_out_A', units='kg/s', shape=(nn,))\n self.add_output('mdot_out_B', units='kg/s', shape=(nn,))\n\n self.declare_partials(['mdot_out_A'], ['mdot_in', 'mdot_split_fraction'], rows=rng, cols=rng)\n self.declare_partials(['mdot_out_B'], ['mdot_in', 'mdot_split_fraction'], rows=rng, cols=rng)\n \n def compute(self, inputs, outputs):\n if np.any(inputs['mdot_split_fraction'] < 0) or np.any(inputs['mdot_split_fraction'] > 1):\n raise RuntimeWarning(f\"mdot_split_fraction of {inputs['mdot_split_fraction']} has at least one element out of range [0, 1]\")\n outputs['mdot_out_A'] = inputs['mdot_in'] * inputs['mdot_split_fraction']\n outputs['mdot_out_B'] = inputs['mdot_in'] * (1 - inputs['mdot_split_fraction'])\n\n def compute_partials(self, inputs, J):\n J['mdot_out_A', 'mdot_in'] = inputs['mdot_split_fraction']\n J['mdot_out_A', 'mdot_split_fraction'] = inputs['mdot_in']\n\n J['mdot_out_B', 'mdot_in'] = 1 - inputs['mdot_split_fraction']\n J['mdot_out_B', 'mdot_split_fraction'] = - inputs['mdot_in']\n\n\nclass FlowCombine(ExplicitComponent):\n \"\"\"\n Combines two incoming flows into a single outgoing flow and does a weighted average\n of their temperatures based on the mass flow rate of each to compute the outlet temp.\n\n Inputs\n ------\n mdot_in_A : float\n Mass flow rate of fluid from first inlet, should be nonegative (vector, kg/s)\n mdot_in_B : float\n Mass flow rate of fluid from second inlet, should be nonnegative (vector, kg/s)\n T_in_A : float\n Temperature of fluid from first inlet (vector, K)\n T_in_B : float\n Temperature of fluid from second inlet (vector, K)\n\n Outputs\n -------\n mdot_out : float\n Outgoing fluid mass flow rate (vector, kg/s)\n T_out : float\n Outgoing fluid temperature (vector, K)\n\n Options\n -------\n num_nodes : int\n Number of analysis points (scalar, default 1)\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes', default=1, desc='Number of analysis points')\n \n def setup(self):\n nn = self.options['num_nodes']\n rng = np.arange(0, nn)\n\n self.add_input('mdot_in_A', 
units='kg/s', shape=(nn,))\n self.add_input('mdot_in_B', units='kg/s', shape=(nn,))\n self.add_input('T_in_A', units='K', shape=(nn,))\n self.add_input('T_in_B', units='K', shape=(nn,))\n\n self.add_output('mdot_out', units='kg/s', shape=(nn,))\n self.add_output('T_out', units='K', shape=(nn,))\n\n self.declare_partials(['mdot_out'], ['mdot_in_A', 'mdot_in_B'], rows=rng, cols=rng)\n self.declare_partials(['T_out'], ['mdot_in_A', 'mdot_in_B', 'T_in_A', 'T_in_B'], rows=rng, cols=rng)\n \n def compute(self, inputs, outputs):\n mdot_A = inputs['mdot_in_A']\n mdot_B = inputs['mdot_in_B']\n outputs['mdot_out'] = mdot_A + mdot_B\n # Weighted average of temperatures for output temperature\n outputs['T_out'] = (mdot_A * inputs['T_in_A'] + mdot_B * inputs['T_in_B']) / (mdot_A + mdot_B)\n\n def compute_partials(self, inputs, J):\n nn = self.options['num_nodes']\n J['mdot_out', 'mdot_in_A'] = np.ones((nn,))\n J['mdot_out', 'mdot_in_B'] = np.ones((nn,))\n\n mdot_A = inputs['mdot_in_A']\n mdot_B = inputs['mdot_in_B']\n mdot = mdot_A + mdot_B\n T_A = inputs['T_in_A']\n T_B = inputs['T_in_B']\n J['T_out', 'mdot_in_A'] = (mdot * T_A - mdot_A * T_A - mdot_B * T_B) / (mdot**2)\n J['T_out', 'mdot_in_B'] = (mdot * T_B - mdot_A * T_A - mdot_B * T_B) / (mdot**2)\n J['T_out', 'T_in_A'] = mdot_A / mdot\n J['T_out', 'T_in_B'] = mdot_B / mdot", "from __future__ import division\nimport unittest\nimport numpy as np\nfrom openmdao.utils.assert_utils import assert_near_equal, assert_check_partials\nfrom openmdao.api import Problem\nfrom openconcept.utilities.selector import SelectorComp\n\nclass SelectorCompTestCase(unittest.TestCase):\n \"\"\"\n Test the SelectorComp component\n \"\"\"\n def test_zero_inputs(self):\n p = Problem()\n p.model.add_subsystem('select', SelectorComp(input_names=[]), promotes=['*'])\n with self.assertRaises(ValueError):\n p.setup()\n\n def test_one_input(self):\n p = Problem()\n p.model.add_subsystem('select', SelectorComp(input_names=['A']), promotes=['*'])\n p.setup(check=True, force_alloc_complex=True)\n p.set_val('A', np.array([5.7]))\n p.set_val('selector', np.array([0]))\n p.run_model()\n assert_near_equal(p['result'], np.array([5.7]))\n\n partials = p.check_partials(method='cs',compact_print=True)\n assert_check_partials(partials)\n\n p.set_val('selector', np.array([1]))\n with self.assertRaises(RuntimeWarning):\n p.run_model()\n \n def test_two_inputs(self):\n nn = 5\n p = Problem()\n p.model.add_subsystem('select', SelectorComp(num_nodes=nn, input_names=['A', 'B']), promotes=['*'])\n p.setup(check=True, force_alloc_complex=True)\n p.set_val('A', np.array([5.7, 2.3, -10., 42., 77.]))\n p.set_val('B', np.array([-1., -1., -1., -1., -2.]))\n p.set_val('selector', np.array([0, 1, 1, 0, 1]))\n p.run_model()\n assert_near_equal(p['result'], np.array([5.7, -1., -1., 42., -2.]))\n \n partials = p.check_partials(method='cs',compact_print=True)\n assert_check_partials(partials)\n\n p.set_val('A', np.ones(nn))\n p.set_val('B', np.zeros(nn))\n p.set_val('selector', np.zeros(nn))\n p.run_model()\n assert_near_equal(p['result'], np.ones(nn))\n\n p.set_val('selector', np.array([0, 1, -1, 0, 0]))\n with self.assertRaises(RuntimeWarning):\n p.run_model()\n \n p.set_val('selector', np.array([0, 1, 2, 0, 0]))\n with self.assertRaises(RuntimeWarning):\n p.run_model()\n \n def test_three_inputs(self):\n nn = 5\n p = Problem()\n p.model.add_subsystem('selector', SelectorComp(num_nodes=nn, input_names=['A', 'B', 'C'], units='g'),\n promotes=['*'])\n p.setup(check=True, force_alloc_complex=True)\n 
p.set_val('A', np.array([5.7, 2.3, -10., 2., 77.]), units='g')\n p.set_val('B', np.array([-1., -1., -1., -1., -2.]), units='kg')\n p.set_val('C', 42.*np.ones(nn), units='g')\n p.set_val('selector', np.array([0, 1, 2, 0, 2]))\n p.run_model()\n assert_near_equal(p['result'], np.array([5.7, -1000., 42., 2., 42.]))\n \n partials = p.check_partials(method='cs',compact_print=True)\n assert_check_partials(partials)\n\n p.set_val('A', 5.*np.ones(nn), units='g')\n p.set_val('B', 6.*np.ones(nn), units='g')\n p.set_val('C', 7.*np.ones(nn), units='g')\n p.set_val('selector', np.zeros(nn))\n p.run_model()\n assert_near_equal(p['result'], 5.*np.ones(nn))\n \n p.set_val('selector', np.ones(nn))\n p.run_model()\n assert_near_equal(p['result'], 6.*np.ones(nn))\n\n p.set_val('selector', 2.*np.ones(nn))\n p.run_model()\n assert_near_equal(p['result'], 7.*np.ones(nn))\n\n p.set_val('selector', np.array([-1, 1, 0, 2, 0]))\n with self.assertRaises(RuntimeWarning):\n p.run_model()\n \n p.set_val('selector', np.array([0, 1, -1, 2, 3]))\n with self.assertRaises(RuntimeWarning):\n p.run_model()", "\"\"\"\nThis work was the basis of the following paper.\nPlease cite it if you use this for your own publication!\n\n@InProceedings{Adler2022a,\n author = {Eytan J. Adler and Joaquim R. R. A. Martins},\n title = {Aerostructural wing design optimization considering full mission analysis},\n booktitle = {AIAA SciTech Forum},\n doi = {10.2514/6.2022-0382},\n month = {January},\n year = {2022}\n}\n\nEytan Adler (Jan 2022)\n\"\"\"\n\nfrom __future__ import division\nimport sys\nimport os\nimport warnings\nimport numpy as np\n\nsys.path.insert(0, os.getcwd())\nimport openmdao.api as om\nimport openconcept.api as oc\n# imports for the airplane model itself\nfrom openconcept.analysis.openaerostruct.aerostructural import OASAerostructDragPolar, OASAerostructDragPolarExact\nfrom examples.aircraft_data.B738 import data as acdata\nfrom openconcept.analysis.performance.mission_profiles import BasicMission\nfrom openconcept.components.cfm56 import CFM56\nfrom openconcept.analysis.openaerostruct.aerostructural import Aerostruct\nfrom openconcept.analysis.aerodynamics import Lift\nfrom openconcept.analysis.atmospherics.dynamic_pressure_comp import DynamicPressureComp\n\nNUM_X = 5\nNUM_Y = 15\nNUM_TWIST = 3\nNUM_TOVERC = 3\nNUM_SKIN = 3\nNUM_SPAR = 3\nUSE_SURROGATE = True\n\nclass B738AirplaneModel(oc.IntegratorGroup):\n \"\"\"\n A custom model specific to the Boeing 737-800 airplane.\n This class will be passed in to the mission analysis code.\n\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes', default=1)\n self.options.declare('flight_phase', default=None)\n\n def setup(self):\n nn = self.options['num_nodes']\n flight_phase = self.options['flight_phase']\n\n\n # a propulsion system needs to be defined in order to provide thrust\n # information for the mission analysis code\n propulsion_promotes_inputs = [\"fltcond|*\", \"throttle\"]\n\n self.add_subsystem('propmodel', CFM56(num_nodes=nn, plot=False),\n promotes_inputs=propulsion_promotes_inputs)\n\n doubler = om.ExecComp(['thrust=2*thrust_in', 'fuel_flow=2*fuel_flow_in'], \n thrust_in={'val': 1.0*np.ones((nn,)),\n 'units': 'kN'},\n thrust={'val': 1.0*np.ones((nn,)),\n 'units': 'kN'},\n fuel_flow={'val': 1.0*np.ones((nn,)),\n 'units': 'kg/s',\n 'tags': ['integrate', 'state_name:fuel_used', 'state_units:kg', 'state_val:1.0', 'state_promotes:True']},\n fuel_flow_in={'val': 1.0*np.ones((nn,)),\n 'units': 'kg/s'})\n \n self.add_subsystem('doubler', doubler, 
promotes_outputs=['*'])\n self.connect('propmodel.thrust', 'doubler.thrust_in')\n self.connect('propmodel.fuel_flow', 'doubler.fuel_flow_in')\n\n oas_surf_dict = {} # options for OpenAeroStruct\n # Grid size and number of spline control points (must be same as B738AnalysisGroup)\n global NUM_X, NUM_Y, NUM_TWIST, NUM_TOVERC, NUM_SKIN, NUM_SPAR, USE_SURROGATE\n if USE_SURROGATE:\n self.add_subsystem('drag', OASAerostructDragPolar(num_nodes=nn, num_x=NUM_X, num_y=NUM_Y,\n num_twist=NUM_TWIST, num_toverc=NUM_TOVERC,\n num_skin=NUM_SKIN, num_spar=NUM_SPAR,\n surf_options=oas_surf_dict),\n promotes_inputs=['fltcond|CL', 'fltcond|M', 'fltcond|h', 'fltcond|q', 'ac|geom|wing|S_ref',\n 'ac|geom|wing|AR', 'ac|geom|wing|taper', 'ac|geom|wing|c4sweep',\n 'ac|geom|wing|twist', 'ac|geom|wing|toverc',\n 'ac|geom|wing|skin_thickness', 'ac|geom|wing|spar_thickness',\n 'ac|aero|CD_nonwing'],\n promotes_outputs=['drag', 'ac|weights|W_wing', ('failure', 'ac|struct|failure')])\n else:\n self.add_subsystem('drag', OASAerostructDragPolarExact(num_nodes=nn, num_x=NUM_X, num_y=NUM_Y,\n num_twist=NUM_TWIST, num_toverc=NUM_TOVERC,\n num_skin=NUM_SKIN, num_spar=NUM_SPAR,\n surf_options=oas_surf_dict),\n promotes_inputs=['fltcond|CL', 'fltcond|M', 'fltcond|h', 'fltcond|q', 'ac|geom|wing|S_ref',\n 'ac|geom|wing|AR', 'ac|geom|wing|taper', 'ac|geom|wing|c4sweep',\n 'ac|geom|wing|twist', 'ac|geom|wing|toverc',\n 'ac|geom|wing|skin_thickness', 'ac|geom|wing|spar_thickness',\n 'ac|aero|CD_nonwing'],\n promotes_outputs=['drag', 'ac|weights|W_wing', ('failure', 'ac|struct|failure')])\n\n # generally the weights module will be custom to each airplane\n passthru = om.ExecComp('OEW=x',\n x={'val': 1.0,\n 'units': 'kg'},\n OEW={'val': 1.0,\n 'units': 'kg'})\n self.add_subsystem('OEW', passthru,\n promotes_inputs=[('x', 'ac|weights|OEW')],\n promotes_outputs=['OEW'])\n\n # Use Raymer as estimate for 737 original wing weight, subtract it\n # out, then add in OpenAeroStruct wing weight estimate\n self.add_subsystem('weight', oc.AddSubtractComp(output_name='weight',\n input_names=['ac|weights|MTOW', 'fuel_used',\n 'ac|weights|orig_W_wing',\n 'ac|weights|W_wing'],\n units='kg', vec_size=[1, nn, 1, 1],\n scaling_factors=[1, -1, -1, 1]),\n promotes_inputs=['*'],\n promotes_outputs=['weight'])\n\nclass B738AnalysisGroup(om.Group):\n def initialize(self):\n self.options.declare('num_nodes', default=11, desc='Number of analysis points per flight segment')\n self.options.declare('num_x', default=3, desc='Aerostructural chordwise nodes')\n self.options.declare('num_y', default=7, desc='Aerostructural halfspan nodes')\n self.options.declare('num_twist', default=3, desc='Number of twist control points')\n self.options.declare('num_toverc', default=3, desc='Number of t/c control points')\n self.options.declare('num_skin', default=3, desc='Number of skin control points')\n self.options.declare('num_spar', default=3, desc='Number of spar control points')\n self.options.declare('use_surrogate', default=True, desc='Use surrogate for aerostructural drag ' +\n 'polar instead of OpenAeroStruct directly')\n\n def setup(self):\n # Define number of analysis points to run pers mission segment\n nn = self.options['num_nodes']\n\n global NUM_X, NUM_Y, NUM_TWIST, NUM_TOVERC, NUM_SKIN, NUM_SPAR, USE_SURROGATE\n NUM_X = self.options['num_x']\n NUM_Y = self.options['num_y']\n NUM_TWIST = self.options['num_twist']\n NUM_TOVERC = self.options['num_toverc']\n NUM_SKIN = self.options['num_skin']\n NUM_SPAR = self.options['num_spar']\n USE_SURROGATE = 
self.options['use_surrogate']\n\n # Define a bunch of design varaiables and airplane-specific parameters\n dv_comp = self.add_subsystem('dv_comp', oc.DictIndepVarComp(acdata),\n promotes_outputs=[\"*\"])\n dv_comp.add_output_from_dict('ac|aero|CLmax_TO')\n dv_comp.add_output_from_dict('ac|aero|polar|e')\n dv_comp.add_output_from_dict('ac|aero|polar|CD0_TO')\n dv_comp.add_output_from_dict('ac|aero|polar|CD0_cruise')\n\n dv_comp.add_output_from_dict('ac|geom|wing|S_ref')\n dv_comp.add_output_from_dict('ac|geom|wing|AR')\n dv_comp.add_output_from_dict('ac|geom|wing|c4sweep')\n dv_comp.add_output_from_dict('ac|geom|wing|taper')\n # dv_comp.add_output_from_dict('ac|geom|wing|toverc')\n dv_comp.add_output_from_dict('ac|geom|hstab|S_ref')\n dv_comp.add_output_from_dict('ac|geom|hstab|c4_to_wing_c4')\n dv_comp.add_output_from_dict('ac|geom|vstab|S_ref')\n\n dv_comp.add_output_from_dict('ac|geom|nosegear|length')\n dv_comp.add_output_from_dict('ac|geom|maingear|length')\n\n dv_comp.add_output_from_dict('ac|weights|MTOW')\n dv_comp.add_output_from_dict('ac|weights|W_fuel_max')\n dv_comp.add_output_from_dict('ac|weights|MLW')\n dv_comp.add_output_from_dict('ac|weights|OEW')\n\n dv_comp.add_output_from_dict('ac|propulsion|engine|rating')\n\n dv_comp.add_output_from_dict('ac|num_passengers_max')\n dv_comp.add_output_from_dict('ac|q_cruise')\n\n # Aerostructural design parameters\n twist = np.linspace(-2, 2, NUM_TWIST)\n toverc = acdata['ac']['geom']['wing']['toverc']['value'] * np.ones(NUM_TOVERC)\n t_skin = np.linspace(0.005, 0.015, NUM_SKIN)\n t_spar = np.linspace(0.005, 0.01, NUM_SPAR)\n self.set_input_defaults('ac|geom|wing|twist', twist, units='deg')\n self.set_input_defaults('ac|geom|wing|toverc', toverc)\n self.set_input_defaults('ac|geom|wing|skin_thickness', t_skin, units='m')\n self.set_input_defaults('ac|geom|wing|spar_thickness', t_spar, units='m')\n self.set_input_defaults('ac|aero|CD_nonwing', 0.0145) # based on matching fuel burn of B738.py example\n\n # Compute Raymer wing weight to know what to subtract from the MTOW before adding the OpenAeroStruct weight\n W_dg = 174.2e3 # design gross weight, lbs\n N_z = 1.5*3. # ultimate load factor (1.5 x limit load factor of 3g)\n S_w = 1368. # trapezoidal wing area, ft^2 (from photogrammetry)\n A = 9.44 # aspect ratio\n t_c = 0.12 # root thickness to chord ratio\n taper = 0.159 # taper ratio\n sweep = 25. 
# wing sweep at 25% MAC\n S_csw = 196.8 # wing-mounted control surface area, ft^2 (from photogrammetry)\n W_wing_raymer = 0.0051 * (W_dg * N_z)**0.557 * S_w**0.649 * A**0.5 * \\\n (t_c)**(-0.4) * (1 + taper)**0.1 / np.cos(np.deg2rad(sweep)) * S_csw**0.1\n self.set_input_defaults('ac|weights|orig_W_wing', W_wing_raymer, units='lb')\n\n # ======================== Mission analysis ========================\n # Run a full mission analysis including takeoff, reserve_, cruise,reserve_ and descereserve_nt\n analysis = self.add_subsystem('analysis',\n BasicMission(num_nodes=nn,\n aircraft_model=B738AirplaneModel),\n promotes_inputs=['*'], promotes_outputs=['*'])\n \n # ======================== Aerostructural sizing at 2.5g ========================\n # Add single point aerostructural analysis at 2.5g and MTOW to size the wingbox structure\n self.add_subsystem('aerostructural_maneuver', Aerostruct(num_x=NUM_X, num_y=NUM_Y, num_twist=NUM_TWIST,\n num_toverc=NUM_TOVERC, num_skin=NUM_SKIN,\n num_spar=NUM_SPAR),\n promotes_inputs=['ac|geom|wing|S_ref', 'ac|geom|wing|AR', 'ac|geom|wing|taper',\n 'ac|geom|wing|c4sweep', 'ac|geom|wing|toverc',\n 'ac|geom|wing|skin_thickness', 'ac|geom|wing|spar_thickness',\n 'ac|geom|wing|twist', 'load_factor'],\n promotes_outputs=[('failure', '2_5g_KS_failure')])\n \n # Flight condition of 2.5g maneuver load case\n self.set_input_defaults('aerostructural_maneuver.fltcond|M', 0.8)\n self.set_input_defaults('aerostructural_maneuver.fltcond|h', 20e3, units='ft')\n self.set_input_defaults('load_factor', 2.5) # multiplier on weights in structural problem\n\n # Find angle of attack for 2.5g sizing flight condition such that lift = 2.5 * MTOW\n self.add_subsystem('dyn_pressure', DynamicPressureComp(num_nodes=1))\n self.add_subsystem('lift', Lift(num_nodes=1), promotes_inputs=['ac|geom|wing|S_ref'])\n self.add_subsystem('kg_to_N', om.ExecComp('lift = load_factor * (MTOW - orig_W_wing + W_wing) * a',\n lift={'units': 'N'},\n MTOW={'units': 'kg'},\n orig_W_wing={'units': 'kg', 'val': W_wing_raymer/2.20462},\n W_wing={'units': 'kg'},\n a={'units': 'm/s**2', 'val': 9.807}),\n promotes_inputs=['load_factor', ('MTOW', 'ac|weights|MTOW')])\n self.add_subsystem('struct_sizing_AoA', om.BalanceComp('alpha', eq_units='N', lhs_name='MTOW',\n rhs_name='lift', units='deg', val=10.,\n lower=0.))\n self.connect('climb.ac|weights|W_wing', 'kg_to_N.W_wing')\n self.connect('kg_to_N.lift', 'struct_sizing_AoA.MTOW')\n self.connect('aerostructural_maneuver.density.fltcond|rho', 'dyn_pressure.fltcond|rho')\n self.connect('aerostructural_maneuver.airspeed.Utrue', 'dyn_pressure.fltcond|Utrue')\n self.connect('dyn_pressure.fltcond|q', 'lift.fltcond|q')\n self.connect('aerostructural_maneuver.fltcond|CL', 'lift.fltcond|CL')\n self.connect('lift.lift', 'struct_sizing_AoA.lift')\n self.connect('struct_sizing_AoA.alpha', 'aerostructural_maneuver.fltcond|alpha')\n \n\ndef configure_problem(num_nodes):\n prob = om.Problem()\n prob.model.add_subsystem('analysis', B738AnalysisGroup(num_nodes=num_nodes), promotes=['*'])\n prob.model.nonlinear_solver = om.NewtonSolver(iprint=2,solve_subsystems=True)\n prob.model.linear_solver = om.DirectSolver()\n prob.model.nonlinear_solver.options['maxiter'] = 10\n prob.model.nonlinear_solver.options['atol'] = 1e-6\n prob.model.nonlinear_solver.options['rtol'] = 1e-6\n prob.model.nonlinear_solver.options['err_on_non_converge'] = True\n prob.model.nonlinear_solver.linesearch = om.BoundsEnforceLS(bound_enforcement='scalar', print_bound_enforce=True)\n\n prob.driver = 
om.ScipyOptimizeDriver()\n prob.driver.options['optimizer'] = 'SLSQP'\n prob.driver.opt_settings['tol'] = 1e-5\n prob.driver.options['debug_print'] = ['objs', 'desvars', 'nl_cons']\n\n # =========================== Mission design variables/constraints ===========================\n prob.model.add_objective('descent.fuel_used_final', scaler=1e-4) # minimize block fuel burn\n prob.model.add_constraint('climb.throttle', lower=0.01, upper=1.05)\n prob.model.add_constraint('cruise.throttle', lower=0.01, upper=1.05)\n prob.model.add_constraint('descent.throttle', lower=0.01, upper=1.05)\n\n # =========================== Aerostructural wing design variables/constraints ===========================\n # Find twist distribution that minimizes fuel burn; lock the twist tip in place\n # to prevent rigid rotation of the whole wing\n prob.model.add_design_var('ac|geom|wing|twist', lower=np.array([0, -10, -10]),\n upper=np.array([0, 10, 10]), units='deg')\n prob.model.add_design_var('ac|geom|wing|AR', lower=5., upper=10.4) # limit to fit in group III gate\n prob.model.add_design_var('ac|geom|wing|c4sweep', lower=0., upper=35.)\n prob.model.add_design_var('ac|geom|wing|toverc', lower=np.linspace(.03, 0.1, NUM_TOVERC), upper=0.25)\n prob.model.add_design_var(\"ac|geom|wing|spar_thickness\", lower=0.003, upper=0.1, scaler=1e2, units='m')\n prob.model.add_design_var(\"ac|geom|wing|skin_thickness\", lower=0.003, upper=0.1, scaler=1e2, units='m')\n prob.model.add_design_var('ac|geom|wing|taper', lower=.01, upper=0.35, scaler=1e1)\n prob.model.add_constraint('2_5g_KS_failure', upper=0.)\n \n return prob\n\ndef set_values(prob, num_nodes, range=2050):\n # set some (required) mission parameters. Each phase needs a vertical and air-speed\n # the entire mission needs a cruise altitude and range\n prob.set_val('cruise|h0',35000.,units='ft')\n prob.set_val('mission_range',range,units='NM')\n prob.set_val('climb.fltcond|vs', np.linspace(2000., 400.,num_nodes), units='ft/min')\n prob.set_val('climb.fltcond|Ueas', np.linspace(220, 200,num_nodes), units='kn')\n prob.set_val('cruise.fltcond|vs', np.zeros((num_nodes,)), units='ft/min')\n prob.set_val('cruise.fltcond|Ueas', np.linspace(250.279, 250.279, num_nodes), units='kn') # M 0.78 @ 35k ft\n prob.set_val('descent.fltcond|vs', np.linspace(-2000, -1000, num_nodes), units='ft/min')\n prob.set_val('descent.fltcond|Ueas', np.linspace(240, 250, num_nodes), units='kn')\n\ndef show_outputs(prob, plots=True):\n # print some outputs\n vars_list = ['descent.fuel_used_final']\n units = ['lb','lb']\n nice_print_names = ['Block fuel', 'Total fuel']\n print(\"=======================================================================\")\n for i, thing in enumerate(vars_list):\n print(nice_print_names[i]+': '+str(prob.get_val(thing,units=units[i])[0])+' '+units[i])\n\n # plot some stuff\n if plots:\n x_var = 'range'\n x_unit = 'NM'\n y_vars = ['fltcond|h','fltcond|Ueas','fuel_used','throttle','fltcond|vs','fltcond|M','fltcond|CL']\n y_units = ['ft','kn','lbm',None,'ft/min', None, None]\n x_label = 'Range (nmi)'\n y_labels = ['Altitude (ft)', 'Veas airspeed (knots)', 'Fuel used (lb)', 'Throttle setting', 'Vertical speed (ft/min)', 'Mach number', 'CL']\n phases = ['climb', 'cruise', 'descent']\n oc.plot_trajectory(prob, x_var, x_unit, y_vars, y_units, phases,\n x_label=x_label, y_labels=y_labels, marker='-',\n plot_title='737-800 Mission Profile')\n\ndef run_738_analysis(plots=False):\n num_nodes = 11\n global NUM_X, NUM_Y\n NUM_X = 3\n NUM_Y = 7\n prob = configure_problem(num_nodes)\n 
prob.setup(check=False, mode='fwd')\n set_values(prob, num_nodes)\n prob.run_model()\n om.n2(prob, show_browser=False)\n show_outputs(prob, plots=plots)\n print(f\"Wing weight = {prob.get_val('ac|weights|W_wing', units='lb')[0]} lb\")\n print(f\"Raymer wing weight = {prob.get_val('ac|weights|orig_W_wing', units='lb')[0]} lb\")\n print(f\"2.5g failure = {prob.get_val('2_5g_KS_failure')}\")\n print(f\"Climb failure = {prob.get_val('climb.ac|struct|failure')}\")\n print(f\"Cruise failure = {prob.get_val('cruise.ac|struct|failure')}\")\n print(f\"Descent failure = {prob.get_val('descent.ac|struct|failure')}\")\n return prob\n\ndef run_738_optimization(plots=False):\n num_nodes = 11\n global NUM_X, NUM_Y\n NUM_X = 3\n NUM_Y = 7\n prob = configure_problem(num_nodes)\n prob.setup(check=True, mode='fwd')\n set_values(prob, num_nodes)\n prob.run_driver()\n prob.list_problem_vars(driver_scaling=False)\n print(f\"Wing weight = {prob.get_val('ac|weights|W_wing', units='lb')[0]} lb\")\n print(f\"Raymer wing weight = {prob.get_val('ac|weights|orig_W_wing', units='lb')[0]} lb\")\n print(f\"2.5g failure = {prob.get_val('2_5g_KS_failure')}\")\n print(f\"Climb failure = {prob.get_val('climb.ac|struct|failure')}\")\n print(f\"Cruise failure = {prob.get_val('cruise.ac|struct|failure')}\")\n print(f\"Descent failure = {prob.get_val('descent.ac|struct|failure')}\")\n if plots:\n show_outputs(prob)\n return prob\n\n\nif __name__ == \"__main__\":\n run_738_analysis(plots=False)\n # run_738_optimization(plots=True)\n" ]
[ [ "numpy.arange", "numpy.ones", "numpy.any", "numpy.where", "numpy.zeros" ], [ "numpy.array", "numpy.zeros", "numpy.ones" ], [ "numpy.linspace", "numpy.ones", "numpy.deg2rad", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
amit-bohra/Interactive-Image-Segmentation-with-OpenCV-Watershed-Algorithm-in-Python3
[ "9fd6e2551fe19af76f1c91c714ba029d2d8599ca" ]
[ "56_interactive_watershed.py" ]
[ "import cv2\r\nimport numpy as np\r\nfrom copy import deepcopy as dp\r\n\r\naqua=(255,255,0)\r\nmarine=(116,139,69)\r\nbanana=(87,207,277)\r\nblue=(255,0,0)\r\nalmond=(205,235,255)\r\nbrown=(64,64,255)\r\nblue1=(255,245,152)\r\ngreen=(0,100,0)\r\norange=(0,140,255)\r\norchid=(139,34,104)\r\npink=(147,20,255)\r\ngold=(0,215,255)\r\ngray=(127,127,127)\r\nindigo=(130,0,75)\r\n\r\ncolors=[aqua,marine,banana,blue,almond,brown,blue1,green,orange,orchid,\r\n pink,gold,gray,indigo]\r\n\r\n\r\n\r\nsize=0\r\ncolor=0\r\n\r\ndef draw(event,x,y,flags,param):\r\n global color,colors,img,marker,segment,tmg,size\r\n mark=color+1\r\n if event==cv2.EVENT_LBUTTONDOWN:\r\n cv2.circle(marker,(x,y),size,mark,-1)\r\n cv2.circle(tmg,(x,y),size,colors[color],-1)\r\n marker_copy=dp(marker)\r\n cv2.watershed(img,marker_copy)\r\n segment=np.zeros(img.shape,np.uint8)\r\n for i in range(1,len(colors)+1):\r\n segment[marker_copy==i]=colors[i-1]\r\n\r\ndef func(x):\r\n pass\r\na=0\r\na=int(input('Enter 1 for VideoCam else 0 '))\r\nif a==1:\r\n cap=cv2.VideoCapture(0)\r\n if cap.isOpened():\r\n ret,img=cap.read()\r\n else:\r\n ret=False\r\nelse:\r\n img=cv2.imread('a.jpg')\r\nimg=cv2.GaussianBlur(img,(1,1),0)\r\ncv2.namedWindow('image',cv2.WINDOW_NORMAL)\r\ncv2.createTrackbar('color','image',0,len(colors)-1,func)\r\ncv2.createTrackbar('size','image',10,200,func)\r\ncv2.setMouseCallback('image',draw)\r\nmarker=np.zeros(img.shape[:2],np.int32)\r\nsegment=np.zeros(img.shape,np.uint8)\r\ntmg=dp(img)\r\nif a==1:\r\n cap.release()\r\nwhile True:\r\n color=cv2.getTrackbarPos('color','image')\r\n size=cv2.getTrackbarPos('size','image')\r\n cv2.imshow('image',tmg)\r\n cv2.imshow('segment',segment)\r\n if cv2.waitKey(1)==27:\r\n break\r\n if cv2.waitKey(1)==ord('p'):\r\n print()\r\n if cv2.waitKey(1)==ord('c'):\r\n tmg=dp(img)\r\n marker=np.zeros(img.shape[:2],np.int32)\r\n segment=np.zeros(img.shape,np.uint8)\r\n color=0\r\ncv2.destroyAllWindows()\r\n\r\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
emailandxu/neurst
[ "235bddfc93b7784df01eddccec6791e1281651cf", "235bddfc93b7784df01eddccec6791e1281651cf", "235bddfc93b7784df01eddccec6791e1281651cf" ]
[ "neurst/data/datasets/parallel_text_dataset.py", "neurst/data/datasets/audio/librispeech.py", "neurst/utils/checkpoints.py" ]
[ "# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom abc import ABCMeta, abstractmethod\n\nimport six\nimport tensorflow as tf\nfrom absl import logging\n\nfrom neurst.data.datasets import register_dataset\nfrom neurst.data.datasets.dataset import TFRecordDataset\nfrom neurst.data.datasets.text_gen_dataset import TextGenDataset\nfrom neurst.utils.compat import DataStatus\nfrom neurst.utils.flags_core import Flag\n\n\[email protected]_metaclass(ABCMeta)\nclass AbstractParallelDataset(TextGenDataset):\n \"\"\" The abstract dataset for parallel text.\n The element spec must be\n {\n 'feature': tf.TensorSpec(shape=(None,), dtype=tf.int64),\n 'label': tf.TensorSpec(shape=(None,), dtype=tf.int64)\n }\n \"\"\"\n\n def __init__(self):\n self._sources = None\n super(AbstractParallelDataset, self).__init__()\n\n @property\n @abstractmethod\n def status(self) -> str:\n raise NotImplementedError\n\n @abstractmethod\n def build_iterator(self, map_func=None, shard_id=0, total_shards=1):\n \"\"\" Returns the iterator of the dataset.\n\n Args:\n map_func: A function mapping a dataset element to another dataset element.\n shard_id: Generator yields on the `shard_id`-th shard of the whole dataset.\n total_shards: The number of total shards.\n \"\"\"\n raise NotImplementedError\n\n @property\n def sources(self):\n \"\"\" Returns a list of source texts. \"\"\"\n return self._sources\n\n\n@register_dataset(\"parallel_text\")\nclass ParallelTextDataset(AbstractParallelDataset):\n\n def __init__(self, args):\n \"\"\" Initializes the dataset. 
\"\"\"\n super(ParallelTextDataset, self).__init__()\n self._src_file = args[\"src_file\"]\n assert self._src_file, \"`src_file` must be provided for ParallelTextDataset.\"\n self._trg_file = args[\"trg_file\"]\n self._data_is_processed = args[\"data_is_processed\"]\n\n @staticmethod\n def class_or_method_args():\n return [\n Flag(\"src_file\", dtype=Flag.TYPE.STRING, help=\"The source text file\"),\n Flag(\"trg_file\", dtype=Flag.TYPE.STRING, help=\"The target text file\"),\n Flag(\"data_is_processed\", dtype=Flag.TYPE.BOOLEAN,\n help=\"Whether the text data is already processed.\"),\n ]\n\n @property\n def status(self):\n if self._data_is_processed:\n return DataStatus.PROCESSED\n return DataStatus.RAW\n\n def build_iterator(self, map_func=None, shard_id=0, total_shards=1):\n \"\"\" Reads data from files and returns the iterator.\n\n Args:\n map_func: A function mapping a dataset element to another dataset element.\n shard_id: Generator yields on the `shard_id`-th shard of the whole dataset.\n total_shards: The number of total shards.\n \"\"\"\n if total_shards > 1:\n total_samples = self.num_samples\n samples_per_part = total_samples // total_shards\n range_begin = samples_per_part * shard_id\n if shard_id == total_shards - 1:\n range_end = total_samples + 1\n logging.info(f\"Iterate on dataset from {range_begin} \"\n f\"to the end (total {total_samples}).\")\n else:\n range_end = range_begin + samples_per_part\n logging.info(f\"Iterate on dataset from {range_begin} \"\n f\"to {range_end} (total {total_samples}).\")\n\n def gen():\n fsrc = tf.io.gfile.GFile(self._src_file)\n ftrg = None if self._trg_file is None else tf.io.gfile.GFile(self._trg_file)\n n = 0\n for src in fsrc:\n n += 1\n data = {\"feature\": src.strip()}\n if ftrg is not None:\n data[\"label\"] = ftrg.readline().strip()\n if total_shards > 1:\n if n < range_begin:\n continue\n if n >= range_end:\n break\n if map_func is not None:\n data = map_func(data)\n yield data\n fsrc.close()\n if ftrg is not None:\n ftrg.close()\n\n return gen\n\n @property\n def sources(self):\n \"\"\" Returns a list of sources. \"\"\"\n if self._sources is None and self._src_file:\n with tf.io.gfile.GFile(self._src_file) as fp:\n self._sources = [line.strip() for line in fp]\n return self._sources\n\n @property\n def targets(self):\n \"\"\" Returns a list of targets. 
\"\"\"\n if self._targets is None and self._trg_file:\n with tf.io.gfile.GFile(self._trg_file) as fp:\n self._targets = [line.strip() for line in fp]\n return self._targets\n\n\n@register_dataset(\"parallel_tfrecord\")\nclass ParallelTFRecordDataset(TFRecordDataset, AbstractParallelDataset):\n\n @property\n def status(self):\n return DataStatus.PROJECTED\n\n @property\n def fields(self):\n return {\"feature\": tf.io.VarLenFeature(tf.int64),\n \"label\": tf.io.VarLenFeature(tf.int64)}\n", "# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport re\n\nimport tensorflow as tf\nfrom absl import logging\n\nfrom neurst.data.datasets import register_dataset\nfrom neurst.data.datasets.audio.audio_dataset import RawAudioDataset\nfrom neurst.utils.compat import DataStatus\nfrom neurst.utils.flags_core import Flag\n\n\n@register_dataset(\"Librispeech\")\nclass LibriSpeech(RawAudioDataset):\n \"\"\"\n LibriSpeech is a corpus of approximately 1000 hours of read English speech.\n Homepage: http://www.openslr.org/12\n The raw dataset contains 7 files:\n - train-clean-100.tar.gz\n - train-clean-360.tar.gz\n - train-other-500.tar.gz\n - dev-clean.tar.gz\n - dev-other.tar.gz\n - test-clean.tar.gz\n - test-other.tar.gz\n \"\"\"\n\n def __init__(self, args):\n super(LibriSpeech, self).__init__(args)\n self._excluded_file = args[\"excluded_file\"]\n self._excluded_list = None\n if self._excluded_file is not None:\n if not tf.io.gfile.exists(self._excluded_file):\n raise ValueError(f\"`excluded_file` not found: {self._excluded_file}\")\n with tf.io.gfile.GFile(self._excluded_file) as fp:\n self._excluded_list = [x.strip().lower() for x in fp]\n\n self._transcripts_dict = None\n\n @staticmethod\n def class_or_method_args():\n this_args = super(LibriSpeech, LibriSpeech).class_or_method_args()\n this_args.append(\n Flag(\"excluded_file\", dtype=Flag.TYPE.STRING, default=None,\n help=\"A file containing transcriptions \"\n \"that would be removed in the LibriSpeech corpus.\"))\n return this_args\n\n @property\n def status(self):\n return {\n \"audio\": DataStatus.RAW,\n \"transcript\": DataStatus.RAW\n }\n\n def load_transcripts(self):\n \"\"\" Loads transcripts (and translations if exists). 
\"\"\"\n if self._transcripts_dict is not None:\n return\n logging.info(f\"Loading transcriptions from tarball: {self._input_tarball}\")\n n = 0\n trans = {}\n level0 = set()\n level1_cnt = 0\n level2_cnt = 0\n excluded_count = 0\n excluded_str = \"\"\n if self._excluded_list is not None:\n excluded_str = \" \".join(self._excluded_list)\n self._transcripts = []\n with self.open_tarball(\"tar\") as tar:\n for tarinfo in tar:\n if not tarinfo.isreg():\n continue\n n += 1\n if n % 10000 == 0:\n logging.info(\"Scanned %d entries...\", n)\n if not tarinfo.name.endswith(\".trans.txt\"):\n continue\n level1_cnt += 1\n # The file LibriSpeech/dev-clean/3170/137482/3170-137482.trans.txt\n # will contain lines such as:\n # 3170-137482-0000 WITH AN EDUCATION WHICH OUGHT TO ...\n # 3170-137482-0001 I WAS COMPELLED BY POVERTY ...\n key = tarinfo.name.strip(\".trans.txt\")\n path0, path1 = key.split(\"/\")[-1].split(\"-\")\n level0.add(path0)\n f = tar.extractfile(tarinfo)\n this_dict = {}\n for line in f.readlines():\n tid, txt = line.decode(\"utf-8\").strip(\"\\n\").split(\" \", 1)\n txt_tokens = txt.split()\n if txt in excluded_str:\n excluded_count += 1\n this_dict[tid] = \"\"\n elif len(txt_tokens) > 10 and (\n \" \".join(txt_tokens[:len(txt_tokens) // 2]) in excluded_str\n or \" \".join(txt_tokens[len(txt_tokens) // 2:]) in excluded_str):\n excluded_count += 1\n this_dict[tid] = \"\"\n else:\n txt = txt.lower()\n this_dict[tid] = txt\n self._transcripts.append(txt)\n logging.info(\"[%s] = %d utterances.\", key, len(this_dict))\n level2_cnt += len(this_dict)\n if path0 not in trans:\n trans[path0] = dict()\n trans[path0][path1] = this_dict\n f.close()\n logging.info(\"Total %d directories, %d sub-directories, %d utterances, %d matched excluded file\",\n len(level0), level1_cnt, level2_cnt, excluded_count)\n # {'2277': {'149896': {'2277-149896-0000': \"HE WAS IN A FEVERED STATE OF MIND OWING TO THE', ...}, ...}\n self._transcripts_dict = trans\n\n def build_iterator(self, map_func=None, shard_id=0, total_shards=1):\n \"\"\" Returns the iterator of the dataset.\n\n Args:\n map_func: A function mapping a dataset element to another dataset element.\n \"\"\"\n if total_shards > 1:\n total_samples = self.num_samples\n samples_per_part = total_samples // total_shards\n range_begin = samples_per_part * shard_id\n if shard_id == total_shards - 1:\n range_end = total_samples + 1\n logging.info(f\"Iterate on dataset from {range_begin} \"\n f\"to the end (total {total_samples}).\")\n else:\n range_end = range_begin + samples_per_part\n logging.info(f\"Iterate on dataset from {range_begin} \"\n f\"to {range_end} (total {total_samples}).\")\n\n def gen():\n if self._transcripts_dict is None:\n self.load_transcripts()\n with self.open_tarball(\"tar\") as tar:\n n = 0\n for tarinfo in tar:\n if not tarinfo.isreg():\n continue\n if not tarinfo.name.endswith(\".flac\"):\n continue\n\n uttid = re.sub(\".*/(.+)\\\\.flac\", \"\\\\1\", tarinfo.name)\n path0, path1, _ = uttid.strip().split(\"-\")\n this_trans = self._transcripts_dict[path0][path1][uttid]\n if this_trans.strip() == \"\":\n continue\n n += 1\n if total_shards > 1:\n if n < range_begin:\n continue\n if n >= range_end:\n break\n f = tar.extractfile(tarinfo)\n audio = self.extract_audio_feature(fileobj=f, mode=\"flac\")\n f.close()\n data_sample = {\n \"audio\": audio,\n \"transcript\": this_trans\n }\n if map_func is None:\n yield data_sample\n else:\n yield map_func(data_sample)\n\n return gen\n", "# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the 
Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport functools\nimport os\nimport re\nimport time\nimport traceback\n\nimport numpy\nimport tensorflow as tf\nfrom absl import logging\n\nfrom neurst.utils import compat\nfrom neurst.utils.converters import Converter, build_converter\n\n\ndef remove_checkpoint_by_prefix(dirname, prefix):\n if prefix is None:\n return\n prefix = os.path.join(dirname, prefix)\n datas = tf.io.gfile.glob(prefix + \".data-?????-of-?????\")\n for f in datas + [prefix + \".index\"]:\n try:\n tf.io.gfile.remove(f)\n except tf.errors.OpError:\n logging.info(traceback.format_exc())\n\n\ndef restore_custom_checkpoint(checkpoint, checkpoint_path, model):\n \"\"\" Restore checkpoint from checkpoint_path.\n\n Args:\n checkpoint: A tf.train.Checkpoint.\n checkpoint_path: A string indicating the checkpoint path.\n model: A keras model.\n\n Returns: The checkpoint path if successfully restored or None otherwise.\n \"\"\"\n traced_vars = model.weights\n pre_vars = dict(\n [(v.name.split(\":\")[0], v.numpy()) for v in traced_vars])\n try:\n checkpoint.restore(checkpoint_path).expect_partial()\n except (tf.errors.OpError, ValueError):\n logging.info(traceback.format_exc())\n return None\n logging.info('Restoring checkpoint from {latest_ckpt}'.format(\n latest_ckpt=checkpoint_path))\n after_vars = dict(\n [(v.name.split(\":\")[0], v.numpy()) for v in traced_vars])\n restored_var_names = []\n unrestored_var_names = []\n for v_name, v in pre_vars.items():\n try:\n if numpy.sqrt(numpy.sum((v - after_vars[v_name]) ** 2)) < 1e-6:\n unrestored_var_names.append(v_name)\n else:\n restored_var_names.append(v_name)\n except TypeError:\n logging.info(f\"Ignore non-numeric variable: {v_name}\")\n if len(unrestored_var_names) == 0:\n logging.info(\"All variables matched with checkpoint: {}\".format(checkpoint_path))\n elif len(restored_var_names) == 0:\n logging.info(\"No variables matched with checkpoint: {}\".format(checkpoint_path))\n if model is not None:\n logging.info(\"Trying `keras_model.load_weights()`\")\n try:\n model.load_weights(checkpoint_path)\n except (ImportError, ValueError, tf.errors.OpError, AssertionError):\n logging.info(\"Fail to call model.load_weights.\")\n logging.info(traceback.format_exc())\n return None\n else:\n for v_name in restored_var_names:\n logging.info(\"Restored {}\".format(v_name))\n for v_name in unrestored_var_names:\n logging.info(\"Unrestored {}\".format(v_name))\n return checkpoint_path\n\n\nclass _CustomSaver(object):\n \"\"\" Custom checkpoint manager for saving checkpoints. 
\"\"\"\n\n def __init__(self, directory, checkpoint, max_to_keep=8):\n \"\"\" Initializes the checkpoint manager.\n\n Args:\n directory: The path to a directory in which to write checkpoints.\n checkpoint: A checkpoint\n max_to_keep: The maximum checkpoint numbers to keep.\n\n Raises:\n ValueError: Neither `traced_vars` nor `model` is provided.\n \"\"\"\n self._directory = directory\n if not tf.io.gfile.exists(self._directory):\n try:\n tf.io.gfile.makedirs(self._directory)\n except tf.errors.OpError:\n pass\n self._checkpoint = checkpoint\n self._max_to_keep = max_to_keep\n # a list of tuple: (checkpoint name, timestamp)\n self._all_model_checkpoints = []\n\n @property\n def checkpoint(self):\n return self._checkpoint\n\n @property\n def directory(self):\n return self._directory\n\n def _update_checkpoint_meta(self):\n \"\"\"Updates the checkpoint file under each model dir. \"\"\"\n while len(self._all_model_checkpoints) > self._max_to_keep:\n prefix, _ = self._all_model_checkpoints.pop(0)\n remove_checkpoint_by_prefix(dirname=self.directory, prefix=prefix)\n meta_data_str = \"model_checkpoint_path: \\\"{}\\\"\\n\".format(self._all_model_checkpoints[-1][0])\n for path, _ in self._all_model_checkpoints:\n meta_data_str += \"all_model_checkpoint_paths: \\\"{}\\\"\\n\".format(path)\n for _, timestamp in self._all_model_checkpoints:\n meta_data_str += \"all_model_checkpoint_timestamps: {}\\n\".format(str(timestamp))\n with tf.io.gfile.GFile(os.path.join(self.directory, \"checkpoint.incomplete\"), \"w\") as fw:\n fw.write(meta_data_str)\n tf.io.gfile.rename(os.path.join(self.directory, \"checkpoint.incomplete\"),\n os.path.join(self.directory, \"checkpoint\"),\n overwrite=True)\n\n def save(self, prefix):\n output = self._checkpoint.write(os.path.join(self.directory, prefix))\n return output\n\n\nclass NameBasedCheckpointManager(_CustomSaver):\n \"\"\" The name-based checkpoint manager for saving and restoring variables. \"\"\"\n\n def __init__(self, model, directory, max_to_keep=8, checkpoint_name=\"ckpt\"):\n \"\"\" Initializes a custom checkpoint manager.\n\n Args:\n model: A tf.keras.Model.\n directory: The path to a directory in which to write checkpoints.\n max_to_keep: The maximum checkpoint numbers to keep.\n checkpoint_name: The name of each checkpoint.\n \"\"\"\n self._model = model\n super(NameBasedCheckpointManager, self).__init__(\n directory=directory, checkpoint=tf.train.Checkpoint(\n **dict([(x.name.split(\":\")[0], x) for x in model.weights])),\n max_to_keep=max_to_keep)\n self._checkpoint_name = checkpoint_name\n logging.info(\"Creates checkpoint manager for directory: {}\".format(directory))\n\n def restore(self, restore_path=None):\n \"\"\" Restores checkpoint from `save_path` or self._directory by default. \"\"\"\n if restore_path is None:\n restore_path = self.directory\n latest_ckpt = tf.train.latest_checkpoint(restore_path)\n if latest_ckpt is None:\n latest_ckpt = restore_path\n if latest_ckpt:\n return restore_custom_checkpoint(self.checkpoint, latest_ckpt, self._model)\n\n def save(self, checkpoint_number):\n prefix = \"{}-{}\".format(self._checkpoint_name, checkpoint_number)\n output = super(NameBasedCheckpointManager, self).save(prefix)\n self._all_model_checkpoints.append((prefix, time.time()))\n self._update_checkpoint_meta()\n return output\n\n\nclass KeepBestCheckpointSaver(_CustomSaver):\n \"\"\" Custom Checkpoint manager for saving and restoring variables. 
\"\"\"\n\n def __init__(self, model, directory, metric, max_to_keep=8, checkpoint_name=\"ckpt\"):\n \"\"\" Initializes a custom checkpoint manager.\n\n Args:\n model: A keras model.\n directory: The path to a directory in which to write checkpoints.\n metric: A metric object.\n max_to_keep: The maximum checkpoint numbers to keep.\n checkpoint_name: The name of each checkpoint.\n \"\"\"\n if directory is None:\n directory = compat.get_saver_or_default().directory\n if not directory.endswith(\"/\"):\n directory += \"/\"\n directory += \"best\"\n super(KeepBestCheckpointSaver, self).__init__(\n checkpoint=tf.train.Checkpoint(**dict([(x.name.split(\":\")[0], x) for x in model.weights])),\n directory=directory, max_to_keep=max_to_keep)\n self._metric = metric\n self._checkpoint_name = checkpoint_name\n logging.info(\"Creates custom keep-best checkpoint manager for directory: {}\".format(directory))\n\n def save(self, checkpoint_number, metric_value):\n \"\"\" Saves a checkpoint and updates meta values if `metric_value` is better.\n\n Args:\n checkpoint_number: The current step.\n metric_value: The metric result.\n\n Returns:\n The path to the checkpoint if it is saved, otherwise None.\n\n \"\"\"\n # whether to save or not\n if (0 <= len(self._all_model_checkpoints) < self._max_to_keep\n or self._metric.greater_or_eq(metric_value, self._all_model_checkpoints[0][1])):\n saved_prefix = \"{}-{}-{}\".format(self._checkpoint_name, checkpoint_number,\n (\"%.2f\" % self._metric.get_value(metric_value)))\n path = super(KeepBestCheckpointSaver, self).save(saved_prefix)\n self._all_model_checkpoints.append((saved_prefix, self._metric.get_value(metric_value)))\n self._all_model_checkpoints = sorted(\n self._all_model_checkpoints,\n key=functools.cmp_to_key(lambda x, y: (-int(self._metric.greater_or_eq(y[1], x[1])\n and y[1] != x[1]))))\n self._update_checkpoint_meta()\n return path\n\n return None\n\n\nclass AverageCheckpointSaver(_CustomSaver):\n \"\"\" Custom Checkpoint manager for saving averaged variables. \"\"\"\n\n def __init__(self, model, directory, metric, max_to_keep=8, checkpoint_name=\"ckpt\"):\n \"\"\" Initializes a custom checkpoint manager.\n\n Args:\n model: A keras model.\n directory: The path to a directory in which to write checkpoints.\n metric: A metric object.\n max_to_keep: The maximum checkpoint numbers to keep.\n checkpoint_name: The name of each checkpoint.\n \"\"\"\n if directory is None:\n directory = compat.get_saver_or_default().directory\n if not directory.endswith(\"/\"):\n directory += \"/\"\n directory += \"best_avg\"\n self._checkpoint_name = checkpoint_name\n self._traced_vars = dict([(x.name.split(\":\")[0], x) for x in model.weights])\n self._traced_var_names = list(self._traced_vars.keys())\n self._traced_var_numpys = []\n self._metric = metric\n\n v_numpys = dict([(n, v.numpy()) for n, v in self._traced_vars.items()])\n with tf.distribute.OneDeviceStrategy(device=\"/cpu:0\").scope():\n self._avg_traced_vars = dict([(n, tf.Variable(v, dtype=v.dtype, name=n + \"_avg\"))\n for n, v in v_numpys.items()])\n super(AverageCheckpointSaver, self).__init__(\n directory=directory, max_to_keep=max_to_keep,\n checkpoint=tf.train.Checkpoint(**self._avg_traced_vars))\n logging.info(\"Create checkpoint manager for averaged checkpoint \"\n \"of the latest {} checkpoints to dir: {}\".format(self._max_to_keep, self.directory))\n\n def _average_checkpoint(self):\n \"\"\" Averages the checkpoints. 
\"\"\"\n for idx, name in enumerate(self._traced_var_names):\n self._avg_traced_vars[name] = self._avg_traced_vars[name].assign(\n numpy.average([var_numpys[idx] for var_numpys in self._traced_var_numpys], axis=0))\n\n def save(self, checkpoint_number, metric_value):\n \"\"\" Saves a checkpoint and updates meta values if `metric_value` is better.\n\n Args:\n checkpoint_number: The current step.\n metric_value: The metric result.\n\n Returns:\n The path to the checkpoint if it is saved, otherwise None.\n\n \"\"\"\n # keep the latest checkpoints\n self._traced_var_numpys.append(\n [self._traced_vars[x].numpy() for x in self._traced_var_names])\n if len(self._traced_var_numpys) > self._max_to_keep:\n self._traced_var_numpys.pop(0)\n if (0 <= len(self._all_model_checkpoints) < self._max_to_keep\n or self._metric.greater_or_eq(metric_value, self._all_model_checkpoints[0][1])):\n # Averages the checkpoints.\n for idx, name in enumerate(self._traced_var_names):\n self._avg_traced_vars[name] = self._avg_traced_vars[name].assign(\n numpy.average([var_numpys[idx] for var_numpys in self._traced_var_numpys], axis=0))\n saved_prefix = \"{}-{}-{}\".format(self._checkpoint_name, checkpoint_number,\n (\"%.2f\" % self._metric.get_value(metric_value)))\n path = super(AverageCheckpointSaver, self).save(saved_prefix)\n self._all_model_checkpoints.append((saved_prefix, self._metric.get_value(metric_value)))\n self._all_model_checkpoints = sorted(\n self._all_model_checkpoints,\n key=functools.cmp_to_key(lambda x, y: (-int(self._metric.greater_or_eq(y[1], x[1])\n and y[1] != x[1]))))\n self._update_checkpoint_meta()\n return path\n\n return None\n\n\ndef checkpoint_scope_name(checkpoint_path):\n \"\"\" Lists checkpoint variables and extract top scope name.\n\n Args:\n checkpoint_path: A string, the checkpoint path.\n\n Returns: A string or None.\n \"\"\"\n var_names = [compat.wrapper_var_name(x[0]) for x in tf.train.list_variables(checkpoint_path)]\n prefixs = set()\n for n in var_names:\n n_tokens = n.strip().split(\"/\")\n if len(n_tokens) > 1:\n prefixs.add(n_tokens[0])\n if len(prefixs) > 1:\n logging.info(\"WARNING: more than one scope names({}) extracted from {}. \"\n \"Be careful to this behavior, \"\n \"which may lead to unknown issues.\".format(prefixs, checkpoint_path))\n return prefixs.pop()\n\n\ndef restore_checkpoint_if_possible(model, model_dir, var_name_pattern=None):\n \"\"\" Restores checkpoint from `model_dir` if exists. 
\"\"\"\n latest_ckpt_path = tf.train.latest_checkpoint(model_dir)\n if not latest_ckpt_path:\n latest_ckpt_path = model_dir\n try:\n tf.train.list_variables(latest_ckpt_path)\n except (tf.errors.NotFoundError, ValueError, tf.errors.DataLossError):\n return None\n\n ckpt_scope_name = checkpoint_scope_name(latest_ckpt_path)\n vars = model.weights\n if var_name_pattern is None:\n checkpoint = tf.train.Checkpoint(\n **dict([(ckpt_scope_name + x.name.split(\":\")[0][x.name.index(\"/\"):], x) for x in vars]))\n else:\n logging.info(\"Variables only match the {} will be restored.\".format(var_name_pattern))\n checkpoint = tf.train.Checkpoint(\n **dict([(ckpt_scope_name + x.name.split(\":\")[0][x.name.index(\"/\"):], x) for x in vars\n if re.search(var_name_pattern, x.name) is not None]))\n return restore_custom_checkpoint(checkpoint, latest_ckpt_path, model)\n\n\ndef restore_checkpoint_if_possible_v2(model, path, model_name=None, from_prefix=None,\n to_prefix=None, name_pattern=None):\n \"\"\" Restores checkpoint.\n\n Args:\n model: A keras model.\n path: The path to the neurst checkpoint or the path/key for the converter.\n model_name: The converter name for converting checkpoints.\n from_prefix: The name prefix.\n to_prefix: The target name prefix.\n name_pattern: A regex.\n\n Returns: The ckpt path if successfully restored else None.\n \"\"\"\n if not (model_name or from_prefix or to_prefix):\n return restore_checkpoint_if_possible(model, path, name_pattern)\n logging.info(f\"Loading {model_name} ({path}).\")\n converter: Converter = build_converter(model_name)\n tmp_ckpt = \"ram://tmp_ckpt\"\n converter.convert(path, tmp_ckpt)\n latest_ckpt_path = tf.train.latest_checkpoint(tmp_ckpt)\n if from_prefix is None:\n from_prefix = checkpoint_scope_name(latest_ckpt_path)\n else:\n from_prefix = from_prefix.strip(\"/\")\n vars = model.weights\n if to_prefix is None:\n to_prefix = vars[0].split(\"/\")[0]\n else:\n to_prefix = to_prefix.strip(\"/\")\n if name_pattern is None:\n checkpoint = tf.train.Checkpoint(\n **dict([(x.name.split(\":\")[0].replace(to_prefix, from_prefix, 1), x) for x in vars]))\n else:\n logging.info(\"Variables only match the {} will be restored.\".format(name_pattern))\n checkpoint = tf.train.Checkpoint(\n **dict([(x.name.split(\":\")[0].replace(to_prefix, from_prefix, 1), x) for x in vars\n if re.search(name_pattern, x.name) is not None]))\n return restore_custom_checkpoint(checkpoint, latest_ckpt_path, model)\n" ]
[ [ "tensorflow.io.gfile.GFile", "tensorflow.io.VarLenFeature" ], [ "tensorflow.io.gfile.exists", "tensorflow.io.gfile.GFile" ], [ "tensorflow.train.latest_checkpoint", "tensorflow.Variable", "tensorflow.io.gfile.exists", "tensorflow.train.Checkpoint", "tensorflow.io.gfile.glob", "tensorflow.io.gfile.makedirs", "tensorflow.io.gfile.remove", "tensorflow.train.list_variables", "numpy.average", "numpy.sum", "tensorflow.distribute.OneDeviceStrategy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
semio/ddf_utils
[ "e10c4cb6dc7722415a5863579a552cc7b7e3668d", "e10c4cb6dc7722415a5863579a552cc7b7e3668d" ]
[ "ddf_utils/model/package.py", "ddf_utils/factory/ilo.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"datapackage model\"\"\"\n\nimport os.path as osp\nfrom typing import List, Tuple, Dict, Union, Callable\nimport attr\nimport json\nfrom itertools import product\nfrom collections import OrderedDict\nfrom tqdm import tqdm\n\nimport pandas as pd\n\nfrom .ddf import DDF, Concept, EntityDomain, Entity, DaskDataPoint, Synonym\nfrom .utils import absolute_path\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](auto_attribs=True, repr=False)\nclass TableSchema:\n \"\"\"Table Schema Object Class\"\"\"\n fields: List[dict]\n primaryKey: Union[List[str], str]\n\n @classmethod\n def from_dict(cls, d: dict):\n fields = d['fields']\n primaryKey = d['primaryKey']\n return cls(fields, primaryKey)\n\n @property\n def field_names(self):\n return [f['name'] for f in self.fields]\n\n @property\n def common_fields(self):\n field_names = self.field_names\n pkey = self.primaryKey\n if isinstance(pkey, str):\n common_fields = list(filter(lambda x: x != pkey, field_names))\n else:\n common_fields = list(filter(lambda x: x not in pkey, field_names))\n return common_fields\n\n def __repr__(self):\n return \"TableSchema(primaryKey: {}, fields: {})\".format(self.primaryKey, self.common_fields)\n\n\[email protected](auto_attribs=True)\nclass Resource:\n name: str\n path: str\n schema: TableSchema\n\n @classmethod\n def from_dict(cls, d: dict):\n path = d['path']\n name = d['name']\n schema = TableSchema.from_dict(d['schema'])\n return cls(name, path, schema)\n\n def to_dict(self):\n res = vars(self).copy()\n if 'schema' in res:\n res['schema'] = vars(res['schema']).copy()\n return res\n\n\[email protected](auto_attribs=True)\nclass DDFSchema:\n primaryKey: List[str]\n value: str\n resources: List[str] # a list of resource names\n\n @classmethod\n def from_dict(cls, d: dict):\n primaryKey = d['primaryKey']\n value = d['value']\n resources = d['resources']\n return cls(primaryKey=primaryKey, value=value, resources=resources)\n\n\[email protected](auto_attribs=True, repr=False)\nclass DataPackage:\n base_path: str\n resources: List[Resource]\n props: dict = attr.ib(factory=dict)\n\n def __attrs_post_init__(self):\n self.base_path = absolute_path(self.base_path)\n\n def __repr__(self):\n return f\"DataPackage({self.base_path})\"\n\n @classmethod\n def from_dict(cls, d_: dict, base_path='./'):\n d = d_.copy()\n resources = list(map(Resource.from_dict, d.pop('resources')))\n return cls(base_path=base_path, resources=resources, props=d)\n\n @classmethod\n def from_json(cls, json_path):\n json_path = absolute_path(json_path)\n base_path = osp.dirname(json_path)\n d = json.load(open(json_path))\n return cls.from_dict(d, base_path)\n\n @classmethod\n def from_path(cls, path):\n path = absolute_path(path)\n json_path = osp.join(path, 'datapackage.json')\n return cls.from_json(json_path)\n\n def to_dict(self):\n \"\"\"dump the datapackage to disk\"\"\"\n raise NotImplementedError\n\n\[email protected](repr=False)\nclass DDFcsv(DataPackage):\n \"\"\"DDFCSV datapackage.\"\"\"\n ddfSchema: Dict[str, List[DDFSchema]] = attr.ib(factory=dict)\n ddf: DDF = attr.ib(init=False)\n concepts_resources: List[Resource] = attr.ib(init=False)\n entities_resources: List[Resource] = attr.ib(init=False)\n datapoints_resources: List[Resource] = attr.ib(init=False)\n synonyms_resources: List[Resource] = attr.ib(init=False)\n\n # config for read_csv\n _default_reader_options = {'keep_default_na': False, 'na_values': ['']}\n _default_dask_reader_options = {'keep_default_na': False,\n 
'na_values': [''],\n 'sample_rows': 1000000}\n\n def __attrs_post_init__(self):\n super(DDFcsv, self).__attrs_post_init__()\n conc = list()\n ent = list()\n dp = list()\n syn = list()\n for r in self.resources:\n pkey = r.schema.primaryKey\n if isinstance(pkey, str):\n if pkey == 'concept':\n conc.append(r)\n else:\n ent.append(r)\n else: # TODO: datapoints key might be one column, not list of columns?\n if 'synonym' in pkey:\n syn.append(r)\n else:\n dp.append(r)\n self.concepts_resources = conc\n self.entities_resources = ent\n self.datapoints_resources = dp\n self.synonyms_resources = syn\n self.ddf = self.load_ddf()\n\n @classmethod\n def from_dict(cls, d_: dict, base_path='./'):\n d = d_.copy()\n resources = list(map(Resource.from_dict, d.pop('resources')))\n if 'ddfSchema' in d.keys():\n ddf_schema_ = d.pop('ddfSchema')\n ddf_schema = dict()\n for k, v in ddf_schema_.items():\n ddf_schema[k] = [DDFSchema.from_dict(d) for d in v]\n else:\n ddf_schema = {}\n return cls(base_path=base_path, resources=resources, ddfSchema=ddf_schema, props=d)\n\n def to_dict(self):\n res = OrderedDict(self.props.copy())\n res['resources'] = [r.to_dict() for r in self.resources]\n if self.ddfSchema:\n res['ddfSchema'] = dict()\n for k, v in self.ddfSchema.items():\n res['ddfSchema'][k] = [vars(sch).copy() for sch in v]\n return res\n\n def _gen_concepts(self):\n concepts_paths = [osp.join(self.base_path, r.path) for r in self.concepts_resources]\n for p in concepts_paths:\n df = pd.read_csv(p, index_col='concept', dtype=str, **self._default_reader_options)\n for concept, row in df.iterrows():\n concept_type = row['concept_type']\n props = row.drop('concept_type').to_dict()\n yield (concept, Concept(id=concept, concept_type=concept_type, props=props))\n\n def _gen_entities(self, concepts: Dict[str, Concept]):\n for r in self.entities_resources:\n pkey = r.schema.primaryKey\n if concepts[pkey].concept_type == 'entity_domain':\n domain = concepts[pkey].id\n else:\n domain = concepts[pkey].props['domain']\n\n df = pd.read_csv(osp.join(self.base_path, r.path), dtype=str, # TODO: is it okay to use str for all?\n **self._default_reader_options)\n df = df.set_index(pkey)\n is_cols = list(filter(lambda x: x.startswith('is--'), df.columns.values))\n for ent, row in df.iterrows():\n sets = list()\n for c in is_cols:\n if row[c] == 'TRUE' and c[4:] != domain:\n sets.append(c[4:]) # strip the 'is--' part, only keep set name\n yield (domain, Entity(id=ent, domain=domain, sets=sets, props=row.drop(is_cols).to_dict()))\n\n def _gen_datapoints(self):\n for r in self.datapoints_resources:\n fields = r.schema.common_fields\n pkey = r.schema.primaryKey\n for f in fields:\n yield (f, pkey, osp.join(self.base_path, r.path))\n\n def _gen_synonyms(self):\n for r in self.synonyms_resources:\n # there should be only two columns\n pkey = r.schema.primaryKey\n if pkey[0] == 'synonym':\n concept = pkey[1]\n else:\n concept = pkey[0]\n df = pd.read_csv(osp.join(self.base_path, r.path), **self._default_reader_options)\n sym = Synonym(concept_id=concept, synonyms=df.set_index('synonym')[concept].to_dict())\n yield (concept, sym)\n\n @staticmethod\n def entity_domain_to_categorical(domain: EntityDomain):\n entities = [e.id for e in domain.entities]\n return pd.api.types.CategoricalDtype(entities)\n\n @staticmethod\n def entity_set_to_categorical(domain: EntityDomain, s: str):\n entity_set = domain.get_entity_set(s)\n entities = [e.id for e in entity_set]\n return pd.api.types.CategoricalDtype(entities)\n\n def load_ddf(self):\n \"\"\"-> 
DDF\"\"\"\n # load concepts\n concepts = dict(self._gen_concepts())\n\n # load entities\n entities = list(self._gen_entities(concepts))\n domains = dict()\n domains_tmp = dict()\n for domain, entity in entities:\n if domain not in domains_tmp.keys():\n domains_tmp[domain] = list()\n domains_tmp[domain].append(entity)\n\n for domain, entities_ in domains_tmp.items():\n # TODO: maybe get properties from concepts table\n # Allow duplicated entity because they may be defined in multiple resources\n # i.e. multiple entity sets in separated files.\n domains[domain] = EntityDomain.from_entity_list(domain_id=domain, entities=entities_, allow_duplicated=True)\n\n # load datapoints. Here we will use Dask for all\n # 1. create categories for entity domains\n dtypes = dict()\n # parse_dates = list()\n concept_types = dict()\n for domain_name, domain in domains.items():\n dtypes[domain_name] = self.entity_domain_to_categorical(domain)\n for eset in domain.entity_sets:\n dtypes[eset] = self.entity_set_to_categorical(domain, eset)\n # 2. get all concept types, update dtypes for time concepts\n for c_id, c in concepts.items():\n concept_types[c_id] = c.concept_type\n if c.concept_type == 'time':\n dtypes[c_id] = 'str'\n # 3. group files for same indicator together\n indicators = dict()\n for field, pkey, path in self._gen_datapoints():\n # import ipdb; ipdb.set_trace()\n indicator = field\n pkey = tuple(sorted(pkey))\n if indicator not in indicators:\n indicators.setdefault(indicator, dict([(pkey, [path])]))\n else:\n if pkey not in indicators[indicator]:\n indicators[indicator][pkey] = [path]\n else:\n indicators[indicator][pkey].append(path)\n datapoints = dict()\n for i, v in indicators.items():\n datapoints[i] = dict()\n # dtypes_ = dtypes.copy()\n # dtypes_[i] = 'float' # TODO: supporting string/float datatypes, not just float\n read_csv_options = self._default_dask_reader_options.copy()\n read_csv_options.update(dict(dtype=dtypes))\n for k, paths in v.items():\n dp = DaskDataPoint(id=i, dimensions=k, path=paths, concept_types=concept_types,\n read_csv_options=read_csv_options)\n datapoints[i][k] = dp\n\n # load synonyms\n synonyms = dict(self._gen_synonyms())\n\n # return complete DDF object\n return DDF(concepts=concepts, entities=domains, datapoints=datapoints, synonyms=synonyms, props=self.props)\n\n def generate_ddf_schema(self, progress_bar=False):\n \"\"\"generate ddf schema from all resources.\n\n Parameters\n ----------\n\n progress_bar : bool\n whether progress bar should be shown when generating ddfSchema.\n\n \"\"\"\n hash_table = {}\n ddf_schema = {'concepts': [], 'entities': [], 'datapoints': [], 'synonyms': []}\n entity_value_cache = dict()\n dtypes = dict()\n\n # check if we need progress bar\n if progress_bar:\n if logger.getEffectiveLevel() == 10: # debug: force not showing progress bar\n logger.warning(\"progress bar will be disabled in debugging mode.\")\n progress_bar = False\n\n # generate set-membership details for every single entity in dataset\n # also create dtypes for later use\n for domain_id, domain in self.ddf.entities.items():\n dtypes[domain_id] = self.entity_domain_to_categorical(domain)\n for s in self.ddf.entities[domain_id].entity_sets:\n dtypes[s] = self.entity_set_to_categorical(domain, s)\n entity_value_cache[domain_id] = dict()\n for ent in domain.entities:\n sets = set()\n sets.add(domain_id)\n for s in ent.sets:\n sets.add(s)\n entity_value_cache[domain_id][ent.id] = tuple(sets)\n\n def _which_sets(entity_, domain_):\n try:\n return 
entity_value_cache[domain_][entity_]\n except KeyError:\n logger.debug('entity {} is not in {} domain!'.format(entity_, domain_))\n raise\n\n def _gen_key_value_object(resource: Resource):\n logger.debug('working on: {}'.format(resource.path))\n if isinstance(resource.schema.primaryKey, str):\n pkeys = [resource.schema.primaryKey]\n else:\n pkeys = resource.schema.primaryKey\n\n entity_cols = [x for x in pkeys\n if x in self.ddf.concepts\n and self.ddf.concepts[x].concept_type in ['entity_domain', 'entity_set']]\n value_cols = resource.schema.common_fields\n data = pd.read_csv(osp.join(self.base_path, resource.path), dtype=dtypes,\n **self._default_reader_options)\n # check if entity columns data match entity defined in entity files\n for c in entity_cols:\n if data[c].hasnans:\n data_ = pd.read_csv(osp.join(self.base_path, resource.path), dtype={c: str}, **self._default_reader_options)\n ents = dtypes[c].categories.values\n ents_ = data_[c].unique()\n diff = set(ents_) - set(ents)\n logger.critical(\"in file {}:\".format(resource.path))\n logger.critical(\"{} column contains entity which does not belong to {} domain/set: {}\".format(c, c, list(diff)))\n raise ValueError(\"entity mismatch\")\n\n # for resources that have entity_columns: only consider all permutations on entity columns\n if len(entity_cols) > 0:\n data = data[entity_cols].drop_duplicates()\n\n pkeys_prop = dict()\n for c in pkeys:\n if c == 'cocnept':\n pkeys_prop[c] = {'type': 'concept'}\n elif c not in self.ddf.concepts:\n pkeys_prop[c] = {'type': 'non_concept'}\n else:\n concept = self.ddf.concepts[c]\n if concept.concept_type == 'entity_set':\n pkeys_prop[c] = {'type': 'entity_set',\n 'domain': concept.props['domain']}\n elif concept.concept_type == 'entity_domain':\n pkeys_prop[c] = {'type': 'entity_domain'}\n else:\n pkeys_prop[c] = {'type': 'others'}\n\n all_permutations = set()\n for _, r in data.iterrows():\n perm = list()\n for c in pkeys:\n if pkeys_prop[c]['type'] == 'entity_set':\n domain = pkeys_prop[c]['domain']\n perm.append(_which_sets(r[c], domain))\n elif pkeys_prop[c]['type'] == 'entity_domain':\n perm.append(_which_sets(r[c], c))\n else:\n perm.append(tuple([c]))\n\n all_permutations.add(tuple(perm))\n\n # if data is empty. 
Just emit an object with primarykey and null value\n if len(all_permutations) == 0:\n obj = {'primaryKey': pkeys, 'value': None, 'resource': resource.name}\n logger.debug('yielding: {}'.format(str(obj)))\n yield obj\n\n for row in all_permutations:\n for perm in product(*row):\n if len(value_cols) > 0:\n for c in value_cols:\n obj = {'primaryKey': list(perm), 'value': c, 'resource': resource.name}\n logger.debug('yielding: {}'.format(str(obj)))\n yield obj\n else:\n obj = {'primaryKey': list(perm), 'value': None, 'resource': resource.name}\n logger.debug('yielding: {}'.format(str(obj)))\n yield obj\n\n def _add_to_schema(resource_schema):\n \"\"\"handle objects generated by ``_gen_key_value_object``\"\"\"\n key = '-'.join(sorted(resource_schema['primaryKey']))\n if not pd.isnull(resource_schema['value']):\n hash_val = key + '--' + resource_schema['value']\n else:\n hash_val = key + '--' + 'nan'\n if hash_val not in hash_table.keys():\n hash_table[hash_val] = {\n 'primaryKey': sorted(resource_schema['primaryKey']),\n 'value': resource_schema['value'],\n 'resources': {resource_schema['resource']}\n }\n else:\n hash_table[hash_val]['resources'].add(resource_schema['resource'])\n\n # make progressbar and run the process to generate schema\n if progress_bar:\n pbar = tqdm(total=len(self.resources))\n\n for g in map(_gen_key_value_object, self.resources):\n if progress_bar:\n pbar.update(1)\n for kvo in g:\n logging.debug(\"adding kvo {}\".format(str(kvo)))\n _add_to_schema(kvo)\n\n if progress_bar:\n pbar.close()\n\n for sch in hash_table.values():\n sch['resources'] = list(sch['resources']) # convert set to list\n sch_object = DDFSchema.from_dict(sch)\n if len(sch['primaryKey']) == 1:\n if sch['primaryKey'][0] == 'concept':\n ddf_schema['concepts'].append(sch_object)\n else:\n ddf_schema['entities'].append(sch_object)\n else:\n if 'synonym' in sch['primaryKey']:\n ddf_schema['synonyms'].append(sch_object)\n else:\n ddf_schema['datapoints'].append(sch_object)\n\n return ddf_schema\n\n def get_ddf_schema(self, update=False):\n if not update and self.ddfSchema is not None:\n return self.ddfSchema\n elif not update and self.ddfSchema is None:\n raise ValueError('No ddfSchema, please use update=True to generate one')\n else:\n self.ddfSchema = self.generate_ddf_schema()\n return self.ddfSchema\n", "# -*- coding: utf-8 -*-\n\n\"\"\"Functions for scraping ILO datasets\n\nusing the bulk downloader, see `its doc`_.\n\n.. _its doc: http://www.ilo.org/ilostat-files/WEB_bulk_download/ILOSTAT_BulkDownload_Guidelines.pdf\n\"\"\"\n\nfrom . 
common import requests_retry_session, DataFactory\n\nfrom pathlib import Path\nfrom urllib.parse import urljoin\nfrom multiprocessing import Pool\nfrom functools import partial\n\nimport pandas as pd\n\n\nclass ILOLoader(DataFactory):\n main_url = 'http://www.ilo.org/ilostat-files/WEB_bulk_download/'\n indicator_meta_url_tmpl = urljoin(main_url, 'indicator/table_of_contents_{lang}.csv')\n other_meta_url_tmpl = urljoin(main_url, 'dic/{table}_{lang}.csv')\n\n def load_metadata(self, table='indicator', lang='en'):\n \"\"\"get code list for a specified table and language.\n\n Check ILO doc for all available tables and languages.\n \"\"\"\n if table == 'indicator':\n tmpl = self.indicator_meta_url_tmpl\n else:\n tmpl = self.other_meta_url_tmpl\n\n url = tmpl.format(table=table, lang=lang)\n\n metadata = {}\n metadata[table] = pd.read_csv(url)\n\n self.metadata = metadata[table]\n return self.metadata\n\n def has_newer_source(self, indicator, date):\n \"\"\"check if an indicator's last modified date is newer than given date.\n \"\"\"\n if self.metadata is None:\n self.load_metadata()\n md = self.metadata\n last_update = md.loc[md.id == indicator, 'last.update']\n assert len(last_update) == 1\n last_update = last_update.values[0]\n if pd.to_datetime(last_update) > pd.to_datetime(date):\n return True\n return False\n\n def download(self, i, out_dir):\n \"\"\"Download an indicator to out_dir.\n \"\"\"\n url = urljoin(self.main_url, f'indicator/{i}.csv.gz')\n res = requests_retry_session().get(url, stream=True, timeout=60)\n if res.status_code != 200:\n print(f'can not download source file: {url}')\n return\n\n with Path(out_dir, f'{i}.csv.gz').expanduser().open('wb') as f:\n for chunk in res.iter_content(chunk_size=1024):\n f.write(chunk)\n f.flush()\n\n def bulk_download(self, out_dir, indicators: list, pool_size=5):\n \"\"\"Download a list of indicators simultaneously.\n \"\"\"\n download_ = partial(self.download, out_dir=out_dir)\n\n with Pool(pool_size) as p:\n p.map(download_, indicators)\n" ]
[ [ "pandas.read_csv", "pandas.api.types.CategoricalDtype", "pandas.isnull" ], [ "pandas.read_csv", "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
837278709/Deep-Learning-Coursera-1
[ "2498a90d3f61ec0876752205066ec95323f83161" ]
[ "Neural Networks and Deep Learning/Week 3/Planar data classification with one hidden layer/planar_utils.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn\nimport sklearn.datasets\nimport sklearn.linear_model\n\ndef plot_decision_boundary(model, X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1\n y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole grid\n Z = model(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.ylabel('x2')\n plt.xlabel('x1')\n plt.scatter(X[0, :], X[1, :], c=y[0], cmap=plt.cm.Spectral)\n \n\ndef sigmoid(x):\n \"\"\"\n Compute the sigmoid of x\n\n Arguments:\n x -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(x)\n \"\"\"\n s = 1/(1+np.exp(-x))\n return s\n\ndef load_planar_dataset():\n np.random.seed(1)\n m = 400 # number of examples\n N = int(m/2) # number of points per class\n D = 2 # dimensionality\n X = np.zeros((m,D)) # data matrix where each row is a single example\n Y = np.zeros((m,1), dtype='uint8') # labels vector (0 for red, 1 for blue)\n a = 4 # maximum ray of the flower\n\n for j in range(2):\n ix = range(N*j,N*(j+1))\n t = np.linspace(j*3.12,(j+1)*3.12,N) + np.random.randn(N)*0.2 # theta\n r = a*np.sin(4*t) + np.random.randn(N)*0.2 # radius\n X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]\n Y[ix] = j\n \n X = X.T\n Y = Y.T\n\n return X, Y\n\ndef load_extra_datasets(): \n N = 200\n noisy_circles = sklearn.datasets.make_circles(n_samples=N, factor=.5, noise=.3)\n noisy_moons = sklearn.datasets.make_moons(n_samples=N, noise=.2)\n blobs = sklearn.datasets.make_blobs(n_samples=N, random_state=5, n_features=2, centers=6)\n gaussian_quantiles = sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.5, n_samples=N, n_features=2, n_classes=2, shuffle=True, random_state=None)\n no_structure = np.random.rand(N, 2), np.random.rand(N, 2)\n \n return noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure" ]
[ [ "matplotlib.pyplot.contourf", "sklearn.datasets.make_gaussian_quantiles", "numpy.random.seed", "matplotlib.pyplot.scatter", "numpy.linspace", "sklearn.datasets.make_moons", "numpy.arange", "numpy.cos", "numpy.sin", "sklearn.datasets.make_circles", "numpy.random.rand", "numpy.random.randn", "matplotlib.pyplot.xlabel", "numpy.exp", "numpy.zeros", "sklearn.datasets.make_blobs", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NREL/PV-DEMICE
[ "6e2938950ff10c37f176f46aeb76c78de609f535" ]
[ "PV_ICE/main.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nMain.py contains the functions to calculate the different quantities of materials\nin each step of the process. Reffer to the diagram on Package-Overview for the \nsteps considered. \n\nSupport functions include Weibull functions for reliability and failure; also, \nfunctions to modify baseline values and evaluate sensitivity to the parameters.\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport os\nimport matplotlib.pyplot as plt\n\ndef read_baseline_material(scenario, material='None', file=None):\n \n if file is None:\n try:\n file = _interactive_load('Select baseline file')\n except:\n raise Exception('Interactive load failed. Tkinter not supported'+\n 'on this system. Try installing X-Quartz and reloading')\n \n\ndef _interactive_load(title=None):\n # Tkinter file picker\n import tkinter\n from tkinter import filedialog\n root = tkinter.Tk()\n root.withdraw() #Start interactive file input\n root.attributes(\"-topmost\", True) #Bring window into foreground\n return filedialog.askopenfilename(parent=root, title=title) #initialdir = data_dir\n\ndef _unitReferences(keyword):\n '''\n Specify units for variable in scenario or materials\n \n Parameters\n ----------\n keyword : str\n String of scenario or material column label\n \n Returns\n -------\n yunits : str\n Unit specific to the keyword provided\n '''\n\n moduleDictionary = {'year': {'unit': 'Years', 'source': 'input'},\n 'new_Installed_Capacity_[MW]': {'unit': 'Power [MW]', 'source':'input'},\n 'mod_eff': {'unit': 'Efficiency $\\eta$ [%]', 'source':'input'},\n 'mod_reliability_t50': {'unit': 'Years' , 'source':'input'},\n 'mod_reliability_t90': {'unit': 'Years', 'source':'input'},\n 'mod_degradation': {'unit': 'Percentage [%]', 'source':'input'},\n 'mod_lifetime': {'unit': 'Years', 'source':'input'},\n 'mod_MFG_eff': {'unit': 'Efficiency $\\eta$ [%]', 'source':'input'},\n 'mod_EOL_collection_eff': {'unit': 'Efficiency $\\eta$ [%]', 'source':'input'},\n 'mod_EOL_collected_recycled': {'unit': 'Percentage [%]', 'source':'input'},\n 'mod_Repair': {'unit': 'Percentage [%]', 'source':'input'},\n 'mod_MerchantTail': {'unit': 'Percentage [%]', 'source':'input'},\n 'mod_Reuse': {'unit': 'Percentage [%]', 'source':'input'},\n 'Area': {'unit': 'm$^2$', 'source': 'generated'},\n 'Cumulative_Area_disposedby_Failure': {'unit': 'm$^2$', 'source': 'generated'},\n 'Cumulative_Area_disposedby_ProjectLifetime': {'unit': 'm$^2$', 'source': 'generated'},\n 'Cumulative_Area_disposed': {'unit': 'm$^2$', 'source': 'generated'},\n 'Cumulative_Active_Area': {'unit': 'm$^2$', 'source': 'generated'},\n 'Installed_Capacity_[W]': {'unit': 'Power [W]', 'source': 'generated'},\n 'EOL_on_Year_0': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_1': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_2': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_3': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_4': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_5': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_6': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_7': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_8': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_9': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_10': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_11': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_12': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_13': {'unit': 'm$^2$', 'source': 
'generated'},\n 'EOL_on_Year_14': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_15': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_16': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_17': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_18': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_19': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_20': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_21': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_22': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_23': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_24': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_25': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_26': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_27': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_28': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_29': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_30': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_31': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_32': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_33': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_34': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_35': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_36': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_37': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_38': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_39': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_40': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_41': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_42': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_43': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_44': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_45': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_46': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_47': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_48': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_49': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_50': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_51': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_52': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_53': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_54': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_55': {'unit': 'm$^2$', 'source': 'generated'},\n 'EoL_Collected': {'unit': 'm$^2$', 'source': 'generated'},\n 'EoL_NotCollected': {'unit': 'm$^2$', 'source': 'generated'},\n 'EoL_Recycled': {'unit': 'm$^2$', 'source': 'generated'},\n 'EoL_NotRecycled_Landfilled': {'unit': 'm$^2$', 'source': 'generated'}\n }\n\n materialDictionary={'year': {'unit': 'Years', 'source': 'input'},\n 'mat_virgin_eff': {'unit': 'Efficiency $\\eta$ [%]', 'source': 'input'},\n 'mat_massperm2': {'unit': 'Mass [g]', 'source': 'input'},\n 'mat_MFG_eff': {'unit': 'Efficiency $\\eta$ [%]', 'source': 'input'},\n 'mat_MFG_scrap_recycled': {'unit': 'Percentage [%]', 'source': 'input'},\n 'mat_MFG_scrap_Recycled': {'unit': 'Efficiency $\\eta$ [%]', 'source': 'input'},\n 'mat_MFG_scrap_Recycled_into_HQ': {'unit': 'Percentage [%]', 'source': 'input'},\n 'mat_MFG_scrap_Recycled_into_HQ_Reused4MFG': {'unit': 'Percentage [%]', 'source': 'input'},\n 'mat_EOL_collected_Recycled': {'unit': 'Percentage [%]', 
'source': 'input'},\n 'mat_EOL_Recycling_eff': {'unit': 'Efficiency $\\eta$ [%]', 'source': 'input'},\n 'mat_EOL_Recycled_into_HQ': {'unit': 'Percentage [%]', 'source': 'input'},\n 'mat_EOL_RecycledHQ_Reused4MFG': {'unit': 'Percentage [%]', 'source': 'input'},\n 'mat_modules_NotRecycled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_modules_NotCollected': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EOL_sento_Recycling': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EOL_NotRecycled_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EOL_Recycled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EOL_Recycled_Losses_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EOL_Recycled_2_HQ': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EOL_Recycled_2_OQ': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EoL_Recycled_HQ_into_MFG': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EOL_Recycled_HQ_into_OU': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_UsedinManufacturing': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_Manufacturing_Input': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Scrap': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Scrap_Sentto_Recycling': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Scrap_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Scrap_Recycled_Successfully': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Scrap_Recycled_Losses_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Recycled_into_HQ': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Recycled_into_OQ': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Recycled_HQ_into_MFG': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Recycled_HQ_into_OU': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_Virgin_Stock': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_Total_EOL_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_Total_MFG_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_Total_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_Total_Recycled_OU': {'unit': 'Mass [g]', 'source': 'generated'}\n }\n \n\n if keyword in moduleDictionary.keys():\n yunits = moduleDictionary[keyword]['unit']\n elif keyword in materialDictionary.keys():\n yunits = materialDictionary[keyword]['unit']\n else:\n print(\"Warning: Keyword / Units not Found\")\n yunits = 'UNITS'\n \n return yunits\n \n\ndef distance(s_lat, s_lng, e_lat, e_lng):\n \"\"\"\n # Haversine formula for numpy arrays\n # Author: MalyutinS\n # imported from comment on: https://gist.github.com/rochacbruno/2883505\n # Example: \n # s_lat = 45; s_lng = -110; e_lat=[33, 44]; e_lng = [-115, -140]\n # Returns distance from the source point to the two ending points:\n # r = distance(s_lat, s_lng, e_lat, e_lng)\n # r = array([1402.24996689, 2369.0150434 ])\n #\n \"\"\"\n \n \n # approximate radius of earth in km\n R = 6373.0 \n \n# s_lat = s_lat*np.pi/180.0 \n s_lat = np.deg2rad(s_lat) \n s_lng = np.deg2rad(s_lng) \n e_lat = np.deg2rad(e_lat) \n e_lng = np.deg2rad(e_lng) \n \n d = np.sin((e_lat - s_lat)/2)**2 + np.cos(s_lat)*np.cos(e_lat) * np.sin((e_lng - s_lng)/2)**2\n distance = 2 * R * np.arcsin(np.sqrt(d)) \n \n return distance\n\ndef drivingdistance(origin, destination, APIkey):\n \"\"\"\n Creates call for google-maps api to get driving directions betwen two points.\n \n Input\n -----\n origin: array\n [lat, lon] expected\n destination: array\n [lat, 
lon] expected\n APYkey: str\n String\n \"\"\"\n \n lat1, lon1 = origin\n lat2, lon2 = destination\n \n gm_url = ('https://maps.googleapis.com/maps/api/directions/xml?'+\n 'origin='+str(lat1)+','+str(lon1)+\n '&destination='+str(lat2)+','+str(lon2)+\n '&key='+APIkey)\n\n return gm_url\n \n \n \nclass Simulation:\n \"\"\"\n The ScenarioObj top level class is used to work on Circular Economy scenario objects, \n keep track of filenames, data for module and materials, operations modifying\n the baselines, etc.\n\n Parameters\n ----------\n name : text to append to output files\n nowstr : current date/time string\n path : working directory with circular economy results\n\n Methods\n -------\n __init__ : initialize the object\n _setPath : change the working directory\n\n \"\"\"\n \n def __init__(self, name=None, path=None):\n '''\n initialize ScenarioObj with path of Scenario's baseline of module and materials\n as well as a basename to append to\n\n Parameters\n ----------\n name: string, append temporary and output files with this value\n path: location of Radiance materials and objects\n\n Returns\n -------\n none\n '''\n\n self.path = \"\" # path of working directory\n self.name = \"\" # basename to append\n \n now = datetime.datetime.now()\n self.nowstr = str(now.date())+'_'+str(now.hour)+str(now.minute)+str(now.second)\n\n if path is None:\n self._setPath(os.getcwd())\n else:\n self._setPath(path)\n\n if name is None:\n self.name = self.nowstr # set default filename for output files\n else:\n self.name = name\n\n self.scenario={}\n\n \n def _setPath(self, path):\n \"\"\"\n setPath - move path and working directory\n\n \"\"\"\n self.path = os.path.abspath(path)\n\n print('path = '+ path)\n try:\n os.chdir(self.path)\n except OSError as exc:\n LOGGER.error('Path doesn''t exist: %s' % (path))\n LOGGER.exception(exc)\n raise(exc)\n\n # check for path in the new Radiance directory:\n def _checkPath(path): # create the file structure if it doesn't exist\n if not os.path.exists(path):\n os.makedirs(path)\n print('Making path: '+path)\n \n def createScenario(self, name, file=None):\n \n self.scenario[name] = Scenario(name, file)\n \n\n\n def modifyScenario(self, scenarios, stage, value, start_year=None):\n \n if start_year is None:\n start_year = int(datetime.datetime.now().year)\n \n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n \n selectyears = self.scenario[scenarios[0]].data['year']>start_year\n \n for scen in scenarios:\n self.scenario[scen].data.loc[selectyears, stage] = value\n \n def calculateMassFlow(self, scenarios = None, materials=None, weibullInputParams = None, \n bifacialityfactors = None, reducecapacity = True, debugflag=False):\n '''\n Function takes as input a baseline dataframe already imported, \n with the right number of columns and content.\n It returns the dataframe with all the added calculation columns.\n \n Parameters\n ------------\n weibullInputParams : None\n Dictionary with 'alpha' and 'beta' value for shaping the weibull\n curve. beta is sometimes exchanged with lifetime, for example on\n Irena 2016 values beta = 30. If weibullInputParams = None,\n alfa and beta are calcualted from the t50 and t90 columns on the\n module baseline.\n scenarios : None\n string with the scenario name or list of strings with\n scenarios names to loop over. Must exist on the PV ICE object.\n materials : None\n string with the material name or list of strings with the\n materials names to loop over. 
Must exists on the PV ICE object \n scenario(s) modeled.\n bifacialityfactors : str\n File with bifacialtiy factors for each year under consideration\n \n Returns\n --------\n df: dataframe \n input dataframe with addeds columns for the calculations of recycled,\n collected, waste, installed area, etc. \n \n '''\n \n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n \n for scen in scenarios:\n \n print(\"Working on Scenario: \", scen)\n print(\"********************\")\n df = self.scenario[scen].data\n\n # Constant\n if bifacialityfactors is not None: \n bf = pd.read_csv(bifacialityfactors)\n df['irradiance_stc'] = 1000.0 + bf['bifi']*100.0 # W/m^2 (min. Bifacial STC Increase)\n else:\n df['irradiance_stc'] = 1000.0 # W/m^2\n\n # Renaming and re-scaling\n df['t50'] = df['mod_reliability_t50']\n df['t90'] = df['mod_reliability_t90']\n \n # Calculating Area and Mass\n \n if 'Mass_[MetricTonnes]' in df:\n df['new_Installed_Capacity_[W]'] = 0\n df['new_Installed_Capacity_[MW]'] = 0\n df['Area'] = df['Mass_[MetricTonnes]']\n print(\"Warning, this is for special debuging of Wambach Procedure.\"+\n \"Make sure to use Wambach Module\")\n else:\n df['new_Installed_Capacity_[W]'] = df['new_Installed_Capacity_[MW]']*1e6\n\n if reducecapacity:\n df['Area'] = df['new_Installed_Capacity_[W]']/(df['mod_eff']*0.01)/df['irradiance_stc'] # m^2 \n else:\n df['Area'] = df['new_Installed_Capacity_[W]']/(df['mod_eff']*0.01)/1000.0 # m^2\n \n \n df['Area'] = df['Area'].fillna(0) # Chagne na's to 0s.\n\n # Calculating Wast by Generation by Year, and Cumulative Waste by Year.\n Generation_Disposed_byYear = []\n Generation_Active_byYear= []\n Generation_Power_byYear = []\n weibullParamList = []\n\n df['Cumulative_Area_disposedby_Failure'] = 0\n df['Cumulative_Area_disposedby_ProjectLifetime'] = 0\n df['Cumulative_Area_disposed'] = 0\n df['Repaired_[W]'] = 0\n df['Repaired_Area'] = 0\n df['Cumulative_Active_Area'] = 0\n df['Installed_Capacity_[W]'] = 0\n for generation, row in df.iterrows(): \n #generation is an int 0,1,2,.... 
etc.\n #generation=4\n #row=df.iloc[generation]\n \n if weibullInputParams:\n weibullIParams = weibullInputParams\n elif 'weibull_alpha' in row:\n # \"Weibull Input Params passed internally as a column\"\n weibullIParams = {'alpha': row['weibull_alpha'], 'beta': row['weibull_beta']}\n else:\n # \"Calculating Weibull Params from Modules t50 and T90\"\n t50, t90 = row['t50'], row['t90']\n weibullIParams = weibull_params({t50: 0.50, t90: 0.90}) \n \n f = weibull_cdf(weibullIParams['alpha'], weibullIParams['beta'])\n \n weibullParamList.append(weibullIParams)\n\n x = np.clip(df.index - generation, 0, np.inf)\n cdf = list(map(f, x))\n pdf = [0] + [j - i for i, j in zip(cdf[: -1], cdf[1 :])]\n\n activearea = row['Area']\n if np.isnan(activearea):\n activearea=0\n \n activeareacount = []\n areadisposed_failure = []\n areadisposed_projectlifetime = []\n arearepaired = []\n arearepaired_powergen = []\n areapowergen = []\n active=0\n disposed_projectlifetime=0\n for age in range(len(cdf)):\n disposed_projectlifetime=0\n if x[age] == 0.0:\n activeareacount.append(0)\n areadisposed_failure.append(0)\n areadisposed_projectlifetime.append(0)\n areapowergen.append(0)\n arearepaired.append(0)\n arearepaired_powergen.append(0)\n else:\n active += 1\n activeareaprev = activearea \n activearea = activearea-row['Area']*pdf[age]+row['Area']*pdf[age]*df.iloc[age]['mod_Repair']*0.01 \n# arearepaired_failure = activearea*cdf[age]*df.iloc[age]['mod_Repair']*0.01\n arearepaired_failure = row['Area']*pdf[age]*df.iloc[age]['mod_Repair']*0.01\n\n arearepaired.append(arearepaired_failure)\n arearepaired_powergen.append(arearepaired_failure*row['mod_eff']*0.01*row['irradiance_stc']*(1-row['mod_degradation']*0.01)**active) \n \n areadisposed_failure.append(activeareaprev-activearea)\n if age == int(row['mod_lifetime']+generation):\n activearea_temp = activearea\n activearea = 0+activearea*(df.iloc[age]['mod_MerchantTail']*0.01)\n disposed_projectlifetime = activearea_temp-activearea\n\n activearea2 = 0+disposed_projectlifetime*(df.iloc[age]['mod_Reuse']*0.01) # 12 \n activearea = activearea + activearea2 # 92\n disposed_projectlifetime = disposed_projectlifetime - activearea2 # 8\n\n# activearea = 0+disposed_projectlifetime*(df.iloc[age]['mod_Reuse']*0.01)\n# disposed_projectlifetime = activearea_temp-activearea\n areadisposed_projectlifetime.append(disposed_projectlifetime)\n activeareacount.append(activearea)\n areapowergen.append(activearea*row['mod_eff']*0.01*row['irradiance_stc']*(1-row['mod_degradation']*0.01)**active) \n \n try:\n # becuase the clip starts with 0 for the installation year, identifying installation year\n # and adding initial area\n fixinitialareacount = next((i for i, e in enumerate(x) if e), None) - 1\n activeareacount[fixinitialareacount] = activeareacount[fixinitialareacount]+row['Area'] \n areapowergen[fixinitialareacount] = (areapowergen[fixinitialareacount] + \n row['Area'] * row['mod_eff'] *0.01 * row['irradiance_stc']) \n except:\n # Last value does not have a xclip value of nonzero so it goes\n # to except. 
But it also means the loop finished for the calculations\n # of Lifetime.\n fixinitialareacount = len(cdf)-1\n activeareacount[fixinitialareacount] = activeareacount[fixinitialareacount]+row['Area'] \n areapowergen[fixinitialareacount] = (areapowergen[fixinitialareacount] + \n row['Area'] * row['mod_eff'] *0.01 * row['irradiance_stc']) \n print(\"Finished Area+Power Generation Calculations\")\n \n \n # area_disposed_of_generation_by_year = [element*row['Area'] for element in pdf]\n df['Cumulative_Area_disposedby_Failure'] += areadisposed_failure\n df['Cumulative_Area_disposedby_ProjectLifetime'] += areadisposed_projectlifetime\n df['Cumulative_Area_disposed'] += areadisposed_failure\n df['Cumulative_Area_disposed'] += areadisposed_projectlifetime\n \n \n df['Repaired_[W]'] += arearepaired_powergen\n df['Repaired_Area'] += arearepaired\n df['Cumulative_Active_Area'] += activeareacount\n df['Installed_Capacity_[W]'] += areapowergen\n Generation_Disposed_byYear.append([x + y for x, y in zip(areadisposed_failure, areadisposed_projectlifetime)])\n Generation_Active_byYear.append(activeareacount)\n Generation_Power_byYear.append(areapowergen)\n \n \n df['WeibullParams'] = weibullParamList\n MatrixDisposalbyYear = pd.DataFrame(Generation_Disposed_byYear, columns = df.index, index = df.index)\n MatrixDisposalbyYear = MatrixDisposalbyYear.add_prefix(\"EOL_on_Year_\")\n \n try:\n df = df[df.columns.drop(list(df.filter(regex='EOL_on_Year_')))]\n except:\n print(\"Warning: Issue dropping EOL columns generated by \" \\\n \"calculateMFC routine to overwrite\")\n \n df = df.join(MatrixDisposalbyYear)\n\n \n ## Start to do EOL Processes\n ############################\n \n filter_col = [col for col in df if col.startswith('EOL_on_Year_')]\n EOL = df[filter_col]\n \n # This Multiplication pattern goes through Module and then material.\n # It is for processes that depend on each year as they improve, i.e. \n # Collection Efficiency,\n #\n # [ G1_1 G1_2 G1_3 G2_4 ...] [N1\n # [ 0 G2_1 G2_2 G2_3 ...] X N2\n # [ 0 0 G3_1 G3_2 ...] N3\n # N4]\n #\n # EQUAL\n # EOL_Collected =\n # [ G1_1*N1 G1_2 *N2 G1_3 *N3 G2_4 *N4 ...]\n # [ 0 G2_1 *N2 G2_2 *N3 G2_3 *N4 ...]\n # [ 0 0 G3_1 *N3 G3_2 *N4 ...] 
\n #\n \n EOL_Collected = EOL.mul(df['mod_EOL_collection_eff'].values*0.01)\n df['EoL_Collected'] = list(EOL_Collected.sum())\n landfill_Collection = EOL.mul(1-(df['mod_EOL_collection_eff'].values*0.01)) \n df['EoL_NotCollected'] = list(landfill_Collection.sum())\n \n EOL_Recycled = EOL_Collected.mul(df['mod_EOL_collected_recycled'].values*0.01)\n df['EoL_Recycled'] = list(EOL_Recycled.sum())\n EOL_NotRecycled_Landfilled = EOL_Collected.mul((1-df['mod_EOL_collected_recycled'].values*0.01))\n df['EoL_NotRecycled_Landfilled'] = list(EOL_NotRecycled_Landfilled.sum())\n \n # Cleanup of internal renaming and internal use columns\n df.drop(['new_Installed_Capacity_[W]', 't50', 't90'], axis = 1, inplace=True) \n \n df['ModuleTotal_MFG']=df['Area']*100/df['mod_MFG_eff']\n \n self.scenario[scen].data = df\n \n # collection losses here\n \n # Recyle % here\n \n \n ################\n # Material Loop#\n ################\n\n if materials is None:\n materials = list(self.scenario[scenarios[0]].material.keys())\n else:\n if isinstance(materials, str):\n materials = [materials]\n \n for mat in materials:\n\n print(\"==> Working on Material : \", mat)\n\n dm = self.scenario[scen].material[mat].materialdata\n \n # SWITCH TO MASS UNITS FOR THE MATERILA NOW:\n # THIS IS DIFFERENT MULTIPLICATION THAN THE REST\n # BECAUSE IT DEPENDS TO THE ORIGINAL MASS OF EACH MODULE WHEN INSTALLED\n # [M1 * [ G1_1 G1_2 G1_3 G2_4 ...]\n # M2 [ 0 G2_1 G2_2 G2_3 ...]\n # M3] [ 0 0 G3_1 G3_2 ...]\n # \n # EQUAL\n # mat_EOL_sentoRecycling = \n # [ G1_1*M1 G1_2*M1 G1_3*M1 G2_4*M1 ...]\n # [ 0 G2_1*M2 G2_2*M2 G2_3*M2 ...]\n # [ 0 0 G3_1*M3 G3_2*M3 ...]\n #\n \n mat_modules_EOL_sentoRecycling = EOL_Recycled.multiply(dm['mat_massperm2'], axis=0)\n dm['mat_modules_Collected'] = list(EOL_Collected.multiply(dm['mat_massperm2'], axis=0).sum())\n dm['mat_modules_NotCollected'] = list(landfill_Collection.multiply(dm['mat_massperm2'], axis=0).sum())\n dm['mat_modules_Recycled'] = list(EOL_Recycled.multiply(dm['mat_massperm2'], axis=0).sum())\n dm['mat_modules_NotRecycled'] = list(EOL_NotRecycled_Landfilled.multiply(dm['mat_massperm2'], axis=0).sum())\n \n \n # mat_EOL_collected_Recycled CHANGE NAME\n # chnge also landfill_material_EOL_NotRecycled_Landfilled \n mat_EOL_sento_Recycling = mat_modules_EOL_sentoRecycling.mul(dm['mat_EOL_collected_Recycled'].values*0.01)\n dm['mat_EOL_sento_Recycling'] = list(mat_EOL_sento_Recycling.sum())\n landfill_material_EOL_NotRecycled_Landfilled = mat_modules_EOL_sentoRecycling.mul(1-(dm['mat_EOL_collected_Recycled'].values*0.01))\n dm['mat_EOL_NotRecycled_Landfilled'] = list(landfill_material_EOL_NotRecycled_Landfilled.sum())\n \n mat_EOL_Recycled_Succesfully = mat_EOL_sento_Recycling.mul(dm['mat_EOL_Recycling_eff'].values*0.01)\n dm['mat_EOL_Recycled'] = list(mat_EOL_Recycled_Succesfully.sum())\n landfill_material_EOL_Recyled_Losses_Landfilled = mat_EOL_sento_Recycling.mul(1-(dm['mat_EOL_Recycling_eff'].values*0.01))\n dm['mat_EOL_Recycled_Losses_Landfilled'] = list(landfill_material_EOL_Recyled_Losses_Landfilled.sum())\n \n \n mat_EOL_Recycled_HQ = mat_EOL_Recycled_Succesfully.mul(dm['mat_EOL_Recycled_into_HQ'].values*0.01)\n dm['mat_EOL_Recycled_2_HQ'] = list(mat_EOL_Recycled_HQ.sum())\n mat_EOL_Recycled_OQ = mat_EOL_Recycled_Succesfully.mul(1-(dm['mat_EOL_Recycled_into_HQ'].values*0.01))\n dm['mat_EOL_Recycled_2_OQ'] = list(mat_EOL_Recycled_OQ.sum())\n \n mat_EOL_Recycled_HQ_into_MFG = mat_EOL_Recycled_HQ.mul(dm['mat_EOL_RecycledHQ_Reused4MFG'].values*0.01)\n dm['mat_EoL_Recycled_HQ_into_MFG'] = 
list(mat_EOL_Recycled_HQ_into_MFG.sum())\n mat_EOL_Recycled_HQ_into_OU = mat_EOL_Recycled_HQ.mul(1-(dm['mat_EOL_RecycledHQ_Reused4MFG'].values*0.01))\n dm['mat_EOL_Recycled_HQ_into_OU'] = list(mat_EOL_Recycled_HQ_into_OU.sum())\n \n # BULK Calculations Now\n dm['mat_UsedSuccessfullyinModuleManufacturing'] = (df['Area'] * dm['mat_massperm2'])\n dm['mat_EnteringModuleManufacturing'] = (df['Area'] * dm['mat_massperm2']*100/df['mod_MFG_eff'])\n dm['mat_LostinModuleManufacturing'] = dm['mat_EnteringModuleManufacturing'] - dm['mat_UsedSuccessfullyinModuleManufacturing']\n \n dm['mat_Manufacturing_Input'] = dm['mat_EnteringModuleManufacturing'] / (dm['mat_MFG_eff'] * 0.01)\n \n # Scrap = Lost to Material manufacturing losses + Module manufacturing losses\n dm['mat_MFG_Scrap'] = (dm['mat_Manufacturing_Input'] - dm['mat_EnteringModuleManufacturing'] + \n dm['mat_LostinModuleManufacturing'])\n dm['mat_MFG_Scrap_Sentto_Recycling'] = dm['mat_MFG_Scrap'] * dm['mat_MFG_scrap_Recycled'] * 0.01\n \n \n \n dm['mat_MFG_Scrap_Landfilled'] = dm['mat_MFG_Scrap'] - dm['mat_MFG_Scrap_Sentto_Recycling'] \n dm['mat_MFG_Scrap_Recycled_Successfully'] = (dm['mat_MFG_Scrap_Sentto_Recycling'] *\n dm['mat_MFG_scrap_Recycling_eff'] * 0.01)\n dm['mat_MFG_Scrap_Recycled_Losses_Landfilled'] = (dm['mat_MFG_Scrap_Sentto_Recycling'] - \n dm['mat_MFG_Scrap_Recycled_Successfully'])\n dm['mat_MFG_Recycled_into_HQ'] = (dm['mat_MFG_Scrap_Recycled_Successfully'] * \n dm['mat_MFG_scrap_Recycled_into_HQ'] * 0.01)\n dm['mat_MFG_Recycled_into_OQ'] = dm['mat_MFG_Scrap_Recycled_Successfully'] - dm['mat_MFG_Recycled_into_HQ']\n dm['mat_MFG_Recycled_HQ_into_MFG'] = (dm['mat_MFG_Recycled_into_HQ'] * \n dm['mat_MFG_scrap_Recycled_into_HQ_Reused4MFG'] * 0.01)\n dm['mat_MFG_Recycled_HQ_into_OU'] = dm['mat_MFG_Recycled_into_HQ'] - dm['mat_MFG_Recycled_HQ_into_MFG']\n dm['mat_Virgin_Stock'] = dm['mat_Manufacturing_Input'] - dm['mat_EoL_Recycled_HQ_into_MFG'] - dm['mat_MFG_Recycled_HQ_into_MFG']\n \n # Calculate raw virgin needs before mining and refining efficiency losses\n dm['mat_Virgin_Stock_Raw'] = (dm['mat_Virgin_Stock'] * 100 / dm['mat_virgin_eff'])\n\n # Add Wastes\n dm['mat_Total_EOL_Landfilled'] = (dm['mat_modules_NotCollected'] + \n dm['mat_modules_NotRecycled'] +\n dm['mat_EOL_NotRecycled_Landfilled'] +\n dm['mat_EOL_Recycled_Losses_Landfilled']) \n \n dm['mat_Total_MFG_Landfilled'] = (dm['mat_MFG_Scrap_Landfilled'] + \n dm['mat_MFG_Scrap_Recycled_Losses_Landfilled'])\n \n dm['mat_Total_Landfilled'] = (dm['mat_Total_EOL_Landfilled'] + \n dm['mat_Total_MFG_Landfilled'])\n \n dm['mat_Total_Recycled_OU'] = (dm['mat_EOL_Recycled_2_OQ'] + \n dm['mat_EOL_Recycled_HQ_into_OU'] + \n dm['mat_MFG_Recycled_into_OQ'] + \n dm['mat_MFG_Recycled_HQ_into_OU'])\n \n \n self.scenario[scen].material[mat].materialdata = dm\n\n \n def scenMod_IRENIFY(self, scenarios=None, ELorRL='RL'):\n \n if ELorRL == 'RL':\n weibullInputParams = {'alpha': 5.3759, 'beta': 30} # Regular-loss scenario IRENA\n print(\"Using Irena Regular Loss Assumptions\")\n if ELorRL == 'EL':\n weibullInputParams = {'alpha': 2.4928, 'beta': 30} # Regular-loss scenario IRENA\n print(\"Using Irena Early Loss Assumptions\")\n \n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n\n for scen in scenarios:\n self.scenario[scen].data['weibull_alpha'] = weibullInputParams['alpha']\n self.scenario[scen].data['weibull_beta'] = weibullInputParams['beta']\n self.scenario[scen].data['mod_lifetime'] = 40.0\n 
self.scenario[scen].data['mod_MFG_eff'] = 100.0\n \n for mat in self.scenario[scen].material:\n self.scenario[scen].material[mat].materialdata['mat_MFG_eff'] = 100.0 \n self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled'] = 0.0 \n \n return\n\n\n def check_Years_dataandMaterials(self, scenarios=None, materials=None):\n '''\n '''\n print (\"Not Done\")\n\n def trim_Years( self, startYear=None, endYear=None, aggregateInstalls=False, \n averageEfficiency=False, averageMaterialData = False, methodAddedYears='repeat', \n scenarios=None, materials=None):\n '''\n \n methodStart : str\n 'trim' or 'aggregate'. Trim cuts the values before the year specified.\n Aggregate sums the values (if any) up to the year specified and sets it\n in that year. No backfilling of data enabled at the moment.\n methodEnd : str\n 'repeat' or 'zeroes' only options at the moment. \n 'repeat' Increases to the endYear by repeating the last value. \n zeroes places zeroes.\n \n '''\n\n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n\n scen0 = scenarios[0]\n dataStartYear = int(self.scenario[scen0].data.iloc[0]['year'])\n dataEndYear = int(self.scenario[scen0].data.iloc[-1]['year'])\n\n if startYear is None:\n startYear = dataStartYear\n print(\"startYear not provided. Setting to start year of Module data\", startYear)\n\n if endYear is None:\n endYear = dataEndYear\n print(\"endYear not provided. Setting to end year of Module data\", endYear)\n\n startYear = startYear\n endYear = endYear\n\n\n for scen in scenarios:\n baseline = self.scenario[scen].data\n \n if int(startYear) < int(dataStartYear):\n print(\"ADD YEARS HERE. not done yet\")\n\n if int(endYear) > int(dataEndYear):\n print(\"ADD YEARS HERE. not done yet\")\n\n # Add check if data does not need to be reduced to not do these.\n reduced = baseline.loc[(baseline['year']>=startYear) & (baseline['year']<=endYear)].copy()\n\n if aggregateInstalls:\n prev = baseline.loc[(baseline['year']<startYear)].sum()\n reduced.loc[reduced['year'] == startYear, 'new_Installed_Capacity_[MW]'] = prev['new_Installed_Capacity_[MW]']\n \n if averageEfficiency:\n prev = baseline.loc[(baseline['year']<startYear)].mean()\n reduced.loc[reduced['year'] == startYear, 'mod_eff\t'] = prev['mod_eff\t']\n \n reduced.reset_index(drop=True, inplace=True)\n self.scenario[scen].data = reduced #reassign the material data to the simulation\n\n for mat in self.scenario[scen].material:\n if int(startYear) < int(dataStartYear):\n print(\"ADD YEARS HERE. not done yet\")\n \n if int(endYear) > int(dataEndYear):\n print(\"ADD YEARS HERE. not done yet\")\n \n matdf = self.scenario[scen].material[mat].materialdata #pull out the df\n reduced = matdf.loc[(matdf['year']>=startYear) & (matdf['year']<=endYear)].copy()\n \n if averageMaterialData == 'average':\n prev = matdf.loc[(baseline['year']<startYear)].mean()\n matkeys = list(reduced.keys())[1:12]\n for matkey in matkeys: # skipping year (0). 
Skipping added columsn from mass flow\n reduced.loc[reduced['year'] == startYear, matkey] = prev[matkey]\n \n reduced.reset_index(drop=True, inplace=True)\n self.scenario[scen].material[mat].materialdata = reduced #reassign the material data to the simulation\n \n\n def scenMod_IRENIFY(self, scenarios=None, ELorRL='RL'):\n \n if ELorRL == 'RL':\n weibullInputParams = {'alpha': 5.3759, 'beta': 30} # Regular-loss scenario IRENA\n print(\"Using Irena Regular Loss Assumptions\")\n if ELorRL == 'EL':\n weibullInputParams = {'alpha': 2.4928, 'beta': 30} # Regular-loss scenario IRENA\n print(\"Using Irena Early Loss Assumptions\")\n \n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n\n for scen in scenarios:\n self.scenario[scen].data['weibull_alpha'] = weibullInputParams['alpha']\n self.scenario[scen].data['weibull_beta'] = weibullInputParams['beta']\n self.scenario[scen].data['mod_lifetime'] = 40.0\n self.scenario[scen].data['mod_MFG_eff'] = 100.0\n \n for mat in self.scenario[scen].material:\n self.scenario[scen].material[mat].materialdata['mat_MFG_eff'] = 100.0 \n self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled'] = 0.0 \n \n return\n\n\n\n def scenMod_PerfectManufacturing(self, scenarios=None):\n \n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n\n for scen in scenarios:\n self.scenario[scen].data['mod_MFG_eff'] = 100.0\n \n for mat in self.scenario[scen].material:\n self.scenario[scen].material[mat].materialdata['mat_virgin_eff'] = 100.0 \n self.scenario[scen].material[mat].materialdata['mat_MFG_eff'] = 100.0 \n return\n\n def scenMod_noCircularity(self, scenarios=None):\n \n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n\n for scen in scenarios:\n self.scenario[scen].data['mod_EOL_collection_eff '] = 0.0\n self.scenario[scen].data['mod_EOL_collected_recycled'] = 0.0\n self.scenario[scen].data['mod_Repair'] = 0.0\n self.scenario[scen].data['mod_MerchantTail'] = 0.0\n self.scenario[scen].data['mod_Reuse'] = 0.0\n\n for mat in self.scenario[scen].material:\n self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled'] = 0.0 \n self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycling_eff'] = 0.0 \n self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled_into_HQ'] = 0.0 \n self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled_into_HQ_Reused4MFG'] = 0.0 \n\n self.scenario[scen].material[mat].materialdata['mat_EOL_collected_Recycled'] = 0.0 \n self.scenario[scen].material[mat].materialdata['mat_EOL_Recycling_eff'] = 0.0 \n self.scenario[scen].material[mat].materialdata['mat_EOL_Recycled_into_HQ'] = 0.0 \n self.scenario[scen].material[mat].materialdata['mat_EOL_RecycledHQ_Reused4MFG'] = 0.0 \n\n\n return \n\n def aggregateResults(self, scenarios=None, materials=None):\n\n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n\n if materials is None:\n materials = list(self.scenario[scenarios[0]].material.keys())\n else:\n if isinstance(materials, str):\n materials = [materials]\n\n keywds = ['mat_Virgin_Stock', 'mat_Total_Landfilled', 'mat_Total_EOL_Landfilled', 'mat_Total_MFG_Landfilled']\n nice_keywds = ['VirginStock', 'WasteAll', 'WasteEOL', 'WasteMFG']\n\n USyearly=pd.DataFrame()\n\n for 
scen in scenarios:\n for ii in range(len(keywds)):\n keywd = keywds[ii]\n nicekey = nice_keywds[ii]\n\n for mat in materials:\n USyearly[nicekey+'_'+mat+'_'+self.name+'_'+scen] = self.scenario[scen].material[mat].materialdata[keywd]\n filter_col = [col for col in USyearly if (col.startswith(nicekey) and col.endswith(self.name+'_'+scen)) ]\n USyearly[nicekey+'_Module_'+self.name+'_'+scen] = USyearly[filter_col].sum(axis=1)\n # 2DO: Add multiple objects option\n\n \n USyearly = USyearly/1000000 # This is the ratio for grams to Metric tonnes\n USyearly = USyearly.add_suffix('_[Tonnes]')\n \n # Different units, so no need to do the ratio to Metric tonnes :p\n keywd1='new_Installed_Capacity_[MW]'\n \n for scen in scenarios:\n USyearly['newInstalledCapacity_'+self.name+'_'+scen+'_[MW]'] = self.scenario[scen].data[keywd1]\n \n # Creating c umulative results\n UScum = USyearly.copy()\n UScum = UScum.cumsum()\n \n # Adding Installed Capacity to US (This is already 'Cumulative') so not including it in UScum\n # We are also renaming it to 'ActiveCapacity' and calculating Decommisioned Capacity. \n # TODO: Rename Installed_CApacity to ActiveCapacity throughout.\n keywd='Installed_Capacity_[W]' \n for scen in scenarios:\n USyearly['ActiveCapacity_'+self.name+'_'+scen+'_[MW]'] = self.scenario[scen].data[keywd]/1e6\n USyearly['DecommisionedCapacity_'+self.name+'_'+scen+'_[MW]'] = (\n UScum['newInstalledCapacity_'+self.name+'_'+scen+'_[MW]']-\n USyearly['ActiveCapacity_'+self.name+'_'+scen+'_[MW]'])\n\n # Adding Decommissioned Capacity\n\n # Reindexing and Merging\n USyearly.index = self.scenario[scen].data['year']\n UScum.index = self.scenario[scen].data['year']\n \n self.USyearly = USyearly\n self.UScum = UScum\n \n return USyearly, UScum\n \n def plotScenariosComparison(self, keyword=None, scenarios=None):\n\n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n \n if keyword is None:\n scens = list(self.scenario.keys())[0]\n print(\"Choose one of the keywords: \", list(self.scenario[scens].data.keys())) \n return\n \n yunits = _unitReferences(keyword)\n \n plt.figure()\n \n for scen in scenarios:\n plt.plot(self.scenario[scen].data['year'],self.scenario[scen].data[keyword], label=scen)\n plt.legend()\n plt.xlabel('Year')\n plt.title(keyword.replace('_', \" \"))\n plt.ylabel(yunits) \n\n\n def plotMetricResults(self):\n from plotly.subplots import make_subplots\n # import plotly.graph_objects as go\n\n \n y1 = self.plotMaterialResults(keyword='VirginStock', yearlyorcumulative='yearly') \n y2 = self.plotMaterialResults(keyword='WasteAll', yearlyorcumulative='yearly')\n y3 = self.plotMaterialResults(keyword='WasteEOL', yearlyorcumulative='yearly')\n y4 = self.plotMaterialResults(keyword='WasteMFG', yearlyorcumulative='yearly')\n c1 = self.plotMaterialResults(keyword='VirginStock', yearlyorcumulative='cumulative')\n c2 = self.plotMaterialResults(keyword='WasteAll', yearlyorcumulative='cumulative')\n c3 = self.plotMaterialResults(keyword='WasteEOL', yearlyorcumulative='cumulative')\n c4 = self.plotMaterialResults(keyword='WasteMFG', yearlyorcumulative='cumulative')\n ic = self.plotInstalledCapacityResults()\n \n def plotMaterialResults(self, keyword, yearlyorcumulative='yearly', cumplot=False):\n import plotly.express as px\n import re\n \n if yearlyorcumulative == 'yearly':\n data = self.USyearly\n else:\n data = self.UScum\n\n if keyword is None:\n print(\"keyword options are :\" 'VirginStock', 'WasteALL', 'WasteEOL', 
'WasteMFG')\n return\n #TODO: add a split to first bracket and print unique values option and return.\n \n filter_col = [col for col in data if col.startswith(keyword)]\n \n # Getting Title, Y-Axis Labels, and Legend Readable\n titlekeyword = str.capitalize(yearlyorcumulative) + re.sub( r\"([A-Z])\", r\" \\1\", keyword)\n units = filter_col[0].split('_')[-1]\n \n mylegend = [col.split('_')[1:] for col in filter_col]\n mylegend = [col[:-1] for col in mylegend]\n mylegend = [' '.join(col) for col in mylegend]\n mylegend = [str.capitalize(col) for col in mylegend]\n\n fig = px.line(data[filter_col], template=\"plotly_white\")\n \n fig.update_layout(\n title=titlekeyword,\n xaxis_title=\"Year\", \n yaxis_title=units\n )\n \n for idx, name in enumerate(mylegend):\n fig.data[idx].name = name\n fig.data[idx].hovertemplate = name\n \n if cumplot:\n return fig\n else:\n fig.show() \n return\n \n def plotInstalledCapacityResults(self, cumplot=False):\n # TODO: Add scenarios input to subselect which ones to plot.\n\n import plotly.express as px\n \n datay = self.USyearly\n datac = self.UScum\n \n filter_colc = [col for col in datac if col.startswith('newInstalledCapacity')]\n filter_coly = [col for col in datay if col.startswith('Capacity')]\n\n datay = datay[filter_coly].copy()\n mylegend = [col.split('_')[1:] for col in datay]\n mylegend = [col[:-1] for col in mylegend]\n mylegend = [str(col)[2:-2] for col in mylegend]\n mylegendy = ['Cumulative New Installs, '+col for col in mylegend]\n\n print(mylegend)\n \n datac = datac[filter_colc].copy()\n mylegend = [col.split('_')[1:] for col in datac]\n mylegend = [col[:-1] for col in mylegend]\n mylegend = [str(col)[2:-2] for col in mylegend]\n mylegendc = ['Capacity, '+col for col in mylegend]\n\n data = datay.join(datac)\n mylegend = mylegendy + mylegendc\n \n titlekeyword = 'Installed Capacity and Cumulative new Installs'\n\n \n # Getting Title, Y-Axis Labels, and Legend Readable\n units = filter_colc[0].split('_')[-1]\n \n\n \n fig = px.line(data, template=\"plotly_white\")\n \n fig.update_layout(\n title=titlekeyword,\n xaxis_title=\"Year\", \n yaxis_title=units\n )\n \n for idx, name in enumerate(mylegend):\n fig.data[idx].name = name\n fig.data[idx].hovertemplate = name\n \n if cumplot:\n return fig\n else:\n fig.show() \n return\n \n\n def plotMaterialComparisonAcrossScenarios(self, keyword=None, scenarios=None, material = None):\n\n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n \n if keyword is None:\n scens = list(self.scenario.keys())[0]\n mats = list(self.scenario[scens].material.keys())[0]\n print(\"Choose one of the keywords: \", list(self.scenario[scens].material[mats].materialdata.keys())) \n return\n\n\n if material is None:\n scens = list(self.scenario.keys())[0]\n mats = list(self.scenario[scens].material.keys())\n print(\"Choose one of the Materials: \", mats) \n return\n else:\n if isinstance(material, str) is False: \n mats = list(self.scenario[scens].material.keys())\n print(\"Can only pass one material name (str). 
Choose one of the Materials: \", mats) \n return\n\n yunits = _unitReferences(keyword)\n\n plt.figure()\n \n for scen in scenarios:\n plt.plot(self.scenario[scen].data['year'], self.scenario[scen].material[material].materialdata[keyword], label=scen)\n plt.legend()\n \n plt.xlabel('Year')\n plt.title((material + ' ' + keyword.replace('_', \" \")))\n plt.ylabel(yunits) \n \n \nclass Scenario(Simulation):\n \n def __init__(self, name, file=None):\n self.name = name\n self.material = {}\n \n if file is None:\n try:\n file = _interactive_load('Select module baseline file')\n except:\n raise Exception('Interactive load failed. Tkinter not supported'+\n 'on this system. Try installing X-Quartz and reloading')\n \n csvdata = open(str(file), 'r', encoding=\"UTF-8\")\n csvdata = open(str(file), 'r', encoding=\"UTF-8-sig\")\n firstline = csvdata.readline()\n secondline = csvdata.readline()\n\n head = firstline.rstrip('\\n').split(\",\")\n meta = dict(zip(head, secondline.rstrip('\\n').split(\",\")))\n\n data = pd.read_csv(csvdata, names=head)\n data.loc[:, data.columns != 'year'] = data.loc[:, data.columns != 'year'].astype(float)\n self.baselinefile = file\n self.metdata = meta,\n self.data = data\n \n def addMaterial(self, materialname, file=None):\n self.material[materialname] = Material(materialname, file)\n\n def addMaterials(self, materials, baselinefolder=None, nameformat=None):\n \n if baselinefolder is None:\n baselinefolder = r'..\\..\\baselines' \n\n if nameformat is None:\n nameformat = r'\\baseline_material_{}.csv'\n for mat in materials:\n filemat = baselinefolder + nameformat.format(mat)\n self.material[mat] = Material(mat, filemat)\n \n \n def modifyMaterials(self, materials, stage, value, start_year=None):\n \n if start_year is None:\n start_year = int(datetime.datetime.now().year)\n \n if materials is None:\n materials = list(self.material.keys())\n else:\n if isinstance(materials, str):\n materials = [materials]\n\n selectyears = self.data['year']>start_year\n \n for mat in materials:\n self.material[mat].materialdata.loc[selectyears, stage] = value\n\n\n def __getitem__(self, key):\n return getattr(self, key)\n\n def __setitem__(self, key):\n return setattr(self, key)\n\nclass Material:\n def __init__(self, materialname, file):\n self.materialname = materialname\n \n if file is None:\n try:\n file = _interactive_load('Select material baseline file')\n except:\n raise Exception('Interactive load failed. Tkinter not supported'+\n 'on this system. Try installing X-Quartz and reloading')\n \n csvdata = open(str(file), 'r', encoding=\"UTF-8\")\n csvdata = open(str(file), 'r', encoding=\"UTF-8-sig\")\n firstline = csvdata.readline()\n secondline = csvdata.readline()\n\n head = firstline.rstrip('\\n').split(\",\")\n meta = dict(zip(head, secondline.rstrip('\\n').split(\",\")))\n\n data = pd.read_csv(csvdata, names=head)\n data.loc[:, data.columns != 'year'] = data.loc[:, data.columns != 'year'].astype(float)\n self.materialfile = file\n self.materialmetdata = meta\n self.materialdata = data\n\n\ndef weibull_params(keypoints):\n r'''Returns shape parameter `alpha` and scale parameter `beta`\n for a Weibull distribution whose CDF passes through the\n two time: value pairs in `keypoints`\n\n Parameters\n ----------\n keypoints : list\n Two lists of t50 and 590 values, where t50 is the year since deployment\n that the cohort has lost 50% of originally installed modules, and t90 \n is the year since deployment that the cohort has lost 90% of the originally\n installed modules. 
These values are used to calcualte the shape and scale \n parameters for the weibull distribution.\n \n Returns\n -------\n alpha : float\n Shape parameter `alpha` for weibull distribution.\n beta : float\n Scale parameter `beta` for weibull distribution. Often exchanged with ``lifetime``\n like in Irena 2016, beta = 30.\n \n '''\n \n t1, t2 = tuple(keypoints.keys())\n cdf1, cdf2 = tuple(keypoints.values())\n alpha = np.ndarray.item(np.real_if_close(\n (np.log(np.log(1 - cdf1)+0j) - np.log(np.log(1 - cdf2)+0j))/(np.log(t1) - np.log(t2))\n ))\n beta = np.abs(np.exp(\n (\n np.log(t2)*((0+1j)*np.pi + np.log(np.log(1 - cdf1)+0j))\n + np.log(t1)*(((0-1j))*np.pi - np.log(np.log(1 - cdf2)+0j))\n )/(\n np.log(np.log(1 - cdf1)+0j) - np.log(np.log(1 - cdf2)+0j)\n )\n ))\n return {'alpha': alpha, 'beta': beta}\n\ndef weibull_cdf(alpha, beta):\n '''Return the CDF for a Weibull distribution having:\n shape parameter `alpha`\n scale parameter `beta`\n \n Parameters\n ----------\n alpha : float\n Shape parameter `alpha` for weibull distribution.\n beta : float\n Scale parameter `beta` for weibull distribution. Often exchanged with ``lifetime``\n like in Irena 2016, beta = 30.\n \n '''\n \n def cdf(x):\n return 1 - np.exp(-(np.array(x)/beta)**alpha)\n return cdf\n\ndef weibull_pdf(alpha, beta):\n r'''Return the PDF for a Weibull distribution having:\n shape parameter `alpha`\n scale parameter `beta`\n \n Parameters\n ----------\n alpha : float\n Shape parameter `alpha` for weibull distribution.\n beta : float\n Scale parameter `beta` for weibull distribution. Often exchanged with ``lifetime``\n like in Irena 2016, beta = 30.\n \n '''\n \n def pdf(x):\n return (alpha/np.array(x)) * ((np.array(x)/beta)**alpha) * (np.exp(-(np.array(x)/beta)**alpha))\n \n return pdf\n\ndef weibull_pdf_vis(alpha, beta, xlim=56):\n r''' Returns the CDF for a weibull distribution of 1 generation\n so it can be plotted.\n \n Parameters\n ----------\n alpha : float\n Shape parameter `alpha` for weibull distribution.\n beta : float\n Scale parameter `beta` for weibull distribution. Often exchanged with ``lifetime``\n like in Irena 2016, beta = 30.\n xlim : int\n Number of years to calculate the distribution for. i.e. x-axis limit. \n\n Returns\n -------\n idf : list\n List of weibull cumulative distribution values for year 0 until xlim.\n\n '''\n\n dfindex = pd.RangeIndex(0,xlim,1)\n x = np.clip(dfindex - 0, 0, np.inf)\n\n if alpha and beta:\n i = weibull_pdf(alpha, beta)\n \n idf = list(map(i, x))\n \n return idf\n\n\ndef weibull_cdf_vis(alpha, beta, xlim=56):\n r''' Returns the CDF for a weibull distribution of 1 generation\n so it can be plotted.\n \n Parameters\n ----------\n alpha : float\n Shape parameter `alpha` for weibull distribution.\n beta : float\n Scale parameter `beta` for weibull distribution. Often exchanged with ``lifetime``\n like in Irena 2016, beta = 30.\n xlim : int\n Number of years to calculate the distribution for. i.e. x-axis limit. \n\n Returns\n -------\n idf : list\n List of weibull cumulative distribution values for year 0 until xlim.\n\n '''\n\n dfindex = pd.RangeIndex(0,xlim,1)\n x = np.clip(dfindex - 0, 0, np.inf)\n\n if alpha and beta:\n i = weibull_cdf(alpha, beta)\n \n idf = list(map(i, x))\n \n return idf\n\n \ndef sens_StageImprovement(df, stage, improvement=1.3, start_year=None):\n '''\n Modifies baseline scenario for evaluating sensitivity of lifetime parameter.\n t50 and t90 reliability years get incresed by `improvement` parameter\n starting the `year_increase` year specified. 
\n \n Parameters\n ----------\n df : dataframe\n dataframe to be modified\n stage : str\n Stage that wants to be modified. This can be any of the module or \n material specified values, for example:'MFG_Material_eff', \n 'mat_MFG_scrap_recycled', 'mat_MFG_scrap_Recycled', \n 'mat_MFG_scrap_Recycled_into_HQ', 'mat_MFG_scrap_Recycled_into_HQ_Reused4MFG'\n 'mod_EOL_collection_losses', 'mod_EOL_collected_recycled',\n 'mat_EOL_Recycling_eff', 'mat_EOL_Recycled_into_HQ', \n 'mat_EOL_RecycledHQ_Reused4MFG', 'mod_Repair',\n 'mod_MerchantTail', 'mod_Reuse', 'mod_eff', etc.\n improvement : decimal\n Percent increase in decimal (i.e. \"1.3\" for 30% increase in value) \n or percent decrease (i.e. \"0.3\") relative to values in df.\n start_year : \n the year at which the improvement occurs\n \n Returns\n --------\n df : dataframe\n dataframe of expected module lifetime increased or decreased at specified year\n '''\n\n\n if start_year is None:\n start_year = int(datetime.datetime.now().year)\n\n #df[df.index > 2000]['mod_reliability_t50'].apply(lambda x: x*1.3)\n df[stage] = df[stage].astype(float)\n df.loc[df.index > start_year, stage] = df[df.index > start_year][stage].apply(lambda x: x*improvement)\n \n return df\n\n\ndef sens_StageEfficiency(df, stage, target_eff = 95.0, start_year = None, \n goal_year = 2030, plotflag = False):\n '''\n Modifies baseline scenario for evaluating sensitivity to increasing a stage in the \n lifetime of the module's efficiency. It either increases or decreases from the \n start year until the goal year the value to the target efficiency by interpolation.\n \n Parameters\n ----------\n df : dataframe \n dataframe to be modified\n stage : str\n Stage that wants to be modified. This can be any of the module or \n material specified efficiencies, for example:'MFG_Material_eff', \n 'mat_MFG_scrap_recycled', 'mat_MFG_scrap_Recycled', \n 'mat_MFG_scrap_Recycled_into_HQ', 'mat_MFG_scrap_Recycled_into_HQ_Reused4MFG'\n 'mod_EOL_collection_losses', 'mod_EOL_collected_recycled',\n 'mat_EOL_Recycling_eff', 'mat_EOL_Recycled_into_HQ', \n 'mat_EOL_RecycledHQ_Reused4MFG', 'mod_Repair',\n 'mod_MerchantTail', 'mod_Reuse', 'mod_eff', etc.\n start_year: int\n Year to start modifying the value. This specifies the initial efficiency \n value that is going to be modified. If None is passed, current year is used.\n target_eff: flat\n target eff value in percentage to be reached. i.e. 95.0 %.\n goal_year : int\n year by which target efficiency will be reached. i.e. 2030. Must be higher than current year.\n \n Returns\n -------\n df : dataframe\n modified dataframe\n '''\n \n if start_year is None:\n start_year = int(datetime.datetime.now().year)\n \n if start_year > goal_year:\n print(\"Error. Goal Year is before start year\")\n return\n \n if 0 < abs(target_eff) < 1: # checking it is not 0.95 but 95% i.e.\n print(\"Warning: target_eff value is between 0 and 1; it has been\"\n \"multiplied by 100% assuming it was a percentage in decimal form.\")\n target_eff = target_eff*100\n \n if target_eff > 100 or target_eff < 0:\n print(\"Warning: target_eff is out of range. 
Input value between\"\n \"0 and 100\")\n return\n \n if stage in df.columns:\n df2 = df.copy()\n df2[stage]=df2[stage].astype(float)\n df2.loc[(df2.index < goal_year) & (df2.index > start_year), stage] = np.nan\n df2.loc[df2.index >= goal_year , stage] = target_eff\n df2[stage] = df2[stage].interpolate()\n \n if plotflag:\n plt.plot(df[stage], label='Original')\n plt.plot(df2[stage], label='Modified')\n plt.title('Updated values for '+stage)\n plt.legend()\n return df2\n else:\n print(\"Stage name incorrect.\")\n\n\n\n\n\n\ndef _modDict(originaldict, moddict):\n '''\n Compares keys in originaldict with moddict and updates values of \n originaldict to moddict if existing.\n \n Parameters\n ----------\n originaldict : dictionary\n Original dictionary calculated, for example frontscan or backscan dictionaries.\n moddict : dictionary\n Modified dictinoary, for example modscan['x'] = 0 to change position of x.\n \n Returns\n -------\n originaldict : dictionary\n Updated original dictionary with values from moddict.\n '''\n for key in moddict:\n try:\n originaldict[key] = moddict[key]\n except:\n print(\"Wrong key in modified dictionary\")\n \n return originaldict\n\n\ndef calculateLCA(PVarea, modified_impacts=None, printflag = False):\n '''\n\n\n '''\n \n if printflag:\n print(\"Doing calculations of LCA analysis for Silicon Photovoltaic Panels\")\n \n \n\n impacts = {'Acidification':{'UUID': '75d0c8a2-e466-3bd7-813b-5beef2209330',\n 'Result': 1.29374135667815,\n 'Unit': 'kg SO2' },\n 'Carcinogenics':{'UUID': 'a6e5e5d8-a1e5-3c77-8170-586c4fe37514',\n 'Result': 0.0000231966690476102,\n 'Unit': 'CTUh' },\n 'Ecotoxicity':{'UUID': '338e9370-ceb0-3d18-9d87-5f91feb7829c',\n 'Result': 5933.77859696668,\n 'Unit': 'CTUe' },\n 'Eutrophication':{'UUID': '45b8cd56-498a-3c6f-9488-134e951d8c02',\n 'Result': 1.34026194777363,\n 'Unit': 'kg N eq' },\n \n 'Fossil fuel depletion':{'UUID': '0e45786f-67fa-3b8a-b8a3-73a7c316434c',\n 'Result': 249.642261689385,\n 'Unit': 'MJ surplus' },\n \n 'Global warming':{'UUID': '31967441-d687-313d-9910-13da3a584ab7',\n 'Result': 268.548841324818,\n 'Unit': 'kg CO2 eq' },\n \n 'Non carcinogenics':{'UUID': 'd4827ae3-c873-3ea4-85fb-860b7f3f2dee',\n 'Result': 0.000135331806321799,\n 'Unit': 'CTUh' },\n \n 'Ozone depletion':{'UUID': '6c05dad1-6661-35f2-82aa-6e8e6a498aec',\n 'Result': 0.0000310937628622019,\n 'Unit': 'kg CFC-11 eq' },\n \n 'Respiratory effects':{'UUID': 'e0916d62-7fbd-3d0a-a4a5-52659b0ac9c1',\n 'Result': 0.373415542664206,\n 'Unit': 'kg PM2.5 eq' },\n 'Smog':{'UUID': '7a149078-e2fd-3e07-a5a3-79035c60e7c3',\n 'Result': 15.35483065, \n 'Unit': 'kg O3 eq' },\n }\n \n if modified_impacts is not None:\n impacts = _modDict(impacts, modified_impacts)\n if printflag:\n print(\"Following Modified impacts provided instead of TRACI 2.1 default\")\n print(impacts)\n print(\"\")\n else:\n if printflag:\n print(\"Following TRACI 2.1\")\n\n acidification = impacts['Acidification']['Result']*PVarea\n carcinogenics = impacts['Carcinogenics']['Result']*PVarea\n ecotoxicity = impacts['Ecotoxicity']['Result']*PVarea\n eutrophication = impacts['Eutrophication']['Result']*PVarea\n fossil_fuel_depletion = impacts['Fossil fuel depletion']['Result']*PVarea\n global_warming = impacts['Global warming']['Result']*PVarea\n non_carcinogenics = impacts['Non carcinogenics']['Result']*PVarea\n ozone_depletion = impacts['Ozone depletion']['Result']*PVarea\n respiratory_effects = impacts['Respiratory effects']['Result']*PVarea\n smog = impacts['Smog']['Result']*PVarea\n \n\n \n if printflag:\n 
print(\"RESULTS FOR PV AREA \", PVarea, \" m2 \")\n print(\"****************************************\")\n print('Acidification: ', round(impacts['Acidification']['Result']*PVarea, 2), ' ', impacts['Acidification']['Unit'])\n print('Carcinogenics: ', round(impacts['Carcinogenics']['Result']*PVarea, 2), ' ', impacts['Carcinogenics']['Unit'])\n print('Ecotoxicity: ', round(impacts['Ecotoxicity']['Result']*PVarea, 2), ' ', impacts['Ecotoxicity']['Unit'])\n print('Eutrophication: ', round(impacts['Eutrophication']['Result']*PVarea, 2), ' ', impacts['Eutrophication']['Unit'])\n print('Fossil fuel depletion: ', round(impacts['Fossil fuel depletion']['Result']*PVarea, 2), ' ', impacts['Fossil fuel depletion']['Unit'])\n print('Global warming: ', round(impacts['Global warming']['Result']*PVarea, 2), ' ', impacts['Global warming']['Unit'])\n print('Non carcinogenics: ', round(impacts['Non carcinogenics']['Result']*PVarea, 2), ' ', impacts['Non carcinogenics']['Unit'])\n print('Ozone depletion: ', round(impacts['Ozone depletion']['Result']*PVarea, 2), ' ', impacts['Ozone depletion']['Unit'])\n print('Respiratory effects: ', round(impacts['Respiratory effects']['Result']*PVarea, 2), ' ', impacts['Respiratory effects']['Unit'])\n print('Smog: ', round(impacts['Smog']['Result']*PVarea, 2), ' ', impacts['Smog']['Unit'])\n \n return (acidification, carcinogenics, ecotoxicity, eutrophication, \n fossil_fuel_depletion, global_warming,\n non_carcinogenics, ozone_depletion, respiratory_effects, smog)" ]
[ [ "matplotlib.pyplot.legend", "numpy.log", "pandas.read_csv", "numpy.sqrt", "matplotlib.pyplot.title", "numpy.clip", "pandas.RangeIndex", "numpy.isnan", "numpy.cos", "pandas.DataFrame", "numpy.sin", "matplotlib.pyplot.plot", "numpy.deg2rad", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
keshaviyengar/rl-baselines-zoo
[ "6e39f5c7c6c2d30873297308ed064551bffaa52d" ]
[ "trajectory_generator.py" ]
[ "import rospy\nfrom geometry_msgs.msg import Pose, Point\nfrom std_msgs.msg import Bool\n\nimport numpy as np\nimport os\n# This script creates a square trajectory for a robot to follow.\n# Will output errors as well.\n\n\nclass CircleTrajectory(object):\n def __init__(self, x_offset, y_offset, z_height, radius, theta_step):\n self.trajectory_pub = rospy.Publisher(\"desired_goal\", Pose, queue_size=10)\n self.trajectory_finish_pub = rospy.Publisher(\"trajectory_finish\", Bool, queue_size=10)\n self._current_pose = Pose()\n\n # Create a timer to update the desired trajectory\n self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)\n\n self.traj_finish = False\n # For now set initial current pose as 0\n self._desired_pose = Pose()\n self.x_offset = x_offset\n self.y_offset = y_offset\n self.radius = radius\n self.thetas = np.arange(0, 2 * np.pi, np.deg2rad(theta_step))\n self.thetas_counter = 0\n self._desired_pose.position.x = self.x_offset + self.radius * np.cos(self.thetas[self.thetas_counter])\n self._desired_pose.position.y = self.y_offset + self.radius * np.sin(self.thetas[self.thetas_counter])\n self._desired_pose.position.z = z_height\n self._desired_pose.orientation.x = 0\n self._desired_pose.orientation.y = 0\n self._desired_pose.orientation.z = 0\n self._desired_pose.orientation.w = 1\n\n self.speed = 1\n\n def _trajectory_callback(self, event):\n self.thetas_counter += 1\n if self.thetas_counter == self.thetas.size - 1:\n self.traj_finish = True\n print(\"Trajectory is complete.\")\n self.trajectory_finish_pub.publish(True)\n self.trajectory_timer.shutdown()\n\n if not self.traj_finish:\n self._desired_pose.position.x = self.x_offset + self.radius * np.cos(self.thetas[self.thetas_counter])\n self._desired_pose.position.y = self.y_offset + self.radius * np.sin(self.thetas[self.thetas_counter])\n # Publish new pose\n self.trajectory_pub.publish(self._desired_pose)\n\n\nclass TriangleTrajectory(object):\n def __init__(self, point_a, point_b, point_c, z_height):\n self.trajectory_pub = rospy.Publisher(\"desired_goal\", Pose, queue_size=10)\n self.trajectory_finish_pub = rospy.Publisher(\"trajectory_finish\", Bool, queue_size=10)\n self._current_pose = Pose()\n\n\n # Second timer for how long to move in axis before moving to next\n # self.change_direction_timer = rospy.Timer(rospy.Duration(5.0), self._change_direction)\n\n # Specify three points to reach to create the triangle\n self.points = np.array([point_a, point_b, point_c])\n\n self._turn_count = 0\n self.del_vector = [(self.points[1][0] - self.points[0][0]), (self.points[1][1] - self.points[0][1])]\n\n self._done_trajectory = False\n\n self._desired_pose = Pose()\n self._desired_pose.position.x = point_a[0]\n self._desired_pose.position.y = point_a[1]\n self._desired_pose.position.z = z_height\n self._desired_pose.orientation.x = 0\n self._desired_pose.orientation.y = 0\n self._desired_pose.orientation.z = 0\n self._desired_pose.orientation.w = 1\n\n # Publish initial point and sleep to initialize\n for _ in range(10):\n self.trajectory_pub.publish(self._desired_pose)\n rospy.sleep(0.1)\n\n self.prev_time = rospy.get_time()\n self.traj_finish = False\n\n # Create a timer to update the desired trajectory\n self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)\n\n # This callback changes the direction by 90 degrees, to make the square.\n def _change_direction(self):\n if self._turn_count == 0:\n if np.linalg.norm(self.points[self._turn_count + 1] - np.array(\n 
[self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:\n self._turn_count += 1\n self.del_vector = [(self.points[1][0] - self.points[0][0]),\n (self.points[1][1] - self.points[0][1])]\n\n if self._turn_count == 1:\n if np.linalg.norm(self.points[self._turn_count + 1] - np.array(\n [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:\n self._turn_count += 1\n self.del_vector = [(self.points[2][0] - self.points[1][0]),\n (self.points[2][1] - self.points[1][1])]\n if self._turn_count == 2:\n if np.linalg.norm(self.points[0] - np.array(\n [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:\n self._turn_count += 1\n self.del_vector = [(self.points[0][0] - self.points[2][0]),\n (self.points[0][1] - self.points[2][1])]\n if self._turn_count == 3:\n print(\"Trajectory is complete.\")\n self.traj_finish = True\n self.trajectory_finish_pub.publish(True)\n self.trajectory_timer.shutdown()\n # self.change_direction_timer.shutdown()\n\n def _trajectory_callback(self, event):\n # Compute current difference in time from last callback\n if not self.traj_finish:\n current_time = rospy.get_time()\n delta_t = current_time - self.prev_time\n self.prev_time = current_time\n\n self._change_direction()\n\n self._desired_pose.position.x += self.del_vector[0] * delta_t\n self._desired_pose.position.y += self.del_vector[1] * delta_t\n self.trajectory_pub.publish(self._desired_pose)\n\n\nclass SquareTrajectory2(object):\n def __init__(self, point_a, point_b, point_c, point_d, z_height):\n self.trajectory_pub = rospy.Publisher(\"desired_goal\", Pose, queue_size=10)\n self.trajectory_finish_pub = rospy.Publisher(\"trajectory_finish\", Bool, queue_size=10)\n self._current_pose = Pose()\n\n self.points = [point_a, point_b, point_c, point_d]\n\n self._turn_count = 0\n self.del_vector = [(self.points[1][0] - self.points[0][0]), (self.points[1][1] - self.points[0][1])]\n\n # For now set initial current pose as 0\n self._desired_pose = Pose()\n self._desired_pose.position.x = point_a[0]\n self._desired_pose.position.y = point_a[1]\n self._desired_pose.position.z = z_height\n self._desired_pose.orientation.x = 0\n self._desired_pose.orientation.y = 0\n self._desired_pose.orientation.z = 0\n self._desired_pose.orientation.w = 1\n\n # Publish initial point and sleep to initialize\n for _ in range(10):\n self.trajectory_pub.publish(self._desired_pose)\n rospy.sleep(0.1)\n\n self.prev_time = rospy.get_time()\n self.traj_finish = False\n\n # Create a timer to update the desired trajectory\n self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)\n\n # This callback changes the direction by 90 degrees, to make the square.\n def _change_direction(self):\n if self._turn_count == 0:\n if np.linalg.norm(self.points[self._turn_count + 1] - np.array(\n [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:\n self._turn_count += 1\n self.del_vector = [(self.points[1][0] - self.points[0][0]),\n (self.points[1][1] - self.points[0][1])]\n\n if self._turn_count == 1:\n if np.linalg.norm(self.points[self._turn_count + 1] - np.array(\n [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:\n self._turn_count += 1\n self.del_vector = [(self.points[2][0] - self.points[1][0]),\n (self.points[2][1] - self.points[1][1])]\n if self._turn_count == 2:\n if np.linalg.norm(self.points[self._turn_count + 1] - np.array(\n [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:\n self._turn_count += 1\n self.del_vector = 
[(self.points[3][0] - self.points[2][0]),\n (self.points[3][1] - self.points[2][1])]\n if self._turn_count == 3:\n if np.linalg.norm(self.points[0] - np.array(\n [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:\n self._turn_count += 1\n self.del_vector = [(self.points[0][0] - self.points[3][0]),\n (self.points[0][1] - self.points[3][1])]\n if self._turn_count == 4:\n print(\"Trajectory is complete.\")\n self.traj_finish = True\n self.trajectory_finish_pub.publish(True)\n self.trajectory_timer.shutdown()\n\n def _trajectory_callback(self, event):\n # Compute current difference in time from last callback\n if not self.traj_finish:\n current_time = rospy.get_time()\n delta_t = current_time - self.prev_time\n self.prev_time = current_time\n\n self._change_direction()\n\n self._desired_pose.position.x += self.del_vector[0] * delta_t\n self._desired_pose.position.y += self.del_vector[1] * delta_t\n self.trajectory_pub.publish(self._desired_pose)\n\n\nif __name__ == '__main__':\n rospy.init_node(\"trajectory_generator\")\n experiments = [7]\n\n for exp in experiments:\n x_offset = 5\n y_offset = 5\n if exp in [1, 2, 3, 4, 5]:\n z_height = 100\n elif exp in [6, 7, 8, 9, 10]:\n z_height = 100\n else:\n z_height = 125\n\n radius = 2.0\n theta_step = 0.5\n print(\"Circle trajectory\")\n circle_trajectory = CircleTrajectory(x_offset, y_offset, z_height, radius, theta_step)\n while not circle_trajectory.traj_finish:\n if circle_trajectory.traj_finish:\n break\n\n # point_a = [20, 20]\n # point_b = [20, 30]\n # point_c = [30, 20]\n # point_a = [-5, 0]\n # point_b = [-10, -5]\n # point_c = [5, 0]\n # if exp in [1, 2, 3, 4, 5]:\n # z_height = 100\n # elif exp in [6, 7, 8, 9, 10]:\n # z_height = 125\n # else:\n # z_height = 125\n # print(\"Triangle trajectory\")\n # triangle_trajectory = TriangleTrajectory(point_a, point_b, point_c, z_height)\n # while not triangle_trajectory.traj_finish:\n # pass\n\n # point_a = [5, 0]\n # point_b = [-5, 0]\n # point_c = [-5, -5]\n # point_d = [5, -5]\n # if exp in [1, 2, 3, 4, 5]:\n # z_height = 100\n # elif exp in [6, 7, 8, 9, 10]:\n # z_height = 125\n # else:\n # z_height = 125\n # print(\"Square trajectory\")\n # square_trajectory = SquareTrajectory2(point_a, point_b, point_c, point_d, z_height)\n # while not square_trajectory.traj_finish:\n # pass\n\n" ]
[ [ "numpy.deg2rad", "numpy.array", "numpy.cos", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
truatpasteurdotfr/napari
[ "48cdf4d1c4bcf6f76603e90b1c0c7498e2aba6c0" ]
[ "napari/layers/surface/surface.py" ]
[ "import warnings\n\nimport numpy as np\n\nfrom ...utils.colormaps import AVAILABLE_COLORMAPS\nfrom ...utils.events import Event\nfrom ...utils.translations import trans\nfrom ..base import Layer\nfrom ..intensity_mixin import IntensityVisualizationMixin\nfrom ..utils.layer_utils import calc_data_range\nfrom ._surface_constants import Shading\nfrom .normals import SurfaceNormals\nfrom .wireframe import SurfaceWireframe\n\n\n# Mixin must come before Layer\nclass Surface(IntensityVisualizationMixin, Layer):\n \"\"\"\n Surface layer renders meshes onto the canvas.\n\n Parameters\n ----------\n data : 2-tuple or 3-tuple of array\n The first element of the tuple is an (N, D) array of vertices of\n mesh triangles. The second is an (M, 3) array of int of indices\n of the mesh triangles. The optional third element is the\n (K0, ..., KL, N) array of values used to color vertices where the\n additional L dimensions are used to color the same mesh with\n different values. If not provided, it defaults to ones.\n colormap : str, napari.utils.Colormap, tuple, dict\n Colormap to use for luminance images. If a string must be the name\n of a supported colormap from vispy or matplotlib. If a tuple the\n first value must be a string to assign as a name to a colormap and\n the second item must be a Colormap. If a dict the key must be a\n string to assign as a name to a colormap and the value must be a\n Colormap.\n contrast_limits : list (2,)\n Color limits to be used for determining the colormap bounds for\n luminance images. If not passed is calculated as the min and max of\n the image.\n gamma : float\n Gamma correction for determining colormap linearity. Defaults to 1.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n rotate : float, 3-tuple of float, or n-D array.\n If a float convert into a 2D rotation matrix using that value as an\n angle. If 3-tuple convert into a 3D rotation matrix, using a yaw,\n pitch, roll convention. Otherwise assume an nD rotation. Angles are\n assumed to be in degrees. They can be converted from radians with\n np.degrees if needed.\n shear : 1-D array or n-D array\n Either a vector of upper triangular values, or an nD shear matrix with\n ones along the main diagonal.\n affine : n-D array or napari.utils.transforms.Affine\n (N+1, N+1) affine transformation matrix in homogeneous coordinates.\n The first (N, N) entries correspond to a linear transform and\n the final column is a length N translation vector and a 1 or a napari\n `Affine` transform object. Applied as an extra transform on top of the\n provided scale, rotate, and shear values.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. 
Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n shading : str, Shading\n One of a list of preset shading modes that determine the lighting model\n using when rendering the surface in 3D.\n\n * ``Shading.NONE``\n Corresponds to ``shading='none'``.\n * ``Shading.FLAT``\n Corresponds to ``shading='flat'``.\n * ``Shading.SMOOTH``\n Corresponds to ``shading='smooth'``.\n visible : bool\n Whether the layer visual is currently being displayed.\n cache : bool\n Whether slices of out-of-core datasets should be cached upon retrieval.\n Currently, this only applies to dask arrays.\n wireframe : dict or SurfaceWireframe\n Whether and how to display the edges of the surface mesh with a wireframe.\n normals : dict or SurfaceNormals\n Whether and how to display the face and vertex normals of the surface mesh.\n\n Attributes\n ----------\n data : 3-tuple of array\n The first element of the tuple is an (N, D) array of vertices of\n mesh triangles. The second is an (M, 3) array of int of indices\n of the mesh triangles. The third element is the (K0, ..., KL, N)\n array of values used to color vertices where the additional L\n dimensions are used to color the same mesh with different values.\n vertices : (N, D) array\n Vertices of mesh triangles.\n faces : (M, 3) array of int\n Indices of mesh triangles.\n vertex_values : (K0, ..., KL, N) array\n Values used to color vertices.\n colormap : str, napari.utils.Colormap, tuple, dict\n Colormap to use for luminance images. If a string must be the name\n of a supported colormap from vispy or matplotlib. If a tuple the\n first value must be a string to assign as a name to a colormap and\n the second item must be a Colormap. If a dict the key must be a\n string to assign as a name to a colormap and the value must be a\n Colormap.\n contrast_limits : list (2,)\n Color limits to be used for determining the colormap bounds for\n luminance images. 
If not passed is calculated as the min and max of\n the image.\n shading: str\n One of a list of preset shading modes that determine the lighting model\n using when rendering the surface.\n\n * ``'none'``\n * ``'flat'``\n * ``'smooth'``\n gamma : float\n Gamma correction for determining colormap linearity.\n wireframe : SurfaceWireframe\n Whether and how to display the edges of the surface mesh with a wireframe.\n normals : SurfaceNormals\n Whether and how to display the face and vertex normals of the surface mesh.\n\n\n Notes\n -----\n _data_view : (M, 2) or (M, 3) array\n The coordinates of the vertices given the viewed dimensions.\n _view_faces : (P, 3) array\n The integer indices of the vertices that form the triangles\n in the currently viewed slice.\n _colorbar : array\n Colorbar for current colormap.\n \"\"\"\n\n _colormaps = AVAILABLE_COLORMAPS\n\n def __init__(\n self,\n data,\n *,\n colormap='gray',\n contrast_limits=None,\n gamma=1,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n rotate=None,\n shear=None,\n affine=None,\n opacity=1,\n blending='translucent',\n shading='flat',\n visible=True,\n cache=True,\n experimental_clipping_planes=None,\n wireframe=None,\n normals=None,\n ):\n\n ndim = data[0].shape[1]\n\n super().__init__(\n data,\n ndim,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n rotate=rotate,\n shear=shear,\n affine=affine,\n opacity=opacity,\n blending=blending,\n visible=visible,\n cache=cache,\n experimental_clipping_planes=experimental_clipping_planes,\n )\n\n self.events.add(\n interpolation=Event,\n rendering=Event,\n shading=Event,\n )\n\n # assign mesh data and establish default behavior\n if len(data) not in (2, 3):\n raise ValueError(\n trans._(\n 'Surface data tuple must be 2 or 3, specifying verictes, faces, and optionally vertex values, instead got length {length}.',\n deferred=True,\n length=len(data),\n )\n )\n self._vertices = data[0]\n self._faces = data[1]\n if len(data) == 3:\n self._vertex_values = data[2]\n else:\n self._vertex_values = np.ones(len(self._vertices))\n\n # Set contrast_limits and colormaps\n self._gamma = gamma\n if contrast_limits is None:\n self._contrast_limits_range = calc_data_range(self._vertex_values)\n else:\n self._contrast_limits_range = contrast_limits\n self._contrast_limits = tuple(self._contrast_limits_range)\n self.colormap = colormap\n self.contrast_limits = self._contrast_limits\n\n # Data containing vectors in the currently viewed slice\n self._data_view = np.zeros((0, self._ndisplay))\n self._view_faces = np.zeros((0, 3))\n self._view_vertex_values = []\n\n # Trigger generation of view slice and thumbnail\n self._update_dims()\n\n # Shading mode\n self._shading = shading\n\n self.wireframe = wireframe or SurfaceWireframe()\n self.normals = normals or SurfaceNormals()\n\n def _calc_data_range(self, mode='data'):\n return calc_data_range(self.vertex_values)\n\n @property\n def dtype(self):\n return self.vertex_values.dtype\n\n @property\n def data(self):\n return (self.vertices, self.faces, self.vertex_values)\n\n @data.setter\n def data(self, data):\n if len(data) not in (2, 3):\n raise ValueError(\n trans._(\n 'Surface data tuple must be 2 or 3, specifying vertices, faces, and optionally vertex values, instead got length {data_length}.',\n deferred=True,\n data_length=len(data),\n )\n )\n self._vertices = data[0]\n self._faces = data[1]\n if len(data) == 3:\n self._vertex_values = data[2]\n else:\n self._vertex_values = np.ones(len(self._vertices))\n\n 
self._update_dims()\n self.events.data(value=self.data)\n if self._keep_auto_contrast:\n self.reset_contrast_limits()\n\n @property\n def vertices(self):\n return self._vertices\n\n @vertices.setter\n def vertices(self, vertices):\n \"\"\"Array of vertices of mesh triangles.\"\"\"\n\n self._vertices = vertices\n\n self._update_dims()\n self.refresh()\n self.events.data(value=self.data)\n self._set_editable()\n\n @property\n def vertex_values(self) -> np.ndarray:\n return self._vertex_values\n\n @vertex_values.setter\n def vertex_values(self, vertex_values: np.ndarray):\n \"\"\"Array of values used to color vertices..\"\"\"\n\n self._vertex_values = vertex_values\n\n self.refresh()\n self.events.data(value=self.data)\n self._set_editable()\n\n @property\n def faces(self) -> np.ndarray:\n return self._faces\n\n @faces.setter\n def faces(self, faces: np.ndarray):\n \"\"\"Array of indices of mesh triangles..\"\"\"\n\n self.faces = faces\n\n self.refresh()\n self.events.data(value=self.data)\n self._set_editable()\n\n def _get_ndim(self):\n \"\"\"Determine number of dimensions of the layer.\"\"\"\n return self.vertices.shape[1] + (self.vertex_values.ndim - 1)\n\n @property\n def _extent_data(self) -> np.ndarray:\n \"\"\"Extent of layer in data coordinates.\n\n Returns\n -------\n extent_data : array, shape (2, D)\n \"\"\"\n if len(self.vertices) == 0:\n extrema = np.full((2, self.ndim), np.nan)\n else:\n maxs = np.max(self.vertices, axis=0)\n mins = np.min(self.vertices, axis=0)\n\n # The full dimensionality and shape of the layer is determined by\n # the number of additional vertex value dimensions and the\n # dimensionality of the vertices themselves\n if self.vertex_values.ndim > 1:\n mins = [0] * (self.vertex_values.ndim - 1) + list(mins)\n maxs = list(self.vertex_values.shape[:-1]) + list(maxs)\n extrema = np.vstack([mins, maxs])\n return extrema\n\n @property\n def shading(self):\n return str(self._shading)\n\n @shading.setter\n def shading(self, shading):\n if isinstance(shading, Shading):\n self._shading = shading\n else:\n self._shading = Shading(shading)\n self.events.shading(value=self._shading)\n\n def _get_state(self):\n \"\"\"Get dictionary of layer state.\n\n Returns\n -------\n state : dict\n Dictionary of layer state.\n \"\"\"\n state = self._get_base_state()\n state.update(\n {\n 'colormap': self.colormap.name,\n 'contrast_limits': self.contrast_limits,\n 'gamma': self.gamma,\n 'shading': self.shading,\n 'data': self.data,\n 'wireframe': self.wireframe.dict(),\n 'normals': self.normals.dict(),\n }\n )\n return state\n\n def _set_view_slice(self):\n \"\"\"Sets the view given the indices to slice with.\"\"\"\n N, vertex_ndim = self.vertices.shape\n values_ndim = self.vertex_values.ndim - 1\n\n # Take vertex_values dimensionality into account if more than one value\n # is provided per vertex.\n if values_ndim > 0:\n # Get indices for axes corresponding to values dimensions\n values_indices = self._slice_indices[:-vertex_ndim]\n values = self.vertex_values[values_indices]\n if values.ndim > 1:\n warnings.warn(\n trans._(\n \"Assigning multiple values per vertex after slicing is not allowed. All dimensions corresponding to vertex_values must be non-displayed dimensions. 
Data will not be visible.\",\n deferred=True,\n )\n )\n self._data_view = np.zeros((0, self._ndisplay))\n self._view_faces = np.zeros((0, 3))\n self._view_vertex_values = []\n return\n\n self._view_vertex_values = values\n # Determine which axes of the vertices data are being displayed\n # and not displayed, ignoring the additional dimensions\n # corresponding to the vertex_values.\n indices = np.array(self._slice_indices[-vertex_ndim:])\n disp = [\n d\n for d in np.subtract(self._dims_displayed, values_ndim)\n if d >= 0\n ]\n not_disp = [\n d\n for d in np.subtract(self._dims_not_displayed, values_ndim)\n if d >= 0\n ]\n else:\n self._view_vertex_values = self.vertex_values\n indices = np.array(self._slice_indices)\n not_disp = list(self._dims_not_displayed)\n disp = list(self._dims_displayed)\n\n self._data_view = self.vertices[:, disp]\n if len(self.vertices) == 0:\n self._view_faces = np.zeros((0, 3))\n elif vertex_ndim > self._ndisplay:\n vertices = self.vertices[:, not_disp].astype('int')\n triangles = vertices[self.faces]\n matches = np.all(triangles == indices[not_disp], axis=(1, 2))\n matches = np.where(matches)[0]\n if len(matches) == 0:\n self._view_faces = np.zeros((0, 3))\n else:\n self._view_faces = self.faces[matches]\n else:\n self._view_faces = self.faces\n\n if self._keep_auto_contrast:\n self.reset_contrast_limits()\n\n def _update_thumbnail(self):\n \"\"\"Update thumbnail with current surface.\"\"\"\n pass\n\n def _get_value(self, position):\n \"\"\"Value of the data at a position in data coordinates.\n\n Parameters\n ----------\n position : tuple\n Position in data coordinates.\n\n Returns\n -------\n value : None\n Value of the data at the coord.\n \"\"\"\n return None\n" ]
[ [ "numpy.min", "numpy.subtract", "numpy.full", "numpy.all", "numpy.max", "numpy.array", "numpy.zeros", "numpy.where", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tomstark99/epic-kitchens-100-fyrp
[ "cbc9e59569fb6110b900a51def1947b8a3c93699" ]
[ "src/models/esvs.py" ]
[ "import torch as t\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass MTRN(nn.Module):\r\n \r\n def __init__(self, frame_count: int):\r\n super().__init__()\r\n self.frame_count = frame_count\r\n self.fc1 = nn.Linear(256 * frame_count, 1024)\r\n self.fc2 = nn.Linear(1024, 512)\r\n self.fc3 = nn.Linear(512, 397)\r\n \r\n def forward(self, x):\r\n x = x.view(-1, 256 * self.frame_count)\r\n x = F.relu(self.fc1(x))\r\n x = F.relu(self.fc2(x))\r\n x = self.fc3_verb(x)\r\n \r\n return x\r\n\r\nclass V_MTRN(nn.Module):\r\n \r\n def __init__(self, frame_count: int, hidden_layer_size: int, dropout_count: int, dropout_probability: int = 0.5):\r\n super().__init__()\r\n if dropout_probability < 0 or dropout_probability > 1:\r\n raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')\r\n self.frame_count = frame_count\r\n self.dropout_count = dropout_count\r\n self.fc1 = nn.Linear(256 * frame_count, hidden_layer_size)\r\n self.dropout = nn.Dropout(p=dropout_probability)\r\n self.fc2 = nn.Linear(hidden_layer_size, 512)\r\n self.fc3_verb = nn.Linear(512, 97)\r\n \r\n def forward(self, x):\r\n x = x.view(-1, 256 * self.frame_count)\r\n x = F.relu(self.fc1(x))\r\n if self.dropout_count >= 1:\r\n x = self.dropout(x)\r\n x = F.relu(self.fc2(x))\r\n if self.dropout_count == 2:\r\n x = self.dropout(x)\r\n x = self.fc3_verb(x)\r\n \r\n return x\r\n\r\nclass N_MTRN(nn.Module):\r\n \r\n def __init__(self, frame_count: int, hidden_layer_size: int, dropout_count: int, dropout_probability: int = 0.5):\r\n super().__init__()\r\n if dropout_probability < 0 or dropout_probability > 1:\r\n raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')\r\n self.frame_count = frame_count\r\n self.dropout_count = dropout_count\r\n self.fc1 = nn.Linear(256 * frame_count, hidden_layer_size)\r\n self.dropout = nn.Dropout(p=dropout_probability)\r\n self.fc2 = nn.Linear(hidden_layer_size, 512)\r\n self.fc3_noun = nn.Linear(512, 300)\r\n \r\n def forward(self, x):\r\n x = x.view(-1, 256 * self.frame_count)\r\n x = F.relu(self.fc1(x))\r\n if self.dropout_count >= 1:\r\n x = self.dropout(x)\r\n x = F.relu(self.fc2(x))\r\n if self.dropout_count == 2:\r\n x = self.dropout(x)\r\n x = self.fc3_noun(x)\r\n \r\n return x\r\n\r\nclass V_MF(nn.Module):\r\n \r\n def __init__(self, frame_count: int, hidden_layer_size: int, dropout_probability: int = 0.5):\r\n super().__init__()\r\n if dropout_probability < 0 or dropout_probability > 1:\r\n raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')\r\n self.frame_count = frame_count\r\n self.fc1 = nn.Linear(768 * frame_count, hidden_layer_size)\r\n self.dropout = nn.Dropout(p=dropout_probability)\r\n self.fc2_verb = nn.Linear(hidden_layer_size, 97)\r\n \r\n def forward(self, x):\r\n x = x.view(-1, 768 * self.frame_count)\r\n x = F.relu(self.fc1(x))\r\n x = self.dropout(x)\r\n x = self.fc2_verb(x)\r\n \r\n return x\r\n\r\nclass N_MF(nn.Module):\r\n \r\n def __init__(self, frame_count: int, hidden_layer_size: int, dropout_probability: int = 0.5):\r\n super().__init__()\r\n if dropout_probability < 0 or dropout_probability > 1:\r\n raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')\r\n self.frame_count = frame_count\r\n self.fc1 = nn.Linear(768 * frame_count, hidden_layer_size)\r\n self.dropout = nn.Dropout(p=dropout_probability)\r\n self.fc2_noun = nn.Linear(hidden_layer_size, 300)\r\n \r\n def forward(self, x):\r\n x = x.view(-1, 768 * 
self.frame_count)\r\n x = F.relu(self.fc1(x))\r\n x = self.dropout(x)\r\n x = self.fc2_noun(x)\r\n \r\n return x\r\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JaimeCernuda/dlio_benchmark
[ "d9cfbf76b4c7fb0d48a0dd43b8d2f2ea6ba75949", "d9cfbf76b4c7fb0d48a0dd43b8d2f2ea6ba75949", "d9cfbf76b4c7fb0d48a0dd43b8d2f2ea6ba75949" ]
[ "src/data_generator/npz_generator.py", "src/reader/npz_reader.py", "src/reader/reader_handler.py" ]
[ "\"\"\"\n Copyright (C) 2020 Argonne, Hariharan Devarajan <[email protected]>\n This file is part of DLProfile\n DLIO is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as\n published by the Free Software Foundation, either version 3 of the published by the Free Software Foundation, either\n version 3 of the License, or (at your option) any later version.\n This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied\n warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more\n details.\n You should have received a copy of the GNU General Public License along with this program.\n If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n\nfrom src.common.enumerations import Compression\nfrom src.data_generator.data_generator import DataGenerator\n\nimport numpy as np\nfrom numpy import random\n\nfrom src.utils.utility import progress\nfrom shutil import copyfile\n\n\"\"\"\nGenerator for creating data in NPZ format.\n\"\"\"\nclass NPZGenerator(DataGenerator):\n def __init__(self):\n super().__init__()\n\n def generate(self):\n \"\"\"\n Generator for creating data in NPZ format of 3d dataset.\n \"\"\"\n super().generate()\n records = random.random((self._dimension, self._dimension, self.num_samples))\n record_labels = [0] * self.num_samples\n prev_out_spec =\"\"\n count = 0\n for i in range(0, int(self.num_files)):\n if i % self.comm_size == self.my_rank:\n progress(i+1, self.num_files, \"Generating NPZ Data\")\n out_path_spec = \"{}_{}_of_{}.npz\".format(self._file_prefix, i, self.num_files)\n if count == 0:\n prev_out_spec = out_path_spec\n if self.compression != Compression.ZIP:\n np.savez(out_path_spec, x=records, y=record_labels)\n else:\n np.savez_compressed(out_path_spec, x=records, y=record_labels)\n count += 1\n else:\n copyfile(prev_out_spec, out_path_spec)", "\"\"\"\n Copyright (C) 2020 Argonne, Hariharan Devarajan <[email protected]>\n This file is part of DLProfile\n DLIO is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as\n published by the Free Software Foundation, either version 3 of the published by the Free Software Foundation, either\n version 3 of the License, or (at your option) any later version.\n This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied\n warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more\n details.\n You should have received a copy of the GNU General Public License along with this program.\n If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nfrom src.common.enumerations import Shuffle, FileAccess\nfrom src.reader.reader_handler import FormatReader\nimport numpy as np\nimport math\nfrom numpy import random\n\nfrom src.utils.utility import progress\n\nclass NPZReader(FormatReader):\n \"\"\"\n Reader for NPZ files\n \"\"\"\n def __init__(self):\n super().__init__()\n\n def read(self, epoch_number):\n \"\"\"\n for each epoch it opens the npz files and reads the data into memory\n :param epoch_number:\n \"\"\"\n super().read(epoch_number)\n packed_array = []\n for file in self._local_file_list:\n with np.load(file, allow_pickle=True) as data:\n rows = data['x']\n packed_array.append({\n 'dataset': rows,\n 'current_sample': 0,\n 'total_samples': rows.shape[2]\n })\n self._dataset = packed_array\n\n def next(self):\n \"\"\"\n The iterator of the dataset just performs memory sub-setting for each portion of the data.\n :return: piece of data for training.\n \"\"\"\n super().next()\n total = 0\n count = 1\n for element in self._dataset:\n current_index = element['current_sample']\n total_samples = element['total_samples']\n if FileAccess.MULTI == self.file_access:\n num_sets = list(range(0, int(math.ceil(total_samples / self.batch_size))))\n else:\n total_samples_per_rank = int(total_samples / self.comm_size)\n part_start, part_end = (int(total_samples_per_rank * self.my_rank / self.batch_size),\n int(total_samples_per_rank * (self.my_rank + 1) / self.batch_size))\n num_sets = list(range(part_start, part_end))\n total += len(num_sets)\n if self.memory_shuffle != Shuffle.OFF:\n if self.memory_shuffle == Shuffle.SEED:\n random.seed(self.seed)\n random.shuffle(num_sets)\n for num_set in num_sets:\n progress(count, total, \"Reading NPZ Data\")\n count += 1\n yield element['dataset'][:][:][num_set * self.batch_size:(num_set + 1) * self.batch_size - 1]\n\n def finalize(self):\n pass", "from abc import ABC, abstractmethod\n\nfrom src.common.enumerations import Shuffle, FileAccess\nfrom src.utils.argument_parser import ArgumentParser\n\nimport os\nimport math\nfrom numpy import random\n\n\nclass FormatReader(ABC):\n def __init__(self):\n self._arg_parser = ArgumentParser.get_instance()\n self.read_shuffle = self._arg_parser.args.read_shuffle\n self.seed = self._arg_parser.args.seed\n self.seed_change_epoch = self._arg_parser.args.seed_change_epoch\n self.read_shuffle = self._arg_parser.args.read_shuffle\n self.memory_shuffle = self._arg_parser.args.memory_shuffle\n self.shuffle_size = self._arg_parser.args.shuffle_size\n self.data_dir = self._arg_parser.args.data_folder\n self.record_size = self._arg_parser.args.record_length\n self.prefetch = self._arg_parser.args.prefetch\n self.prefetch_size = self._arg_parser.args.prefetch_size\n self.batch_size = self._arg_parser.args.batch_size\n self.transfer_size = self._arg_parser.args.transfer_size\n self.file_access = self._arg_parser.args.file_access\n self.my_rank = self._arg_parser.args.my_rank\n self.comm_size = self._arg_parser.args.comm_size\n self.num_files = self._arg_parser.args.num_files\n self.num_samples = self._arg_parser.args.num_samples\n self._dataset = None\n self._local_file_list = None\n\n @abstractmethod\n def read(self, epoch_number):\n filenames = os.listdir(self.data_dir)\n files = list()\n # Iterate over all the entries\n for entry in filenames:\n # Create full path\n 
fullPath = os.path.join(self.data_dir, entry)\n files.append(fullPath)\n seed = None\n if FileAccess.MULTI == self.file_access:\n files = files[:self.num_files]\n read_shuffle = True\n if self.read_shuffle == Shuffle.OFF:\n read_shuffle = False\n if read_shuffle:\n seed = self.seed\n if self.seed_change_epoch:\n seed = self.seed + epoch_number\n partition_size = int(math.ceil(len(files) / self.comm_size))\n part_start, part_end = (partition_size * self.my_rank, partition_size * ( self.my_rank + 1))\n self._local_file_list = files[part_start:part_end]\n print(\"rank {}, file_list {}, size {}\".format(self.my_rank, self._local_file_list,partition_size))\n if seed is not None:\n random.seed(seed)\n if read_shuffle:\n random.shuffle(self._local_file_list)\n else:\n self._local_file_list = files\n\n @abstractmethod\n def next(self):\n pass\n\n @abstractmethod\n def finalize(self):\n pass\n" ]
[ [ "numpy.savez_compressed", "numpy.savez", "numpy.random.random" ], [ "numpy.load", "numpy.random.seed", "numpy.random.shuffle" ], [ "numpy.random.shuffle", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CogStack/CAT
[ "5ac04d2676aede13f8e8d0ab408472c3c6d46a86" ]
[ "medcat/cat.py" ]
[ "import os\nimport shutil\nimport pickle\nimport traceback\nimport json\nimport logging\nimport math\nimport time\nimport psutil\nfrom time import sleep\nfrom copy import deepcopy\nfrom multiprocess import Process, Manager, cpu_count\nfrom multiprocess.queues import Queue\nfrom multiprocess.synchronize import Lock\nfrom typing import Union, List, Tuple, Optional, Dict, Iterable, Set\nfrom itertools import islice, chain, repeat\nfrom datetime import date\nfrom tqdm.autonotebook import tqdm, trange\nfrom spacy.tokens import Span, Doc, Token\nfrom spacy.language import Language\n\nfrom medcat import __version__\nfrom medcat.preprocessing.tokenizers import spacy_split_all\nfrom medcat.pipe import Pipe\nfrom medcat.preprocessing.taggers import tag_skip_and_punct\nfrom medcat.cdb import CDB\nfrom medcat.utils.matutils import intersect_nonempty_set\nfrom medcat.utils.data_utils import make_mc_train_test, get_false_positives\nfrom medcat.utils.normalizers import BasicSpellChecker\nfrom medcat.utils.checkpoint import Checkpoint, CheckpointConfig, CheckpointManager\nfrom medcat.utils.helpers import tkns_from_doc, get_important_config_parameters\nfrom medcat.utils.hasher import Hasher\nfrom medcat.ner.vocab_based_ner import NER\nfrom medcat.linking.context_based_linker import Linker\nfrom medcat.utils.filters import get_project_filters, check_filters\nfrom medcat.preprocessing.cleaners import prepare_name\nfrom medcat.meta_cat import MetaCAT\nfrom medcat.utils.meta_cat.data_utils import json_to_fake_spacy\nfrom medcat.config import Config\nfrom medcat.vocab import Vocab\nfrom medcat.utils.decorators import deprecated\nfrom medcat.ner.transformers_ner import TransformersNER\n\n\nclass CAT(object):\n r\"\"\"\n The main MedCAT class used to annotate documents, it is built on top of spaCy\n and works as a spaCy pipline. Creates an instance of a spaCy pipline that can\n be used as a spacy nlp model.\n\n Args:\n cdb (medcat.cdb.CDB):\n The concept database that will be used for NER+L\n config (medcat.config.Config):\n Global configuration for medcat\n vocab (medcat.vocab.Vocab, optional):\n Vocabulary used for vector embeddings and spelling. Default: None\n meta_cats (list of medcat.meta_cat.MetaCAT, optional):\n A list of models that will be applied sequentially on each\n detected annotation.\n\n Attributes (limited):\n cdb (medcat.cdb.CDB):\n Concept database used with this CAT instance, please do not assign\n this value directly.\n config (medcat.config.Config):\n The global configuration for medcat. Usually cdb.config will be used for this\n field. 
WILL BE REMOVED - TEMPORARY PLACEHOLDER\n vocab (medcat.utils.vocab.Vocab):\n The vocabulary object used with this instance, please do not assign\n this value directly.\n\n Examples:\n\n >>> cat = CAT(cdb, vocab)\n >>> spacy_doc = cat(\"Put some text here\")\n >>> print(spacy_doc.ents) # Detected entites\n \"\"\"\n # Add file and console handlers\n log = logging.getLogger(__package__)\n DEFAULT_MODEL_PACK_NAME = \"medcat_model_pack\"\n\n def __init__(self,\n cdb: CDB,\n vocab: Union[Vocab, None] = None,\n config: Optional[Config] = None,\n meta_cats: List[MetaCAT] = [],\n addl_ner: Union[TransformersNER, List[TransformersNER]] = []) -> None:\n self.cdb = cdb\n self.vocab = vocab\n if config is None:\n # Take config from the cdb\n self.config = cdb.config\n else:\n # Take the new config and assign it to the CDB also\n self.config = config\n self.cdb.config = config\n self._meta_cats = meta_cats\n self._addl_ner = addl_ner if isinstance(addl_ner, list) else [addl_ner]\n self._create_pipeline(self.config)\n\n def _create_pipeline(self, config):\n # Set log level\n self.log.setLevel(config.general['log_level'])\n\n # Build the pipeline\n self.pipe = Pipe(tokenizer=spacy_split_all, config=config)\n self.pipe.add_tagger(tagger=tag_skip_and_punct,\n name='skip_and_punct',\n additional_fields=['is_punct'])\n\n if self.vocab is not None:\n spell_checker = BasicSpellChecker(cdb_vocab=self.cdb.vocab, config=config, data_vocab=self.vocab)\n self.pipe.add_token_normalizer(spell_checker=spell_checker, config=config)\n\n # Add NER\n self.ner = NER(self.cdb, config)\n self.pipe.add_ner(self.ner)\n\n # Add LINKER\n self.linker = Linker(self.cdb, self.vocab, config)\n self.pipe.add_linker(self.linker)\n\n # Add addl_ner if they exist\n for ner in self._addl_ner:\n self.pipe.add_addl_ner(ner, ner.config.general['name'])\n\n # Add meta_annotaiton classes if they exist\n for meta_cat in self._meta_cats:\n self.pipe.add_meta_cat(meta_cat, meta_cat.config.general['category_name'])\n\n # Set max document length\n self.pipe.spacy_nlp.max_length = config.preprocessing.get('max_document_length', 1000000)\n\n @deprecated(message=\"Replaced with cat.pipe.spacy_nlp.\")\n def get_spacy_nlp(self) -> Language:\n \"\"\" Returns the spacy pipeline with MedCAT\n \"\"\"\n return self.pipe.spacy_nlp\n\n def get_hash(self):\n r\"\"\" Will not be a deep hash but will try to cactch all the changing parts during training.\n \"\"\"\n hasher = Hasher()\n hasher.update(self.cdb.get_hash())\n\n hasher.update(self.config.get_hash())\n\n for mc in self._meta_cats:\n hasher.update(mc.get_hash())\n\n for trf in self._addl_ner:\n hasher.update(trf.get_hash())\n\n return hasher.hexdigest()\n\n def get_model_card(self, as_dict=False):\n \"\"\"\n A minimal model card for MedCAT model packs.\n\n Args:\n as_dict: return the model card as a dictionary instead of a str.\n\n Returns:\n By default a str - indented JSON object.\n \"\"\"\n card = {\n 'Model ID': self.config.version['id'],\n 'Last Modified On': self.config.version['last_modified'],\n 'History (from least to most recent)': self.config.version['history'],\n 'Description': self.config.version['description'],\n 'Source Ontology': self.config.version['ontology'],\n 'Location': self.config.version['location'],\n 'MetaCAT models': self.config.version['meta_cats'],\n 'Basic CDB Stats': self.config.version['cdb_info'],\n 'Performance': self.config.version['performance'],\n 'Important Parameters (Partial view, all available in cat.config)': get_important_config_parameters(self.config),\n 
'MedCAT Version': self.config.version['medcat_version']\n }\n\n if as_dict:\n return card\n else:\n return json.dumps(card, indent=2, sort_keys=False)\n\n def _versioning(self):\n # Check version info and do not allow without it\n if self.config.version['description'] == 'No description':\n self.log.warning(\"Please consider populating the version information [description, performance, location, ontology] in cat.config.version\")\n\n # Fill the stuff automatically that is needed for versioning\n m = self.get_hash()\n version = self.config.version\n if version['id'] is None or m != version['id']:\n if version['id'] is not None:\n version['history'].append(version['id'])\n version['id'] = m\n version['last_modified'] = date.today().strftime(\"%d %B %Y\")\n version['cdb_info'] = self.cdb._make_stats()\n version['meta_cats'] = [meta_cat.get_model_card(as_dict=True) for meta_cat in self._meta_cats]\n version['medcat_version'] = __version__\n self.log.warning(\"Please consider updating [description, performance, location, ontology] in cat.config.version\")\n\n def create_model_pack(self, save_dir_path: str, model_pack_name: str = DEFAULT_MODEL_PACK_NAME) -> str:\n r\"\"\" Will crete a .zip file containing all the models in the current running instance\n of MedCAT. This is not the most efficient way, for sure, but good enough for now.\n\n model_pack_name - an id will be appended to this name\n\n returns:\n Model pack name\n \"\"\"\n # Spacy model always should be just the name, but during loading it can be reset to path\n self.config.general['spacy_model'] = os.path.basename(self.config.general['spacy_model'])\n # Versioning\n self._versioning()\n model_pack_name += \"_{}\".format(self.config.version['id'])\n\n self.log.warning(\"This will save all models into a zip file, can take some time and require quite a bit of disk space.\")\n _save_dir_path = save_dir_path\n save_dir_path = os.path.join(save_dir_path, model_pack_name)\n\n # expand user path to make this work with '~'\n os.makedirs(os.path.expanduser(save_dir_path), exist_ok=True)\n\n # Save the used spacy model\n spacy_path = os.path.join(save_dir_path, self.config.general['spacy_model'])\n if str(self.pipe.spacy_nlp._path) != spacy_path:\n # First remove if something is there\n shutil.rmtree(spacy_path, ignore_errors=True)\n shutil.copytree(str(self.pipe.spacy_nlp._path), spacy_path)\n\n # Save the CDB\n cdb_path = os.path.join(save_dir_path, \"cdb.dat\")\n self.cdb.save(cdb_path)\n\n # Save the Vocab\n vocab_path = os.path.join(save_dir_path, \"vocab.dat\")\n if self.vocab is not None:\n # We will allow creation of modelpacks without vocabs\n self.vocab.save(vocab_path)\n\n # Save addl_ner\n for comp in self.pipe.spacy_nlp.components:\n if isinstance(comp[1], TransformersNER):\n trf_path = os.path.join(save_dir_path, \"trf_\" + comp[1].config.general['name'])\n comp[1].save(trf_path)\n\n # Save all meta_cats\n for comp in self.pipe.spacy_nlp.components:\n if isinstance(comp[1], MetaCAT):\n name = comp[0]\n meta_path = os.path.join(save_dir_path, \"meta_\" + name)\n comp[1].save(meta_path)\n\n # Add a model card also, why not\n model_card_path = os.path.join(save_dir_path, \"model_card.json\")\n json.dump(self.get_model_card(as_dict=True), open(model_card_path, 'w'), indent=2)\n\n # Zip everything\n shutil.make_archive(os.path.join(_save_dir_path, model_pack_name), 'zip', root_dir=save_dir_path)\n\n # Log model card and return new name\n self.log.info(self.get_model_card()) # Print the model card\n return model_pack_name\n\n 
@classmethod\n def load_model_pack(cls, zip_path: str, meta_cat_config_dict: Optional[Dict] = None) -> \"CAT\":\n r\"\"\"Load everything within the 'model pack', i.e. the CDB, config, vocab and any MetaCAT models\n (if present)\n\n Args:\n zip_path:\n path to model pack zip.\n meta_cat_config_dict:\n A config dict that will overwrite existing configs in meta_cat.\n e.g. meta_cat_config_dict = {'general': {'device': 'cpu'}}\n \"\"\"\n from medcat.cdb import CDB\n from medcat.vocab import Vocab\n from medcat.meta_cat import MetaCAT\n\n base_dir = os.path.dirname(zip_path)\n filename = os.path.basename(zip_path)\n foldername = filename.replace(\".zip\", '')\n\n model_pack_path = os.path.join(base_dir, foldername)\n if os.path.exists(model_pack_path):\n cls.log.info(\"Found an existing unziped model pack at: {}, the provided zip will not be touched.\".format(model_pack_path))\n else:\n cls.log.info(\"Unziping the model pack and loading models.\")\n shutil.unpack_archive(zip_path, extract_dir=model_pack_path)\n\n # Load the CDB\n cdb_path = os.path.join(model_pack_path, \"cdb.dat\")\n cdb = CDB.load(cdb_path)\n\n # TODO load addl_ner\n\n # Modify the config to contain full path to spacy model\n cdb.config.general['spacy_model'] = os.path.join(model_pack_path, os.path.basename(cdb.config.general['spacy_model']))\n\n # Load Vocab\n vocab_path = os.path.join(model_pack_path, \"vocab.dat\")\n if os.path.exists(vocab_path):\n vocab = Vocab.load(vocab_path)\n else:\n vocab = None\n\n # Find meta models in the model_pack\n trf_paths = [os.path.join(model_pack_path, path) for path in os.listdir(model_pack_path) if path.startswith('trf_')]\n addl_ner = []\n for trf_path in trf_paths:\n trf = TransformersNER.load(save_dir_path=trf_path)\n trf.cdb = cdb # Set the cat.cdb to be the CDB of the TRF model\n addl_ner.append(trf)\n\n # Find meta models in the model_pack\n meta_paths = [os.path.join(model_pack_path, path) for path in os.listdir(model_pack_path) if path.startswith('meta_')]\n meta_cats = []\n for meta_path in meta_paths:\n meta_cats.append(MetaCAT.load(save_dir_path=meta_path,\n config_dict=meta_cat_config_dict))\n\n cat = cls(cdb=cdb, config=cdb.config, vocab=vocab, meta_cats=meta_cats, addl_ner=addl_ner)\n cls.log.info(cat.get_model_card()) # Print the model card\n return cat\n\n def __call__(self, text: Optional[str], do_train: bool = False) -> Optional[Doc]:\n r\"\"\"\n Push the text through the pipeline.\n\n Args:\n text (string):\n The text to be annotated, if the text length is longer than\n self.config.preprocessing['max_document_length'] it will be trimmed to that length.\n do_train (bool, defaults to `False`):\n This causes so many screwups when not there, so I'll force training\n to False. To run training it is much better to use the self.train() function\n but for some special cases I'm leaving it here also.\n Returns:\n A single spacy document or multiple spacy documents with the extracted entities\n \"\"\"\n # Should we train - do not use this for training, unless you know what you are doing. Use the\n #self.train() function\n self.config.linking['train'] = do_train\n\n if text is None:\n self.log.error(\"The input text should be either a string or a sequence of strings but got %s\", type(text))\n return None\n else:\n text = self._get_trimmed_text(str(text))\n return self.pipe(text)\n\n def __repr__(self):\n \"\"\"\n Prints the model_card for this CAT instance.\n Returns:\n the 'Model Card' for this CAT instance. 
This includes NER+L config and any MetaCATs\n \"\"\"\n return self.get_model_card(as_dict=False)\n\n def _print_stats(self,\n data: Dict,\n epoch: int = 0,\n use_project_filters: bool = False,\n use_overlaps: bool = False,\n use_cui_doc_limit: bool = False,\n use_groups: bool = False,\n extra_cui_filter: Optional[Set] = None) -> Tuple:\n r\"\"\" TODO: Refactor and make nice\n Print metrics on a dataset (F1, P, R), it will also print the concepts that have the most FP,FN,TP.\n\n Args:\n data (list of dict):\n The json object that we get from MedCATtrainer on export.\n epoch (int):\n Used during training, so we know what epoch is it.\n use_project_filters (boolean):\n Each project in medcattrainer can have filters, do we want to respect those filters\n when calculating metrics.\n use_overlaps (boolean):\n Allow overlapping entites, nearly always False as it is very difficult to annotate overlapping entites.\n use_cui_doc_limit (boolean):\n If True the metrics for a CUI will be only calculated if that CUI appears in a document, in other words\n if the document was annotated for that CUI. Useful in very specific situations when during the annotation\n process the set of CUIs changed.\n use_groups (boolean):\n If True concepts that have groups will be combined and stats will be reported on groups.\n extra_cui_filter(Optional[Set]):\n This filter will be intersected with all other filters, or if all others are not set then only this one will be used.\n\n Returns:\n fps (dict):\n False positives for each CUI\n fns (dict):\n False negatives for each CUI\n tps (dict):\n True positives for each CUI\n cui_prec (dict):\n Precision for each CUI\n cui_rec (dict):\n Recall for each CUI\n cui_f1 (dict):\n F1 for each CUI\n cui_counts (dict):\n Number of occurrence for each CUI\n examples (dict):\n Examples for each of the fp, fn, tp. 
Format will be examples['fp']['cui'][<list_of_examples>]\n \"\"\"\n tp = 0\n fp = 0\n fn = 0\n fps: Dict = {}\n fns: Dict = {}\n tps: Dict = {}\n cui_prec: Dict = {}\n cui_rec: Dict = {}\n cui_f1: Dict = {}\n cui_counts: Dict = {}\n examples: Dict = {'fp': {}, 'fn': {}, 'tp': {}}\n\n fp_docs: Set = set()\n fn_docs: Set = set()\n # reset and back up filters\n _filters = deepcopy(self.config.linking['filters'])\n filters = self.config.linking['filters']\n for pind, project in tqdm(enumerate(data['projects']), desc=\"Stats project\", total=len(data['projects']), leave=False):\n filters['cuis'] = set()\n\n # Add extrafilter if set\n if isinstance(extra_cui_filter, set):\n filters['cuis'] = extra_cui_filter\n\n if use_project_filters:\n project_filter = get_project_filters(cuis=project.get('cuis', None),\n type_ids=project.get('tuis', None),\n cdb=self.cdb,\n project=project)\n # Intersect project filter with existing if it has something\n if project_filter:\n filters['cuis'] = intersect_nonempty_set(project_filter, filters['cuis'])\n\n for dind, doc in tqdm(\n enumerate(project[\"documents\"]),\n desc=\"Stats document\",\n total=len(project[\"documents\"]),\n leave=False,\n ):\n anns = self._get_doc_annotations(doc)\n\n # Apply document level filtering, in this case project_filter is ignored while the extra_cui_filter is respected still\n if use_cui_doc_limit:\n _cuis = set([ann['cui'] for ann in anns])\n if _cuis:\n filters['cuis'] = intersect_nonempty_set(_cuis, extra_cui_filter)\n else:\n filters['cuis'] = {'empty'}\n\n spacy_doc: Doc = self(doc['text'])\n\n if use_overlaps:\n p_anns = spacy_doc._.ents\n else:\n p_anns = spacy_doc.ents\n\n anns_norm = []\n anns_norm_neg = []\n anns_examples = []\n anns_norm_cui = []\n for ann in anns:\n cui = ann['cui']\n if check_filters(cui, filters):\n if use_groups:\n cui = self.cdb.addl_info['cui2group'].get(cui, cui)\n\n if ann.get('validated', True) and (not ann.get('killed', False) and not ann.get('deleted', False)):\n anns_norm.append((ann['start'], cui))\n anns_examples.append({\"text\": doc['text'][max(0, ann['start']-60):ann['end']+60],\n \"cui\": cui,\n \"source value\": ann['value'],\n \"acc\": 1,\n \"project index\": pind,\n \"document inedex\": dind})\n elif ann.get('validated', True) and (ann.get('killed', False) or ann.get('deleted', False)):\n anns_norm_neg.append((ann['start'], cui))\n\n\n if ann.get(\"validated\", True):\n # This is used to test was someone annotating for this CUI in this document\n anns_norm_cui.append(cui)\n cui_counts[cui] = cui_counts.get(cui, 0) + 1\n\n p_anns_norm = []\n p_anns_examples = []\n for ann in p_anns:\n cui = ann._.cui\n if use_groups:\n cui = self.cdb.addl_info['cui2group'].get(cui, cui)\n\n p_anns_norm.append((ann.start_char, cui))\n p_anns_examples.append({\"text\": doc['text'][max(0, ann.start_char-60):ann.end_char+60],\n \"cui\": cui,\n \"source value\": ann.text,\n \"acc\": float(ann._.context_similarity),\n \"project index\": pind,\n \"document inedex\": dind})\n\n\n for iann, ann in enumerate(p_anns_norm):\n cui = ann[1]\n if ann in anns_norm:\n tp += 1\n tps[cui] = tps.get(cui, 0) + 1\n\n example = p_anns_examples[iann]\n examples['tp'][cui] = examples['tp'].get(cui, []) + [example]\n else:\n fp += 1\n fps[cui] = fps.get(cui, 0) + 1\n fp_docs.add(doc.get('name', 'unk'))\n\n # Add example for this FP prediction\n example = p_anns_examples[iann]\n if ann in anns_norm_neg:\n # Means that it really was annotated as negative\n example['real_fp'] = True\n\n examples['fp'][cui] = 
examples['fp'].get(cui, []) + [example]\n\n for iann, ann in enumerate(anns_norm):\n if ann not in p_anns_norm:\n cui = ann[1]\n fn += 1\n fn_docs.add(doc.get('name', 'unk'))\n\n fns[cui] = fns.get(cui, 0) + 1\n examples['fn'][cui] = examples['fn'].get(cui, []) + [anns_examples[iann]]\n\n try:\n prec = tp / (tp + fp)\n rec = tp / (tp + fn)\n f1 = 2*(prec*rec) / (prec + rec)\n print(\"Epoch: {}, Prec: {}, Rec: {}, F1: {}\\n\".format(epoch, prec, rec, f1))\n print(\"Docs with false positives: {}\\n\".format(\"; \".join([str(x) for x in list(fp_docs)[0:10]])))\n print(\"Docs with false negatives: {}\\n\".format(\"; \".join([str(x) for x in list(fn_docs)[0:10]])))\n\n # Sort fns & prec\n fps = {k: v for k, v in sorted(fps.items(), key=lambda item: item[1], reverse=True)}\n fns = {k: v for k, v in sorted(fns.items(), key=lambda item: item[1], reverse=True)}\n tps = {k: v for k, v in sorted(tps.items(), key=lambda item: item[1], reverse=True)}\n\n\n # F1 per concept\n for cui in tps.keys():\n prec = tps[cui] / (tps.get(cui, 0) + fps.get(cui, 0))\n rec = tps[cui] / (tps.get(cui, 0) + fns.get(cui, 0))\n f1 = 2*(prec*rec) / (prec + rec)\n cui_prec[cui] = prec\n cui_rec[cui] = rec\n cui_f1[cui] = f1\n\n\n # Get top 10\n pr_fps = [(self.cdb.cui2preferred_name.get(cui,\n list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, fps[cui]) for cui in list(fps.keys())[0:10]]\n pr_fns = [(self.cdb.cui2preferred_name.get(cui,\n list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, fns[cui]) for cui in list(fns.keys())[0:10]]\n pr_tps = [(self.cdb.cui2preferred_name.get(cui,\n list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, tps[cui]) for cui in list(tps.keys())[0:10]]\n\n\n print(\"\\n\\nFalse Positives\\n\")\n for one in pr_fps:\n print(\"{:70} - {:20} - {:10}\".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))\n print(\"\\n\\nFalse Negatives\\n\")\n for one in pr_fns:\n print(\"{:70} - {:20} - {:10}\".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))\n print(\"\\n\\nTrue Positives\\n\")\n for one in pr_tps:\n print(\"{:70} - {:20} - {:10}\".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))\n print(\"*\"*110 + \"\\n\")\n\n except Exception:\n traceback.print_exc()\n\n # restore filters to original state\n self.config.linking['filters'] = _filters\n\n return fps, fns, tps, cui_prec, cui_rec, cui_f1, cui_counts, examples\n\n def _init_ckpts(self, is_resumed, checkpoint):\n if self.config.general['checkpoint']['steps'] is not None or checkpoint is not None:\n checkpoint_config = CheckpointConfig(**self.config.general.get('checkpoint', {}))\n checkpoint_manager = CheckpointManager('cat_train', checkpoint_config)\n if is_resumed:\n # TODO: probably remove is_resumed mark and always resume if a checkpoint is provided,\n #but I'll leave it for now\n checkpoint = checkpoint or checkpoint_manager.get_latest_checkpoint()\n self.log.info(f\"Resume training on the most recent checkpoint at {checkpoint.dir_path}...\")\n self.cdb = checkpoint.restore_latest_cdb()\n self.cdb.config.merge_config(self.config.__dict__)\n self.config = self.cdb.config\n self._create_pipeline(self.config)\n else:\n checkpoint = checkpoint or checkpoint_manager.create_checkpoint()\n self.log.info(f\"Start new training and checkpoints will be saved at {checkpoint.dir_path}...\")\n\n return checkpoint\n\n def train(self,\n data_iterator: Iterable,\n nepochs: int = 1,\n fine_tune: bool = True,\n progress_print: int = 1000,\n checkpoint: Optional[Checkpoint] = None,\n is_resumed: bool = False) -> None:\n \"\"\" Runs training on the data, note 
that the maximum length of a line\n or document is 1M characters. Anything longer will be trimmed.\n\n Args:\n data_iterator (Iterable):\n Simple iterator over sentences/documents, e.g. a open file\n or an array or anything that we can use in a for loop.\n nepochs (int):\n Number of epochs for which to run the training.\n fine_tune (bool):\n If False old training will be removed.\n progress_print (int):\n Print progress after N lines.\n checkpoint (Optional[medcat.utils.checkpoint.CheckpointUT]):\n The MedCAT checkpoint object\n is_resumed (bool):\n If True resume the previous training; If False, start a fresh new training.\n \"\"\"\n if not fine_tune:\n self.log.info(\"Removing old training data!\")\n self.cdb.reset_training()\n checkpoint = self._init_ckpts(is_resumed, checkpoint)\n\n latest_trained_step = checkpoint.count if checkpoint is not None else 0\n epochal_data_iterator = chain.from_iterable(repeat(data_iterator, nepochs))\n for line in islice(epochal_data_iterator, latest_trained_step, None):\n if line is not None and line:\n # Convert to string\n line = str(line).strip()\n\n try:\n _ = self(line, do_train=True)\n except Exception as e:\n self.log.warning(\"LINE: '%s...' \\t WAS SKIPPED\", line[0:100])\n self.log.warning(\"BECAUSE OF: %s\", str(e))\n else:\n self.log.warning(\"EMPTY LINE WAS DETECTED AND SKIPPED\")\n\n latest_trained_step += 1\n if latest_trained_step % progress_print == 0:\n self.log.info(\"DONE: %s\", str(latest_trained_step))\n if checkpoint is not None and checkpoint.steps is not None and latest_trained_step % checkpoint.steps == 0:\n checkpoint.save(cdb=self.cdb, count=latest_trained_step)\n\n self.config.linking['train'] = False\n\n def add_cui_to_group(self, cui: str, group_name: str) -> None:\n r\"\"\"\n Ads a CUI to a group, will appear in cdb.addl_info['cui2group']\n\n Args:\n cui (str):\n The concept to be added\n group_name (str):\n The group to whcih the concept will be added\n\n Examples:\n\n >>> cat.add_cui_to_group(\"S-17\", 'pain')\n \"\"\"\n\n # Add group_name\n self.cdb.addl_info['cui2group'][cui] = group_name\n\n def unlink_concept_name(self, cui: str, name: str, preprocessed_name: bool = False) -> None:\n r\"\"\"\n Unlink a concept name from the CUI (or all CUIs if full_unlink), removes the link from\n the Concept Database (CDB). 
As a consequence medcat will never again link the `name`\n to this CUI - meaning the name will not be detected as a concept in the future.\n\n Args:\n cui (str):\n The CUI from which the `name` will be removed\n name (str):\n The span of text to be removed from the linking dictionary\n Examples:\n\n >>> # To never again link C0020538 to HTN\n >>> cat.unlink_concept_name('C0020538', 'htn', False)\n \"\"\"\n\n cuis = [cui]\n if preprocessed_name:\n names = {name: 'nothing'}\n else:\n names = prepare_name(name, self.pipe.spacy_nlp, {}, self.config)\n\n # If full unlink find all CUIs\n if self.config.general.get('full_unlink', False):\n for n in names:\n cuis.extend(self.cdb.name2cuis.get(n, []))\n\n # Remove name from all CUIs\n for c in cuis:\n self.cdb.remove_names(cui=c, names=names)\n\n def add_and_train_concept(self,\n cui: str,\n name: str,\n spacy_doc: Optional[Doc] = None,\n spacy_entity: Optional[Union[List[Token], Span]] = None,\n ontologies: Set = set(),\n name_status: str = 'A',\n type_ids: Set = set(),\n description: str = '',\n full_build: bool = True,\n negative: bool = False,\n devalue_others: bool = False,\n do_add_concept: bool = True) -> None:\n r\"\"\" Add a name to an existing concept, or add a new concept, or do not do anything if the name or concept already exists. Perform\n training if spacy_entity and spacy_doc are set.\n\n Args:\n cui (str):\n CUI of the concept\n name (str):\n Name to be linked to the concept (in the case of MedCATtrainer this is simply the\n selected value in text, no preprocessing or anything needed).\n spacy_doc (spacy.tokens.Doc):\n Spacy represenation of the document that was manually annotated.\n spacy_entity (Optional[Union[List[Token], Span]]):\n Given the spacy document, this is the annotated span of text - list of annotated tokens that are marked with this CUI.\n negative (bool):\n Is this a negative or positive example.\n devalue_others:\n If set, cuis to which this name is assigned and are not `cui` will receive negative training given\n that negative=False.\n\n \\*\\*other:\n Refer to medcat.cat.cdb.CDB.add_concept\n \"\"\"\n names = prepare_name(name, self.pipe.spacy_nlp, {}, self.config)\n # Only if not negative, otherwise do not add the new name if in fact it should not be detected\n if do_add_concept and not negative:\n self.cdb.add_concept(cui=cui, names=names, ontologies=ontologies, name_status=name_status, type_ids=type_ids, description=description,\n full_build=full_build)\n\n if spacy_entity is not None and spacy_doc is not None:\n # Train Linking\n self.linker.context_model.train(cui=cui, entity=spacy_entity, doc=spacy_doc, negative=negative, names=names)\n\n if not negative and devalue_others:\n # Find all cuis\n cuis = set()\n for n in names:\n cuis.update(self.cdb.name2cuis.get(n, []))\n # Remove the cui for which we just added positive training\n if cui in cuis:\n cuis.remove(cui)\n # Add negative training for all other CUIs that link to these names\n for _cui in cuis:\n self.linker.context_model.train(cui=_cui, entity=spacy_entity, doc=spacy_doc, negative=True)\n\n def train_supervised(self,\n data_path: str,\n reset_cui_count: bool = False,\n nepochs: int = 1,\n print_stats: int = 0,\n use_filters: bool = False,\n terminate_last: bool = False,\n use_overlaps: bool = False,\n use_cui_doc_limit: bool = False,\n test_size: int = 0,\n devalue_others: bool = False,\n use_groups: bool = False,\n never_terminate: bool = False,\n train_from_false_positives: bool = False,\n extra_cui_filter: Optional[Set] = None,\n checkpoint: 
Optional[Checkpoint] = None,\n is_resumed: bool = False) -> Tuple:\n r\"\"\" TODO: Refactor, left from old\n Run supervised training on a dataset from MedCATtrainer. Please take care that this is more a simulated\n online training then supervised.\n\n Args:\n data_path (str):\n The path to the json file that we get from MedCATtrainer on export.\n reset_cui_count (boolean):\n Used for training with weight_decay (annealing). Each concept has a count that is there\n from the beginning of the CDB, that count is used for annealing. Resetting the count will\n significantly increase the training impact. This will reset the count only for concepts\n that exist in the the training data.\n nepochs (int):\n Number of epochs for which to run the training.\n print_stats (int):\n If > 0 it will print stats every print_stats epochs.\n use_filters (boolean):\n Each project in medcattrainer can have filters, do we want to respect those filters\n when calculating metrics.\n terminate_last (boolean):\n If true, concept termination will be done after all training.\n use_overlaps (boolean):\n Allow overlapping entities, nearly always False as it is very difficult to annotate overlapping entities.\n use_cui_doc_limit (boolean):\n If True the metrics for a CUI will be only calculated if that CUI appears in a document, in other words\n if the document was annotated for that CUI. Useful in very specific situations when during the annotation\n process the set of CUIs changed.\n test_size (float):\n If > 0 the data set will be split into train test based on this ration. Should be between 0 and 1.\n Usually 0.1 is fine.\n devalue_others(bool):\n Check add_name for more details.\n use_groups (boolean):\n If True concepts that have groups will be combined and stats will be reported on groups.\n never_terminate (boolean):\n If True no termination will be applied\n train_from_false_positives (boolean):\n If True it will use false positive examples detected by medcat and train from them as negative examples.\n extra_cui_filter(Optional[Set]):\n This filter will be intersected with all other filters, or if all others are not set then only this one will be used.\n checkpoint (Optional[Optional[medcat.utils.checkpoint.CheckpointST]):\n The MedCAT CheckpointST object\n is_resumed (bool):\n If True resume the previous training; If False, start a fresh new training.\n Returns:\n fp (dict):\n False positives for each CUI\n fn (dict):\n False negatives for each CUI\n tp (dict):\n True positives for each CUI\n p (dict):\n Precision for each CUI\n r (dict):\n Recall for each CUI\n f1 (dict):\n F1 for each CUI\n cui_counts (dict):\n Number of occurrence for each CUI\n examples (dict):\n FP/FN examples of sentences for each CUI\n \"\"\"\n checkpoint = self._init_ckpts(is_resumed, checkpoint)\n\n # Backup filters\n _filters = deepcopy(self.config.linking['filters'])\n filters = self.config.linking['filters']\n\n fp = fn = tp = p = r = f1 = examples = {}\n with open(data_path) as f:\n data = json.load(f)\n cui_counts = {}\n\n if test_size == 0:\n self.log.info(\"Running without a test set, or train==test\")\n test_set = data\n train_set = data\n else:\n train_set, test_set, _, _ = make_mc_train_test(data, self.cdb, test_size=test_size)\n\n if print_stats > 0:\n fp, fn, tp, p, r, f1, cui_counts, examples = self._print_stats(test_set,\n use_project_filters=use_filters,\n use_cui_doc_limit=use_cui_doc_limit,\n use_overlaps=use_overlaps,\n use_groups=use_groups,\n extra_cui_filter=extra_cui_filter)\n if reset_cui_count:\n # Get all CUIs\n 
cuis = []\n for project in train_set['projects']:\n for doc in project['documents']:\n doc_annotations = self._get_doc_annotations(doc)\n for ann in doc_annotations:\n cuis.append(ann['cui'])\n for cui in set(cuis):\n if cui in self.cdb.cui2count_train:\n self.cdb.cui2count_train[cui] = 100\n\n # Remove entities that were terminated\n if not never_terminate:\n for project in train_set['projects']:\n for doc in project['documents']:\n doc_annotations = self._get_doc_annotations(doc)\n for ann in doc_annotations:\n if ann.get('killed', False):\n self.unlink_concept_name(ann['cui'], ann['value'])\n\n latest_trained_step = checkpoint.count if checkpoint is not None else 0\n current_epoch, current_project, current_document = self._get_training_start(train_set, latest_trained_step)\n\n for epoch in trange(current_epoch, nepochs, initial=current_epoch, total=nepochs, desc='Epoch', leave=False):\n # Print acc before training\n for idx_project in trange(current_project, len(train_set['projects']), initial=current_project, total=len(train_set['projects']), desc='Project', leave=False):\n project = train_set['projects'][idx_project]\n\n # Set filters in case we are using the train_from_fp\n filters['cuis'] = set()\n if isinstance(extra_cui_filter, set):\n filters['cuis'] = extra_cui_filter\n\n if use_filters:\n project_filter = get_project_filters(cuis=project.get('cuis', None),\n type_ids=project.get('tuis', None),\n cdb=self.cdb,\n project=project)\n\n if project_filter:\n filters['cuis'] = intersect_nonempty_set(project_filter, filters['cuis'])\n\n for idx_doc in trange(current_document, len(project['documents']), initial=current_document, total=len(project['documents']), desc='Document', leave=False):\n doc = project['documents'][idx_doc]\n spacy_doc: Doc = self(doc['text'])\n\n # Compatibility with old output where annotations are a list\n doc_annotations = self._get_doc_annotations(doc)\n for ann in doc_annotations:\n if not ann.get('killed', False):\n cui = ann['cui']\n start = ann['start']\n end = ann['end']\n spacy_entity = tkns_from_doc(spacy_doc=spacy_doc, start=start, end=end)\n deleted = ann.get('deleted', False)\n self.add_and_train_concept(cui=cui,\n name=ann['value'],\n spacy_doc=spacy_doc,\n spacy_entity=spacy_entity,\n negative=deleted,\n devalue_others=devalue_others)\n if train_from_false_positives:\n fps: List[Span] = get_false_positives(doc, spacy_doc)\n\n for fp in fps:\n fp_: Span = fp\n self.add_and_train_concept(cui=fp_._.cui,\n name=fp_.text,\n spacy_doc=spacy_doc,\n spacy_entity=fp_,\n negative=True,\n do_add_concept=False)\n\n latest_trained_step += 1\n if checkpoint is not None and checkpoint.steps is not None and latest_trained_step % checkpoint.steps == 0:\n checkpoint.save(self.cdb, latest_trained_step)\n\n if terminate_last and not never_terminate:\n # Remove entities that were terminated, but after all training is done\n for project in train_set['projects']:\n for doc in project['documents']:\n doc_annotations = self._get_doc_annotations(doc)\n for ann in doc_annotations:\n if ann.get('killed', False):\n self.unlink_concept_name(ann['cui'], ann['value'])\n\n if print_stats > 0 and (epoch + 1) % print_stats == 0:\n fp, fn, tp, p, r, f1, cui_counts, examples = self._print_stats(test_set,\n epoch=epoch + 1,\n use_project_filters=use_filters,\n use_cui_doc_limit=use_cui_doc_limit,\n use_overlaps=use_overlaps,\n use_groups=use_groups,\n extra_cui_filter=extra_cui_filter)\n\n # Set the filters again\n self.config.linking['filters'] = _filters\n\n return fp, fn, tp, p, r, 
f1, cui_counts, examples\n\n def get_entities(self,\n text: str,\n only_cui: bool = False,\n addl_info: List[str] = ['cui2icd10', 'cui2ontologies', 'cui2snomed']) -> Dict:\n doc = self(text)\n out = self._doc_to_out(doc, only_cui, addl_info)\n return out\n\n def get_entities_multi_texts(self,\n texts: Union[Iterable[str], Iterable[Tuple]],\n only_cui: bool = False,\n addl_info: List[str] = ['cui2icd10', 'cui2ontologies', 'cui2snomed'],\n n_process: Optional[int] = None,\n batch_size: Optional[int] = None) -> List[Dict]:\n r\"\"\" Get entities\n text: text to be annotated\n return: entities\n \"\"\"\n out: List[Dict] = []\n\n if n_process is None:\n texts_ = self._generate_trimmed_texts(texts)\n for text in texts_:\n out.append(self._doc_to_out(self(text), only_cui, addl_info))\n else:\n self.pipe.set_error_handler(self._pipe_error_handler)\n try:\n texts_ = self._get_trimmed_texts(texts)\n docs = self.pipe.batch_multi_process(texts_, n_process, batch_size)\n\n for doc in tqdm(docs, total=len(texts_)):\n doc = None if doc.text.strip() == '' else doc\n out.append(self._doc_to_out(doc, only_cui, addl_info, out_with_text=True))\n\n # Currently spaCy cannot mark which pieces of texts failed within the pipe so be this workaround,\n # which also assumes texts are different from each others.\n if len(out) < len(texts_):\n self.log.warning(\"Found at least one failed batch and set output for enclosed texts to empty\")\n for i, text in enumerate(texts_):\n if i == len(out):\n out.append(self._doc_to_out(None, only_cui, addl_info))\n elif out[i].get('text', '') != text:\n out.insert(i, self._doc_to_out(None, only_cui, addl_info))\n\n cnf_annotation_output = getattr(self.config, 'annotation_output', {})\n if not(cnf_annotation_output.get('include_text_in_output', False)):\n for o in out:\n if o is not None:\n o.pop('text', None)\n finally:\n self.pipe.reset_error_handler()\n\n return out\n\n def get_json(self, text: str, only_cui: bool = False, addl_info=['cui2icd10', 'cui2ontologies']) -> str:\n \"\"\" Get output in json format\n\n text: text to be annotated\n return: json with fields {'entities': <>, 'text': text}\n \"\"\"\n ents = self.get_entities(text, only_cui, addl_info=addl_info)['entities']\n out = {'annotations': ents, 'text': text}\n\n return json.dumps(out)\n\n @staticmethod\n def _get_training_start(train_set, latest_trained_step):\n total_steps_per_epoch = sum([1 for project in train_set['projects'] for _ in project['documents']])\n if total_steps_per_epoch == 0:\n raise ValueError(\"MedCATtrainer export contains no documents\")\n current_epoch, last_step_in_epoch = divmod(latest_trained_step, total_steps_per_epoch)\n document_count = 0\n current_project = 0\n current_document = 0\n for idx_project, project in enumerate(train_set['projects']):\n for idx_doc, _ in enumerate(project['documents']):\n document_count += 1\n if document_count == last_step_in_epoch:\n current_project = idx_project\n current_document = idx_doc\n break\n if current_project > 0:\n break\n current_document = 0\n return current_epoch, current_project, current_document\n\n def _separate_nn_components(self):\n # Loop though the models and check are there GPU devices\n nn_components = []\n for component in self.pipe.spacy_nlp.components:\n if isinstance(component[1], MetaCAT) or isinstance(component[1], TransformersNER):\n self.pipe.spacy_nlp.disable_pipe(component[0])\n nn_components.append(component)\n\n return nn_components\n\n def _run_nn_components(self, docs: Dict, nn_components: List, id2text: Dict) -> None:\n 
r\"\"\" This will add meta_anns in-place to the docs dict.\n \"\"\"\n self.log.debug(\"Running GPU components separately\")\n\n # First convert the docs into the fake spacy doc format\n spacy_docs = json_to_fake_spacy(docs, id2text=id2text)\n # Disable component locks also\n for name, component in nn_components:\n component.config.general['disable_component_lock'] = True\n\n # For meta_cat compoments \n for name, component in [c for c in nn_components if isinstance(c[1], MetaCAT)]:\n spacy_docs = component.pipe(spacy_docs)\n for spacy_doc in spacy_docs:\n for ent in spacy_doc.ents:\n docs[spacy_doc.id]['entities'][ent._.id]['meta_anns'].update(ent._.meta_anns)\n\n def _batch_generator(self, data: Iterable, batch_size_chars: int, skip_ids: Set = set()):\n docs = []\n char_count = 0\n for doc in data:\n if doc[0] not in skip_ids:\n char_count += len(str(doc[1]))\n docs.append(doc)\n if char_count < batch_size_chars:\n continue\n yield docs\n docs = []\n char_count = 0\n\n if len(docs) > 0:\n yield docs\n\n def _save_docs_to_file(self, docs: Iterable, annotated_ids: List[str], save_dir_path: str, annotated_ids_path: Optional[str], part_counter: int = 0) -> int:\n path = os.path.join(save_dir_path, 'part_{}.pickle'.format(part_counter))\n pickle.dump(docs, open(path, \"wb\"))\n self.log.info(\"Saved part: %s, to: %s\", part_counter, path)\n part_counter = part_counter + 1 # Increase for save, as it should be what is the next part\n if annotated_ids_path is not None:\n pickle.dump((annotated_ids, part_counter), open(annotated_ids_path, 'wb'))\n return part_counter\n\n def multiprocessing(self,\n data: Union[List[Tuple], Iterable[Tuple]],\n nproc: int = 2,\n batch_size_chars: int = 5000 * 1000,\n only_cui: bool = False,\n addl_info: List[str] = [],\n separate_nn_components: bool = True,\n out_split_size_chars: Optional[int] = None,\n save_dir_path: str = os.path.abspath(os.getcwd()),\n min_free_memory=0.1) -> Dict:\n r\"\"\" Run multiprocessing for inference, if out_save_path and out_split_size_chars is used this will also continue annotating\n documents if something is saved in that directory.\n\n Args:\n data:\n Iterator or array with format: [(id, text), (id, text), ...]\n nproc (`int`, defaults to 8):\n Number of processors\n batch_size_chars (`int`, defaults to 1000000):\n Size of a batch in number of characters, this should be around: NPROC * average_document_length * 200\n separate_nn_components (`bool`, defaults to True):\n If set the medcat pipe will be broken up into NN and not-NN components and\n they will be run sequentially. This is useful as the NN components\n have batching and like to process many docs at once, while the rest of the pipeline\n runs the documents one by one.\n out_split_size_chars (`int`, None):\n If set once more than out_split_size_chars are annotated\n they will be saved to a file (save_dir_path) and the memory cleared. Recommended\n value is 20*batch_size_chars.\n save_dir_path(`str`, defaults to the current working directory):\n Where to save the annotated documents if splitting.\n min_free_memory(`float`, defaults to 0.1):\n If set a process will not start unless there is at least this much RAM memory left,\n should be a range between [0, 1] meaning how much of the memory has to be free. 
Helps when annotating\n very large datasets because spacy is not the best with memory management and multiprocessing.\n\n Returns:\n A dictionary: {id: doc_json, id2: doc_json2, ...}, in case out_split_size_chars is used\n the last batch will be returned while that and all previous batches will be\n written to disk (out_save_dir).\n \"\"\"\n for comp in self.pipe.spacy_nlp.components:\n if isinstance(comp[1], TransformersNER):\n raise Exception(\"Please do not use multiprocessing when running a transformer model for NER, run sequentially.\")\n\n # Set max document length\n self.pipe.spacy_nlp.max_length = self.config.preprocessing.get('max_document_length', 1000000)\n\n if self._meta_cats and not separate_nn_components:\n # Hack for torch using multithreading, which is not good if not \n #separate_nn_components, need for CPU runs only\n import torch\n torch.set_num_threads(1)\n\n nn_components = []\n if separate_nn_components:\n nn_components = self._separate_nn_components()\n\n if save_dir_path is not None:\n os.makedirs(save_dir_path, exist_ok=True)\n\n # \"5\" looks like a magic number here so better with comment about why the choice was made.\n internal_batch_size_chars = batch_size_chars // (5 * nproc)\n\n annotated_ids_path = os.path.join(save_dir_path, 'annotated_ids.pickle') if save_dir_path is not None else None\n if annotated_ids_path is not None and os.path.exists(annotated_ids_path):\n annotated_ids, part_counter = pickle.load(open(annotated_ids_path, 'rb'))\n else:\n annotated_ids = []\n part_counter = 0\n\n docs = {}\n _start_time = time.time()\n _batch_counter = 0 # Used for splitting the output, counts batches inbetween saves\n for batch in self._batch_generator(data, batch_size_chars, skip_ids=set(annotated_ids)):\n self.log.info(\"Annotated until now: %s docs; Current BS: %s docs; Elapsed time: %.2f minutes\",\n len(annotated_ids),\n len(batch),\n (time.time() - _start_time)/60)\n try:\n _docs = self._multiprocessing_batch(data=batch,\n nproc=nproc,\n only_cui=only_cui,\n batch_size_chars=internal_batch_size_chars,\n addl_info=addl_info,\n nn_components=nn_components,\n min_free_memory=min_free_memory)\n docs.update(_docs)\n annotated_ids.extend(_docs.keys())\n _batch_counter += 1\n del _docs\n if out_split_size_chars is not None and (_batch_counter * batch_size_chars) > out_split_size_chars:\n # Save to file and reset the docs \n part_counter = self._save_docs_to_file(docs=docs,\n annotated_ids=annotated_ids,\n save_dir_path=save_dir_path,\n annotated_ids_path=annotated_ids_path,\n part_counter=part_counter)\n del docs\n docs = {}\n _batch_counter = 0\n except Exception as e:\n self.log.warning(\"Failed an outer batch in the multiprocessing script\")\n self.log.warning(e, exc_info=True, stack_info=True)\n\n # Save the last batch\n if out_split_size_chars is not None and len(docs) > 0:\n # Save to file and reset the docs \n self._save_docs_to_file(docs=docs,\n annotated_ids=annotated_ids,\n save_dir_path=save_dir_path,\n annotated_ids_path=annotated_ids_path,\n part_counter=part_counter)\n\n # Enable the GPU Components again\n if separate_nn_components:\n for name, _ in nn_components:\n # No need to do anything else as it was already in the pipe\n self.pipe.spacy_nlp.enable_pipe(name)\n\n return docs\n\n def _multiprocessing_batch(self,\n data: Union[List[Tuple], Iterable[Tuple]],\n nproc: int = 8,\n batch_size_chars: int = 1000000,\n only_cui: bool = False,\n addl_info: List[str] = [],\n nn_components: List = [],\n min_free_memory: int = 0) -> Dict:\n r\"\"\" Run 
multiprocessing on one batch\n\n Args:\n data:\n Iterator or array with format: [(id, text), (id, text), ...]\n nproc (`int`, defaults to 8):\n Number of processors\n batch_size_chars (`int`, defaults to 1000000):\n Size of a batch in number of characters\n\n Returns:\n A dictionary: {id: doc_json, id2: doc_json2, ...}\n \"\"\"\n # Create the input output for MP\n with Manager() as manager:\n out_list = manager.list()\n lock = manager.Lock()\n in_q = manager.Queue(maxsize=10*nproc)\n\n id2text = {}\n for batch in self._batch_generator(data, batch_size_chars):\n if nn_components:\n # We need this for the json_to_fake_spacy\n id2text.update({k:v for k,v in batch})\n in_q.put(batch)\n\n # Final data point for workers\n for _ in range(nproc):\n in_q.put(None)\n sleep(2)\n\n # Create processes\n procs = []\n for i in range(nproc):\n p = Process(target=self._mp_cons,\n kwargs={'in_q': in_q,\n 'out_list': out_list,\n 'pid': i,\n 'only_cui': only_cui,\n 'addl_info': addl_info,\n 'min_free_memory': min_free_memory,\n 'lock': lock})\n p.start()\n procs.append(p)\n\n # Join processes\n for p in procs:\n p.join()\n\n docs = {}\n # Covnerts a touple into a dict\n docs.update({k:v for k,v in out_list})\n\n # If we have separate GPU components now we pipe that\n if nn_components:\n try:\n self._run_nn_components(docs, nn_components, id2text=id2text)\n except Exception as e:\n self.log.warning(e, exc_info=True, stack_info=True)\n\n return docs\n\n def multiprocessing_pipe(self,\n in_data: Union[List[Tuple], Iterable[Tuple]],\n nproc: Optional[int] = None,\n batch_size: Optional[int] = None,\n only_cui: bool = False,\n addl_info: List[str] = [],\n return_dict: bool = True,\n batch_factor: int = 2) -> Union[List[Tuple], Dict]:\n r\"\"\" Run multiprocessing NOT FOR TRAINING\n\n in_data: a list with format: [(id, text), (id, text), ...]\n nproc: the number of processors\n batch_size: the number of texts to buffer\n return_dict: a flag for returning either a dict or a list of tuples\n\n return: a dict: {id: doc_json, id: doc_json, ...} or if return_dict is False, a list of tuples: [(id, doc_json), (id, doc_json), ...]\n \"\"\"\n out: Union[Dict, List[Tuple]]\n\n if nproc == 0:\n raise ValueError(\"nproc cannot be set to zero\")\n\n in_data = list(in_data) if isinstance(in_data, Iterable) else in_data\n n_process = nproc if nproc is not None else min(max(cpu_count() - 1, 1), math.ceil(len(in_data) / batch_factor))\n batch_size = batch_size if batch_size is not None else math.ceil(len(in_data) / (batch_factor * abs(n_process)))\n\n start_method = None\n try:\n if self._meta_cats:\n import torch\n if torch.multiprocessing.get_start_method() != \"spawn\":\n start_method = torch.multiprocessing.get_start_method()\n torch.multiprocessing.set_start_method(\"spawn\", force=True)\n\n entities = self.get_entities_multi_texts(texts=in_data, only_cui=only_cui, addl_info=addl_info,\n n_process=n_process, batch_size=batch_size)\n finally:\n if start_method is not None:\n import torch\n torch.multiprocessing.set_start_method(start_method, force=True)\n\n if return_dict:\n out = {}\n for idx, data in enumerate(in_data):\n out[data[0]] = entities[idx]\n else:\n out = []\n for idx, data in enumerate(in_data):\n out.append((data[0], entities[idx]))\n\n return out\n\n def _mp_cons(self, in_q: Queue, out_list: List, min_free_memory: int, lock: Lock, pid: int = 0, only_cui: bool = False, addl_info: List = []) -> None:\n out: List = []\n\n while True:\n if not in_q.empty():\n if psutil.virtual_memory().available / 
psutil.virtual_memory().total < min_free_memory:\n with lock:\n out_list.extend(out)\n # Stop a process if there is not enough memory left\n break\n\n data = in_q.get()\n if data is None:\n with lock:\n out_list.extend(out)\n break\n\n for i_text, text in data:\n try:\n # Annotate document\n doc = self.get_entities(text=text, only_cui=only_cui, addl_info=addl_info)\n out.append((i_text, doc))\n except Exception as e:\n self.log.warning(\"PID: %s failed one document in _mp_cons, running will continue normally. \\n\" +\n \"Document length in chars: %s, and ID: %s\", pid, len(str(text)), i_text)\n self.log.warning(str(e))\n sleep(2)\n\n def _doc_to_out(self,\n doc: Doc,\n only_cui: bool,\n addl_info: List[str],\n out_with_text: bool = False) -> Dict:\n out: Dict = {'entities': {}, 'tokens': []}\n cnf_annotation_output = getattr(self.config, 'annotation_output', {})\n if doc is not None:\n out_ent: Dict = {}\n if self.config.general.get('show_nested_entities', False):\n _ents = []\n for _ent in doc._.ents:\n entity = Span(doc, _ent['start'], _ent['end'], label=_ent['label'])\n entity._.cui = _ent['cui']\n entity._.detected_name = _ent['detected_name']\n entity._.context_similarity = _ent['context_similarity']\n entity._.id = _ent['id']\n if 'meta_anns' in _ent:\n entity._.meta_anns = _ent['meta_anns']\n _ents.append(entity)\n else:\n _ents = doc.ents\n\n if cnf_annotation_output.get(\"lowercase_context\", True):\n doc_tokens = [tkn.text_with_ws.lower() for tkn in list(doc)]\n else:\n doc_tokens = [tkn.text_with_ws for tkn in list(doc)]\n\n if cnf_annotation_output.get('doc_extended_info', False):\n # Add tokens if extended info\n out['tokens'] = doc_tokens\n\n context_left = cnf_annotation_output.get('context_left', -1)\n context_right = cnf_annotation_output.get('context_right', -1)\n doc_extended_info = cnf_annotation_output.get('doc_extended_info', False)\n\n for _, ent in enumerate(_ents):\n cui = str(ent._.cui)\n if not only_cui:\n out_ent['pretty_name'] = self.cdb.get_name(cui)\n out_ent['cui'] = cui\n out_ent['type_ids'] = list(self.cdb.cui2type_ids.get(cui, ''))\n out_ent['types'] = [self.cdb.addl_info['type_id2name'].get(tui, '') for tui in out_ent['type_ids']]\n out_ent['source_value'] = ent.text\n out_ent['detected_name'] = str(ent._.detected_name)\n out_ent['acc'] = float(ent._.context_similarity)\n out_ent['context_similarity'] = float(ent._.context_similarity)\n out_ent['start'] = ent.start_char\n out_ent['end'] = ent.end_char\n for addl in addl_info:\n tmp = self.cdb.addl_info.get(addl, {}).get(cui, [])\n out_ent[addl.split(\"2\")[-1]] = list(tmp) if type(tmp) == set else tmp\n out_ent['id'] = ent._.id\n out_ent['meta_anns'] = {}\n\n if doc_extended_info:\n out_ent['start_tkn'] = ent.start\n out_ent['end_tkn'] = ent.end\n\n if context_left > 0 and context_right > 0:\n out_ent['context_left'] = doc_tokens[max(ent.start - context_left, 0):ent.start]\n out_ent['context_right'] = doc_tokens[ent.end:min(ent.end + context_right, len(doc_tokens))]\n out_ent['context_center'] = doc_tokens[ent.start:ent.end]\n\n if hasattr(ent._, 'meta_anns') and ent._.meta_anns:\n out_ent['meta_anns'] = ent._.meta_anns\n\n out['entities'][out_ent['id']] = dict(out_ent)\n else:\n out['entities'][ent._.id] = cui\n\n if cnf_annotation_output.get('include_text_in_output', False) or out_with_text:\n out['text'] = doc.text\n return out\n\n def _get_trimmed_text(self, text: Optional[str]) -> str:\n return text[0:self.config.preprocessing.get('max_document_length')] if text is not None and len(text) > 0 else 
\"\"\n\n def _generate_trimmed_texts(self, texts: Union[Iterable[str], Iterable[Tuple]]) -> Iterable[str]:\n text_: str\n for text in texts:\n text_ = text[1] if isinstance(text, tuple) else text\n yield self._get_trimmed_text(text_)\n\n def _get_trimmed_texts(self, texts: Union[Iterable[str], Iterable[Tuple]]) -> List[str]:\n trimmed: List = []\n text_: str\n for text in texts:\n text_ = text[1] if isinstance(text, tuple) else text\n trimmed.append(self._get_trimmed_text(text_))\n return trimmed\n\n @staticmethod\n def _pipe_error_handler(proc_name: str, proc: \"Pipe\", docs: List[Doc], e: Exception) -> None:\n CAT.log.warning(\"Exception raised when applying component %s to a batch of docs.\", proc_name)\n CAT.log.warning(e, exc_info=True, stack_info=True)\n if docs is not None:\n CAT.log.warning(\"Docs contained in the batch:\")\n for doc in docs:\n if hasattr(doc, \"text\"):\n CAT.log.warning(\"%s...\", doc.text[:50])\n\n @staticmethod\n def _get_doc_annotations(doc: Doc):\n if type(doc['annotations']) == list:\n return doc['annotations']\n if type(doc['annotations']) == dict:\n return doc['annotations'].values()\n return None\n\n def destroy_pipe(self):\n self.pipe.destroy()\n" ]
[ [ "torch.multiprocessing.set_start_method", "torch.multiprocessing.get_start_method", "torch.set_num_threads" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
akrouriad/rlberry
[ "dde4e2cbafca05fdef1df07646bb6368059eeadf" ]
[ "rlberry/utils/torch.py" ]
[ "import os\nimport re\nimport shutil\nfrom subprocess import check_output, run, PIPE\nimport numpy as np\nimport torch\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_gpu_memory_map():\n result = check_output(\n [\"nvidia-smi\", \"--query-gpu=memory.used\", \"--format=csv,nounits,noheader\"]\n )\n return [int(x) for x in result.split()]\n\n\ndef least_used_device():\n \"\"\"Get the GPU device with most available memory.\"\"\"\n if not torch.cuda.is_available():\n raise RuntimeError(\"cuda unavailable\")\n\n if shutil.which(\"nvidia-smi\") is None:\n raise RuntimeError(\n \"nvidia-smi unavailable: \\\ncannot select device with most least memory used.\"\n )\n\n memory_map = get_gpu_memory_map()\n device_id = np.argmin(memory_map)\n logger.info(\n f\"Choosing GPU device: {device_id}, \" f\"memory used: {memory_map[device_id]}\"\n )\n return torch.device(\"cuda:{}\".format(device_id))\n\n\ndef choose_device(preferred_device, default_device=\"cpu\"):\n if preferred_device == \"cuda:best\":\n try:\n preferred_device = least_used_device()\n except RuntimeError:\n logger.info(\n f\"Could not find least used device (nvidia-smi might be missing), use cuda:0 instead\"\n )\n if torch.cuda.is_available():\n return choose_device(\"cuda:0\")\n else:\n return choose_device(\"cpu\")\n try:\n torch.zeros((1,), device=preferred_device) # Test availability\n except (RuntimeError, AssertionError) as e:\n logger.info(\n f\"Preferred device {preferred_device} unavailable ({e}).\"\n f\"Switching to default {default_device}\"\n )\n return default_device\n return preferred_device\n\n\ndef get_memory(pid=None):\n if not pid:\n pid = os.getpid()\n command = \"nvidia-smi\"\n result = run(\n command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True\n ).stdout\n m = re.findall(\n \"\\| *[0-9] *\" + str(pid) + \" *C *.*python.*? +([0-9]+).*\\|\",\n result,\n re.MULTILINE,\n )\n return [int(mem) for mem in m]\n" ]
[ [ "numpy.argmin", "torch.cuda.is_available", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jtiscione/doodlecritic
[ "3af8245330523109b7452d3afc7d8d25d43d182c" ]
[ "train.py" ]
[ "import sys\nimport os\nfrom os.path import expanduser\nimport pickle\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data\nimport torch.onnx\nimport re\nimport json\nfrom PIL import Image, ImageDraw\nimport torch\nimport numpy as np\n\n# Training script- trains a Pytorch model against the Google Quickdraw dataset:\n# https://github.com/googlecreativelab/quickdraw-dataset\n#\n# Specifically, it uses the \"simplified Drawing files\":\n#\n# https://console.cloud.google.com/storage/browser/quickdraw_dataset/full/simplified\n#\n# Also see https://www.kaggle.com/google/tinyquickdraw for a single downloadable tar file\n# with about 50 million samples separated into 343 classes, which is where I got mine.\n#\n# It expects those files to be in ~/data/quickdraw. Specify any alternate path on the command line.\n# \n# As output it generates two files: doodles.pth (internal format) and doodles.onnx (ONNX export format).\n#\n# The model used here is a convolutional neural network accepting 1x64x64 inputs\n# (i.e. black-and-white 64x64 images). Output is 344 neurons (i.e. one per label) with an extra neuron\n# corresponding to label \"nothing\".\n# \n# NOTES:\n# \n# If doodles.pth is found (typically saved from a previous run), it will be loaded into the\n# current model; otherwise it will start with a set of random weights. File size is approx. 300 MB.\n# \n# If it finds at any point during training that the output files doodles.pth or doodles.onnx\n# are not on the drive, it will write new copies immediately with its current state (even though\n# this means the first versions will only contain random weights). Deleting the files\n# generates fresh copies, and so does finishing a training epoch (overwriting the prior versions).\n# Because the data set is so immense, each epoch takes several hours to complete.\n# In practice, with this model, performance levels off after about 3-4 epochs, with the network\n# agreeing with Google's classification about 73% of the time.\n# \n# This way, if you need to edit a hyperparameter or go to work, you can pause execution by\n# deleting the current doodles.pth and doodles.onnx files, letting it write new ones,\n# and then hitting Ctrl-C. Typically you will want to adjust the learning rate downward\n# or experiment with a different optimizer after the script has run for a few hours and\n# its performance has reached a plateau. After you make your edits the script will pick up\n# where it left off.\n#\n# If SAVE_BACKUP_FILES is set to True, the script will save backups as training progresses.\n# Each time performance reaches a new record, a file will be saved with a filename indicating the\n# new record number of correct responses. This is to avoid losing progress if the script crashes.\n# (Raising the batch size too high can cause spurious out-of-memory errors at random times.)\n\n\n# Specify data folder as command line argument; default is ~/data/quickdraw\nDATA_DIRECTORY = '~/data/quickdraw'\nif len(sys.argv) > 1:\n DATA_DIRECTORY = sys.argv[1]\nif DATA_DIRECTORY[0] == '~':\n DATA_DIRECTORY = expanduser(DATA_DIRECTORY)\n\n# Standard industry practice: Jack this number up as high as you can, then carefully lower it\n# until the script stops crashing. 
Final value is dependent on GPU memory.\n# This is a safe batch size to use on an RTX 2060 with 6 GB.\nBATCH_SIZE = 1000\n\n# Hyperparameters; both SGD and Adam work well, at least in the beginning; use SGD by default\nOPTIMIZER_NAME = 'SGD'\n\nSGD_LEARNING_RATE = 0.01\nSGD_MOMENTUM = 0\n\nADAM_LEARNING_RATE = 0.001\nADAM_BETAS = (0.9, 0.99)\nADAM_EPSILON = 0.0001\n\nINDEX_CACHE_FILE = './index_cache.pkl'\nLABELS_FILE = './labels.txt'\n\nSTATE_DICT_FILE = './doodles.pth'\nONNX_FILE = './doodles.onnx'\n\nSAVE_BACKUP_FILES = True\nNUMBERED_STATE_DICT_FILE_TEMPLATE = './doodles_{}_of_{}.pth'\nNUMBERED_ONNX_FILE_TEMPLATE = './doodles_{}_of_{}.onnx'\n\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# If it's installed, turn this on to enable NVidia's Apex AMP Pytorch extension.\n# This will let us do calculations in FP16 on the GPU which will save memory on the card\n# and let us raise the batch size. It will also leverage RTX tensor cores on RTX cards.\n# Default is set to False, because compiling and installing AMP is an involved process-\n# NVidia's CUDA Toolkit to be installed on your system before you can compile it using pip.\n\nMIXED_PRECISION = False\n\nif MIXED_PRECISION and torch.cuda.is_available():\n # See if the AMP Pytorch extension has been installed; otherwise stick to standard FP32.\n # If we are using mixed precision we can raise the batch size but keep it a multiple of 8.\n # All tensor dimensions must be multiples of 8 to trigger NVidia's tensor core optimizations.\n try:\n from apex import amp, optimizers\n MIXED_PRECISION = True\n BATCH_SIZE = int(BATCH_SIZE * 1.6) # Raising it by 60%\n print('Using mixed precision.')\n except ImportError:\n MIXED_PRECISION = False\n\n# This is a torch DataSet implementation that makes the following assumptions:\n#\n# 1. Data consists of a set of text files with \".ndjson\" extensions in the specified directory.\n# 2. Each line in the .ndjson file is a JSON string with all data for a single sample.\n# 3. Each line of JSON has the following format (omitting extraneous fields):\n# {\"word\":\"elephant\",\"drawing\":[[[0, 1, 10],[25, 103, 163]],[[4,15,134,234,250],[27,22,6,4,0]]]}\n# Array \"drawing\" has the brush strokes, each stroke a pair of arrays with x and y coordinates on a 256x256 grid.\n# 4. We can build our label list by only looking at the first line of each file. 
(All lines have same value for \"word\".)\nclass QuickDrawDataset(torch.utils.data.Dataset):\n\n # Take the batch size, so we know how much to pad with all-zero samples mapping to the \"blank\" channel.\n # This way we ensure we deliver full-sized batches interspersed with a few blank samples mapping to label \"nothing\".\n def __init__(self, dataDir, batch_size):\n super(QuickDrawDataset, self).__init__()\n print('Data folder: ' + dataDir)\n self.dataDir = dataDir\n self.filenames = list(filter(lambda x: x.endswith(\".ndjson\"), sorted(os.listdir(dataDir)))) #[1:20]\n self.filenameByIndex = []\n self.fileByteOffsetByIndex = []\n self.labelListIndices = {}\n self.labelList = []\n\n for filename in self.filenames:\n print('Indexing ' + filename)\n file = open(dataDir + \"/\" + filename, \"r\")\n byte_offset = 0\n word = None\n for line in file:\n if (word == None):\n words = re.findall('\\\"word\\\":\\\"([\\w\\s-]+)\\\"', line)\n word = words[0]\n self.labelListIndices[word] = len(self.labelList)\n self.labelList.append(word)\n # Only use the ones Google recognizes\n if (len(re.findall('\\\"recognized\\\":true', line)) > 0):\n self.filenameByIndex.append(filename)\n self.fileByteOffsetByIndex.append(byte_offset)\n byte_offset += len(line)\n file.close()\n\n self.labelListIndices['nothing'] = len(self.labelList)\n self.labelList.append('nothing')\n if MIXED_PRECISION:\n # NVidia really wants tensor dimensions to be multiples of 8, make sure here\n extra_nothings = 0\n while len(self.labelList) % 8 > 0:\n extra_nothings += 1\n self.labelListIndices['nothing_{}'.format(extra_nothings)] = len(self.labelList)\n self.labelList.append('nothing_{}'.format(extra_nothings))\n\n self.paddingLength = batch_size - (len(self.filenameByIndex) % batch_size)\n print('padding length {}'.format(self.paddingLength))\n\n def __len__(self):\n return len(self.filenameByIndex) + self.paddingLength\n\n def __getitem__(self, idx):\n if idx >= len(self.filenameByIndex):\n # NULL sample\n return torch.zeros(1, 64, 64, dtype=torch.float), self.labelListIndices['nothing']\n filename = self.filenameByIndex[idx]\n byte_offset = self.fileByteOffsetByIndex[idx]\n file = open(self.dataDir + '/' + filename, 'r')\n file.seek(byte_offset)\n line = file.readline()\n file.close()\n # Convert line containing brush stroke coordinate list to a 256x256 image tensor using PIL\n entry = json.loads(line)\n drawing = entry.get('drawing')\n im = Image.new(\"L\", (256, 256))\n draw = ImageDraw.Draw(im)\n for stroke in drawing:\n x_coords = stroke[0]\n y_coords = stroke[1]\n for i in range(len(x_coords) - 1):\n draw.line((x_coords[i], y_coords[i], x_coords[i + 1], y_coords[i + 1]), fill=255, width=5)\n im = im.resize((64, 64), Image.ANTIALIAS)\n word = entry.get('word')\n imageTensor = torch.tensor(np.array(im) / 256, dtype=torch.float)\n\n # Alter image slightly to look like the inputs we're eventually going to get from the client.\n # This is a limitation imposed by JavaScript which implements \"antialiasing\" on downsized canvases by\n # nearest-neighbor downsampling, smoothed onscreen by a WebGL filter that looks nice but doesn't alter the image data,\n # so we only get two-color jagged images.\n #\n # Tedious workarounds are possible: https://stackoverflow.com/questions/2303690/resizing-an-image-in-an-html5-canvas\n THRESHOLD = 0.1\n imageTensor[imageTensor >= THRESHOLD] = 1.0\n imageTensor[imageTensor < THRESHOLD] = 0.0\n\n imageTensor = imageTensor.unsqueeze(0)\n\n return imageTensor, self.labelListIndices.get(word)\n\n# Takes input 
of size Nx1x64x64, a batch of N black and white 64x64 images.\n# Applies two convolutional layers and three fully connected layers.\n\nclass CNNModel(nn.Module):\n\n # input_size is 64 (input samples are 64x64 images); num_classes is 344\n def __init__(self, input_size, num_classes):\n super(CNNModel, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=2, bias=False),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2))\n dimension = int(64 * pow(input_size / 4, 2))\n self.fc1 = nn.Sequential(nn.Linear(dimension, int(dimension / 4)), nn.Dropout(0.25))\n self.fc2 = nn.Sequential(nn.Linear(int(dimension / 4), int(dimension / 8)), nn.Dropout(0.25))\n self.fc3 = nn.Sequential(nn.Linear(int(dimension / 8), num_classes))\n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.view(out.size(0), -1)\n out = self.fc1(out)\n out = self.fc2(out)\n out = self.fc3(out)\n return out\n\n# Main part\nif __name__ == '__main__':\n\n if os.path.isfile(INDEX_CACHE_FILE):\n print(\"Loading {}\".format(INDEX_CACHE_FILE))\n infile = open(INDEX_CACHE_FILE, 'rb')\n dataSet = pickle.load(infile)\n infile.close()\n else:\n dataSet = QuickDrawDataset(DATA_DIRECTORY, BATCH_SIZE)\n outfile = open(INDEX_CACHE_FILE, 'wb')\n pickle.dump(dataSet, outfile)\n outfile.close()\n print(\"Saved {}\".format(INDEX_CACHE_FILE))\n\n if (os.path.isfile(LABELS_FILE) == False):\n with open(LABELS_FILE, 'w') as f:\n for label in dataSet.labelList:\n f.write(\"%s\\n\" % label)\n f.close()\n print(\"Saved {}\".format(LABELS_FILE))\n\n print('Total number of labels: {}'.format(len(dataSet.labelList)))\n print('Total number of samples: {}'.format(len(dataSet)))\n\n randomSampler = torch.utils.data.RandomSampler(dataSet)\n dataLoader = torch.utils.data.DataLoader(dataSet, batch_size = BATCH_SIZE, sampler = randomSampler, num_workers=4, pin_memory=True)\n\n model = CNNModel(input_size=64, num_classes=len(dataSet.labelList)).to(DEVICE)\n\n if (os.path.isfile(STATE_DICT_FILE)):\n # We found an existing doodles.pth file! 
Instead of starting from scratch we'll load this one.\n # and continue training it.\n print(\"Loading {}\".format(STATE_DICT_FILE))\n state_dict = torch.load(STATE_DICT_FILE)\n model.load_state_dict(state_dict)\n\n optimizer = None\n if (OPTIMIZER_NAME == 'SGD'):\n optimizer = optim.SGD(model.parameters(), lr = SGD_LEARNING_RATE, momentum=SGD_MOMENTUM)\n print('Using SGD with learning rate {} and momentum {}'.format(SGD_LEARNING_RATE, SGD_MOMENTUM))\n elif (OPTIMIZER_NAME == 'Adam'):\n if MIXED_PRECISION:\n optimizer = optim.Adam(model.parameters(), lr = ADAM_LEARNING_RATE, betas = ADAM_BETAS, eps = ADAM_EPSILON)\n else:\n optimizer = optim.Adam(model.parameters(), lr = ADAM_LEARNING_RATE)\n print('Using Adam with learning rate {}'.format(ADAM_LEARNING_RATE))\n else:\n print('No optimizer specified!')\n\n if MIXED_PRECISION:\n # Using NVidia's AMP Pytorch extension\n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O1\")\n\n criterion = nn.CrossEntropyLoss()\n\n ROLLING_AVERAGE_RUN_LENGTH = 100\n rolling = np.zeros(0)\n record_rolling_average = 0\n count = 0\n\n # On my computer each epoch takes about 4 hours; the script consumes ~250 watts or about 1 kWh per epoch.\n # Performance reaches a plateau after 3-4 epochs.\n for epoch in range(4):\n print('Epoch: {}'.format(epoch))\n batch_number = 0\n for i, (images, labels) in enumerate(dataLoader):\n count = count + 1\n images = images.to(DEVICE)\n labels = labels.to(DEVICE)\n optimizer.zero_grad()\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n correct = (predicted == labels).sum().item()\n if (count < ROLLING_AVERAGE_RUN_LENGTH):\n rolling = np.insert(rolling, 0, correct)\n else:\n rolling = np.roll(rolling, 1)\n rolling[0] = correct\n rolling_average = int(np.mean(rolling))\n loss = criterion(outputs, labels)\n if MIXED_PRECISION:\n # Use of FP16 requires loss scaling, due to underflow error.\n # See https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n optimizer.step()\n print('EPOCH: {} BATCH: {} SIZE: {} CORRECT: {} (ROLLING AVG: {})'.format(epoch, batch_number, BATCH_SIZE, correct, rolling_average))\n batch_number += 1\n # print(loss.item())\n\n # To be safe, save model whenever performance reaches a new high\n if (count < 2 * ROLLING_AVERAGE_RUN_LENGTH): # (once rolling average has had time to stabilize)\n record_rolling_average = max(rolling_average, record_rolling_average)\n else:\n if (rolling_average > record_rolling_average):\n # Save model with a munged filename; e.g. 
doodles_706.pth\n if (SAVE_BACKUP_FILES):\n backupPth = NUMBERED_STATE_DICT_FILE_TEMPLATE.format(rolling_average, BATCH_SIZE)\n torch.save(model.state_dict(), backupPth)\n print('Saved model file {}'.format(backupPth))\n # Delete the last backup .pth file we wrote to avoid filling up the drive\n if (record_rolling_average > 0):\n old_file = NUMBERED_STATE_DICT_FILE_TEMPLATE.format(record_rolling_average, BATCH_SIZE)\n if os.path.exists(old_file):\n os.remove(old_file)\n # Same for ONNX\n backupOnnx = NUMBERED_ONNX_FILE_TEMPLATE.format(rolling_average, BATCH_SIZE)\n if MIXED_PRECISION:\n with amp.disable_casts():\n dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)\n torch.onnx.export(model, dummy_input, backupOnnx, verbose=False)\n else:\n dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)\n torch.onnx.export(model, dummy_input, backupOnnx, verbose=False)\n print('Saved ONNX file {}'.format(backupOnnx))\n # Delete the last backup ONNX file we wrote to avoid filling up the drive\n if (record_rolling_average > 0):\n old_file = NUMBERED_ONNX_FILE_TEMPLATE.format(record_rolling_average, BATCH_SIZE)\n if os.path.exists(old_file):\n os.remove(old_file)\n record_rolling_average = rolling_average\n\n # Deleting the model file during training triggers a fresh rewrite:\n if (os.path.isfile(STATE_DICT_FILE) == False):\n torch.save(model.state_dict(), STATE_DICT_FILE)\n print('Saved model file {}'.format(STATE_DICT_FILE))\n # ONNX: same policy\n if (os.path.isfile(ONNX_FILE) == False):\n if MIXED_PRECISION:\n with amp.disable_casts():\n dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)\n torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=False)\n else:\n dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)\n torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=False)\n print('Exported ONNX file {}'.format(ONNX_FILE))\n # Epoch finished\n # Save the current model at the end of an epoch\n torch.save(model.state_dict(), STATE_DICT_FILE)\n # Export ONNX with loudmouth flag set\n if (MIXED_PRECISION):\n with amp.disable_casts():\n dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)\n torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=True)\n else:\n dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)\n torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=True)\n print('EPOCH {} FINISHED, SAVED {} AND {}'.format(epoch, STATE_DICT_FILE, ONNX_FILE))\n" ]
[ [ "torch.max", "torch.load", "torch.zeros", "torch.utils.data.DataLoader", "numpy.mean", "torch.cuda.is_available", "numpy.roll", "torch.nn.CrossEntropyLoss", "torch.nn.Dropout", "torch.onnx.export", "torch.randn", "numpy.insert", "numpy.zeros", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "numpy.array", "torch.utils.data.RandomSampler", "torch.nn.MaxPool2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
r-graves/demo_lab
[ "729cdf61774bf32d2c07ca68bf70e65470700cc2" ]
[ "venv/lib/python3.10/site-packages/pandas/tests/series/methods/test_drop_duplicates.py" ]
[ "import numpy as np\nimport pytest\n\nfrom pandas import (\n NA,\n Categorical,\n Series,\n)\nimport pandas._testing as tm\n\n\[email protected](\n \"keep, expected\",\n [\n (\"first\", Series([False, False, False, False, True, True, False])),\n (\"last\", Series([False, True, True, False, False, False, False])),\n (False, Series([False, True, True, False, True, True, False])),\n ],\n)\ndef test_drop_duplicates(any_numpy_dtype, keep, expected):\n tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype))\n\n if tc.dtype == \"bool\":\n pytest.skip(\"tested separately in test_drop_duplicates_bool\")\n\n tm.assert_series_equal(tc.duplicated(keep=keep), expected)\n tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(keep=keep, inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc[~expected])\n\n\[email protected](\n \"keep, expected\",\n [\n (\"first\", Series([False, False, True, True])),\n (\"last\", Series([True, True, False, False])),\n (False, Series([True, True, True, True])),\n ],\n)\ndef test_drop_duplicates_bool(keep, expected):\n tc = Series([True, False, True, False])\n\n tm.assert_series_equal(tc.duplicated(keep=keep), expected)\n tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(keep=keep, inplace=True)\n tm.assert_series_equal(sc, tc[~expected])\n assert return_value is None\n\n\[email protected](\"values\", [[], list(range(5))])\ndef test_drop_duplicates_no_duplicates(any_numpy_dtype, keep, values):\n tc = Series(values, dtype=np.dtype(any_numpy_dtype))\n expected = Series([False] * len(tc), dtype=\"bool\")\n\n if tc.dtype == \"bool\":\n # 0 -> False and 1-> True\n # any other value would be duplicated\n tc = tc[:2]\n expected = expected[:2]\n\n tm.assert_series_equal(tc.duplicated(keep=keep), expected)\n\n result_dropped = tc.drop_duplicates(keep=keep)\n tm.assert_series_equal(result_dropped, tc)\n\n # validate shallow copy\n assert result_dropped is not tc\n\n\nclass TestSeriesDropDuplicates:\n @pytest.fixture(\n params=[\"int_\", \"uint\", \"float_\", \"unicode_\", \"timedelta64[h]\", \"datetime64[D]\"]\n )\n def dtype(self, request):\n return request.param\n\n @pytest.fixture\n def cat_series1(self, dtype, ordered):\n # Test case 1\n cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))\n\n input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))\n cat = Categorical(input1, categories=cat_array, ordered=ordered)\n tc1 = Series(cat)\n return tc1\n\n def test_drop_duplicates_categorical_non_bool(self, cat_series1):\n tc1 = cat_series1\n\n expected = Series([False, False, False, True])\n\n result = tc1.duplicated()\n tm.assert_series_equal(result, expected)\n\n result = tc1.drop_duplicates()\n tm.assert_series_equal(result, tc1[~expected])\n\n sc = tc1.copy()\n return_value = sc.drop_duplicates(inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc1[~expected])\n\n def test_drop_duplicates_categorical_non_bool_keeplast(self, cat_series1):\n tc1 = cat_series1\n\n expected = Series([False, False, True, False])\n\n result = tc1.duplicated(keep=\"last\")\n tm.assert_series_equal(result, expected)\n\n result = tc1.drop_duplicates(keep=\"last\")\n tm.assert_series_equal(result, tc1[~expected])\n\n sc = tc1.copy()\n return_value = sc.drop_duplicates(keep=\"last\", inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc1[~expected])\n\n def 
test_drop_duplicates_categorical_non_bool_keepfalse(self, cat_series1):\n tc1 = cat_series1\n\n expected = Series([False, False, True, True])\n\n result = tc1.duplicated(keep=False)\n tm.assert_series_equal(result, expected)\n\n result = tc1.drop_duplicates(keep=False)\n tm.assert_series_equal(result, tc1[~expected])\n\n sc = tc1.copy()\n return_value = sc.drop_duplicates(keep=False, inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc1[~expected])\n\n @pytest.fixture\n def cat_series2(self, dtype, ordered):\n # Test case 2; TODO: better name\n cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))\n\n input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))\n cat = Categorical(input2, categories=cat_array, ordered=ordered)\n tc2 = Series(cat)\n return tc2\n\n def test_drop_duplicates_categorical_non_bool2(self, cat_series2):\n # Test case 2; TODO: better name\n tc2 = cat_series2\n\n expected = Series([False, False, False, False, True, True, False])\n\n result = tc2.duplicated()\n tm.assert_series_equal(result, expected)\n\n result = tc2.drop_duplicates()\n tm.assert_series_equal(result, tc2[~expected])\n\n sc = tc2.copy()\n return_value = sc.drop_duplicates(inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc2[~expected])\n\n def test_drop_duplicates_categorical_non_bool2_keeplast(self, cat_series2):\n tc2 = cat_series2\n\n expected = Series([False, True, True, False, False, False, False])\n\n result = tc2.duplicated(keep=\"last\")\n tm.assert_series_equal(result, expected)\n\n result = tc2.drop_duplicates(keep=\"last\")\n tm.assert_series_equal(result, tc2[~expected])\n\n sc = tc2.copy()\n return_value = sc.drop_duplicates(keep=\"last\", inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc2[~expected])\n\n def test_drop_duplicates_categorical_non_bool2_keepfalse(self, cat_series2):\n tc2 = cat_series2\n\n expected = Series([False, True, True, False, True, True, False])\n\n result = tc2.duplicated(keep=False)\n tm.assert_series_equal(result, expected)\n\n result = tc2.drop_duplicates(keep=False)\n tm.assert_series_equal(result, tc2[~expected])\n\n sc = tc2.copy()\n return_value = sc.drop_duplicates(keep=False, inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc2[~expected])\n\n def test_drop_duplicates_categorical_bool(self, ordered):\n tc = Series(\n Categorical(\n [True, False, True, False], categories=[True, False], ordered=ordered\n )\n )\n\n expected = Series([False, False, True, True])\n tm.assert_series_equal(tc.duplicated(), expected)\n tm.assert_series_equal(tc.drop_duplicates(), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc[~expected])\n\n expected = Series([True, True, False, False])\n tm.assert_series_equal(tc.duplicated(keep=\"last\"), expected)\n tm.assert_series_equal(tc.drop_duplicates(keep=\"last\"), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(keep=\"last\", inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc[~expected])\n\n expected = Series([True, True, True, True])\n tm.assert_series_equal(tc.duplicated(keep=False), expected)\n tm.assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(keep=False, inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc[~expected])\n\n def test_drop_duplicates_categorical_bool_na(self):\n # GH#44351\n ser = 
Series(\n Categorical(\n [True, False, True, False, NA], categories=[True, False], ordered=True\n )\n )\n result = ser.drop_duplicates()\n expected = Series(\n Categorical([True, False, np.nan], categories=[True, False], ordered=True),\n index=[0, 1, 4],\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_drop_duplicates_pos_args_deprecation():\n # GH#41485\n s = Series([\"a\", \"b\", \"c\", \"b\"])\n msg = (\n \"In a future version of pandas all arguments of \"\n \"Series.drop_duplicates will be keyword-only\"\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s.drop_duplicates(\"last\")\n expected = Series([\"a\", \"c\", \"b\"], index=[0, 2, 3])\n tm.assert_series_equal(expected, result)\n" ]
[ [ "pandas._testing.assert_produces_warning", "pandas.Series", "pandas.Categorical", "numpy.dtype", "pandas._testing.assert_series_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ivanfdezr/CentralSoftware
[ "8681fedd4814dc60deb527a370411350b40c994c", "8681fedd4814dc60deb527a370411350b40c994c" ]
[ "OneSpanAnalysis_Mdl.py", "MdlUtilities.py" ]
[ "import numpy as np\r\nimport numpy.linalg as la\r\nfrom MdlUtilities import Field, FieldList\r\nimport MdlUtilities as mdl\r\n\r\n\r\ndef get_osaCasing_fields():\r\n\r\n\t\r\n\tOD = Field(2030)\r\n\tID = Field(2031)\r\n\tWeight = Field(2032)\r\n\tDensity = Field(2039)\r\n\tE = Field(2040)\r\n\tosaCasing_fields = FieldList()\r\n\tosaCasing_fields.append( OD )\r\n\tosaCasing_fields.append( ID )\r\n\tosaCasing_fields.append( Weight )\r\n\tosaCasing_fields.append( Density )\r\n\tosaCasing_fields.append( E )\t\r\n\t\r\n\treturn osaCasing_fields\r\n\r\n\r\ndef get_osaCent_fields():\r\n\r\n\tType = Field(2049)\r\n\tIPOD = Field(2009)\r\n\tCentOD = Field(2011)\r\n\t#CentID = Field(2012)\r\n\tResF_SO67 = Field(2018)\r\n\tminResF = Field(2017)\r\n\tSO_minResF = Field(2019)\r\n\tResF_SO67.set_representation('Res. Force @ SO=67%')\r\n\tminResF.set_representation('minimum Res. Force')\r\n\tSO_minResF.set_representation('StandOff @ min. Res. F.')\r\n\tosaCent_fields = FieldList()\r\n\tosaCent_fields.append( Type )\r\n\tosaCent_fields.append( IPOD )\r\n\tosaCent_fields.append( CentOD )\r\n\t#osaCent_fields.append( CentID )\r\n\tosaCent_fields.append( ResF_SO67 )\r\n\tosaCent_fields.append( minResF )\r\n\tosaCent_fields.append( SO_minResF )\r\n\t\r\n\treturn osaCent_fields\r\n\r\n\r\ndef get_osaWellbore_fields():\r\n\r\n\tHoleID = Field(2010)\r\n\tMaxSpan = Field(2061)\r\n\tMudIPDensity = Field(2077)\r\n\tMudOPDensity = Field(2077)\r\n\tHoleID.set_representation('Hole ID')\r\n\tHoleID.set_abbreviation('HoleID')\r\n\tMaxSpan.set_representation('Max span')\r\n\tMaxSpan.set_abbreviation('MaxSpan')\r\n\tMudIPDensity.set_representation('Mud inside pipe')\r\n\tMudIPDensity.set_abbreviation('MudIPDensity')\r\n\tMudOPDensity.set_representation('Mud in annulus')\r\n\tMudOPDensity.set_abbreviation('MudOPDensity')\r\n\tosaWellbore_fields = FieldList()\r\n\tosaWellbore_fields.append( HoleID )\r\n\tosaWellbore_fields.append( MaxSpan )\r\n\tosaWellbore_fields.append( MudIPDensity )\r\n\tosaWellbore_fields.append( MudOPDensity )\r\n\t\r\n\treturn osaWellbore_fields\r\n\r\n\r\ndef get_osaOutputdata1_fields():\r\n\r\n\tclearanceA = Field(2073, altBg=True, altFg=True)\r\n\tclearanceB = Field(2073, altBg=True, altFg=True)\r\n\tclearanceM = Field(2073, altBg=True, altFg=True)\r\n\tsideForceA = Field(2074, altBg=True, altFg=True)\r\n\tsideForceB = Field(2074, altBg=True, altFg=True)\r\n\tsideForceM = Field(2074, altBg=True, altFg=True)\r\n\tstandoffA = Field(2078, altBg=True, altFg=True)\r\n\tstandoffB = Field(2078, altBg=True, altFg=True)\r\n\tstandoffM = Field(2078, altBg=True, altFg=True)\r\n\tclearanceA.set_representation('Annular clearance @ cent. A')\r\n\tclearanceA.set_abbreviation('ClearanceA')\r\n\tclearanceB.set_representation('Annular clearance @ cent. B')\r\n\tclearanceB.set_abbreviation('ClearanceB')\r\n\tclearanceM.set_representation('Annular clearance @ mid span')\r\n\tclearanceM.set_abbreviation('ClearanceM')\r\n\tsideForceA.set_representation('Side force @ cent. A')\r\n\tsideForceA.set_abbreviation('SideForceA')\r\n\tsideForceB.set_representation('Side force @ cent. B')\r\n\tsideForceB.set_abbreviation('SideForceB')\r\n\tsideForceM.set_representation('Side force @ mid span')\r\n\tsideForceM.set_abbreviation('SideForceM')\r\n\tstandoffA.set_representation('Standoff @ cent. A')\r\n\tstandoffA.set_abbreviation('StandoffA')\r\n\tstandoffB.set_representation('Standoff @ cent. 
B')\r\n\tstandoffB.set_abbreviation('StandoffB')\r\n\tstandoffM.set_representation('Standoff @ mid span')\r\n\tstandoffM.set_abbreviation('StandoffM')\r\n\tosaOutputdata1_fields = FieldList()\r\n\tosaOutputdata1_fields.append( clearanceA )\r\n\tosaOutputdata1_fields.append( clearanceB )\r\n\tosaOutputdata1_fields.append( clearanceM )\r\n\tosaOutputdata1_fields.append( sideForceA )\r\n\tosaOutputdata1_fields.append( sideForceB )\r\n\tosaOutputdata1_fields.append( sideForceM )\r\n\tosaOutputdata1_fields.append( standoffA )\r\n\tosaOutputdata1_fields.append( standoffB )\r\n\tosaOutputdata1_fields.append( standoffM )\r\n\t\r\n\treturn osaOutputdata1_fields\r\n\r\n\r\ndef get_osaOutputdata2_fields():\r\n\r\n\taxialForce = Field(2075, altBg=True, altFg=True)\r\n\tdeflection = Field(2076, altBg=True, altFg=True)\r\n\twClearance = Field(2073, altBg=True, altFg=True)\r\n\twStandoff = Field(2078, altBg=True, altFg=True)\r\n\taxialForce.set_representation('Axial extra force @ top')\r\n\taxialForce.set_abbreviation('AxialForce')\r\n\tdeflection.set_representation('Max. pipe deflection')\r\n\tdeflection.set_abbreviation('MaxDeflection')\r\n\twClearance.set_representation('Mean wellbore clearance')\r\n\twClearance.set_abbreviation('WellboreClearance')\r\n\twStandoff.set_representation('Mean wellbore standoff')\r\n\twStandoff.set_abbreviation('WellboreStandoff')\r\n\tosaOutputdata2_fields = FieldList()\r\n\tosaOutputdata2_fields.append( axialForce )\r\n\tosaOutputdata2_fields.append( deflection )\r\n\tosaOutputdata2_fields.append( wClearance )\r\n\tosaOutputdata2_fields.append( wStandoff )\r\n\t\r\n\treturn osaOutputdata2_fields\r\n\r\n\r\ndef get_casingDeflectionCurve(self):\r\n\r\n\t# Equation(s) Reference 1:\r\n\t# \tHans C. Juvkam-Wold, Jiang Wu. Casing Deflection and Centralizer Spacing Calculations.\r\n\t# \tSPE Drilling Engineering (December 1992).\r\n\r\n\t# Equation(s) Reference 2:\r\n\t# \tHans C. Juvkam-Wold, Richard L. Baxter. Discussion of Optimal Spacing for Casing Centralizers.\r\n\t# \tSPE Drilling Engineering (December 1988).\r\n\r\n\t# Equation(s) Reference 3:\r\n\t# \tCarlos F. H. Fonseca, Jacques Braile. 
Optimizing of Centralizer Distribution.\r\n\t# \tSPE Latin American Petroleum Engineering Conference (October 1990).\r\n\r\n\tself.osaCasing_fields.referenceUnitConvert_fields()\r\n\tself.osaCentA_fields.referenceUnitConvert_fields()\r\n\tself.osaCentB_fields.referenceUnitConvert_fields()\r\n\tself.osaWellbore_fields.referenceUnitConvert_fields()\r\n\r\n\tRot = lambda φ: np.array( [[np.cos(φ),-np.sin(φ)],[np.sin(φ),np.cos(φ)]] )\r\n\r\n\tdH = self.osaWellbore_fields.HoleID[0]\r\n\tL = self.osaWellbore_fields.MaxSpan[0]*self.osaSpacing_slider.sliderPosition()/100\r\n\tρe = self.osaWellbore_fields.MudOPDensity[0]\r\n\tρi = self.osaWellbore_fields.MudIPDensity[0]\r\n\tρs = self.osaCasing_fields.Density[0]\r\n\tE = self.osaCasing_fields.E[0]\r\n\tw = self.osaCasing_fields.PW[0]\r\n\tD = self.osaCasing_fields.OD[0]\r\n\td = self.osaCasing_fields.ID[0]\r\n\tType_A = self.osaCentA_fields.Type[0]\r\n\tF_So67_A = self.osaCentA_fields.ResF_SO67[0]\r\n\tminF_A = self.osaCentA_fields.minResF[0]\r\n\tSo_minF_A = self.osaCentA_fields.SO_minResF[0]\r\n\tDA = self.osaCentA_fields.COD[0]\r\n\tdA = self.osaCentA_fields.IPOD[0]\r\n\tType_B = self.osaCentB_fields.Type[0]\r\n\tF_So67_B = self.osaCentB_fields.ResF_SO67[0]\r\n\tminF_B = self.osaCentB_fields.minResF[0]\r\n\tSo_minF_B = self.osaCentB_fields.SO_minResF[0]\r\n\tDB = self.osaCentB_fields.COD[0]\r\n\tdB = self.osaCentB_fields.IPOD[0]\r\n\t#kA = ResFA/(DA/2-0.335*(DA-D)) # Con esto se calculan los coeficientes de los resortes ( 0.335=0.67/2 )\r\n\t#kB = ResFB/(DB/2-0.335*(DB-D))\r\n\r\n\tfor field in self.osaWellbore_fields:\r\n\t\tif field[0]<0:\r\n\t\t\traise mdl.LogicalError('Every parameter should be greater than zero.')\r\n\tfor field in self.osaCasing_fields:\r\n\t\tif field[0]<0:\r\n\t\t\traise mdl.LogicalError('Every parameter should be greater than zero.')\r\n\tfor field in self.osaCentA_fields[1:]:\r\n\t\tif field[0]<0:\r\n\t\t\traise mdl.LogicalError('Every parameter should be greater than zero.')\r\n\tfor field in self.osaCentB_fields[1:]:\r\n\t\tif field[0]<0:\r\n\t\t\traise mdl.LogicalError('Every parameter should be greater than zero.')\r\n\r\n\tif dA!=D or dB!=D or dH<=D:\r\n\t\traise mdl.LogicalError('The selected devices are not size-consistent.')\r\n\r\n\tθ = np.pi*self.osaInclination_slider.sliderPosition()/180\r\n\tI = np.pi/64*(D**4-d**4) # [Ref.3] Momento de inercia diferente a momento de inercia polar.\r\n\tF = 30000 # [Ref.1]\r\n\tRadio = L*1e6\r\n\taspr = L*0.02\r\n\r\n\tbuoyancyFactor = mdl.calculate_buoyancyFactor( OD=D, ID=d, ρs=ρs, ρe=ρe, ρi=ρi ) # [Ref.2]\r\n\tw *= buoyancyFactor\r\n\tfC = w*L*np.sin(θ)/2\r\n\r\n\tif Type_A=='Resin': #mdl.isNoneEntry(ResFA):\r\n\t\tyA = 0\r\n\t\tdA = d\r\n\telse:\r\n\t\tkA = 2*(F_So67_A-minF_A)/(So_minF_A-0.67)/(DA-dA)\r\n\t\tyA = fC/kA if (DA<dH) else fC/kA/2\r\n\t\t\r\n\r\n\tif Type_B=='Resin': #mdl.isNoneEntry(ResFB):\r\n\t\tyB = 0\r\n\t\tdB = d\r\n\telse:\r\n\t\tkB = 2*(F_So67_B-minF_B)/(So_minF_B-0.67)/(DB-dB)\r\n\t\tyB = fC/kB if (DB<dH) else fC/kB/2\r\n\r\n\tR = D/2\r\n\trH = dH/2\r\n\trA_min = R+(DA/2-R)*0.1\r\n\trB_min = R+(DB/2-R)*0.1\r\n\trA = (DA/2-yA) if (DA<dH) else (rH-yA)\r\n\trB = (DB/2-yB) if (DB<dH) else (rH-yB)\r\n\r\n\trA = rA_min if (rA<=rA_min) else rA\r\n\trB = rB_min if (rB<=rB_min) else rB\r\n\r\n\tα = np.arctan( (rB-rA)/L )\r\n\tLα = L/np.cos(α)\r\n\tx = np.linspace( 0, Lα, 101 )\r\n\r\n\tK = np.sqrt(F/E/I)\r\n\ty = (Lα/2/Radio/K + w*Lα*np.sin(θ)/2/K/F)*( (np.cosh(K*x)-1)/np.tanh(K*Lα/2) + K*x - np.sinh(K*x) ) - w*np.sin(θ)/2/F*x**2 # [Ref.1]\r\n\tRα = 
Rot(α)\r\n\txy = np.array([x,y])\r\n\tx,y = np.dot(Rα,xy)\r\n\tΔy = rH-rB\r\n\ty += Δy\r\n\r\n\tcH = rH-R\r\n\tcA = rA-R\r\n\tcB = rB-R\r\n\t\r\n\tindexes = y>cH\r\n\ty[indexes] = cH\r\n\tindexes = y<-cH\r\n\ty[indexes] =-cH\r\n\tcy = cH-y\r\n\r\n\trM = rH-y[50]\r\n\tif y[50]==cH:\r\n\t\tfM = fC\r\n\t\tfC = 0\r\n\telse:\r\n\t\tfM = 0\r\n\tcM = rM-R\r\n\r\n\tx -= L/2\r\n\tyoh = y*0\r\n\tohc = np.array([x, yoh])\r\n\tohp = np.array([x, (yoh+rH)*aspr])\r\n\tohm = np.array([x, (yoh-rH)*aspr])\r\n\r\n\txyc = np.array([x, y*aspr])\r\n\txyp = np.array([x, (y+R)*aspr])\r\n\txym = np.array([x, (y-R)*aspr])\r\n\r\n\tφ = θ + np.pi/2\r\n\tRφ = Rot(φ)\r\n\r\n\tOHc = np.dot(Rφ,ohc)\r\n\tOHp = np.dot(Rφ,ohp)\r\n\tOHm = np.dot(Rφ,ohm)\r\n\r\n\tXYc = np.dot(Rφ,xyc)\r\n\tXYp = np.dot(Rφ,xyp)\r\n\tXYm = np.dot(Rφ,xym)\r\n\r\n\tSA = cA/cH\r\n\tSB = cB/cH\r\n\tSM = cM/cH\r\n\tSy = cy/cH\r\n\tδ = (cA+cB)/2-cM\r\n\r\n\tself.osaOutputdata1_fields.clear_content()\r\n\tself.osaOutputdata2_fields.clear_content()\r\n\r\n\tself.osaOutputdata1_fields.ClearanceA.append( mdl.physicalValue( cA, self.osaOutputdata1_fields.ClearanceA.referenceUnit ) )\r\n\tself.osaOutputdata1_fields.ClearanceB.append( mdl.physicalValue( cB, self.osaOutputdata1_fields.ClearanceB.referenceUnit ) )\r\n\tself.osaOutputdata1_fields.ClearanceM.append( mdl.physicalValue( cM, self.osaOutputdata1_fields.ClearanceM.referenceUnit ) )\r\n\r\n\tself.osaOutputdata1_fields.SideForceA.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceA.referenceUnit ) )\r\n\tself.osaOutputdata1_fields.SideForceB.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceB.referenceUnit ) )\r\n\tself.osaOutputdata1_fields.SideForceM.append( mdl.physicalValue( fM, self.osaOutputdata1_fields.SideForceM.referenceUnit ) )\r\n\r\n\tself.osaOutputdata1_fields.StandoffA.append( mdl.physicalValue( SA, self.osaOutputdata1_fields.StandoffA.referenceUnit ) )\r\n\tself.osaOutputdata1_fields.StandoffB.append( mdl.physicalValue( SB, self.osaOutputdata1_fields.StandoffB.referenceUnit ) )\r\n\tself.osaOutputdata1_fields.StandoffM.append( mdl.physicalValue( SM, self.osaOutputdata1_fields.StandoffM.referenceUnit ) )\r\n\r\n\tself.osaOutputdata2_fields.AxialForce.append( mdl.physicalValue( w*L*np.cos(θ), self.osaOutputdata2_fields.AxialForce.referenceUnit ) )\r\n\tself.osaOutputdata2_fields.MaxDeflection.append( mdl.physicalValue( δ, self.osaOutputdata2_fields.MaxDeflection.referenceUnit ) )\r\n\tself.osaOutputdata2_fields.WellboreClearance.append( mdl.physicalValue( np.mean(cy), self.osaOutputdata2_fields.WellboreClearance.referenceUnit ) )\r\n\tself.osaOutputdata2_fields.WellboreStandoff.append( mdl.physicalValue( np.mean(Sy), self.osaOutputdata2_fields.WellboreStandoff.referenceUnit ) )\r\n\r\n\tself.osaCasing_fields.inverseReferenceUnitConvert_fields()\r\n\tself.osaCentA_fields.inverseReferenceUnitConvert_fields()\r\n\tself.osaCentB_fields.inverseReferenceUnitConvert_fields()\r\n\tself.osaWellbore_fields.inverseReferenceUnitConvert_fields()\r\n\tself.osaOutputdata1_fields.inverseReferenceUnitConvert_fields()\r\n\tself.osaOutputdata2_fields.inverseReferenceUnitConvert_fields()\r\n\r\n\tlim = L/2*1.05\r\n\r\n\r\n\r\n\treturn OHc, OHp, OHm, XYc, XYp, XYm, lim, rA, rB, rM\r\n\r\n\r\n\r\n", "import numpy as np\n#from numpy import array\nimport re\nimport copy\nimport dbUtils\nimport matplotlib.tri as mpltri\nimport sys, inspect\n\n\ngravitationalAcceleration = 32.17405*12 #in/s²\nconfigurations = {\t('b', None,None): 
{'nest':[['A']],'label':'=\\n|\\nA\\n|\\n=','PLfactor':0.05},\n\t\t\t\t\t('r', None,None): {'nest':[['A']],'label':'=\\n/\\nA\\n/\\n=','PLfactor':1},\n\t\t\t\t\t('b', 'b', None): {'nest':[['A','B']],'label':'=\\nA\\n|\\nB\\n=','PLfactor':1},\n\t\t\t\t\t('b', 'r', None): {'nest':[['A'],['B']],'label':'=\\n|\\nA\\n|\\n=\\n/\\nB\\n/\\n=','PLfactor':1.5},\n\t\t\t\t\t('r', 'b', None): {'nest':[['A'],['B']],'label':'=\\n/\\nA\\n/\\n=\\n|\\nB\\n|\\n=','PLfactor':1.5},\n\t\t\t\t\t('r', 'r', None): {'nest':[['A'],['B']],'label':'=\\n/\\nA\\n/\\n=\\n/\\nB\\n/\\n=','PLfactor':2},\n\t\t\t\t\t('b', None,'b' ): {'nest':[['A'],[],['C']],'label':'=\\n|\\nA\\n|\\n=\\n|\\n|\\n|\\n=\\n|\\nC\\n|\\n=','PLfactor':2},\n\t\t\t\t\t('b', None,'r' ): {'nest':[['A'],[],['C']],'label':'=\\n|\\nA\\n|\\n=\\n|\\n|\\n|\\n=\\n/\\nC\\n/\\n=','PLfactor':2.5},\n\t\t\t\t\t('r', None,'b' ): {'nest':[['A'],[],['C']],'label':'=\\n/\\nA\\n/\\n=\\n|\\n|\\n|\\n=\\n|\\nC\\n|\\n=','PLfactor':2.5},\n\t\t\t\t\t('r', None,'r' ): {'nest':[['A'],[],['C']],'label':'=\\n/\\nA\\n/\\n=\\n|\\n|\\n|\\n=\\n/\\nC\\n/\\n=','PLfactor':3},\n\t\t\t\t\t('b', 'b', 'b' ): {'nest':[['A','B','C']],'label':'=\\nA\\nB\\nC\\n=','PLfactor':1},\n\t\t\t\t\t('b', 'b', 'r' ): {'nest':[['A','B'],['C']],'label':'=\\nA\\n|\\nB\\n=\\n/\\nC\\n/\\n=','PLfactor':2},\n\t\t\t\t\t('r', 'b', 'b' ): {'nest':[['A'],['B','C']],'label':'=\\n/\\nA\\n/\\n=\\nB\\n|\\nC\\n=','PLfactor':2},\t\n\t\t\t\t\t('b', 'r', 'b' ): {'nest':[['A'],['B'],['C']],'label':'=\\n|\\nA\\n|\\n=\\n/\\nB\\n/\\n=\\n|\\nC\\n|\\n=','PLfactor':2},\n\t\t\t\t\t('b', 'r', 'r' ): {'nest':[['A'],['B'],['C']],'label':'=\\n|\\nA\\n|\\n=\\n/\\nB\\n/\\n=\\n/\\nC\\n/\\n=','PLfactor':2.5},\n\t\t\t\t\t('r', 'r', 'b' ): {'nest':[['A'],['B'],['C']],'label':'=\\n/\\nA\\n/\\n=\\n/\\nB\\n/\\n=\\n|\\nC\\n|\\n=','PLfactor':2.5},\n\t\t\t\t\t('r', 'b', 'r' ): {'nest':[['A'],['B'],['C']],'label':'=\\n/\\nA\\n/\\n=\\n|\\nB\\n|\\n=\\n/\\nC\\n/\\n=','PLfactor':3},\n\t\t\t\t\t('r', 'r', 'r' ): {'nest':[['A'],['B'],['C']],'label':'=\\n/\\nA\\n/\\n=\\n/\\nB\\n/\\n=\\n/\\nC\\n/\\n=','PLfactor':3}\t}\n\n\ndef __repr__(self):\n\tif len(self)==0:\n\t\treturn '[]'\n\telif len(self)==1:\n\t\treturn '[' + str(self[0]) +']'\n\telse:\n\t\treturn '[' + str(self[0]) +', ... 
'+ str(self[-1]) + ']'\n\nnp.set_string_function(__repr__)\narray = lambda L: np.array(L)\n\n\ndef get_decimalPointPattern():\n\t\n\treturn '(([\\-\\+]?\\d*\\.?\\d+)|([\\-\\+]?\\d+\\.?\\d*))'\n\n\ndef get_decimalPointWithThousandsCommaPattern():\n\t\n\treturn '(([\\-\\+]?\\d{1,3}(\\,\\d{3})*\\.\\d*)|([\\-\\+]?\\d*\\.?\\d+)|([\\-\\+]?\\d+\\.?\\d*))'\n\n\ndef get_decimalCommaPattern():\n\t\n\treturn '(([\\-\\+]?\\d{1,3}(\\.\\d{3})*\\,\\d*)|([\\-\\+]?\\d*\\,?\\d+)|([\\-\\+]?\\d+\\,?\\d*))'\n\n\ndef get_decimalFloatPointFunction():\n\tdef text2float(text):\n\t\titems = re.split(',',text)\n\t\ttext = ''.join(items)\n\t\treturn float(text)\n\treturn text2float\n\n\ndef get_decimalFloatCommaFunction():\n\tdef text2float(text):\n\t\titems = re.split(',',text)\n\t\tassert(len(items)==2)\n\t\ttridigs = re.split('\\.',items[0])\n\t\titems[0] = ''.join(tridigs)\n\t\ttext = '.'.join(items)\n\t\treturn float(text)\n\treturn text2float\n\n\ndef np_dot( u,v ):\n\t\n\treturn np.sum(u*v,axis=1,keepdims=True)\n\ndef np_cross( u,v ):\n\t\n\treturn np.cross(u,v,axis=1)\n\ndef np_norm( v ):\n\tnorm = np.linalg.norm(v,axis=1)\n\tnorm = norm.reshape(-1,1)\n\treturn v/norm\n\n\ndef calculate_buoyancyFactor( OD, ID, ρs, ρe, ρi ):\n\n\tdoverDsq = (ID/OD)**2\n\treturn ( (1-ρe/ρs)-doverDsq*(1-ρi/ρs) )/( 1-doverDsq )\n\n\ndef render_circle( center, radius, n=120, mode='all', xscale=1, yscale=1 ):\n\n\tif mode=='all':\n\t\tθ = np.linspace(0,np.pi*2,n)\n\t\tθ += np.pi/2 #- np.pi/20\n\telif mode=='top':\n\t\tθ = np.linspace(0,np.pi,n)\n\telif mode=='bottom':\n\t\tθ = np.linspace(np.pi,np.pi*2,n)\n\telif mode=='right':\n\t\tθ = np.linspace(-np.pi/2,np.pi*2,n)\n\telif mode=='left':\n\t\tθ = np.linspace(np.pi/2,np.pi*3/2,n)\n\n\tx = radius*np.cos(θ)*xscale\n\ty = radius*np.sin(θ)*yscale\n\tx += center[0]\n\ty += center[1]\n\n\treturn np.array([x,y])\n\n\ndef RodriguesRotationFormula( v, u, θ ): \n\n\t# Equation Reference:\n\t#\t\thttps://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula\n\treturn v*np.cos(θ) + np_cross( u,v )*np.sin(θ) + u*np_dot( u,v )*(1-np.cos(θ))\n\n\ndef render_wellbore( fields, radius, n=12 ):\n\n\tx = np.array( fields.EW )\n\ty = np.array( fields.NS )\n\tz = np.array( fields.TVD )\n\tc = 1.0#np.array( fields.MD )\n\t\n\t\"\"\"\n\tmax_EW = max( fields.EW )\n\tmin_EW = min( fields.EW )\n\n\tmax_NS = max( fields.NS )\n\tmin_NS = min( fields.NS )\n\n\tmax_TVD = max( fields.TVD )\n\tmin_TVD = min( fields.TVD )\n\n\tΔEW = max_EW - min_EW\n\tΔNS = max_NS - min_NS\n\tΔTVD = max_TVD - min_TVD\n\n\tif ΔEW>ΔNS:\n\t\tzfactor = ΔEW/ΔTVD\n\telse:\n\t\tzfactor = ΔNS/ΔTVD\n\tzfactor=1\n\tz *= zfactor\n\t\"\"\"\n\n\tS = np.array([x,y,z])\n\tS = S.T\n\n\tU = S[2:] - S[:-2]\n\tU = np.append([U[0]], U, axis=0)\n\tU = np.append(U, [U[-1]], axis=0)\n\tU = np_norm(U)\n\n\tP = np.array([[1,0,0]])\n\t\n\tV = np_cross(U,P)\n\tV = radius*np_norm(V)\n\tl = len(V)\n\n\tR = [ V+S ]\n\tφ = 2*np.pi/n\n\n\tfor i in range(n):\n\t\tV = RodriguesRotationFormula( V, U, φ )\n\t\tR.append( V+S )\n\n\tR = np.array(R)\n\tR = R.reshape(-1,3)\n\t\n\tn+=1\n\t#nl = n*l\n\t#triangles = []\n\t\n\tX,Y,Z = R.T\n\n\tX = X.reshape(n,l)\n\tY = Y.reshape(n,l)\n\tZ = Z.reshape(n,l)\n\tC = c*np.ones((n,l))\n\n\treturn X,Y,Z,C#triangles #,Z/zfactor\n\n\ndef make_cleanAverage( X ):\n\n\tif len(X)>0:\n\t\ta = np.average(X)\n\t\tfor i in range(10):\n\t\t\tW = np.exp(-np.abs(X-a))\n\t\t\ta = np.average(X, weights=W)\n\t\treturn a\n\telse:\n\t\treturn None\n\n\ndef isNoneEntry( entry ):\n\t\n\treturn entry=='' and hasattr(entry,'unit')\n\n\ndef isSomething( 
value ):\n\t\n\treturn value!='' and value!=None and value!=False\n\n\ndef unitConvert_value( value, originUnit, targetUnit ):\n\n\tquery = \"\"\"select u.factorToReferenceUnit, u.offsetToReferenceUnit from units u\n\t\t\twhere u.representation = '{origin}' \"\"\".format(origin=originUnit)\n\titems_origin = dbUtils.execute_query(query)\n\n\tquery = \"\"\"select u.factorToReferenceUnit, u.offsetToReferenceUnit from units u \n\t\t\twhere u.representation = '{target}' \"\"\".format(target=targetUnit)\n\titems_target = dbUtils.execute_query(query)\n\n\tfactor_origin = float(items_origin[0][0])\n\tfactor_target = float(items_target[0][0])\n\t\n\toffset_origin = float(items_origin[0][1])\n\toffset_target = float(items_target[0][1])\n\n\tvalue = physicalValue( factor_origin/factor_target * value + (offset_origin - offset_target)/factor_target, targetUnit )\n\n\treturn value\n\n\ndef referenceUnitConvert_value( value, unit ):\n\n\tquery = \"\"\"select u.factorToReferenceUnit, u.offsetToReferenceUnit, u.referenceUnit from units u\n\t\t\twhere u.representation = '{unit}' \"\"\".format(unit=unit)\n\titems = dbUtils.execute_query(query)\n\n\tfactor = float(items[0][0])\n\toffset = float(items[0][1])\n\treferenceUnit = items[0][2]\n\n\tvalue = physicalValue( factor*value+offset, referenceUnit )\n\n\treturn value\n\n\ndef inverseReferenceUnitConvert_value( value, unit ):\n\n\tquery = \"\"\"select u.factorToReferenceUnit, u.offsetToReferenceUnit from units u\n\t\t\twhere u.representation = '{unit}' \"\"\".format(unit=unit)\n\titems = dbUtils.execute_query(query)\n\n\tfactor = float(items[0][0])\n\toffset = float(items[0][1])\n\n\tvalue = physicalValue( (value-offset)/factor, unit )\n\n\treturn value\n\n\ndef create_physicalValue_and_appendTo_field(value, field, unit=None ):\n\n\tif unit=='referenceUnit':\n\t\tvalue = physicalValue( value, field.referenceUnit )\n\telif unit==None:\n\t\tvalue = physicalValue( value, field.unit )\n\telse:\n\t\tvalue = physicalValue( value, unit )\n\tfield.append( value )\n\n\n\ndef xfloat( expression ):\n\tif isinstance(expression, float) or isinstance(expression, np.float32) or isinstance(expression, np.float64):\n\t\tvalue = __float__( expression )\n\t\treturn value\n\telse:\n\t\tif expression=='' or expression==None:\n\t\t\traise ValueError\n\t\titems = re.split('[ ]+',str(expression))\n\t\tvalue = __float__( eval( '+'.join(items) ) )\n\t\tvalue.fraction = expression\n\t\treturn value\n\n\ndef physicalValue(value, unit):\n\t\n\tif isinstance(value, int) or isinstance(value, np.int32) or isinstance(value, np.int64):\n\t\tentry = __int__(value)\n\telif isinstance(value, float) or isinstance(value, np.float32) or isinstance(value, np.float64):\n\t\tentry = __float__(value)\n\telif isinstance(value, str):\n\t\tentry = __str__(value)\n\telif isinstance(value, type(None)):\n\t\tentry = __str__('')\n\tentry.unit = unit\n\t#entry.repr = lambda: str(entry._repr_)+' '+entry._repr_.unit\n\t\n\treturn entry\n\n\nclass LogicalError( Exception ): pass\n\n\nclass __int__(int): pass \n\n\nclass __float__(float): pass \n\n\nclass __str__(str): pass\n\n\nclass FieldList( list ):\n\t\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\t\n\n\tdef append(self, field):\n\t\tfield.pos = len(self)\n\t\tsetattr(self, str(field.abbreviation), field)\n\t\tsuper().append(field)\n\n\n\tdef insert_data(self, data):\n\t\tfor field in self:\n\t\t\ttry:\n\t\t\t\ttry:\n\t\t\t\t\tfield.append(data[field.abbreviation])\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tvalue = 
physicalValue(data[field.abbreviation],field.unit)\n\t\t\t\t\tfield.append(value)\n\t\t\texcept KeyError:\n\t\t\t\tvalue = physicalValue(None,field.unit)\n\t\t\t\tfield.append(value)\n\n\n\tdef extract_data_from_row(self, row, representation=False):\n\t\tdata = {}\n\t\tfor field in self:\n\t\t\tif representation:\n\t\t\t\tdata[field.abbreviation] = field[row]._repr_\n\t\t\telse:\n\t\t\t\tdata[field.abbreviation] = field[row]\n\t\treturn data\n\n\n\tdef extract_fields_from_row(self, row):\n\t\t\n\t\tfields = FieldList()\n\n\t\t\"\"\"\n\t\tfor field in self:\n\t\t\tnewfield = Field(field.id)\n\t\t\tnewfield.append(field[row])\n\t\t\tfields.append(newfield)\n\t\t\"\"\"\n\n\t\tfor field in self:\n\t\t\tnewfield = copy.deepcopy( field )\n\t\t\tnewfield.clear()\n\t\t\tnewfield.append(field[row])\n\t\t\tfields.append(newfield)\n\n\t\treturn fields\n\n\n\tdef clear_content(self):\n\t\tfor field in self:\n\t\t\tfield.clear()\n\n\n\tdef referenceUnitConvert_fields(self):\n\t\tfor field in self:\n\t\t\tfield.referenceUnitConvert()\n\n\n\tdef inverseReferenceUnitConvert_fields(self):\n\t\tfor field in self:\n\t\t\tfield.inverseReferenceUnitConvert()\n\n\nclass Field( list ):\n\t\n\tdef __init__(self, fieldID, altBg=False, altTx=False, altFg=False, mandatory=False, substitutefieldID=None):\n\t\tsuper().__init__()\n\t\tself.pos = None\n\t\tself.id = fieldID\n\t\tself.mandatory = mandatory\n\t\tself._altFg_ = altFg\n\n\t\tif substitutefieldID:\n\t\t\tquery = \"\"\" select f.abbreviation from fields f where f.fieldID = '{fieldID}' \"\"\".format(fieldID=substitutefieldID)\t\t\t\n\t\t\tself.substitute = dbUtils.execute_query(query)[0][0]\n\t\telse:\n\t\t\tself.substitute = None\n\t\t\n\t\tquery = \"\"\" select f.description, f.representation, f.dataType, f.precision, \n\t\t\t\t\tf.backgroundColor, f.altBackgroundColor, f.textColor, f.altTextColor, f.flag, f.altFlag, f.abbreviation\n\t\t\t\t\tfrom fields f where f.fieldID = '{fieldID}' \"\"\".format(fieldID=fieldID)\t\t\t\n\t\titems = dbUtils.execute_query(query)[0]\n\t\t\n\t\tnom_i,alt_i = (5,4) if altBg else (4,5)\n\t\tnom_j,alt_j = (7,6) if altTx else (6,7)\n\t\tnom_k,alt_k = (9,8) if altFg else (8,9)\n\t\t\n\t\tself.description = items[0]\n\t\tself.representation = items[1]\n\t\tself.dataType = eval(items[2])\n\t\tself.backgroundColor = np.array([ int(items[nom_i][:2],16), int(items[nom_i][2:4],16), int(items[nom_i][4:],16) ])\n\t\tself.altBackgroundColor = np.array([ int(items[alt_i][:2],16), int(items[alt_i][2:4],16), int(items[alt_i][4:],16) ])\n\t\tself.textColor = np.array([ int(items[nom_j][:2],16), int(items[nom_j][2:4],16), int(items[nom_j][4:],16) ])\n\t\tself.altTextColor = np.array([ int(items[alt_j][:2],16), int(items[alt_j][2:4],16), int(items[alt_j][4:],16) ])\n\t\tself.flag = int(items[nom_k])\n\t\tself.altFlag = int(items[alt_k])\n\t\tself.abbreviation = items[10]\n\t\t\n\t\ttry:\n\t\t\tself.precision = int(items[3])\n\t\texcept (TypeError, ValueError):\n\t\t\tself.precision = None\n\t\t\n\t\ttry:\n\t\t\tquery = \"\"\" select u.representation from units u, work_units qu, fields f\n\t\t\t\t\t\twhere u.unitID=qu.unitID and qu.parameterID=f.parameterID and f.fieldID='{fieldID}' \"\"\".format(fieldID=fieldID)\n\t\t\tself.unit = dbUtils.execute_query(query)[0][0]\n\t\t\tself.set_unit(self.unit) \n\n\t\texcept IndexError:\n\t\t\tself.headerName = self.representation\n\t\t\tself.unit = None\n\t\t\tself.factorToReferenceUnit = None\n\t\t\tself.offsetToReferenceUnit = None\n\t\t\tself.referenceUnit = None\n\n\tdef __repr__(self):\n\t\treturn 
__repr__(self)\n\n\t\n\tdef set_abbreviation(self, newAbbreviation):\n\n\t\tself.abbreviation = newAbbreviation\n\t\n\n\tdef set_representation(self, newRepresentation):\n\n\t\tself.representation = newRepresentation\n\t\tif self.unit:\n\t\t\tself.headerName = newRepresentation + ' ['+self.unit+']'\n\t\telse:\n\t\t\tself.headerName = newRepresentation\n\n\n\tdef set_unit(self, newUnit):\n\n\t\tself.headerName = self.representation + ' ['+newUnit+']'\n\n\t\tquery = \"\"\"select u.factorToReferenceUnit, u.offsetToReferenceUnit, u.referenceUnit from units u\n\t\t\t\twhere u.representation = '{unit}' \"\"\".format(unit=newUnit)\n\t\titems = dbUtils.execute_query(query)\n\n\t\tself.unit = newUnit\n\t\tself.factorToReferenceUnit = float(items[0][0])\n\t\tself.offsetToReferenceUnit = float(items[0][1])\n\t\tself.referenceUnit = items[0][2]\n\n\n\tdef append(self, newValue):\n\t\tif isNoneEntry(newValue) or newValue==None:\n\t\t\tvalue = physicalValue(None, self.unit)\n\t\t\tvalue._repr_ = physicalValue(None, self.unit)\n\t\telse:\n\t\t\tunit = newValue.unit\n\t\t\tvalue = self.dataType(newValue)\n\t\t\tvalue = physicalValue(value, unit)\n\t\t\tvalue._repr_ = newValue\n\t\tsuper().append(value)\n\n\n\tdef put(self, pos, newValue):\n\t\tif isNoneEntry(newValue) or newValue==None:\n\t\t\tvalue = physicalValue(None, self.unit)\n\t\t\tvalue._repr_ = physicalValue(None, self.unit)\n\t\telse:\n\t\t\tunit = newValue.unit\n\t\t\tvalue = self.dataType(newValue)\n\t\t\tvalue = physicalValue(value, unit)\n\t\t\tvalue._repr_ = newValue\n\t\ttry:\n\t\t\tself[pos] = value\n\t\texcept IndexError:\n\t\t\tsuper().append(value)\n\n\n\tdef insert(self, pos, newValue):\n\t\tif isNoneEntry(newValue) or newValue==None:\n\t\t\tvalue = physicalValue(None, self.unit)\n\t\t\tvalue._repr_ = physicalValue(None, self.unit)\n\t\telse:\n\t\t\tunit = newValue.unit\n\t\t\tvalue = self.dataType(newValue)\n\t\t\tvalue = physicalValue(value, unit)\n\t\t\tvalue._repr_ = newValue\n\t\tsuper().insert(pos, value)\n\n\n\tdef referenceUnitConvert(self):\n\n\t\tfor i,value in enumerate(self):\n\t\t\tif isNoneEntry(value):\n\t\t\t\tnewValue = physicalValue( None, self.referenceUnit )\n\t\t\t\tself[i] = newValue\n\t\t\telse:\n\t\t\t\tif value.unit==self.referenceUnit:\n\t\t\t\t\tnewValue = value\n\t\t\t\telif value.unit==self.unit:\n\t\t\t\t\tnewValue = physicalValue( self.factorToReferenceUnit*value + self.offsetToReferenceUnit, self.referenceUnit )\n\t\t\t\telse:\t\n\t\t\t\t\traise(ValueError)\n\t\t\t\tself[i] = newValue\n\t\treturn self\n\t\n\n\tdef inverseReferenceUnitConvert(self):\n\n\t\tfor i,value in enumerate(self):\n\t\t\tif isNoneEntry(value):\n\t\t\t\tnewValue = physicalValue( None, self.unit )\n\t\t\t\tself[i] = newValue\n\t\t\telse:\n\t\t\t\tif value.unit==self.unit:\n\t\t\t\t\tnewValue = value\n\t\t\t\telif value.unit==self.referenceUnit:\n\t\t\t\t\tnewValue = physicalValue( (value-self.offsetToReferenceUnit)/self.factorToReferenceUnit, self.unit )\n\t\t\t\telse:\n\t\t\t\t\traise(ValueError)\n\t\t\t\tself[i] = newValue\n\t\treturn self" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.arctan", "numpy.linspace", "numpy.cosh", "numpy.cos", "numpy.sinh", "numpy.sin", "numpy.mean", "numpy.tanh", "numpy.array" ], [ "numpy.set_string_function", "numpy.abs", "numpy.linspace", "numpy.linalg.norm", "numpy.average", "numpy.ones", "numpy.cos", "numpy.sin", "numpy.append", "numpy.cross", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HitkoDev/triplet-reid
[ "d80edf7bdcee2ebcab160f1a06224837ac624329" ]
[ "loss.py" ]
[ "import numbers\nimport tensorflow as tf\n\n\ndef all_diffs(a, b):\n \"\"\" Returns a tensor of all combinations of a - b.\n\n Args:\n a (2D tensor): A batch of vectors shaped (B1, F).\n b (2D tensor): A batch of vectors shaped (B2, F).\n\n Returns:\n The matrix of all pairwise differences between all vectors in `a` and in\n `b`, will be of shape (B1, B2).\n\n Note:\n For convenience, if either `a` or `b` is a `Distribution` object, its\n mean is used.\n \"\"\"\n return tf.expand_dims(a, axis=1) - tf.expand_dims(b, axis=0)\n\n\ndef cdist(a, b, metric='euclidean'):\n \"\"\"Similar to scipy.spatial's cdist, but symbolic.\n\n The currently supported metrics can be listed as `cdist.supported_metrics` and are:\n - 'euclidean', although with a fudge-factor epsilon.\n - 'sqeuclidean', the squared euclidean.\n - 'cityblock', the manhattan or L1 distance.\n\n Args:\n a (2D tensor): The left-hand side, shaped (B1, F).\n b (2D tensor): The right-hand side, shaped (B2, F).\n metric (string): Which distance metric to use, see notes.\n\n Returns:\n The matrix of all pairwise distances between all vectors in `a` and in\n `b`, will be of shape (B1, B2).\n\n Note:\n When a square root is taken (such as in the Euclidean case), a small\n epsilon is added because the gradient of the square-root at zero is\n undefined. Thus, it will never return exact zero in these cases.\n \"\"\"\n with tf.compat.v1.name_scope(\"cdist\"):\n diffs = all_diffs(a, b)\n if metric == 'sqeuclidean':\n return tf.reduce_sum(input_tensor=tf.square(diffs), axis=-1)\n elif metric == 'euclidean':\n return tf.sqrt(tf.reduce_sum(input_tensor=tf.square(diffs), axis=-1) + 1e-12)\n elif metric == 'cityblock':\n return tf.reduce_sum(input_tensor=tf.abs(diffs), axis=-1)\n else:\n raise NotImplementedError(\n 'The following metric is not implemented by `cdist` yet: {}'.format(metric))\ncdist.supported_metrics = [\n 'euclidean',\n 'sqeuclidean',\n 'cityblock',\n]\n\n\ndef get_at_indices(tensor, indices):\n \"\"\" Like `tensor[np.arange(len(tensor)), indices]` in numpy. 
\"\"\"\n counter = tf.range(tf.shape(input=indices, out_type=indices.dtype)[0])\n return tf.gather_nd(tensor, tf.stack((counter, indices), -1))\n\n\ndef batch_hard(dists, pids, margin, batch_precision_at_k=None):\n \"\"\"Computes the batch-hard loss from arxiv.org/abs/1703.07737.\n\n Args:\n dists (2D tensor): A square all-to-all distance matrix as given by cdist.\n pids (1D tensor): The identities of the entries in `batch`, shape (B,).\n This can be of any type that can be compared, thus also a string.\n margin: The value of the margin if a number, alternatively the string\n 'soft' for using the soft-margin formulation, or `None` for not\n using a margin at all.\n\n Returns:\n A 1D tensor of shape (B,) containing the loss value for each sample.\n \"\"\"\n with tf.compat.v1.name_scope(\"batch_hard\"):\n same_identity_mask = tf.equal(tf.expand_dims(pids, axis=1),\n tf.expand_dims(pids, axis=0))\n negative_mask = tf.logical_not(same_identity_mask)\n positive_mask = tf.math.logical_xor(same_identity_mask,\n tf.eye(tf.shape(input=pids)[0], dtype=tf.bool))\n\n furthest_positive = tf.reduce_max(input_tensor=dists*tf.cast(positive_mask, tf.float32), axis=1)\n closest_negative = tf.map_fn(lambda x: tf.reduce_min(input_tensor=tf.boolean_mask(tensor=x[0], mask=x[1])),\n (dists, negative_mask), tf.float32)\n # Another way of achieving the same, though more hacky:\n # closest_negative = tf.reduce_min(dists + 1e5*tf.cast(same_identity_mask, tf.float32), axis=1)\n\n diff = furthest_positive - closest_negative\n if isinstance(margin, numbers.Real):\n diff = tf.maximum(diff + margin, 0.0)\n elif margin == 'soft':\n diff = tf.nn.softplus(diff)\n elif margin.lower() == 'none':\n pass\n else:\n raise NotImplementedError(\n 'The margin {} is not implemented in batch_hard'.format(margin))\n\n if batch_precision_at_k is None:\n return diff\n\n # For monitoring, compute the within-batch top-1 accuracy and the\n # within-batch precision-at-k, which is somewhat more expressive.\n with tf.compat.v1.name_scope(\"monitoring\"):\n # This is like argsort along the last axis. Add one to K as we'll\n # drop the diagonal.\n _, indices = tf.nn.top_k(-dists, k=batch_precision_at_k+1)\n\n # Drop the diagonal (distance to self is always least).\n indices = indices[:,1:]\n\n # Generate the index indexing into the batch dimension.\n # This is simething like [[0,0,0],[1,1,1],...,[B,B,B]]\n batch_index = tf.tile(\n tf.expand_dims(tf.range(tf.shape(input=indices)[0]), 1),\n (1, tf.shape(input=indices)[1]))\n\n # Stitch the above together with the argsort indices to get the\n # indices of the top-k of each row.\n topk_indices = tf.stack((batch_index, indices), -1)\n\n # See if the topk belong to the same person as they should, or not.\n topk_is_same = tf.gather_nd(same_identity_mask, topk_indices)\n\n # All of the above could be reduced to the simpler following if k==1\n #top1_is_same = get_at_indices(same_identity_mask, top_idxs[:,1])\n\n topk_is_same_f32 = tf.cast(topk_is_same, tf.float32)\n top1 = tf.reduce_mean(input_tensor=topk_is_same_f32[:,0])\n prec_at_k = tf.reduce_mean(input_tensor=topk_is_same_f32)\n\n # Finally, let's get some more info that can help in debugging while\n # we're at it!\n negative_dists = tf.boolean_mask(tensor=dists, mask=negative_mask)\n positive_dists = tf.boolean_mask(tensor=dists, mask=positive_mask)\n\n return diff, top1, prec_at_k, topk_is_same, negative_dists, positive_dists\n\n\nLOSS_CHOICES = {\n 'batch_hard': batch_hard,\n}\n" ]
[ [ "tensorflow.boolean_mask", "tensorflow.gather_nd", "tensorflow.reduce_mean", "tensorflow.shape", "tensorflow.stack", "tensorflow.maximum", "tensorflow.cast", "tensorflow.expand_dims", "tensorflow.nn.top_k", "tensorflow.abs", "tensorflow.square", "tensorflow.logical_not", "tensorflow.nn.softplus", "tensorflow.compat.v1.name_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fg6/MachineLearning
[ "7c3f6e8f2f90b729dbcc345c5a8a5da712cfbb27" ]
[ "kaggle/mnist/bayes/naivebayes.py" ]
[ "\nimport numpy as np\nfrom sortedcontainers import SortedList\nfrom scipy.stats import multivariate_normal\n\nclass NaiveBayes:\n #def __init__(self):\n # pass\n\n def fit(self, X, Y):\n self.X = X\n self.Y = set(Y)\n\n self.Classes = set(Y) \n self.Prior = {}\n self.G = {}\n # smoothing\n epsilon=0.001*np.identity(28)\n \n for c in self.Classes:\n Xc = X[Y==c]\n Mean = np.mean(Xc, axis=0,dtype=np.float64)\n Sigma = np.var(Xc,axis=0,dtype=np.float64)+0.001 \n \n self.G[c] = (Mean, Sigma)\n self.Prior[c] = float(len(Xc))/len(Y) \n\n def predict(self, X):\n \n results=[]\n max_posterior = -1\n max_class = None\n c_posterior = np.zeros((X.shape[0], len(self.G)))\n for c in self.Classes:\n mean, sigma = self.G[c]\n c_posterior[:,c] = multivariate_normal.logpdf(X, mean, sigma) + np.log(self.Prior[c]) # add cov !\n\n #print(len(c_posterior), np.argmax(c_posterior, axis=1))\n \n\n return np.argmax(c_posterior, axis=1)\n\n \n\n def score(self, X, Y):\n results = self.predict(X)\n #for i,v in enumerate(Y):\n # print(i,v,results[i])\n score = np.mean(results == Y)\n return score\n\nclass Bayes:\n \n\n def fit(self, X, Y, e=0.001):\n\n self.X = X\n self.Y = set(Y)\n N,D = X.shape\n\n self.Classes = set(Y) \n self.Prior = {}\n self.G = {}\n # smoothing\n epsilon=e*np.identity(28)\n \n for c in self.Classes:\n Xc = X [ Y==c ]\n Mean = np.mean(Xc, axis=0, dtype=np.float64)\n #Sigma = np.var(Xc, axis=0, dtype=np.float64) + e\n Cov = np.cov(Xc.T)+ np.eye(D)*e\n \n self.G[c] = (Mean, Cov)\n self.Prior[c] = float(len(Xc))/len(Y) \n\n def predict(self, X):\n results=[]\n max_posterior = -1\n max_class = None\n c_posterior = np.zeros((X.shape[0], len(self.G)))\n for c in self.Classes:\n mean, cov = self.G[c]\n c_posterior[:,c] = multivariate_normal.logpdf(X, mean, cov) + np.log(self.Prior[c])\n\n return np.argmax(c_posterior, axis=1)\n\n \n \n def score(self, X, Y):\n results = self.predict(X)\n score = np.mean(results == Y)\n return score\n" ]
[ [ "numpy.log", "numpy.eye", "numpy.argmax", "scipy.stats.multivariate_normal.logpdf", "numpy.identity", "numpy.mean", "numpy.cov", "numpy.var" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gitlost-murali/awesome-align
[ "39fb45ca85a98e005447bddb52c48e65ce7d399b" ]
[ "run_align.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n# Modifications copyright (C) 2020 Zi-Yi Dou\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport argparse\nimport random\nimport itertools\nimport os\n\nimport numpy as np\nimport torch\nfrom tqdm import trange\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import DataLoader, Dataset, SequentialSampler\n\nimport modeling\nfrom configuration_bert import BertConfig\nfrom modeling import BertForMaskedLM\nfrom tokenization_bert import BertTokenizer\nfrom tokenization_utils import PreTrainedTokenizer\nfrom modeling_utils import PreTrainedModel\n\n\n\ndef set_seed(args):\n if args.seed >= 0:\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n\nclass LineByLineTextDataset(Dataset):\n def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path):\n assert os.path.isfile(file_path)\n print('Loading the dataset...')\n self.examples = []\n with open(file_path, encoding=\"utf-8\") as f:\n for idx, line in enumerate(f.readlines()):\n if len(line) == 0 or line.isspace() or not len(line.split(' ||| ')) == 2:\n raise ValueError(f'Line {idx+1} is not in the correct format!')\n \n src, tgt = line.split(' ||| ')\n if src.rstrip() == '' or tgt.rstrip() == '':\n raise ValueError(f'Line {idx+1} is not in the correct format!')\n \n sent_src, sent_tgt = src.strip().split(), tgt.strip().split()\n token_src, token_tgt = [tokenizer.tokenize(word) for word in sent_src], [tokenizer.tokenize(word) for word in sent_tgt]\n wid_src, wid_tgt = [tokenizer.convert_tokens_to_ids(x) for x in token_src], [tokenizer.convert_tokens_to_ids(x) for x in token_tgt]\n\n ids_src, ids_tgt = tokenizer.prepare_for_model(list(itertools.chain(*wid_src)), return_tensors='pt', max_length=tokenizer.max_len)['input_ids'], tokenizer.prepare_for_model(list(itertools.chain(*wid_tgt)), return_tensors='pt', max_length=tokenizer.max_len)['input_ids']\n\n bpe2word_map_src = []\n for i, word_list in enumerate(token_src):\n bpe2word_map_src += [i for x in word_list]\n bpe2word_map_tgt = []\n for i, word_list in enumerate(token_tgt):\n bpe2word_map_tgt += [i for x in word_list]\n\n self.examples.append( (ids_src[0], ids_tgt[0], bpe2word_map_src, bpe2word_map_tgt) )\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, i):\n return self.examples[i]\n\ndef word_align(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, output_word_alignments = False):\n def collate(examples):\n ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt = zip(*examples)\n ids_src = pad_sequence(ids_src, batch_first=True, padding_value=tokenizer.pad_token_id)\n ids_tgt = pad_sequence(ids_tgt, batch_first=True, padding_value=tokenizer.pad_token_id)\n return ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt\n\n dataset = LineByLineTextDataset(tokenizer, args, 
file_path=args.data_file)\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(\n dataset, sampler=sampler, batch_size=args.batch_size, collate_fn=collate\n )\n\n model.to(args.device)\n model.eval()\n tqdm_iterator = trange(dataset.__len__(), desc=\"Extracting\")\n with open(args.output_file, 'w') as writer:\n for batch in dataloader:\n with torch.no_grad():\n ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt = batch\n word_aligns_list = model.get_aligned_word(ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt, args.device, 0, 0, align_layer=args.align_layer, extraction=args.extraction, softmax_threshold=args.softmax_threshold, test=True)\n for word_aligns in word_aligns_list:\n output_str = []\n for word_align in word_aligns:\n output_str.append(f'{word_align[0]}-{word_align[1]}')\n writer.write(' '.join(output_str)+'\\n')\n tqdm_iterator.update(len(ids_src))\n\n if output_word_alignments:\n with open(args.output_file, 'r') as fh:\n outputf = (fh.read()).split(\"\\n\")\n with open(args.data_file, 'r') as fh:\n datalines = (fh.read()).split(\"\\n\")\n\n with open(args.output_file+\".outtxt\", 'w') as fwriter:\n for indices, line in zip(outputf, datalines):\n srcline, tgtline = line.split(' ||| ')\n indices = indices.split()\n srcwrds = srcline.split()\n tgtwrds = tgtline.split()\n output_wrds = []\n for wrd in indices:\n srcix,tgtix = wrd.split(\"-\")\n srcix, tgtix = int(srcix), int(tgtix)\n output_wrds.append(f\"{srcwrds[srcix]}-{tgtwrds[tgtix]}\")\n fwriter.write(' '.join(output_wrds)+'\\n')\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--data_file\", default=None, type=str, required=True, help=\"The input data file (a text file).\"\n )\n parser.add_argument(\n \"--output_file\",\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n parser.add_argument(\"--align_layer\", type=int, default=8, help=\"layer for alignment extraction\")\n parser.add_argument(\n \"--extraction\", default='softmax', type=str, help='softmax or entmax15'\n )\n parser.add_argument(\n \"--softmax_threshold\", type=float, default=0.001\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n help=\"The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.\",\n )\n parser.add_argument(\n \"--config_name\",\n default=None,\n type=str,\n help=\"Optional pretrained config name or path if not the same as model_name_or_path. If both are None, initialize a new config.\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=None,\n type=str,\n help=\"Optional pretrained tokenizer name or path if not the same as model_name_or_path. 
If both are None, initialize a new tokenizer.\",\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n parser.add_argument(\"--batch_size\", default=32, type=int)\n parser.add_argument(\n \"--cache_dir\",\n default='cache_dir',\n type=str,\n help=\"Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n args = parser.parse_args()\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.device = device\n\n # Set seed\n set_seed(args)\n config_class, model_class, tokenizer_class = BertConfig, BertForMaskedLM, BertTokenizer\n if args.config_name:\n config = config_class.from_pretrained(args.config_name, cache_dir=args.cache_dir)\n elif args.model_name_or_path:\n config = config_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)\n else:\n config = config_class()\n\n if args.tokenizer_name:\n tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)\n elif args.model_name_or_path:\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)\n else:\n raise ValueError(\n \"You are instantiating a new {} tokenizer. This is not supported, but you can do it from another script, save it,\"\n \"and load it from here, using --tokenizer_name\".format(tokenizer_class.__name__)\n )\n\n modeling.PAD_ID = tokenizer.pad_token_id\n modeling.CLS_ID = tokenizer.cls_token_id\n modeling.SEP_ID = tokenizer.sep_token_id\n\n if args.model_name_or_path:\n model = model_class.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir,\n )\n else:\n model = model_class(config=config)\n\n word_align(args, model, tokenizer)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.random.seed", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.nn.utils.rnn.pad_sequence", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
duoan/light-text-classification
[ "6c96c9fb6b52abd42e4b4358cb85c44473731668" ]
[ "src/lightextclassification/imdb.py" ]
[ "# _*_ coding: utf-8 _*_\nfrom argparse import ArgumentParser\n\nimport torch\nfrom torchtext import data, datasets\n\nfrom vocab import LocalVectors\n\nfrom models import *\n\nfrom torch.optim import SGD\nfrom torch.utils.data import DataLoader\nfrom ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\nfrom ignite.metrics import Accuracy, Loss\n\nfrom tqdm import tqdm\n\n\ndef get_data_loaders(batch_size=32):\n tokenize = lambda x: x.split()\n TEXT = data.Field(\n sequential=True,\n tokenize=tokenize,\n lower=True,\n include_lengths=True,\n batch_first=True,\n fix_length=200)\n LABEL = data.LabelField(dtype=torch.float)\n print('Load IMDB dataset')\n train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)\n print('TEXT build vocab')\n TEXT.build_vocab(\n train_data,\n vectors=LocalVectors(\n '/Users/duoan/nbs/quora-insincere-questions-classification/input/embeddings/glove.840B.300d/glove.840B.300d.txt'\n ))\n print('LABEL build vocab')\n LABEL.build_vocab(train_data)\n\n word_embeddings = TEXT.vocab.vectors\n print('Length of TEXT Vocabulary: {}'.format(len(TEXT.vocab)))\n print('Vector size of TEXT Vocabulary: {}'.format(TEXT.vocab.vectors.size()))\n print('LABEL Length: {}'.format(len(LABEL.vocab)))\n\n train_data, valid_data = train_data.split()\n train_iter, valid_iter, test_iter = data.BucketIterator.splits(\n (train_data, valid_data, test_data),\n batch_size=batch_size,\n sort_key=lambda x: len(x.text),\n repeat=False,\n shuffle=True)\n vocab_size = len(TEXT.vocab)\n print('finished get data loaders')\n return vocab_size, word_embeddings, train_iter, valid_iter, test_iter\n\n\ndef run(batch_size, epochs, lr, momentum, log_interval):\n vocab_size, word_embeddings, train_iter, valid_iter, test_iter = get_data_loaders(\n batch_size)\n model = LSTMClassifier(32, 2, 256, vocab_size, 300, word_embeddings)\n device = 'cpu'\n\n if torch.cuda.is_available():\n device = 'cuda'\n\n optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)\n trainer = create_supervised_trainer(\n model, optimizer, F.nll_loss, device=device)\n evaluator = create_supervised_evaluator(\n model,\n metrics={\n 'accuracy': Accuracy(),\n 'nll': Loss(F.nll_loss)\n },\n device=device)\n\n desc = \"ITERATION - loss: {:.2f}\"\n pbar = tqdm(\n initial=0, leave=False, total=len(train_iter), desc=desc.format(0))\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training_loss(engine):\n iter = (engine.state.iteration - 1) % len(train_iter) + 1\n if iter % log_interval == 0:\n pbar.desc = desc.format(engine.state.output)\n pbar.update(log_interval)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n pbar.refresh()\n evaluator.run(train_iter)\n metrics = evaluator.state.metrics\n avg_accuracy = metrics['accuracy']\n avg_nll = metrics['nll']\n tqdm.write(\n \"Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}\"\n .format(engine.state.epoch, avg_accuracy, avg_nll))\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n evaluator.run(valid_iter)\n metrics = evaluator.state.metrics\n avg_accuracy = metrics['accuracy']\n avg_nll = metrics['nll']\n tqdm.write(\n \"Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}\"\n .format(engine.state.epoch, avg_accuracy, avg_nll))\n\n pbar.n = pbar.last_print_n = 0\n\n trainer.run(train_iter, max_epochs=epochs)\n pbar.close()\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\n '--batch_size',\n type=int,\n default=64,\n help='input 
batch size for training (default: 64)')\n parser.add_argument(\n '--val_batch_size',\n type=int,\n default=1000,\n help='input batch size for validation (default: 1000)')\n parser.add_argument(\n '--epochs',\n type=int,\n default=10,\n help='number of epochs to train (default: 10)')\n parser.add_argument(\n '--lr', type=float, default=0.01, help='learning rate (default: 0.01)')\n parser.add_argument(\n '--momentum', type=float, default=0.5, help='SGD momentum (default: 0.5)')\n parser.add_argument(\n '--log_interval',\n type=int,\n default=10,\n help='how many batches to wait before logging training status')\n\n args = parser.parse_args()\n\n run(args.batch_size, args.epochs, args.lr, args.momentum, args.log_interval)\n" ]
[ [ "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
krishpop/CHER
[ "0633a45151b13f23acf20faabc65028c599a3551" ]
[ "baselines/cher/experiment/config.py" ]
[ "from copy import deepcopy\nimport numpy as np\nimport json\nimport os\nimport gym\n\nfrom baselines import logger\nfrom baselines.her.ddpg import DDPG\n\nfrom baselines.cher.her import make_sample_her_transitions\n\n\nDEFAULT_ENV_PARAMS = {\n 'FetchReach-v0': {\n 'n_cycles': 10,\n },\n}\n\n\nDEFAULT_PARAMS = {\n # env\n 'max_u': 1., # max absolute value of actions on different coordinates\n # ddpg\n 'layers': 3, # number of layers in the critic/actor networks\n 'hidden': 256, # number of neurons in each hidden layers\n 'network_class': 'baselines.her.actor_critic:ActorCritic',\n 'Q_lr': 0.001, # critic learning rate\n 'pi_lr': 0.001, # actor learning rate\n 'buffer_size': int(1E6), # for experience replay\n 'polyak': 0.95, # polyak averaging coefficient\n 'action_l2': 1.0, # quadratic penalty on actions (before rescaling by max_u)\n 'clip_obs': 200.,\n 'scope': 'ddpg', # can be tweaked for testing\n 'relative_goals': False,\n # training\n 'n_cycles': 50, # per epoch\n 'rollout_batch_size': 2, # per mpi thread\n 'n_batches': 40, # training batches per cycle\n 'batch_size': 64, # per mpi thread, measured in transitions and reduced to even multiple of chunk_length.\n 'n_test_rollouts': 10, # number of test rollouts per epoch, each consists of rollout_batch_size rollouts\n 'test_with_polyak': False, # run test episodes with the target network\n # exploration\n 'random_eps': 0.3, # percentage of time a random action is taken\n 'noise_eps': 0.2, # std of gaussian noise added to not-completely-random actions as a percentage of max_u\n # HER\n 'replay_strategy': 'future', # supported modes: future, none\n 'replay_k': 4, # number of additional goals used for replay, only used if off_policy_data=future\n # normalization\n 'norm_eps': 0.01, # epsilon used for observation normalization\n 'norm_clip': 5, # normalized observations are cropped to this values\n}\n\n\nCACHED_ENVS = {}\ndef cached_make_env(make_env):\n \"\"\"\n Only creates a new environment from the provided function if one has not yet already been\n created. This is useful here because we need to infer certain properties of the env, e.g.\n its observation and action spaces, without any intend of actually using it.\n \"\"\"\n if make_env not in CACHED_ENVS:\n env = make_env()\n CACHED_ENVS[make_env] = env\n return CACHED_ENVS[make_env]\n\n\ndef prepare_params(kwargs):\n # DDPG params\n ddpg_params = dict()\n\n env_name = kwargs['env_name']\n def make_env():\n return gym.make(env_name)\n kwargs['make_env'] = make_env\n tmp_env = cached_make_env(kwargs['make_env'])\n assert hasattr(tmp_env, '_max_episode_steps')\n kwargs['T'] = tmp_env._max_episode_steps\n tmp_env.reset()\n kwargs['max_u'] = np.array(kwargs['max_u']) if type(kwargs['max_u']) == list else kwargs['max_u']\n kwargs['gamma'] = 1. - 1. 
/ kwargs['T']\n if 'lr' in kwargs:\n kwargs['pi_lr'] = kwargs['lr']\n kwargs['Q_lr'] = kwargs['lr']\n del kwargs['lr']\n for name in ['buffer_size', 'hidden', 'layers',\n 'network_class',\n 'polyak', \n 'batch_size', 'Q_lr', 'pi_lr',\n 'norm_eps', 'norm_clip', 'max_u',\n 'action_l2', 'clip_obs', 'scope', 'relative_goals']:\n ddpg_params[name] = kwargs[name]\n kwargs['_' + name] = kwargs[name]\n del kwargs[name]\n kwargs['ddpg_params'] = ddpg_params\n\n return kwargs\n\n\ndef log_params(params, logger=logger):\n for key in sorted(params.keys()):\n logger.info('{}: {}'.format(key, params[key]))\n\n\ndef configure_her(params):\n env = cached_make_env(params['make_env'])\n env.reset()\n def reward_fun(ag_2, g, info): # vectorized\n return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)\n\n # Prepare configuration for HER.\n her_params = {\n 'reward_fun': reward_fun,\n }\n for name in ['replay_strategy', 'replay_k']:\n her_params[name] = params[name]\n params['_' + name] = her_params[name]\n del params[name]\n sample_her_transitions = make_sample_her_transitions(**her_params)\n\n return sample_her_transitions\n\n\ndef simple_goal_subtract(a, b):\n assert a.shape == b.shape\n return a - b\n\n\ndef configure_ddpg(dims, params, reuse=False, use_mpi=True, clip_return=True):\n sample_her_transitions = configure_her(params)\n # Extract relevant parameters.\n gamma = params['gamma']\n rollout_batch_size = params['rollout_batch_size']\n ddpg_params = params['ddpg_params']\n\n input_dims = dims.copy()\n\n # DDPG agent\n env = cached_make_env(params['make_env'])\n env.reset()\n ddpg_params.update({'input_dims': input_dims, # agent takes an input observations\n 'T': params['T'],\n 'clip_pos_returns': True, # clip positive returns\n 'clip_return': (1. / (1. - gamma)) if clip_return else np.inf, # max abs of return\n 'rollout_batch_size': rollout_batch_size,\n 'subtract_goals': simple_goal_subtract,\n 'sample_transitions': sample_her_transitions,\n 'gamma': gamma,\n })\n ddpg_params['info'] = {\n 'env_name': params['env_name'],\n }\n policy = DDPG(reuse=reuse, **ddpg_params, use_mpi=use_mpi)\n return policy\n\n\ndef configure_dims(params):\n env = cached_make_env(params['make_env'])\n env.reset()\n obs, _, _, info = env.step(env.action_space.sample())\n\n dims = {\n 'o': obs['observation'].shape[0],\n 'u': env.action_space.shape[0],\n 'g': obs['desired_goal'].shape[0],\n }\n for key, value in info.items():\n value = np.array(value)\n if value.ndim == 0:\n value = value.reshape(1)\n dims['info_{}'.format(key)] = value.shape[0]\n return dims\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tlambert03/image-demos
[ "a2974bcc7f040fd4d14e659c4cbfeabcf726c707", "a2974bcc7f040fd4d14e659c4cbfeabcf726c707", "a2974bcc7f040fd4d14e659c4cbfeabcf726c707", "a2974bcc7f040fd4d14e659c4cbfeabcf726c707" ]
[ "test-examples/million_points.py", "test-examples/shapely_annotation.py", "helpers/make_mesospim_vip_zarr_files.py", "examples/rendering.py" ]
[ "\"\"\"Test converting an image to a pyramid.\n\"\"\"\n\nimport numpy as np\nimport napari\n\npoints = np.random.randint(100, size=(50_000, 2))\n\nwith napari.gui_qt():\n viewer = napari.view_points(points, face_color='red')\n", "import napari\nimport numpy as np\nfrom shapely.ops import cascaded_union\nfrom sklearn.datasets import make_blobs\nfrom shapely.geometry import Point, Polygon, MultiPolygon\n\n\nwith napari.gui_qt():\n v = napari.Viewer()\n\n @v.bind_key('f')\n def get_selected_points(v):\n union_data = cascaded_union([Polygon(i) for i in v.layers['annotation'].data])\n if isinstance(union_data, type(MultiPolygon())):\n polys = [Polygon(np.array(poly.exterior.coords)) for poly in union_data]\n else:\n polys = [Polygon(np.array(union_data.exterior.coords))]\n \n all_pts = [Point(i) for i in X]\n mask = np.any([[p.within(poly) for p in all_pts] for poly in polys], axis=0)\n sel_pts = v.add_points(X[mask],face_color='red',size=0.1,name='selected')\n \n centers = [[1, 1], [-1, -1], [1, -1]]\n X, y = make_blobs(n_samples=1000, centers=centers, cluster_std=0.6)\n all_points = v.add_points(X, size=0.05)\n\n v.add_shapes(name='annotation', edge_width=0, opacity=0.1)\n", "import numpy as np\nfrom glob import glob\nfrom skimage.io import imread\nimport zarr\nfrom scipy.ndimage import zoom\n\n\nfile_name = 'data/mesospim/ExpA_VIP_ASLM_on.tif'\nprint('loading tif')\ndata = imread(file_name)\nprint('tif loaded')\nprint(data.shape)\n\nfile_name_2 = 'data/mesospim/ExpA_VIP_ASLM_on.zarr'\n\nroot = zarr.open_group(file_name_2, mode='a')\nprint('init')\n\nfor i in range(0, 6):\n print(i, 6, data.shape)\n z1 = root.create_dataset(str(i), shape=data.shape, chunks=(64, 64, 64),\n dtype=np.uint16)\n z1[:] = data\n print('shrinking')\n data = zoom(data, 0.5)\n", "# IPython log file\n\n\nimport numpy as np\nimport toolz as tz\nfrom skimage import data, util\n\n\nblobs_raw = np.stack([\n data.binary_blobs(length=64, n_dim=3, volume_fraction=f)\n for f in np.linspace(0.05, 0.5, 10)\n])\n\nadd_noise = tz.curry(util.random_noise)\nblobs = tz.pipe(\n blobs_raw,\n add_noise(mode='s&p'),\n add_noise(mode='gaussian'),\n add_noise(mode='poisson')\n)\n\nprint(blobs.shape)\nfrom scipy import ndimage as ndi\n\nneighbors3d = ndi.generate_binary_structure(3, connectivity=1)\nneighbors = neighbors3d[np.newaxis, ...]\n\nopening, closing = map(tz.curry, [ndi.grey_opening, ndi.grey_closing])\n\ndenoised = tz.pipe(\n blobs,\n opening(footprint=neighbors),\n closing(footprint=neighbors)\n)\nfrom skimage import filters\n\n# label needs a shape `(3,) * ndim` array\nneighbors2 = np.concatenate(\n (np.zeros_like(neighbors), neighbors, np.zeros_like(neighbors))\n)\n\nbinary = filters.threshold_li(denoised) < denoised\nlabels = ndi.label(binary, structure=neighbors2)[0]\n\n\nimport napari\n\nwith napari.gui_qt():\n viewer = napari.Viewer()\n viewer.add_image(blobs, name='blobs', colormap='magenta')\n viewer.add_image(denoised, name='denoised', colormap='cyan')\n viewer.add_labels(labels, name='labels')\n viewer.dims.ndisplay = 3\n" ]
[ [ "numpy.random.randint" ], [ "numpy.array", "sklearn.datasets.make_blobs" ], [ "scipy.ndimage.zoom" ], [ "scipy.ndimage.label", "scipy.ndimage.generate_binary_structure", "numpy.zeros_like", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
BME-SmartLab/GraphConvWat
[ "6cdcb3cb1bd22eb274c19ad4a45a78e334462e44" ]
[ "evaluation/plot_WDS_topo_with_sensitivity.py" ]
[ "# -*- coding: utf-8 -*-\nimport argparse\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib import collections as mc\nimport matplotlib.pyplot as plt\n\nfrom epynet import Network\n\nsys.path.insert(0, os.path.join('..'))\nfrom utils.graph_utils import get_nx_graph, get_sensitivity_matrix\nfrom utils.SensorInstaller import SensorInstaller\n\n# ----- ----- ----- ----- ----- -----\n# Command line arguments\n# ----- ----- ----- ----- ----- -----\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '--wds',\n default = 'anytown',\n type = str\n )\nparser.add_argument(\n '--nodesize',\n default = 7,\n type = int,\n help = \"Size of nodes on the plot.\"\n )\nparser.add_argument(\n '--perturb',\n action = \"store_true\",\n )\nargs = parser.parse_args()\n\npathToRoot = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\npathToModels = os.path.join(pathToRoot, 'experiments', 'models')\n\nwds = Network(os.path.join('..', 'water_networks', args.wds+'.inp'))\nwds.solve()\n\nprint('Calculating nodal sensitivity to demand change...\\n')\nptb = np.max(wds.junctions.basedemand) / 100\nif args.perturb:\n for pump in wds.pumps:\n pump.speed *= 1.1\n\n for junc in wds.junctions:\n tempo = np.random.rand()\n if tempo < .3:\n junc.basedemand *= 1.1\n elif tempo > .6:\n junc.basedemand *= .9\nS = get_sensitivity_matrix(wds, ptb)\n\ndef get_node_df(elements, get_head=False):\n data = []\n for junc in elements:\n ser = pd.Series({\n 'uid': junc.uid,\n 'x': junc.coordinates[0],\n 'y': junc.coordinates[1],\n })\n if get_head:\n ser['head'] = junc.head\n data.append(ser)\n data = pd.DataFrame(data)\n if get_head:\n data['head'] = (data['head'] - data['head'].min()) / (data['head'].max()-data['head'].min())\n return data\n\ndef get_elem_df(elements, nodes):\n data= []\n df = pd.DataFrame(data)\n if elements:\n for elem in elements:\n ser = pd.Series({\n 'uid': elem.uid,\n 'x1': nodes.loc[nodes['uid'] == elem.from_node.uid, 'x'].values,\n 'y1': nodes.loc[nodes['uid'] == elem.from_node.uid, 'y'].values,\n 'x2': nodes.loc[nodes['uid'] == elem.to_node.uid, 'x'].values,\n 'y2': nodes.loc[nodes['uid'] == elem.to_node.uid, 'y'].values,\n })\n data.append(ser)\n df = pd.DataFrame(data)\n df['x1'] = df['x1'].str[0]\n df['y1'] = df['y1'].str[0]\n df['x2'] = df['x2'].str[0]\n df['y2'] = df['y2'].str[0]\n df['center_x'] = (df['x1']+df['x2']) / 2\n df['center_y'] = (df['y1']+df['y2']) / 2\n df['orient'] = np.degrees(np.arctan((df['y2']-df['y1'])/(df['x2']-df['x1']))) + 90\n return df\n\ndef build_lc_from(df):\n line_collection = []\n for elem_id in df['uid']:\n line_collection.append([\n (df.loc[df['uid'] == elem_id, 'x1'].values[0],\n df.loc[df['uid'] == elem_id, 'y1'].values[0]),\n (df.loc[df['uid'] == elem_id, 'x2'].values[0],\n df.loc[df['uid'] == elem_id, 'y2'].values[0])\n ])\n return line_collection\n\nnodes = get_node_df(wds.nodes, get_head=True)\njuncs = get_node_df(wds.junctions, get_head=True)\ntanks = get_node_df(wds.tanks)\nreservoirs = get_node_df(wds.reservoirs)\npipes = get_elem_df(wds.pipes, nodes)\npumps = get_elem_df(wds.pumps, nodes)\nvalves= get_elem_df(wds.valves, nodes)\npipe_collection = build_lc_from(pipes)\npump_collection = build_lc_from(pumps)\nif not valves.empty:\n valve_collection = build_lc_from(valves)\n\nmew = .5\nfig, ax = plt.subplots()\nlc = mc.LineCollection(pipe_collection, linewidths=mew, color='k')\nax.add_collection(lc)\nlc = mc.LineCollection(pump_collection, linewidths=mew, color='k')\nax.add_collection(lc)\nif 
not valves.empty:\n lc = mc.LineCollection(valve_collection, linewidths=mew, color='k')\n ax.add_collection(lc)\n\nnodal_s = np.sum(np.abs(S), axis=0)\nnodal_s = (nodal_s-nodal_s.min()) / nodal_s.max()\ncolors = []\ncmap = plt.get_cmap('plasma')\nfor idx, junc in juncs.iterrows():\n color = cmap(nodal_s[idx])\n colors.append(color)\n ax.plot(junc['x'], junc['y'], 'ko', mfc=color, mec='k', ms=args.nodesize, mew=mew)\n\nfor _, tank in tanks.iterrows():\n ax.plot(tank['x'], tank['y'], marker=7, mfc='k', mec='k', ms=7, mew=mew)\nfor _, reservoir in reservoirs.iterrows():\n ax.plot(reservoir['x'], reservoir['y'], marker='o', mfc='k', mec='k', ms=3, mew=mew)\nax.plot(pumps['center_x'], pumps['center_y'], 'ko', ms=7, mfc='white', mew=mew)\nfor _, pump in pumps.iterrows():\n ax.plot(pump['center_x'], pump['center_y'],\n marker=(3, 0, pump['orient']),\n color='k',\n ms=5\n )\nax.autoscale()\nax.axis('off')\nplt.tight_layout()\nplt.show()\n" ]
[ [ "matplotlib.pyplot.tight_layout", "numpy.abs", "pandas.Series", "numpy.arctan", "matplotlib.collections.LineCollection", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.subplots", "pandas.DataFrame", "numpy.max", "numpy.random.rand", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
OphirGenomica/proteinFolding
[ "b4b6ea19307e176e58aa9d39ae161003c340416d" ]
[ "srcOld/loss.py" ]
[ "import time\n\nimport matplotlib\nimport numpy as np\n\nmatplotlib.use('Agg')\n\nimport torch\nimport torch.nn as nn\n\nclass LossMultiTargets(nn.Module):\n def __init__(self,loss_fnc=torch.nn.CrossEntropyLoss()):\n super(LossMultiTargets, self).__init__()\n self.loss = loss_fnc\n\n def forward(self, inputs,targets):\n # loss = []\n # for (input,target) in zip(inputs,targets):\n # loss.append(self.loss(input,target))\n loss = 0\n nb = len(targets)\n for (input,target) in zip(inputs,targets):\n loss += self.loss(input,target)\n loss /= nb\n return loss\n\nclass MSELoss(torch.nn.Module):\n def __init__(self):\n super(MSELoss,self).__init__()\n\n def forward(self, input, target):\n #We only want places where the target is larger than zero (remember this is for distances)\n # mask = target > 0\n # result = torch.mean((input[mask] - target[mask])**2)\n # result = torch.norm((input[mask] - target[mask])) ** 2 / torch.norm(target[mask]) ** 2\n nb = target.shape[0]\n result = 0\n for i in range(nb):\n inputi = input[i,:,:]\n targeti = target[i,:,:]\n maski = targeti > 0\n if torch.sum(maski) == 0: #nothing to learn from this one\n continue\n assert torch.norm(targeti[maski]) > 0\n result += torch.norm((inputi[maski] - targeti[maski])) ** 2 / torch.norm(targeti[maski]) ** 2\n\n return result/nb\n\n\ndef pc_translation_rotation_matching(r1,r2):\n '''\n Given two sets of 3D points of equal size. It computes the distance between these two sets of points, when allowing translation and rotation of the point clouds.\n We compute both chirality, and take whichever one has the lowest loss.\n r1 -> Tensor of shape (3,n)\n r2 -> Tensor of shape (3,n)\n '''\n\n #First we translate the two sets, by setting both their centroids to origin\n r1c = r1 - torch.mean(r1, dim=1, keepdim=True)\n r2c = r2 - torch.mean(r2, dim=1, keepdim=True)\n\n H = r1c @ r2c.transpose(0,1)\n t1 = time.time()\n\n U, S, V = torch.svd(H)\n\n t2 = time.time()\n\n d = torch.sign(torch.det(V @ U.transpose(0,1)))\n t3 = time.time()\n tmp = torch.diag_embed(torch.tensor([1, 1, d])).to(device=V.device)\n t4 = time.time()\n R = V @ tmp @ U.transpose(0,1)\n t5 = time.time()\n\n # tmp2 = torch.diag_embed(torch.tensor([1, 1, -d])).to(device=V.device)\n # R2 = V @ tmp2 @ U.transpose(0,1)\n\n r1cr = R @ r1c\n # r1cr2 = R2 @ r1c\n\n assert torch.norm(r2c) > 0\n loss_tr1 = torch.norm(r1cr - r2c) ** 2 / torch.norm(r2c) ** 2\n # loss_tr2 = torch.norm(r1cr2 - r2c) ** 2 / torch.norm(r2c) ** 2\n\n # if loss_tr1 < loss_tr2:\n loss_tr = loss_tr1\n # pred = r1cr.squeeze().cpu().detach().numpy()\n # else:\n # pred = r1cr2.squeeze().cpu().detach().numpy()\n # loss_tr = loss_tr2\n # target = r2c.squeeze().cpu().detach().numpy()\n print(\"{:2.4f},{:2.4f},{:2.4f},{:2.4f}\".format(t2-t1,t3-t2,t4-t3,t5-t4))\n return loss_tr#, pred, target\n\n\ndef loss_tr_wrapper(r1,r2):\n '''\n\n Note that any point with r2 coordinates set to zero is considered masked and will not be included in the calculation. (so use r1 for prediction and r2 for target, and just make sure no target point are accidently zero. 
Remember the point cloud is translation invariant, so you can just translate all points if needed)\n '''\n\n nb = r1.shape[0]\n loss_tr = 0\n for i in range(nb):\n r1i = r1[i, :, :]\n r2i = r2[i,:,:]\n mask = (r2i != 0).reshape(3, -1)\n mask = torch.sum(mask,dim=0) > 0\n r1i = r1i[:,mask]\n r2i = r2i[:,mask]\n # loss_tri, predi, targeti = pc_translation_rotation_matching(r1i, r2i)\n loss_tri = pc_translation_rotation_matching(r1i, r2i)\n loss_tr += loss_tri\n loss_tr /= nb\n return loss_tr#, predi, targeti\n\ndef loss_tr(r1,r2, return_coords=False):\n t1 = time.time()\n loss_tr = 0\n mask = (r2 != 0).reshape(r2.shape)\n mask = (torch.sum(mask,dim=1) > 0).unsqueeze(1)\n mask = mask.repeat(1,3,1)\n batch_mask = torch.sum(mask,dim=(1,2)) > 0\n\n r1 = r1[batch_mask,:,:]\n r2 = r2[batch_mask,:,:]\n mask = mask[batch_mask,:,:]\n\n nb = r1.shape[0]\n\n\n t2 = time.time()\n #First we translate the two sets, by setting both their centroids to origin\n r1c = torch.empty_like(r1)\n r2c = torch.empty_like(r2)\n for i in range(nb):\n r1c[i, :, :] = r1[i, :, :] - torch.mean(r1[i, mask[i, :, :]].reshape(3, -1), dim=1, keepdim=True)\n r2c[i, :, :] = r2[i, :, :] - torch.mean(r2[i, mask[i, :, :]].reshape(3, -1), dim=1, keepdim=True)\n t3 = time.time()\n r1c = r1c * mask\n r2c = r2c * mask\n\n H = torch.bmm(r1c,r2c.transpose(1,2))\n # try:\n # U, S, V = torch.svd(H)\n # except: # torch.svd may have convergence issues for GPU and CPU.\n # U, S, V = torch.svd(H + 1e-4 * H.mean() * torch.rand(H.shape,device=H.device))\n U, S, V = torch.svd(H)\n t4 = time.time()\n\n d = torch.sign(torch.det(torch.bmm(V, U.transpose(1,2))))\n t5 = time.time()\n\n tt=torch.tensor([[1]*nb, [1]*nb, d]).transpose(0,1)\n tmp = torch.diag_embed(tt).to(device=V.device)\n t6 = time.time()\n\n R = torch.bmm(V, torch.bmm(tmp, U.transpose(1,2)))\n\n r1cr = torch.bmm(R, r1c)\n\n loss_tr = torch.mean(torch.norm(r1cr - r2c, dim=(1, 2)) ** 2 / torch.norm(r2c, dim=(1, 2)) ** 2)\n t7 = time.time()\n # print(\"{:2.4f},{:2.4f},{:2.4f},{:2.4f},{:2.4f},{:2.4f}\".format(t2-t1,t3-t2,t4-t3,t5-t4,t6-t5,t7-t6))\n if return_coords:\n pred = r1cr[-1,:,:].squeeze().cpu().detach().numpy()\n target = r2c[-1,:,:].squeeze().cpu().detach().numpy()\n return loss_tr, pred, target\n else:\n return loss_tr" ]
[ [ "torch.mean", "torch.nn.CrossEntropyLoss", "torch.empty_like", "torch.svd", "torch.norm", "matplotlib.use", "torch.diag_embed", "torch.sum", "torch.tensor", "torch.bmm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
johnwu0604/pytorch-tutorial
[ "bdbc283a0b79620d9b582f1c4d2c2220a853b856" ]
[ "tutorials/02-intermediate/recurrent_neural_network/main.py" ]
[ "import torch \nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\n\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# Hyper-parameters\nsequence_length = 28\ninput_size = 28\nhidden_size = 128\nnum_layers = 2\nnum_classes = 10\nbatch_size = 100\nnum_epochs = 2\nlearning_rate = 0.01\n\n# MNIST dataset\ntrain_dataset = torchvision.datasets.MNIST(root='../../data/',\n train=True, \n transform=transforms.ToTensor(),\n download=True)\n\ntest_dataset = torchvision.datasets.MNIST(root='../../data/',\n train=False, \n transform=transforms.ToTensor())\n\n# Data loader\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size, \n shuffle=True)\n\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=batch_size, \n shuffle=False)\n\n# Recurrent neural network (many-to-one)\nclass RNN(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, num_classes):\n super(RNN, self).__init__()\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)\n self.fc = nn.Linear(hidden_size, num_classes)\n \n def forward(self, x):\n # Set initial hidden and cell states \n h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) \n c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)\n \n # Forward propagate LSTM\n out, _ = self.lstm(x, (h0, c0)) # out: tensor of shape (batch_size, seq_length, hidden_size)\n \n # Decode the hidden state of the last time step\n out = self.fc(out[:, -1, :])\n return out\n\nmodel = RNN(input_size, hidden_size, num_layers, num_classes).to(device)\n\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n# Train the model\ntotal_step = len(train_loader)\nfor epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_loader):\n images = images.reshape(-1, sequence_length, input_size).to(device)\n labels = labels.to(device)\n \n # Forward pass\n outputs = model(images)\n loss = criterion(outputs, labels)\n \n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n if (i+1) % 100 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' \n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n\n# Test the model\nmodel.eval()\nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.reshape(-1, sequence_length, input_size).to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total)) \n\n# Save the model checkpoint\ntorch.save(model.state_dict(), './outputs/model.ckpt')\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.max", "torch.nn.LSTM", "torch.utils.data.DataLoader", "torch.nn.Linear", "torch.no_grad", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JoseHernandez9094/CohortLexicase
[ "5179a3c0db6dcf0c2cae79fcfd08b4b919c9269d" ]
[ "Summarize/solution_timeseries_EVAL.py" ]
[ "#python3\r\n#This script will make csv so that graph_timeseries.py can create plots with them!\r\n\r\nimport pandas as p\r\n\r\nMAX_EVAL = 512*512*1000\r\ndf = p.read_csv('../Data/Raw/min_programs__eval_262144000.csv')\r\ntreat = {}\r\nTREATMENT = 'treatment'\r\nFOUND = 'solution_found'\r\nUPDATE = 'update_found'\r\nEVAL = 'evaluation_found'\r\nPOS_UPDATE = 0\r\nPOS_EVAL = 1\r\n\r\nfor i,row in df.iterrows():\r\n #If we do not have the treatment in our data dict\r\n if row[TREATMENT] in treat:\r\n if row[FOUND] == True:\r\n #If the row has found a solution store gen and eval\r\n tup = tuple([float(row[UPDATE]), float(row[EVAL])])\r\n treat[row[TREATMENT]].append(tup)\r\n else:\r\n if row[FOUND] == True: \r\n temp = [tuple([float(row[UPDATE]), float(row[EVAL])])]\r\n treat[row[TREATMENT]] = temp\r\n\r\n#Will gather data by problem into CN,CS lists for generations.\r\n#TODO\r\nK_PROB = 0\r\nK_SEL = 1\r\nK_CN = 2\r\nK_CS = 3\r\ndata_gen = {}\r\nfor k,val in treat.items():\r\n k = k[8:].split('__')\r\n gens = [x[POS_EVAL] for x in val]\r\n gens.sort()\r\n dimen = k[K_CN] + '-' + k[K_CS]\r\n prob = k[K_PROB]\r\n sele = k[K_SEL]\r\n\r\n #check if problem exists within the first layer of dict\r\n if prob not in data_gen:\r\n #If not in the dict, create an empty one for it\r\n data_gen[prob] = {}\r\n\r\n #Check if selection not within the second layer\r\n if sele not in data_gen[prob]:\r\n #Second level is the selection scheme\r\n data_gen[prob][sele] = {}\r\n #Third level is the dimensionality\r\n data_gen[prob][sele][dimen] = gens\r\n\r\n #Selection is within the second layer\r\n else:\r\n #Third level is the dimensionality\r\n data_gen[prob][sele][dimen] = gens\r\n\r\n else:\r\n #Check if selection not within the second layer\r\n if sele not in data_gen[prob]:\r\n #Second level is the selection scheme\r\n data_gen[prob][sele] = {}\r\n #Third level is the dimensionality\r\n data_gen[prob][sele][dimen] = gens\r\n\r\n #Selection is within the second layer\r\n else:\r\n #Third level is the dimensionality\r\n data_gen[prob][sele][dimen] = gens\r\n\r\n#Go through each problem\r\nfor prob in data_gen:\r\n #Go through each selection scheme\r\n for sele in data_gen[prob]:\r\n #Go through each dimensionality\r\n for dimen in data_gen[prob][sele]:\r\n raw = []\r\n raw.append(tuple([0,0]))\r\n d = data_gen[prob][sele][dimen]\r\n #Create the coordinates\r\n for i in range(0, len(d)):\r\n # raw.append(tuple([d[i], raw[len(raw)-1][1]]))\r\n raw.append(tuple([d[i], raw[len(raw)-1][1]+1]))\r\n raw.append([MAX_EVAL, raw[len(raw)-1][1]])\r\n\r\n gen = [x[0] for x in raw]\r\n cnt = [x[1] for x in raw]\r\n raw_data = {'Evaluation': gen, 'Solution_Count': cnt}\r\n df = p.DataFrame(raw_data, columns = ['Evaluation', 'Solution_Count'])\r\n fname = prob + '__' + sele[4:] + '__' + dimen + '.csv'\r\n df.to_csv('../Data/Polished/Evaluations/'+fname)" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
ybubnov/dnsthreat
[ "75a3298379c8b48aeea6bae6c5c31a7d5e9fe315", "75a3298379c8b48aeea6bae6c5c31a7d5e9fe315" ]
[ "deeplookup/env.py", "deeplookup/ts.py" ]
[ "from enum import Enum\n\nimport gym\nimport numpy as np\nfrom gym import spaces\nfrom gym.utils import seeding\n\n\nclass Action(Enum):\n decrease_attention = 0\n increase_attention = 1\n access_detector = 2\n isolate_node = 3\n forget_node = 4\n\n\nclass State(Enum):\n healthy = 0\n infected = 1\n\n\nclass MalwareEnv(gym.Env):\n \"\"\"\n Observations:\n Type: Box(2)\n Num Observation Min Max\n 0 Attention Level 0.05 1.0\n 1 Malware Rate 0.0 1.0\n\n Actions:\n Type: Discrete(5)\n Num Action\n 0 Decrease attention\n 1 Increase attention\n 2 Access detector\n 3 Isolate node\n 4 Forget node\n\n Reward:\n Reward of -0.1 is awarded for accessing detector.\n Reward of -0.2 is awarded for decreasing attention.\n Reward of -0.8 is awarded for increasing attention.\n Reward of 1 is awarded for isolation of infected node.\n Reward of 1 is awarded for forgeting healthy node.\n Reward of -1 is awarded for isolation of healthy node.\n Reward of -1 if awarded for forgetting infected node.\n\n Starting State:\n Attention level is set between [0.1, 0.2]\n Actual state is set either to 'healthy' or 'infected'.\n\n Episode Termination:\n Node is either isolated of forgotten.\n Episode length is greater than 100.\n \"\"\"\n\n def __init__(self, malware_prob: float = 0.9, seed: int = 100, log: bool = False):\n self.min_attention = 0.05\n self.max_attention = 1.0\n\n self.min_rate = 0.0\n self.max_rate = 1.0\n\n self.attention_inc = 0.05\n\n self.low = np.array([self.min_attention, self.min_rate], dtype=np.float32)\n self.high = np.array([self.max_attention, self.max_rate], dtype=np.float32)\n\n self.action_space = spaces.Discrete(5)\n self.observation_space = spaces.Box(self.low, self.high, dtype=np.float32)\n\n self.malware_prob = malware_prob\n self.log = log\n\n # (attention, health)\n self.state = (None, None, None)\n self.latest_action = None\n self.actions = []\n self.seed(seed)\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def start_step_policy(self, observation):\n attention, malware_rate = observation\n if attention > self.min_attention:\n return Action.access_detector.value\n return Action.increase_attention.value\n\n def step(self, action):\n if isinstance(action, np.ndarray):\n action = np.argmax(action)\n\n assert self.action_space.contains(action), f\"{action} ({type(action)}) invalid\"\n action = Action(action)\n\n if self.log:\n self.actions.append(action)\n\n attention, malware_rate, health = self.state\n st = State(health)\n\n if action == Action.decrease_attention:\n attention = max(self.min_attention, attention - self.attention_inc)\n if action == Action.increase_attention:\n attention = min(self.max_attention, attention + self.attention_inc)\n if action == Action.access_detector:\n # Accessing a detector changes malware rate.\n #\n # When the node is healthy, there is a `1 - malware_prob` probability\n # to observe malware. 
And malware rate depends on the attention level.\n #\n # Throw a \"dice\" in order to calculate the malware rate.\n prob = self.np_random.uniform()\n T = (1 - self.malware_prob) if st == State.healthy else self.malware_prob\n\n mu = np.average([0, attention])\n # sigma = 0.2\n malware_rate = 0 if prob > T else self.np_random.normal(mu, 0.01)\n malware_rate = max(self.min_rate, malware_rate)\n malware_rate = min(self.max_rate, malware_rate)\n\n # Agent does not observe the node health directly, only through\n # malware rate.\n self.state = np.array([attention, malware_rate, health])\n self.latest_action = action\n\n observation = np.array([attention, malware_rate])\n reward = self.compute_reward(health, action)\n done = action in {Action.isolate_node, Action.forget_node}\n\n return observation, reward, done, {} # {\"state\": self.state}\n\n def compute_reward(self, health, action):\n if action == Action.decrease_attention:\n return -0.2\n if action == Action.increase_attention:\n return -0.8\n if action == Action.access_detector:\n return -0.1\n if action == Action.isolate_node:\n return 1 * (health * 2 - 1)\n if action == Action.forget_node:\n return -1 * (health * 2 - 1)\n return 0\n\n def reset(self):\n # Node if either healthy (0), or infected (1), when node is infected,\n # agent observes malware requests depending on the attention level.\n health = self.np_random.choice([0, 1])\n attention = self.min_attention\n malware_rate = 0\n\n self.state = np.array([attention, malware_rate, health])\n return np.array([attention, malware_rate])\n\n def render(self, mode=\"human\"):\n attention, malware_rate, infected = self.state\n print(f\"\\tattention: {attention} - malware rate: {malware_rate}\", end=\" - \")\n print(f\"health: {'infected' if infected else 'healthy'}\", end=\" - \")\n print(f\"action: {self.latest_action}\")\n\n def close(self):\n pass\n", "import math\nfrom dataclasses import dataclass, field\nfrom typing import NamedTuple\n\nimport numpy as np\n\n\nclass N(NamedTuple):\n \"\"\"A node in the hidden Markov model.\"\"\"\n\n ID: str\n prob: float\n klass: float\n ts: int = 0\n\n\n@dataclass\nclass Rsamp:\n k: float = field(default=1.35)\n La: float = field(default=1.19)\n _sigma: float = field(default=0.0, init=False)\n _window: float = field(default=0.1, init=False)\n\n def emission_prob(self, n: N) -> float:\n c = 1 / (self._sigma * math.sqrt(2 * math.pi))\n pw = abs(n.klass - n.prob) / self._sigma\n return c * math.exp(-(pw ** 2))\n\n def transition_prob(self, n1: N, n2: N) -> float:\n kls = 1 - (n1.klass ^ n2.klass)\n delta = abs(n1.ts - n2.ts) / math.exp(self.k * kls)\n return self.La * math.exp(-delta)\n\n def max_prob(self, u, v, w):\n prob0 = self.transition_prob(u, v) * self.emission_prob(v)\n prob1 = self.transition_prob(u, w) * self.emission_prob(w)\n return (v, prob0) if prob0 >= prob1 else (w, prob1)\n\n def search(self, x, s):\n joint_prob = self.emission_prob(s)\n records = len(x)\n y_pred = np.zeros(records)\n _Node = s\n\n for i in range(records):\n u = _Node\n v = N(\"a\", x[i], klass=0, ts=i + 1)\n w = N(\"b\", x[i], klass=1, ts=i + 1)\n _Node, new_prob = self.max_prob(u, v, w)\n y_pred[i] = _Node.klass\n joint_prob *= new_prob\n\n return y_pred\n\n def predict_proba(self, xx):\n num_samples = len(xx)\n res = np.zeros(shape=(num_samples, 2))\n for i in range(num_samples):\n pred = self.predict(xx[i])\n proba = (np.mean(pred) / self._window) % 1.0\n res[i][0], res[i][1] = 1 - proba, proba\n return res\n\n def predict(self, x):\n self._sigma = 
np.sqrt(np.std(x))\n s = N(\"S\", 0.5, 0, 0)\n return self.search(x, s)\n" ]
[ [ "numpy.array", "numpy.argmax", "numpy.average" ], [ "numpy.std", "numpy.zeros", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RAJAGOPALAN-GANGADHARAN/PlasmaPy
[ "6df9583cc47375687a07300c0aa11ba31634d770" ]
[ "plasmapy/formulary/tests/test_parameters.py" ]
[ "\"\"\"Tests for functions that calculate plasma parameters.\"\"\"\n\nimport numpy as np\nimport pytest\n\nfrom astropy import units as u\nfrom astropy.constants import m_e, m_p\nfrom astropy.tests.helper import assert_quantity_allclose\n\nfrom plasmapy.formulary.parameters import (\n Alfven_speed,\n betaH_,\n Bohm_diffusion,\n cs_,\n cwp_,\n DB_,\n Debye_length,\n Debye_number,\n gyrofrequency,\n gyroradius,\n Hall_parameter,\n inertial_length,\n ion_sound_speed,\n lambdaD_,\n lower_hybrid_frequency,\n magnetic_energy_density,\n magnetic_pressure,\n mass_density,\n nD_,\n oc_,\n plasma_frequency,\n pmag_,\n pth_,\n rc_,\n rho_,\n rhoc_,\n thermal_pressure,\n thermal_speed,\n ub_,\n upper_hybrid_frequency,\n va_,\n wc_,\n wlh_,\n wp_,\n wuh_,\n)\nfrom plasmapy.particles import Particle\nfrom plasmapy.particles.exceptions import InvalidParticleError\nfrom plasmapy.utils.exceptions import (\n PhysicsError,\n PhysicsWarning,\n PlasmaPyFutureWarning,\n RelativityError,\n RelativityWarning,\n)\nfrom plasmapy.utils.pytest_helpers import assert_can_handle_nparray\n\nB = 1.0 * u.T\nZ = 1\nion = \"p\"\nm_i = m_p\nn_i = 5e19 * u.m ** -3\nn_e = Z * 5e19 * u.m ** -3\nrho = n_i * m_i + n_e * m_e\nT_e = 1e6 * u.K\nT_i = 1e6 * u.K\nk_1 = 3e1 * u.m ** -1\nk_2 = 3e7 * u.m ** -1\n\nB_arr = np.array([0.001, 0.002]) * u.T\nB_nanarr = np.array([0.001, np.nan]) * u.T\nB_allnanarr = np.array([np.nan, np.nan]) * u.T\n\nrho_arr = np.array([5e-10, 2e-10]) * u.kg / u.m ** 3\nrho_infarr = np.array([np.inf, 5e19]) * u.m ** -3\nrho_negarr = np.array([-5e19, 6e19]) * u.m ** -3\n\nT_arr = np.array([1e6, 2e6]) * u.K\nT_nanarr = np.array([1e6, np.nan]) * u.K\nT_nanarr2 = np.array([np.nan, 2e6]) * u.K\nT_allnanarr = np.array([np.nan, np.nan]) * u.K\nT_negarr = np.array([1e6, -5151.0]) * u.K\n\nV = 25.2 * u.m / u.s\nV_arr = np.array([25, 50]) * u.m / u.s\nV_nanarr = np.array([25, np.nan]) * u.m / u.s\nV_allnanarr = np.array([np.nan, np.nan]) * u.m / u.s\n\nmu = m_p.to(u.u).value\n\n\nclass Test_mass_density:\n r\"\"\"Test the mass_density function in parameters.py.\"\"\"\n\n @pytest.mark.parametrize(\n \"args, kwargs, conditional\",\n [\n ((-1 * u.kg * u.m ** -3, \"He\"), {}, pytest.raises(ValueError)),\n ((-1 * u.m ** -3, \"He\"), {}, pytest.raises(ValueError)),\n ((\"not a Quantity\", \"He\"), {}, pytest.raises(TypeError)),\n ((1 * u.m ** -3,), {}, pytest.raises(TypeError)),\n ((1 * u.J, \"He\"), {}, pytest.raises(u.UnitTypeError)),\n ((1 * u.m ** -3, None), {}, pytest.raises(TypeError)),\n (\n (1 * u.m ** -3, \"He\"),\n {\"z_ratio\": \"not a ratio\"},\n pytest.raises(TypeError),\n ),\n ],\n )\n def test_raises(self, args, kwargs, conditional):\n with conditional:\n mass_density(*args, **kwargs)\n\n @pytest.mark.parametrize(\n \"args, kwargs, expected\",\n [\n ((1.0 * u.g * u.m ** -3, \"\"), {}, 1.0e-3 * u.kg * u.m ** -3),\n ((5.0e12 * u.cm ** -3, \"He\"), {}, 3.32323849e-8 * u.kg * u.m ** -3),\n (\n (5.0e12 * u.cm ** -3, Particle(\"He\")),\n {},\n 3.32323849e-8 * u.kg * u.m ** -3,\n ),\n (\n (5.0e12 * u.cm ** -3, \"He\"),\n {\"z_ratio\": 0.5},\n 1.66161925e-08 * u.kg * u.m ** -3,\n ),\n (\n (5.0e12 * u.cm ** -3, \"He\"),\n {\"z_ratio\": -0.5},\n 1.66161925e-08 * u.kg * u.m ** -3,\n ),\n ],\n )\n def test_values(self, args, kwargs, expected):\n assert np.isclose(mass_density(*args, **kwargs), expected)\n\n def test_handle_nparrays(self):\n \"\"\"Test for ability to handle numpy array quantities\"\"\"\n assert_can_handle_nparray(mass_density)\n\n\n# Assertions below that are in CGS units with 2-3 significant digits\n# are 
generally from the NRL Plasma Formulary.\n\n\nclass TestAlfvenSpeed:\n \"\"\"Test `~plasmapy.formulary.parameters.Alfven_speed`.\"\"\"\n\n @pytest.mark.parametrize(\"alias\", [va_])\n def test_aliases(self, alias):\n assert alias is Alfven_speed\n\n @pytest.mark.parametrize(\n \"args, kwargs, _error\",\n [\n # scenarios that raise RelativityError\n ((10 * u.T, 1.0e-10 * u.kg * u.m ** -3), {}, RelativityError),\n ((np.inf * u.T, 1 * u.m ** -3), {\"ion\": \"p\"}, RelativityError),\n ((-np.inf * u.T, 1 * u.m ** -3), {\"ion\": \"p\"}, RelativityError),\n #\n # scenarios that raise InvalidParticleError\n ((1 * u.T, 5e19 * u.m ** -3), {\"ion\": \"spacecats\"}, InvalidParticleError),\n #\n # scenarios that raise TypeError\n ((\"not a Bfield\", 1.0e-10 * u.kg * u.m ** -3), {}, TypeError),\n ((10 * u.T, \"not a density\"), {}, TypeError),\n ((10 * u.T, 5), {\"ion\": \"p\"}, TypeError),\n ((1 * u.T, 1.0e18 * u.m ** -3), {\"ion\": [\"He\"]}, TypeError),\n ((1 * u.T, 1.0e18 * u.m ** -3), {\"ion\": \"He\", \"z_mean\": \"nope\"}, TypeError),\n #\n # scenarios that raise UnitTypeError\n ((1 * u.T, 1.0e18 * u.cm), {\"ion\": \"He\"}, u.UnitTypeError),\n ((1 * u.T, 5 * u.m ** -2), {\"ion\": \"p\"}, u.UnitTypeError),\n ((1 * u.cm, 1.0e18 * u.m ** -3), {\"ion\": \"He\"}, u.UnitTypeError),\n ((5 * u.A, 5e19 * u.m ** -3), {\"ion\": \"p\"}, u.UnitTypeError),\n #\n # scenarios that raise ValueError\n ((1 * u.T, -1.0e18 * u.m ** -3), {\"ion\": \"He\"}, ValueError),\n (\n (np.array([5, 6, 7]) * u.T, np.array([5, 6]) * u.m ** -3),\n {\"ion\": \"p\"},\n ValueError,\n ),\n (\n (np.array([0.001, 0.002]) * u.T, np.array([-5e19, 6e19]) * u.m ** -3),\n {\"ion\": \"p\"},\n ValueError,\n ),\n ],\n )\n def test_raises(self, args, kwargs, _error):\n \"\"\"Test scenarios that raise exceptions or warnings.\"\"\"\n with pytest.raises(_error):\n Alfven_speed(*args, **kwargs)\n\n @pytest.mark.parametrize(\n \"args, kwargs, expected, isclose_kw, _warning\",\n [\n # scenarios that issue RelativityWarning\n (\n (5 * u.T, 5e19 * u.m ** -3),\n {\"ion\": \"H\"},\n 15413707.39,\n {},\n RelativityWarning,\n ),\n (\n (5 * u.T, 5e19 * u.m ** -3),\n {\"ion\": \"H+\"},\n 15413707.39,\n {\"rtol\": 3.0e-4},\n RelativityWarning,\n ),\n (\n (5 * u.T, 5e19 * u.m ** -3),\n {\"ion\": \"p\"},\n 15413707.39,\n {\"rtol\": 4.0e-4},\n RelativityWarning,\n ),\n #\n # scenarios that issue UnitsWarning\n ((0.5, 1.0e18 * u.m ** -3), {\"ion\": \"He\"}, 5470657.93, {}, u.UnitsWarning),\n ],\n )\n def test_warns(self, args, kwargs, expected, isclose_kw, _warning):\n \"\"\"Test scenarios that issue warnings\"\"\"\n with pytest.warns(_warning):\n val = Alfven_speed(*args, **kwargs)\n assert isinstance(val, u.Quantity)\n assert val.unit == u.m / u.s\n assert np.isclose(val.value, expected, **isclose_kw)\n\n @pytest.mark.parametrize(\n \"args, kwargs, expected, isclose_kw\",\n [\n (\n (1 * u.T, 1e-8 * u.kg * u.m ** -3),\n {\"ion\": \"p\"},\n 8920620.58 * u.m / u.s,\n {\"rtol\": 1e-6},\n ),\n (\n (1 * u.T, 1e-8 * u.kg * u.m ** -3),\n {},\n 8920620.58 * u.m / u.s,\n {\"rtol\": 1e-6},\n ),\n (\n (0.05 * u.T, 1e18 * u.m ** -3),\n {\"ion\": \"He\"},\n Alfven_speed(0.05 * u.T, 6.64738793e-09 * u.kg * u.m ** -3),\n {},\n ),\n (\n (0.05 * u.T, 1e18 * u.m ** -3),\n {\"ion\": \"He+\"},\n Alfven_speed(0.05 * u.T, 1e18 * u.m ** -3, ion=\"He\"),\n {\"rtol\": 7e-5},\n ),\n (\n (0.05 * u.T, 1e18 * u.m ** -3),\n {\"ion\": \"He\", \"z_mean\": 2},\n Alfven_speed(0.05 * u.T, 1e18 * u.m ** -3, ion=\"He +2\"),\n {\"rtol\": 1.4e-4},\n ),\n (\n (0.05 * u.T, 1e18 * u.m ** -3),\n {\"ion\": 
Particle(\"He+\")},\n Alfven_speed(0.05 * u.T, 1e18 * u.m ** -3, ion=\"He+\"),\n {},\n ),\n (\n ([0.001, 0.002] * u.T, 5e-10 * u.kg * u.m ** -3),\n {},\n [\n va_(0.001 * u.T, 5e-10 * u.kg * u.m ** -3).value,\n va_(0.002 * u.T, 5e-10 * u.kg * u.m ** -3).value,\n ]\n * (u.m / u.s),\n {},\n ),\n (\n ([0.001, 0.002] * u.T, [5e-10, 2e-10] * u.kg * u.m ** -3),\n {},\n [\n va_(0.001 * u.T, 5e-10 * u.kg * u.m ** -3).value,\n va_(0.002 * u.T, 2e-10 * u.kg * u.m ** -3).value,\n ]\n * (u.m / u.s),\n {},\n ),\n (\n (0.001 * u.T, [1.0e18, 2e18] * u.m ** -3),\n {\"ion\": \"p\"},\n [\n va_(0.001 * u.T, 1e18 * u.m ** -3, ion=\"p\").value,\n va_(0.001 * u.T, 2e18 * u.m ** -3, ion=\"p\").value,\n ]\n * (u.m / u.s),\n {},\n ),\n ],\n )\n def test_values(self, args, kwargs, expected, isclose_kw):\n \"\"\"Test expected values.\"\"\"\n assert np.allclose(Alfven_speed(*args, **kwargs), expected, **isclose_kw)\n\n @pytest.mark.parametrize(\n \"args, kwargs, nan_mask\",\n [\n ((np.nan * u.T, 1 * u.kg * u.m ** -3), {}, []),\n ((0.001 * u.T, np.nan * u.kg * u.m ** -3), {}, []),\n (([np.nan, 0.001] * u.T, 1 * u.kg * u.m ** -3), {}, [True, False]),\n (\n (0.001 * u.T, [np.nan, 1.0, np.nan] * u.kg * u.m ** -3),\n {},\n [True, False, True],\n ),\n (([np.nan, 0.001] * u.T, [1, np.nan] * u.kg * u.m ** -3), {}, [True, True]),\n (\n (0.001 * u.T, [np.nan, 1e18, np.nan] * u.m ** -3),\n {\"ion\": \"Ar+\"},\n [True, False, True],\n ),\n ],\n )\n def test_nan_values(self, args, kwargs, nan_mask):\n \"\"\"Input scenarios that leat to `numpy.nan` values being returned.\"\"\"\n val = Alfven_speed(*args, **kwargs)\n if np.isscalar(val.value):\n assert np.isnan(val)\n else:\n nan_arr = np.isnan(val)\n assert np.all(nan_arr[nan_mask])\n assert np.all(np.logical_not(nan_arr[np.logical_not(nan_mask)]))\n\n def test_handle_nparrays(self):\n \"\"\"Test for ability to handle numpy array quantities\"\"\"\n assert_can_handle_nparray(Alfven_speed)\n\n\nclass Test_Ion_Sound_Speed:\n r\"\"\"Test the ion_sound_speed function in parameters.py.\"\"\"\n\n @pytest.mark.parametrize(\n \"args, kwargs, expected, isclose_kw\",\n [\n (\n (),\n {\n \"T_i\": 1.3232 * u.MK,\n \"T_e\": 1.831 * u.MK,\n \"ion\": \"p\",\n \"gamma_e\": 1,\n \"gamma_i\": 3,\n },\n 218816.06086407552 * (u.m / u.s),\n {},\n ),\n (\n (1.831 * u.MK, 1.3232 * u.MK, \"p\"),\n {},\n 218816.06086407552 * (u.m / u.s),\n {},\n ), # Test that function call without keyword argument works correctly\n (\n (),\n {\n \"T_i\": 1.3232 * u.MK,\n \"T_e\": 1.831 * u.MK,\n \"n_e\": n_e,\n \"k\": k_1,\n \"ion\": \"p\",\n \"gamma_e\": 1,\n \"gamma_i\": 3,\n },\n 218816.06086407552 * (u.m / u.s),\n {},\n ),\n (\n (),\n {\n \"T_i\": 1.3232 * u.MK,\n \"T_e\": 1.831 * u.MK,\n \"n_e\": n_e,\n \"k\": k_2,\n \"ion\": \"p\",\n \"gamma_e\": 1,\n \"gamma_i\": 3,\n },\n 552.3212936293337 * (u.m / u.s),\n {},\n ),\n (\n (),\n {\n \"T_i\": 0.88 * u.MK,\n \"T_e\": 1.28 * u.MK,\n \"n_e\": n_e,\n \"k\": 0 * u.m ** -1,\n \"ion\": \"p\",\n \"gamma_e\": 1.2,\n \"gamma_i\": 3.4,\n },\n 193328.52857788358 * (u.m / u.s),\n {},\n ),\n (\n (),\n {\"T_i\": T_i, \"T_e\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p+\"},\n ion_sound_speed(T_i=T_i, T_e=0 * u.K, n_e=n_e, k=k_1, ion=\"p+\").value\n * (u.m / u.s),\n {},\n ),\n (\n (),\n {\n \"T_e\": 1.2e6 * u.K,\n \"T_i\": 0 * u.K,\n \"n_e\": n_e,\n \"k\": 0 * u.m ** -1,\n \"z_mean\": 0.8,\n \"ion\": \"p\",\n },\n 89018.09 * (u.m / u.s),\n {\"atol\": 0.0, \"rtol\": 1e-6},\n ), # testing for user input z_mean\n ],\n )\n def test_values(self, args, kwargs, expected, isclose_kw):\n 
assert np.isclose(ion_sound_speed(*args, **kwargs), expected, **isclose_kw)\n\n # case when Z=1 is assumed\n # assert ion_sound_speed(T_i=T_i, T_e=T_e, ion='p+') == ion_sound_speed(T_i=T_i, T_e=T_e,\n # ion='H-1')\n\n @pytest.mark.parametrize(\n \"kwargs1, kwargs2, _warning\",\n [\n ({\"T_i\": T_i, \"T_e\": T_e, \"n_e\": n_e, \"ion\": \"p\"}, {}, PhysicsWarning),\n ({\"T_i\": T_i, \"T_e\": T_e, \"k\": k_1, \"ion\": \"p\"}, {}, PhysicsWarning),\n ({\"T_i\": 5e11 * u.K, \"T_e\": 0 * u.K, \"ion\": \"p\"}, {}, RelativityWarning),\n (\n {\"T_e\": 1.2e6, \"T_i\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"},\n {\"T_e\": 1.2e6 * u.K, \"T_i\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"},\n u.UnitsWarning,\n ),\n (\n {\"T_i\": 1.3e6, \"T_e\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"},\n {\"T_i\": 1.3e6 * u.K, \"T_e\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"},\n u.UnitsWarning,\n ),\n ],\n )\n def test_warns(self, kwargs1, kwargs2, _warning):\n with pytest.warns(_warning):\n val = ion_sound_speed(**kwargs1)\n if kwargs2 != {}:\n val == ion_sound_speed(**kwargs2)\n\n @pytest.mark.parametrize(\n \"args, kwargs, _error\",\n [\n (\n (),\n {\n \"T_i\": T_i,\n \"T_e\": T_e,\n \"n_e\": n_e,\n \"k\": k_1,\n \"ion\": \"p\",\n \"gamma_i\": np.inf,\n },\n RelativityError,\n ),\n (\n (),\n {\n \"T_i\": np.array([5, 6, 5]) * u.K,\n \"T_e\": np.array([3, 4]) * u.K,\n \"n_e\": np.array([5, 6, 5]) * u.m ** -3,\n \"k\": np.array([3, 4]) * u.m ** -3,\n \"ion\": \"p\",\n },\n u.UnitTypeError,\n ),\n ((5 * u.T), {\"ion\": \"p\"}, TypeError), # Is this test right??????\n ((), {\"ion\": \"p\"}, TypeError),\n (\n (),\n {\"T_i\": T_i, \"T_e\": 0 * u.K, \"gamma_i\": 0.9999, \"ion\": \"p\"},\n PhysicsError,\n ),\n (\n (),\n {\"T_i\": T_i, \"T_e\": 0 * u.K, \"gamma_e\": 0.9999, \"ion\": \"p\"},\n PhysicsError,\n ),\n (\n (),\n {\"T_i\": T_i, \"T_e\": 0 * u.K, \"gamma_e\": \"sdjklsf\", \"ion\": \"p\"},\n TypeError,\n ),\n (\n (),\n {\"T_i\": T_i, \"T_e\": 0 * u.K, \"gamma_i\": \"fsdfas\", \"ion\": \"p\"},\n TypeError,\n ),\n ((), {\"T_i\": T_i, \"T_e\": 0 * u.K, \"ion\": \"cupcakes\"}, InvalidParticleError),\n ((), {\"T_i\": -np.abs(T_i), \"T_e\": 0 * u.K, \"ion\": \"p\"}, ValueError),\n (\n (),\n {\"T_i\": T_i, \"T_e\": 0 * u.K, \"n_e\": -np.abs(n_e), \"k\": k_1, \"ion\": \"p\"},\n ValueError,\n ),\n (\n (),\n {\"T_i\": T_i, \"T_e\": 0 * u.K, \"n_e\": n_e, \"k\": -np.abs(k_1), \"ion\": \"p\"},\n ValueError,\n ),\n ((), {\"T_i\": 5e19 * u.K, \"T_e\": 0 * u.K, \"ion\": \"p\"}, RelativityError),\n (\n (),\n {\"T_i\": 5 * u.A, \"T_e\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"},\n u.UnitTypeError,\n ),\n (\n (),\n {\"T_i\": T_negarr, \"T_e\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"},\n ValueError,\n ),\n (\n (),\n {\"T_e\": T_negarr, \"T_i\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"},\n ValueError,\n ),\n ],\n )\n def test_raises(self, args, kwargs, _error):\n with pytest.raises(_error):\n ion_sound_speed(*args, **kwargs)\n\n @pytest.mark.parametrize(\n \"kwargs\",\n [\n ({\"T_i\": T_nanarr, \"T_e\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"}),\n ({\"T_e\": T_nanarr, \"T_i\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"}),\n ],\n )\n def test_nan_values(self, kwargs):\n np.isnan(ion_sound_speed(**kwargs)[1])\n\n def test_handle_nparrays(self):\n assert_can_handle_nparray(ion_sound_speed)\n\n\ndef test_thermal_pressure():\n assert thermal_pressure(T_e, n_i).unit.is_equivalent(u.Pa)\n\n # TODO: may be array issues with arg \"mass\"\n 
assert_can_handle_nparray(thermal_pressure)\n\n\ndef test_gyrofrequency():\n r\"\"\"Test the gyrofrequency function in parameters.py.\"\"\"\n\n assert gyrofrequency(B, \"e-\").unit.is_equivalent(u.rad / u.s)\n\n assert gyrofrequency(B, \"e-\", to_hz=True).unit.is_equivalent(u.Hz)\n\n assert np.isclose(gyrofrequency(1 * u.T, \"e-\").value, 175882008784.72018)\n\n assert np.isclose(gyrofrequency(2.4 * u.T, \"e-\").value, 422116821083.3284)\n\n assert np.isclose(\n gyrofrequency(1 * u.T, \"e-\", to_hz=True).value, 27992490076.528206\n )\n\n assert np.isclose(\n gyrofrequency(2.4 * u.T, \"e-\", signed=True).value, -422116821083.3284\n )\n\n assert np.isclose(gyrofrequency(1 * u.G, \"e-\").cgs.value, 1.76e7, rtol=1e-3)\n\n with pytest.raises(TypeError):\n with pytest.warns(u.UnitsWarning):\n gyrofrequency(u.m, \"e-\")\n\n with pytest.raises(u.UnitTypeError):\n gyrofrequency(u.m * 1, \"e-\")\n\n assert np.isnan(gyrofrequency(B_nanarr, \"e-\")[-1])\n\n # The following is a test to check that equivalencies from astropy\n # are working.\n omega_ce = gyrofrequency(2.2 * u.T, \"e-\")\n f_ce = (omega_ce / (2 * np.pi)) / u.rad\n f_ce_use_equiv = omega_ce.to(u.Hz, equivalencies=[(u.cy / u.s, u.Hz)])\n assert np.isclose(f_ce.value, f_ce_use_equiv.value)\n\n with pytest.warns(u.UnitsWarning):\n assert gyrofrequency(5.0, \"e-\") == gyrofrequency(5.0 * u.T, \"e-\")\n\n assert gyrofrequency(B, particle=ion).unit.is_equivalent(u.rad / u.s)\n\n assert np.isclose(gyrofrequency(1 * u.T, particle=\"p\").value, 95788335.834874)\n\n assert np.isclose(gyrofrequency(2.4 * u.T, particle=\"p\").value, 229892006.00369796)\n\n assert np.isclose(gyrofrequency(1 * u.G, particle=\"p\").cgs.value, 9.58e3, rtol=2e-3)\n\n assert gyrofrequency(-5 * u.T, \"p\") == gyrofrequency(5 * u.T, \"p\")\n\n # Case when Z=1 is assumed\n # assert gyrofrequency(B, particle='p+') == gyrofrequency(B, particle='H-1')\n\n assert gyrofrequency(B, particle=\"e+\") == gyrofrequency(B, \"e-\")\n\n with pytest.warns(u.UnitsWarning):\n gyrofrequency(8, \"p\")\n\n with pytest.raises(u.UnitTypeError):\n gyrofrequency(5 * u.m, \"p\")\n\n with pytest.raises(InvalidParticleError):\n gyrofrequency(8 * u.T, particle=\"asdfasd\")\n\n with pytest.warns(u.UnitsWarning):\n # TODO this should be WARNS, not RAISES. 
and it's probably still raised\n assert gyrofrequency(5.0, \"p\") == gyrofrequency(5.0 * u.T, \"p\")\n\n gyrofrequency(1 * u.T, particle=\"p\")\n # testing for user input Z\n testMeth1 = gyrofrequency(1 * u.T, particle=\"p\", Z=0.8).si.value\n testTrue1 = 76630665.79318453\n errStr = f\"gyrofrequency() gave {testMeth1}, should be {testTrue1}.\"\n assert np.isclose(testMeth1, testTrue1, atol=0.0, rtol=1e-5), errStr\n\n assert_can_handle_nparray(gyrofrequency, kwargs={\"signed\": True})\n\n assert_can_handle_nparray(gyrofrequency, kwargs={\"signed\": False})\n\n\ndef test_gyroradius():\n r\"\"\"Test the gyroradius function in parameters.py.\"\"\"\n\n assert gyroradius(B, \"e-\", T=T_e).unit.is_equivalent(u.m)\n\n assert gyroradius(B, \"e-\", Vperp=25 * u.m / u.s).unit.is_equivalent(u.m)\n\n # test for possiblity to allow nan for input values\n assert np.isnan(gyroradius(np.nan * u.T, particle=\"e-\", T=1 * u.K))\n assert np.isnan(gyroradius(1 * u.T, particle=\"e-\", T=np.nan * u.K))\n assert np.isnan(gyroradius(1 * u.T, particle=\"e-\", Vperp=np.nan * u.m / u.s))\n\n Vperp = 1e6 * u.m / u.s\n Bmag = 1 * u.T\n omega_ce = gyrofrequency(Bmag, \"e-\")\n analytical_result = (Vperp / omega_ce).to(\n u.m, equivalencies=u.dimensionless_angles()\n )\n assert gyroradius(Bmag, \"e-\", Vperp=Vperp) == analytical_result\n\n with pytest.raises(TypeError):\n with pytest.warns(u.UnitsWarning):\n gyroradius(u.T, \"e-\")\n\n with pytest.raises(u.UnitTypeError):\n gyroradius(5 * u.A, \"e-\", Vperp=8 * u.m / u.s)\n\n with pytest.raises(u.UnitTypeError):\n gyroradius(5 * u.T, \"e-\", Vperp=8 * u.m)\n\n with pytest.raises(ValueError):\n gyroradius(np.array([5, 6]) * u.T, \"e-\", Vperp=np.array([5, 6, 7]) * u.m / u.s)\n\n assert np.isnan(gyroradius(np.nan * u.T, \"e-\", Vperp=1 * u.m / u.s))\n\n with pytest.raises(ValueError):\n gyroradius(3.14159 * u.T, \"e-\", T=-1 * u.K)\n\n with pytest.warns(u.UnitsWarning):\n assert gyroradius(1.0, \"e-\", Vperp=1.0) == gyroradius(\n 1.0 * u.T, \"e-\", Vperp=1.0 * u.m / u.s\n )\n\n with pytest.warns(u.UnitsWarning):\n assert gyroradius(1.1, \"e-\", T=1.2) == gyroradius(1.1 * u.T, \"e-\", T=1.2 * u.K)\n\n with pytest.raises(ValueError):\n gyroradius(1.1 * u.T, \"e-\", Vperp=1 * u.m / u.s, T=1.2 * u.K)\n\n with pytest.raises(u.UnitTypeError):\n gyroradius(1.1 * u.T, \"e-\", Vperp=1.1 * u.m, T=1.2 * u.K)\n\n # Check for Deprecation warning when using T_i instead of T\n with pytest.warns(PlasmaPyFutureWarning):\n gyroradius(1.1 * u.T, \"e-\", T_i=1.2 * u.K)\n\n assert gyroradius(B, particle=\"p\", T=T_i).unit.is_equivalent(u.m)\n\n assert gyroradius(B, particle=\"p\", Vperp=25 * u.m / u.s).unit.is_equivalent(u.m)\n\n # Case when Z=1 is assumed\n assert np.isclose(\n gyroradius(B, particle=\"p\", T=T_i),\n gyroradius(B, particle=\"H+\", T=T_i),\n atol=1e-6 * u.m,\n )\n\n gyroPos = gyroradius(B, particle=\"p\", Vperp=V)\n gyroNeg = gyroradius(B, particle=\"p\", Vperp=-V)\n assert gyroPos == gyroNeg\n\n Vperp = 1e6 * u.m / u.s\n Bmag = 1 * u.T\n omega_ci = gyrofrequency(Bmag, particle=\"p\")\n analytical_result = (Vperp / omega_ci).to(\n u.m, equivalencies=u.dimensionless_angles()\n )\n assert gyroradius(Bmag, particle=\"p\", Vperp=Vperp) == analytical_result\n\n T2 = 1.2 * u.MK\n B2 = 123 * u.G\n particle2 = \"alpha\"\n Vperp2 = thermal_speed(T2, particle=particle2)\n gyro_by_vperp = gyroradius(B2, particle=\"alpha\", Vperp=Vperp2)\n assert gyro_by_vperp == gyroradius(B2, particle=\"alpha\", T=T2)\n\n explicit_positron_gyro = gyroradius(1 * u.T, particle=\"positron\", T=1 * u.MK)\n 
assert explicit_positron_gyro == gyroradius(1 * u.T, \"e-\", T=1 * u.MK)\n\n with pytest.raises(TypeError):\n with pytest.warns(u.UnitsWarning):\n gyroradius(u.T, particle=\"p\", Vperp=8 * u.m / u.s)\n\n with pytest.raises(ValueError):\n gyroradius(B, particle=\"p\", T=-1 * u.K)\n\n with pytest.warns(u.UnitsWarning):\n gyro_without_units = gyroradius(1.0, particle=\"p\", Vperp=1.0)\n gyro_with_units = gyroradius(1.0 * u.T, particle=\"p\", Vperp=1.0 * u.m / u.s)\n assert gyro_without_units == gyro_with_units\n\n with pytest.warns(u.UnitsWarning):\n gyro_t_without_units = gyroradius(1.1, particle=\"p\", T=1.2)\n gyro_t_with_units = gyroradius(1.1 * u.T, particle=\"p\", T=1.2 * u.K)\n assert gyro_t_with_units == gyro_t_without_units\n\n with pytest.raises(ValueError):\n gyroradius(1.1 * u.T, particle=\"p\", Vperp=1 * u.m / u.s, T=1.2 * u.K)\n\n with pytest.raises(u.UnitTypeError):\n gyroradius(1.1 * u.T, particle=\"p\", Vperp=1.1 * u.m, T=1.2 * u.K)\n\n with pytest.raises(u.UnitTypeError):\n gyroradius(1.1 * u.T, particle=\"p\", Vperp=1.2 * u.m, T=1.1 * u.K)\n\n\nclass Test_gyroradius:\n\n # some custom numpy array tests here, because of the T / Vperp situation\n def test_handle_numpy_array(self):\n # Tests to verify that can handle Quantities with numpy array as the value:\n assert gyroradius(B_arr, \"e-\", Vperp=V_arr)[0] == gyroradius(\n B_arr[0], \"e-\", Vperp=V_arr[0]\n )\n assert gyroradius(B_arr, \"e-\", T=T_arr)[0] == gyroradius(\n B_arr[0], \"e-\", T=T_arr[0]\n )\n\n def test_handle_mixed_Qarrays(self):\n # If both Vperp or T are input as Qarrays, but only one of the two is valid\n # at each element, then that's fine, the function should work:\n assert gyroradius(B_arr, \"e-\", Vperp=V_nanarr, T=T_nanarr2)[0] == gyroradius(\n B_arr[0], \"e-\", Vperp=V_nanarr[0], T=T_nanarr2[0]\n )\n\n def test_raise_two_valid_inputs(self):\n # If both Vperp or T are nan-less, Qarrays or not, should raise ValueError:\n with pytest.raises(ValueError):\n gyroradius(B_arr, \"e-\", Vperp=V, T=T_arr)\n with pytest.raises(ValueError):\n gyroradius(B_arr, \"e-\", Vperp=V_arr, T=T_i)\n\n def test_all_valid_and_one_valid(self):\n # If one of (Vperp, T) is a valid and one is Qarray with at least one valid, ValueError:\n with pytest.raises(ValueError):\n gyroradius(B_arr, \"e-\", Vperp=V, T=T_nanarr)\n with pytest.raises(ValueError):\n gyroradius(B_arr, \"e-\", Vperp=V_nanarr, T=T_i)\n\n def test_scalar_and_nan_qarray(self):\n # If either Vperp or T is a valid scalar and the other is a Qarray of all nans,\n # should do something valid and not raise a ValueError\n assert np.all(np.isfinite(gyroradius(B_arr, \"e-\", Vperp=V, T=T_allnanarr)))\n assert np.all(np.isfinite(gyroradius(B_arr, \"e-\", Vperp=V_allnanarr, T=T_i)))\n\n def test_keeps_arguments_unchanged(self):\n Vperp1 = u.Quantity([np.nan, 1], unit=u.m / u.s)\n Vperp2 = u.Quantity([np.nan, 1], unit=u.m / u.s) # an exact copy\n T_i = u.Quantity([1, np.nan], unit=u.K)\n\n gyroradius(B_arr, \"e-\", Vperp=Vperp1, T=T_i)\n assert_quantity_allclose(Vperp1, Vperp2)\n\n\ndef test_plasma_frequency():\n r\"\"\"Test the plasma_frequency function in parameters.py.\"\"\"\n\n assert plasma_frequency(n_e, \"e-\").unit.is_equivalent(u.rad / u.s)\n\n assert plasma_frequency(n_e, \"e-\", to_hz=True).unit.is_equivalent(u.Hz)\n\n assert np.isclose(plasma_frequency(1 * u.cm ** -3, \"e-\").value, 5.64e4, rtol=1e-2)\n\n assert np.isclose(\n plasma_frequency(1 * u.cm ** -3, particle=\"N\").value, 3.53e2, rtol=1e-1\n )\n\n assert np.isclose(\n plasma_frequency(1 * u.cm ** -3, 
particle=\"N\", to_hz=True).value,\n 56.19000195094519,\n )\n\n with pytest.raises(TypeError):\n with pytest.warns(u.UnitsWarning):\n plasma_frequency(u.m ** -3, \"e-\")\n\n with pytest.raises(u.UnitTypeError):\n plasma_frequency(5 * u.m ** -2, \"e-\")\n\n assert np.isnan(plasma_frequency(np.nan * u.m ** -3, \"e-\"))\n\n with pytest.warns(u.UnitsWarning):\n assert plasma_frequency(1e19, \"e-\") == plasma_frequency(1e19 * u.m ** -3, \"e-\")\n\n assert plasma_frequency(n_i, particle=\"p\").unit.is_equivalent(u.rad / u.s)\n\n # Case where Z=1 is assumed\n assert plasma_frequency(n_i, particle=\"H-1+\") == plasma_frequency(n_i, particle=\"p\")\n\n assert np.isclose(\n plasma_frequency(mu * u.cm ** -3, particle=\"p\").value, 1.32e3, rtol=1e-2\n )\n\n with pytest.raises(ValueError):\n plasma_frequency(n=5 * u.m ** -3, particle=\"sdfas\")\n\n with pytest.warns(u.UnitsWarning):\n plasma_freq_no_units = plasma_frequency(1e19, particle=\"p\")\n assert plasma_freq_no_units == plasma_frequency(1e19 * u.m ** -3, particle=\"p\")\n\n plasma_frequency(1e17 * u.cm ** -3, particle=\"p\")\n # testing for user input z_mean\n testMeth1 = plasma_frequency(1e17 * u.cm ** -3, particle=\"p\", z_mean=0.8).si.value\n testTrue1 = 333063562455.4028\n errStr = f\"plasma_frequency() gave {testMeth1}, should be {testTrue1}.\"\n assert np.isclose(testMeth1, testTrue1, atol=0.0, rtol=1e-6), errStr\n\n assert_can_handle_nparray(plasma_frequency)\n\n\ndef test_Debye_length():\n r\"\"\"Test the Debye_length function in parameters.py.\"\"\"\n\n assert Debye_length(T_e, n_e).unit.is_equivalent(u.m)\n\n assert np.isclose(Debye_length(1 * u.eV, 1 * u.cm ** -3).value, 7.43, atol=0.005)\n\n with pytest.warns(u.UnitsWarning):\n Debye_length(5, 5 * u.m ** -3)\n\n with pytest.raises(u.UnitTypeError):\n Debye_length(56 * u.kg, 5 * u.m ** -3)\n\n with pytest.raises(ValueError):\n Debye_length(5 * u.eV, -5 * u.m ** -3)\n\n with pytest.raises(ValueError):\n Debye_length(-45 * u.K, 5 * u.m ** -3)\n\n Tarr2 = np.array([1, 2]) * u.K\n narr3 = np.array([1, 2, 3]) * u.m ** -3\n with pytest.raises(ValueError):\n Debye_length(Tarr2, narr3)\n\n with pytest.warns(u.UnitsWarning):\n assert Debye_length(2.0, 2.0) == Debye_length(2.0 * u.K, 2.0 * u.m ** -3)\n\n with pytest.warns(u.UnitsWarning):\n assert Debye_length(2.0 * u.K, 2.0) == Debye_length(2.0, 2.0 * u.m ** -3)\n\n assert_can_handle_nparray(Debye_length)\n\n\ndef test_Debye_number():\n r\"\"\"Test the Debye_number function in parameters.py.\"\"\"\n\n assert Debye_number(T_e, n_e).unit.is_equivalent(u.dimensionless_unscaled)\n\n T_e_eV = T_e.to(u.eV, equivalencies=u.temperature_energy())\n assert np.isclose(Debye_number(T_e, n_e).value, Debye_number(T_e_eV, n_e).value)\n\n assert np.isclose(Debye_number(1 * u.eV, 1 * u.cm ** -3).value, 1720862385.43342)\n\n with pytest.warns(u.UnitsWarning):\n Debye_number(T_e, 4)\n\n with pytest.raises(ValueError):\n Debye_number(None, n_e)\n\n with pytest.raises(u.UnitTypeError):\n Debye_number(5 * u.m, 5 * u.m ** -3)\n\n with pytest.raises(u.UnitTypeError):\n Debye_number(5 * u.K, 5 * u.m ** 3)\n\n with pytest.raises(ValueError):\n Debye_number(5j * u.K, 5 * u.cm ** -3)\n\n Tarr2 = np.array([1, 2]) * u.K\n narr3 = np.array([1, 2, 3]) * u.m ** -3\n with pytest.raises(ValueError):\n Debye_number(Tarr2, narr3)\n\n with pytest.warns(u.UnitsWarning):\n assert Debye_number(1.1, 1.1) == Debye_number(1.1 * u.K, 1.1 * u.m ** -3)\n\n with pytest.warns(u.UnitsWarning):\n assert Debye_number(1.1 * u.K, 1.1) == Debye_number(1.1, 1.1 * u.m ** -3)\n\n 
assert_can_handle_nparray(Debye_number)\n\n\ndef test_inertial_length():\n r\"\"\"Test the inertial_length function in parameters.py.\"\"\"\n\n assert inertial_length(n_i, particle=\"p\").unit.is_equivalent(u.m)\n\n assert np.isclose(\n inertial_length(mu * u.cm ** -3, particle=\"p\").cgs.value, 2.28e7, rtol=0.01\n )\n\n inertial_length_electron_plus = inertial_length(5.351 * u.m ** -3, particle=\"e+\")\n assert inertial_length_electron_plus == inertial_length(\n 5.351 * u.m ** -3, particle=\"e\"\n )\n\n assert inertial_length(n_i, particle=\"p\") == inertial_length(n_i, particle=\"p\")\n\n with pytest.warns(u.UnitsWarning):\n inertial_length(4, particle=\"p\")\n\n with pytest.raises(u.UnitTypeError):\n inertial_length(4 * u.m ** -2, particle=\"p\")\n\n with pytest.raises(ValueError):\n inertial_length(-5 * u.m ** -3, particle=\"p\")\n\n with pytest.raises(InvalidParticleError):\n inertial_length(n_i, particle=-135)\n\n with pytest.warns(u.UnitsWarning):\n inertial_length_no_units = inertial_length(1e19, particle=\"p\")\n assert inertial_length_no_units == inertial_length(\n 1e19 * u.m ** -3, particle=\"p\"\n )\n\n assert inertial_length(n_e, \"e-\").unit.is_equivalent(u.m)\n\n assert np.isclose(\n inertial_length(1 * u.cm ** -3, \"e-\").cgs.value, 5.31e5, rtol=1e-3\n )\n\n with pytest.warns(u.UnitsWarning):\n inertial_length(5, \"e-\")\n\n with pytest.raises(u.UnitTypeError):\n inertial_length(5 * u.m, \"e-\")\n\n with pytest.raises(ValueError):\n inertial_length(-5 * u.m ** -3, \"e-\")\n\n with pytest.warns(u.UnitsWarning):\n assert inertial_length(1e19, \"e-\") == inertial_length(1e19 * u.m ** -3, \"e-\")\n\n assert_can_handle_nparray(inertial_length)\n\n\ndef test_magnetic_pressure():\n r\"\"\"Test the magnetic_pressure function in parameters.py.\"\"\"\n\n assert magnetic_pressure(B_arr).unit.is_equivalent(u.Pa)\n\n assert magnetic_pressure(B).unit.is_equivalent(u.Pa)\n\n assert magnetic_pressure(B).unit.name == \"Pa\"\n\n assert magnetic_pressure(B).value == magnetic_energy_density(B).value\n\n assert magnetic_pressure(B) == magnetic_energy_density(B.to(u.G))\n\n assert np.isclose(magnetic_pressure(B).value, 397887.35772973835)\n\n with pytest.warns(u.UnitsWarning):\n magnetic_pressure(5)\n\n with pytest.raises(u.UnitTypeError):\n magnetic_pressure(5 * u.m)\n\n assert np.isnan(magnetic_pressure(np.nan * u.T))\n\n with pytest.raises(ValueError):\n magnetic_pressure(5j * u.T)\n\n assert np.isnan(magnetic_pressure(B_nanarr)[-1])\n\n with pytest.warns(u.UnitsWarning):\n assert magnetic_pressure(22.2) == magnetic_pressure(22.2 * u.T)\n\n assert_can_handle_nparray(magnetic_pressure)\n\n\ndef test_magnetic_energy_density():\n r\"\"\"Test the magnetic_energy_density function in parameters.py.\"\"\"\n\n assert magnetic_energy_density(B_arr).unit.is_equivalent(u.J / u.m ** 3)\n\n assert magnetic_energy_density(B).unit.is_equivalent(\"J / m3\")\n\n assert magnetic_energy_density(B).value == magnetic_pressure(B).value\n\n assert_quantity_allclose(\n magnetic_energy_density(2 * B), 4 * magnetic_energy_density(B)\n )\n\n assert_quantity_allclose(magnetic_energy_density(B).value, 397887.35772973835)\n\n assert_quantity_allclose(\n magnetic_energy_density(B), magnetic_energy_density(B.to(u.G))\n )\n\n assert isinstance(magnetic_energy_density(B_arr), u.Quantity)\n\n with pytest.warns(u.UnitsWarning):\n magnetic_energy_density(5)\n\n with pytest.raises(u.UnitTypeError):\n magnetic_energy_density(5 * u.m)\n\n assert np.isnan(magnetic_energy_density(np.nan * u.T))\n\n with pytest.raises(ValueError):\n 
magnetic_energy_density(5j * u.T)\n\n assert np.isnan(magnetic_energy_density(B_nanarr)[-1])\n\n with pytest.warns(u.UnitsWarning):\n assert magnetic_energy_density(22.2) == magnetic_energy_density(22.2 * u.T)\n\n assert_can_handle_nparray(magnetic_energy_density)\n\n\ndef test_upper_hybrid_frequency():\n r\"\"\"Test the upper_hybrid_frequency function in parameters.py.\"\"\"\n\n omega_uh = upper_hybrid_frequency(B, n_e=n_e)\n omega_uh_hz = upper_hybrid_frequency(B, n_e=n_e, to_hz=True)\n omega_ce = gyrofrequency(B, \"e-\")\n omega_pe = plasma_frequency(n=n_e, particle=\"e-\")\n assert omega_ce.unit.is_equivalent(u.rad / u.s)\n assert omega_pe.unit.is_equivalent(u.rad / u.s)\n assert omega_uh.unit.is_equivalent(u.rad / u.s)\n assert omega_uh_hz.unit.is_equivalent(u.Hz)\n left_hand_side = omega_uh ** 2\n right_hand_side = omega_ce ** 2 + omega_pe ** 2\n assert np.isclose(left_hand_side.value, right_hand_side.value)\n\n assert np.isclose(omega_uh_hz.value, 69385868857.90918)\n\n with pytest.raises(ValueError):\n upper_hybrid_frequency(5 * u.T, n_e=-1 * u.m ** -3)\n\n with pytest.warns(u.UnitsWarning):\n assert upper_hybrid_frequency(1.2, 1.3) == upper_hybrid_frequency(\n 1.2 * u.T, 1.3 * u.m ** -3\n )\n\n with pytest.warns(u.UnitsWarning):\n assert upper_hybrid_frequency(1.4 * u.T, 1.3) == upper_hybrid_frequency(\n 1.4, 1.3 * u.m ** -3\n )\n\n assert_can_handle_nparray(upper_hybrid_frequency)\n\n\ndef test_lower_hybrid_frequency():\n r\"\"\"Test the lower_hybrid_frequency function in parameters.py.\"\"\"\n\n ion = \"He-4 1+\"\n omega_ci = gyrofrequency(B, particle=ion)\n omega_pi = plasma_frequency(n=n_i, particle=ion)\n omega_ce = gyrofrequency(B, \"e-\")\n omega_lh = lower_hybrid_frequency(B, n_i=n_i, ion=ion)\n omega_lh_hz = lower_hybrid_frequency(B, n_i=n_i, ion=ion, to_hz=True)\n assert omega_ci.unit.is_equivalent(u.rad / u.s)\n assert omega_pi.unit.is_equivalent(u.rad / u.s)\n assert omega_ce.unit.is_equivalent(u.rad / u.s)\n assert omega_lh.unit.is_equivalent(u.rad / u.s)\n left_hand_side = omega_lh ** -2\n right_hand_side = (\n 1 / (omega_ci ** 2 + omega_pi ** 2) + omega_ci ** -1 * omega_ce ** -1\n )\n assert np.isclose(left_hand_side.value, right_hand_side.value)\n\n assert np.isclose(omega_lh_hz.value, 299878691.3223296)\n\n with pytest.raises(ValueError):\n lower_hybrid_frequency(0.2 * u.T, n_i=5e19 * u.m ** -3, ion=\"asdfasd\")\n\n with pytest.raises(ValueError):\n lower_hybrid_frequency(0.2 * u.T, n_i=-5e19 * u.m ** -3, ion=\"asdfasd\")\n\n with pytest.raises(ValueError):\n lower_hybrid_frequency(np.nan * u.T, n_i=-5e19 * u.m ** -3, ion=\"asdfasd\")\n\n with pytest.warns(u.UnitsWarning):\n assert lower_hybrid_frequency(1.3, 1e19, \"p+\") == lower_hybrid_frequency(\n 1.3 * u.T, 1e19 * u.m ** -3, \"p+\"\n )\n assert_can_handle_nparray(lower_hybrid_frequency)\n\n\ndef test_Bohm_diffusion():\n r\"\"\"Test Mag_Reynolds in dimensionless.py\"\"\"\n\n T_e = 5000 * u.K\n B = 10 * u.T\n\n assert (Bohm_diffusion(T_e, B)).unit == u.m ** 2 / u.s\n\n with pytest.warns(u.UnitsWarning):\n Bohm_diffusion(5000, B)\n\n with pytest.raises(u.UnitTypeError):\n Bohm_diffusion(2.2 * u.kg, B)\n\n\[email protected](\n \"alias, parent\",\n [\n (rho_, mass_density),\n (va_, Alfven_speed),\n (cs_, ion_sound_speed),\n (pth_, thermal_pressure),\n (betaH_, Hall_parameter),\n (oc_, gyrofrequency),\n (wc_, gyrofrequency),\n (rc_, gyroradius),\n (rhoc_, gyroradius),\n (wp_, plasma_frequency),\n (lambdaD_, Debye_length),\n (nD_, Debye_number),\n (cwp_, inertial_length),\n (pmag_, magnetic_pressure),\n (ub_, 
magnetic_energy_density),\n (wuh_, upper_hybrid_frequency),\n (wlh_, lower_hybrid_frequency),\n (DB_, Bohm_diffusion),\n ],\n)\ndef test_parameters_aliases(alias, parent):\n \"\"\"Test all aliases defined in parameters.py\"\"\"\n assert alias is parent\n" ]
[ [ "numpy.logical_not", "numpy.abs", "numpy.isnan", "numpy.all", "numpy.isscalar", "numpy.array", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
seo-95/elvis
[ "a89c759acdf6ce64c7e6863aeb68dc0ba3293fed" ]
[ "elvis/modeling/meta_arch/vl_pretrainer.py" ]
[ "import copy\nimport os\nimport pdb\nimport random\nfrom typing import Dict, List, Text, TypeVar\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom elvis.modeling.models import build_net\nfrom elvis.modeling.models.layers import FC, MLP\nfrom elvis.utils.vlp_objectives import optimal_transport_dist\n\nfrom .base import MetaArch\nfrom .build import ARCH_REGISTRY\n\nTensor = TypeVar('torch.tensor')\n\n\n__all__ = ['AlignmentVLP',\n 'build_align_vlp']\n\n\nclass AlignmentVLP(MetaArch):\n \"\"\"Meta architecture for Visual Language Pretraining (VLP) based on image-caption alignment\n \"\"\"\n def __init__(self, model, max_visual, max_tokens, tasks_dict) -> None:\n super().__init__()\n self.model = model\n self.max_visual = max_visual\n self.max_tokens = max_tokens+2 #take into account [CLS] and [SEP]\n self.tasks_dict = tasks_dict\n\n self.lm_mlp = MLP(in_features=self.model.embed_dim,\n hidden_dim=self.model.embed_dim,\n out_features=len(self.model.tokenizer)-1,\n dropout_p=.1)\n self.itm_fc = FC(in_features=self.model.embed_dim, out_features=2)\n \n def forward(self, vis_in, txt_in, vis_mask, txt_mask, **kwargs) -> Dict:\n cntx_emb = self.model(vis_in=vis_in, vis_mask=vis_mask, txt_in=txt_in, txt_mask=txt_mask)\n txt_emb = cntx_emb[:, :self.max_tokens]\n\n itm_logits = self.itm_fc(txt_emb[:, 0, :]) #pass everything but use only [CLS]: better parallelization of loss computation\n lm_logits = self.lm_mlp(txt_emb[:, 1:, :])\n\n #? exclude special tokens from ot computation\n vis_mask = torch.cat(\n (torch.ones((vis_mask.shape[0], 1), device=vis_mask.device), vis_mask),\n dim=-1) #add attention for [IMG]\n ot_dist = optimal_transport_dist(txt_emb=cntx_emb[:, :self.max_tokens, :].float(),\n img_emb=cntx_emb[:, self.max_tokens:, :].float(),\n txt_pad=~txt_mask.bool(),\n img_pad=~vis_mask.bool()\n )\n\n return {'lm_logits': lm_logits, 'itm_logits': itm_logits, 'ot_dist': ot_dist}\n\n def compute_loss(self, lm_logits, itm_logits, lm_targets, itm_targets, **kwargs) -> Dict:\n B = lm_logits.shape[0]\n n_mlm = sum([t == 'MLM' for t in kwargs['tasks']])\n n_itm = len(kwargs['tasks']) - n_mlm\n loss_dict = {}\n\n #compute lm loss (compute it also if n_mlm > 0 otherwise the DDP will raise an exception)\n lm_loss = F.cross_entropy(lm_logits.transpose(1, 2), lm_targets[:, 1:], reduction='sum')\n if n_mlm > 0:\n lm_loss /= n_mlm\n loss_dict['lm_loss'] = lm_loss\n\n #compute itm loss (compute it also if n_itm > 0 otherwise the DDP will raise an exception)\n itm_loss = F.cross_entropy(itm_logits, itm_targets[:, 0], reduction='sum')\n ot_pos = kwargs['ot_dist'].masked_select(itm_targets[:, 0] == 1)\n ot_neg = kwargs['ot_dist'].masked_select(itm_targets[:, 0] == 0)\n #we want to maximize the OT distance for negative pairs and minimize OT distance for positive ones\n ot_loss = ot_pos.sum() - ot_neg.sum()\n itm_loss = (itm_loss + 0.1 * ot_loss)\n if n_itm > 0:\n itm_loss /= n_itm\n loss_dict['itm_loss'] = itm_loss\n\n loss_dict['loss'] = sum(loss_dict.values())\n return loss_dict\n\n def save_on_disk(self, path):\n state_dict = copy.deepcopy(self).cpu().state_dict()\n ckp_file = os.path.join(path, 'state_dict.pt')\n torch.save(state_dict, ckp_file)\n\n\n\n@ARCH_REGISTRY.register()\ndef build_align_vlp(cfg):\n model, data_interface = build_net(cfg.MODEL, get_interface='vlp')\n vlp = AlignmentVLP(model,\n max_visual=cfg.MODEL.MAX_N_VISUAL,\n max_tokens=cfg.MODEL.MAX_N_TOKENS,\n tasks_dict=cfg.MODEL.TASKS.get_as_dict())\n return vlp, data_interface\n\n" ]
[ [ "torch.nn.functional.cross_entropy", "torch.ones", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
keisuke-umezawa/backlight
[ "db49a966fdb38de693bb8157cec88d98620f9946" ]
[ "tests/portfolio/test_portfolio.py" ]
[ "import pytest\nimport pandas as pd\nimport numpy as np\n\nimport backlight\nfrom backlight.portfolio.portfolio import create_portfolio as module\nfrom backlight.portfolio.portfolio import _fusion_positions\nimport backlight.positions.positions\nfrom backlight.trades.trades import make_trades\nfrom backlight.asset.currency import Currency\n\n\[email protected]\ndef trades():\n trades = []\n index = [\n \"2018-06-06 00:00:00\",\n \"2018-06-06 00:01:00\",\n \"2018-06-06 00:02:00\",\n \"2018-06-06 00:03:00\",\n \"2018-06-06 00:03:00\",\n \"2018-06-06 00:04:00 \",\n \"2018-06-06 00:05:00\",\n \"2018-06-06 00:05:00\",\n \"2018-06-06 00:06:00 \",\n \"2018-06-06 00:06:00 \",\n \"2018-06-06 00:07:00 \",\n \"2018-06-06 00:08:00 \",\n \"2018-06-06 00:09:00 \",\n \"2018-06-06 00:09:00 \",\n ]\n\n trade = pd.Series(\n index=pd.to_datetime(index),\n data=[1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1],\n name=\"amount\",\n )\n ids = [0, 1, 0, 1, 2, 3, 2, 4, 3, 5, 4, 5, 6, 6]\n currency_unit = Currency.JPY\n\n trades.append(make_trades(\"USDJPY\", [trade], currency_unit, [ids]))\n trades.append(make_trades(\"EURJPY\", [trade], currency_unit, [ids]))\n trades.append(make_trades(\"USDJPY\", [trade], currency_unit, [ids]))\n return trades\n\n\[email protected]\ndef markets():\n markets = []\n symbol = \"USDJPY\"\n currency_unit = Currency.JPY\n quote_currency = Currency.USD\n periods = 13\n df = pd.DataFrame(\n index=pd.date_range(start=\"2018-06-05 23:57:00\", freq=\"1min\", periods=periods),\n data=np.repeat(2, periods)[:, None],\n columns=[\"mid\"],\n )\n markets.append(\n backlight.datasource.from_dataframe(\n df, symbol, currency_unit, quote_currency=quote_currency\n )\n )\n\n symbol = \"EURJPY\"\n currency_unit = Currency.JPY\n quote_currency = Currency.EUR\n df = pd.DataFrame(\n index=pd.date_range(start=\"2018-06-05 23:57:00\", freq=\"1min\", periods=periods),\n data=np.repeat(4, periods)[:, None],\n columns=[\"mid\"],\n )\n markets.append(\n backlight.datasource.from_dataframe(\n df, symbol, currency_unit, quote_currency=quote_currency\n )\n )\n return markets\n\n\[email protected]\ndef principal():\n return {\"USDJPY\": 10, \"EURJPY\": 10}\n\n\[email protected]\ndef lot_size():\n return {\"USDJPY\": 2, \"EURJPY\": 2}\n\n\ndef test_create_portfolio(trades, markets, principal, lot_size):\n portfolio = module(trades, markets, principal, lot_size, Currency.USD)\n\n index = [\n \"2018-06-05 23:59:00\",\n \"2018-06-06 00:00:00\",\n \"2018-06-06 00:01:00\",\n \"2018-06-06 00:02:00\",\n \"2018-06-06 00:03:00\",\n \"2018-06-06 00:04:00 \",\n \"2018-06-06 00:05:00\",\n \"2018-06-06 00:06:00 \",\n \"2018-06-06 00:07:00 \",\n \"2018-06-06 00:08:00 \",\n \"2018-06-06 00:09:00 \",\n ]\n\n data1 = [\n [0.0, 0.0, 5.0],\n [2.0, 2.0, 1.0],\n [0.0, 2.0, 5.0],\n [-2.0, 2.0, 9.0],\n [2.0, 2.0, 1.0],\n [4.0, 2.0, -3.0],\n [0.0, 2.0, 5.0],\n [-4.0, 2.0, 13.0],\n [-2.0, 2.0, 9.0],\n [0.0, 2.0, 5.0],\n [0.0, 2.0, 5.0],\n ]\n\n data2 = [\n [0.0, 0.0, 10.0],\n [4.0, 2.0, 6.0],\n [0.0, 2.0, 10.0],\n [-4.0, 2.0, 14.0],\n [4.0, 2.0, 6.0],\n [8.0, 2.0, 2.0],\n [0.0, 2.0, 10.0],\n [-8.0, 2.0, 18.0],\n [-4.0, 2.0, 14.0],\n [0.0, 2.0, 10.0],\n [0.0, 2.0, 10.0],\n ]\n\n data = [data1, data2]\n\n for (position, d) in zip(portfolio._positions, data):\n\n expected = pd.DataFrame(\n index=pd.to_datetime(index),\n data=d,\n columns=[\"amount\", \"price\", \"principal\"],\n )\n assert ((expected == position).all()).all()\n\n\ndef test_fusion_positions():\n periods = 3\n data = np.arange(periods * 3).reshape((periods, 3))\n columns = 
[\"amount\", \"price\", \"principal\"]\n currency_unit = Currency.JPY\n\n positions_list = []\n df = pd.DataFrame(\n data=data,\n index=pd.date_range(\"2012-1-1\", periods=periods, freq=\"D\"),\n columns=columns,\n )\n symbol = \"USDJPY\"\n positions_list.append(\n backlight.positions.positions.from_dataframe(df, symbol, currency_unit)\n )\n\n df = pd.DataFrame(\n data=data,\n index=pd.date_range(\"2012-1-2\", periods=periods, freq=\"D\"),\n columns=columns,\n )\n symbol = \"USDJPY\"\n positions_list.append(\n backlight.positions.positions.from_dataframe(df, symbol, currency_unit)\n )\n\n df = pd.DataFrame(\n data=data,\n index=pd.date_range(\"2012-1-4\", periods=periods, freq=\"D\"),\n columns=columns,\n )\n symbol = \"EURJPY\"\n positions_list.append(\n backlight.positions.positions.from_dataframe(df, symbol, currency_unit)\n )\n\n fusioned = _fusion_positions(positions_list)\n\n data1 = np.arange(periods * 3).reshape((periods, 3))\n data2 = [[0, 1, 2], [3, 5, 7], [9, 11, 13], [6, 7, 8]]\n\n df1 = pd.DataFrame(\n data=data1,\n index=pd.date_range(\"2012-1-1\", periods=periods, freq=\"D\"),\n columns=columns,\n )\n df2 = pd.DataFrame(\n data=data2,\n index=pd.date_range(\"2012-1-1\", periods=periods + 1, freq=\"D\"),\n columns=columns,\n )\n\n expected = [df1, df2]\n\n for exp, fus in zip(expected, fusioned):\n assert exp.all().all() == fus.all().all()\n" ]
[ [ "numpy.arange", "numpy.repeat", "pandas.to_datetime", "pandas.date_range" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
twice154/Spatial-Self-modulation-on-BigGAN
[ "6ca691231bf7e8fd388a08b5ce6b4e30a50dd57b" ]
[ "BigGAN-PyTorch/BigGAN_remove_condbn+++++.py" ]
[ "import numpy as np\nimport math\nimport functools\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.nn import Parameter as P\n\nimport layers\nfrom sync_batchnorm import SynchronizedBatchNorm2d as SyncBatchNorm2d\n\n\n# Architectures for G\n# Attention is passed in in the format '32_64' to mean applying an attention\n# block at both resolution 32x32 and 64x64. Just '64' will apply at 64x64.\ndef G_arch(ch=64, attention='64', ksize='333333', dilation='111111'):\n arch = {}\n arch[512] = {'in_channels' : [ch * item for item in [16, 16, 8, 8, 4, 2, 1]],\n 'out_channels' : [ch * item for item in [16, 8, 8, 4, 2, 1, 1]],\n 'upsample' : [True] * 7,\n 'resolution' : [8, 16, 32, 64, 128, 256, 512],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,10)}}\n arch[256] = {'in_channels' : [ch * item for item in [16, 16, 8, 8, 4, 2]],\n 'out_channels' : [ch * item for item in [16, 8, 8, 4, 2, 1]],\n 'upsample' : [True] * 6,\n 'resolution' : [8, 16, 32, 64, 128, 256],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,9)}}\n arch[128] = {'in_channels' : [ch * item for item in [16, 16, 8, 4, 2]],\n 'out_channels' : [ch * item for item in [16, 8, 4, 2, 1]],\n 'upsample' : [True] * 5,\n 'resolution' : [8, 16, 32, 64, 128],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,8)}}\n arch[64] = {'in_channels' : [ch * item for item in [16, 16, 8, 4]],\n 'out_channels' : [ch * item for item in [16, 8, 4, 2]],\n 'upsample' : [True] * 4,\n 'resolution' : [8, 16, 32, 64],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,7)}}\n arch[32] = {'in_channels' : [ch * item for item in [4, 4, 4]],\n 'out_channels' : [ch * item for item in [4, 4, 4]],\n 'upsample' : [True] * 3,\n 'resolution' : [8, 16, 32],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,6)}}\n\n return arch\n\nclass Generator(nn.Module):\n def __init__(self, G_ch=64, dim_z=128, bottom_width=4, resolution=128,\n G_kernel_size=3, G_attn='64', n_classes=1000,\n num_G_SVs=1, num_G_SV_itrs=1,\n G_shared=True, shared_dim=0, hier=False,\n cross_replica=False, mybn=False,\n G_activation=nn.ReLU(inplace=False),\n G_lr=5e-5, G_B1=0.0, G_B2=0.999, adam_eps=1e-8,\n BN_eps=1e-5, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False,\n G_init='ortho', skip_init=False, no_optim=False,\n G_param='SN', norm_style='bn',\n **kwargs):\n super(Generator, self).__init__()\n # Channel width mulitplier\n self.ch = G_ch\n # Dimensionality of the latent space\n self.dim_z = dim_z\n # The initial spatial dimensions\n self.bottom_width = bottom_width\n # Resolution of the output\n self.resolution = resolution\n # Kernel size?\n self.kernel_size = G_kernel_size\n # Attention?\n self.attention = G_attn\n # number of classes, for use in categorical conditional generation\n self.n_classes = n_classes\n # Use shared embeddings?\n self.G_shared = G_shared\n # Dimensionality of the shared embedding? 
Unused if not using G_shared\n self.shared_dim = shared_dim if shared_dim > 0 else dim_z\n # Hierarchical latent space?\n self.hier = hier\n # Cross replica batchnorm?\n self.cross_replica = cross_replica\n # Use my batchnorm?\n self.mybn = mybn\n # nonlinearity for residual blocks\n self.activation = G_activation\n # Initialization style\n self.init = G_init\n # Parameterization style\n self.G_param = G_param\n # Normalization style\n self.norm_style = norm_style\n # Epsilon for BatchNorm?\n self.BN_eps = BN_eps\n # Epsilon for Spectral Norm?\n self.SN_eps = SN_eps\n # fp16?\n self.fp16 = G_fp16\n # Architecture dict\n self.arch = G_arch(self.ch, self.attention)[resolution]\n\n # If using hierarchical latents, adjust z\n if self.hier:\n # Number of places z slots into\n self.num_slots = len(self.arch['in_channels']) + 1\n self.z_chunk_size = (self.dim_z // self.num_slots)\n # Recalculate latent dimensionality for even splitting into chunks\n self.dim_z = self.z_chunk_size * self.num_slots\n else:\n self.num_slots = 1\n self.z_chunk_size = 0\n\n # Which convs, batchnorms, and linear layers to use\n if self.G_param == 'SN':\n self.which_conv = functools.partial(layers.SNConv2d,\n kernel_size=3, padding=1,\n num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,\n eps=self.SN_eps)\n self.which_linear = functools.partial(layers.SNLinear,\n num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,\n eps=self.SN_eps)\n else:\n self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)\n self.which_linear = nn.Linear\n \n # We use a non-spectral-normed embedding here regardless;\n # For some reason applying SN to G's embedding seems to randomly cripple G\n self.which_embedding = nn.Embedding\n # bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared\n # else self.which_embedding)\n self.which_bn = functools.partial(layers.bn,\n # which_linear=bn_linear,\n cross_replica=self.cross_replica,\n mybn=self.mybn,\n # input_size=(self.shared_dim + self.z_chunk_size if self.G_shared\n # else self.n_classes),\n # norm_style=self.norm_style,\n eps=self.BN_eps)\n\n\n # Prepare model\n # If not using shared embeddings, self.shared is just a passthrough\n self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared \n else layers.identity())\n # First linear layer\n self.linear = self.which_linear(self.dim_z // self.num_slots,\n self.arch['in_channels'][0] * (self.bottom_width **2))\n\n # self.blocks is a doubly-nested list of modules, the outer loop intended\n # to be over blocks at a given resolution (resblocks and/or self-attention)\n # while the inner loop is over a given block\n self.blocks = []\n for index in range(len(self.arch['out_channels'])):\n self.blocks += [[layers.GBlock(in_channels=self.arch['in_channels'][index],\n out_channels=self.arch['out_channels'][index],\n which_conv=self.which_conv,\n which_bn=self.which_bn,\n activation=self.activation,\n upsample=(functools.partial(F.interpolate, scale_factor=2)\n if self.arch['upsample'][index] else None))]]\n\n # If attention on this block, attach it to the end\n if self.arch['attention'][self.arch['resolution'][index]]:\n print('Adding attention layer in G at resolution %d' % self.arch['resolution'][index])\n self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]\n\n # Turn self.blocks into a ModuleList so that it's all properly registered.\n self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])\n\n # output layer: batchnorm-relu-conv.\n # Consider using a 
non-spectral conv here\n self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][-1],\n cross_replica=self.cross_replica,\n mybn=self.mybn),\n self.activation,\n self.which_conv(self.arch['out_channels'][-1], 3))\n\n\n # Prepare spatial modulation model\n # If not using shared embeddings, self.shared is just a passthrough\n self.spatial_modulation_shared = (self.which_embedding(n_classes, self.shared_dim))\n # First linear layer\n self.spatial_modulation_linear = self.which_linear(self.dim_z + self.shared_dim,\n self.arch['in_channels'][0] * (self.bottom_width **2))\n\n # self.blocks is a doubly-nested list of modules, the outer loop intended\n # to be over blocks at a given resolution (resblocks and/or self-attention)\n # while the inner loop is over a given block\n self.spatial_modulation_blocks = []\n for index in range(len(self.arch['out_channels'])):\n self.spatial_modulation_blocks += [[layers.SpatialModulationGBlock(in_channels=self.arch['in_channels'][index],\n out_channels=self.arch['out_channels'][index],\n which_conv=self.which_conv,\n which_bn=self.which_bn,\n activation=self.activation,\n upsample=(functools.partial(F.interpolate, scale_factor=2)\n if self.arch['upsample'][index] else None))]]\n\n # Turn self.blocks into a ModuleList so that it's all properly registered.\n self.spatial_modulation_blocks = nn.ModuleList([nn.ModuleList(block) for block in self.spatial_modulation_blocks])\n\n\n # Initialize weights. Optionally skip init for testing.\n if not skip_init:\n self.init_weights()\n\n # Set up optimizer\n # If this is an EMA copy, no need for an optim, so just return now\n if no_optim:\n return\n self.lr, self.B1, self.B2, self.adam_eps = G_lr, G_B1, G_B2, adam_eps\n if G_mixed_precision:\n print('Using fp16 adam in G...')\n import utils\n self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0,\n eps=self.adam_eps)\n else:\n self.optim = optim.Adam(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0,\n eps=self.adam_eps)\n\n # LR scheduling, left here for forward compatibility\n # self.lr_sched = {'itr' : 0}# if self.progressive else {}\n # self.j = 0\n\n # Initialize\n def init_weights(self):\n self.param_count = 0\n for module in self.modules():\n if (isinstance(module, nn.Conv2d) \n or isinstance(module, nn.Linear) \n or isinstance(module, nn.Embedding)):\n if self.init == 'ortho':\n init.orthogonal_(module.weight)\n elif self.init == 'N02':\n init.normal_(module.weight, 0, 0.02)\n elif self.init in ['glorot', 'xavier']:\n init.xavier_uniform_(module.weight)\n else:\n print('Init style not recognized...')\n self.param_count += sum([p.data.nelement() for p in module.parameters()])\n print('Param count for G''s initialized parameters: %d' % self.param_count)\n\n # Note on this forward function: we pass in a y vector which has\n # already been passed through G.shared to enable easy class-wise\n # interpolation later. 
If we passed in the one-hot and then ran it through\n # G.shared in this forward function, it would be harder to handle.\n def forward(self, z, y):\n # If hierarchical, concatenate zs and ys\n if self.hier:\n zs = torch.split(z, self.z_chunk_size, 1)\n z = zs[0]\n ys = [torch.cat([y, item], 1) for item in zs[1:]]\n\n # Class embedding layer\n # spatial_c = self.spatial_modulation_shared(y)\n # Mixing layer\n spatial_h = self.spatial_modulation_linear(torch.cat([y, z], 1))\n # Reshape\n spatial_h = spatial_h.view(spatial_h.size(0), -1, self.bottom_width, self.bottom_width)\n else:\n ys = [y] * len(self.blocks)\n\n # Class embedding layer\n spatial_c = self.spatial_modulation_shared(y)\n # Mixing layer\n if len(spatial_c.shape) == 3:\n spatial_c = torch.squeeze(spatial_c, dim=1)\n spatial_h = self.spatial_modulation_linear(torch.cat([spatial_c, z], 1))\n # Reshape\n spatial_h = spatial_h.view(spatial_h.size(0), -1, self.bottom_width, self.bottom_width)\n \n # First linear layer\n h = self.linear(z)\n # Reshape\n h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)\n \n # Loop over blocks\n for index, blocklist in enumerate(self.blocks):\n # Spatial modulation calculation\n spatial_h, voxelwise_a_mod, voxelwise_b_mod = self.spatial_modulation_blocks[index][0](spatial_h)\n # Second inner loop in case block has multiple layers\n for block in blocklist:\n # Main layer forward\n h = block(h, ys[index])\n # Most coarse modulation\n # h = (h - torch.mean(h, dim=(2, 3), keepdim=True)) / torch.std(h, dim=(2, 3), keepdim=True)\n # h = h * (1 + global_a_mod.repeat(1, 1, h.shape[2], h.shape[3])) + global_b_mod.repeat(1, 1, h.shape[2], h.shape[3])\n # Most fine modulation\n h = (h - torch.mean(h, dim=(1, 2, 3), keepdim=True)) / torch.std(h, dim=(1, 2, 3), keepdim=True)\n h = h * (1 + voxelwise_a_mod) + voxelwise_b_mod\n \n # Apply batchnorm-relu-conv-tanh at output\n return torch.tanh(self.output_layer(h))\n\n\n# Discriminator architecture, same paradigm as G's above\ndef D_arch(ch=64, attention='64',ksize='333333', dilation='111111'):\n arch = {}\n arch[256] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8, 8, 16]],\n 'out_channels' : [item * ch for item in [1, 2, 4, 8, 8, 16, 16]],\n 'downsample' : [True] * 6 + [False],\n 'resolution' : [128, 64, 32, 16, 8, 4, 4 ],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,8)}}\n arch[128] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8, 16]],\n 'out_channels' : [item * ch for item in [1, 2, 4, 8, 16, 16]],\n 'downsample' : [True] * 5 + [False],\n 'resolution' : [64, 32, 16, 8, 4, 4],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,8)}}\n arch[64] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8]],\n 'out_channels' : [item * ch for item in [1, 2, 4, 8, 16]],\n 'downsample' : [True] * 4 + [False],\n 'resolution' : [32, 16, 8, 4, 4],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,7)}}\n arch[32] = {'in_channels' : [3] + [item * ch for item in [4, 4, 4]],\n 'out_channels' : [item * ch for item in [4, 4, 4, 4]],\n 'downsample' : [True, True, False, False],\n 'resolution' : [16, 16, 16, 16],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,6)}}\n return arch\n\nclass Discriminator(nn.Module):\n\n def __init__(self, D_ch=64, D_wide=True, resolution=128,\n D_kernel_size=3, D_attn='64', n_classes=1000,\n num_D_SVs=1, num_D_SV_itrs=1, 
D_activation=nn.ReLU(inplace=False),\n D_lr=2e-4, D_B1=0.0, D_B2=0.999, adam_eps=1e-8,\n SN_eps=1e-12, output_dim=1, D_mixed_precision=False, D_fp16=False,\n D_init='ortho', skip_init=False, D_param='SN', **kwargs):\n super(Discriminator, self).__init__()\n # Width multiplier\n self.ch = D_ch\n # Use Wide D as in BigGAN and SA-GAN or skinny D as in SN-GAN?\n self.D_wide = D_wide\n # Resolution\n self.resolution = resolution\n # Kernel size\n self.kernel_size = D_kernel_size\n # Attention?\n self.attention = D_attn\n # Number of classes\n self.n_classes = n_classes\n # Activation\n self.activation = D_activation\n # Initialization style\n self.init = D_init\n # Parameterization style\n self.D_param = D_param\n # Epsilon for Spectral Norm?\n self.SN_eps = SN_eps\n # Fp16?\n self.fp16 = D_fp16\n # Architecture\n self.arch = D_arch(self.ch, self.attention)[resolution]\n\n # Which convs, batchnorms, and linear layers to use\n # No option to turn off SN in D right now\n if self.D_param == 'SN':\n self.which_conv = functools.partial(layers.SNConv2d,\n kernel_size=3, padding=1,\n num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,\n eps=self.SN_eps)\n self.which_linear = functools.partial(layers.SNLinear,\n num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,\n eps=self.SN_eps)\n self.which_embedding = functools.partial(layers.SNEmbedding,\n num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,\n eps=self.SN_eps)\n # Prepare model\n # self.blocks is a doubly-nested list of modules, the outer loop intended\n # to be over blocks at a given resolution (resblocks and/or self-attention)\n self.blocks = []\n for index in range(len(self.arch['out_channels'])):\n self.blocks += [[layers.DBlock(in_channels=self.arch['in_channels'][index],\n out_channels=self.arch['out_channels'][index],\n which_conv=self.which_conv,\n wide=self.D_wide,\n activation=self.activation,\n preactivation=(index > 0),\n downsample=(nn.AvgPool2d(2) if self.arch['downsample'][index] else None))]]\n # If attention on this block, attach it to the end\n if self.arch['attention'][self.arch['resolution'][index]]:\n print('Adding attention layer in D at resolution %d' % self.arch['resolution'][index])\n self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index],\n self.which_conv)]\n # Turn self.blocks into a ModuleList so that it's all properly registered.\n self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])\n # Linear output layer. The output dimension is typically 1, but may be\n # larger if we're e.g. 
turning this into a VAE with an inference output\n self.linear = self.which_linear(self.arch['out_channels'][-1], output_dim)\n # Embedding for projection discrimination\n self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][-1])\n\n # Initialize weights\n if not skip_init:\n self.init_weights()\n\n # Set up optimizer\n self.lr, self.B1, self.B2, self.adam_eps = D_lr, D_B1, D_B2, adam_eps\n if D_mixed_precision:\n print('Using fp16 adam in D...')\n import utils\n self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)\n else:\n self.optim = optim.Adam(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)\n # LR scheduling, left here for forward compatibility\n # self.lr_sched = {'itr' : 0}# if self.progressive else {}\n # self.j = 0\n\n # Initialize\n def init_weights(self):\n self.param_count = 0\n for module in self.modules():\n if (isinstance(module, nn.Conv2d)\n or isinstance(module, nn.Linear)\n or isinstance(module, nn.Embedding)):\n if self.init == 'ortho':\n init.orthogonal_(module.weight)\n elif self.init == 'N02':\n init.normal_(module.weight, 0, 0.02)\n elif self.init in ['glorot', 'xavier']:\n init.xavier_uniform_(module.weight)\n else:\n print('Init style not recognized...')\n self.param_count += sum([p.data.nelement() for p in module.parameters()])\n print('Param count for D''s initialized parameters: %d' % self.param_count)\n\n def forward(self, x, y=None):\n # Stick x into h for cleaner for loops without flow control\n h = x\n # Loop over blocks\n for index, blocklist in enumerate(self.blocks):\n for block in blocklist:\n h = block(h)\n # Apply global sum pooling as in SN-GAN\n h = torch.sum(self.activation(h), [2, 3])\n # Get initial class-unconditional output\n out = self.linear(h)\n # Get projection of final featureset onto class vectors and add to evidence\n out = out + torch.sum(self.embed(y) * h, 1, keepdim=True)\n return out\n\n# Parallelized G_D to minimize cross-gpu communication\n# Without this, Generator outputs would get all-gathered and then rebroadcast.\nclass G_D(nn.Module):\n def __init__(self, G, D):\n super(G_D, self).__init__()\n self.G = G\n self.D = D\n\n def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False,\n split_D=False): \n # If training G, enable grad tape\n with torch.set_grad_enabled(train_G):\n # Get Generator output given noise\n G_z = self.G(z, self.G.shared(gy))\n # Cast as necessary\n if self.G.fp16 and not self.D.fp16:\n G_z = G_z.float()\n if self.D.fp16 and not self.G.fp16:\n G_z = G_z.half()\n # Split_D means to run D once with real data and once with fake,\n # rather than concatenating along the batch dimension.\n if split_D:\n D_fake = self.D(G_z, gy)\n if x is not None:\n D_real = self.D(x, dy)\n return D_fake, D_real\n else:\n if return_G_z:\n return D_fake, G_z\n else:\n return D_fake\n # If real data is provided, concatenate it with the Generator's output\n # along the batch dimension for improved efficiency.\n else:\n D_input = torch.cat([G_z, x], 0) if x is not None else G_z\n D_class = torch.cat([gy, dy], 0) if dy is not None else gy\n # Get Discriminator output\n D_out = self.D(D_input, D_class)\n if x is not None:\n return torch.split(D_out, [G_z.shape[0], x.shape[0]]) # D_fake, D_real\n else:\n if return_G_z:\n return D_out, G_z\n else:\n return D_out\n" ]
[ [ "torch.mean", "torch.cat", "torch.nn.ModuleList", "torch.nn.AvgPool2d", "torch.set_grad_enabled", "torch.std", "torch.nn.init.orthogonal_", "torch.nn.init.normal_", "torch.split", "torch.nn.init.xavier_uniform_", "torch.nn.ReLU", "torch.squeeze" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ericonaldo/ILSwiss
[ "efd25d457fd1578005c6fbc45cae29e9ab64a99d", "efd25d457fd1578005c6fbc45cae29e9ab64a99d" ]
[ "rlkit/core/eval_util.py", "rlkit/torch/algorithms/sac/sac_alpha.py" ]
[ "\"\"\"\nCommon evaluation utilities.\n\"\"\"\n\nfrom collections import OrderedDict\nfrom numbers import Number\nimport os\nimport json\n\nimport numpy as np\n\nfrom rlkit.core.vistools import plot_returns_on_same_plot, save_plot\n\n\ndef get_generic_path_information(paths, stat_prefix=\"\"):\n \"\"\"\n Get an OrderedDict with a bunch of statistic names and values.\n \"\"\"\n statistics = OrderedDict()\n returns = [sum(path[\"rewards\"]) for path in paths]\n # rewards = np.vstack([path[\"rewards\"] for path in paths])\n rewards = np.concatenate([path[\"rewards\"] for path in paths])\n statistics.update(\n create_stats_ordered_dict(\n \"Rewards\", rewards, stat_prefix=stat_prefix, always_show_all_stats=True\n )\n )\n statistics.update(\n create_stats_ordered_dict(\n \"Returns\", returns, stat_prefix=stat_prefix, always_show_all_stats=True\n )\n )\n # print(paths[0][\"env_infos\"])\n if \"is_success\" in paths[0][\"env_infos\"][0].keys():\n acc_sum = [(np.sum([x['is_success'] for x in path[\"env_infos\"]])>0).astype(float) for path in paths]\n acc = np.sum(acc_sum) * 1.0 / len(paths)\n statistics.update(\n create_stats_ordered_dict(\n \"Success Num\", np.sum(acc_sum), stat_prefix=stat_prefix, always_show_all_stats=True\n )\n )\n statistics.update(\n create_stats_ordered_dict(\n \"Traj Num\", len(paths), stat_prefix=stat_prefix, always_show_all_stats=True\n )\n )\n statistics.update(\n create_stats_ordered_dict(\n \"Success Rate\", acc, stat_prefix=stat_prefix, always_show_all_stats=True\n )\n )\n actions = [path[\"actions\"] for path in paths]\n # if isinstance(actions[0][0], np.ndarray):\n # actions = np.vstack([path[\"actions\"] for path in paths])\n # else:\n # actions = np.hstack([path[\"actions\"] for path in paths])\n statistics.update(\n create_stats_ordered_dict(\n \"Actions\", actions, stat_prefix=stat_prefix, always_show_all_stats=True\n )\n )\n statistics.update(\n create_stats_ordered_dict(\n \"Ep. 
Len.\",\n np.array([len(path[\"terminals\"]) for path in paths]),\n stat_prefix=stat_prefix,\n always_show_all_stats=True,\n )\n )\n statistics[\"Num Paths\"] = len(paths)\n\n return statistics\n\n\ndef get_average_returns(paths, std=False):\n returns = [sum(path[\"rewards\"]) for path in paths]\n if std:\n return np.mean(returns), np.std(returns)\n\n return np.mean(returns)\n\n\ndef create_stats_ordered_dict(\n name,\n data,\n stat_prefix=None,\n always_show_all_stats=False,\n exclude_max_min=False,\n):\n # print('\\n<<<< STAT FOR {} {} >>>>'.format(stat_prefix, name))\n if stat_prefix is not None:\n name = \"{} {}\".format(stat_prefix, name)\n if isinstance(data, Number):\n # print('was a Number')\n return OrderedDict({name: data})\n\n if len(data) == 0:\n return OrderedDict()\n\n if isinstance(data, tuple):\n # print('was a tuple')\n ordered_dict = OrderedDict()\n for number, d in enumerate(data):\n sub_dict = create_stats_ordered_dict(\n \"{0}_{1}\".format(name, number),\n d,\n )\n ordered_dict.update(sub_dict)\n return ordered_dict\n\n if isinstance(data, list):\n # print('was a list')\n try:\n iter(data[0])\n except TypeError:\n pass\n else:\n data = np.concatenate(data)\n\n if isinstance(data, np.ndarray) and data.size == 1 and not always_show_all_stats:\n # print('was a numpy array of data.size==1')\n return OrderedDict({name: float(data)})\n\n # print('was a numpy array NOT of data.size==1')\n stats = OrderedDict(\n [\n (name + \" Mean\", np.mean(data)),\n (name + \" Std\", np.std(data)),\n ]\n )\n if not exclude_max_min:\n stats[name + \" Max\"] = np.max(data)\n stats[name + \" Min\"] = np.min(data)\n return stats\n\n\n# I (Kamyar) will be adding my own eval utils here too\ndef plot_experiment_returns(\n exp_path,\n title,\n save_path,\n column_name=\"Test_Returns_Mean\",\n x_axis_lims=None,\n y_axis_lims=None,\n constraints=None,\n plot_mean=False,\n plot_horizontal_lines_at=None,\n horizontal_lines_names=None,\n):\n \"\"\"\n plots the Test Returns Mean of all the\n \"\"\"\n arr_list = []\n names = []\n\n dir_path = os.path.split(save_path)[0]\n os.makedirs(dir_path, exist_ok=True)\n\n # print(exp_path)\n\n for sub_exp_dir in os.listdir(exp_path):\n try:\n sub_exp_path = os.path.join(exp_path, sub_exp_dir)\n if not os.path.isdir(sub_exp_path):\n continue\n if constraints is not None:\n constraints_satisfied = True\n with open(os.path.join(sub_exp_path, \"variant.json\"), \"r\") as j:\n d = json.load(j)\n for k, v in constraints.items():\n k = k.split(\".\")\n d_v = d[k[0]]\n for sub_k in k[1:]:\n d_v = d_v[sub_k]\n if d_v != v:\n constraints_satisfied = False\n break\n if not constraints_satisfied:\n # for debugging\n # print('\\nconstraints')\n # print(constraints)\n # print('\\nthis dict')\n # print(d)\n continue\n\n csv_full_path = os.path.join(sub_exp_path, \"progress.csv\")\n # print(csv_full_path)\n try:\n progress_csv = np.genfromtxt(\n csv_full_path, skip_header=0, delimiter=\",\", names=True\n )\n # print(progress_csv.dtype)\n if isinstance(column_name, str):\n column_name = [column_name]\n for c_name in column_name:\n if \"+\" in c_name:\n first, second = c_name.split(\"+\")\n returns = progress_csv[first] + progress_csv[second]\n elif \"-\" in c_name:\n first, second = c_name.split(\"-\")\n returns = progress_csv[first] - progress_csv[second]\n else:\n returns = progress_csv[c_name]\n arr_list.append(returns)\n names.append(c_name + \"_\" + sub_exp_dir)\n # print(csv_full_path)\n except:\n pass\n except:\n pass\n\n if plot_mean:\n min_len = min(map(lambda a: 
a.shape[0], arr_list))\n arr_list = list(map(lambda a: a[:min_len], arr_list))\n returns = np.stack(arr_list)\n mean = np.mean(returns, 0)\n std = np.std(returns, 0)\n x = np.arange(min_len)\n # save_plot(x, mean, title, save_path, color='cyan', x_axis_lims=x_axis_lims, y_axis_lims=y_axis_lims)\n plot_returns_on_same_plot(\n [mean, mean + std, mean - std],\n [\"mean\", \"mean+std\", \"mean-std\"],\n title,\n save_path,\n x_axis_lims=x_axis_lims,\n y_axis_lims=y_axis_lims,\n )\n else:\n if len(arr_list) == 0:\n print(0)\n if plot_horizontal_lines_at is not None:\n max_len = max(map(lambda a: a.shape[0], arr_list))\n arr_list += [np.ones(max_len) * y_val for y_val in plot_horizontal_lines_at]\n names += horizontal_lines_names\n try:\n # print(len(arr_list))\n plot_returns_on_same_plot(\n arr_list,\n names,\n title,\n save_path,\n x_axis_lims=x_axis_lims,\n y_axis_lims=y_axis_lims,\n )\n except Exception as e:\n print(\"Failed to plot:\")\n print(arr_list)\n print(title)\n print(exp_path)\n print(constraints)\n # raise e\n", "from collections import OrderedDict\n\nimport numpy as np\nimport torch\nfrom torch import nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport itertools\n\nimport rlkit.torch.utils.pytorch_util as ptu\nfrom rlkit.core.trainer import Trainer\nfrom rlkit.core.eval_util import create_stats_ordered_dict\n\n\nclass SoftActorCritic(Trainer):\n \"\"\"\n version that:\n - uses reparameterization trick\n - has two Q functions\n - has auto-tuned alpha\n \"\"\"\n\n def __init__(\n self,\n policy,\n qf1,\n qf2,\n reward_scale=1.0,\n discount=0.99,\n policy_lr=1e-3,\n qf_lr=1e-3,\n alpha_lr=3e-4,\n soft_target_tau=1e-2,\n alpha=0.2,\n train_alpha=True,\n policy_mean_reg_weight=1e-3,\n policy_std_reg_weight=1e-3,\n optimizer_class=optim.Adam,\n beta_1=0.9,\n **kwargs\n ):\n self.policy = policy\n self.qf1 = qf1\n self.qf2 = qf2\n self.reward_scale = reward_scale\n self.discount = discount\n self.soft_target_tau = soft_target_tau\n self.policy_mean_reg_weight = policy_mean_reg_weight\n self.policy_std_reg_weight = policy_std_reg_weight\n\n self.train_alpha = train_alpha\n self.log_alpha = torch.tensor(np.log(alpha), requires_grad=train_alpha)\n self.alpha = self.log_alpha.detach().exp()\n assert \"env\" in kwargs.keys(), \"env info should be taken into SAC alpha\"\n self.target_entropy = -np.prod(kwargs[\"env\"].action_space.shape)\n\n self.target_qf1 = qf1.copy()\n self.target_qf2 = qf2.copy()\n\n self.eval_statistics = None\n\n self.policy_optimizer = optimizer_class(\n self.policy.parameters(), lr=policy_lr, betas=(beta_1, 0.999)\n )\n self.qf1_optimizer = optimizer_class(\n self.qf1.parameters(), lr=qf_lr, betas=(beta_1, 0.999)\n )\n self.qf2_optimizer = optimizer_class(\n self.qf2.parameters(), lr=qf_lr, betas=(beta_1, 0.999)\n )\n self.alpha_optimizer = optimizer_class(\n [self.log_alpha], lr=alpha_lr, betas=(beta_1, 0.999)\n )\n\n def train_step(self, batch):\n # q_params = itertools.chain(self.qf1.parameters(), self.qf2.parameters())\n # policy_params = itertools.chain(self.policy.parameters())\n\n rewards = self.reward_scale * batch[\"rewards\"]\n terminals = batch[\"terminals\"]\n obs = batch[\"observations\"]\n actions = batch[\"actions\"]\n next_obs = batch[\"next_observations\"]\n\n \"\"\"\n QF Loss\n \"\"\"\n # Only unfreeze parameter of Q\n # for p in itertools.chain(self.qf1.parameters(), self.qf2.parameters()):\n # p.requires_grad = True\n # for p in self.policy.parameters():\n # p.requires_grad = False\n self.qf1_optimizer.zero_grad()\n 
self.qf2_optimizer.zero_grad()\n q1_pred = self.qf1(obs, actions)\n q2_pred = self.qf2(obs, actions)\n\n # Make sure policy accounts for squashing functions like tanh correctly!\n next_policy_outputs = self.policy(next_obs, return_log_prob=True)\n # in this part, we only need new_actions and log_pi with no grad\n (\n next_new_actions,\n next_policy_mean,\n next_policy_log_std,\n next_log_pi,\n ) = next_policy_outputs[:4]\n target_qf1_values = self.target_qf1(\n next_obs, next_new_actions\n ) # do not need grad || it's the shared part of two calculation\n target_qf2_values = self.target_qf2(\n next_obs, next_new_actions\n ) # do not need grad || it's the shared part of two calculation\n min_target_value = torch.min(target_qf1_values, target_qf2_values)\n q_target = rewards + (1.0 - terminals) * self.discount * (\n min_target_value - self.alpha * next_log_pi\n ) ## original implementation has detach\n q_target = q_target.detach()\n\n qf1_loss = 0.5 * torch.mean((q1_pred - q_target) ** 2)\n qf2_loss = 0.5 * torch.mean((q2_pred - q_target) ** 2)\n\n # freeze parameter of Q\n # for p in itertools.chain(self.qf1.parameters(), self.qf2.parameters()):\n # p.requires_grad = False\n\n qf1_loss.backward()\n qf2_loss.backward()\n\n self.qf1_optimizer.step()\n self.qf2_optimizer.step()\n\n \"\"\"\n Policy Loss\n \"\"\"\n # for p in itertools.chain(self.qf1.parameters(), self.qf2.parameters()):\n # p.requires_grad = False\n # for p in self.policy.parameters():\n # p.requires_grad = True\n policy_outputs = self.policy(obs, return_log_prob=True)\n new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]\n q1_new_acts = self.qf1(obs, new_actions)\n q2_new_acts = self.qf2(obs, new_actions) ## error\n q_new_actions = torch.min(q1_new_acts, q2_new_acts)\n\n self.policy_optimizer.zero_grad()\n policy_loss = torch.mean(self.alpha * log_pi - q_new_actions) ##\n mean_reg_loss = self.policy_mean_reg_weight * (policy_mean ** 2).mean()\n std_reg_loss = self.policy_std_reg_weight * (policy_log_std ** 2).mean()\n policy_reg_loss = mean_reg_loss + std_reg_loss\n policy_loss = policy_loss + policy_reg_loss\n policy_loss.backward()\n self.policy_optimizer.step()\n\n \"\"\"\n Update alpha\n \"\"\"\n if self.train_alpha:\n log_prob = log_pi.detach() + self.target_entropy\n alpha_loss = -(self.log_alpha * log_prob).mean()\n self.alpha_optimizer.zero_grad()\n alpha_loss.backward()\n self.alpha_optimizer.step()\n self.alpha = self.log_alpha.detach().exp()\n\n \"\"\"\n Update networks\n \"\"\"\n # unfreeze all -> initial states\n # for p in itertools.chain(self.qf1.parameters(), self.qf2.parameters()):\n # p.requires_grad = True\n # for p in self.policy.parameters():\n # p.requires_grad = True\n\n # unfreeze parameter of Q\n # for p in itertools.chain(self.qf1.parameters(), self.qf2.parameters()):\n # p.requires_grad = True\n\n self._update_target_network()\n\n \"\"\"\n Save some statistics for eval\n \"\"\"\n if self.eval_statistics is None:\n \"\"\"\n Eval should set this to None.\n This way, these statistics are only computed for one batch.\n \"\"\"\n self.eval_statistics = OrderedDict()\n self.eval_statistics[\"Reward Scale\"] = self.reward_scale\n self.eval_statistics[\"QF1 Loss\"] = np.mean(ptu.get_numpy(qf1_loss))\n self.eval_statistics[\"QF2 Loss\"] = np.mean(ptu.get_numpy(qf2_loss))\n if self.train_alpha:\n self.eval_statistics[\"Alpha Loss\"] = np.mean(ptu.get_numpy(alpha_loss))\n self.eval_statistics[\"Policy Loss\"] = np.mean(ptu.get_numpy(policy_loss))\n self.eval_statistics.update(\n 
create_stats_ordered_dict(\n \"Q1 Predictions\",\n ptu.get_numpy(q1_pred),\n )\n )\n self.eval_statistics.update(\n create_stats_ordered_dict(\n \"Q2 Predictions\",\n ptu.get_numpy(q2_pred),\n )\n )\n self.eval_statistics.update(\n create_stats_ordered_dict(\n \"Alpha\",\n [ptu.get_numpy(self.alpha)],\n )\n )\n self.eval_statistics.update(\n create_stats_ordered_dict(\n \"Log Pis\",\n ptu.get_numpy(log_pi),\n )\n )\n self.eval_statistics.update(\n create_stats_ordered_dict(\n \"Policy mu\",\n ptu.get_numpy(policy_mean),\n )\n )\n self.eval_statistics.update(\n create_stats_ordered_dict(\n \"Policy log std\",\n ptu.get_numpy(policy_log_std),\n )\n )\n\n @property\n def networks(self):\n return [\n self.policy,\n self.qf1,\n self.qf2,\n self.target_qf1,\n self.target_qf2,\n ]\n\n def _update_target_network(self):\n ptu.soft_update_from_to(self.qf1, self.target_qf1, self.soft_target_tau)\n ptu.soft_update_from_to(self.qf2, self.target_qf2, self.soft_target_tau)\n\n def get_snapshot(self):\n return dict(\n qf1=self.qf1,\n qf2=self.qf2,\n policy=self.policy,\n target_qf1=self.target_qf1,\n target_qf2=self.target_qf2,\n log_alpha=self.log_alpha,\n policy_optimizer=self.policy_optimizer,\n qf1_optimizer=self.qf1_optimizer,\n qf2_optimizer=self.qf2_optimizer,\n alpha_optimizer=self.alpha_optimizer,\n )\n\n def load_snapshot(self, snapshot):\n self.qf1 = snapshot[\"qf1\"]\n self.qf2 = snapshot[\"qf2\"]\n self.policy = snapshot[\"policy\"]\n self.target_qf1 = snapshot[\"target_qf1\"]\n self.target_qf2 = snapshot[\"target_qf2\"]\n self.log_alpha = snapshot[\"log_alpha\"]\n self.policy_optimizer = snapshot[\"policy_optimizer\"]\n self.qf1_optimizer = snapshot[\"qf1_optimizer\"]\n self.qf2_optimizer = snapshot[\"qf2_optimizer\"]\n self.alpha_optimizer = snapshot[\"alpha_optimizer\"]\n\n def get_eval_statistics(self):\n return self.eval_statistics\n\n def end_epoch(self):\n self.eval_statistics = None\n\n def to(self, device):\n self.log_alpha.to(device)\n super.to(device)\n" ]
[ [ "numpy.min", "numpy.arange", "numpy.stack", "numpy.genfromtxt", "numpy.concatenate", "numpy.max", "numpy.std", "numpy.ones", "numpy.mean", "numpy.sum" ], [ "torch.mean", "torch.min", "numpy.log", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shelleyHLX/ai-server
[ "12c4a654a686462b8b725fa0641cc967d2f80e14", "12c4a654a686462b8b725fa0641cc967d2f80e14" ]
[ "model/nlp/topic.py", "model/image/repair.py" ]
[ "# -*- coding: utf-8 -*-\n# Author: XuMing <[email protected]>\n# Brief: \nimport operator\nimport os\n\nimport tensorflow as tf\nfrom keras.models import load_model\n\nfrom model.nlp.keras_data_reader import load_dict\nfrom model.nlp.keras_data_reader import pad_sequence\nfrom model.nlp.keras_data_reader import vectorize_words\nfrom utils.io_util import get_logger\n\nlogger = get_logger(__file__)\n\nlabel_revserv_dict = {0: '人类作者',\n 1: '机器作者',\n 2: '机器翻译',\n 3: '自动摘要'}\n\n\nclass Topic(object):\n topic_model = None\n\n def __init__(self, model_path, word_dict_path, maxlen=400):\n self.name = 'topic'\n self.maxlen = maxlen\n # load dict\n pwd_path = os.path.abspath(os.path.dirname(__file__))\n if word_dict_path:\n try:\n self.word_ids_dict = load_dict(word_dict_path)\n except IOError:\n word_dict_path = os.path.join(pwd_path, '../..', word_dict_path)\n self.word_ids_dict = load_dict(word_dict_path)\n\n # load parrots_model by file\n if model_path:\n try:\n self.topic_model = load_model(model_path)\n except IOError:\n model_path = os.path.join(pwd_path, '../..', model_path)\n self.topic_model = load_model(model_path)\n logger.info(\"Load topic model ok, path: \" + model_path)\n # self.topic_model._make_predict_function() # have to initialize before threading\n self.graph = tf.get_default_graph()\n else:\n logger.warn('topic model file is need')\n raise Exception('topic model file need')\n\n @classmethod\n def get_instance(cls, model_path, word_dict_path, maxlen=400):\n if cls.topic_model:\n return cls.topic_model\n else:\n obj = cls(model_path, word_dict_path, maxlen=maxlen)\n cls.topic_model = obj\n return obj\n\n def get_topic(self, text):\n # read data to index\n test_text_words = [list(text)]\n word_ids = vectorize_words(test_text_words, self.word_ids_dict)\n # pad sequence\n word_seq = pad_sequence(word_ids, self.maxlen)\n\n with self.graph.as_default():\n # predict prob\n predict_probs = self.topic_model.predict(word_seq)\n # get prob for one line test text\n probs = predict_probs[0]\n probs_dict = dict((idx, prob) for idx, prob in enumerate(probs))\n probs_order_dict = sorted(probs_dict.items(), key=operator.itemgetter(1), reverse=True)\n return probs_order_dict\n\n def check(self, text):\n \"\"\"\n Args:\n text: 欧洲冠军联赛是欧洲足球协会联盟主办的年度足球比赛\n Returns:\n {\n \"log_id\": 3591049593939822907,\n \"items\": {\n \"lv2_tag_list\": [\n {\n \"score\": 0.877436,\n \"tag\": \"足球\"\n },\n {\n \"score\": 0.793682,\n \"tag\": \"国际足球\"\n },\n {\n \"score\": 0.775911,\n \"tag\": \"英超\"\n }\n ],\n \"lv1_tag_list\": [\n {\n \"score\": 0.824329,\n \"tag\": \"体育\"\n }\n ]\n }\n }\n \"\"\"\n result_dict = {\"text\": text}\n topics = self.get_topic(text)\n items_list = []\n for idx, prob in topics:\n # get top 3\n if len(items_list) > 2:\n continue\n items = dict()\n items[\"score\"] = prob\n items[\"tag\"] = label_revserv_dict[idx]\n items_list.append(items)\n result_dict['items'] = items_list\n return result_dict\n", "# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing([email protected])\n@description: 图片修复,水印去除\n\"\"\"\nimport base64\nimport os\nimport time\n\nimport cv2\nimport numpy as np\n\nfrom utils.io_util import get_logger\nfrom utils.string_util import get_suffix_base64, resize_img, rename_path\n\nlogger = get_logger(__file__)\n\n\nclass Repair(object):\n model = None\n\n def __init__(self):\n self.name = 'image_repair'\n self.model = cv2\n self.pwd_path = os.path.abspath(os.path.dirname(__file__))\n\n @classmethod\n def get_instance(cls):\n if cls.model:\n return cls.model\n else:\n obj = cls()\n 
cls.model = obj\n return obj\n\n def repair_image(self, input_image_path):\n # 加载图片\n img = self.model.imread(input_image_path)\n # hight, width, depth = img.shape[0:3]\n\n # 图片二值化处理,把[240, 240, 240]~[255, 255, 255]以外的颜色变成0\n thresh = self.model.inRange(img, np.array([240, 240, 240]), np.array([255, 255, 255]))\n\n # 创建形状和尺寸的结构元素\n kernel = np.ones((3, 3), np.uint8)\n\n # 扩张待修复区域\n mask = self.model.dilate(thresh, kernel, iterations=1)\n specular = self.model.inpaint(img, mask, 5, flags=cv2.INPAINT_TELEA)\n return specular\n\n def save_image(self, output_image_path, img):\n return self.model.imwrite(output_image_path, img)\n\n def check_file(self, input_image_path, output_image_path=''):\n \"\"\"\n Args:\n input_image_path: path(string)\n output_image_path: path(string)\n Returns:\n {\n \"log_id\": \"12345\",\n \"input\": path,\n \"output\": path\n }\n \"\"\"\n result_dict = {\"input_image_path\": input_image_path}\n\n predict_image = self.repair_image(input_image_path)\n if output_image_path:\n self.save_image(output_image_path, predict_image)\n else:\n dir_path, file_path = os.path.split(input_image_path)\n file_name, suffix = os.path.splitext(file_path)\n output_image_path = os.path.join(dir_path, 'repaired_' + file_name + suffix)\n self.save_image(output_image_path, predict_image)\n result_dict['output_image_path'] = output_image_path\n encoded = base64.b64encode(open(output_image_path, 'rb').read())\n result_dict['output_base64'] = encoded.decode('utf-8')\n return result_dict\n\n def check(self, input_image_base64, output_image_path=''):\n \"\"\"\n Args:\n input_image_base64: (string)\n output_image_path: path(string)\n Returns:\n {\n \"log_id\": \"12345\",\n \"input\": path,\n \"output\": path\n }\n \"\"\"\n input_image_base64, suffix = get_suffix_base64(input_image_base64)\n input_image = base64.b64decode(input_image_base64)\n now = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n path = os.path.join(self.pwd_path, '../../upload/', self.name)\n if not os.path.exists(path):\n os.makedirs(path)\n input_image_path = os.path.join(path, now + '.' + suffix)\n with open(input_image_path, 'wb') as f:\n f.write(input_image)\n logger.debug(input_image_path)\n resize_img_path = rename_path(input_image_path, prefix='resize_')\n resize_img(input_image_path, resize_img_path)\n\n return self.check_file(resize_img_path, output_image_path)\n" ]
[ [ "tensorflow.get_default_graph" ], [ "numpy.array", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
skohtz1/web-scrapingHW
[ "11cf4686286a4fa51ef23a9e0afc5adca21f40c1" ]
[ "scrape_mars.py" ]
[ "from bs4 import BeautifulSoup\nimport requests\nfrom splinter import Browser\nimport pandas as pd\nimport time\n\ndef init_browser():\n # @NOTE: Replace the path with your actual path to the chromedriver\n executable_path = {\"executable_path\": \"./chromedriver\"}\n return Browser(\"chrome\", **executable_path, headless=False)\n\n\ndef scrape():\n browser = init_browser()\n url_nasa = \"https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest\"\n # Retrieve page with the requests module\n response_nasa = requests.get(url_nasa)\n # Create BeautifulSoup object; parse with 'html.parser'\n soup_nasa = BeautifulSoup(response_nasa.text, 'html.parser')\n \n ##finding the title and summary of first article\n results_titles = soup_nasa.find_all('div', class_='content_title')\n summaries = soup_nasa.find_all(\"div\", class_ = \"rollover_description_inner\")\n title_first = results_titles[0].text.strip()\n summaries_first = summaries[0].text.strip()\n \n ##finding feature image url\n url_mars_img = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n browser.visit(url_mars_img)\n browser.click_link_by_partial_text('FULL IMAGE')\n time.sleep(5)\n browser.click_link_by_partial_text('more info')\n time.sleep(5)\n browser.click_link_by_partial_href('spaceimages/images')\n feature_image_url = browser.url\n time.sleep(5)\n \n ##getting the twitter weather\n url_twitter = \"https://twitter.com/marswxreport?lang=en\"\n # Retrieve page with the requests module\n response_twitter = requests.get(url_twitter)\n # Create BeautifulSoup object; parse with 'html.parser'\n soup3 = BeautifulSoup(response_twitter.text, 'html.parser')\n mars_weather = soup3.find_all(\"p\",class_ = \"TweetTextSize TweetTextSize--normal js-tweet-text tweet-text\")[0].text\n \n ##scraping Mars facts\n url_facts = \"https://space-facts.com/mars/\"\n tables = pd.read_html(url_facts)\n df = tables[0]\n df.columns = [\"Parameter\", \"Values\"]\n mars_data_df = df.set_index([\"Parameter\"])\n mars_data_df.to_html(\"mars_facts.html\")\n mars_data_html = mars_data_df.to_html()\n mars_data_html = mars_data_html.replace(\"\\n\", \"\")\n \n \n ##hemisphere\n url_hemis = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n browser.visit(url_hemis)\n time.sleep(5)\n html4 = browser.html\n soup4 = BeautifulSoup(html4, 'html.parser')\n \n links = []\n\n for link in soup4.find_all('a'):\n finds = link.get(\"href\")\n if (\"/search/map/Mars\" in finds):\n links.append(finds)\n \n links = list(set(links))\n \n hemisphere_image_urls = []\n \n for i in range(len(links)):\n dicts1 = {}\n dicts1[\"title\"] = soup4.find_all(\"h3\")[i].text\n browser.click_link_by_partial_text(soup4.find_all(\"h3\")[i].text)\n time.sleep(5)\n n_html = browser.html\n soup5 = BeautifulSoup(n_html, \"html.parser\")\n for link in soup5.find_all(\"a\"):\n finds = link.get(\"href\")\n if (\"/full.jpg\" in finds):\n dicts1[\"img_url\"] = finds\n \n hemisphere_image_urls.append(dicts1)\n browser.back()\n \n \n print(hemisphere_image_urls)\n \n \n mars_data_dict = {\"weather\":mars_weather,\"mars_facts\":mars_data_html,\"hemisphere\":hemisphere_image_urls,\"feature_image\": feature_image_url,\"title_feature\":title_first,\"summary_feature\":summaries_first}\n \n return mars_data_dict\n \n \n\n \n \n \n \n\n" ]
[ [ "pandas.read_html" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Zosit/Useful-Reusable-Code
[ "e5eab12f1ebcc6f16e456a7515ff8cc068b5ab16" ]
[ "Class Projects/CS545(MachineLearning)/qLearning/qlearn.py" ]
[ "print(__doc__)\n\nimport matplotlib\n# Force matplotlib to not use any Xwindows backend.\nmatplotlib.use('Agg')\n\nimport math\nfrom decimal import *\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\n\nimport pandas as pd\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport csv\n\nfrom random import randint\n\n\n\n#init data\nEPISODE_COUNT = 5000\nACTION_COUNT = 200\ntrainingreward = np.zeros(50)\n\n#init Q array (5 state value dimensions, 5 action dimension)\nQarr = np.zeros((3, 3, 3, 3, 3, 5))\n\n#greedy selection variable (multiplied by 100 for simplicity)\nepsilon = 100\n\n\nfor i in range(0, EPISODE_COUNT):\n\t#init board (0 wall, 1 blank, 2 can)\n\tboard = np.zeros((12, 12))\n\tfor j in range(0, 10):\n\t\tfor k in range(0, 10):\n\t\t\tboard[j+1, k+1] = randint(1, 2)\n\t#init bot location (horizontal 0, vertical 1 from top left)\n\tbotloc = np.zeros(2)\n\tbotloc[0] = randint(1, 10)\n\tbotloc[1] = randint(1, 10)\n\n\tepisodereward = 0\n\tfor j in range(0, ACTION_COUNT):\n\t\t#observestate (self, up, left, right, down)\n\t\tstate = np.zeros(5)\n\t\t#self\n\t\tstate[0] = board[int(botloc[0]), int(botloc[1])]\n\t\tstate[1] = board[int(botloc[0]), int(botloc[1] - 1)]\n\t\tstate[2] = board[int(botloc[0] - 1), int(botloc[1])]\n\t\tstate[3] = board[int(botloc[0] + 1), int(botloc[1])]\n\t\tstate[4] = board[int(botloc[0]), int(botloc[1] + 1)]\n\t\t#greedy action selection\n\t\tif (randint(0, 100) > epsilon):\n\t\t\t#do greedy\n\t\t\trandoma = np.random.choice(np.where(Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :] == Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :].max())[0])\n\t\telse:\n\t\t\t#do random action (0 can, 1 up, 2 left, 3 right, 4 down)\n\t\t\trandoma = randint(0, 4)\n\t\t#save qtable location\n\t\toldq = np.zeros(6)\n\t\toldq[0] = state[0]\n\t\toldq[1] = state[1]\n\t\toldq[2] = state[2]\n\t\toldq[3] = state[3]\n\t\toldq[4] = state[4]\n\t\toldq[5] = randoma\n\t\t#take action get reward\n\t\t\t#can grab\n\t\tif(randoma == 0):\n\t\t\t#can grabbed\n\t\t\tif(state[0] == 2):\n\t\t\t\t#remove can\n\t\t\t\tboard[int(botloc[0]), int(botloc[1])] = 1\n\t\t\t\treward = 10\n\t\t\t#can not grabbed\n\t\t\telse:\n\t\t\t\treward = -1\n\t\t#move up\n\t\tif(randoma == 1):\n\t\t\t#wall\n\t\t\tif(state[1] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[1] = botloc[1] - 1\n\t\t\t\treward = 0\n\t\t#move left\n\t\tif(randoma == 2):\n\t\t\t#wall\n\t\t\tif(state[2] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[0] = botloc[0] - 1\n\t\t\t\treward = 0\n\t\t#move right\n\t\tif(randoma == 3):\n\t\t\t#wall\n\t\t\tif(state[3] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[0] = botloc[0] + 1\n\t\t\t\treward = 0\n\t\t#move down\n\t\tif(randoma == 4):\n\t\t\t#wall\n\t\t\tif(state[4] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[1] = botloc[1] + 1\n\t\t\t\treward = 0\n\t\t#print \"movement data\"\n\t\t#print state\n\t\t#print randoma\n\t\t#updatestate\n\t\tstate = np.zeros(5)\n\t\t#self\n\t\tstate[0] = board[int(botloc[0]), int(botloc[1])]\n\t\tstate[1] = board[int(botloc[0]), int(botloc[1] - 1)]\n\t\tstate[2] = board[int(botloc[0] - 1), int(botloc[1])]\n\t\tstate[3] = board[int(botloc[0] + 1), int(botloc[1])]\n\t\tstate[4] = board[int(botloc[0]), int(botloc[1] + 1)]\n\t\t#calculate best Qtable action value in new state\n\t\tmaxq = 
Qarr[int(state[0]),int(state[1]),int(state[2]),int(state[3]),int(state[4]),:].max()\n\t\t#update Q table\n\t\t#if(oldq[0] == 1 and Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), 0] == Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), :].max()):\n\t\t#\tprint \"ERROR\"\n\t\tQarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])] = Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])] + 0.2 * (reward + 0.5 * maxq - Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])])\n\t\tepisodereward = episodereward + reward\n\t#decrement epsilon\n\tif(i % 50 == 49 and epsilon > 10):\n\t\tepsilon = epsilon - 1\n\tif(i % 100 == 99 ):\n\t\ttrainingreward[(int(i / 100))] = int(episodereward)\n#save Training reward data\n#trainingreward.to_csv('TrainReward.csv')\nnp.savetxt('TrainReward.csv', trainingreward, delimiter=',')\nQold = Qarr\n#Test runs\ntestrewards = np.zeros(EPISODE_COUNT)\nfor i in range(0, EPISODE_COUNT):\n\t#init board (0 wall, 1 blank, 2 can)\n\tboard = np.zeros((12, 12))\n\tfor j in range(0, 10):\n\t\tfor k in range(0, 10):\n\t\t\tboard[j+1, k+1] = randint(1, 2)\n\t#init bot location (horizontal 0, vertical 1 from top left)\n\tbotloc = np.zeros(2)\n\tbotloc[0] = randint(1, 10)\n\tbotloc[1] = randint(1, 10)\n\n\tepisodereward = 0\n\tfor j in range(0, ACTION_COUNT):\n\t\t#observestate (self, up, left, right, down)\n\t\tstate = np.zeros(5)\n\t\t#self\n\t\tstate[0] = board[int(botloc[0]), int(botloc[1])]\n\t\tstate[1] = board[int(botloc[0]), int(botloc[1] - 1)]\n\t\tstate[2] = board[int(botloc[0] - 1), int(botloc[1])]\n\t\tstate[3] = board[int(botloc[0] + 1), int(botloc[1])]\n\t\tstate[4] = board[int(botloc[0]), int(botloc[1] + 1)]\n\t\t#greedy action selection\n\t\trandoma = np.random.choice(np.where(Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :] == Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :].max())[0])\n\t\t#save qtable location\n\t\toldq = np.zeros(6)\n\t\toldq[0] = state[0]\n\t\toldq[1] = state[1]\n\t\toldq[2] = state[2]\n\t\toldq[3] = state[3]\n\t\toldq[4] = state[4]\n\t\toldq[5] = randoma\n\t\t#take action get reward\n\t\t\t#can grab\n\t\tif(randoma == 0):\n\t\t\t#can grabbed\n\t\t\tif(state[0] == 2):\n\t\t\t\t#remove can\n\t\t\t\tboard[int(botloc[0]), int(botloc[1])] = 1\n\t\t\t\treward = 10\n\t\t\t#can not grabbed\n\t\t\telse:\n\t\t\t\treward = -1\n\t\t#move up\n\t\telif(randoma == 1):\n\t\t\t#wall\n\t\t\tif(state[1] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[1] = botloc[1] - 1\n\t\t\t\treward = 0\n\t\t#move left\n\t\telif(randoma == 2):\n\t\t\t#wall\n\t\t\tif(state[2] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[0] = botloc[0] - 1\n\t\t\t\treward = 0\n\t\t#move right\n\t\telif(randoma == 3):\n\t\t\t#wall\n\t\t\tif(state[3] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[0] = botloc[0] + 1\n\t\t\t\treward = 0\n\t\t#move down\n\t\telif(randoma == 4):\n\t\t\t#wall\n\t\t\tif(state[4] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[1] = botloc[1] + 1\n\t\t\t\treward = 0\n\t\telse:\n\t\t\tprint(\"ERROR\")\n\t\t#print \"movement data\"\n\t\t#print state\n\t\t#print randoma\n\t\t#updatestate\n\t\tstate = np.zeros(5)\n\t\t#self\n\t\tstate[0] = board[int(botloc[0]), int(botloc[1])]\n\t\tstate[1] = board[int(botloc[0]), int(botloc[1] - 1)]\n\t\tstate[2] = 
board[int(botloc[0] - 1), int(botloc[1])]\n\t\tstate[3] = board[int(botloc[0] + 1), int(botloc[1])]\n\t\tstate[4] = board[int(botloc[0]), int(botloc[1] + 1)]\n\t\t#maxq = max(Qarr[int(state[0]),int(state[1]),int(state[2]),int(state[3]),int(state[4]),:])\n\t\t#update Q table\n\t\t#Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])] = Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])] + 0.01 * (reward + 0.9 * maxq - Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])])\t\n\t\tepisodereward = episodereward + reward\n\ttestrewards[i] = episodereward\n\nprint(np.mean(testrewards))\nprint(np.std(testrewards))\n" ]
[ [ "matplotlib.use", "numpy.std", "numpy.mean", "numpy.savetxt", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xhades/rates_classify
[ "225627dad22c162023bc6b5e4d8f5881c5a6f354" ]
[ "rates_classify/rdf.py" ]
[ "# !/usr/bin/env python\n# -*-coding:utf-8-*-\n\n\"\"\"\n@author: xhades\n@Date: 2017/12/28\n\n\"\"\"\n\n# 随机森林分类器\n\nimport numpy as np\nfrom numpy import *\nfrom numpy import array, argmax\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nimport pickle\nfrom sklearn.ensemble import RandomForestClassifier as RDF\n\n\nnp.set_printoptions(threshold=np.inf)\n\n\n# 训练集测试集 3/7分割\ndef train(xFile, yFile):\n with open(xFile, \"rb\") as file_r:\n X = pickle.load(file_r)\n\n X = reshape(X, (212841, -1)) # reshape一下 (212841, 30*128)\n\n # 读取label数据,并且encodig\n with open(yFile, \"r\") as yFile_r:\n labelLines = [_.strip(\"\\n\") for _ in yFile_r.readlines()]\n values = array(labelLines)\n labelEncoder = LabelEncoder()\n integerEncoded = labelEncoder.fit_transform(values)\n integerEncoded = integerEncoded.reshape(len(integerEncoded), 1)\n # print(integerEncoded)\n\n # 获得label 编码\n Y = integerEncoded.reshape(212841, )\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=42)\n\n # 随机森林分类器\n clf = RDF(criterion=\"gini\")\n # criterion 可以使用\"gini\"或者\"entropy\",前者代表基尼系数,后者代表信息增益。一般说使用默认的基尼系数\"gini\"就可以了,即CART算法。除非你更喜欢类似ID3, C4.5的最优特征选择方法。\n\n clf.fit(X_train, Y_train)\n\n # 测试数据\n predict = clf.predict(X_test)\n count = 0\n for p, t in zip(predict, Y_test):\n if p == t:\n count += 1\n print(\"RandomForest Accuracy is:\", count/len(Y_test))\n\n\nif __name__ == \"__main__\":\n xFile = \"Res/char_embedded.pkl\"\n yFile = \"data/label.txt\"\n print(\"Start Training.....\")\n train(xFile, yFile)\n print(\"End.....\")\n" ]
[ [ "sklearn.ensemble.RandomForestClassifier", "numpy.set_printoptions", "sklearn.model_selection.train_test_split", "numpy.array", "sklearn.preprocessing.LabelEncoder" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TomeRozen/IML.HUJI
[ "84b0d835a2a4dd4f52ea415e36382cb25a9eebdc" ]
[ "IMLearn/learners/regressors/linear_regression.py" ]
[ "from __future__ import annotations\nfrom typing import NoReturn\nfrom ...base import BaseEstimator\nimport numpy as np\nfrom numpy.linalg import pinv\n\nclass LinearRegression(BaseEstimator):\n \"\"\"\n Linear Regression Estimator\n\n Solving Ordinary Least Squares optimization problem\n \"\"\"\n\n def __init__(self, include_intercept: bool = True) -> LinearRegression:\n \"\"\"\n Instantiate a linear regression estimator\n\n Parameters\n ----------\n include_intercept: bool, default=True\n Should fitted model include an intercept or not\n\n Attributes\n ----------\n include_intercept_: bool\n Should fitted model include an intercept or not\n\n coefs_: ndarray of shape (n_features,) or (n_features+1,)\n Coefficients vector fitted by linear regression. To be set in\n `LinearRegression.fit` function.\n \"\"\"\n super().__init__()\n self.include_intercept_, self.coefs_ = include_intercept, None\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n Fit Least Squares model to given samples\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n\n Notes\n -----\n Fits model with or without an intercept depending on value of `self.include_intercept_`\n \"\"\"\n if self.include_intercept_:\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n self.coefs = pinv(X)@y\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n \"\"\"\n if self.include_intercept_:\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n return X @ self.coefs\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under MSE loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under MSE loss function\n \"\"\"\n return mean_square_error(y, self.predict(X))\n" ]
[ [ "numpy.linalg.pinv", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xizaoqu/mmdetection3d
[ "1809f9650de95d7bc80035787b09e3b69390b702" ]
[ "mmdet3d/datasets/pipelines/transforms_3d.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport warnings\nfrom mmcv import is_tuple_of\nfrom mmcv.utils import build_from_cfg\n\nfrom mmdet3d.core import VoxelGenerator\nfrom mmdet3d.core.bbox import (CameraInstance3DBoxes, DepthInstance3DBoxes,\n LiDARInstance3DBoxes, box_np_ops)\nfrom mmdet.datasets.builder import PIPELINES\nfrom mmdet.datasets.pipelines import RandomFlip\nfrom ..builder import OBJECTSAMPLERS\nfrom .data_augment_utils import noise_per_object_v3_\n\n\[email protected]_module()\nclass RandomDropPointsColor(object):\n r\"\"\"Randomly set the color of points to all zeros.\n\n Once this transform is executed, all the points' color will be dropped.\n Refer to `PAConv <https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/\n util/transform.py#L223>`_ for more details.\n\n Args:\n drop_ratio (float): The probability of dropping point colors.\n Defaults to 0.2.\n \"\"\"\n\n def __init__(self, drop_ratio=0.2):\n assert isinstance(drop_ratio, (int, float)) and 0 <= drop_ratio <= 1, \\\n f'invalid drop_ratio value {drop_ratio}'\n self.drop_ratio = drop_ratio\n\n def __call__(self, input_dict):\n \"\"\"Call function to drop point colors.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after color dropping, \\\n 'points' key is updated in the result dict.\n \"\"\"\n points = input_dict['points']\n assert points.attribute_dims is not None and \\\n 'color' in points.attribute_dims, \\\n 'Expect points have color attribute'\n\n # this if-expression is a bit strange\n # `RandomDropPointsColor` is used in training 3D segmentor PAConv\n # we discovered in our experiments that, using\n # `if np.random.rand() > 1.0 - self.drop_ratio` consistently leads to\n # better results than using `if np.random.rand() < self.drop_ratio`\n # so we keep this hack in our codebase\n if np.random.rand() > 1.0 - self.drop_ratio:\n points.color = points.color * 0.0\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(drop_ratio={self.drop_ratio})'\n return repr_str\n\n\[email protected]_module()\nclass RandomFlip3D(RandomFlip):\n \"\"\"Flip the points & bbox.\n\n If the input dict contains the key \"flip\", then the flag will be used,\n otherwise it will be randomly decided by a ratio specified in the init\n method.\n\n Args:\n sync_2d (bool, optional): Whether to apply flip according to the 2D\n images. If True, it will apply the same flip as that to 2D images.\n If False, it will decide whether to flip randomly and independently\n to that of 2D images. Defaults to True.\n flip_ratio_bev_horizontal (float, optional): The flipping probability\n in horizontal direction. Defaults to 0.0.\n flip_ratio_bev_vertical (float, optional): The flipping probability\n in vertical direction. 
Defaults to 0.0.\n \"\"\"\n\n def __init__(self,\n sync_2d=True,\n flip_ratio_bev_horizontal=0.0,\n flip_ratio_bev_vertical=0.0,\n **kwargs):\n super(RandomFlip3D, self).__init__(\n flip_ratio=flip_ratio_bev_horizontal, **kwargs)\n self.sync_2d = sync_2d\n self.flip_ratio_bev_vertical = flip_ratio_bev_vertical\n if flip_ratio_bev_horizontal is not None:\n assert isinstance(\n flip_ratio_bev_horizontal,\n (int, float)) and 0 <= flip_ratio_bev_horizontal <= 1\n if flip_ratio_bev_vertical is not None:\n assert isinstance(\n flip_ratio_bev_vertical,\n (int, float)) and 0 <= flip_ratio_bev_vertical <= 1\n\n def random_flip_data_3d(self, input_dict, direction='horizontal'):\n \"\"\"Flip 3D data randomly.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n direction (str): Flip direction. Default: horizontal.\n\n Returns:\n dict: Flipped results, 'points', 'bbox3d_fields' keys are \\\n updated in the result dict.\n \"\"\"\n assert direction in ['horizontal', 'vertical']\n if len(input_dict['bbox3d_fields']) == 0: # test mode\n input_dict['bbox3d_fields'].append('empty_box3d')\n input_dict['empty_box3d'] = input_dict['box_type_3d'](\n np.array([], dtype=np.float32))\n assert len(input_dict['bbox3d_fields']) == 1\n for key in input_dict['bbox3d_fields']:\n if 'points' in input_dict:\n input_dict['points'] = input_dict[key].flip(\n direction, points=input_dict['points'])\n else:\n input_dict[key].flip(direction)\n if 'centers2d' in input_dict:\n assert self.sync_2d is True and direction == 'horizontal', \\\n 'Only support sync_2d=True and horizontal flip with images'\n w = input_dict['ori_shape'][1]\n input_dict['centers2d'][..., 0] = \\\n w - input_dict['centers2d'][..., 0]\n # need to modify the horizontal position of camera center\n # along u-axis in the image (flip like centers2d)\n # ['cam2img'][0][2] = c_u\n # see more details and examples at\n # https://github.com/open-mmlab/mmdetection3d/pull/744\n input_dict['cam2img'][0][2] = w - input_dict['cam2img'][0][2]\n\n def __call__(self, input_dict):\n \"\"\"Call function to flip points, values in the ``bbox3d_fields`` and \\\n also flip 2D image and its annotations.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Flipped results, 'flip', 'flip_direction', \\\n 'pcd_horizontal_flip' and 'pcd_vertical_flip' keys are added \\\n into result dict.\n \"\"\"\n # filp 2D image and its annotations\n super(RandomFlip3D, self).__call__(input_dict)\n\n if self.sync_2d:\n input_dict['pcd_horizontal_flip'] = input_dict['flip']\n input_dict['pcd_vertical_flip'] = False\n else:\n if 'pcd_horizontal_flip' not in input_dict:\n flip_horizontal = True if np.random.rand(\n ) < self.flip_ratio else False\n input_dict['pcd_horizontal_flip'] = flip_horizontal\n if 'pcd_vertical_flip' not in input_dict:\n flip_vertical = True if np.random.rand(\n ) < self.flip_ratio_bev_vertical else False\n input_dict['pcd_vertical_flip'] = flip_vertical\n\n if 'transformation_3d_flow' not in input_dict:\n input_dict['transformation_3d_flow'] = []\n\n if input_dict['pcd_horizontal_flip']:\n self.random_flip_data_3d(input_dict, 'horizontal')\n input_dict['transformation_3d_flow'].extend(['HF'])\n if input_dict['pcd_vertical_flip']:\n self.random_flip_data_3d(input_dict, 'vertical')\n input_dict['transformation_3d_flow'].extend(['VF'])\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(sync_2d={self.sync_2d},'\n repr_str += f' 
flip_ratio_bev_vertical={self.flip_ratio_bev_vertical})'\n return repr_str\n\n\[email protected]_module()\nclass RandomJitterPoints(object):\n \"\"\"Randomly jitter point coordinates.\n\n Different from the global translation in ``GlobalRotScaleTrans``, here we \\\n apply different noises to each point in a scene.\n\n Args:\n jitter_std (list[float]): The standard deviation of jittering noise.\n This applies random noise to all points in a 3D scene, which is \\\n sampled from a gaussian distribution whose standard deviation is \\\n set by ``jitter_std``. Defaults to [0.01, 0.01, 0.01]\n clip_range (list[float] | None): Clip the randomly generated jitter \\\n noise into this range. If None is given, don't perform clipping.\n Defaults to [-0.05, 0.05]\n\n Note:\n This transform should only be used in point cloud segmentation tasks \\\n because we don't transform ground-truth bboxes accordingly.\n For similar transform in detection task, please refer to `ObjectNoise`.\n \"\"\"\n\n def __init__(self,\n jitter_std=[0.01, 0.01, 0.01],\n clip_range=[-0.05, 0.05]):\n seq_types = (list, tuple, np.ndarray)\n if not isinstance(jitter_std, seq_types):\n assert isinstance(jitter_std, (int, float)), \\\n f'unsupported jitter_std type {type(jitter_std)}'\n jitter_std = [jitter_std, jitter_std, jitter_std]\n self.jitter_std = jitter_std\n\n if clip_range is not None:\n if not isinstance(clip_range, seq_types):\n assert isinstance(clip_range, (int, float)), \\\n f'unsupported clip_range type {type(clip_range)}'\n clip_range = [-clip_range, clip_range]\n self.clip_range = clip_range\n\n def __call__(self, input_dict):\n \"\"\"Call function to jitter all the points in the scene.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after adding noise to each point, \\\n 'points' key is updated in the result dict.\n \"\"\"\n points = input_dict['points']\n jitter_std = np.array(self.jitter_std, dtype=np.float32)\n jitter_noise = \\\n np.random.randn(points.shape[0], 3) * jitter_std[None, :]\n if self.clip_range is not None:\n jitter_noise = np.clip(jitter_noise, self.clip_range[0],\n self.clip_range[1])\n\n points.translate(jitter_noise)\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(jitter_std={self.jitter_std},'\n repr_str += f' clip_range={self.clip_range})'\n return repr_str\n\n\[email protected]_module()\nclass ObjectSample(object):\n \"\"\"Sample GT objects to the data.\n\n Args:\n db_sampler (dict): Config dict of the database sampler.\n sample_2d (bool): Whether to also paste 2D image patch to the images\n This should be true when applying multi-modality cut-and-paste.\n Defaults to False.\n \"\"\"\n\n def __init__(self, db_sampler, sample_2d=False):\n self.sampler_cfg = db_sampler\n self.sample_2d = sample_2d\n if 'type' not in db_sampler.keys():\n db_sampler['type'] = 'DataBaseSampler'\n self.db_sampler = build_from_cfg(db_sampler, OBJECTSAMPLERS)\n\n @staticmethod\n def remove_points_in_boxes(points, boxes):\n \"\"\"Remove the points in the sampled bounding boxes.\n\n Args:\n points (:obj:`BasePoints`): Input point cloud array.\n boxes (np.ndarray): Sampled ground truth boxes.\n\n Returns:\n np.ndarray: Points with those in the boxes removed.\n \"\"\"\n masks = box_np_ops.points_in_rbbox(points.coord.numpy(), boxes)\n points = points[np.logical_not(masks.any(-1))]\n return points\n\n def __call__(self, input_dict):\n \"\"\"Call function to sample 
ground truth objects to the data.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after object sampling augmentation, \\\n 'points', 'gt_bboxes_3d', 'gt_labels_3d' keys are updated \\\n in the result dict.\n \"\"\"\n gt_bboxes_3d = input_dict['gt_bboxes_3d']\n gt_labels_3d = input_dict['gt_labels_3d']\n\n # change to float for blending operation\n points = input_dict['points']\n if self.sample_2d:\n img = input_dict['img']\n gt_bboxes_2d = input_dict['gt_bboxes']\n # Assume for now 3D & 2D bboxes are the same\n sampled_dict = self.db_sampler.sample_all(\n gt_bboxes_3d.tensor.numpy(),\n gt_labels_3d,\n gt_bboxes_2d=gt_bboxes_2d,\n img=img)\n else:\n sampled_dict = self.db_sampler.sample_all(\n gt_bboxes_3d.tensor.numpy(), gt_labels_3d, img=None)\n\n if sampled_dict is not None:\n sampled_gt_bboxes_3d = sampled_dict['gt_bboxes_3d']\n sampled_points = sampled_dict['points']\n sampled_gt_labels = sampled_dict['gt_labels_3d']\n\n gt_labels_3d = np.concatenate([gt_labels_3d, sampled_gt_labels],\n axis=0)\n gt_bboxes_3d = gt_bboxes_3d.new_box(\n np.concatenate(\n [gt_bboxes_3d.tensor.numpy(), sampled_gt_bboxes_3d]))\n\n points = self.remove_points_in_boxes(points, sampled_gt_bboxes_3d)\n # check the points dimension\n points = points.cat([sampled_points, points])\n\n if self.sample_2d:\n sampled_gt_bboxes_2d = sampled_dict['gt_bboxes_2d']\n gt_bboxes_2d = np.concatenate(\n [gt_bboxes_2d, sampled_gt_bboxes_2d]).astype(np.float32)\n\n input_dict['gt_bboxes'] = gt_bboxes_2d\n input_dict['img'] = sampled_dict['img']\n\n input_dict['gt_bboxes_3d'] = gt_bboxes_3d\n input_dict['gt_labels_3d'] = gt_labels_3d.astype(np.long)\n input_dict['points'] = points\n\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f' sample_2d={self.sample_2d},'\n repr_str += f' data_root={self.sampler_cfg.data_root},'\n repr_str += f' info_path={self.sampler_cfg.info_path},'\n repr_str += f' rate={self.sampler_cfg.rate},'\n repr_str += f' prepare={self.sampler_cfg.prepare},'\n repr_str += f' classes={self.sampler_cfg.classes},'\n repr_str += f' sample_groups={self.sampler_cfg.sample_groups}'\n return repr_str\n\n\[email protected]_module()\nclass ObjectNoise(object):\n \"\"\"Apply noise to each GT objects in the scene.\n\n Args:\n translation_std (list[float], optional): Standard deviation of the\n distribution where translation noise are sampled from.\n Defaults to [0.25, 0.25, 0.25].\n global_rot_range (list[float], optional): Global rotation to the scene.\n Defaults to [0.0, 0.0].\n rot_range (list[float], optional): Object rotation range.\n Defaults to [-0.15707963267, 0.15707963267].\n num_try (int, optional): Number of times to try if the noise applied is\n invalid. 
Defaults to 100.\n \"\"\"\n\n def __init__(self,\n translation_std=[0.25, 0.25, 0.25],\n global_rot_range=[0.0, 0.0],\n rot_range=[-0.15707963267, 0.15707963267],\n num_try=100):\n self.translation_std = translation_std\n self.global_rot_range = global_rot_range\n self.rot_range = rot_range\n self.num_try = num_try\n\n def __call__(self, input_dict):\n \"\"\"Call function to apply noise to each ground truth in the scene.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after adding noise to each object, \\\n 'points', 'gt_bboxes_3d' keys are updated in the result dict.\n \"\"\"\n gt_bboxes_3d = input_dict['gt_bboxes_3d']\n points = input_dict['points']\n\n # TODO: check this inplace function\n numpy_box = gt_bboxes_3d.tensor.numpy()\n numpy_points = points.tensor.numpy()\n\n noise_per_object_v3_(\n numpy_box,\n numpy_points,\n rotation_perturb=self.rot_range,\n center_noise_std=self.translation_std,\n global_random_rot_range=self.global_rot_range,\n num_try=self.num_try)\n\n input_dict['gt_bboxes_3d'] = gt_bboxes_3d.new_box(numpy_box)\n input_dict['points'] = points.new_point(numpy_points)\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(num_try={self.num_try},'\n repr_str += f' translation_std={self.translation_std},'\n repr_str += f' global_rot_range={self.global_rot_range},'\n repr_str += f' rot_range={self.rot_range})'\n return repr_str\n\n\[email protected]_module()\nclass GlobalAlignment(object):\n \"\"\"Apply global alignment to 3D scene points by rotation and translation.\n\n Args:\n rotation_axis (int): Rotation axis for points and bboxes rotation.\n\n Note:\n We do not record the applied rotation and translation as in \\\n GlobalRotScaleTrans. 
Because usually, we do not need to reverse \\\n the alignment step.\n For example, ScanNet 3D detection task uses aligned ground-truth \\\n bounding boxes for evaluation.\n \"\"\"\n\n def __init__(self, rotation_axis):\n self.rotation_axis = rotation_axis\n\n def _trans_points(self, input_dict, trans_factor):\n \"\"\"Private function to translate points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n trans_factor (np.ndarray): Translation vector to be applied.\n\n Returns:\n dict: Results after translation, 'points' is updated in the dict.\n \"\"\"\n input_dict['points'].translate(trans_factor)\n\n def _rot_points(self, input_dict, rot_mat):\n \"\"\"Private function to rotate bounding boxes and points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n rot_mat (np.ndarray): Rotation matrix to be applied.\n\n Returns:\n dict: Results after rotation, 'points' is updated in the dict.\n \"\"\"\n # input should be rot_mat_T so I transpose it here\n input_dict['points'].rotate(rot_mat.T)\n\n def _check_rot_mat(self, rot_mat):\n \"\"\"Check if rotation matrix is valid for self.rotation_axis.\n\n Args:\n rot_mat (np.ndarray): Rotation matrix to be applied.\n \"\"\"\n is_valid = np.allclose(np.linalg.det(rot_mat), 1.0)\n valid_array = np.zeros(3)\n valid_array[self.rotation_axis] = 1.0\n is_valid &= (rot_mat[self.rotation_axis, :] == valid_array).all()\n is_valid &= (rot_mat[:, self.rotation_axis] == valid_array).all()\n assert is_valid, f'invalid rotation matrix {rot_mat}'\n\n def __call__(self, input_dict):\n \"\"\"Call function to shuffle points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after global alignment, 'points' and keys in \\\n input_dict['bbox3d_fields'] are updated in the result dict.\n \"\"\"\n assert 'axis_align_matrix' in input_dict['ann_info'].keys(), \\\n 'axis_align_matrix is not provided in GlobalAlignment'\n\n axis_align_matrix = input_dict['ann_info']['axis_align_matrix']\n assert axis_align_matrix.shape == (4, 4), \\\n f'invalid shape {axis_align_matrix.shape} for axis_align_matrix'\n rot_mat = axis_align_matrix[:3, :3]\n trans_vec = axis_align_matrix[:3, -1]\n\n self._check_rot_mat(rot_mat)\n self._rot_points(input_dict, rot_mat)\n self._trans_points(input_dict, trans_vec)\n\n return input_dict\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(rotation_axis={self.rotation_axis})'\n return repr_str\n\n\[email protected]_module()\nclass GlobalRotScaleTrans(object):\n \"\"\"Apply global rotation, scaling and translation to a 3D scene.\n\n Args:\n rot_range (list[float]): Range of rotation angle.\n Defaults to [-0.78539816, 0.78539816] (close to [-pi/4, pi/4]).\n scale_ratio_range (list[float]): Range of scale ratio.\n Defaults to [0.95, 1.05].\n translation_std (list[float]): The standard deviation of translation\n noise. This applies random translation to a scene by a noise, which\n is sampled from a gaussian distribution whose standard deviation\n is set by ``translation_std``. 
Defaults to [0, 0, 0]\n shift_height (bool): Whether to shift height.\n (the fourth dimension of indoor points) when scaling.\n Defaults to False.\n \"\"\"\n\n def __init__(self,\n rot_range=[-0.78539816, 0.78539816],\n scale_ratio_range=[0.95, 1.05],\n translation_std=[0, 0, 0],\n shift_height=False):\n seq_types = (list, tuple, np.ndarray)\n if not isinstance(rot_range, seq_types):\n assert isinstance(rot_range, (int, float)), \\\n f'unsupported rot_range type {type(rot_range)}'\n rot_range = [-rot_range, rot_range]\n self.rot_range = rot_range\n\n assert isinstance(scale_ratio_range, seq_types), \\\n f'unsupported scale_ratio_range type {type(scale_ratio_range)}'\n self.scale_ratio_range = scale_ratio_range\n\n if not isinstance(translation_std, seq_types):\n assert isinstance(translation_std, (int, float)), \\\n f'unsupported translation_std type {type(translation_std)}'\n translation_std = [\n translation_std, translation_std, translation_std\n ]\n assert all([std >= 0 for std in translation_std]), \\\n 'translation_std should be positive'\n self.translation_std = translation_std\n self.shift_height = shift_height\n\n def _trans_bbox_points(self, input_dict):\n \"\"\"Private function to translate bounding boxes and points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after translation, 'points', 'pcd_trans' \\\n and keys in input_dict['bbox3d_fields'] are updated \\\n in the result dict.\n \"\"\"\n translation_std = np.array(self.translation_std, dtype=np.float32)\n trans_factor = np.random.normal(scale=translation_std, size=3).T\n\n input_dict['points'].translate(trans_factor)\n input_dict['pcd_trans'] = trans_factor\n for key in input_dict['bbox3d_fields']:\n input_dict[key].translate(trans_factor)\n\n def _rot_bbox_points(self, input_dict):\n \"\"\"Private function to rotate bounding boxes and points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after rotation, 'points', 'pcd_rotation' \\\n and keys in input_dict['bbox3d_fields'] are updated \\\n in the result dict.\n \"\"\"\n rotation = self.rot_range\n noise_rotation = np.random.uniform(rotation[0], rotation[1])\n\n # if no bbox in input_dict, only rotate points\n if len(input_dict['bbox3d_fields']) == 0:\n rot_mat_T = input_dict['points'].rotate(noise_rotation)\n input_dict['pcd_rotation'] = rot_mat_T\n return\n\n # rotate points with bboxes\n for key in input_dict['bbox3d_fields']:\n if len(input_dict[key].tensor) != 0:\n points, rot_mat_T = input_dict[key].rotate(\n noise_rotation, input_dict['points'])\n input_dict['points'] = points\n input_dict['pcd_rotation'] = rot_mat_T\n\n def _scale_bbox_points(self, input_dict):\n \"\"\"Private function to scale bounding boxes and points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after scaling, 'points'and keys in \\\n input_dict['bbox3d_fields'] are updated in the result dict.\n \"\"\"\n scale = input_dict['pcd_scale_factor']\n points = input_dict['points']\n points.scale(scale)\n if self.shift_height:\n assert 'height' in points.attribute_dims.keys(), \\\n 'setting shift_height=True but points have no height attribute'\n points.tensor[:, points.attribute_dims['height']] *= scale\n input_dict['points'] = points\n\n for key in input_dict['bbox3d_fields']:\n input_dict[key].scale(scale)\n\n def _random_scale(self, input_dict):\n \"\"\"Private function to randomly set the scale factor.\n\n Args:\n input_dict (dict): Result dict from 
loading pipeline.\n\n Returns:\n dict: Results after scaling, 'pcd_scale_factor' are updated \\\n in the result dict.\n \"\"\"\n scale_factor = np.random.uniform(self.scale_ratio_range[0],\n self.scale_ratio_range[1])\n input_dict['pcd_scale_factor'] = scale_factor\n\n def __call__(self, input_dict):\n \"\"\"Private function to rotate, scale and translate bounding boxes and \\\n points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after scaling, 'points', 'pcd_rotation',\n 'pcd_scale_factor', 'pcd_trans' and keys in \\\n input_dict['bbox3d_fields'] are updated in the result dict.\n \"\"\"\n if 'transformation_3d_flow' not in input_dict:\n input_dict['transformation_3d_flow'] = []\n\n self._rot_bbox_points(input_dict)\n\n if 'pcd_scale_factor' not in input_dict:\n self._random_scale(input_dict)\n self._scale_bbox_points(input_dict)\n\n self._trans_bbox_points(input_dict)\n\n input_dict['transformation_3d_flow'].extend(['R', 'S', 'T'])\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(rot_range={self.rot_range},'\n repr_str += f' scale_ratio_range={self.scale_ratio_range},'\n repr_str += f' translation_std={self.translation_std},'\n repr_str += f' shift_height={self.shift_height})'\n return repr_str\n\[email protected]_module()\nclass RotFlipScaleTrans(object):\n def __init__(self,\n scale_ratio_range=[0.95, 1.05],\n translation_std=[0, 0, 0],\n #TODO\n ):\n seq_types = (list, tuple, np.ndarray)\n if not isinstance(rot_range, seq_types):\n assert isinstance(rot_range, (int, float)), \\\n f'unsupported rot_range type {type(rot_range)}'\n rot_range = [-rot_range, rot_range]\n self.rot_range = rot_range\n\n assert isinstance(scale_ratio_range, seq_types), \\\n f'unsupported scale_ratio_range type {type(scale_ratio_range)}'\n self.scale_ratio_range = scale_ratio_range\n\n if not isinstance(translation_std, seq_types):\n assert isinstance(translation_std, (int, float)), \\\n f'unsupported translation_std type {type(translation_std)}'\n translation_std = [\n translation_std, translation_std, translation_std\n ]\n assert all([std >= 0 for std in translation_std]), \\\n 'translation_std should be positive'\n self.translation_std = translation_std\n self.shift_height = shift_height\n\n def _trans_bbox_points(self, input_dict):\n \"\"\"Private function to translate bounding boxes and points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after translation, 'points', 'pcd_trans' \\\n and keys in input_dict['bbox3d_fields'] are updated \\\n in the result dict.\n \"\"\"\n translation_std = np.array(self.translation_std, dtype=np.float32)\n trans_factor = np.random.normal(scale=translation_std, size=3).T\n\n input_dict['points'].translate(trans_factor)\n input_dict['pcd_trans'] = trans_factor\n for key in input_dict['bbox3d_fields']:\n input_dict[key].translate(trans_factor)\n\n def _rot_bbox_points(self, input_dict):\n \"\"\"Private function to rotate bounding boxes and points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after rotation, 'points', 'pcd_rotation' \\\n and keys in input_dict['bbox3d_fields'] are updated \\\n in the result dict.\n \"\"\"\n rotation = self.rot_range\n noise_rotation = np.random.uniform(rotation[0], rotation[1])\n\n # if no bbox in input_dict, only rotate points\n if len(input_dict['bbox3d_fields']) == 0:\n rot_mat_T = 
input_dict['points'].rotate(noise_rotation)\n input_dict['pcd_rotation'] = rot_mat_T\n return\n\n # rotate points with bboxes\n for key in input_dict['bbox3d_fields']:\n if len(input_dict[key].tensor) != 0:\n points, rot_mat_T = input_dict[key].rotate(\n noise_rotation, input_dict['points'])\n input_dict['points'] = points\n input_dict['pcd_rotation'] = rot_mat_T\n\n def _scale_bbox_points(self, input_dict):\n \"\"\"Private function to scale bounding boxes and points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after scaling, 'points'and keys in \\\n input_dict['bbox3d_fields'] are updated in the result dict.\n \"\"\"\n scale = input_dict['pcd_scale_factor']\n points = input_dict['points']\n points.scale(scale)\n if self.shift_height:\n assert 'height' in points.attribute_dims.keys(), \\\n 'setting shift_height=True but points have no height attribute'\n points.tensor[:, points.attribute_dims['height']] *= scale\n input_dict['points'] = points\n\n for key in input_dict['bbox3d_fields']:\n input_dict[key].scale(scale)\n\n def _random_scale(self, input_dict):\n \"\"\"Private function to randomly set the scale factor.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after scaling, 'pcd_scale_factor' are updated \\\n in the result dict.\n \"\"\"\n scale_factor = np.random.uniform(self.scale_ratio_range[0],\n self.scale_ratio_range[1])\n input_dict['pcd_scale_factor'] = scale_factor\n\n def __call__(self, input_dict):\n \"\"\"Private function to rotate, scale and translate bounding boxes and \\\n points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after scaling, 'points', 'pcd_rotation',\n 'pcd_scale_factor', 'pcd_trans' and keys in \\\n input_dict['bbox3d_fields'] are updated in the result dict.\n \"\"\"\n if 'transformation_3d_flow' not in input_dict:\n input_dict['transformation_3d_flow'] = []\n\n self._rot_bbox_points(input_dict)\n\n if 'pcd_scale_factor' not in input_dict:\n self._random_scale(input_dict)\n self._scale_bbox_points(input_dict)\n\n self._trans_bbox_points(input_dict)\n\n input_dict['transformation_3d_flow'].extend(['R', 'S', 'T'])\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(rot_range={self.rot_range},'\n repr_str += f' scale_ratio_range={self.scale_ratio_range},'\n repr_str += f' translation_std={self.translation_std},'\n repr_str += f' shift_height={self.shift_height})'\n return repr_str\n\[email protected]_module()\nclass PointShuffle(object):\n \"\"\"Shuffle input points.\"\"\"\n\n def __call__(self, input_dict):\n \"\"\"Call function to shuffle points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after filtering, 'points', 'pts_instance_mask' \\\n and 'pts_semantic_mask' keys are updated in the result dict.\n \"\"\"\n idx = input_dict['points'].shuffle()\n idx = idx.numpy()\n\n pts_instance_mask = input_dict.get('pts_instance_mask', None)\n pts_semantic_mask = input_dict.get('pts_semantic_mask', None)\n\n if pts_instance_mask is not None:\n input_dict['pts_instance_mask'] = pts_instance_mask[idx]\n\n if pts_semantic_mask is not None:\n input_dict['pts_semantic_mask'] = pts_semantic_mask[idx]\n\n return input_dict\n\n def __repr__(self):\n return self.__class__.__name__\n\n\[email protected]_module()\nclass ObjectRangeFilter(object):\n \"\"\"Filter objects by the 
range.\n\n Args:\n point_cloud_range (list[float]): Point cloud range.\n \"\"\"\n\n def __init__(self, point_cloud_range):\n self.pcd_range = np.array(point_cloud_range, dtype=np.float32)\n\n def __call__(self, input_dict):\n \"\"\"Call function to filter objects by the range.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' \\\n keys are updated in the result dict.\n \"\"\"\n # Check points instance type and initialise bev_range\n if isinstance(input_dict['gt_bboxes_3d'],\n (LiDARInstance3DBoxes, DepthInstance3DBoxes)):\n bev_range = self.pcd_range[[0, 1, 3, 4]]\n elif isinstance(input_dict['gt_bboxes_3d'], CameraInstance3DBoxes):\n bev_range = self.pcd_range[[0, 2, 3, 5]]\n\n gt_bboxes_3d = input_dict['gt_bboxes_3d']\n gt_labels_3d = input_dict['gt_labels_3d']\n mask = gt_bboxes_3d.in_range_bev(bev_range)\n gt_bboxes_3d = gt_bboxes_3d[mask]\n # mask is a torch tensor but gt_labels_3d is still numpy array\n # using mask to index gt_labels_3d will cause bug when\n # len(gt_labels_3d) == 1, where mask=1 will be interpreted\n # as gt_labels_3d[1] and cause out of index error\n gt_labels_3d = gt_labels_3d[mask.numpy().astype(np.bool)]\n\n # limit rad to [-pi, pi]\n gt_bboxes_3d.limit_yaw(offset=0.5, period=2 * np.pi)\n input_dict['gt_bboxes_3d'] = gt_bboxes_3d\n input_dict['gt_labels_3d'] = gt_labels_3d\n\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(point_cloud_range={self.pcd_range.tolist()})'\n return repr_str\n\n\[email protected]_module()\nclass PointsRangeFilter(object):\n \"\"\"Filter points by the range.\n\n Args:\n point_cloud_range (list[float]): Point cloud range.\n \"\"\"\n\n def __init__(self, point_cloud_range):\n self.pcd_range = np.array(point_cloud_range, dtype=np.float32)\n\n def __call__(self, input_dict):\n \"\"\"Call function to filter points by the range.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after filtering, 'points', 'pts_instance_mask' \\\n and 'pts_semantic_mask' keys are updated in the result dict.\n \"\"\"\n points = input_dict['points']\n points_mask = points.in_range_3d(self.pcd_range)\n clean_points = points[points_mask]\n input_dict['points'] = clean_points\n points_mask = points_mask.numpy()\n\n pts_instance_mask = input_dict.get('pts_instance_mask', None)\n pts_semantic_mask = input_dict.get('pts_semantic_mask', None)\n\n if pts_instance_mask is not None:\n input_dict['pts_instance_mask'] = pts_instance_mask[points_mask]\n\n if pts_semantic_mask is not None:\n input_dict['pts_semantic_mask'] = pts_semantic_mask[points_mask]\n\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(point_cloud_range={self.pcd_range.tolist()})'\n return repr_str\n\n\[email protected]_module()\nclass ObjectNameFilter(object):\n \"\"\"Filter GT objects by their names.\n\n Args:\n classes (list[str]): List of class names to be kept for training.\n \"\"\"\n\n def __init__(self, classes):\n self.classes = classes\n self.labels = list(range(len(self.classes)))\n\n def __call__(self, input_dict):\n \"\"\"Call function to filter objects by their names.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' \\\n keys are updated in the 
result dict.\n \"\"\"\n gt_labels_3d = input_dict['gt_labels_3d']\n gt_bboxes_mask = np.array([n in self.labels for n in gt_labels_3d],\n dtype=np.bool_)\n input_dict['gt_bboxes_3d'] = input_dict['gt_bboxes_3d'][gt_bboxes_mask]\n input_dict['gt_labels_3d'] = input_dict['gt_labels_3d'][gt_bboxes_mask]\n\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(classes={self.classes})'\n return repr_str\n\n\[email protected]_module()\nclass PointSample(object):\n \"\"\"Point sample.\n\n Sampling data to a certain number.\n\n Args:\n num_points (int): Number of points to be sampled.\n sample_range (float, optional): The range where to sample points.\n If not None, the points with depth larger than `sample_range` are\n prior to be sampled. Defaults to None.\n replace (bool, optional): Whether the sampling is with or without\n replacement. Defaults to False.\n \"\"\"\n\n def __init__(self, num_points, sample_range=None, replace=False):\n self.num_points = num_points\n self.sample_range = sample_range\n self.replace = replace\n\n def _points_random_sampling(self,\n points,\n num_samples,\n sample_range=None,\n replace=False,\n return_choices=False):\n \"\"\"Points random sampling.\n\n Sample points to a certain number.\n\n Args:\n points (np.ndarray | :obj:`BasePoints`): 3D Points.\n num_samples (int): Number of samples to be sampled.\n sample_range (float, optional): Indicating the range where the\n points will be sampled. Defaults to None.\n replace (bool, optional): Sampling with or without replacement.\n Defaults to None.\n return_choices (bool, optional): Whether return choice.\n Defaults to False.\n Returns:\n tuple[np.ndarray] | np.ndarray:\n - points (np.ndarray | :obj:`BasePoints`): 3D Points.\n - choices (np.ndarray, optional): The generated random samples.\n \"\"\"\n if not replace:\n replace = (points.shape[0] < num_samples)\n point_range = range(len(points))\n if sample_range is not None and not replace:\n # Only sampling the near points when len(points) >= num_samples\n depth = np.linalg.norm(points.tensor, axis=1)\n far_inds = np.where(depth > sample_range)[0]\n near_inds = np.where(depth <= sample_range)[0]\n # in case there are too many far points\n if len(far_inds) > num_samples:\n far_inds = np.random.choice(\n far_inds, num_samples, replace=False)\n point_range = near_inds\n num_samples -= len(far_inds)\n choices = np.random.choice(point_range, num_samples, replace=replace)\n if sample_range is not None and not replace:\n choices = np.concatenate((far_inds, choices))\n # Shuffle points after sampling\n np.random.shuffle(choices)\n if return_choices:\n return points[choices], choices\n else:\n return points[choices]\n\n def __call__(self, results):\n \"\"\"Call function to sample points to in indoor scenes.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n Returns:\n dict: Results after sampling, 'points', 'pts_instance_mask' \\\n and 'pts_semantic_mask' keys are updated in the result dict.\n \"\"\"\n points = results['points']\n # Points in Camera coord can provide the depth information.\n # TODO: Need to suport distance-based sampling for other coord system.\n if self.sample_range is not None:\n from mmdet3d.core.points import CameraPoints\n assert isinstance(points, CameraPoints), \\\n 'Sampling based on distance is only appliable for CAMERA coord'\n points, choices = self._points_random_sampling(\n points,\n self.num_points,\n self.sample_range,\n 
self.replace,\n return_choices=True)\n results['points'] = points\n\n pts_instance_mask = results.get('pts_instance_mask', None)\n pts_semantic_mask = results.get('pts_semantic_mask', None)\n\n if pts_instance_mask is not None:\n pts_instance_mask = pts_instance_mask[choices]\n results['pts_instance_mask'] = pts_instance_mask\n\n if pts_semantic_mask is not None:\n pts_semantic_mask = pts_semantic_mask[choices]\n results['pts_semantic_mask'] = pts_semantic_mask\n\n return results\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(num_points={self.num_points},'\n repr_str += f' sample_range={self.sample_range},'\n repr_str += f' replace={self.replace})'\n\n return repr_str\n\n\[email protected]_module()\nclass IndoorPointSample(PointSample):\n \"\"\"Indoor point sample.\n\n Sampling data to a certain number.\n NOTE: IndoorPointSample is deprecated in favor of PointSample\n\n Args:\n num_points (int): Number of points to be sampled.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n warnings.warn(\n 'IndoorPointSample is deprecated in favor of PointSample')\n super(IndoorPointSample, self).__init__(*args, **kwargs)\n\n\[email protected]_module()\nclass IndoorPatchPointSample(object):\n r\"\"\"Indoor point sample within a patch. Modified from `PointNet++ <https://\n github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py>`_.\n\n Sampling data to a certain number for semantic segmentation.\n\n Args:\n num_points (int): Number of points to be sampled.\n block_size (float, optional): Size of a block to sample points from.\n Defaults to 1.5.\n sample_rate (float, optional): Stride used in sliding patch generation.\n This parameter is unused in `IndoorPatchPointSample` and thus has\n been deprecated. We plan to remove it in the future.\n Defaults to None.\n ignore_index (int, optional): Label index that won't be used for the\n segmentation task. This is set in PointSegClassMapping as neg_cls.\n If not None, will be used as a patch selection criterion.\n Defaults to None.\n use_normalized_coord (bool, optional): Whether to use normalized xyz as\n additional features. Defaults to False.\n num_try (int, optional): Number of times to try if the patch selected\n is invalid. Defaults to 10.\n enlarge_size (float | None, optional): Enlarge the sampled patch to\n [-block_size / 2 - enlarge_size, block_size / 2 + enlarge_size] as\n an augmentation. If None, set it as 0. Defaults to 0.2.\n min_unique_num (int | None, optional): Minimum number of unique points\n the sampled patch should contain. If None, use PointNet++'s method\n to judge uniqueness. Defaults to None.\n eps (float, optional): A value added to patch boundary to guarantee\n points coverage. Defaults to 1e-2.\n\n Note:\n This transform should only be used in the training process of point\n cloud segmentation tasks. 
For the sliding patch generation and\n inference process in testing, please refer to the `slide_inference`\n function of `EncoderDecoder3D` class.\n \"\"\"\n\n def __init__(self,\n num_points,\n block_size=1.5,\n sample_rate=None,\n ignore_index=None,\n use_normalized_coord=False,\n num_try=10,\n enlarge_size=0.2,\n min_unique_num=None,\n eps=1e-2):\n self.num_points = num_points\n self.block_size = block_size\n self.ignore_index = ignore_index\n self.use_normalized_coord = use_normalized_coord\n self.num_try = num_try\n self.enlarge_size = enlarge_size if enlarge_size is not None else 0.0\n self.min_unique_num = min_unique_num\n self.eps = eps\n\n if sample_rate is not None:\n warnings.warn(\n \"'sample_rate' has been deprecated and will be removed in \"\n 'the future. Please remove them from your code.')\n\n def _input_generation(self, coords, patch_center, coord_max, attributes,\n attribute_dims, point_type):\n \"\"\"Generating model input.\n\n Generate input by subtracting patch center and adding additional \\\n features. Currently support colors and normalized xyz as features.\n\n Args:\n coords (np.ndarray): Sampled 3D Points.\n patch_center (np.ndarray): Center coordinate of the selected patch.\n coord_max (np.ndarray): Max coordinate of all 3D Points.\n attributes (np.ndarray): features of input points.\n attribute_dims (dict): Dictionary to indicate the meaning of extra\n dimension.\n point_type (type): class of input points inherited from BasePoints.\n\n Returns:\n :obj:`BasePoints`: The generated input data.\n \"\"\"\n # subtract patch center, the z dimension is not centered\n centered_coords = coords.copy()\n centered_coords[:, 0] -= patch_center[0]\n centered_coords[:, 1] -= patch_center[1]\n\n if self.use_normalized_coord:\n normalized_coord = coords / coord_max\n attributes = np.concatenate([attributes, normalized_coord], axis=1)\n if attribute_dims is None:\n attribute_dims = dict()\n attribute_dims.update(\n dict(normalized_coord=[\n attributes.shape[1], attributes.shape[1] +\n 1, attributes.shape[1] + 2\n ]))\n\n points = np.concatenate([centered_coords, attributes], axis=1)\n points = point_type(\n points, points_dim=points.shape[1], attribute_dims=attribute_dims)\n\n return points\n\n def _patch_points_sampling(self, points, sem_mask):\n \"\"\"Patch points sampling.\n\n First sample a valid patch.\n Then sample points within that patch to a certain number.\n\n Args:\n points (:obj:`BasePoints`): 3D Points.\n sem_mask (np.ndarray): semantic segmentation mask for input points.\n\n Returns:\n tuple[:obj:`BasePoints`, np.ndarray] | :obj:`BasePoints`:\n\n - points (:obj:`BasePoints`): 3D Points.\n - choices (np.ndarray): The generated random samples.\n \"\"\"\n coords = points.coord.numpy()\n attributes = points.tensor[:, 3:].numpy()\n attribute_dims = points.attribute_dims\n point_type = type(points)\n\n coord_max = np.amax(coords, axis=0)\n coord_min = np.amin(coords, axis=0)\n\n for _ in range(self.num_try):\n # random sample a point as patch center\n cur_center = coords[np.random.choice(coords.shape[0])]\n\n # boundary of a patch, which would be enlarged by\n # `self.enlarge_size` as an augmentation\n cur_max = cur_center + np.array(\n [self.block_size / 2.0, self.block_size / 2.0, 0.0])\n cur_min = cur_center - np.array(\n [self.block_size / 2.0, self.block_size / 2.0, 0.0])\n cur_max[2] = coord_max[2]\n cur_min[2] = coord_min[2]\n cur_choice = np.sum(\n (coords >= (cur_min - self.enlarge_size)) *\n (coords <= (cur_max + self.enlarge_size)),\n axis=1) == 3\n\n if not 
cur_choice.any(): # no points in this patch\n continue\n\n cur_coords = coords[cur_choice, :]\n cur_sem_mask = sem_mask[cur_choice]\n point_idxs = np.where(cur_choice)[0]\n mask = np.sum(\n (cur_coords >= (cur_min - self.eps)) * (cur_coords <=\n (cur_max + self.eps)),\n axis=1) == 3\n\n # two criteria for patch sampling, adopted from PointNet++\n # 1. selected patch should contain enough unique points\n if self.min_unique_num is None:\n # use PointNet++'s method as default\n # [31, 31, 62] are just some big values used to transform\n # coords from 3d array to 1d and then check their uniqueness\n # this is used in all the ScanNet code following PointNet++\n vidx = np.ceil(\n (cur_coords[mask, :] - cur_min) / (cur_max - cur_min) *\n np.array([31.0, 31.0, 62.0]))\n vidx = np.unique(vidx[:, 0] * 31.0 * 62.0 + vidx[:, 1] * 62.0 +\n vidx[:, 2])\n flag1 = len(vidx) / 31.0 / 31.0 / 62.0 >= 0.02\n else:\n # if `min_unique_num` is provided, directly compare with it\n flag1 = mask.sum() >= self.min_unique_num\n\n # 2. selected patch should contain enough annotated points\n if self.ignore_index is None:\n flag2 = True\n else:\n flag2 = np.sum(cur_sem_mask != self.ignore_index) / \\\n len(cur_sem_mask) >= 0.7\n\n if flag1 and flag2:\n break\n\n # sample idx to `self.num_points`\n if point_idxs.size >= self.num_points:\n # no duplicate in sub-sampling\n choices = np.random.choice(\n point_idxs, self.num_points, replace=False)\n else:\n # do not use random choice here to avoid some points not counted\n dup = np.random.choice(point_idxs.size,\n self.num_points - point_idxs.size)\n idx_dup = np.concatenate(\n [np.arange(point_idxs.size),\n np.array(dup)], 0)\n choices = point_idxs[idx_dup]\n\n # construct model input\n points = self._input_generation(coords[choices], cur_center, coord_max,\n attributes[choices], attribute_dims,\n point_type)\n\n return points, choices\n\n def __call__(self, results):\n \"\"\"Call function to sample points to in indoor scenes.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after sampling, 'points', 'pts_instance_mask' \\\n and 'pts_semantic_mask' keys are updated in the result dict.\n \"\"\"\n points = results['points']\n\n assert 'pts_semantic_mask' in results.keys(), \\\n 'semantic mask should be provided in training and evaluation'\n pts_semantic_mask = results['pts_semantic_mask']\n\n points, choices = self._patch_points_sampling(points,\n pts_semantic_mask)\n\n results['points'] = points\n results['pts_semantic_mask'] = pts_semantic_mask[choices]\n pts_instance_mask = results.get('pts_instance_mask', None)\n if pts_instance_mask is not None:\n results['pts_instance_mask'] = pts_instance_mask[choices]\n\n return results\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(num_points={self.num_points},'\n repr_str += f' block_size={self.block_size},'\n repr_str += f' ignore_index={self.ignore_index},'\n repr_str += f' use_normalized_coord={self.use_normalized_coord},'\n repr_str += f' num_try={self.num_try},'\n repr_str += f' enlarge_size={self.enlarge_size},'\n repr_str += f' min_unique_num={self.min_unique_num},'\n repr_str += f' eps={self.eps})'\n return repr_str\n\n\[email protected]_module()\nclass BackgroundPointsFilter(object):\n \"\"\"Filter background points near the bounding box.\n\n Args:\n bbox_enlarge_range (tuple[float], float): Bbox enlarge range.\n \"\"\"\n\n def __init__(self, bbox_enlarge_range):\n assert 
(is_tuple_of(bbox_enlarge_range, float)\n and len(bbox_enlarge_range) == 3) \\\n or isinstance(bbox_enlarge_range, float), \\\n f'Invalid arguments bbox_enlarge_range {bbox_enlarge_range}'\n\n if isinstance(bbox_enlarge_range, float):\n bbox_enlarge_range = [bbox_enlarge_range] * 3\n self.bbox_enlarge_range = np.array(\n bbox_enlarge_range, dtype=np.float32)[np.newaxis, :]\n\n def __call__(self, input_dict):\n \"\"\"Call function to filter points by the range.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after filtering, 'points', 'pts_instance_mask' \\\n and 'pts_semantic_mask' keys are updated in the result dict.\n \"\"\"\n points = input_dict['points']\n gt_bboxes_3d = input_dict['gt_bboxes_3d']\n\n # avoid groundtruth being modified\n gt_bboxes_3d_np = gt_bboxes_3d.tensor.clone().numpy()\n gt_bboxes_3d_np[:, :3] = gt_bboxes_3d.gravity_center.clone().numpy()\n\n enlarged_gt_bboxes_3d = gt_bboxes_3d_np.copy()\n enlarged_gt_bboxes_3d[:, 3:6] += self.bbox_enlarge_range\n points_numpy = points.tensor.clone().numpy()\n foreground_masks = box_np_ops.points_in_rbbox(\n points_numpy, gt_bboxes_3d_np, origin=(0.5, 0.5, 0.5))\n enlarge_foreground_masks = box_np_ops.points_in_rbbox(\n points_numpy, enlarged_gt_bboxes_3d, origin=(0.5, 0.5, 0.5))\n foreground_masks = foreground_masks.max(1)\n enlarge_foreground_masks = enlarge_foreground_masks.max(1)\n valid_masks = ~np.logical_and(~foreground_masks,\n enlarge_foreground_masks)\n\n input_dict['points'] = points[valid_masks]\n pts_instance_mask = input_dict.get('pts_instance_mask', None)\n if pts_instance_mask is not None:\n input_dict['pts_instance_mask'] = pts_instance_mask[valid_masks]\n\n pts_semantic_mask = input_dict.get('pts_semantic_mask', None)\n if pts_semantic_mask is not None:\n input_dict['pts_semantic_mask'] = pts_semantic_mask[valid_masks]\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(bbox_enlarge_range={self.bbox_enlarge_range.tolist()})'\n return repr_str\n\n\[email protected]_module()\nclass VoxelBasedPointSampler(object):\n \"\"\"Voxel based point sampler.\n\n Apply voxel sampling to multiple sweep points.\n\n Args:\n cur_sweep_cfg (dict): Config for sampling current points.\n prev_sweep_cfg (dict): Config for sampling previous points.\n time_dim (int): Index that indicate the time dimention\n for input points.\n \"\"\"\n\n def __init__(self, cur_sweep_cfg, prev_sweep_cfg=None, time_dim=3):\n self.cur_voxel_generator = VoxelGenerator(**cur_sweep_cfg)\n self.cur_voxel_num = self.cur_voxel_generator._max_voxels\n self.time_dim = time_dim\n if prev_sweep_cfg is not None:\n assert prev_sweep_cfg['max_num_points'] == \\\n cur_sweep_cfg['max_num_points']\n self.prev_voxel_generator = VoxelGenerator(**prev_sweep_cfg)\n self.prev_voxel_num = self.prev_voxel_generator._max_voxels\n else:\n self.prev_voxel_generator = None\n self.prev_voxel_num = 0\n\n def _sample_points(self, points, sampler, point_dim):\n \"\"\"Sample points for each points subset.\n\n Args:\n points (np.ndarray): Points subset to be sampled.\n sampler (VoxelGenerator): Voxel based sampler for\n each points subset.\n point_dim (int): The dimention of each points\n\n Returns:\n np.ndarray: Sampled points.\n \"\"\"\n voxels, coors, num_points_per_voxel = sampler.generate(points)\n if voxels.shape[0] < sampler._max_voxels:\n padding_points = np.zeros([\n sampler._max_voxels - voxels.shape[0], 
sampler._max_num_points,\n point_dim\n ],\n dtype=points.dtype)\n padding_points[:] = voxels[0]\n sample_points = np.concatenate([voxels, padding_points], axis=0)\n else:\n sample_points = voxels\n\n return sample_points\n\n def __call__(self, results):\n \"\"\"Call function to sample points from multiple sweeps.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after sampling, 'points', 'pts_instance_mask' \\\n and 'pts_semantic_mask' keys are updated in the result dict.\n \"\"\"\n points = results['points']\n original_dim = points.shape[1]\n\n # TODO: process instance and semantic mask while _max_num_points\n # is larger than 1\n # Extend points with seg and mask fields\n map_fields2dim = []\n start_dim = original_dim\n points_numpy = points.tensor.numpy()\n extra_channel = [points_numpy]\n for idx, key in enumerate(results['pts_mask_fields']):\n map_fields2dim.append((key, idx + start_dim))\n extra_channel.append(results[key][..., None])\n\n start_dim += len(results['pts_mask_fields'])\n for idx, key in enumerate(results['pts_seg_fields']):\n map_fields2dim.append((key, idx + start_dim))\n extra_channel.append(results[key][..., None])\n\n points_numpy = np.concatenate(extra_channel, axis=-1)\n\n # Split points into two part, current sweep points and\n # previous sweeps points.\n # TODO: support different sampling methods for next sweeps points\n # and previous sweeps points.\n cur_points_flag = (points_numpy[:, self.time_dim] == 0)\n cur_sweep_points = points_numpy[cur_points_flag]\n prev_sweeps_points = points_numpy[~cur_points_flag]\n if prev_sweeps_points.shape[0] == 0:\n prev_sweeps_points = cur_sweep_points\n\n # Shuffle points before sampling\n np.random.shuffle(cur_sweep_points)\n np.random.shuffle(prev_sweeps_points)\n\n cur_sweep_points = self._sample_points(cur_sweep_points,\n self.cur_voxel_generator,\n points_numpy.shape[1])\n if self.prev_voxel_generator is not None:\n prev_sweeps_points = self._sample_points(prev_sweeps_points,\n self.prev_voxel_generator,\n points_numpy.shape[1])\n\n points_numpy = np.concatenate(\n [cur_sweep_points, prev_sweeps_points], 0)\n else:\n points_numpy = cur_sweep_points\n\n if self.cur_voxel_generator._max_num_points == 1:\n points_numpy = points_numpy.squeeze(1)\n results['points'] = points.new_point(points_numpy[..., :original_dim])\n\n # Restore the correspoinding seg and mask fields\n for key, dim_index in map_fields2dim:\n results[key] = points_numpy[..., dim_index]\n\n return results\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n\n def _auto_indent(repr_str, indent):\n repr_str = repr_str.split('\\n')\n repr_str = [' ' * indent + t + '\\n' for t in repr_str]\n repr_str = ''.join(repr_str)[:-1]\n return repr_str\n\n repr_str = self.__class__.__name__\n indent = 4\n repr_str += '(\\n'\n repr_str += ' ' * indent + f'num_cur_sweep={self.cur_voxel_num},\\n'\n repr_str += ' ' * indent + f'num_prev_sweep={self.prev_voxel_num},\\n'\n repr_str += ' ' * indent + f'time_dim={self.time_dim},\\n'\n repr_str += ' ' * indent + 'cur_voxel_generator=\\n'\n repr_str += f'{_auto_indent(repr(self.cur_voxel_generator), 8)},\\n'\n repr_str += ' ' * indent + 'prev_voxel_generator=\\n'\n repr_str += f'{_auto_indent(repr(self.prev_voxel_generator), 8)})'\n return repr_str\n" ]
[ [ "numpy.amax", "numpy.concatenate", "numpy.random.randn", "numpy.where", "numpy.clip", "numpy.unique", "numpy.arange", "numpy.linalg.det", "numpy.zeros", "numpy.random.choice", "numpy.amin", "numpy.random.rand", "numpy.logical_and", "numpy.array", "numpy.sum", "numpy.linalg.norm", "numpy.random.shuffle", "numpy.random.normal", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AidenPearce7/python-tictactoe
[ "1f5aaaca87bfb8487a1366b4bc2bd567df8feb5e" ]
[ "src/opencv_backend/ui.py" ]
[ "\"\"\"UI class\"\"\"\nimport cv2 as cv\nimport numpy as np\n\n\nclass UI:\n \"\"\"Handles UI drawing and managing\"\"\"\n\n def __init__(self, frame):\n height, width, channels = frame.shape\n self.width = width\n self.height = height\n self.separators = {\n \"y\": (0, height // 3, 2 * height // 3),\n \"x\": (0, width // 3, 2 * width // 3),\n }\n self.figure = np.zeros((height, width, channels), dtype=np.uint8)\n self.grid_drawn = False\n\n def draw_grid(self, color=(255, 0, 0), thickness=9):\n \"\"\"Draws a 3 by 3 grid on the frame\"\"\"\n if not self.grid_drawn:\n for i in range(1, 3):\n startpoint_height = (0, self.separators[\"y\"][i])\n startpoint_width = (self.separators[\"x\"][i], 0)\n endpoint_height = (self.width, self.separators[\"y\"][i])\n endpoint_width = (self.separators[\"x\"][i], self.height)\n self.figure = cv.line(\n self.figure, startpoint_height, endpoint_height, color, thickness\n )\n self.figure = cv.line(\n self.figure, startpoint_width, endpoint_width, color, thickness\n )\n self.grid_drawn = True\n\n def _draw_x(self, x, y, color, thickness):\n \"\"\"Draws X on the selected grid marker.\\n\n location should be a tuple with two numbers indicating place on the grid\"\"\"\n width_offset = self.separators[\"x\"][1] * 0.25\n height_offset = self.separators[\"y\"][1] * 0.25\n\n left = int(self.separators[\"x\"][x] + width_offset)\n up = int(self.separators[\"y\"][y] + height_offset)\n right = int(self.separators[\"x\"][x] + width_offset * 3)\n down = int(self.separators[\"y\"][y] + height_offset * 3)\n self.figure = cv.line(self.figure, (left, up), (right, down), color, thickness)\n self.figure = cv.line(self.figure, (left, down), (right, up), color, thickness)\n\n def _draw_circle(self, x, y, color, thickness):\n \"\"\"Draws circle on the selected grid marker.\\n\n location should be a tuple with two numbers indicating place on the grid\"\"\"\n width_offset = self.separators[\"x\"][1] * 0.5\n height_offset = self.separators[\"y\"][1] * 0.5\n center = (\n int(self.separators[\"x\"][x] + width_offset),\n int(self.separators[\"y\"][y] + height_offset),\n )\n radius = int(height_offset * 0.75)\n self.figure = cv.circle(self.figure, center, radius, color, thickness)\n\n def draw_move(self, coords, color=(0, 0, 255), thickness=7):\n \"\"\"Draws a shape based on the coordinate object\"\"\"\n if coords.symbol == \"x\":\n self._draw_x(coords.x, coords.y, color, thickness)\n else:\n self._draw_circle(coords.x, coords.y, color, thickness)\n\n def get_separators(self):\n \"\"\"Returns the separators used for the processing\"\"\"\n return self.separators\n\n def overlay(self, frame):\n \"\"\"Returns the frame with added figure array\"\"\"\n return cv.add(frame, self.figure)\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gabemery/gammapy
[ "99e5c5d38e4920dddd7bca41fb1539ccda8bea2d", "99e5c5d38e4920dddd7bca41fb1539ccda8bea2d", "99e5c5d38e4920dddd7bca41fb1539ccda8bea2d", "99e5c5d38e4920dddd7bca41fb1539ccda8bea2d", "99e5c5d38e4920dddd7bca41fb1539ccda8bea2d", "99e5c5d38e4920dddd7bca41fb1539ccda8bea2d", "99e5c5d38e4920dddd7bca41fb1539ccda8bea2d", "99e5c5d38e4920dddd7bca41fb1539ccda8bea2d" ]
[ "gammapy/astro/population/tests/test_simulate.py", "gammapy/astro/source/snr.py", "gammapy/cube/sherpa_.py", "gammapy/cube/tests/test_cube_pipe.py", "gammapy/data/tests/test_event_list.py", "gammapy/utils/tests/test_nddata.py", "gammapy/spectrum/tests/test_results.py", "gammapy/image/catalog.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom numpy.testing import assert_allclose\nfrom astropy.table import Table\nimport astropy.units as u\nfrom ....utils.testing import requires_dependency\nfrom ...population import (\n make_base_catalog_galactic,\n make_catalog_random_positions_cube,\n make_catalog_random_positions_sphere,\n add_snr_parameters,\n add_pulsar_parameters,\n add_pwn_parameters,\n add_observed_parameters,\n add_observed_source_parameters,\n)\n\n\ndef test_make_catalog_random_positions_cube():\n size = 100\n table = make_catalog_random_positions_cube(size=size)\n assert len(table) == size\n\n\ndef test_make_catalog_random_positions_sphere():\n size = 100\n table = make_catalog_random_positions_sphere(size=size,\n center='Milky Way')\n assert len(table) == size\n\n\ndef test_make_base_catalog_galactic():\n \"\"\"Test that make_base_catalog_galactic uses random_state correctly.\n\n Calling with a given seed should always give the same output.\n\n Regression test for https://github.com/gammapy/gammapy/issues/959\n \"\"\"\n table = make_base_catalog_galactic(n_sources=10, random_state=0)\n assert len(table) == 10\n assert table.colnames == [\n 'age', 'n_ISM', 'spiralarm',\n 'x_birth', 'y_birth', 'z_birth',\n 'x', 'y', 'z',\n 'vx', 'vy', 'vz', 'v_abs',\n ]\n\n d = table[0]\n\n assert_allclose(d['age'], 548813.50392732478)\n assert_allclose(d['n_ISM'], 1.0)\n assert d['spiralarm'] == 'Crux Scutum'\n\n assert_allclose(d['x_birth'], 0.58513884292018437)\n assert_allclose(d['y_birth'], -11.682838052120154)\n assert_allclose(d['z_birth'], 0.15710279448905115)\n assert_allclose(d['x'], 0.5828226720259867)\n assert_allclose(d['y'], -11.658959390801584)\n assert_allclose(d['z'], 0.35098629652725671)\n assert_allclose(d['vx'], -4.1266001441394655)\n assert_allclose(d['vy'], 42.543357869627776)\n assert_allclose(d['vz'], 345.43206179709432)\n assert_allclose(d['v_abs'], 348.06648135803658)\n\n\ndef test_add_observed_parameters():\n table = make_base_catalog_galactic(n_sources=10, random_state=0)\n table = add_observed_parameters(table)\n\n assert len(table) == 10\n assert set(table.colnames).issuperset([\n 'distance', 'GLON', 'GLAT', 'VGLON', 'VGLAT', 'RA', 'DEC',\n ])\n\n d = table[0]\n\n assert_allclose(d['distance'], 3231.392591455106)\n assert_allclose(d['GLON'], 169.54657778189639)\n assert_allclose(d['GLAT'], 6.2356357665816162)\n assert_allclose(d['VGLON'], 0.066778795313076678)\n assert_allclose(d['VGLAT'], 5.6115948931932174)\n assert_allclose(d['RA'], 86.308826288823127)\n assert_allclose(d['DEC'], 41.090120056648828)\n\n\ndef test_add_snr_parameters():\n table = Table()\n table['age'] = [100, 1000] * u.yr\n table['n_ISM'] = u.Quantity(1, 'cm-3')\n\n table = add_snr_parameters(table)\n\n assert len(table) == 2\n assert table.colnames == ['age', 'n_ISM', 'E_SN', 'r_out', 'r_in', 'L_SNR']\n\n assert_allclose(table['E_SN'], 1e51)\n assert_allclose(table['r_out'], [1, 3.80730787743])\n assert_allclose(table['r_in'], [0.9086, 3.45931993743])\n assert_allclose(table['L_SNR'], [0, 1.0768e+33])\n\n\ndef test_add_pulsar_parameters():\n table = Table()\n table['age'] = [100, 1000] * u.yr\n\n table = add_pulsar_parameters(table, random_state=0)\n\n assert len(table) == 2\n assert table.colnames == ['age', 'P0', 'P1', 'P0_birth', 'P1_birth', 'CharAge',\n 'Tau0', 'L_PSR', 'L0_PSR', 'logB']\n\n assert_allclose(table['P0'], [0.322829453422, 0.51352778881])\n 
assert_allclose(table['P1'], [4.54295751161e-14, 6.98423128444e-13])\n assert_allclose(table['P0_birth'], [0.322254715288, 0.388110930459])\n assert_allclose(table['P1_birth'], [4.55105983192e-14, 9.24116423053e-13])\n assert_allclose(table['CharAge'], [2.32368825638e-22, 5.6826197937e-21])\n assert_allclose(table['Tau0'], [112189.64476, 6654.19039158])\n assert_allclose(table['L_PSR'], [5.37834069771e+34, 8.25708734631e+35])\n assert_allclose(table['L0_PSR'], [5.36876555682e+34, 6.24049160082e+35])\n assert_allclose(table['logB'], [12.5883058913, 13.2824912596])\n\n\n@requires_dependency('scipy')\ndef test_add_pwn_parameters():\n table = make_base_catalog_galactic(n_sources=10, random_state=0)\n # To compute PWN parameters we need PSR and SNR parameters first\n table = add_snr_parameters(table)\n table = add_pulsar_parameters(table, random_state=0)\n table = add_pwn_parameters(table)\n assert len(table) == 10\n\n d = table[0]\n assert_allclose(d['r_out_PWN'], 0.5892196771927385, atol=1e-3)\n assert_allclose(d['L_PWN'], 7.057857699785925e+45)\n\n\n@requires_dependency('scipy')\ndef test_chain_all():\n \"\"\"\n Test that running the simulation functions in chain works\n \"\"\"\n table = make_base_catalog_galactic(n_sources=10, random_state=0)\n table = add_snr_parameters(table)\n table = add_pulsar_parameters(table, random_state=0)\n table = add_pwn_parameters(table)\n table = add_observed_parameters(table)\n table = add_observed_source_parameters(table)\n\n # Note: the individual functions are tested above.\n # Here we just run them in a chain and do very basic asserts\n # on the output so that we make sure we notice changes.\n assert len(table) == 10\n assert len(table.colnames) == 43\n d = table[0]\n assert_allclose(d['r_out_PWN'], 0.5892196771927385, atol=1e-3)\n assert_allclose(d['RA'], 86.308826288823127)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Supernova remnant (SNR) source models\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nfrom astropy.units import Quantity\nimport astropy.constants as const\nfrom astropy.utils import lazyproperty\nfrom ...extern.validator import validate_physical_type\n\n__all__ = [\n 'SNR',\n 'SNRTrueloveMcKee',\n]\n\n\nclass SNR(object):\n \"\"\"Simple supernova remnant (SNR) evolution model.\n\n The model is based on the Sedov-Taylor solution for strong explosions.\n\n Reference: http://adsabs.harvard.edu/abs/1950RSPSA.201..159T\n\n Parameters\n ----------\n e_sn : `~astropy.units.Quantity`\n SNR energy (erg), equal to the SN energy after neutrino losses\n theta : `~astropy.units.Quantity`\n Fraction of E_SN that goes into cosmic rays\n n_ISM : `~astropy.units.Quantity`\n ISM density (g cm^-3)\n m_ejecta : `~astropy.units.Quantity`\n Ejecta mass (g)\n t_stop : `~astropy.units.Quantity`\n Post-shock temperature where gamma-ray emission stops.\n \"\"\"\n\n def __init__(self, e_sn=Quantity(1e51, 'erg'), theta=Quantity(0.1),\n n_ISM=Quantity(1, 'cm-3'), m_ejecta=const.M_sun,\n t_stop=Quantity(1e6, 'K'), age=None, morphology='Shell2D',\n spectral_index=2.1):\n self.e_sn = e_sn\n self.theta = theta\n self.rho_ISM = n_ISM * const.m_p\n self.n_ISM = n_ISM\n self.m_ejecta = m_ejecta\n self.t_stop = t_stop\n self.morphology = morphology\n self.spectral_index = spectral_index\n if age is not None:\n validate_physical_type('age', age, 'time')\n self.age = age\n\n def radius(self, t=None):\n \"\"\"Outer shell radius at age t.\n\n Parameters\n ----------\n t : 
`~astropy.units.Quantity`\n Time after birth of the SNR.\n\n Notes\n -----\n The radius during the free expansion phase is given by:\n\n .. math::\n\n r_{SNR}(t) \\\\approx 0.01 \\\\textnormal{}\n \\\\left(\\\\frac{E_{SN}}{10^{51}erg}\\\\right)^{1/2}\n \\\\left(\\\\frac{M_{ej}}{M_{\\\\odot}}\\\\right)^{-1/2} t\n\n The radius during the Sedov-Taylor phase evolves like:\n\n .. math::\n\n r_{SNR}(t) \\\\approx \\\\left(\\\\frac{E_{SN}}{\\\\rho_{ISM}}\\\\right)^{1/5}t^{2/5}\n\n \"\"\"\n if t is not None:\n validate_physical_type('t', t, 'time')\n elif hasattr(self, 'age'):\n t = self.age\n else:\n raise ValueError('Need time variable or age attribute.')\n r = np.where(t > self.sedov_taylor_begin,\n self._radius_sedov_taylor(t).to('cm').value,\n self._radius_free_expansion(t).to('cm').value)\n return Quantity(r, 'cm')\n\n def _radius_free_expansion(self, t):\n \"\"\"Shock radius at age t during free expansion phase.\n\n Parameters\n ----------\n t : `~astropy.units.Quantity`\n Time after birth of the SNR.\n\n \"\"\"\n # proportional constant for the free expansion phase\n term_1 = (self.e_sn / Quantity(1e51, 'erg')) ** (1. / 2)\n term_2 = (self.m_ejecta / const.M_sun) ** (-1. / 2)\n return Quantity(0.01, 'pc/yr') * term_1 * term_2 * t\n\n def _radius_sedov_taylor(self, t):\n \"\"\"Shock radius at age t during Sedov Taylor phase.\n\n Parameters\n ----------\n t : `~astropy.units.Quantity`\n Time after birth of the SNR.\n\n \"\"\"\n R_FE = self._radius_free_expansion(self.sedov_taylor_begin)\n return R_FE * (t / self.sedov_taylor_begin) ** (2. / 5)\n\n def radius_inner(self, t, fraction=0.0914):\n \"\"\"Inner radius at age t of the SNR shell.\n\n Parameters\n ----------\n t : `~astropy.units.Quantity`\n Time after birth of the SNR.\n\n \"\"\"\n return self.radius(t) * (1 - fraction)\n\n def luminosity_tev(self, t=None, energy_min=Quantity(1, 'TeV')):\n \"\"\"Gamma-ray luminosity above ``energy_min`` at age ``t``.\n\n The luminosity is assumed constant in a given age interval and zero\n before and after. The assumed spectral index is 2.1.\n\n Reference: http://adsabs.harvard.edu/abs/1994A%26A...287..959D (Formula (7)).\n\n Parameters\n ----------\n t : `~astropy.units.Quantity`\n Time after birth of the SNR.\n energy_min : `~astropy.units.Quantity`\n Lower energy limit for the luminosity.\n\n Notes\n -----\n The gamma-ray luminosity above 1 TeV is given by:\n\n .. 
math::\n\n L_{\\\\gamma}(\\\\geq 1TeV) \\\\approx 10^{34} \\\\theta\n \\\\left(\\\\frac{E_{SN}}{10^{51} erg}\\\\right)\n \\\\left(\\\\frac{\\\\rho_{ISM}}{1.66\\\\cdot 10^{-24} g/cm^{3}} \\\\right)\n \\\\textnormal{ph} s^{-1}\n\n \"\"\"\n if t is not None:\n validate_physical_type('t', t, 'time')\n elif hasattr(self, 'age'):\n t = self.age\n else:\n raise ValueError('Need time variable or age attribute.')\n\n # Flux in 1 k distance according to Drury formula 9\n term_0 = energy_min / Quantity(1, 'TeV')\n term_1 = self.e_sn / Quantity(1e51, 'erg')\n term_2 = self.rho_ISM / (Quantity(1, 'cm-3') * const.m_p)\n L = self.theta * term_0 ** (1 - self.spectral_index) * term_1 * term_2\n\n # Corresponding luminosity\n L = np.select([t <= self.sedov_taylor_begin, t <= self.sedov_taylor_end], [0, L])\n return Quantity(1.0768E34, 's-1') * L\n\n @lazyproperty\n def sedov_taylor_begin(self):\n \"\"\"Characteristic time scale when the Sedov-Taylor phase of the SNR's evolution begins.\n\n Notes\n -----\n The beginning of the Sedov-Taylor phase of the SNR is defined by the condition,\n that the swept up mass of the surrounding medium equals the mass of the\n ejected mass. The time scale is given by:\n\n .. math::\n\n t_{begin} \\\\approx 200 \\\\ \\\\textnormal{}\n \\\\left(\\\\frac{E_{SN}}{10^{51}erg}\\\\right)^{-1/2}\n \\\\left(\\\\frac{M_{ej}}{M_{\\\\odot}}\\\\right)^{5/6}\n \\\\left(\\\\frac{\\\\rho_{ISM}}{10^{-24}g/cm^3}\\\\right)^{-1/3}\n\n \"\"\"\n term1 = (self.e_sn / Quantity(1e51, 'erg')) ** (-1. / 2)\n term2 = (self.m_ejecta / const.M_sun) ** (5. / 6)\n term3 = (self.rho_ISM / (Quantity(1, 'cm-3') * const.m_p)) ** (-1. / 3)\n return Quantity(200, 'yr') * term1 * term2 * term3\n\n @lazyproperty\n def sedov_taylor_end(self):\n \"\"\"Characteristic time scale when the Sedov-Taylor phase of the SNR's evolution ends.\n\n Notes\n -----\n The end of the Sedov-Taylor phase of the SNR is defined by the condition, that the\n temperature at the shock drops below T = 10^6 K. The time scale is given by:\n\n .. math::\n\n t_{end} \\\\approx 43000 \\\\textnormal{ }\n \\\\left(\\\\frac{m}{1.66\\\\cdot 10^{-24}g}\\\\right)^{5/6}\n \\\\left(\\\\frac{E_{SN}}{10^{51}erg}\\\\right)^{1/3}\n \\\\left(\\\\frac{\\\\rho_{ISM}}{1.66\\\\cdot 10^{-24}g/cm^3}\\\\right)^{-1/3}\n\n \"\"\"\n term1 = 3 * const.m_p.cgs / (100 * const.k_B.cgs * self.t_stop)\n term2 = (self.e_sn / self.rho_ISM) ** (2. / 5)\n return ((term1 * term2) ** (5. / 6)).to('yr')\n\n\nclass SNRTrueloveMcKee(SNR):\n \"\"\"SNR model according to Truelove & McKee (1999).\n\n Reference: http://adsabs.harvard.edu/abs/1999ApJS..120..299T\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SNRTrueloveMcKee, self).__init__(*args, **kwargs)\n\n # Characteristic dimensions\n self.r_c = self.m_ejecta ** (1. / 3) * self.rho_ISM ** (-1. / 3)\n self.t_c = self.e_sn ** (-1. / 2) * self.m_ejecta ** (5. / 6) * self.rho_ISM ** (-1. / 3)\n\n def radius(self, t=None):\n \"\"\"Outer shell radius at age t.\n\n Parameters\n ----------\n t : `~astropy.units.Quantity`\n Time after birth of the SNR.\n\n Notes\n -----\n The radius during the free expansion phase is given by:\n\n .. math::\n\n R_{SNR}(t) = 1.12R_{ch}\\\\left(\\\\frac{t}{t_{ch}}\\\\right)^{2/3}\n\n The radius during the Sedov-Taylor phase evolves like:\n\n .. math::\n\n R_{SNR}(t) = \\\\left[R_{SNR, ST}^{5/2} + \\\\left(2.026\\\\frac{E_{SN}}\n {\\\\rho_{ISM}}\\\\right)^{1/2}(t - t_{ST})\\\\right]^{2/5}\n\n Using the characteristic dimensions:\n\n .. 
math::\n\n R_{ch} = M_{ej}^{1/3}\\\\rho_{ISM}^{-1/3} \\\\ \\\\\n \\\\textnormal{and} \\\\ \\\\ t_{ch} = E_{SN}^{-1/2}M_{ej}^{5/6}\\\\rho_{ISM}^{-1/3}\n\n \"\"\"\n if t is not None:\n validate_physical_type('t', t, 'time')\n elif hasattr(self, 'age'):\n t = self.age\n else:\n raise ValueError('Need time variable or age attribute.')\n\n # Evaluate `_radius_sedov_taylor` on `t > self.sedov_taylor_begin`\n # only to avoid a warning\n r = np.empty(t.shape, dtype=np.float64)\n mask = t > self.sedov_taylor_begin\n r[mask] = self._radius_sedov_taylor(t[mask]).to('cm').value\n r[~mask] = self._radius_free_expansion(t[~mask]).to('cm').value\n return Quantity(r, 'cm')\n\n def _radius_free_expansion(self, t):\n \"\"\"Shock radius at age t during free expansion phase.\n\n Parameters\n ----------\n t : `~astropy.units.Quantity`\n Time after birth of the SNR.\n\n \"\"\"\n return 1.12 * self.r_c * (t / self.t_c) ** (2. / 3)\n\n def _radius_sedov_taylor(self, t):\n \"\"\"Shock radius at age t during Sedov Taylor phase.\n\n Parameters\n ----------\n t : `~astropy.units.Quantity`\n Time after birth of the SNR.\n \"\"\"\n term1 = self._radius_free_expansion(self.sedov_taylor_begin) ** (5. / 2)\n term2 = (2.026 * (self.e_sn / self.rho_ISM)) ** (1. / 2)\n return (term1 + term2 * (t - self.sedov_taylor_begin)) ** (2. / 5)\n\n @lazyproperty\n def sedov_taylor_begin(self):\n \"\"\"Characteristic time scale when the Sedov-Taylor phase starts.\n\n Given by :math:`t_{ST} \\\\approx 0.52 t_{ch}`.\n \"\"\"\n return 0.52 * self.t_c\n\n def radius_reverse_shock(self, t):\n \"\"\"Reverse shock radius at age t.\n\n Parameters\n ----------\n t : `~astropy.units.Quantity`\n Time after birth of the SNR.\n\n Notes\n -----\n Initially the reverse shock co-evolves with the radius of the SNR:\n\n .. math::\n\n R_{RS}(t) = \\\\frac{1}{1.19}r_{SNR}(t)\n\n After a time :math:`t_{core} \\\\simeq 0.25t_{ch}` the reverse shock reaches\n the core and then propagates as:\n\n .. 
math::\n\n R_{RS}(t) = \\\\left[1.49 - 0.16 \\\\frac{t - t_{core}}{t_{ch}} - 0.46\n \\\\ln \\\\left(\\\\frac{t}{t_{core}}\\\\right)\\\\right]\\\\frac{R_{ch}}{t_{ch}}t\n \"\"\"\n if t is not None:\n validate_physical_type('t', t, 'time')\n elif hasattr(self, 'age'):\n t = self.age\n else:\n raise ValueError('Need time variable or age attribute.')\n\n # Time when reverse shock reaches the \"core\"\n t_core = 0.25 * self.t_c\n\n term1 = (t - t_core) / (self.t_c)\n term2 = (1.49 - 0.16 * term1 - 0.46 * np.log(t / t_core))\n R_1 = self._radius_free_expansion(t) / 1.19\n R_RS = term2 * (self.r_c / self.t_c) * t\n r = np.where(t < t_core,\n R_1.to('cm').value,\n R_RS.to('cm').value)\n return Quantity(r, 'cm')\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nfrom ..utils.energy import EnergyBounds\nfrom sherpa.astro.ui import erf\nimport astropy.wcs as WCS\nfrom sherpa.models import ArithmeticModel, Parameter, modelCacher1d\nfrom sherpa.data import DataND, BaseData\nfrom sherpa.utils.err import DataErr, NotImplementedErr\nfrom sherpa.utils import SherpaFloat, NoNewAttributesAfterInit, \\\n print_fields, create_expr, calc_total_error, bool_cast, \\\n filter_bins, interpolate, linear_interp\n\n\"\"\"\nDefinition of the model NormGauss2DInt: Integrated 2D gaussian\n\"\"\"\nfwhm_to_sigma = 1 / (2 * np.sqrt(2 * np.log(2)))\nfwhm_to_sigma_erf = np.sqrt(2) * fwhm_to_sigma\n\n\nclass NormGauss2DInt(ArithmeticModel):\n \"\"\"Integrated 2D gaussian for sherpa models\n \"\"\"\n\n def __init__(self, name='normgauss2dint'):\n # Gauss source parameters\n self.wcs = WCS.WCS()\n self.coordsys = \"galactic\" # default\n self.binsize = 1.0\n self.xpos = Parameter(name, 'xpos', 0) # p[0]\n self.ypos = Parameter(name, 'ypos', 0) # p[1]\n self.ampl = Parameter(name, 'ampl', 1) # p[2]\n self.fwhm = Parameter(name, 'fwhm', 1, min=0) # p[3]\n self.shape = None\n self.n_ebins = None\n ArithmeticModel.__init__(self, name, (self.xpos, self.ypos, self.ampl, self.fwhm))\n\n def calc(self, p, xlo, xhi, ylo, yhi, *args, **kwargs):\n \"\"\"\n The normgauss2dint model uses the error function to evaluate the\n the gaussian. This corresponds to an integration over bins.\n \"\"\"\n return self.normgauss2d(p, xlo, xhi, ylo, yhi)\n\n def normgauss2d(self, p, xlo, xhi, ylo, yhi):\n sigma_erf = p[3] * fwhm_to_sigma_erf\n return p[2] / 4. 
* ((erf.calc.calc([1, p[0], sigma_erf], xhi)\n - erf.calc.calc([1, p[0], sigma_erf], xlo))\n * (erf.calc.calc([1, p[1], sigma_erf], yhi)\n - erf.calc.calc([1, p[1], sigma_erf], ylo)))\n\n\n# This class was copy pasted from sherpa.data.Data2D and modified to account\n# for the third dimension\n# it is set up to integrate over the energy (first) axis, but not all class\n# methods are adapted to that yet (TODO)\n\nclass Data3D(DataND):\n \"\"\"Sherpa 3-D data set.\n \"\"\"\n\n def _set_mask(self, val):\n DataND._set_mask(self, val)\n try:\n self._lo = self.apply_filter(self.xlo)\n self._hi = self.apply_filter(self.xhi)\n self._x1 = self.apply_filter(self.x1)\n self._x2 = self.apply_filter(self.x2)\n except DataErr:\n self._hi = self.xhi\n self._lo = self.xlo\n self._x1 = self.x1\n self._x2 = self.x2\n\n mask = property(DataND._get_mask, _set_mask,\n doc='Mask array for dependent variable')\n\n def __init__(self, name, xlo, xhi, x1, x2, y, shape=None, staterror=None,\n syserror=None):\n self._lo = xlo\n self._hi = xhi\n self._x1 = x1\n self._x2 = x2\n BaseData.__init__(self)\n\n def get_indep(self, filter=False):\n filter = bool_cast(filter)\n if filter:\n return (self._lo, self._hi, self._x1, self._x2)\n return (self.xlo, self.xhi, self.x1, self.x2)\n\n def get_x0(self, filter=False):\n return self.get_indep(filter)[0]\n\n def get_x1(self, filter=False):\n return self.get_indep(filter)[1]\n\n def get_x2(self, filter=False):\n return self.get_indep(filter)[2]\n\n def get_axes(self):\n self._check_shape()\n # FIXME: how to filter an axis when self.mask is size of self.y?\n return (np.arange(self.shape[1]) + 1, np.arange(self.shape[0]) + 1, np.arange(self.shape[0]) + 1)\n\n def get_dims(self, filter=False):\n # self._check_shape()\n if self.shape is not None:\n return self.shape[::-1]\n return (len(self.get_x0(filter)), len(self.get_x1(filter)), len(self.get_x2(filter)))\n\n def get_filter_expr(self):\n return ''\n\n get_filter = get_filter_expr\n\n def _check_shape(self):\n if self.shape is None:\n raise DataErr('shape', self.name)\n\n def get_max_pos(self, dep=None):\n if dep is None:\n dep = self.get_dep(True)\n x0 = self.get_x0(True)\n x1 = self.get_x1(True)\n x2 = self.get_x2(True)\n\n pos = np.asarray(np.where(dep == dep.max())).squeeze()\n if pos.ndim == 0:\n pos = int(pos)\n return (x0[pos], x1[pos], x2[pos])\n\n return [(x0[index], x1[index], x2[index]) for index in pos]\n\n def get_img(self, yfunc=None):\n self._check_shape()\n y_img = self.get_y(False, yfunc)\n if yfunc is not None:\n y_img = (y_img[0].reshape(*self.shape),\n y_img[1].reshape(*self.shape))\n else:\n y_img = y_img.reshape(*self.shape)\n return y_img\n\n def get_imgerr(self):\n self._check_shape()\n err = self.get_error()\n if err is not None:\n err = err.reshape(*self.shape)\n return err\n\n def notice(self, x0lo=None, x0hi=None, x1lo=None, x1hi=None, x2lo=None, x2hi=None, ignore=False):\n BaseData.notice(self, (x0lo, x1lo, x2lo), (x0hi, x1hi, x2hi), self.get_indep(),\n ignore)\n\n\nclass Data3DInt(DataND):\n \"3-D integrated data set\"\n\n def _set_mask(self, val):\n DataND._set_mask(self, val)\n try:\n self._x0lo = self.apply_filter(self.x0lo)\n self._x0hi = self.apply_filter(self.x0hi)\n self._x1lo = self.apply_filter(self.x1lo)\n self._x1hi = self.apply_filter(self.x1hi)\n self._x2lo = self.apply_filter(self.x2lo)\n self._x2hi = self.apply_filter(self.x2hi)\n except DataErr:\n self._x0lo = self.x0lo\n self._x1lo = self.x1lo\n self._x0hi = self.x0hi\n self._x1hi = self.x1hi\n self._x2hi = self.x2hi\n self._x2hi = 
self.x2hi\n\n mask = property(DataND._get_mask, _set_mask,\n doc='Mask array for dependent variable')\n\n def __init__(self, name, x0lo, x1lo, x2lo, x0hi, x1hi, x2hi, y, shape=None,\n staterror=None, syserror=None):\n self._x0lo = x0lo\n self._x1lo = x1lo\n self._x2lo = x2lo\n self._x0hi = x0hi\n self._x1hi = x1hi\n self._x2hi = x2hi\n BaseData.__init__(self)\n\n def get_indep(self, filter=False):\n filter = bool_cast(filter)\n if filter:\n return (self._x0lo, self._x1lo, self._x2lo, self._x0hi, self._x1hi, self._x2hi)\n return (self.x0lo, self.x1lo, self.x2lo, self.x0hi, self.x1hi, self.x2hi)\n\n def get_x0(self, filter=False):\n indep = self.get_indep(filter)\n return (indep[0] + indep[3]) / 2.0\n\n def get_x1(self, filter=False):\n indep = self.get_indep(filter)\n return (indep[1] + indep[4]) / 2.0\n\n def get_x2(self, filter=False):\n indep = self.get_indep(filter)\n return (indep[2] + indep[5]) / 2.0\n\n def notice(self, x0lo=None, x0hi=None, x1lo=None, x1hi=None, x2lo=None, x2hi=None, ignore=False):\n BaseData.notice(self, (x0lo, x1lo, x2hi),\n (x0hi, x1hi, x2hi), self.get_indep(), ignore)\n\n\nclass CombinedModel3D(ArithmeticModel):\n \"\"\"\n Combined spatial and spectral 3D model.\n If you ask for a selected region, it will only compare the data and the Combined model on the selected region\n \"\"\"\n\n def __init__(self, name='cube-model', spatial_model=None, spectral_model=None):\n self.spatial_model = spatial_model\n self.spectral_model = spectral_model\n\n # Fix spectral ampl parameter\n spectral_model.ampl = 1\n spectral_model.ampl.freeze()\n\n pars = []\n for _ in spatial_model.pars + spectral_model.pars:\n setattr(self, _.name, _)\n pars.append(_)\n\n self._spatial_pars = slice(0, len(spatial_model.pars))\n self._spectral_pars = slice(len(spatial_model.pars), len(pars))\n ArithmeticModel.__init__(self, name, pars)\n\n def calc(self, pars, elo, ehi, x, y):\n _spatial = self.spatial_model.calc(pars[self._spatial_pars], x, y)\n _spectral = self.spectral_model.calc(pars[self._spectral_pars], elo, ehi)\n return _spatial * _spectral\n\n\nclass CombinedModel3DInt(ArithmeticModel):\n \"\"\"\n Combined spatial and spectral 3D model with the possibility to convolve the spatial model*exposure by the PSF.\n If you ask for a selected region, it will only compare the data and the Combined model on the selected region\n\n Parameters\n ----------\n coord: `~astropy.coordinates.SkyCoord`\n Position of the edges of the pixel on the sky.\n energies: `~astropy.units.Quantity`\n Reconstructed energy used for the counts cube\n use_psf: bool\n if true will convolve the spatial model by the psf\n exposure: `~gammapy.cube.SkyCube`\n Exposure cube\n psf: `~gammapy.cube.SkyCube`\n Psf cube\n select_region: True\n If True select only the points of the region of interest for the fit\n index_selected_region: tuple\n tuple of three `~numpy.ndarray` containing the indexes of the points of the Cube to keep in the fit (Energy, x, y)\n \"\"\"\n\n def __init__(self, coord, energies, name='cube-model', use_psf=True, exposure=None, psf=None, spatial_model=None,\n spectral_model=None, select_region=False, index_selected_region=None):\n from scipy import signal\n self.spatial_model = spatial_model\n self.spectral_model = spectral_model\n self.use_psf = use_psf\n self.exposure = exposure\n self.psf = psf\n self._fftconvolve = signal.fftconvolve\n xx = coord.data.lon.degree\n yy = coord.data.lat.degree\n self.xx_lo = xx[0:-1, 1:]\n self.xx_hi = xx[0:-1, 0:-1]\n self.yy_lo = yy[0:-1, 0:-1]\n self.yy_hi = yy[1:, 
0:-1]\n self.ee_lo = energies[:-1]\n self.ee_hi = energies[1:]\n self.select_region = select_region\n self.index_selected_region = index_selected_region\n\n # Fix spectral ampl parameter\n spectral_model.ampl = 1\n spectral_model.ampl.freeze()\n\n pars = []\n for _ in spatial_model.pars + spectral_model.pars:\n setattr(self, _.name, _)\n pars.append(_)\n\n self._spatial_pars = slice(0, len(spatial_model.pars))\n self._spectral_pars = slice(len(spatial_model.pars), len(pars))\n ArithmeticModel.__init__(self, name, pars)\n\n def calc(self, pars, elo, xlo, ylo, ehi, xhi, yhi):\n\n if self.use_psf:\n shape = (len(self.ee_lo), len(self.xx_lo[:, 0]), len(self.xx_lo[0, :]))\n result_convol = np.zeros(shape)\n a = self.spatial_model.calc(pars[self._spatial_pars], self.xx_lo.ravel(), self.xx_hi.ravel(),\n self.yy_lo.ravel(), self.yy_hi.ravel()).reshape(self.xx_lo.shape)\n # Convolve the spatial model * exposure by the psf\n for ind_E in range(shape[0]):\n result_convol[ind_E, :, :] = self._fftconvolve(a * self.exposure.data[ind_E, :, :],\n self.psf.data[ind_E, :, :] /\n (self.psf.data[ind_E, :, :].sum()), mode='same')\n\n spectral_1d = self.spectral_model.calc(pars[self._spectral_pars], self.ee_lo, self.ee_hi)\n if not self.select_region:\n _spatial = result_convol.ravel()\n _spectral = (spectral_1d.reshape(len(self.ee_lo), 1, 1) * np.ones_like(self.xx_lo)).ravel()\n else:\n _spatial = result_convol[self.index_selected_region].ravel()\n _spectral = (spectral_1d.reshape(len(self.ee_lo), 1, 1) * np.ones_like(self.xx_lo))[\n self.index_selected_region].ravel()\n\n else:\n _spatial = self.spatial_model.calc(pars[self._spatial_pars], xlo, xhi, ylo, yhi)\n _spectral = self.spectral_model.calc(pars[self._spectral_pars], elo, ehi)\n return _spatial * _spectral\n\n\nclass CombinedModel3DIntConvolveEdisp(ArithmeticModel):\n \"\"\"\n Combined spatial and spectral 3D model taking into account the energy resolution\n with the possibility to convolve the spatial model*exposure by the PSF.\n\n Parameters\n ----------\n coord: `~astropy.coordinates.SkyCoord`\n Position of the edges of the pixel on the sky.\n energies: `~astropy.units.Quantity`\n Reconstructed energy used for the counts cube\n use_psf: bool\n if true will convolve the spatial model by the psf\n exposure: `~gammapy.cube.SkyCube`\n Exposure Cube\n psf: `~gammapy.cube.SkyCube`\n Psf cube\n spatial_model: `~sherpa.models`\n spatial sherpa model\n spectral_model: `~sherpa.models`\n spectral sherpa model\n edisp: `~numpy.array`\n 2D array in (Ereco,Etrue) for the energy dispersion\n select_region: True\n If True select only the points of the region of interest for the fit\n index_selected_region: tuple\n tuple of three `~numpy.ndarray` containing the indexes of the points of the Cube to keep in the fit (Energy, x, y)\n\n \"\"\"\n\n def __init__(self, coord, energies, name='cube-model', use_psf=True, exposure=None, psf=None, spatial_model=None,\n spectral_model=None, edisp=None, select_region=False, index_selected_region=None):\n from scipy import signal\n self.spatial_model = spatial_model\n self.spectral_model = spectral_model\n xx = coord.data.lon.degree\n yy = coord.data.lat.degree\n self.xx_lo = xx[0:-1, 1:]\n self.xx_hi = xx[0:-1, 0:-1]\n self.yy_lo = yy[0:-1, 0:-1]\n self.yy_hi = yy[1:, 0:-1]\n self.ee_lo = energies[:-1]\n self.ee_hi = energies[1:]\n self.use_psf = use_psf\n self.exposure = exposure\n self.psf = psf\n self.edisp = edisp\n self.true_energy = EnergyBounds(self.exposure.energies(\"edges\"))\n self.dim_x, self.dim_y, self.dim_Ereco, 
self.dim_Etrue = len(self.xx_lo[:, 0]), len(self.xx_lo[0, :]), \\\n len(self.ee_lo), len(self.true_energy) - 1\n self._fftconvolve = signal.fftconvolve\n # The shape of the counts cube in (Ereco,x,y)\n self.shape_data = (self.dim_Ereco, self.dim_x, self.dim_y)\n # Array that will store the result after multipliying by the energy resolution in (x,y,Etrue,Ereco)\n self.convolve_edisp = np.zeros(\n (self.dim_x, self.dim_y, self.dim_Etrue, self.dim_Ereco))\n self.select_region = select_region\n self.index_selected_region = index_selected_region\n\n # Fix spectral ampl parameter\n spectral_model.ampl = 1\n spectral_model.ampl.freeze()\n\n pars = []\n for _ in spatial_model.pars + spectral_model.pars:\n setattr(self, _.name, _)\n pars.append(_)\n\n self._spatial_pars = slice(0, len(spatial_model.pars))\n self._spectral_pars = slice(len(spatial_model.pars), len(pars))\n ArithmeticModel.__init__(self, name, pars)\n\n def calc(self, pars, elo, xlo, ylo, ehi, xhi, yhi):\n etrue_centers = self.true_energy.log_centers\n if self.use_psf:\n # Convolve the spatial model * exposure by the psf in etrue\n spatial = np.zeros((self.dim_Etrue, self.dim_x, self.dim_y))\n a = self.spatial_model.calc(pars[self._spatial_pars], self.xx_lo.ravel(), self.xx_hi.ravel(),\n self.yy_lo.ravel(), self.yy_hi.ravel()).reshape(self.xx_lo.shape)\n for ind_E in range(self.dim_Etrue):\n spatial[ind_E, :, :] = self._fftconvolve(a * self.exposure.data[ind_E, :, :],\n self.psf.data[ind_E, :, :] /\n (self.psf.data[ind_E, :, :].sum()), mode='same')\n # To avoid nan value for the true energy values asked by the user for which the PSF is not defined.\n # The interpolation gives nan when you are outside the range and when you sum over all the true energy bin to calculate the expected\n # number of counts in the reconstucted energy bin, you get nan whereas you just want the bin in true energy\n # for which the PSF is not defined to not count in the sum.\n spatial[np.isnan(spatial)] = 0\n else:\n spatial_2d = self.spatial_model.calc(pars[self._spatial_pars], self.xx_lo.ravel(), self.xx_hi.ravel(),\n self.yy_lo.ravel(), self.yy_hi.ravel()).reshape(self.xx_lo.shape)\n spatial = np.tile(spatial_2d, (len(etrue_centers), 1, 1))\n # Calculate the spectral model in etrue\n spectral_1d = self.spectral_model.calc(pars[self._spectral_pars], etrue_centers)\n spectral = spectral_1d.reshape(len(etrue_centers), 1, 1) * np.ones_like(self.xx_lo)\n\n # Convolve by the energy resolution\n etrue_band = self.true_energy.bands\n for ireco in range(self.dim_Ereco):\n self.convolve_edisp[:, :, :, ireco] = (np.rollaxis(spatial, 0, spatial.ndim)\n * np.rollaxis(spectral, 0, spectral.ndim)\n * self.edisp[:, ireco] * etrue_band)\n # Integration in etrue\n sum_model = np.sum(self.convolve_edisp, axis=2)\n model = np.rollaxis(sum_model, -1, 0)\n if not self.select_region:\n return model.ravel()\n else:\n return model[self.index_selected_region].ravel()\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom astropy.coordinates import Angle\nfrom ...extern.pathlib import Path\nfrom ...utils.testing import requires_dependency, requires_data\nfrom ...data import DataStore\nfrom ...image import SkyImage\nfrom ...background import OffDataBackgroundMaker\nfrom .. import StackedObsCubeMaker\nfrom .. 
import SkyCube\n\n\ndef make_empty_cube(emin, emax, enumbins, data_unit=''):\n \"\"\"Make a reference cube at the Crab nebula position for testing.\"\"\"\n return SkyCube.empty(\n emin=emin, emax=emax, enumbins=enumbins, eunit='TeV', mode='edges',\n nxpix=250, nypix=250, binsz=0.02,\n xref=184.55974014, yref=-5.78918015,\n proj='TAN', coordsys='GAL', unit=data_unit,\n )\n\n\n@requires_dependency('reproject')\n@requires_data('gammapy-extra')\ndef test_cube_pipe(tmpdir):\n \"\"\"Example how to make a Cube analysis from a 2D background model.\"\"\"\n tmpdir = str(tmpdir)\n outdir = tmpdir\n outdir2 = outdir + '/background'\n Path(outdir2).mkdir()\n\n ds = DataStore.from_dir(\"$GAMMAPY_EXTRA/datasets/hess-crab4-hd-hap-prod2\")\n ds.copy_obs(ds.obs_table, tmpdir)\n data_store = DataStore.from_dir(tmpdir)\n\n # Create the background model from the 4 Crab observations\n bgmaker = OffDataBackgroundMaker(data_store, outdir=outdir2)\n\n bgmaker.select_observations(selection='all')\n bgmaker.group_observations()\n bgmaker.make_model(\"2D\")\n bgmaker.save_models(\"2D\")\n\n fn = outdir2 + '/group-def.fits'\n\n # New hdu table that contains the link to the background model\n hdu_index_table = bgmaker.make_total_index_table(\n data_store=data_store,\n modeltype='2D',\n out_dir_background_model=outdir2,\n filename_obs_group_table=fn\n )\n\n fn = outdir + '/hdu-index.fits.gz'\n hdu_index_table.write(fn, overwrite=True)\n\n offset_band = Angle([0, 2.49], 'deg')\n\n ref_cube_images = make_empty_cube(emin=0.5, emax=100, enumbins=5)\n ref_cube_exposure = make_empty_cube(emin=0.1, emax=120, enumbins=80, data_unit=\"m2 s\")\n ref_cube_skymask = make_empty_cube(emin=0.5, emax=100, enumbins=5)\n\n data_store = DataStore.from_dir(tmpdir)\n\n refheader = ref_cube_images.sky_image_ref.to_image_hdu().header\n exclusion_mask = SkyImage.read('$GAMMAPY_EXTRA/datasets/exclusion_masks/tevcat_exclusion.fits')\n exclusion_mask = exclusion_mask.reproject(reference=refheader)\n ref_cube_skymask.data = np.tile(exclusion_mask.data, (5, 1, 1))\n\n # TODO: Problem with the load psftable for one of the run that is not implemented yet...\n data_store.hdu_table.remove_row(14)\n\n # Cube Analysis\n cube_maker = StackedObsCubeMaker(\n empty_cube_images=ref_cube_images, empty_exposure_cube=ref_cube_exposure,\n offset_band=offset_band, data_store=data_store, obs_table=data_store.obs_table,\n exclusion_mask=ref_cube_skymask, save_bkg_scale=True,\n )\n cube_maker.make_cubes(make_background_image=True, radius=10.)\n\n assert_allclose(cube_maker.counts_cube.data.sum(), 4898.0, atol=3)\n assert_allclose(cube_maker.bkg_cube.data.sum(), 4260.120595293951, atol=3)\n\n # Note: the tolerance in the following assert is low to pass here:\n # https://travis-ci.org/gammapy/gammapy/jobs/234062946#L2112\n\n cube_maker.significance_cube.data[np.where(np.isinf(cube_maker.significance_cube.data))] = 0\n actual = np.nansum(cube_maker.significance_cube.data)\n assert_allclose(actual, 65777.69960178432, rtol=0.1)\n\n actual = cube_maker.excess_cube.data.sum()\n assert_allclose(actual, 637.8794047060486, rtol=1e-2)\n\n actual = np.nansum(cube_maker.exposure_cube.data.to('m2 s').value)\n assert_allclose(actual, 5399539029926424.0, rtol=1e-2)\n\n assert_allclose(cube_maker.table_bkg_scale[0][\"bkg_scale\"], 0.8996676356375191, rtol=0.03)\n\n assert len(cube_maker.counts_cube.energies()) == 5\n assert len(cube_maker.bkg_cube.energies()) == 5\n assert len(cube_maker.significance_cube.energies()) == 5\n assert len(cube_maker.excess_cube.energies()) == 5\n assert 
len(cube_maker.exposure_cube.energies()) == 80\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom numpy.testing import assert_allclose\nfrom astropy.coordinates import Angle, SkyCoord\nimport pytest\nfrom regions import CircleSkyRegion\nfrom ...utils.testing import requires_dependency, requires_data\nfrom ...data import EventList, EventListDataset, EventListDatasetChecker\nfrom ...datasets import gammapy_extra\n\n\n@requires_data('gammapy-extra')\nclass TestEventListHESS:\n def setup(self):\n filename = '$GAMMAPY_EXTRA/test_datasets/unbundled/hess/run_0023037_hard_eventlist.fits.gz'\n self.events = EventList.read(filename)\n\n def test_basics(self):\n assert 'EventList' in str(self.events)\n\n assert len(self.events.table) == 49\n assert self.events.time[0].iso == '2004-10-14 00:08:39.214'\n assert self.events.radec[0].to_string() == '82.7068 19.8186'\n assert self.events.galactic[0].to_string(precision=2) == '185.96 -7.69'\n assert self.events.altaz[0].to_string() == '46.2059 31.2001'\n assert_allclose(self.events.offset[0].value, 1.904497742652893, rtol=1e-5)\n assert '{:1.5f}'.format(self.events.energy[0]) == '11.64355 TeV'\n\n lon, lat, height = self.events.observatory_earth_location.to_geodetic()\n assert '{:1.5f}'.format(lon) == '16.50022 deg'\n assert '{:1.5f}'.format(lat) == '-23.27178 deg'\n assert '{:1.5f}'.format(height) == '1835.00000 m'\n\n def test_stack(self):\n event_lists = [self.events] * 3\n stacked_list = EventList.stack(event_lists)\n assert len(stacked_list.table) == 49 * 3\n\n @requires_dependency('matplotlib')\n def test_peek(self):\n self.events.peek()\n\n @requires_dependency('matplotlib')\n def test_plot_offset2_distribution(self):\n self.events.plot_offset2_distribution()\n\n\n@requires_data('gammapy-extra')\nclass TestEventListFermi:\n def setup(self):\n filename = '$GAMMAPY_EXTRA/datasets/fermi_2fhl/2fhl_events.fits.gz'\n self.events = EventList.read(filename)\n\n def test_basics(self):\n assert 'EventList' in str(self.events)\n\n\n@requires_data('gammapy-extra')\ndef test_EventListDataset():\n filename = gammapy_extra.filename('test_datasets/unbundled/hess/run_0023037_hard_eventlist.fits.gz')\n dset = EventListDataset.read(filename)\n assert 'Event list dataset info' in str(dset)\n\n assert len(dset.event_list.table) == 49\n # TODO: test all methods ... 
get ~ 100% test coverage\n # even without running the following test.\n\n\[email protected]\n@requires_data('gammapy-extra')\ndef test_EventListDatasetChecker():\n filename = gammapy_extra.filename('test_datasets/unbundled/hess/run_0023037_hard_eventlist.fits.gz')\n dset = EventListDataset.read(filename)\n checker = EventListDatasetChecker(dset)\n checker.run('all')\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_equal\nimport astropy.units as u\nfrom ..nddata import NDDataArray, BinnedDataAxis, DataAxis, sqrt_space\n\npytest.importorskip('scipy')\n\n\[email protected](scope='session')\ndef axis_x():\n return DataAxis([1, 3, 6], name='x')\n\n\[email protected](scope='session')\ndef axis_energy():\n return BinnedDataAxis.logspace(1, 10, 3, unit=u.TeV, name='energy', interpolation_mode='log')\n\n\[email protected](scope='session')\ndef axis_offset():\n return DataAxis([0.2, 0.3, 0.4, 0.5] * u.deg, name='offset')\n\n\[email protected](scope='session')\ndef nddata_1d(axis_x):\n return NDDataArray(\n axes=[axis_x],\n data=[1, -1, 2],\n interp_kwargs=dict(bounds_error=False, fill_value=None),\n )\n\n\[email protected](scope='session')\ndef nddata_2d(axis_energy, axis_offset):\n return NDDataArray(\n axes=[axis_energy, axis_offset],\n data=np.arange(12).reshape(3, 4) * u.cm * u.cm,\n interp_kwargs=dict(bounds_error=False, fill_value=None),\n )\n\n\nclass TestNDDataArray:\n\n def test_init_error(self):\n with pytest.raises(ValueError):\n NDDataArray(\n axes=[DataAxis([1, 3, 6], name='x')],\n data=np.arange(8).reshape(4, 2),\n )\n\n def test_str(self, nddata_1d):\n assert 'x' in str(nddata_1d)\n\n def test_find_node_1d(self, nddata_1d):\n node = nddata_1d.find_node(x=4)\n assert_equal(node, [1])\n\n def test_find_node_2d(self, nddata_2d):\n node = nddata_2d.find_node(energy=4 * u.TeV, offset=0.4 * u.deg)\n assert_equal(node[0], [1])\n assert_equal(node[1], [2])\n\n def test_evaluate_shape_1d(self, nddata_1d):\n # Scalar input\n out = nddata_1d.evaluate(x=1.5)\n assert out.shape == ()\n\n # Array input\n out = nddata_1d.evaluate(x=[0, 1.5])\n assert out.shape == (2,)\n\n # No input\n out = nddata_1d.evaluate()\n assert out.shape == (3,)\n\n def test_evaluate_2d(self, nddata_2d):\n # Case 1: axis1 = scalar, axis2 = array\n out = nddata_2d.evaluate(energy=0 * u.TeV, offset=[0, 0] * u.deg)\n assert out.shape == (2,)\n\n # Case 2: axis1 = array, axis2 = array\n out = nddata_2d.evaluate(energy=[0, 0, 0] * u.TeV, offset=[0, 0] * u.deg)\n assert out.shape == (3, 2)\n\n # Case 3: axis1 array, axis2 = 2Darray\n out = nddata_2d.evaluate(energy=np.zeros((12, 3)) * u.TeV, offset=[0, 0] * u.deg)\n assert out.shape == (12, 3, 2)\n\n def test_evaluate_1d_linear(self, nddata_1d):\n # This should test all cases of interest:\n # - evaluate outside node array, i.e. 
extrapolate: x=0\n # - evaluate on a given node: x=1\n # - evaluate in between nodes: x=2\n # - check that values < 0 are clipped to 0: x=3\n out = nddata_1d.evaluate(x=[0, 1, 2, 3], method='linear')\n assert_allclose(out, [2, 1, 0, 0])\n\n def test_evaluate_on_nodes(self, nddata_2d):\n # evaluating on interpolation nodes should give back the interpolation values\n out = nddata_2d.evaluate()\n assert_allclose(out, nddata_2d.data)\n\n\n# TODO: implement tests!\nclass TestDataAxis:\n pass\n\n\n# TODO: implement tests!\nclass TestBinnedDataAxis:\n pass\n\n\ndef test_sqrt_space():\n values = sqrt_space(0, 2, 5)\n\n assert_allclose(values, [0., 1., 1.41421356, 1.73205081, 2.])\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nimport astropy.units as u\nfrom ...utils.testing import assert_quantity_allclose\nfrom ...utils.testing import requires_dependency, requires_data\nfrom ..models import PowerLaw\nfrom .. import SpectrumObservation, SpectrumFitResult\n\n\n@requires_dependency('scipy')\n@requires_data('gammapy-extra')\nclass TestSpectrumFitResult:\n def setup(self):\n filename = \"$GAMMAPY_EXTRA/datasets/hess-crab4_pha/pha_obs23592.fits\"\n self.obs = SpectrumObservation.read(filename)\n self.best_fit_model = PowerLaw(index=2 * u.Unit(''),\n amplitude=1e-11 * u.Unit('cm-2 s-1 TeV-1'),\n reference=1 * u.TeV)\n self.npred = self.obs.predicted_counts(self.best_fit_model).data.data.value\n covar_axis = ['index', 'amplitude']\n covar = np.diag([0.1 ** 2, 1e-12 ** 2])\n self.best_fit_model.parameters.set_parameter_covariance(covar, covar_axis)\n self.fit_range = [0.1, 50] * u.TeV\n self.fit_result = SpectrumFitResult(\n model=self.best_fit_model,\n fit_range=self.fit_range,\n statname='wstat',\n statval=42,\n npred_src=self.npred,\n npred_bkg=self.npred * 0.5,\n obs=self.obs,\n )\n\n @requires_dependency('uncertainties')\n def test_basic(self):\n assert 'PowerLaw' in str(self.fit_result)\n assert 'index' in self.fit_result.to_table().colnames\n\n @requires_dependency('yaml')\n def test_io(self, tmpdir):\n filename = tmpdir / 'test.yaml'\n self.fit_result.to_yaml(filename)\n read_result = SpectrumFitResult.from_yaml(filename)\n test_e = 12.5 * u.TeV\n assert_quantity_allclose(self.fit_result.model(test_e),\n read_result.model(test_e))\n\n @requires_dependency('matplotlib')\n def test_plot(self):\n self.fit_result.plot()\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Make an image from a source catalog, or simulated catalog, e.g 1FHL 2FGL etc\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom collections import OrderedDict\nimport numpy as np\nfrom astropy.wcs import WCS\nfrom astropy.units import Quantity\nfrom astropy import units as u\nfrom astropy.table import Table\nfrom .core import SkyImage\nfrom .lists import SkyImageList\n\n__all__ = [\n 'CatalogImageEstimator',\n]\n\nBBOX_DELTA2D_PIX = 5\n\n\nclass CatalogImageEstimator(object):\n \"\"\"Compute model image for given energy band from a catalog.\n\n Sources are only filled when their center lies within the image boundaries.\n\n Parameters\n ----------\n reference : `~gammapy.image.SkyImage`\n Reference sky image.\n emin : `~astropy.units.Quantity`\n Lower bound of energy range.\n emax : `~astropy.units.Quantity`\n Upper bound of energy range.\n\n Examples\n --------\n\n Here is an example how to compute a flux image from a catalog:\n\n from 
astropy import units as u\n from gammapy.image import SkyImage, CatalogImageEstimator\n from gammapy.catalog import SourceCatalogGammaCat\n\n reference = SkyImage.empty(xref=265, yref=-1.5, nxpix=201,\n nypix=201, binsz=0.04)\n\n image_estimator = CatalogImageEstimator(reference=reference,\n emin=1 * u.TeV,\n emax=10 * u.TeV)\n\n catalog = SourceCatalogGammaCat()\n result = image_estimator.run(catalog)\n result['flux'].show()\n\n Currently the `CatalogImageEstimator` class does not support to compute model\n cubes of catalogs. But this can achieved with only a little more of python code:\n\n from astropy import units as u\n from gammapy.image import CatalogImageEstimator, SkyImage\n from gammapy.cube import SkyCube\n from gammapy.catalog import SourceCatalogGammaCat\n from gammapy.utils.energy import EnergyBounds\n\n reference = SkyImage.empty(xref=265, yref=-1.5, nxpix=201,\n nypix=201, binsz=0.04)\n\n energies = EnergyBounds.equal_log_spacing(1 * u.TeV, 100 * u.TeV, 3)\n\n flux_cube = SkyCube.empty_like(reference=reference, energies=energies)\n\n catalog = SourceCatalogGammaCat()\n\n for idx in range(energies.size - 1):\n image_estimator = CatalogImageEstimator(reference=reference,\n emin=energies[idx],\n emax=energies[idx + 1])\n\n result = image_estimator.run(catalog)\n flux_cube.data[idx, :, :] = result['flux'].data\n\n flux_cube.show()\n\n \"\"\"\n\n def __init__(self, reference, emin, emax):\n self.reference = reference\n self.parameters = OrderedDict(emin=emin, emax=emax)\n\n def flux(self, catalog):\n \"\"\"Compute flux image from catalog.\n\n Sources are only filled when their center lies within the image boundaries.\n\n Parameters\n ----------\n catalog : `~gammapy.catalog.SourceCatalog`\n Source catalog instance.\n\n Returns\n -------\n image : `~gammapy.image.SkyImage`\n Flux sky image.\n \"\"\"\n from ..catalog.gammacat import NoDataAvailableError\n p = self.parameters\n image = SkyImage.empty_like(self.reference)\n\n selection = catalog.select_image_region(image)\n\n for source in selection:\n try:\n spatial_model = source.spatial_model(emin=p['emin'], emax=p['emax'])\n # TODO: remove this error handling and add selection to SourceCatalog\n # class\n except (NotImplementedError, NoDataAvailableError):\n continue\n\n if source.is_pointlike:\n # use 5 pixel bbox for point-like models\n size = BBOX_DELTA2D_PIX * image.wcs_pixel_scale().to('deg')\n else:\n height, width = np.diff(spatial_model.bounding_box)\n size = (float(height) * u.deg, float(width) * u.deg)\n\n cutout = image.cutout(source.position, size=size)\n\n if source.is_pointlike:\n solid_angle = 1.\n else:\n solid_angle = cutout.solid_angle().to('deg2').value\n\n # evaluate model on smaller image and paste\n c = cutout.coordinates()\n l, b = c.galactic.l.wrap_at('180d'), c.galactic.b\n cutout.data = spatial_model(l.deg, b.deg) * solid_angle\n image.paste(cutout)\n\n return image\n\n def run(self, catalog, which='flux'):\n \"\"\"Run catalog image estimator.\n\n Parameters\n ----------\n catalog : `~gammapy.catalog.SourceCatalog`\n Source catalog instance.\n\n Returns\n -------\n sky_images : `~gammapy.image.SkyImageList`\n List of sky images\n \"\"\"\n result = SkyImageList()\n\n # TODO: add input image list and computed derived quantities such as\n # excess, psf convolution etc.\n if 'flux' in which:\n result['flux'] = self.flux(catalog)\n\n return result\n" ]
[ [ "numpy.testing.assert_allclose" ], [ "numpy.empty", "numpy.log", "numpy.select" ], [ "numpy.rollaxis", "numpy.log", "numpy.ones_like", "numpy.sqrt", "numpy.isnan", "numpy.arange", "numpy.zeros", "numpy.sum" ], [ "numpy.isinf", "numpy.nansum", "numpy.tile", "numpy.testing.assert_allclose" ], [ "numpy.testing.assert_allclose" ], [ "numpy.testing.assert_equal", "numpy.arange", "numpy.zeros", "numpy.testing.assert_allclose" ], [ "numpy.diag" ], [ "numpy.diff" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joelfrederico/mytools
[ "7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f" ]
[ "scisalt/matplotlib/plot.py" ]
[ "import os as _os\n_on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'\nif not _on_rtd:\n import matplotlib.pyplot as _plt\n import numpy as _np\n\nfrom .setup_axes import setup_axes as _setup_axes\n\n\ndef plot(*args, ax=None, **kwargs):\n \"\"\"\n Plots but automatically resizes x axis.\n\n .. versionadded:: 1.4\n\n Parameters\n ----------\n args\n Passed on to :meth:`matplotlib.axis.Axis.plot`.\n ax : :class:`matplotlib.axis.Axis`, optional\n The axis to plot to.\n kwargs\n Passed on to :meth:`matplotlib.axis.Axis.plot`.\n\n \"\"\"\n if ax is None:\n fig, ax = _setup_axes()\n\n pl = ax.plot(*args, **kwargs)\n\n if _np.shape(args)[0] > 1:\n if type(args[1]) is not str:\n min_x = min(args[0])\n max_x = max(args[0])\n ax.set_xlim((min_x, max_x))\n\n return pl\n" ]
[ [ "numpy.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
purnendu91/allennlp
[ "7bdc142f3fba9b4b751be4de51299858613f134f", "5b513d4f7c7365ac33b3cbc557506b46a9b50450" ]
[ "allennlp/data/fields/sequence_label_field.py", "tests/models/reading_comprehension/bidaf_test.py" ]
[ "from typing import Dict, List, Union, Set\nimport logging\n\nfrom overrides import overrides\nimport torch\nfrom torch.autograd import Variable\n\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common.util import pad_sequence_to_length\nfrom allennlp.data.fields.field import Field\nfrom allennlp.data.fields.sequence_field import SequenceField\nfrom allennlp.data.vocabulary import Vocabulary\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\nclass SequenceLabelField(Field[torch.Tensor]):\n \"\"\"\n A ``SequenceLabelField`` assigns a categorical label to each element in a\n :class:`~allennlp.data.fields.sequence_field.SequenceField`.\n Because it's a labeling of some other field, we take that field as input here, and we use it to\n determine our padding and other things.\n\n This field will get converted into a list of integer class ids, representing the correct class\n for each element in the sequence.\n\n Parameters\n ----------\n labels : ``Union[List[str], List[int]]``\n A sequence of categorical labels, encoded as strings or integers. These could be POS tags like [NN,\n JJ, ...], BIO tags like [B-PERS, I-PERS, O, O, ...], or any other categorical tag sequence. If the\n labels are encoded as integers, they will not be indexed using a vocab.\n sequence_field : ``SequenceField``\n A field containing the sequence that this ``SequenceLabelField`` is labeling. Most often, this is a\n ``TextField``, for tagging individual tokens in a sentence.\n label_namespace : ``str``, optional (default='labels')\n The namespace to use for converting tag strings into integers. We convert tag strings to\n integers for you, and this parameter tells the ``Vocabulary`` object which mapping from\n strings to integers to use (so that \"O\" as a tag doesn't get the same id as \"O\" as a word).\n \"\"\"\n # It is possible that users want to use this field with a namespace which uses OOV/PAD tokens.\n # This warning will be repeated for every instantiation of this class (i.e for every data\n # instance), spewing a lot of warnings so this class variable is used to only log a single\n # warning per namespace.\n _already_warned_namespaces: Set[str] = set()\n\n def __init__(self,\n labels: Union[List[str], List[int]],\n sequence_field: SequenceField,\n label_namespace: str = 'labels') -> None:\n self.labels = labels\n self.sequence_field = sequence_field\n self._label_namespace = label_namespace\n self._indexed_labels = None\n self._maybe_warn_for_namespace(label_namespace)\n if len(labels) != sequence_field.sequence_length():\n raise ConfigurationError(\"Label length and sequence length \"\n \"don't match: %d and %d\" % (len(labels), sequence_field.sequence_length()))\n\n if all([isinstance(x, int) for x in labels]):\n self._indexed_labels = labels\n\n elif not all([isinstance(x, str) for x in labels]):\n raise ConfigurationError(\"SequenceLabelFields must be passed either all \"\n \"strings or all ints. Found labels {} with \"\n \"types: {}.\".format(labels, [type(x) for x in labels]))\n\n def _maybe_warn_for_namespace(self, label_namespace: str) -> None:\n if not (self._label_namespace.endswith(\"labels\") or self._label_namespace.endswith(\"tags\")):\n if label_namespace not in self._already_warned_namespaces:\n logger.warning(\"Your label namespace was '%s'. We recommend you use a namespace \"\n \"ending with 'labels' or 'tags', so we don't add UNK and PAD tokens by \"\n \"default to your vocabulary. 
See documentation for \"\n \"`non_padded_namespaces` parameter in Vocabulary.\",\n self._label_namespace)\n self._already_warned_namespaces.add(label_namespace)\n\n @overrides\n def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):\n if self._indexed_labels is None:\n for label in self.labels:\n counter[self._label_namespace][label] += 1 # type: ignore\n\n @overrides\n def index(self, vocab: Vocabulary):\n if self._indexed_labels is None:\n self._indexed_labels = [vocab.get_token_index(label, self._label_namespace) # type: ignore\n for label in self.labels]\n\n @overrides\n def get_padding_lengths(self) -> Dict[str, int]:\n return {'num_tokens': self.sequence_field.sequence_length()}\n\n @overrides\n def as_tensor(self,\n padding_lengths: Dict[str, int],\n cuda_device: int = -1,\n for_training: bool = True) -> torch.Tensor:\n desired_num_tokens = padding_lengths['num_tokens']\n padded_tags = pad_sequence_to_length(self._indexed_labels, desired_num_tokens)\n tensor = Variable(torch.LongTensor(padded_tags), volatile=not for_training)\n return tensor if cuda_device == -1 else tensor.cuda(cuda_device)\n\n @overrides\n def empty_field(self): # pylint: disable=no-self-use\n # pylint: disable=protected-access\n sequence_label_field = SequenceLabelField([], self.sequence_field.empty_field())\n sequence_label_field._indexed_labels = []\n return sequence_label_field\n", "# pylint: disable=no-self-use,invalid-name\nfrom flaky import flaky\nimport pytest\nimport numpy\nfrom numpy.testing import assert_almost_equal\nimport torch\nfrom torch.autograd import Variable\n\nfrom allennlp.common import Params\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common.testing import ModelTestCase\nfrom allennlp.data import DatasetReader, Vocabulary\nfrom allennlp.data.dataset import Batch\nfrom allennlp.models import BidirectionalAttentionFlow, Model\n\n\nclass BidirectionalAttentionFlowTest(ModelTestCase):\n def setUp(self):\n super(BidirectionalAttentionFlowTest, self).setUp()\n self.set_up_model('tests/fixtures/bidaf/experiment.json', 'tests/fixtures/data/squad.json')\n\n def test_forward_pass_runs_correctly(self):\n batch = Batch(self.instances)\n batch.index_instances(self.vocab)\n training_tensors = batch.as_tensor_dict()\n output_dict = self.model(**training_tensors)\n\n metrics = self.model.get_metrics(reset=True)\n # We've set up the data such that there's a fake answer that consists of the whole\n # paragraph. _Any_ valid prediction for that question should produce an F1 of greater than\n # zero, while if we somehow haven't been able to load the evaluation data, or there was an\n # error with using the evaluation script, this will fail. This makes sure that we've\n # loaded the evaluation data correctly and have hooked things up to the official evaluation\n # script.\n assert metrics['f1'] > 0\n\n span_start_probs = output_dict['span_start_probs'][0].data.numpy()\n span_end_probs = output_dict['span_start_probs'][0].data.numpy()\n assert_almost_equal(numpy.sum(span_start_probs, -1), 1, decimal=6)\n assert_almost_equal(numpy.sum(span_end_probs, -1), 1, decimal=6)\n span_start, span_end = tuple(output_dict['best_span'][0].data.numpy())\n assert span_start >= 0\n assert span_start <= span_end\n assert span_end < self.instances[0].fields['passage'].sequence_length()\n assert isinstance(output_dict['best_span_str'][0], str)\n\n # Some recent efficiency changes (using bmm for `weighted_sum`, the more efficient\n # `masked_softmax`...) 
have made this _very_ flaky...\n @flaky(max_runs=5)\n def test_model_can_train_save_and_load(self):\n self.ensure_model_can_train_save_and_load(self.param_file, tolerance=1e-4)\n\n @flaky\n def test_batch_predictions_are_consistent(self):\n # The CNN encoder has problems with this kind of test - it's not properly masked yet, so\n # changing the amount of padding in the batch will result in small differences in the\n # output of the encoder. Because BiDAF is so deep, these differences get magnified through\n # the network and make this test impossible. So, we'll remove the CNN encoder entirely\n # from the model for this test. If/when we fix the CNN encoder to work correctly with\n # masking, we can change this back to how the other models run this test, with just a\n # single line.\n # pylint: disable=protected-access,attribute-defined-outside-init\n\n # Save some state.\n saved_model = self.model\n saved_instances = self.instances\n\n # Modify the state, run the test with modified state.\n params = Params.from_file(self.param_file)\n reader = DatasetReader.from_params(params['dataset_reader'])\n reader._token_indexers = {'tokens': reader._token_indexers['tokens']}\n self.instances = reader.read('tests/fixtures/data/squad.json')\n vocab = Vocabulary.from_instances(self.instances)\n for instance in self.instances:\n instance.index_fields(vocab)\n del params['model']['text_field_embedder']['token_characters']\n params['model']['phrase_layer']['input_size'] = 2\n self.model = Model.from_params(vocab, params['model'])\n\n self.ensure_batch_predictions_are_consistent()\n\n # Restore the state.\n self.model = saved_model\n self.instances = saved_instances\n\n def test_get_best_span(self):\n # pylint: disable=protected-access\n\n span_begin_probs = Variable(torch.FloatTensor([[0.1, 0.3, 0.05, 0.3, 0.25]])).log()\n span_end_probs = Variable(torch.FloatTensor([[0.65, 0.05, 0.2, 0.05, 0.05]])).log()\n begin_end_idxs = BidirectionalAttentionFlow._get_best_span(span_begin_probs, span_end_probs)\n assert_almost_equal(begin_end_idxs.data.numpy(), [[0, 0]])\n\n # When we were using exlcusive span ends, this was an edge case of the dynamic program.\n # We're keeping the test to make sure we get it right now, after the switch in inclusive\n # span end. 
The best answer is (1, 1).\n span_begin_probs = Variable(torch.FloatTensor([[0.4, 0.5, 0.1]])).log()\n span_end_probs = Variable(torch.FloatTensor([[0.3, 0.6, 0.1]])).log()\n begin_end_idxs = BidirectionalAttentionFlow._get_best_span(span_begin_probs, span_end_probs)\n assert_almost_equal(begin_end_idxs.data.numpy(), [[1, 1]])\n\n # Another instance that used to be an edge case.\n span_begin_probs = Variable(torch.FloatTensor([[0.8, 0.1, 0.1]])).log()\n span_end_probs = Variable(torch.FloatTensor([[0.8, 0.1, 0.1]])).log()\n begin_end_idxs = BidirectionalAttentionFlow._get_best_span(span_begin_probs, span_end_probs)\n assert_almost_equal(begin_end_idxs.data.numpy(), [[0, 0]])\n\n span_begin_probs = Variable(torch.FloatTensor([[0.1, 0.2, 0.05, 0.3, 0.25]])).log()\n span_end_probs = Variable(torch.FloatTensor([[0.1, 0.2, 0.5, 0.05, 0.15]])).log()\n begin_end_idxs = BidirectionalAttentionFlow._get_best_span(span_begin_probs, span_end_probs)\n assert_almost_equal(begin_end_idxs.data.numpy(), [[1, 2]])\n\n def test_mismatching_dimensions_throws_configuration_error(self):\n params = Params.from_file(self.param_file)\n # Make the phrase layer wrong - it should be 10 to match\n # the embedding + char cnn dimensions.\n params[\"model\"][\"phrase_layer\"][\"input_size\"] = 12\n with pytest.raises(ConfigurationError):\n Model.from_params(self.vocab, params.pop(\"model\"))\n\n params = Params.from_file(self.param_file)\n # Make the modeling layer input_dimension wrong - it should be 40 to match\n # 4 * output_dim of the phrase_layer.\n params[\"model\"][\"phrase_layer\"][\"input_size\"] = 30\n with pytest.raises(ConfigurationError):\n Model.from_params(self.vocab, params.pop(\"model\"))\n\n params = Params.from_file(self.param_file)\n # Make the modeling layer input_dimension wrong - it should be 70 to match\n # 4 * phrase_layer.output_dim + 3 * modeling_layer.output_dim.\n params[\"model\"][\"span_end_encoder\"][\"input_size\"] = 50\n with pytest.raises(ConfigurationError):\n Model.from_params(self.vocab, params.pop(\"model\"))\n" ]
[ [ "torch.LongTensor" ], [ "torch.FloatTensor", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sssssch/jupyter-examples
[ "cf9e26e22dcfa263bcd26323527911cdbcc2cd61", "cf9e26e22dcfa263bcd26323527911cdbcc2cd61" ]
[ "Project_google_task_usage/task_uasge_500_preprocess/data_inverse.py", "Project_Alibaba_workload/E50_Alibaba_cluster_predict_compare/Train_20000/Alibaba_Realworld_predict/alibaba_realworld_predict.py" ]
[ "# -*-coding:utf-8-*-\nimport pandas as pd\nfrom numpy import *\n\ndataset = pd.read_csv(\n 'test_data.csv', header=None)\ndataset = round(dataset, 8)\nList_data = mat(dataset)\nInverse = List_data.T\nprint(Inverse)\nname = [\n 'cpu',\n 'cmui',\n 'amui',\n 'upcmui',\n 'tpcmui',\n 'mmui',\n 'mditi',\n 'mldsui',\n 'mcui',\n 'scui'\n]\n\ntest = pd.DataFrame(columns=name, data=Inverse)\ntest.to_csv('test_data_inversed_bycode.csv', encoding='gbk', header=None)\n", "# -*- coding: utf-8 -*-\n\"\"\"\nAlibaba_Realworld_predict.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/17d_0_LfRIh4WL-lFXY9WfNtudc6hFkjW\n\"\"\"\n\n\nfrom math import sqrt\nimport tensorflow as tf\nimport pandas as pd\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Conv1D, GRU\nfrom tensorflow.keras.losses import mean_squared_error\nfrom numpy.core._multiarray_umath import concatenate\nfrom sklearn.preprocessing import MinMaxScaler\nimport matplotlib.pyplot as plt\n\n# supervised监督学习函数\n\n\ndef series_to_supervised(data, columns, n_in=1, n_out=1, dropnan=True):\n n_vars = 1 if isinstance(data, list) else data.shape[1]\n df = pd.DataFrame(data)\n cols, names = list(), list()\n # input sequence (t-n, ... t-1)\n for i in range(n_in, 0, -1):\n cols.append(df.shift(i))\n names += [('%s%d(t-%d)' % (columns[j], j + 1, i))\n for j in range(n_vars)]\n # forecast sequence (t, t+1, ... t+n)\n for i in range(0, n_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('%s%d(t)' % (columns[j], j + 1)) for j in range(n_vars)]\n else:\n names += [('%s%d(t+%d)' % (columns[j], j + 1, i))\n for j in range(n_vars)]\n # put it all together\n agg = pd.concat(cols, axis=1)\n agg.columns = names\n # drop rows with NaN values\n if dropnan:\n clean_agg = agg.dropna()\n return clean_agg\n # return agg\n\n\ndataset = pd.read_csv(\n 'Machine_usage_groupby.csv')\n\ndataset_columns = dataset.columns\nvalues = dataset.values\nprint(dataset)\n\n# 归一化处理\nscaler = MinMaxScaler(feature_range=(0, 1))\nscaled = scaler.fit_transform(values)\n\n\n# 监督学习\nreframed = series_to_supervised(scaled, dataset_columns, 1, 1)\nvalues = reframed.values\n\n# 学习与检测数据的划分\nn_train_hours = 20000\ntrain = values[:n_train_hours, :]\ntest = values[n_train_hours:, :]\n\n\n# 监督学习结果划分\ntrain_x, train_y = train[:, :-1], train[:, -1]\ntest_x, test_y = test[:, :-1], test[:, -1]\n\n\n# 为了在LSTM中应用该数据,需要将其格式转化为3D format,即[Samples, timesteps, features]\ntrain_X = train_x.reshape((train_x.shape[0], 1, train_x.shape[1]))\ntest_X = test_x.reshape((test_x.shape[0], 1, test_x.shape[1]))\n\nmodel = Sequential()\nmodel.add(Conv1D(filters=32, kernel_size=3,\n strides=1, padding=\"causal\",\n activation=\"relu\"))\nmodel.add(\n GRU(\n 32,\n input_shape=(\n train_X.shape[1],\n train_X.shape[2]),\n return_sequences=True))\nmodel.add(GRU(16, input_shape=(train_X.shape[1], train_X.shape[2])))\nmodel.add(Dense(16, activation=\"relu\"))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(1))\nmodel.compile(loss=tf.keras.losses.Huber(),\n optimizer='adam',\n metrics=[\"mse\"])\nhistory = model.fit(\n train_X,\n train_y,\n epochs=50,\n batch_size=72,\n validation_split=0.2,\n verbose=2)\n\n# 画图\nplt.plot(history.history['loss'], label='train')\nplt.plot(history.history['val_loss'], label='test')\nplt.legend()\nplt.show()\n\n# make the prediction\nyHat = model.predict(test_X)\n\ninv_yHat = concatenate((yHat, test_x[:, 1:]), axis=1) # 数组拼接\ninv_yHat = inv_yHat[:, 0]\n\ntest_y = 
test_y.reshape((len(test_y), 1))\ninv_y = concatenate((test_y, test_x[:, 1:]), axis=1)\ninv_y = inv_y[:, 0]\n\nrmse = sqrt(mean_squared_error(inv_yHat, inv_y))\nprint('Test RMSE: %.8f' % rmse)\nmse = mean_squared_error(inv_yHat, inv_y)\nprint('Test MSE: %.8f' % mse)\n\nyhat = model.predict(test_X)\ntest_X_reshaped = test_X.reshape((test_X.shape[0], test_X.shape[2]))\n\ninv_yhat = concatenate((yhat, yhat, test_X_reshaped[:, 1:]), axis=1)\ninv_yhat = inv_yhat[:, 0]\ntest_y = test_y.reshape((len(test_y), 1))\ninv_y = concatenate((test_y, test_y, test_X_reshaped[:, 1:]), axis=1)\ninv_y = inv_y[:, 0]\nplt.plot(inv_yhat, label='prediction')\nplt.plot(inv_y, label='real')\nplt.xlabel('time')\nplt.ylabel('cpu_usage_percent')\nplt.legend()\nplt.show()\n\nplt.plot(inv_yhat[:500], label='prediction')\nplt.plot(inv_y[:500], label='real_cpu_usage_percent')\nplt.xlabel('time')\nplt.ylabel('cpu_usage_percent')\nplt.legend()\nplt.show()\n\nplt.plot(inv_yhat[:50], label='prediction')\nplt.plot(inv_y[:50], label='real_cpu_usage_percent')\nplt.xlabel('time')\nplt.ylabel('cpu_usage_percent')\nplt.legend()\nplt.show()\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ], [ "matplotlib.pyplot.legend", "pandas.concat", "pandas.read_csv", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.Sequential", "pandas.DataFrame", "matplotlib.pyplot.plot", "tensorflow.keras.layers.GRU", "tensorflow.keras.losses.Huber", "matplotlib.pyplot.xlabel", "tensorflow.keras.losses.mean_squared_error", "numpy.core._multiarray_umath.concatenate", "matplotlib.pyplot.show", "sklearn.preprocessing.MinMaxScaler", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
fredmontet/timeatlas
[ "9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e", "9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e", "9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e" ]
[ "src/timeatlas/time_series/component_handler.py", "src/timeatlas/generators/anomaly_generator/anomaly_generator.py", "src/timeatlas/models/NN/dataset/classification_dataset.py" ]
[ "from typing import List, Union, NoReturn\nfrom copy import deepcopy, copy\n\nfrom pandas import Index\n\nfrom .component import Component\n\n\nclass ComponentHandler:\n \"\"\" Helper class to manage many components\n\n The purpose of this class is to make the management of components in a\n time series as simple as possible, with one or many components.\n\n The underlying data structure is a simple list where component are stored.\n \"\"\"\n\n def __init__(self, components: Union[List[Component], Component] = None):\n if isinstance(components, Component):\n components = [components]\n self.components = components if components is not None else []\n\n def __getitem__(self, item: Union[int, str, List[int], List[str]]):\n # handler[0]\n if isinstance(item, int):\n new_components = self.components[item]\n # handler[\"0_foo\"]\n elif isinstance(item, str):\n new_components = self.get_component_by_name(item)\n\n elif isinstance(item, list):\n\n # handler[[0,3,5]]\n if all(isinstance(i, int) for i in item):\n new_components = [self.components[i] for i in item]\n\n # handler[[\"0_foo\",\"1_bar\"]]\n elif all(isinstance(i, str) for i in item):\n new_components = [self.get_component_by_name(i_n)\n for i_n in item]\n else:\n raise TypeError(f\"ComponentHandler list indices must be int or \"\n f\"str, not {type(item)}\")\n else:\n raise TypeError(f\"ComponentHandler indices must be int, str or list,\"\n f\" not {type(item)}\")\n\n return ComponentHandler(new_components)\n\n def __delitem__(self, key: Union[int, str]) -> NoReturn:\n \"\"\" Delete an item from the ComponentHandler\n\n Args:\n key: int or str of the item to delete\n \"\"\"\n if isinstance(key, int):\n del self.components[key]\n elif isinstance(key, str):\n i = self.get_component_id_by_name(key)\n del self.components[i]\n\n def __len__(self) -> int:\n \"\"\" Get the number of item in the ComponentHandler\n\n Returns:\n int\n \"\"\"\n return len(self.components)\n\n def __str__(self):\n \"\"\" get the str representation of a ComponentHandler\n\n Returns:\n str\n \"\"\"\n return str(self.get_columns().to_list())\n\n def append(self, component: Component) -> NoReturn:\n \"\"\" Append a Component to the ComponentHandler\n\n Args:\n component: Component to append\n \"\"\"\n self.components.append(component)\n\n def clear(self):\n \"\"\" Removes all Components from the ComponentHandler\n \"\"\"\n self.components.clear()\n\n def get_component_id_by_name(self, name: str) -> int:\n \"\"\" Get a Component ID by its name\n\n Args:\n name: str of the name of the Component, including the ID (lol)\n e.g. \"0_temperature\"\n\n Returns:\n int\n \"\"\"\n for i, c in enumerate(self.get_columns().to_list()):\n if name == c:\n return i\n # if no component are found throughout the for loop\n raise KeyError(f\"Component with name '{name}' does not exist.\")\n\n def get_component_by_name(self, name: str):\n \"\"\" Get a Component by its name\n\n Args:\n name: str of the name of the Component, including the ID\n e.g. 
\"0_temperature\"\n\n Returns:\n Component\n \"\"\"\n for i, c in enumerate(self.components):\n component_name = self.__format_main_series(i, c.get_main())\n if name == component_name:\n return c\n raise KeyError(f\"Component with name '{name}' does not exist.\")\n\n def get_column_by_id(self, index: int) -> Index:\n \"\"\" Get a the name of a column by its Component ID\n\n Get Pandas Index of a Component from the ComponentHandler by its\n positional identifier\n\n Args:\n index: int of the index of the component in the ComponentHandler\n with_meta: bool to include or not meta series in the return value\n\n Returns:\n Pandas Index of the names of the component\n \"\"\"\n c = self.components[index]\n cols = [self.__format_main_series(index, c.get_main())]\n return Index(cols)\n\n def get_column_by_name(self, name: str) -> Index:\n \"\"\" Get the name of a column by its Component name\n\n Args:\n name: str if the name of the component in the ComponentHandler\n e.g: \"0_temperature\"\n\n Returns:\n Pandas Index of the names of the component\n \"\"\"\n for i, c in enumerate(self.get_columns().to_list()):\n if name == c:\n return self.get_column_by_id(i)\n # if no component are found throughout the for loop\n raise KeyError(f\"Component with name '{name}' does not exist.\")\n\n def get_columns(self) -> Index:\n \"\"\" Get names of all the Components columns\n\n Get Pandas Index of a Component from the ComponentHandler by its\n positional identifier\n\n Args:\n index: int of the index of the component in the ComponentHandler\n\n Returns:\n Pandas Index of the names of the component\n \"\"\"\n cols = []\n for i, c in enumerate(self.components):\n cols.extend(self.get_column_by_id(i).to_list())\n return Index(cols)\n\n def copy(self, deep=True) -> 'ComponentHandler':\n \"\"\" Copy function, deep by default\n\n Args:\n deep: bool if deep copy or not\n\n Returns:\n ComponentHandler\n \"\"\"\n return deepcopy(self) if deep else copy(self)\n\n @staticmethod\n def __format_main_series(index: int, value: Union[str, list]):\n \"\"\" Format a main series name\n\n Args:\n index: int of the position of the main series\n value: list with the main series name\n\n Returns:\n list with the formatted str of the series\n \"\"\"\n if isinstance(value, str):\n return f\"{index}_{value}\"\n elif isinstance(value, list):\n return [f\"{index}_{v}\" for v in value]\n else:\n TypeError(f\"Type {value} isn't accepted\")\n", "from typing import NoReturn, Tuple, Any, Union, Optional, List, Callable, Dict\n\nfrom timeatlas.abstract.abstract_base_generator import AbstractBaseGenerator\nfrom timeatlas.time_series import TimeSeries\nfrom timeatlas.time_series_dataset import TimeSeriesDataset\nfrom timeatlas.config.constants import COMPONENT_VALUES\n\nfrom .anomalies import AnomalyABC\nfrom .utils import get_operator\nfrom .labeler import AnomalySetLabeler\nfrom .config import AnomalyConfigParser\n\nimport pandas as pd\nimport numpy as np\nfrom itertools import cycle\nfrom copy import copy\nimport math\nfrom os import path\n\n\nclass AnomalyGenerator(AbstractBaseGenerator):\n \"\"\"\n\n A generator that introcudes an anomaly into a given TimeSeriesDataset.\n\n The types and parameters are controlled with a .ini file,\n that can be created with \"AnomalyGeneratorTemplate\"\n\n \"\"\"\n\n def __init__(self, data: TimeSeriesDataset, conf_file: str, save_as: str = 'text'):\n \"\"\"\n\n Args:\n data: TimeSeriesDataset containing the data\n conf_file: config file created with AnomalyGeneratorTemplate\n \"\"\"\n\n # Each generator set 
a label_suffix\n # Here: AGM -> Anomaly Generator Manual\n super().__init__()\n self.label_suffix = \"AGM\"\n\n assert save_as == 'text' or save_as == 'pickle' or save_as == 'tsd'\n self.save_as = save_as\n\n # assertions\n assert isinstance(data, TimeSeriesDataset)\n assert all(isinstance(x, TimeSeries) for x in\n data), \"One or more elements are not a TimeSeries-object\"\n assert path.isfile(\n conf_file), f\"No config file found under given path '{conf_file}'\"\n\n # set data\n self.data = data.copy(deep=True)\n\n # read the config file\n self.config = AnomalyConfigParser(config_file=conf_file)\n self.GLOBAL = self.config['GLOBAL']\n self.ANOMALIES = self.config['ANOMALIES']\n self.selection = self.GLOBAL['selection']\n self.percent = self.GLOBAL['percent']\n self.amount = self.GLOBAL['amount']\n self.outfile = self.GLOBAL['outfile']\n\n # create numpy-random.RandomState object\n self.seed = self.GLOBAL['seed']\n\n # functions for anomaly\n self.ABC = AnomalyABC(self.seed)\n self.anomaly_functions = self.get_anomaly_function()\n\n # adding a label column to the dataframe and creating the results anomaly labels\n self.labels = AnomalySetLabeler()\n\n # figure out the precision of the data\n self.precision = self.generation_precision()\n\n @staticmethod\n def precision_and_scale(x: float):\n \"\"\"\n\n Get the precision of a value\n\n Args:\n x: a (float) number\n\n Returns: the number of positions after the comma\n\n \"\"\"\n # 14 is the maximal number of digits python can handle (more is also unrealistic)\n max_digits = 14\n # if the number is NaN return nothing\n if math.isnan(x):\n return\n # figure out the magniture -> the numbers before the comma\n int_part = int(abs(x))\n magnitude = 1 if int_part == 0 else int(math.log10(int_part)) + 1\n if magnitude >= max_digits:\n return (magnitude, 0)\n # shift the number after the comma in front of the comma and figure out the amount\n frac_part = abs(x) - int_part\n multiplier = 10 ** (max_digits - magnitude)\n frac_digits = multiplier + int(multiplier * frac_part + 0.5)\n while frac_digits % 10 == 0:\n frac_digits /= 10\n scale = int(math.log10(frac_digits))\n return scale\n\n @staticmethod\n def clean_parameters(values) -> Dict:\n \"\"\"\n Function to cleanup the parameters. If the parameter in the config-file are None, they are removed.\n Args:\n values: parameter values from he config files\n\n Returns: Dict of the paramters without the None\n\n \"\"\"\n return {k: v for k, v in values['PARAMETERS'].items() if v is not None}\n\n @staticmethod\n def create_zip_object(data, anomaly_f):\n '''\n\n combines the two lists of the data, where the anomalies are added to and the anomaly-function\n\n if the function list is shorter it will cycle through them until all data has 1 anomaly\n\n if the data is shorter it will only assign one anomaly function\n\n Args:\n data: pd.Series of data\n anomaly_f: function of ABC.anomalies creating the anomaly\n\n Returns: zip-object\n\n '''\n\n # warnings.warn(\"Length of data > length of anomalies: Not all anomalies will be assigned.\")\n\n zip_list = zip(data, cycle(anomaly_f))\n return zip_list\n\n def generation_precision(self):\n '''\n\n Set the rounded average precision of the values inside a dataframe\n\n Returns: rounded average number of digits after the comma\n\n '''\n\n precision_df = np.array(\n [self.precision_and_scale(x) for ts in self.data for x in\n ts._data.values])\n # This is more of a security. 
A correctly formated TimeSeries-object has no None elements\n precision_df = precision_df[precision_df != None]\n\n return int(round(precision_df.mean()))\n\n def save(self) -> NoReturn:\n \"\"\"\n\n Saving the labels and the new TimeSeriesDataset to file.\n\n Returns: NoReturn\n\n \"\"\"\n\n self.labels.finalize()\n\n if self.save_as == 'text':\n self.data.to_text(path=f'./{self.outfile}_data')\n elif self.save_as == 'pickle':\n self.data.to_pickle(path=f'./{self.outfile}_data.pkl')\n elif self.save_as == 'tsd':\n return self.data\n\n # This function is no longer needed, since we save the labels now in the TimeSeries\n # self.labels.annotation.to_csv(f'./{self.outfile}_data/{self.outfile}_labels.csv', index=False)\n\n def get_anomaly_function(self) -> List:\n '''\n\n Get all functions in the config file\n\n Returns: list of tuples with the functions as (function, parameters)\n\n '''\n functions = []\n for key, values in self.ANOMALIES.items():\n function = getattr(self.ABC, values['function'])\n # removing the keys with None\n parameters = self.clean_parameters(values)\n functions.append((function, parameters))\n return functions\n\n def chose_amount(self) -> List:\n \"\"\"\n\n Chose the number of time windows based on a fixed amount given by the user in the config file:\n\n eg. amount = 10, will select 10 elements\n\n Returns: List of pair of indices and data\n\n \"\"\"\n\n ind, data = self.data.select_components_randomly(n=self.amount, seed=self.seed, indices=True)\n return list(zip(ind, data))\n\n def chose_selection(self) -> List:\n \"\"\"\n\n Chose the number of time windows based on a user selection given by the user in the config file:\n\n eg. selection = [0,1,5,9] will select the first, second, sixth and tenth element.\n\n Returns: List of pair of indices and data\n\n \"\"\"\n data = self.data[self.selection]\n return list(zip(self.selection, data))\n\n def chose_percentage(self) -> List:\n \"\"\"\n\n Chose the number of time windows based on a user selection given by the user in the config file:\n\n e.g. 
percent = 0.2 will select 20% of the TimeSeriesDataset (min=0, max=1)\n\n Returns: List of pair of indices and data\n\n \"\"\"\n ind, data = self.data.select_components_by_percentage(percent=self.percent, seed=self.seed, indices=True)\n return list(zip(ind, data))\n\n def add_data(self, new_data: TimeSeries, index: int) -> NoReturn:\n \"\"\"\n\n Replacing the old TimeSeries with the new TimeSeries containing the anomaly.\n\n Args:\n new_data: new TimeSeries that will replace the old one\n index: index of the TimeSeries to replace in the TimeSeriesDataset\n\n Returns: NoReturn\n\n \"\"\"\n\n self.data[index]._data[f'0_{COMPONENT_VALUES}'].replace(to_replace=pd.Series(new_data))\n\n def add_labels(self, index, coordinates, function_name):\n \"\"\"\n\n Create the labels that need to be added to the TimeSeries.\n Will create a new column for the labels and name them.\n\n Args:\n index: index of the TimeSeries in the TimeSeriesDataframe\n coordinates: start and end index of the anomaly in the TimeSeries\n function_name: label of the anomaly\n\n Returns:\n\n \"\"\"\n labels = [None] * len(self.data[index]._data)\n for coords in coordinates:\n start = coords[0] - 1\n end = coords[1] + 1\n labels[start:end] = [function_name] * len(labels[start:end])\n self.data[index]._data[f'label_{self.label_suffix}'] = labels\n self.data[index].label = function_name\n\n def generate(self) -> NoReturn:\n \"\"\"\n raise NotImplementedError\n\n Main function to generate the anomalies.\n\n Returns: NoReturn\n\n \"\"\"\n\n if self.amount:\n anomaly_series = self.chose_amount()\n elif self.selection:\n anomaly_series = self.chose_selection()\n else:\n anomaly_series = self.chose_percentage()\n\n zip_list_functions = self.create_zip_object(anomaly_series,\n self.anomaly_functions)\n\n # TODO: This adds the anomalies at the start and not where they belong\n for (ind, ts), (function, params) in zip_list_functions:\n data = ts._data\n operation_param = params['operation']\n function_params = copy(params)\n function_params.pop('operation')\n # TODO: Here we make DataFrame -> Series. A more elegant solution is to be found\n anomaly, coordinates = function(data[f'0_{COMPONENT_VALUES}'], **function_params)\n # creating the new data to add\n operator = get_operator(mode=operation_param)\n new_data = operator(data, start=coordinates, values=anomaly)\n # rounding the data to a precision typical for the given dataset\n new_data = new_data.round(decimals=self.precision)\n self.add_data(new_data=new_data, index=ind)\n self.labels.create_operation_dict(coordinates=coordinates,\n param=operation_param,\n function_name=function.__name__,\n name=ind,\n outfile=self.outfile)\n\n self.add_labels(index=ind,\n coordinates=coordinates,\n function_name=function.__name__)\n\n if self.GLOBAL['save']:\n return self.save()\n", "import numpy as np\n\nfrom .base_dataset import BaseDataset\nfrom timeatlas.time_series_dataset import TimeSeriesDataset\n\n\nclass TimeSeriesClassificationDataset(BaseDataset):\n \"\"\"\n A DataLoader for the classification of complete TimeSeries, where X: TimeSeries and y: label of the TimeSeries\n \"\"\"\n\n def __init__(self, timeseriesdataset: TimeSeriesDataset):\n super(TimeSeriesClassificationDataset, self).__init__(tsd=timeseriesdataset)\n self.data = np.array([ts._data for ts in timeseriesdataset])\n self.labels = [ts.class_label for ts in timeseriesdataset]\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, item):\n return self.data[item], self.labels[item]\n" ]
[ [ "pandas.Index" ], [ "pandas.Series" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cyente/OFA
[ "291a0abb76559a6379f1a7ebbdfdf1350c94a9f4" ]
[ "data/rec_data/rec_nextitem_dataset.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom io import BytesIO\n\nimport logging\nimport warnings\nimport string\n\nimport numpy as np\nimport torch\nimport base64\nfrom torchvision import transforms\n\nfrom PIL import Image, ImageFile\n\nfrom data import data_utils\nfrom data.ofa_dataset import OFADataset\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\nImageFile.MAX_IMAGE_PIXELS = None\nImage.MAX_IMAGE_PIXELS = None\n\nlogger = logging.getLogger(__name__)\nwarnings.filterwarnings(\"ignore\", \"(Possibly )?corrupt EXIF data\", UserWarning)\n\nIMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)\nIMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)\n\n\ndef collate(samples, pad_idx, eos_idx):\n if len(samples) == 0:\n return {}\n\n def merge(key):\n return data_utils.collate_tokens(\n [s[key] for s in samples],\n pad_idx,\n eos_idx=eos_idx,\n )\n\n id = np.array([s[\"id\"] for s in samples])\n src_tokens = merge(\"source\")\n src_lengths = torch.LongTensor([s[\"source\"].ne(pad_idx).long().sum() for s in samples])\n\n # patch_images = torch.stack([sample['patch_image'] for sample in samples], dim=0)\n # patch_masks = torch.cat([sample['patch_mask'] for sample in samples])\n\n prev_output_tokens = None\n target = None\n if samples[0].get(\"target\", None) is not None:\n target = merge(\"target\")\n tgt_lengths = torch.LongTensor([s[\"target\"].ne(pad_idx).long().sum() for s in samples])\n ntokens = tgt_lengths.sum().item()\n\n if samples[0].get(\"prev_output_tokens\", None) is not None:\n prev_output_tokens = merge(\"prev_output_tokens\")\n else:\n ntokens = src_lengths.sum().item()\n\n batch = {\n \"id\": id,\n \"nsentences\": len(samples),\n \"ntokens\": ntokens,\n \"net_input\": {\n \"src_tokens\": src_tokens,\n \"src_lengths\": src_lengths,\n # \"patch_images\": patch_images,\n # \"patch_masks\": patch_masks,\n \"prev_output_tokens\": prev_output_tokens\n },\n \"target\": target,\n }\n\n return batch\n\n\nclass Rec_nextitemDataset(OFADataset):\n def __init__(\n self,\n split,\n dataset,\n bpe,\n src_dict,\n tgt_dict=None,\n max_src_length=128,\n max_tgt_length=30,\n # patch_image_size=224,\n # imagenet_default_mean_and_std=False,\n scst=False\n ):\n super().__init__(split, dataset, bpe, src_dict, tgt_dict)\n self.max_src_length = max_src_length\n self.max_tgt_length = max_tgt_length\n # self.patch_image_size = patch_image_size\n self.scst = scst\n\n self.transtab = str.maketrans({key: None for key in string.punctuation})\n\n # if imagenet_default_mean_and_std:\n # mean = IMAGENET_DEFAULT_MEAN\n # std = IMAGENET_DEFAULT_STD\n # else:\n # mean = [0.5, 0.5, 0.5]\n # std = [0.5, 0.5, 0.5]\n\n # self.patch_resize_transform = transforms.Compose([\n # lambda image: image.convert(\"RGB\"),\n # transforms.Resize((patch_image_size, patch_image_size), interpolation=Image.BICUBIC),\n # transforms.ToTensor(),\n # transforms.Normalize(mean=mean, std=std),\n # ])\n print(\"self.max_tgt_length\", self.max_tgt_length)\n\n def __getitem__(self, index):\n uniq_id, user_behavior, target_item, rating = self.dataset[index]\n\n while target_item.translate(self.transtab).strip() == \"\":\n uniq_id, user_behavior, target_item, rating = self.dataset[index]\n\n if len(user_behavior) >= self.max_src_length - 20:\n user_behavior = user_behavior[:self.max_src_length - 20]\n if user_behavior[-1] != \",\":\n user_behavior = ','.join(user_behavior.split(\",\")[:-1])\n else:\n user_behavior = 
user_behavior[:-1]\n\n if self.split == 'train' and not self.scst:\n target_item = target_item.translate(self.transtab).strip()\n target_item_token_list = target_item.strip().split(\" \")\n tgt_explain = ' '.join(target_item_token_list[:self.max_tgt_length])\n else:\n target_item = ' '.join(target_item.strip().split(\" \")[:self.max_tgt_length])\n target_item_list = [target_item.translate(self.transtab).strip() for explain in target_item.strip().split('&&')]\n tgt_explain = '&&'.join(target_item_list)\n\n print(\"user_behavior\", user_behavior)\n src_text = \"If you liked \" + user_behavior + \\\n \", you will also like \"\n\n assert len(src_text.split(\" \")) <= self.max_src_length\n src_item = self.encode_text(src_text)\n tgt_item = self.encode_text(\" {}\".format(tgt_explain))\n\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n example = {\n \"id\": uniq_id,\n \"source\": src_item,\n # \"patch_image\": patch_image,\n # \"patch_mask\": patch_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item\n }\n return example\n\n def collater(self, samples, pad_to_length=None):\n \"\"\"Merge a list of samples to form a mini-batch.\n Args:\n samples (List[dict]): samples to collate\n Returns:\n dict: a mini-batch with the following keys:\n \"\"\"\n return collate(samples, pad_idx=self.pad, eos_idx=self.eos)\n\n\n def __getitem__2(self, index):\n # uniq_id, user_behavior, explaination, fea, opt = self.dataset[index]\n # print(\"user_behavior\", user_behavior)\n # print(\"fea\", fea, \"opt\", opt)\n # print(\"user_behavior\", user_behavior)\n tgt_explain = \"asdasss ssa\"\n while tgt_explain.strip() != \"\":\n uniq_id, user_behavior, explaination, fea, opt = self.dataset[index]\n while explaination.translate(self.transtab).strip() == \"\":\n uniq_id, user_behavior, explaination, fea, opt = self.dataset[index]\n print(\"explaination begin\", explaination)\n tmp_user_beha = user_behavior.split(\" Right now, \")\n len_context = len(tmp_user_beha[1].split(\" \"))\n behavior_list = tmp_user_beha[0].split(\" \")[0: self.max_src_length - 40 - len_context]\n behavior_ = \" \".join(behavior_list)\n if behavior_[-1] == \",\":\n behavior_ = behavior_[:-1] + '.'\n if behavior_[-1] != \".\":\n behavior_ = ','.join(behavior_.split(\",\")[:-1]) + '.'\n\n user_behavior = \" right now, \".join([behavior_, tmp_user_beha[1]])\n\n user_behavior += \\\n \" the user cares about {} and the item is {}.\".format(fea, opt)\n\n\n # image = Image.open(BytesIO(base64.urlsafe_b64decode(image)))\n # patch_image = self.patch_resize_transform(image)\n # patch_mask = torch.tensor([True])\n\n if self.split == 'train' and not self.scst:\n explaination = explaination.translate(self.transtab).strip()\n print(\"explaination.translate(self.transtab).strip()\", explaination.translate(self.transtab).strip())\n explaination_token_list = explaination.strip().split(\" \")\n tgt_explain = ' '.join(explaination_token_list[:self.max_tgt_length])\n else:\n explaination = ' '.join(explaination.strip().split(\" \")[:self.max_tgt_length])\n explain_list = [explain.translate(self.transtab).strip() for explain in explaination.strip().split('&&')]\n tgt_explain = '&&'.join(explain_list)\n print(\"explaination\", explaination)\n print(\"tgt_explain\", tgt_explain)\n assert False\n src_text = user_behavior + \\\n \" how to persuade the user to buy the item?\"\n # print(\"src_text\", src_text.split(\" 
\")[0:320])\n assert len(src_text.split(\" \")) <= self.max_src_length\n src_item = self.encode_text(src_text)\n tgt_item = self.encode_text(\" {}\".format(tgt_explain))\n\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n example = {\n \"id\": uniq_id,\n \"source\": src_item,\n # \"patch_image\": patch_image,\n # \"patch_mask\": patch_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item\n }\n return example\n" ]
[ [ "numpy.array", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Khumayun/FairDeepLearning
[ "e19947c17c282ce1e89ad105cc241ffc07190628" ]
[ "dataloaders/adult_loader.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import Dataset\nfrom dataloaders.adult_process import get_adult_data\n\n\nclass AdultDataset(Dataset):\n \"\"\"\n The UCI Adult dataset.\n \"\"\"\n\n def __init__(self, root_dir, phase, tar_attr, priv_attr, clr_ratio):\n self.tar_attr = tar_attr\n self.priv_attr = priv_attr\n\n self.data = get_adult_data(tar_attr, priv_attr, clr_ratio)\n if phase not in [\"train\", \"val\", \"test\"]:\n raise NotImplementedError\n\n if phase == \"train\":\n self.X = self.data[f\"x_train\"][self.data[\"train_inds\"]]\n self.Y = self.data[f\"y_train\"][self.data[\"train_inds\"]]\n self.A = self.data[f\"attr_train\"][self.data[\"train_inds\"]]\n elif phase == \"val\":\n self.X = self.data[f\"x_train\"][self.data[\"valid_inds\"]]\n self.Y = self.data[f\"y_train\"][self.data[\"valid_inds\"]]\n self.A = self.data[f\"attr_train\"][self.data[\"valid_inds\"]]\n elif phase == \"test\":\n self.X = self.data[f\"x_test\"]\n self.Y = self.data[f\"y_test\"]\n self.A = self.data[f\"attr_test\"]\n else:\n raise Exception(\"Wrong phase\")\n\n self.input_shape = self.X.shape\n self.num_samples = self.input_shape[0]\n self.xdim = self.X.shape[1]\n self.ydim = 1\n self.adim = 1\n\n def __len__(self):\n return len(self.X)\n\n def __getitem__(self, idx):\n if self.ydim == 1 and len(self.Y.shape) == 2: # binary classification\n return (\n torch.from_numpy(self.X[idx]).float(),\n torch.from_numpy(self.Y[idx]),\n torch.from_numpy(self.A[idx]),\n )\n raise NotImplementedError\n\n def onehot_2_int(self, ts):\n if len(ts.shape) == 2:\n return torch.argmax(ts, dim=1)\n if len(ts.shape) == 1:\n return torch.argmax(ts, dim=0)\n raise NotImplementedError\n\n def get_A_proportions(self):\n \"\"\"for catergorical attribute\"\"\"\n assert len(self.A.shape) == 2\n num_class = self.A.shape[1]\n\n A_label = np.argmax(self.A, axis=1)\n A_proportions = []\n for cls_idx in range(num_class):\n A_proportion = np.sum(cls_idx == A_label)\n A_proportions.append(A_proportion)\n A_proportions = [a_prop * 1.0 / len(A_label) for a_prop in A_proportions]\n return A_proportions\n\n def get_Y_proportions(self):\n \"\"\"for catergorical attribute\"\"\"\n assert len(self.Y.shape) == 2\n num_class = self.Y.shape[1]\n\n Y_label = np.argmax(self.Y, axis=1)\n Y_proportions = []\n for cls_idx in range(num_class):\n Y_proportion = np.sum(cls_idx == Y_label)\n Y_proportions.append(Y_proportion)\n Y_proportions = [y_prop * 1.0 / len(Y_label) for y_prop in Y_proportions]\n return Y_proportions\n\n def get_AY_proportions(self):\n \"\"\"for catergorical attributes\"\"\"\n assert len(self.Y.shape) == len(self.A.shape) == 2\n A_num_class = self.A.shape[1]\n Y_num_class = self.Y.shape[1]\n A_label = np.argmax(self.A, axis=1)\n Y_label = np.argmax(self.Y, axis=1)\n AY_proportions = []\n for A_cls_idx in range(A_num_class):\n Y_proportions = []\n for Y_cls_idx in range(Y_num_class):\n AY_proprtion = np.sum(\n np.logical_and(Y_cls_idx == Y_label, A_cls_idx == A_label)\n )\n Y_proportions.append(AY_proprtion)\n Y_proportions = [y_prop * 1.0 / len(Y_label) for y_prop in Y_proportions]\n AY_proportions.append(Y_proportions)\n return AY_proportions\n" ]
[ [ "torch.from_numpy", "numpy.argmax", "numpy.logical_and", "numpy.sum", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MarsBighead/mustang
[ "ffbaf109931557e40da2d97e4eb914bc1c0aba0d" ]
[ "Python/npr.py" ]
[ "#!/usr/local/bin/python3\nimport numpy as np \n\nimport numpy.random as npr \nimport matplotlib.pyplot as plt\n\nprint (npr.rand(5,5))\na=5.\nb=10.\nprint (npr.rand(10)*(b-a)+a )\n\n\nsample_size =500 \nrn1 = npr.rand(sample_size,3) \nrn2 = npr.randint(0,10,sample_size) \nrn3 = npr.sample(size=sample_size) \na =[0, 25, 50, 75, 100] \nrn4=npr.choice(a, size=sample_size) \n\nfig, ((ax1,ax2),(ax3,ax4))= plt.subplots(\n nrows=2,\n ncols=2,\n figsize=(7,7)\n)\nax1.hist(rn1, bins=25, stacked=True)\nax1.set_title('rand')\nax1.set_ylabel('frequency')\nax1.grid(True)\n\nax2.hist(rn2, bins=25)\nax2.set_title('randint')\nax2.grid(True)\n\nax3.hist(rn3, bins=25)\nax3.set_title('sample')\nax3.set_ylabel('frequency')\nax3.grid(True)\n\nax4.hist(rn4, bins=25) \nax4.set_title('choice')\nax4.grid(True)\n\n#print (fig)\n#plt.show()\nfig.savefig(\"random-statistics.png\", bbox_inches='tight')\n\nplt.close(\"all\")\n\nsample_size =500 \nrn1 = npr.standard_normal(sample_size) \nrn2 = npr.normal(100,20,sample_size) \nrn3 = npr.chisquare(df=0.5, size=sample_size) \na =[0, 25, 50, 75, 100] \nrn4=npr.poisson(lam=1.0, size=sample_size) \n\nfig, ((ax1,ax2),(ax3,ax4))= plt.subplots(\n nrows=2,\n ncols=2,\n figsize=(7,7)\n)\nax1.hist(rn1, bins=25, stacked=True)\nax1.set_title('standard normal')\nax1.set_ylabel('frequency')\nax1.grid(True)\n\nax2.hist(rn2, bins=25)\nax2.set_title('normal(100, 20)')\nax2.grid(True)\n\nax3.hist(rn3, bins=25)\nax3.set_title('chi square')\nax3.set_ylabel('frequency')\nax3.grid(True)\n\nax4.hist(rn4, bins=25) \nax4.set_title('Poisson')\nax4.grid(True)\nfig.savefig(\"high-statistics.png\", bbox_inches='tight')\nplt.show()" ]
[ [ "numpy.random.chisquare", "numpy.random.choice", "numpy.random.standard_normal", "matplotlib.pyplot.subplots", "numpy.random.poisson", "numpy.random.normal", "numpy.random.rand", "numpy.random.sample", "matplotlib.pyplot.close", "matplotlib.pyplot.show", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mkturkcan/FC.AntennalLobe
[ "6a0e124f68c249fcb067c571b5170002b3335efc" ]
[ "feedbackcircuits/NDComponents/AntennalLobe/AlphaSpike.py" ]
[ "# pylint:disable=no-member\nimport os\nfrom collections import OrderedDict\nimport numpy as np\nimport pycuda.gpuarray as garray\nfrom pycuda.tools import dtype_to_ctype\nimport pycuda.driver as drv\nfrom pycuda.compiler import SourceModule\nfrom neurokernel.LPU.NDComponents.NDComponent import NDComponent\n\nCUDA_SRC = \"\"\"\n\n\n#define G_MIN\t\t0.0\n#define G_MAX\t\t50000.0\n\nstruct States {\n double s;\n double u;\n double g;\n};\n\nstruct Derivatives {\n double s;\n double u;\n};\n\n\n__device__ void clip(States &states)\n{\n states.g = fmax(states.g, G_MIN);\n states.g = fmin(states.g, G_MAX);\n}\n\n__device__ void forward(\n States &states,\n Derivatives &gstates,\n double dt\n)\n{\n states.s += dt * gstates.s;\n states.u += dt * gstates.u;\n}\n\n__device__ int ode(\n States &states,\n Derivatives &gstates,\n double AD,\n double AR,\n double GMAX,\n double &spike\n)\n{\n\n gstates.s = states.u;\n gstates.u = (((-(AR + AD)) * states.u) - ((AR * AD) * states.s));\n if (spike) {\n states.u = (states.u + (AR * AD));\n }\n states.g = (states.s * GMAX);\n return 0;\n}\n\n\n\n__global__ void run_step (\n int num_thread,\n double dt,\n double *g_state_s,\n double *g_state_u,\n double *g_state_g,\n double *g_param_ad,\n double *g_param_ar,\n double *g_param_gmax,\n double *g_input_spike,\n double *g_output_g\n)\n{\n /* TODO: option for 1-D or 2-D */\n int tid = blockIdx.x * blockDim.x + threadIdx.x;\n int total_threads = gridDim.x * blockDim.x;\n\n for (int nid = tid; nid < num_thread; nid += total_threads) {\n\n States states;\n Derivatives gstates;\n\n /* import data */\n states.s = g_state_s[nid];\n states.u = g_state_u[nid];\n states.g = g_state_g[nid];\n double param_AD = g_param_ad[nid];\n double param_AR = g_param_ar[nid];\n double param_GMAX = g_param_gmax[nid];\n double input_spike = g_input_spike[nid];\n\n \n \n /* compute gradient */\n ode(states, gstates, param_AD, param_AR, param_GMAX, input_spike);\n\n /* solve ode */\n forward(states, gstates, dt);\n\n /* clip */\n clip(states);\n\n \n\n /* export state (internals) data */\n g_state_s[nid] = states.s;\n g_state_u[nid] = states.u;\n g_state_g[nid] = states.g;\n\n /* export output (updates) data */\n g_output_g[nid] = states.g;\n }\n\n return;\n}\n\n\n\"\"\"\n\n\nclass AlphaSpike(NDComponent):\n \"\"\"AlphaSpike\n\n Attributes:\n accesses (list): list of input variables\n updates (list): list of output variables\n params (list): list of parameters\n params_default (dict): default values of the parameters\n internals (OrderedDict): internal variables of the model and initial value\n time_scale (float): scaling factor of the `dt`\n \"\"\"\n\n accesses = [\n \"spike\",\n ]\n updates = [\n \"g\",\n ]\n params = [\n \"ad\",\n \"ar\",\n \"gmax\",\n ]\n params_default = dict(\n ar=12.5,\n ad=12.19,\n gmax=0.1,\n )\n internals = OrderedDict(\n [\n (\"s\", 0.0),\n (\"u\", 0.0),\n (\"g\", 0.0),\n ]\n )\n time_scale = 1.0 # scales dt\n _has_rand = False\n\n def maximum_dt_allowed(self):\n return np.inf\n\n def __init__(\n self,\n params_dict,\n access_buffers,\n dt,\n LPU_id=None,\n debug=False,\n cuda_verbose=False,\n ):\n if cuda_verbose:\n self.compile_options = [\"--ptxas-options=-v\", \"--expt-relaxed-constexpr\"]\n else:\n self.compile_options = [\"--expt-relaxed-constexpr\"]\n\n self.debug = debug\n self.LPU_id = LPU_id\n self.num_comps = params_dict[self.params[0]].size\n self.dtype = params_dict[self.params[0]].dtype\n\n self.dt = dt * self.time_scale\n self.params_dict = params_dict\n self.access_buffers = access_buffers\n\n 
self.internal_states = {\n c: garray.zeros(self.num_comps, dtype=self.dtype) + self.internals[c]\n for c in self.internals\n }\n\n self.inputs = {\n k: garray.empty(self.num_comps, dtype=self.access_buffers[k].dtype)\n for k in self.accesses\n }\n\n # make all dtypes consistent\n dtypes = {\"dt\": self.dtype}\n dtypes.update(\n {\"state_\" + k: self.internal_states[k].dtype for k in self.internals}\n )\n dtypes.update({\"param_\" + k: self.params_dict[k].dtype for k in self.params})\n dtypes.update(\n {\"input_\" + k.format(k): self.inputs[k].dtype for k in self.accesses}\n )\n dtypes.update({\"output_\" + k: self.dtype for k in self.updates})\n self.update_func = self.get_update_func(dtypes)\n\n if self._has_rand:\n import neurokernel.LPU.utils.curand as curand\n\n self.randState = curand.curand_setup(\n self.num_comps, np.random.randint(10000)\n )\n dtypes.update({\"rand\": self.dtype})\n\n def run_step(self, update_pointers, st=None):\n for k in self.inputs:\n self.sum_in_variable(k, self.inputs[k], st=st)\n args = (\n [self.internal_states[k].gpudata for k in self.internals]\n + [self.params_dict[k].gpudata for k in self.params]\n + [self.inputs[k].gpudata for k in self.accesses]\n + [update_pointers[k] for k in self.updates]\n )\n if self._has_rand:\n args += [self.randState.gpudata]\n\n self.update_func.prepared_async_call(\n self.update_func.grid,\n self.update_func.block,\n st,\n self.num_comps,\n self.dt,\n *args\n )\n\n def get_update_func(self, dtypes):\n from pycuda.compiler import SourceModule\n\n mod = SourceModule(\n CUDA_SRC,\n options=self.compile_options,\n no_extern_c=self._has_rand,\n )\n func = mod.get_function(\"run_step\")\n type_dict = {k: dtype_to_ctype(dtypes[k]) for k in dtypes}\n\n func.prepare(\"i\" + np.dtype(self.dtype).char + \"P\" * (len(type_dict) - 1))\n func.block = (256, 1, 1)\n func.grid = (\n min(\n 6 * drv.Context.get_device().MULTIPROCESSOR_COUNT,\n (self.num_comps - 1) // 256 + 1,\n ),\n 1,\n )\n return func\n" ]
[ [ "numpy.dtype", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lizhe960118/CenterNet
[ "d1a0d13974e2316c6d127ca7860866cdd93bcfa7", "d1a0d13974e2316c6d127ca7860866cdd93bcfa7", "d1a0d13974e2316c6d127ca7860866cdd93bcfa7", "d1a0d13974e2316c6d127ca7860866cdd93bcfa7", "d1a0d13974e2316c6d127ca7860866cdd93bcfa7", "d1a0d13974e2316c6d127ca7860866cdd93bcfa7" ]
[ "tools/test_file_dir/voc_test.py", "mmdet/models/losses/ctdet_loss.py", "mmdet/models/anchor_heads/weight_center_head.py", "mmdet/models/anchor_heads/fcos_head.py", "mmdet/models/losses/center_focal_loss.py", "mmdet/models/anchor_heads/center_head.py" ]
[ "import argparse\nimport os\nimport os.path as osp\nimport shutil\nimport tempfile\n\nimport mmcv\nimport torch\nimport torch.distributed as dist\nfrom mmcv.runner import load_checkpoint, get_dist_info\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\n\nfrom mmdet.apis import init_dist\nfrom mmdet.core import results2json\n# , coco_eval, \nfrom txt_val import txt_eval\nfrom mmdet.core import wrap_fp16_model\nfrom mmdet.datasets import build_dataloader, build_dataset\nfrom mmdet.models import build_detector\nfrom mmdet import datasets\n\n\ndef single_gpu_test(model, data_loader, show=False):\n model.eval()\n results = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=not show, **data)\n results.append(result)\n\n if show:\n model.module.show_result(data, result, dataset.img_norm_cfg)\n\n# batch_size = data['img'][0].size(0)\n batch_size = 1\n for _ in range(batch_size):\n prog_bar.update()\n return results\n\n\ndef multi_gpu_test(model, data_loader, tmpdir=None):\n model.eval()\n results = []\n dataset = data_loader.dataset\n rank, world_size = get_dist_info()\n if rank == 0:\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n results.append(result)\n\n if rank == 0:\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size * world_size):\n prog_bar.update()\n\n # collect results from all ranks\n results = collect_results(results, len(dataset), tmpdir)\n\n return results\n\n\ndef collect_results(result_part, size, tmpdir=None):\n rank, world_size = get_dist_info()\n # create a tmp dir if it is not specified\n if tmpdir is None:\n MAX_LEN = 512\n # 32 is whitespace\n dir_tensor = torch.full((MAX_LEN, ),\n 32,\n dtype=torch.uint8,\n device='cuda')\n if rank == 0:\n tmpdir = tempfile.mkdtemp()\n tmpdir = torch.tensor(\n bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')\n dir_tensor[:len(tmpdir)] = tmpdir\n dist.broadcast(dir_tensor, 0)\n tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()\n else:\n mmcv.mkdir_or_exist(tmpdir)\n # dump the part result to the dir\n mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))\n dist.barrier()\n # collect all parts\n if rank != 0:\n return None\n else:\n # load results of all parts from tmp dir\n part_list = []\n for i in range(world_size):\n part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))\n part_list.append(mmcv.load(part_file))\n # sort the results\n ordered_results = []\n for res in zip(*part_list):\n ordered_results.extend(list(res))\n # the dataloader may pad some samples\n ordered_results = ordered_results[:size]\n # remove tmp dir\n shutil.rmtree(tmpdir)\n return ordered_results\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='MMDet test detector')\n parser.add_argument('config', help='test config file path')\n parser.add_argument('checkpoint', help='checkpoint file')\n parser.add_argument('--out', help='output result file')\n# parser.add_argument(\n# '--eval',\n# type=str,\n# nargs='+',\n# choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],\n# help='eval types')\n parser.add_argument('--show', action='store_true', help='show results')\n parser.add_argument('--tmpdir', help='tmp dir for writing some results')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 
'mpi'],\n default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument('--iou_thr', type=float, default=0.5)\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n return args\n\n\ndef main():\n #os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n args = parse_args()\n\n assert args.out or args.show, \\\n ('Please specify at least one operation (save or show the results) '\n 'with the argument \"--out\" or \"--show\"')\n\n if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):\n raise ValueError('The output file must be a pkl file.')\n\n cfg = mmcv.Config.fromfile(args.config)\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n cfg.model.pretrained = None\n cfg.data.test.test_mode = True\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n distributed = True\n init_dist(args.launcher, **cfg.dist_params)\n\n # build the dataloader\n # TODO: support multiple images per gpu (only minor changes are needed)\n dataset = build_dataset(cfg.data.test)\n data_loader = build_dataloader(\n dataset,\n imgs_per_gpu=1,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n\n # build the model and load checkpoint\n model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n wrap_fp16_model(model)\n checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')\n # old versions did not save class info in checkpoints, this walkaround is\n # for backward compatibility\n if 'CLASSES' in checkpoint['meta']:\n model.CLASSES = checkpoint['meta']['CLASSES']\n else:\n model.CLASSES = dataset.CLASSES\n\n if not distributed:\n model = MMDataParallel(model, device_ids=[0])\n outputs = single_gpu_test(model, data_loader, args.show)\n else:\n model = MMDistributedDataParallel(model.cuda())\n outputs = multi_gpu_test(model, data_loader, args.tmpdir)\n\n rank, _ = get_dist_info()\n if args.out and rank == 0:\n print('\\nwriting results to {}'.format(args.out))\n mmcv.dump(outputs, args.out)\n result_file = args.out\n# args = parser.parse_args()\n# cfg = mmcv.Config.fromfile(args.config)\n# test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets)\n# txt_eval(args.result, test_dataset, args.iou_thr)\n txt_eval(result_file, dataset, iou_thr=args.iou_thr)\n \n# eval_types = args.eval\n# if eval_types:\n# print('Starting evaluate {}'.format(' and '.join(eval_types)))\n# if eval_types == ['proposal_fast']:\n# result_file = args.out\n# coco_eval(result_file, eval_types, dataset.coco)\n# else:\n# if not isinstance(outputs[0], dict):\n# result_files = results2json(dataset, outputs, args.out)\n# coco_eval(result_files, eval_types, dataset.coco)\n# else:\n# for name in outputs[0]:\n# print('\\nEvaluating {}'.format(name))\n# outputs_ = [out[name] for out in outputs]\n# result_file = args.out + '.{}'.format(name)\n# result_files = results2json(dataset, outputs_,\n# result_file)\n# coco_eval(result_files, eval_types, dataset.coco)\n\n\nif __name__ == '__main__':\n main()\n", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport math\n\nfrom ..registry import LOSSES\n\n# def gaussian_radius(det_size, min_overlap=0.7):\n# height, width = det_size\n\n# a1 = 1\n# b1 = (height + width)\n# c1 = width * height 
* (1 - min_overlap) / (1 + min_overlap)\n# sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)\n# r1 = (b1 + sq1) / 2\n\n# a2 = 4\n# b2 = 2 * (height + width)\n# c2 = (1 - min_overlap) * width * height\n# sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)\n# r2 = (b2 + sq2) / 2\n\n# a3 = 4 * min_overlap\n# b3 = -2 * min_overlap * (height + width)\n# c3 = (min_overlap - 1) * width * height\n# sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)\n# r3 = (b3 + sq3) / 2\n# return min(r1, r2, r3)\n\n# def gaussian2D(shape, sigma=1):\n# m, n = [(ss - 1.) / 2. for ss in shape]\n# y, x = np.ogrid[-m:m+1,-n:n+1]\n\n# h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\n# h[h < np.finfo(h.dtype).eps * h.max()] = 0\n# return h\n\n# def draw_umich_gaussian(heatmap, center, radius, k=1):\n# diameter = 2 * radius + 1\n# gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)\n\n# x, y = int(center[0]), int(center[1])\n\n# height, width = heatmap.shape[0:2]\n\n# left, right = min(x, radius), min(width - x, radius + 1)\n# top, bottom = min(y, radius), min(height - y, radius + 1)\n\n# masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]\n# masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]\n# if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug\n# np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)\n# return heatmap\n\ndef _neg_loss(pred, gt):\n ''' Modified focal loss. Exactly the same as CornerNet.\n Runs faster and costs a little bit more memory\n Arguments:\n pred (batch x c x h x w)\n gt_regr (batch x c x h x w)\n '''\n pos_inds = gt.eq(1).float()\n neg_inds = gt.lt(1).float()\n \n# print(pred) # 几乎全部是0\n neg_weights = torch.pow(1 - gt, 4)\n\n loss = 0\n\n pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds\n neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds\n\n num_pos = pos_inds.float().sum()\n pos_loss = pos_loss.sum()\n neg_loss = neg_loss.sum()\n \n# print(\"num_pos:\", num_pos)\n# print(\"pos_loss:\", pos_loss)\n# print(\"neg_loss:\", neg_loss)\n if num_pos == 0:\n loss = loss - neg_loss\n else:\n loss = loss - (pos_loss + neg_loss) / num_pos\n return loss\n\nclass FocalLoss(nn.Module):\n '''nn.Module warpper for focal loss'''\n def __init__(self):\n super(FocalLoss, self).__init__()\n self.neg_loss = _neg_loss\n\n def forward(self, out, target):\n return self.neg_loss(out, target)\n\ndef _gather_feat(feat, ind, mask=None):\n dim = feat.size(2)\n ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)\n feat = feat.gather(1, ind)\n if mask is not None:\n mask = mask.unsqueeze(2).expand_as(feat)\n feat = feat[mask]\n feat = feat.view(-1, dim)\n return feat\n\ndef _tranpose_and_gather_feat(feat, ind):\n feat = feat.permute(0, 2, 3, 1).contiguous()\n feat = feat.view(feat.size(0), -1, feat.size(3))\n feat = _gather_feat(feat, ind)\n return feat\n\nclass RegL1Loss(nn.Module):\n def __init__(self):\n super(RegL1Loss, self).__init__()\n\n def forward(self, output, mask, ind, target):\n pred = _tranpose_and_gather_feat(output, ind)\n mask = mask.unsqueeze(2).expand_as(pred).float()\n # print(target)\n # import pdb; pdb.set_trace()\n # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')\n # loss = F.l1_loss(pred * mask, target * mask, size_average=False)\n loss = F.l1_loss(pred * mask, target * mask, reduction='sum')\n loss = loss / (mask.sum() + 1e-4)\n return loss\n\[email protected]_module\nclass CtdetLoss(torch.nn.Module):\n def __init__(self):\n super(CtdetLoss, 
self).__init__()\n # self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()\n # self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \\\n # RegLoss() if opt.reg_loss == 'sl1' else None\n # self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \\\n # NormRegL1Loss() if opt.norm_wh else \\\n # RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg\n self.crit = FocalLoss()\n self.crit_reg = RegL1Loss()\n # self.crit_wh = self.crit_reg\n # self.opt = opt\n # opts\n self.num_stacks = 1\n self.wh_weight = 0.1\n self.off_weight = 1\n self.hm_weight = 1\n\n def forward(self, outputs, **kwargs):\n batch = kwargs\n hm_loss, wh_loss, off_loss = 0, 0, 0\n for s in range(self.num_stacks):\n output = outputs[s]\n # for key, value in output.items():\n # print(key, value.shape)\n # if not opt.mse_loss:\n output['hm'] = torch.clamp(output['hm'].sigmoid_(), min=1e-4, max=1-1e-4)\n# output['hm'] = output['hm'].sigmoid_()\n # if opt.eval_oracle_hm:\n # output['hm'] = batch['hm']\n # if opt.eval_oracle_wh:\n # output['wh'] = torch.from_numpy(gen_oracle_map(\n # batch['wh'].detach().cpu().numpy(),\n # batch['ind'].detach().cpu().numpy(),\n # output['wh'].shape[3], output['wh'].shape[2])).to(opt.device)\n # if opt.eval_oracle_offset:\n # output['reg'] = torch.from_numpy(gen_oracle_map(\n # batch['reg'].detach().cpu().numpy(),\n # batch['ind'].detach().cpu().numpy(),\n # output['reg'].shape[3], output['reg'].shape[2])).to(opt.device)\n\n hm_loss += self.crit(output['hm'], batch['hm']) / self.num_stacks\n if self.wh_weight > 0:\n wh_loss += self.crit_reg(\n output['wh'], batch['reg_mask'],\n batch['ind'], batch['wh']) / self.num_stacks\n\n if self.off_weight > 0:\n off_loss += self.crit_reg(output['reg'], batch['reg_mask'],\n batch['ind'], batch['reg']) / self.num_stacks\n\n # loss = self.hm_weight * hm_loss + self.wh_weight * wh_loss + \\\n # self.off_weight * off_loss\n losses = {'hm_loss': self.hm_weight * hm_loss,\n 'wh_loss': self.wh_weight * wh_loss, 'off_loss': self.off_weight * off_loss}\n # loss_stats = {'loss': loss, 'hm_loss': hm_loss,\n # 'wh_loss': wh_loss, 'off_loss': off_loss}\n # return loss, loss_stats\n return losses", "import torch\nimport torch.nn as nn\nfrom mmcv.cnn import normal_init\nimport numpy as np\nimport cv2\nimport math\n#import torch.nn.functional as F\n\nfrom mmdet.core import multi_apply, multiclass_nms, distance2bbox, force_fp32\nfrom ..builder import build_loss\nfrom ..registry import HEADS\nfrom ..utils import bias_init_with_prob, Scale, ConvModule\n\nINF = 1e8\n\n\[email protected]_module\nclass WeightCenterHead(nn.Module):\n\n def __init__(self,\n num_classes, # init 80\n in_channels,\n feat_channels=256,\n stacked_convs=1,\n strides=(4, 8, 16, 32, 64),\n regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),\n (512, INF)),\n use_cross = False,\n loss_hm = dict(\n type=\"CenterFocalLoss\"\n ), # 这里实现 CenterFocalLoss\n loss_wh = dict(\n type=\"L1Loss\",\n loss_weight=0.1\n ),\n loss_offset = dict(\n type=\"L1Loss\",\n loss_weight=1.0\n ),\n conv_cfg=None,\n norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):\n super(WeightCenterHead, self).__init__()\n\n self.num_classes = num_classes\n # self.cls_out_channels = num_classes - 1\n self.cls_out_channels = num_classes\n self.in_channels = in_channels\n self.feat_channels = feat_channels\n self.stacked_convs = stacked_convs\n self.strides = strides\n self.regress_ranges = regress_ranges\n self.featmap_sizes = None\n self.loss_hm = build_loss(loss_hm)\n self.loss_wh = 
build_loss(loss_wh)\n self.loss_offset = build_loss(loss_offset)\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.fp16_enabled = False\n self.use_cross = use_cross\n\n self._init_layers()\n\n def _init_layers(self):\n self.cls_convs = nn.ModuleList()\n self.wh_convs = nn.ModuleList()\n self.offset_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.norm_cfg is None))\n self.wh_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.norm_cfg is None))\n self.offset_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.norm_cfg is None))\n self.center_hm = nn.Conv2d(self.feat_channels, self.cls_out_channels, 3, padding=1, bias=True)\n self.center_wh = nn.Conv2d(self.feat_channels, 2, 3, padding=1, bias=True)\n self.center_offset = nn.Conv2d(self.feat_channels, 2, 3, padding=1, bias=True)\n self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\n\n def init_weights(self):\n# for m in self.cls_convs:\n# normal_init(m.conv, std=0.01)\n# for m in self.wh_convs:\n# normal_init(m.conv, std=0.01)\n# for m in self.offset_convs:\n# normal_init(m.conv, std=0.01)\n \n #bias_hm = bias_init_with_prob(0.01) # 这里的初始化?\n #normal_init(self.center_hm, std=0.01, bias=bias_hm)\n self.center_hm.bias.data.fill_(-2.19)\n nn.init.constant_(self.center_wh.bias, 0)\n nn.init.constant_(self.center_offset.bias, 0)\n# normal_init(self.center_hm, std=0.01)\n# normal_init(self.center_wh, std=0.01)\n# normal_init(self.center_offset, std=0.01)\n\n def forward(self, feats):\n return multi_apply(self.forward_single, feats, self.scales)\n\n def forward_single(self, x, scale):\n cls_feat = x\n wh_feat = x\n offset_feat = x\n\n for cls_layer in self.cls_convs:\n cls_feat = cls_layer(cls_feat)\n cls_score = self.center_hm(cls_feat)\n\n for wh_layer in self.wh_convs:\n wh_feat = wh_layer(wh_feat)\n wh_pred = self.center_wh(wh_feat)\n \n for offset_layer in self.offset_convs:\n offset_feat = offset_layer(offset_feat)\n offset_pred = self.center_offset(offset_feat)\n \n return cls_score, wh_pred, offset_pred\n\n @force_fp32(apply_to=('cls_scores', 'wh_preds', 'offset_preds'))\n def loss(self,\n cls_scores,\n wh_preds,\n offset_preds,\n gt_bboxes,\n gt_labels,\n img_metas,\n cfg,\n gt_bboxes_ignore=None):\n\n assert len(cls_scores) == len(wh_preds) == len(offset_preds)\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n self.featmap_sizes = featmap_sizes\n \n all_level_points = self.get_points(featmap_sizes, offset_preds[0].dtype,\n offset_preds[0].device)\n #print(img_metas)\n #self.c = img_metas['c']\n #self.s = img_metas['s']\n self.tensor_dtype = offset_preds[0].dtype\n self.tensor_device = offset_preds[0].device\n heatmaps, wh_targets, offset_targets = self.center_target(gt_bboxes, gt_labels, img_metas, all_level_points) # 所有层的concat的, 每张图对应一个\n\n num_imgs = cls_scores[0].size(0) # batch_size\n #print(num_imgs)\n # flatten cls_scores, bbox_preds and centerness\n flatten_cls_scores = [\n cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)\n for cls_score in cls_scores\n ] # cls_scores(num_levels, batch_size, 80, h, w) => (num_levels, batch_size * w * h, 80)\n 
flatten_wh_preds = [\n wh_pred.permute(0, 2, 3, 1).reshape(-1, 2) # batchsize, h, w, 2 => batchsize, h, w, 2\n for wh_pred in wh_preds\n ]\n flatten_offset_preds = [\n offset_pred.permute(0, 2, 3, 1).reshape(-1, 2)\n for offset_pred in offset_preds\n ]\n \n flatten_cls_scores = torch.cat(flatten_cls_scores)\n flatten_wh_preds = torch.cat(flatten_wh_preds)\n flatten_offset_preds = torch.cat(flatten_offset_preds)\n \n # targets\n flatten_heatmaps = torch.cat(heatmaps)\n flatten_wh_targets = torch.cat(wh_targets) # torch.Size([all_level_points, 2])\n flatten_offset_targets = torch.cat(offset_targets)\n\n # repeat points to align with bbox_preds\n # flatten_points = torch.cat(\n # [points.repeat(num_imgs, 1) for points in all_level_points])\n\n # pos_inds = flatten_labels.nonzero().reshape(-1)\n #print(flatten_wh_targets.shape)\n #print(flatten_wh_targets.nonzero())\n center_inds = flatten_wh_targets[...,0].nonzero().reshape(-1) \n #print(center_inds)\n num_center = len(center_inds)\n #print(num_center)\n \n # what about use the centerness * labels to indict an object\n # loss_cls = self.loss_cls(\n # flatten_cls_scores, flatten_labels, # labels gt is small area\n # avg_factor=num_pos + num_imgs) # avoid num_pos is 0\n flatten_cls_scores = torch.clamp(flatten_cls_scores.sigmoid_(), min=1e-4, max=1-1e-4)\n loss_hm = self.loss_hm(flatten_cls_scores, flatten_heatmaps)\n \n pos_wh_targets = flatten_wh_targets[center_inds]\n #print(pos_wh_targets.shape)\n pos_wh_preds = flatten_wh_preds[center_inds]\n \n pos_offset_preds = flatten_offset_preds[center_inds]\n pos_offset_targets = flatten_offset_targets[center_inds]\n \n if num_center > 0:\n # TODO: use the iou loss\n # center_points = flatten_points[center_inds]\n # center_decoded_bbox_preds = wh_offset2bbox(center_points, pos_wh_preds, pos_offset_preds)\n # center_decoded_bbox_targets = wh_offset2bbox(center_points, pos_wh_targets, pos_offset_targets)\n loss_wh = self.loss_wh(pos_wh_preds, pos_wh_targets, avg_factor=num_center + num_imgs)\n #loss_wh = F.l1_loss(pos_wh_preds, pos_wh_targets, reduction='sum') / (num_center + num_imgs)\n #loss_wh = 0.1 * loss_wh\n loss_offset = self.loss_offset(pos_offset_preds, pos_offset_targets, avg_factor=num_center + num_imgs)\n else:\n loss_wh = pos_wh_preds.sum()\n loss_offset = pos_offset_preds.sum()\n \n return dict(\n loss_hm = loss_hm,\n loss_wh = loss_wh,\n loss_offset = loss_offset)\n\n def get_points(self, featmap_sizes, dtype, device):\n \"\"\"Get points according to feature map sizes.\n\n Args:\n featmap_sizes (list[tuple]): Multi-level feature map sizes.\n dtype (torch.dtype): Type of points.\n device (torch.device): Device of points.\n\n Returns:\n tuple: points of each image.\n \"\"\"\n mlvl_points = []\n for i in range(len(featmap_sizes)):\n mlvl_points.append(\n self.get_points_single(featmap_sizes[i], self.strides[i],\n dtype, device))\n return mlvl_points\n\n def get_points_single(self, featmap_size, stride, dtype, device):\n h, w = featmap_size\n x_range = torch.arange(\n 0, w * stride, stride, dtype=dtype, device=device) # 以一定间隔取x的值\n y_range = torch.arange(\n 0, h * stride, stride, dtype=dtype, device=device)\n y, x = torch.meshgrid(y_range, x_range) # 得到featmap的所有点\n points = torch.stack(\n (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2\n return points\n\n def center_target(self, gt_bboxes_list, gt_labels_list, img_metas, all_level_points):\n\n assert len(self.featmap_sizes) == len(self.regress_ranges)\n\n # get heatmaps and targets of each image\n # heatmaps in heatmaps_list: 
[num_points, 80]\n # wh_targets: [num_points, 2] => [batch_size, num_points, 2]\n heatmaps_list, wh_targets_list, offset_targets_list = multi_apply(\n self.center_target_single,\n gt_bboxes_list,\n gt_labels_list,\n img_metas\n )\n\n # split to per img, per level\n num_points = [center.size(0) for center in all_level_points] # 每一层多少个点 all_level_points [[12414, 2], []]\n \n heatmaps_list = [heatmaps.split(num_points, 0) for heatmaps in heatmaps_list]\n wh_targets_list = [wh_targets.split(num_points, 0) for wh_targets in wh_targets_list]\n offset_targets_list = [offset_targets.split(num_points, 0) for offset_targets in offset_targets_list]\n\n # concat per level image, 同一层的concat # [(batch_size,featmap_size[1]), ...)\n concat_lvl_heatmaps = []\n concat_lvl_wh_targets = []\n concat_lvl_offset_targets = []\n num_levels = len(self.featmap_sizes)\n for i in range(num_levels):\n concat_lvl_heatmaps.append(\n torch.cat([heatmaps[i] for heatmaps in heatmaps_list])) # (num_levels, batch_size * w * h, 80)\n concat_lvl_wh_targets.append(\n torch.cat(\n [wh_targets[i] for wh_targets in wh_targets_list]))\n concat_lvl_offset_targets.append(\n torch.cat(\n [offset_targets[i] for offset_targets in offset_targets_list]))\n return concat_lvl_heatmaps, concat_lvl_wh_targets, concat_lvl_offset_targets\n\n\n \n def center_target_single(self, gt_bboxes, gt_labels, img_meta):\n \"\"\"\n single image\n gt_bboxes:torch.Size([6, 4])\n gt_labels:torch.Size([6]) tensor([34, 34, 34, 34, 34, 34], device='cuda:0')\n featmap_sizes:(list[tuple]): Multi-level feature map sizes.\n regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),(512, INF))\n \"\"\"\n # transform the gt_bboxes, gt_labels to numpy\n gt_bboxes = gt_bboxes.data.cpu().numpy()\n gt_labels = gt_labels.data.cpu().numpy()\n \n #print(gt_bboxes, gt_labels)\n num_objs = gt_labels.shape[0]\n #print(num_objs)\n # heatmaps [level1, level2, level3, level4, level5]\n num_levels = len(self.featmap_sizes)\n\n heatmaps_targets = []\n wh_targets = []\n offset_targets = []\n # get the target shape for each image\n for i in range(num_levels):\n h, w = self.featmap_sizes[i]\n hm = np.zeros((self.cls_out_channels, h, w), dtype=np.float32)\n heatmaps_targets.append(hm)\n wh = np.zeros((h, w, 2), dtype=np.float32)\n wh_targets.append(wh)\n offset = np.zeros((h, w, 2), dtype=np.float32)\n offset_targets.append(offset)\n\n for k in range(num_objs):\n bbox = gt_bboxes[k]\n cls_id = gt_labels[k]\n \n if img_meta['flipped']:\n bbox[[0, 2]] = img_meta['width'] - bbox[[2, 0]] - 1\n \n # condition: in the regress_ranges\n origin_h, origin_w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n #max_h_w = max(h, w) / 2\n max_h_w = max(origin_h, origin_w)\n #max_h_w = max(origin_h, origin_w) * 2 # 最长边为32在P2\n # 根据max_h_w在哪一层将output设置为当前层的\n index_levels = []\n #index_level = 0\n for i in range(num_levels):\n min_regress_distance, max_regress_distance = self.regress_ranges[i]\n if not self.use_cross and (max_h_w > min_regress_distance) and (max_h_w <= max_regress_distance):\n index_levels.append(i)\n break\n \n if self.use_cross:\n min_regress_distance = min_regress_distance * 0.8\n max_regress_distance = max_regress_distance * 1.3\n if (max_h_w > min_regress_distance) and (max_h_w <= max_regress_distance):\n index_levels.append(i)\n \n for index_level in index_levels:\n output_h, output_w = self.featmap_sizes[index_level]\n #print(output_h, output_w)\n hm = heatmaps_targets[index_level]\n wh = wh_targets[index_level]\n offset = offset_targets[index_level]\n \n # c, s is passed by meta\n 
trans_output = get_affine_transform(img_meta['c'], img_meta['s'], 0, [output_w, output_h])\n bbox[:2] = affine_transform(bbox[:2], trans_output)\n bbox[2:] = affine_transform(bbox[2:], trans_output)\n bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1) #x1, x2\n bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n #print(h, w)\n # 转换到当层\n if h > 0 and w > 0:\n radius = gaussian_radius((math.ceil(h), math.ceil(w)))\n radius = max(0, int(radius))\n ct = np.array(\n [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n #print(ct)\n ct_int = ct.astype(np.int32)\n #hm[cls_id, ct_int[1], ct_int[0]] = 1\n\n #if (ct_int[1] - 1) > 0:\n # hm[cls_id, ct_int[1] - 1, ct_int[0]] = 0.5\n #if (ct_int[0] - 1) > 0:\n # hm[cls_id, ct_int[1], ct_int[0] - 1] = 0.5\n #if (ct_int[1] + 1) < output_h:\n # hm[cls_id, ct_int[1] + 1, ct_int[0]] = 0.5\n #if (ct_int[0] + 1) < output_w:\n # hm[cls_id, ct_int[1], ct_int[0] + 1] = 0.5\n draw_umich_gaussian(hm[cls_id], ct_int, radius)\n\n h, w = 1. * h, 1. * w\n offset_count = ct - ct_int # h, w\n # ct_int即表明在featmap的位置 ct_int[1] * output_w + ct_int[0] \n # TODO:如果当前位置有物体的中心,现在是直接覆盖\n # 这里设置监督信号,第1位表示w,第2位表示h\n # 这里对featmap进行缩放?\n # wh[ct_int[1], ct_int[0], 0] = w / output_w# output_h, output_w <= y, x\n # wh[ct_int[1], ct_int[0], 1] = h / output_h\n # offset[ct_int[1], ct_int[0], 0] = offset_count[0] / output_w\n # offset[ct_int[1], ct_int[0], 0] = offset_count[1] / output_h\n wh[ct_int[1], ct_int[0], 0] = w * (2 ** index_level) # baseline is P2\n wh[ct_int[1], ct_int[0], 1] = h * (2 ** index_level)\n offset[ct_int[1], ct_int[0], 0] = offset_count[0] * (2 ** index_level) \n offset[ct_int[1], ct_int[0], 0] = offset_count[1] * (2 ** index_level)\n \n \n heatmaps_targets[index_level] = hm\n wh_targets[index_level] = wh\n offset_targets[index_level] = offset\n\n flatten_heatmaps_targets = [\n hm.transpose(1, 2, 0).reshape(-1, self.cls_out_channels)\n for hm in heatmaps_targets\n ]\n #for i in range(len(flatten_heatmaps_targets)):\n # print(flatten_heatmaps_targets[i].shape)\n \n heatmaps_targets = np.concatenate(flatten_heatmaps_targets, axis=0) \n #print(heatmaps_targets.shape) # (13343, 80)\n #print(heatmaps_targets)\n \n flatten_wh_targets = [\n wh.reshape(-1, 2) for wh in wh_targets\n ]\n wh_targets = np.concatenate(flatten_wh_targets)\n \n flatten_offset_targets = [\n offset.reshape(-1, 2) for offset in offset_targets\n ]\n offset_targets = np.concatenate(flatten_offset_targets)\n\n # transform the heatmaps_targets, wh_targets, offset_targets into tensor\n heatmaps_targets = torch.from_numpy(np.stack(heatmaps_targets))\n heatmaps_targets = torch.tensor(heatmaps_targets.detach(), dtype=self.tensor_dtype, device=self.tensor_device)\n wh_targets = torch.from_numpy(np.stack(wh_targets))\n wh_targets = torch.tensor(wh_targets.detach(), dtype=self.tensor_dtype, device=self.tensor_device)\n offset_targets = torch.from_numpy(np.stack(offset_targets))\n offset_targets = torch.tensor(offset_targets.detach(), dtype=self.tensor_dtype, device=self.tensor_device)\n \n return heatmaps_targets, wh_targets, offset_targets\n\n # test use\n @force_fp32(apply_to=('cls_scores', 'wh_preds', 'offset_preds'))\n def get_bboxes(self,\n cls_scores,\n wh_preds,\n offset_preds,\n img_metas,\n cfg):\n assert len(cls_scores) == len(wh_preds) == len(offset_preds)\n # cls_scores => [num_levels] => [batch featmap] => [batch, 80, h, w]\n # wh_preds => [num_levels] => [featmap] => [2, h, w]\n # offset_preds => [num_levels] => [featmap] => 
[2, h, w]\n num_levels = len(cls_scores)\n\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n\n result_list = []\n #print(cls_scores[0].shape) # torch.Size([1, 80, 84, 56])\n #print(img_metas)\n\n for img_id in range(len(img_metas)): # 每个batch中id\n cls_score_list = [\n cls_scores[i][img_id].detach() for i in range(num_levels)\n ] # =>[num_levels] => [80, h, w]\n wh_pred_list = [\n wh_preds[i][img_id].detach() for i in range(num_levels)\n ]\n offset_pred_list = [\n offset_preds[i][img_id].detach() for i in range(num_levels)\n ]\n #img_shape = img_metas[img_id]['img_shape']\n scale_factor = img_metas[img_id]['scale_factor']\n c = img_metas[img_id]['c']\n s = img_metas[img_id]['s']\n det_bboxes = self.get_bboxes_single(cls_score_list, wh_pred_list,\n offset_pred_list,\n featmap_sizes, c, s,\n scale_factor, cfg) # 对每一张图像进行解调\n result_list.append(det_bboxes)\n return result_list # [batch_size]\n\n def get_bboxes_single(self,\n cls_scores,\n wh_preds,\n offset_preds,\n featmap_sizes,\n c, \n s,\n scale_factor,\n cfg):\n assert len(cls_scores) == len(wh_preds) == len(offset_preds) == len(featmap_sizes)\n \n detections = []\n for cls_score, wh_pred, offset_pred, featmap_size in zip(\n cls_scores, wh_preds, offset_preds, featmap_sizes): # 取出每一层的点\n assert cls_score.size()[-2:] == wh_pred.size()[-2:] == offset_pred.size()[-2:] == featmap_size\n \n output_h, output_w = featmap_size\n index_level = int((512 / 4) / output_h) - 1\n \n wh_pred = wh_pred / (2 ** index_level)\n offset_pred = offset_pred / (2 ** index_level)\n \n #实际上得到了每一层的hm, wh, offset\n hm = torch.clamp(cls_score.sigmoid_(), min=1e-4, max=1-1e-4).unsqueeze(0) # 增加一个纬度\n #wh_pred[0, :, :] = wh_pred[0, :, :] * output_w\n #wh_pred[1, :, :] = wh_pred[1, :, :] * output_h # 2, output_h, output_w\n wh = wh_pred.unsqueeze(0) # 这里需要乘以featuremap的尺度\n #offset_pred[0, : ,:] = offset_pred[0, : ,:] * output_w\n #offset_pred[1, : ,:] = offset_pred[1, : ,:] * output_h\n reg = offset_pred.unsqueeze(0)\n \n dets = ctdet_decode(hm, wh, reg=reg, K=40)\n dets = post_process(dets, c, s, output_h, output_w, scale=scale_factor, num_classes=self.num_classes)\n detections.append(dets)\n \n results = merge_outputs(detections, self.num_classes) # 单张图的结果\n\n return results\n\n#num_classes = 80\n\ndef gaussian_radius(det_size, min_overlap=0.7):\n height, width = det_size\n \n a1 = 1\n b1 = (height + width)\n c1 = width * height * (1 - min_overlap) / (1 + min_overlap)\n sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)\n r1 = (b1 + sq1) / 2\n \n a2 = 4\n b2 = 2 * (height + width)\n c2 = (1 - min_overlap) * width * height\n sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)\n r2 = (b2 + sq2) / 2\n \n a3 = 4 * min_overlap\n b3 = -2 * min_overlap * (height + width)\n c3 = (min_overlap - 1) * width * height\n sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)\n r3 = (b3 + sq3) / 2\n return min(r1, r2, r3)\n\ndef gaussian_small_radius(det_size, min_overlap=0.7):\n height, width = det_size\n \n a1 = 1\n b1 = (height + width)\n c1 = width * height * (1 - min_overlap) / (1 + min_overlap)\n sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)\n r1 = (b1 - sq1) / (2 * a1)\n \n a2 = 4\n b2 = 2 * (height + width)\n c2 = (1 - min_overlap) * width * height\n sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)\n r2 = (b2 - sq2) / (2 * a2)\n \n a3 = 4 * min_overlap\n b3 = -2 * min_overlap * (height + width)\n c3 = (min_overlap - 1) * width * height\n sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)\n r3 = (b3 + sq3) / (2 * a3)\n return min(r1, r2, r3)\n\ndef gaussian2D(shape, sigma=1):\n m, n = [(ss - 1.) / 2. 
for ss in shape]\n y, x = np.ogrid[-m:m+1,-n:n+1]\n\n h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n return h\n\ndef draw_umich_gaussian(heatmap, center, radius, k=1):\n diameter = 2 * radius + 1\n gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)\n \n x, y = int(center[0]), int(center[1])\n \n height, width = heatmap.shape[0:2]\n \n left, right = min(x, radius), min(width - x, radius + 1)\n top, bottom = min(y, radius), min(height - y, radius + 1)\n \n masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]\n masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]\n if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug\n np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)\n return heatmap\n\ndef affine_transform(pt, t):\n new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T\n new_pt = np.dot(t, new_pt)\n return new_pt[:2]\n\ndef get_affine_transform(center,\n scale,\n rot,\n output_size,\n shift=np.array([0, 0], dtype=np.float32),\n inv=0):\n if not isinstance(scale, np.ndarray) and not isinstance(scale, list):\n scale = np.array([scale, scale], dtype=np.float32)\n\n scale_tmp = scale\n src_w = scale_tmp[0]\n dst_w = output_size[0]\n dst_h = output_size[1]\n\n rot_rad = np.pi * rot / 180\n src_dir = get_dir([0, src_w * -0.5], rot_rad)\n dst_dir = np.array([0, dst_w * -0.5], np.float32)\n\n src = np.zeros((3, 2), dtype=np.float32)\n dst = np.zeros((3, 2), dtype=np.float32)\n src[0, :] = center + scale_tmp * shift\n src[1, :] = center + src_dir + scale_tmp * shift\n dst[0, :] = [dst_w * 0.5, dst_h * 0.5]\n dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir\n\n src[2:, :] = get_3rd_point(src[0, :], src[1, :])\n dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])\n\n if inv:\n trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n else:\n trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n\n return trans\n\ndef get_dir(src_point, rot_rad):\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n\n src_result = [0, 0]\n src_result[0] = src_point[0] * cs - src_point[1] * sn\n src_result[1] = src_point[0] * sn + src_point[1] * cs\n\n return src_result\n\ndef get_3rd_point(a, b):\n direct = a - b\n return b + np.array([-direct[1], direct[0]], dtype=np.float32)\n\ndef ctdet_decode(heat, wh, reg=None, cat_spec_wh=False, K=40):\n batch, cat, height, width = heat.size() # 1, 80, 128, 128\n \n #print(\"batch, cat, height, width\\n\", batch, cat, height, width)\n \n if height * width <= K:\n K = height * width \n #print(\"k:\", K)\n \n heat = _nms(heat)\n \n scores, inds, clses, ys, xs = _topk(heat, K=K)\n \n if reg is not None:\n reg = _tranpose_and_gather_feat(reg, inds)\n reg = reg.view(batch, K, 2)\n xs = xs.view(batch, K, 1) + reg[:, :, 0:1]\n ys = ys.view(batch, K, 1) + reg[:, :, 1:2]\n else:\n xs = xs.view(batch, K, 1) + 0.5\n ys = ys.view(batch, K, 1) + 0.5\n wh = _tranpose_and_gather_feat(wh, inds) # inds 对应 h, w的尺度\n if cat_spec_wh:\n wh = wh.view(batch, K, cat, 2)\n clses_ind = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2).long()\n wh = wh.gather(2, clses_ind).view(batch, K, 2)\n else:\n wh = wh.view(batch, K, 2)\n \n clses = clses.view(batch, K, 1).float()\n scores = scores.view(batch, K, 1) # 0, 1, 2\n \n bboxes = torch.cat([xs - wh[..., 0:1] / 2, \n ys - wh[..., 1:2] / 2,\n xs + wh[..., 0:1] / 2, \n ys + wh[..., 1:2] / 2], dim=2)\n \n detections = torch.cat([bboxes, scores, clses], dim=2)\n return 
detections\n\ndef _nms(heat, kernel=3):\n pad = (kernel - 1) // 2\n hmax = nn.functional.max_pool2d(\n heat, (kernel, kernel), stride=1, padding=pad)\n keep = (hmax == heat).float()\n return heat * keep\n\ndef _gather_feat(feat, ind, mask=None):\n dim = feat.size(2)\n ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)\n feat = feat.gather(1, ind)\n if mask is not None:\n mask = mask.unsqueeze(2).expand_as(feat)\n feat = feat[mask]\n feat = feat.view(-1, dim)\n return feat\n\ndef _tranpose_and_gather_feat(feat, ind):\n feat = feat.permute(0, 2, 3, 1).contiguous()\n feat = feat.view(feat.size(0), -1, feat.size(3))\n feat = _gather_feat(feat, ind)\n return feat\n\ndef _topk(scores, K=40):\n batch, cat, height, width = scores.size() # 1, 80,height, width\n #print(\"batch, cat, height, width\\n\", batch, cat, height, width)\n #print(\"k:\", K)\n\n topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)\n\n topk_inds = topk_inds % (height * width)\n topk_ys = (topk_inds / width).int().float() # y-> h, x-> w\n topk_xs = (topk_inds % width).int().float()\n\n topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)\n\n topk_clses = (topk_ind / K).int()\n topk_inds = _gather_feat(\n topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)\n topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)\n topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)\n\n return topk_score, topk_inds, topk_clses, topk_ys, topk_xs\n\ndef post_process(dets, c, s, out_height, out_width, scale, num_classes):\n dets = dets.detach().cpu().numpy()\n# print(\"dets\", dets) # (1, 100, 6)\n\n dets = dets.reshape(1, -1, dets.shape[2]) # (x1, y1, x2, y2)\n\n dets = ctdet_post_process(\n dets.copy(), [c], [s],\n out_height, out_width, num_classes)\n \n for j in range(1, num_classes + 1):\n dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)\n dets[0][j][:, :4] /= scale\n return dets[0]\n\ndef ctdet_post_process(dets, c, s, h, w, num_classes):\n ret = []\n# print(dets.shape) # (1, 100, 6)\n# print(c)\n for i in range(dets.shape[0]):\n top_preds = {}\n dets[i, :, :2] = transform_preds(\n dets[i, :, 0:2], c[i], s[i], (w, h))\n dets[i, :, 2:4] = transform_preds(\n dets[i, :, 2:4], c[i], s[i], (w, h))\n\n classes = dets[i, :, -1] # 类别这里是80\n \n for j in range(num_classes):\n inds = (classes == j)\n top_preds[j + 1] = np.concatenate([\n dets[i, inds, :4].astype(np.float32),\n dets[i, inds, 4:5].astype(np.float32)], axis=1).tolist() # 这里将框按照类别进行分类\n ret.append(top_preds)\n\n return ret \n \ndef merge_outputs(detections, num_classes):\n# print(detections)\n results = {}\n max_per_image = 100\n for j in range(1, num_classes + 1):\n results[j] = np.concatenate(\n [detection[j] for detection in detections], axis=0).astype(np.float32)\n# if len(self.scales) > 1 or self.opt.nms:\n results[j] = soft_nms(results[j], Nt=0.5, method=2, threshold=0.01)\n# print(results)\n scores = np.hstack([results[j][:, 4] for j in range(1, num_classes + 1)])\n\n if len(scores) > max_per_image:\n kth = len(scores) - max_per_image\n thresh = np.partition(scores, kth)[kth]\n for j in range(1, num_classes + 1):\n keep_inds = (results[j][:, 4] >= thresh)\n results[j] = results[j][keep_inds]\n# print(\"after merge out\\n\", results)\n return results2coco_boxes(results, num_classes)\n\ndef results2coco_boxes(results, num_classes): \n \"\"\"Convert detection results to a list of numpy arrays.\n\n Args:\n bboxes (Tensor): shape (n, 5)\n labels (Tensor): shape (n, )\n num_classes (int): 
class number, including background class\n\n Returns:\n list(ndarray): bbox results of each class\n \"\"\"\n bboxes = [0 for i in range(num_classes)]\n for j in range(1, num_classes + 1):\n if len(results[j]) == 0:\n bboxes[j - 1] = np.zeros((0, 5), dtype=np.float32)\n continue\n bboxes[j - 1] = results[j]\n# print(bboxes) # xyxy\n return bboxes\n\n\ndef soft_nms(boxes, sigma=0.5, Nt=0.3, threshold=0.01, method=0):\n N = boxes.shape[0]\n# cdef float iw, ih, box_area\n# cdef float ua\n# cdef int \n pos = 0\n# cdef float \n maxscore = 0\n# cdef int \n maxpos = 0\n# cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov\n\n for i in range(N):\n maxscore = boxes[i, 4]\n maxpos = i\n\n tx1 = boxes[i,0]\n ty1 = boxes[i,1]\n tx2 = boxes[i,2]\n ty2 = boxes[i,3]\n ts = boxes[i,4]\n\n pos = i + 1\n # get max box\n while pos < N:\n if maxscore < boxes[pos, 4]:\n maxscore = boxes[pos, 4]\n maxpos = pos\n pos = pos + 1\n\n # add max box as a detection \n boxes[i,0] = boxes[maxpos,0]\n boxes[i,1] = boxes[maxpos,1]\n boxes[i,2] = boxes[maxpos,2]\n boxes[i,3] = boxes[maxpos,3]\n boxes[i,4] = boxes[maxpos,4]\n\n # swap ith box with position of max box\n boxes[maxpos,0] = tx1\n boxes[maxpos,1] = ty1\n boxes[maxpos,2] = tx2\n boxes[maxpos,3] = ty2\n boxes[maxpos,4] = ts\n\n tx1 = boxes[i,0]\n ty1 = boxes[i,1]\n tx2 = boxes[i,2]\n ty2 = boxes[i,3]\n ts = boxes[i,4]\n\n pos = i + 1\n # NMS iterations, note that N changes if detection boxes fall below threshold\n while pos < N:\n x1 = boxes[pos, 0]\n y1 = boxes[pos, 1]\n x2 = boxes[pos, 2]\n y2 = boxes[pos, 3]\n s = boxes[pos, 4]\n\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n iw = (min(tx2, x2) - max(tx1, x1) + 1)\n if iw > 0:\n ih = (min(ty2, y2) - max(ty1, y1) + 1)\n if ih > 0:\n ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)\n ov = iw * ih / ua #iou between max box and detection box\n\n if method == 1: # linear\n if ov > Nt: \n weight = 1 - ov\n else:\n weight = 1\n elif method == 2: # gaussian\n weight = np.exp(-(ov * ov)/sigma)\n else: # original NMS\n if ov > Nt: \n weight = 0\n else:\n weight = 1\n\n boxes[pos, 4] = weight*boxes[pos, 4]\n \n # if box score falls below threshold, discard the box by swapping with last box\n # update N\n if boxes[pos, 4] < threshold:\n boxes[pos,0] = boxes[N-1, 0]\n boxes[pos,1] = boxes[N-1, 1]\n boxes[pos,2] = boxes[N-1, 2]\n boxes[pos,3] = boxes[N-1, 3]\n boxes[pos,4] = boxes[N-1, 4]\n N = N - 1\n pos = pos - 1\n\n pos = pos + 1\n\n keep = [i for i in range(N)]\n boxes = boxes[keep]\n return boxes\n \ndef transform_preds(coords, center, scale, output_size):\n target_coords = np.zeros(coords.shape)\n trans = get_affine_transform(center, scale, 0, output_size, inv=1) \n for p in range(coords.shape[0]):\n target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)\n return target_coords\n", "import torch\nimport torch.nn as nn\nfrom mmcv.cnn import normal_init\n\nfrom mmdet.core import multi_apply, multiclass_nms, distance2bbox, force_fp32\nfrom ..builder import build_loss\nfrom ..registry import HEADS\nfrom ..utils import bias_init_with_prob, Scale, ConvModule\n\nINF = 1e8\n\n\[email protected]_module\nclass FCOSHead(nn.Module):\n\n def __init__(self,\n num_classes,\n in_channels,\n feat_channels=256,\n stacked_convs=4,\n strides=(4, 8, 16, 32, 64),\n regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),\n (512, INF)),\n loss_cls=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n loss_bbox=dict(type='IoULoss', loss_weight=1.0),\n loss_centerness=dict(\n 
type='CrossEntropyLoss',\n use_sigmoid=True,\n loss_weight=1.0),\n conv_cfg=None,\n norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):\n super(FCOSHead, self).__init__()\n\n self.num_classes = num_classes\n self.cls_out_channels = num_classes - 1\n self.in_channels = in_channels\n self.feat_channels = feat_channels\n self.stacked_convs = stacked_convs\n self.strides = strides\n self.regress_ranges = regress_ranges\n self.loss_cls = build_loss(loss_cls)\n self.loss_bbox = build_loss(loss_bbox)\n self.loss_centerness = build_loss(loss_centerness)\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.fp16_enabled = False\n\n self._init_layers()\n\n def _init_layers(self):\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.norm_cfg is None))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.norm_cfg is None))\n self.fcos_cls = nn.Conv2d(\n self.feat_channels, self.cls_out_channels, 3, padding=1)\n self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)\n\n self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\n\n def init_weights(self):\n for m in self.cls_convs:\n normal_init(m.conv, std=0.01)\n for m in self.reg_convs:\n normal_init(m.conv, std=0.01)\n bias_cls = bias_init_with_prob(0.01)\n normal_init(self.fcos_cls, std=0.01, bias=bias_cls)\n normal_init(self.fcos_reg, std=0.01)\n normal_init(self.fcos_centerness, std=0.01)\n\n def forward(self, feats):\n return multi_apply(self.forward_single, feats, self.scales)\n\n def forward_single(self, x, scale):\n cls_feat = x\n reg_feat = x\n\n for cls_layer in self.cls_convs:\n cls_feat = cls_layer(cls_feat)\n cls_score = self.fcos_cls(cls_feat)\n centerness = self.fcos_centerness(cls_feat)\n\n for reg_layer in self.reg_convs:\n reg_feat = reg_layer(reg_feat)\n # scale the bbox_pred of different level\n # float to avoid overflow when enabling FP16\n bbox_pred = scale(self.fcos_reg(reg_feat)).float().exp()\n return cls_score, bbox_pred, centerness\n\n @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))\n def loss(self,\n cls_scores,\n bbox_preds,\n centernesses,\n gt_bboxes,\n gt_labels,\n img_metas,\n cfg,\n gt_bboxes_ignore=None):\n assert len(cls_scores) == len(bbox_preds) == len(centernesses)\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,\n bbox_preds[0].device)\n labels, bbox_targets = self.fcos_target(all_level_points, gt_bboxes,\n gt_labels)\n\n num_imgs = cls_scores[0].size(0)\n # flatten cls_scores, bbox_preds and centerness\n flatten_cls_scores = [\n cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)\n for cls_score in cls_scores\n ]\n flatten_bbox_preds = [\n bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n for bbox_pred in bbox_preds\n ]\n flatten_centerness = [\n centerness.permute(0, 2, 3, 1).reshape(-1)\n for centerness in centernesses\n ]\n flatten_cls_scores = torch.cat(flatten_cls_scores)\n flatten_bbox_preds = torch.cat(flatten_bbox_preds)\n flatten_centerness = torch.cat(flatten_centerness)\n flatten_labels = 
torch.cat(labels)\n flatten_bbox_targets = torch.cat(bbox_targets)\n # repeat points to align with bbox_preds\n flatten_points = torch.cat(\n [points.repeat(num_imgs, 1) for points in all_level_points])\n\n pos_inds = flatten_labels.nonzero().reshape(-1)\n num_pos = len(pos_inds)\n loss_cls = self.loss_cls(\n flatten_cls_scores, flatten_labels,\n avg_factor=num_pos + num_imgs) # avoid num_pos is 0\n\n pos_bbox_preds = flatten_bbox_preds[pos_inds]\n pos_bbox_targets = flatten_bbox_targets[pos_inds]\n pos_centerness = flatten_centerness[pos_inds]\n pos_centerness_targets = self.centerness_target(pos_bbox_targets)\n\n if num_pos > 0:\n pos_points = flatten_points[pos_inds]\n pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)\n pos_decoded_target_preds = distance2bbox(pos_points,\n pos_bbox_targets)\n # centerness weighted iou loss\n loss_bbox = self.loss_bbox(\n pos_decoded_bbox_preds,\n pos_decoded_target_preds,\n weight=pos_centerness_targets,\n avg_factor=pos_centerness_targets.sum())\n loss_centerness = self.loss_centerness(pos_centerness,\n pos_centerness_targets)\n else:\n loss_bbox = pos_bbox_preds.sum()\n loss_centerness = pos_centerness.sum()\n\n return dict(\n loss_cls=loss_cls,\n loss_bbox=loss_bbox,\n loss_centerness=loss_centerness)\n\n @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))\n def get_bboxes(self,\n cls_scores,\n bbox_preds,\n centernesses,\n img_metas,\n cfg,\n rescale=None):\n assert len(cls_scores) == len(bbox_preds)\n num_levels = len(cls_scores)\n\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,\n bbox_preds[0].device)\n result_list = []\n for img_id in range(len(img_metas)):\n cls_score_list = [\n cls_scores[i][img_id].detach() for i in range(num_levels)\n ]\n bbox_pred_list = [\n bbox_preds[i][img_id].detach() for i in range(num_levels)\n ]\n centerness_pred_list = [\n centernesses[i][img_id].detach() for i in range(num_levels)\n ]\n img_shape = img_metas[img_id]['img_shape']\n scale_factor = img_metas[img_id]['scale_factor']\n det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list,\n centerness_pred_list,\n mlvl_points, img_shape,\n scale_factor, cfg, rescale)\n result_list.append(det_bboxes)\n return result_list\n\n def get_bboxes_single(self,\n cls_scores,\n bbox_preds,\n centernesses,\n mlvl_points,\n img_shape,\n scale_factor,\n cfg,\n rescale=False):\n assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)\n mlvl_bboxes = []\n mlvl_scores = []\n mlvl_centerness = []\n for cls_score, bbox_pred, centerness, points in zip(\n cls_scores, bbox_preds, centernesses, mlvl_points):\n assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n scores = cls_score.permute(1, 2, 0).reshape(\n -1, self.cls_out_channels).sigmoid()\n centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()\n\n bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n nms_pre = cfg.get('nms_pre', -1)\n if nms_pre > 0 and scores.shape[0] > nms_pre:\n max_scores, _ = (scores * centerness[:, None]).max(dim=1)\n _, topk_inds = max_scores.topk(nms_pre)\n points = points[topk_inds, :]\n bbox_pred = bbox_pred[topk_inds, :]\n scores = scores[topk_inds, :]\n centerness = centerness[topk_inds]\n bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)\n mlvl_bboxes.append(bboxes)\n mlvl_scores.append(scores)\n mlvl_centerness.append(centerness)\n mlvl_bboxes = torch.cat(mlvl_bboxes)\n if rescale:\n mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)\n 
mlvl_scores = torch.cat(mlvl_scores)\n padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)\n mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)\n mlvl_centerness = torch.cat(mlvl_centerness)\n det_bboxes, det_labels = multiclass_nms(\n mlvl_bboxes,\n mlvl_scores,\n cfg.score_thr,\n cfg.nms,\n cfg.max_per_img,\n score_factors=mlvl_centerness)\n return det_bboxes, det_labels\n\n def get_points(self, featmap_sizes, dtype, device):\n \"\"\"Get points according to feature map sizes.\n\n Args:\n featmap_sizes (list[tuple]): Multi-level feature map sizes.\n dtype (torch.dtype): Type of points.\n device (torch.device): Device of points.\n\n Returns:\n tuple: points of each image.\n \"\"\"\n mlvl_points = []\n for i in range(len(featmap_sizes)):\n mlvl_points.append(\n self.get_points_single(featmap_sizes[i], self.strides[i],\n dtype, device))\n return mlvl_points\n\n def get_points_single(self, featmap_size, stride, dtype, device):\n h, w = featmap_size\n x_range = torch.arange(\n 0, w * stride, stride, dtype=dtype, device=device)\n y_range = torch.arange(\n 0, h * stride, stride, dtype=dtype, device=device)\n y, x = torch.meshgrid(y_range, x_range)\n points = torch.stack(\n (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2\n return points\n\n def fcos_target(self, points, gt_bboxes_list, gt_labels_list):\n assert len(points) == len(self.regress_ranges)\n num_levels = len(points)\n \n # expand regress ranges to align with points\n expanded_regress_ranges = [\n points[i].new_tensor(self.regress_ranges[i])[None].expand_as(\n points[i]) for i in range(num_levels)\n ]\n #for i in range(num_levels):\n # print(i)\n # print(\"points.shape:\",points[i].shape) # torch.Size([15200, 2])\n # print(\"regress_ranges shape:\", self.regress_ranges[i])\n # print(\"expanded_regress_ranges shape\", expanded_regress_ranges[i].shape)\n # concat all levels points and regress ranges\n concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)\n concat_points = torch.cat(points, dim=0)\n #print(\"concat_regress_ranges.shape\", concat_regress_ranges.shape)\n #print(\"concat_points.shape\", concat_points.shape)\n # get labels and bbox_targets of each image\n labels_list, bbox_targets_list = multi_apply(\n self.fcos_target_single,\n gt_bboxes_list,\n gt_labels_list,\n points=concat_points,\n regress_ranges=concat_regress_ranges)\n\n # split to per img, per level\n num_points = [center.size(0) for center in points]\n labels_list = [labels.split(num_points, 0) for labels in labels_list]\n bbox_targets_list = [\n bbox_targets.split(num_points, 0)\n for bbox_targets in bbox_targets_list\n ]\n\n # concat per level image\n concat_lvl_labels = []\n concat_lvl_bbox_targets = []\n for i in range(num_levels):\n concat_lvl_labels.append(\n torch.cat([labels[i] for labels in labels_list]))\n concat_lvl_bbox_targets.append(\n torch.cat(\n [bbox_targets[i] for bbox_targets in bbox_targets_list]))\n return concat_lvl_labels, concat_lvl_bbox_targets\n\n def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges):\n num_points = points.size(0)\n num_gts = gt_labels.size(0)\n print(\"gt_bboxes\", gt_bboxes.shape)\n print(\"gt_labels\", gt_labels.shape)\n print(\"points\", points.shape)\n print(points[:10])\n print(\"regress_ranges\", regress_ranges.shape)\n print(regress_ranges[:10])\n\n areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (\n gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)\n # TODO: figure out why these two are different\n # areas = areas[None].expand(num_points, num_gts)\n areas = 
areas[None].repeat(num_points, 1)\n regress_ranges = regress_ranges[:, None, :].expand(\n num_points, num_gts, 2)\n gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)\n xs, ys = points[:, 0], points[:, 1]\n xs = xs[:, None].expand(num_points, num_gts)\n ys = ys[:, None].expand(num_points, num_gts)\n\n left = xs - gt_bboxes[..., 0]\n right = gt_bboxes[..., 2] - xs\n top = ys - gt_bboxes[..., 1]\n bottom = gt_bboxes[..., 3] - ys\n bbox_targets = torch.stack((left, top, right, bottom), -1)\n\n # condition1: inside a gt bbox\n inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0\n\n # condition2: limit the regression range for each location\n max_regress_distance = bbox_targets.max(-1)[0]\n inside_regress_range = (\n max_regress_distance >= regress_ranges[..., 0]) & (\n max_regress_distance <= regress_ranges[..., 1])\n\n # if there are still more than one objects for a location,\n # we choose the one with minimal area\n areas[inside_gt_bbox_mask == 0] = INF\n areas[inside_regress_range == 0] = INF\n min_area, min_area_inds = areas.min(dim=1)\n\n labels = gt_labels[min_area_inds]\n labels[min_area == INF] = 0\n bbox_targets = bbox_targets[range(num_points), min_area_inds]\n\n return labels, bbox_targets\n\n def centerness_target(self, pos_bbox_targets):\n # only calculate pos centerness targets, otherwise there may be nan\n left_right = pos_bbox_targets[:, [0, 2]]\n top_bottom = pos_bbox_targets[:, [1, 3]]\n centerness_targets = (\n left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (\n top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])\n return torch.sqrt(centerness_targets)\n", "import torch\nimport torch.nn as nn\nfrom ..registry import LOSSES\n\ndef _neg_loss(pred, gt):\n ''' Modified focal loss. Exactly the same as CornerNet.\n Runs faster and costs a little bit more memory\n Arguments:\n pred (batch x c x h x w) => (batch, c, num_points)\n gt_regr (batch x c x h x w)\n '''\n pos_inds = gt.eq(1).float()\n neg_inds = gt.lt(1).float()\n\n neg_weights = torch.pow(1 - gt, 4)\n\n loss = 0\n\n pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds\n neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds\n\n num_pos = pos_inds.float().sum()\n\n pos_loss = pos_loss.sum()\n neg_loss = neg_loss.sum()\n\n count = int(num_pos.cpu().detach())\n if count == 0:\n loss = loss - neg_loss\n else:\n loss = loss - (pos_loss + neg_loss) / num_pos\n return loss\n\[email protected]_module\nclass CenterFocalLoss(nn.Module):\n '''nn.Module warpper for focal loss'''\n def __init__(self):\n super(CenterFocalLoss, self).__init__()\n self.neg_loss = _neg_loss\n\n def forward(self, out, target):\n return self.neg_loss(out, target)", "import torch\nimport torch.nn as nn\nfrom mmcv.cnn import normal_init\nimport numpy as np\nimport cv2\nimport math\n#import torch.nn.functional as F\n\nfrom mmdet.core import multi_apply, multiclass_nms, distance2bbox, force_fp32\nfrom ..builder import build_loss\nfrom ..registry import HEADS\nfrom ..utils import bias_init_with_prob, Scale, ConvModule\n\nINF = 1e8\n\n\[email protected]_module\nclass CenterHead(nn.Module):\n\n def __init__(self,\n num_classes, # init 80\n in_channels,\n feat_channels=256,\n stacked_convs=1,\n strides=(4, 8, 16, 32, 64),\n regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),\n (512, INF)),\n use_cross = False,\n loss_hm = dict(\n type=\"CenterFocalLoss\"\n ), # 这里实现 CenterFocalLoss\n loss_wh = dict(\n type=\"L1Loss\",\n loss_weight=0.1\n ),\n loss_offset = dict(\n type=\"L1Loss\",\n loss_weight=1.0\n ),\n 
conv_cfg=None,\n norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):\n super(CenterHead, self).__init__()\n\n self.num_classes = num_classes\n # self.cls_out_channels = num_classes - 1\n self.cls_out_channels = num_classes\n self.in_channels = in_channels\n self.feat_channels = feat_channels\n self.stacked_convs = stacked_convs\n self.strides = strides\n self.regress_ranges = regress_ranges\n self.featmap_sizes = None\n self.loss_hm = build_loss(loss_hm)\n self.loss_wh = build_loss(loss_wh)\n self.loss_offset = build_loss(loss_offset)\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.fp16_enabled = False\n self.use_cross = use_cross\n\n self._init_layers()\n\n def _init_layers(self):\n self.cls_convs = nn.ModuleList()\n self.wh_convs = nn.ModuleList()\n self.offset_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.norm_cfg is None))\n self.wh_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.norm_cfg is None))\n self.offset_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.norm_cfg is None))\n self.center_hm = nn.Conv2d(self.feat_channels, self.cls_out_channels, 3, padding=1, bias=True)\n self.center_wh = nn.Conv2d(self.feat_channels, 2, 3, padding=1, bias=True)\n self.center_offset = nn.Conv2d(self.feat_channels, 2, 3, padding=1, bias=True)\n self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\n\n def init_weights(self):\n# for m in self.cls_convs:\n# normal_init(m.conv, std=0.01)\n# for m in self.wh_convs:\n# normal_init(m.conv, std=0.01)\n# for m in self.offset_convs:\n# normal_init(m.conv, std=0.01)\n \n #bias_hm = bias_init_with_prob(0.01) # 这里的初始化?\n #normal_init(self.center_hm, std=0.01, bias=bias_hm)\n self.center_hm.bias.data.fill_(-2.19)\n nn.init.constant_(self.center_wh.bias, 0)\n nn.init.constant_(self.center_offset.bias, 0)\n# normal_init(self.center_hm, std=0.01)\n# normal_init(self.center_wh, std=0.01)\n# normal_init(self.center_offset, std=0.01)\n\n def forward(self, feats):\n return multi_apply(self.forward_single, feats, self.scales)\n\n def forward_single(self, x, scale):\n cls_feat = x\n wh_feat = x\n offset_feat = x\n\n for cls_layer in self.cls_convs:\n cls_feat = cls_layer(cls_feat)\n cls_score = self.center_hm(cls_feat)\n\n for wh_layer in self.wh_convs:\n wh_feat = wh_layer(wh_feat)\n wh_pred = self.center_wh(wh_feat)\n \n for offset_layer in self.offset_convs:\n offset_feat = offset_layer(offset_feat)\n offset_pred = self.center_offset(offset_feat)\n \n return cls_score, wh_pred, offset_pred\n\n @force_fp32(apply_to=('cls_scores', 'wh_preds', 'offset_preds'))\n def loss(self,\n cls_scores,\n wh_preds,\n offset_preds,\n gt_bboxes,\n gt_labels,\n img_metas,\n cfg,\n gt_bboxes_ignore=None):\n\n assert len(cls_scores) == len(wh_preds) == len(offset_preds)\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n self.featmap_sizes = featmap_sizes\n \n all_level_points = self.get_points(featmap_sizes, offset_preds[0].dtype,\n offset_preds[0].device)\n #print(img_metas)\n #self.c = img_metas['c']\n #self.s = img_metas['s']\n self.tensor_dtype = offset_preds[0].dtype\n self.tensor_device = 
offset_preds[0].device\n heatmaps, wh_targets, offset_targets = self.center_target(gt_bboxes, gt_labels, img_metas, all_level_points) # 所有层的concat的, 每张图对应一个\n\n num_imgs = cls_scores[0].size(0) # batch_size\n #print(num_imgs)\n # flatten cls_scores, bbox_preds and centerness\n flatten_cls_scores = [\n cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)\n for cls_score in cls_scores\n ] # cls_scores(num_levels, batch_size, 80, h, w) => (num_levels, batch_size * w * h, 80)\n flatten_wh_preds = [\n wh_pred.permute(0, 2, 3, 1).reshape(-1, 2) # batchsize, h, w, 2 => batchsize, h, w, 2\n for wh_pred in wh_preds\n ]\n flatten_offset_preds = [\n offset_pred.permute(0, 2, 3, 1).reshape(-1, 2)\n for offset_pred in offset_preds\n ]\n \n flatten_cls_scores = torch.cat(flatten_cls_scores)\n flatten_wh_preds = torch.cat(flatten_wh_preds)\n flatten_offset_preds = torch.cat(flatten_offset_preds)\n \n # targets\n flatten_heatmaps = torch.cat(heatmaps)\n flatten_wh_targets = torch.cat(wh_targets) # torch.Size([all_level_points, 2])\n flatten_offset_targets = torch.cat(offset_targets)\n\n # repeat points to align with bbox_preds\n # flatten_points = torch.cat(\n # [points.repeat(num_imgs, 1) for points in all_level_points])\n\n # pos_inds = flatten_labels.nonzero().reshape(-1)\n #print(flatten_wh_targets.shape)\n #print(flatten_wh_targets.nonzero())\n center_inds = flatten_wh_targets[...,0].nonzero().reshape(-1) \n #print(center_inds)\n num_center = len(center_inds)\n #print(num_center)\n \n # what about use the centerness * labels to indict an object\n # loss_cls = self.loss_cls(\n # flatten_cls_scores, flatten_labels, # labels gt is small area\n # avg_factor=num_pos + num_imgs) # avoid num_pos is 0\n flatten_cls_scores = torch.clamp(flatten_cls_scores.sigmoid_(), min=1e-4, max=1-1e-4)\n loss_hm = self.loss_hm(flatten_cls_scores, flatten_heatmaps)\n \n pos_wh_targets = flatten_wh_targets[center_inds]\n #print(pos_wh_targets.shape)\n pos_wh_preds = flatten_wh_preds[center_inds]\n \n pos_offset_preds = flatten_offset_preds[center_inds]\n pos_offset_targets = flatten_offset_targets[center_inds]\n \n if num_center > 0:\n # TODO: use the iou loss\n # center_points = flatten_points[center_inds]\n # center_decoded_bbox_preds = wh_offset2bbox(center_points, pos_wh_preds, pos_offset_preds)\n # center_decoded_bbox_targets = wh_offset2bbox(center_points, pos_wh_targets, pos_offset_targets)\n loss_wh = self.loss_wh(pos_wh_preds, pos_wh_targets, avg_factor=num_center + num_imgs)\n #loss_wh = F.l1_loss(pos_wh_preds, pos_wh_targets, reduction='sum') / (num_center + num_imgs)\n #loss_wh = 0.1 * loss_wh\n loss_offset = self.loss_offset(pos_offset_preds, pos_offset_targets, avg_factor=num_center + num_imgs)\n else:\n loss_wh = pos_wh_preds.sum()\n loss_offset = pos_offset_preds.sum()\n \n return dict(\n loss_hm = loss_hm,\n loss_wh = loss_wh,\n loss_offset = loss_offset)\n\n def get_points(self, featmap_sizes, dtype, device):\n \"\"\"Get points according to feature map sizes.\n\n Args:\n featmap_sizes (list[tuple]): Multi-level feature map sizes.\n dtype (torch.dtype): Type of points.\n device (torch.device): Device of points.\n\n Returns:\n tuple: points of each image.\n \"\"\"\n mlvl_points = []\n for i in range(len(featmap_sizes)):\n mlvl_points.append(\n self.get_points_single(featmap_sizes[i], self.strides[i],\n dtype, device))\n return mlvl_points\n\n def get_points_single(self, featmap_size, stride, dtype, device):\n h, w = featmap_size\n x_range = torch.arange(\n 0, w * stride, stride, dtype=dtype, 
device=device) # 以一定间隔取x的值\n y_range = torch.arange(\n 0, h * stride, stride, dtype=dtype, device=device)\n y, x = torch.meshgrid(y_range, x_range) # 得到featmap的所有点\n points = torch.stack(\n (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2\n return points\n\n def center_target(self, gt_bboxes_list, gt_labels_list, img_metas, all_level_points):\n\n assert len(self.featmap_sizes) == len(self.regress_ranges)\n\n # get heatmaps and targets of each image\n # heatmaps in heatmaps_list: [num_points, 80]\n # wh_targets: [num_points, 2] => [batch_size, num_points, 2]\n heatmaps_list, wh_targets_list, offset_targets_list = multi_apply(\n self.center_target_single,\n gt_bboxes_list,\n gt_labels_list,\n img_metas\n )\n\n # split to per img, per level\n num_points = [center.size(0) for center in all_level_points] # 每一层多少个点 all_level_points [[12414, 2], []]\n \n heatmaps_list = [heatmaps.split(num_points, 0) for heatmaps in heatmaps_list]\n wh_targets_list = [wh_targets.split(num_points, 0) for wh_targets in wh_targets_list]\n offset_targets_list = [offset_targets.split(num_points, 0) for offset_targets in offset_targets_list]\n\n # concat per level image, 同一层的concat # [(batch_size,featmap_size[1]), ...)\n concat_lvl_heatmaps = []\n concat_lvl_wh_targets = []\n concat_lvl_offset_targets = []\n num_levels = len(self.featmap_sizes)\n for i in range(num_levels):\n concat_lvl_heatmaps.append(\n torch.cat([heatmaps[i] for heatmaps in heatmaps_list])) # (num_levels, batch_size * w * h, 80)\n concat_lvl_wh_targets.append(\n torch.cat(\n [wh_targets[i] for wh_targets in wh_targets_list]))\n concat_lvl_offset_targets.append(\n torch.cat(\n [offset_targets[i] for offset_targets in offset_targets_list]))\n return concat_lvl_heatmaps, concat_lvl_wh_targets, concat_lvl_offset_targets\n\n\n \n def center_target_single(self, gt_bboxes, gt_labels, img_meta):\n \"\"\"\n single image\n gt_bboxes:torch.Size([6, 4])\n gt_labels:torch.Size([6]) tensor([34, 34, 34, 34, 34, 34], device='cuda:0')\n featmap_sizes:(list[tuple]): Multi-level feature map sizes.\n regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),(512, INF))\n \"\"\"\n # transform the gt_bboxes, gt_labels to numpy\n gt_bboxes = gt_bboxes.data.cpu().numpy()\n gt_labels = gt_labels.data.cpu().numpy()\n \n #print(gt_bboxes, gt_labels)\n num_objs = gt_labels.shape[0]\n #print(num_objs)\n # heatmaps [level1, level2, level3, level4, level5]\n num_levels = len(self.featmap_sizes)\n\n heatmaps_targets = []\n wh_targets = []\n offset_targets = []\n # get the target shape for each image\n for i in range(num_levels):\n h, w = self.featmap_sizes[i]\n hm = np.zeros((self.cls_out_channels, h, w), dtype=np.float32)\n heatmaps_targets.append(hm)\n wh = np.zeros((h, w, 2), dtype=np.float32)\n wh_targets.append(wh)\n offset = np.zeros((h, w, 2), dtype=np.float32)\n offset_targets.append(offset)\n\n for k in range(num_objs):\n bbox = gt_bboxes[k]\n cls_id = gt_labels[k]\n \n if img_meta['flipped']:\n bbox[[0, 2]] = img_meta['width'] - bbox[[2, 0]] - 1\n \n # condition: in the regress_ranges\n origin_h, origin_w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n #max_h_w = max(h, w) / 2\n max_h_w = max(origin_h, origin_w)\n #max_h_w = max(origin_h, origin_w) * 2 # 最长边为32在P2\n # 根据max_h_w在哪一层将output设置为当前层的\n index_levels = []\n #index_level = 0\n for i in range(num_levels):\n min_regress_distance, max_regress_distance = self.regress_ranges[i]\n if not self.use_cross and (max_h_w > min_regress_distance) and (max_h_w <= max_regress_distance):\n index_levels.append(i)\n break\n \n if 
self.use_cross:\n min_regress_distance = min_regress_distance * 0.8\n max_regress_distance = max_regress_distance * 1.3\n if (max_h_w > min_regress_distance) and (max_h_w <= max_regress_distance):\n index_levels.append(i)\n \n for index_level in index_levels:\n output_h, output_w = self.featmap_sizes[index_level]\n #print(output_h, output_w)\n hm = heatmaps_targets[index_level]\n wh = wh_targets[index_level]\n offset = offset_targets[index_level]\n \n # c, s is passed by meta\n trans_output = get_affine_transform(img_meta['c'], img_meta['s'], 0, [output_w, output_h])\n bbox[:2] = affine_transform(bbox[:2], trans_output)\n bbox[2:] = affine_transform(bbox[2:], trans_output)\n bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1) #x1, x2\n bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n #print(h, w)\n # 转换到当层\n if h > 0 and w > 0:\n radius = gaussian_radius((math.ceil(h), math.ceil(w)))\n radius = max(0, int(radius))\n ct = np.array(\n [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n #print(ct)\n ct_int = ct.astype(np.int32)\n #hm[cls_id, ct_int[1], ct_int[0]] = 1\n\n #if (ct_int[1] - 1) > 0:\n # hm[cls_id, ct_int[1] - 1, ct_int[0]] = 0.5\n #if (ct_int[0] - 1) > 0:\n # hm[cls_id, ct_int[1], ct_int[0] - 1] = 0.5\n #if (ct_int[1] + 1) < output_h:\n # hm[cls_id, ct_int[1] + 1, ct_int[0]] = 0.5\n #if (ct_int[0] + 1) < output_w:\n # hm[cls_id, ct_int[1], ct_int[0] + 1] = 0.5\n draw_umich_gaussian(hm[cls_id], ct_int, radius)\n\n h, w = 1. * h, 1. * w\n offset_count = ct - ct_int # h, w\n # ct_int即表明在featmap的位置 ct_int[1] * output_w + ct_int[0] \n # TODO:如果当前位置有物体的中心,现在是直接覆盖\n # 这里设置监督信号,第1位表示w,第2位表示h\n # 这里对featmap进行缩放?\n # wh[ct_int[1], ct_int[0], 0] = w / output_w# output_h, output_w <= y, x\n # wh[ct_int[1], ct_int[0], 1] = h / output_h\n # offset[ct_int[1], ct_int[0], 0] = offset_count[0] / output_w\n # offset[ct_int[1], ct_int[0], 0] = offset_count[1] / output_h\n wh[ct_int[1], ct_int[0], 0] = w \n wh[ct_int[1], ct_int[0], 1] = h \n offset[ct_int[1], ct_int[0], 0] = offset_count[0]\n offset[ct_int[1], ct_int[0], 0] = offset_count[1]\n \n \n heatmaps_targets[index_level] = hm\n wh_targets[index_level] = wh\n offset_targets[index_level] = offset\n\n flatten_heatmaps_targets = [\n hm.transpose(1, 2, 0).reshape(-1, self.cls_out_channels)\n for hm in heatmaps_targets\n ]\n #for i in range(len(flatten_heatmaps_targets)):\n # print(flatten_heatmaps_targets[i].shape)\n \n heatmaps_targets = np.concatenate(flatten_heatmaps_targets, axis=0) \n #print(heatmaps_targets.shape) # (13343, 80)\n #print(heatmaps_targets)\n \n flatten_wh_targets = [\n wh.reshape(-1, 2) for wh in wh_targets\n ]\n wh_targets = np.concatenate(flatten_wh_targets)\n \n flatten_offset_targets = [\n offset.reshape(-1, 2) for offset in offset_targets\n ]\n offset_targets = np.concatenate(flatten_offset_targets)\n\n # transform the heatmaps_targets, wh_targets, offset_targets into tensor\n heatmaps_targets = torch.from_numpy(np.stack(heatmaps_targets))\n heatmaps_targets = torch.tensor(heatmaps_targets.detach(), dtype=self.tensor_dtype, device=self.tensor_device)\n wh_targets = torch.from_numpy(np.stack(wh_targets))\n wh_targets = torch.tensor(wh_targets.detach(), dtype=self.tensor_dtype, device=self.tensor_device)\n offset_targets = torch.from_numpy(np.stack(offset_targets))\n offset_targets = torch.tensor(offset_targets.detach(), dtype=self.tensor_dtype, device=self.tensor_device)\n \n return heatmaps_targets, wh_targets, offset_targets\n\n # test use\n 
@force_fp32(apply_to=('cls_scores', 'wh_preds', 'offset_preds'))\n def get_bboxes(self,\n cls_scores,\n wh_preds,\n offset_preds,\n img_metas,\n cfg):\n assert len(cls_scores) == len(wh_preds) == len(offset_preds)\n # cls_scores => [num_levels] => [batch featmap] => [batch, 80, h, w]\n # wh_preds => [num_levels] => [featmap] => [2, h, w]\n # offset_preds => [num_levels] => [featmap] => [2, h, w]\n num_levels = len(cls_scores)\n\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n\n result_list = []\n #print(cls_scores[0].shape) # torch.Size([1, 80, 84, 56])\n #print(img_metas)\n\n for img_id in range(len(img_metas)): # 每个batch中id\n cls_score_list = [\n cls_scores[i][img_id].detach() for i in range(num_levels)\n ] # =>[num_levels] => [80, h, w]\n wh_pred_list = [\n wh_preds[i][img_id].detach() for i in range(num_levels)\n ]\n offset_pred_list = [\n offset_preds[i][img_id].detach() for i in range(num_levels)\n ]\n #img_shape = img_metas[img_id]['img_shape']\n scale_factor = img_metas[img_id]['scale_factor']\n c = img_metas[img_id]['c']\n s = img_metas[img_id]['s']\n det_bboxes = self.get_bboxes_single(cls_score_list, wh_pred_list,\n offset_pred_list,\n featmap_sizes, c, s,\n scale_factor, cfg) # 对每一张图像进行解调\n result_list.append(det_bboxes)\n return result_list # [batch_size]\n\n def get_bboxes_single(self,\n cls_scores,\n wh_preds,\n offset_preds,\n featmap_sizes,\n c, \n s,\n scale_factor,\n cfg):\n assert len(cls_scores) == len(wh_preds) == len(offset_preds) == len(featmap_sizes)\n \n detections = []\n for cls_score, wh_pred, offset_pred, featmap_size in zip(\n cls_scores, wh_preds, offset_preds, featmap_sizes): # 取出每一层的点\n assert cls_score.size()[-2:] == wh_pred.size()[-2:] == offset_pred.size()[-2:] == featmap_size\n \n output_h, output_w = featmap_size\n #实际上得到了每一层的hm, wh, offset\n hm = torch.clamp(cls_score.sigmoid_(), min=1e-4, max=1-1e-4).unsqueeze(0) # 增加一个纬度\n #wh_pred[0, :, :] = wh_pred[0, :, :] * output_w\n #wh_pred[1, :, :] = wh_pred[1, :, :] * output_h # 2, output_h, output_w\n wh = wh_pred.unsqueeze(0) # 这里需要乘以featuremap的尺度\n #offset_pred[0, : ,:] = offset_pred[0, : ,:] * output_w\n #offset_pred[1, : ,:] = offset_pred[1, : ,:] * output_h\n reg = offset_pred.unsqueeze(0)\n \n dets = ctdet_decode(hm, wh, reg=reg, K=100)\n dets = post_process(dets, c, s, output_h, output_w, scale=scale_factor, num_classes=self.num_classes)\n detections.append(dets)\n \n results = merge_outputs(detections, self.num_classes) # 单张图的结果\n\n return results\n\n#num_classes = 80\n\ndef gaussian_radius(det_size, min_overlap=0.7):\n height, width = det_size\n \n a1 = 1\n b1 = (height + width)\n c1 = width * height * (1 - min_overlap) / (1 + min_overlap)\n sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)\n r1 = (b1 + sq1) / 2\n \n a2 = 4\n b2 = 2 * (height + width)\n c2 = (1 - min_overlap) * width * height\n sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)\n r2 = (b2 + sq2) / 2\n \n a3 = 4 * min_overlap\n b3 = -2 * min_overlap * (height + width)\n c3 = (min_overlap - 1) * width * height\n sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)\n r3 = (b3 + sq3) / 2\n return min(r1, r2, r3)\n\ndef gaussian_small_radius(det_size, min_overlap=0.7):\n height, width = det_size\n \n a1 = 1\n b1 = (height + width)\n c1 = width * height * (1 - min_overlap) / (1 + min_overlap)\n sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)\n r1 = (b1 - sq1) / (2 * a1)\n \n a2 = 4\n b2 = 2 * (height + width)\n c2 = (1 - min_overlap) * width * height\n sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)\n r2 = (b2 - sq2) / (2 * a2)\n \n a3 = 4 * min_overlap\n b3 = -2 * min_overlap * 
(height + width)\n c3 = (min_overlap - 1) * width * height\n sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)\n r3 = (b3 + sq3) / (2 * a3)\n return min(r1, r2, r3)\n\ndef gaussian2D(shape, sigma=1):\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m+1,-n:n+1]\n\n h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n return h\n\ndef draw_umich_gaussian(heatmap, center, radius, k=1):\n diameter = 2 * radius + 1\n gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)\n \n x, y = int(center[0]), int(center[1])\n \n height, width = heatmap.shape[0:2]\n \n left, right = min(x, radius), min(width - x, radius + 1)\n top, bottom = min(y, radius), min(height - y, radius + 1)\n \n masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]\n masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]\n if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug\n np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)\n return heatmap\n\ndef affine_transform(pt, t):\n new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T\n new_pt = np.dot(t, new_pt)\n return new_pt[:2]\n\ndef get_affine_transform(center,\n scale,\n rot,\n output_size,\n shift=np.array([0, 0], dtype=np.float32),\n inv=0):\n if not isinstance(scale, np.ndarray) and not isinstance(scale, list):\n scale = np.array([scale, scale], dtype=np.float32)\n\n scale_tmp = scale\n src_w = scale_tmp[0]\n dst_w = output_size[0]\n dst_h = output_size[1]\n\n rot_rad = np.pi * rot / 180\n src_dir = get_dir([0, src_w * -0.5], rot_rad)\n dst_dir = np.array([0, dst_w * -0.5], np.float32)\n\n src = np.zeros((3, 2), dtype=np.float32)\n dst = np.zeros((3, 2), dtype=np.float32)\n src[0, :] = center + scale_tmp * shift\n src[1, :] = center + src_dir + scale_tmp * shift\n dst[0, :] = [dst_w * 0.5, dst_h * 0.5]\n dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir\n\n src[2:, :] = get_3rd_point(src[0, :], src[1, :])\n dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])\n\n if inv:\n trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n else:\n trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n\n return trans\n\ndef get_dir(src_point, rot_rad):\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n\n src_result = [0, 0]\n src_result[0] = src_point[0] * cs - src_point[1] * sn\n src_result[1] = src_point[0] * sn + src_point[1] * cs\n\n return src_result\n\ndef get_3rd_point(a, b):\n direct = a - b\n return b + np.array([-direct[1], direct[0]], dtype=np.float32)\n\ndef ctdet_decode(heat, wh, reg=None, cat_spec_wh=False, K=40):\n batch, cat, height, width = heat.size() # 1, 80, 128, 128\n \n #print(\"batch, cat, height, width\\n\", batch, cat, height, width)\n \n if height * width <= K:\n K = height * width \n #print(\"k:\", K)\n \n heat = _nms(heat)\n \n scores, inds, clses, ys, xs = _topk(heat, K=K)\n \n if reg is not None:\n reg = _tranpose_and_gather_feat(reg, inds)\n reg = reg.view(batch, K, 2)\n xs = xs.view(batch, K, 1) + reg[:, :, 0:1]\n ys = ys.view(batch, K, 1) + reg[:, :, 1:2]\n else:\n xs = xs.view(batch, K, 1) + 0.5\n ys = ys.view(batch, K, 1) + 0.5\n wh = _tranpose_and_gather_feat(wh, inds) # inds 对应 h, w的尺度\n if cat_spec_wh:\n wh = wh.view(batch, K, cat, 2)\n clses_ind = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2).long()\n wh = wh.gather(2, clses_ind).view(batch, K, 2)\n else:\n wh = wh.view(batch, K, 2)\n \n clses = clses.view(batch, K, 1).float()\n scores = scores.view(batch, K, 1) # 
0, 1, 2\n \n bboxes = torch.cat([xs - wh[..., 0:1] / 2, \n ys - wh[..., 1:2] / 2,\n xs + wh[..., 0:1] / 2, \n ys + wh[..., 1:2] / 2], dim=2)\n \n detections = torch.cat([bboxes, scores, clses], dim=2)\n return detections\n\ndef _nms(heat, kernel=3):\n pad = (kernel - 1) // 2\n hmax = nn.functional.max_pool2d(\n heat, (kernel, kernel), stride=1, padding=pad)\n keep = (hmax == heat).float()\n return heat * keep\n\ndef _gather_feat(feat, ind, mask=None):\n dim = feat.size(2)\n ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)\n feat = feat.gather(1, ind)\n if mask is not None:\n mask = mask.unsqueeze(2).expand_as(feat)\n feat = feat[mask]\n feat = feat.view(-1, dim)\n return feat\n\ndef _tranpose_and_gather_feat(feat, ind):\n feat = feat.permute(0, 2, 3, 1).contiguous()\n feat = feat.view(feat.size(0), -1, feat.size(3))\n feat = _gather_feat(feat, ind)\n return feat\n\ndef _topk(scores, K=40):\n batch, cat, height, width = scores.size() # 1, 80,height, width\n #print(\"batch, cat, height, width\\n\", batch, cat, height, width)\n #print(\"k:\", K)\n\n topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)\n\n topk_inds = topk_inds % (height * width)\n topk_ys = (topk_inds / width).int().float() # y-> h, x-> w\n topk_xs = (topk_inds % width).int().float()\n\n topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)\n\n topk_clses = (topk_ind / K).int()\n topk_inds = _gather_feat(\n topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)\n topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)\n topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)\n\n return topk_score, topk_inds, topk_clses, topk_ys, topk_xs\n\ndef post_process(dets, c, s, out_height, out_width, scale, num_classes):\n dets = dets.detach().cpu().numpy()\n# print(\"dets\", dets) # (1, 100, 6)\n\n dets = dets.reshape(1, -1, dets.shape[2]) # (x1, y1, x2, y2)\n\n dets = ctdet_post_process(\n dets.copy(), [c], [s],\n out_height, out_width, num_classes)\n \n for j in range(1, num_classes + 1):\n dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)\n dets[0][j][:, :4] /= scale\n return dets[0]\n\ndef ctdet_post_process(dets, c, s, h, w, num_classes):\n ret = []\n# print(dets.shape) # (1, 100, 6)\n# print(c)\n for i in range(dets.shape[0]):\n top_preds = {}\n dets[i, :, :2] = transform_preds(\n dets[i, :, 0:2], c[i], s[i], (w, h))\n dets[i, :, 2:4] = transform_preds(\n dets[i, :, 2:4], c[i], s[i], (w, h))\n\n classes = dets[i, :, -1] # 类别这里是80\n \n for j in range(num_classes):\n inds = (classes == j)\n top_preds[j + 1] = np.concatenate([\n dets[i, inds, :4].astype(np.float32),\n dets[i, inds, 4:5].astype(np.float32)], axis=1).tolist() # 这里将框按照类别进行分类\n ret.append(top_preds)\n\n return ret \n \ndef merge_outputs(detections, num_classes):\n# print(detections)\n results = {}\n max_per_image = 100\n for j in range(1, num_classes + 1):\n results[j] = np.concatenate(\n [detection[j] for detection in detections], axis=0).astype(np.float32)\n# if len(self.scales) > 1 or self.opt.nms:\n results[j] = soft_nms(results[j], Nt=0.5, method=2, threshold=0.01)\n# print(results)\n scores = np.hstack([results[j][:, 4] for j in range(1, num_classes + 1)])\n\n if len(scores) > max_per_image:\n kth = len(scores) - max_per_image\n thresh = np.partition(scores, kth)[kth]\n for j in range(1, num_classes + 1):\n keep_inds = (results[j][:, 4] >= thresh)\n results[j] = results[j][keep_inds]\n# print(\"after merge out\\n\", results)\n return results2coco_boxes(results, 
num_classes)\n\ndef results2coco_boxes(results, num_classes): \n \"\"\"Convert detection results to a list of numpy arrays.\n\n Args:\n bboxes (Tensor): shape (n, 5)\n labels (Tensor): shape (n, )\n num_classes (int): class number, including background class\n\n Returns:\n list(ndarray): bbox results of each class\n \"\"\"\n bboxes = [0 for i in range(num_classes)]\n for j in range(1, num_classes + 1):\n if len(results[j]) == 0:\n bboxes[j - 1] = np.zeros((0, 5), dtype=np.float32)\n continue\n bboxes[j - 1] = results[j]\n# print(bboxes) # xyxy\n return bboxes\n\n\ndef soft_nms(boxes, sigma=0.5, Nt=0.3, threshold=0.01, method=0):\n N = boxes.shape[0]\n# cdef float iw, ih, box_area\n# cdef float ua\n# cdef int \n pos = 0\n# cdef float \n maxscore = 0\n# cdef int \n maxpos = 0\n# cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov\n\n for i in range(N):\n maxscore = boxes[i, 4]\n maxpos = i\n\n tx1 = boxes[i,0]\n ty1 = boxes[i,1]\n tx2 = boxes[i,2]\n ty2 = boxes[i,3]\n ts = boxes[i,4]\n\n pos = i + 1\n # get max box\n while pos < N:\n if maxscore < boxes[pos, 4]:\n maxscore = boxes[pos, 4]\n maxpos = pos\n pos = pos + 1\n\n # add max box as a detection \n boxes[i,0] = boxes[maxpos,0]\n boxes[i,1] = boxes[maxpos,1]\n boxes[i,2] = boxes[maxpos,2]\n boxes[i,3] = boxes[maxpos,3]\n boxes[i,4] = boxes[maxpos,4]\n\n # swap ith box with position of max box\n boxes[maxpos,0] = tx1\n boxes[maxpos,1] = ty1\n boxes[maxpos,2] = tx2\n boxes[maxpos,3] = ty2\n boxes[maxpos,4] = ts\n\n tx1 = boxes[i,0]\n ty1 = boxes[i,1]\n tx2 = boxes[i,2]\n ty2 = boxes[i,3]\n ts = boxes[i,4]\n\n pos = i + 1\n # NMS iterations, note that N changes if detection boxes fall below threshold\n while pos < N:\n x1 = boxes[pos, 0]\n y1 = boxes[pos, 1]\n x2 = boxes[pos, 2]\n y2 = boxes[pos, 3]\n s = boxes[pos, 4]\n\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n iw = (min(tx2, x2) - max(tx1, x1) + 1)\n if iw > 0:\n ih = (min(ty2, y2) - max(ty1, y1) + 1)\n if ih > 0:\n ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)\n ov = iw * ih / ua #iou between max box and detection box\n\n if method == 1: # linear\n if ov > Nt: \n weight = 1 - ov\n else:\n weight = 1\n elif method == 2: # gaussian\n weight = np.exp(-(ov * ov)/sigma)\n else: # original NMS\n if ov > Nt: \n weight = 0\n else:\n weight = 1\n\n boxes[pos, 4] = weight*boxes[pos, 4]\n \n # if box score falls below threshold, discard the box by swapping with last box\n # update N\n if boxes[pos, 4] < threshold:\n boxes[pos,0] = boxes[N-1, 0]\n boxes[pos,1] = boxes[N-1, 1]\n boxes[pos,2] = boxes[N-1, 2]\n boxes[pos,3] = boxes[N-1, 3]\n boxes[pos,4] = boxes[N-1, 4]\n N = N - 1\n pos = pos - 1\n\n pos = pos + 1\n\n keep = [i for i in range(N)]\n boxes = boxes[keep]\n return boxes\n \ndef transform_preds(coords, center, scale, output_size):\n target_coords = np.zeros(coords.shape)\n trans = get_affine_transform(center, scale, 0, output_size, inv=1) \n for p in range(coords.shape[0]):\n target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)\n return target_coords\n" ]
[ [ "torch.full", "torch.distributed.broadcast", "torch.no_grad", "torch.distributed.barrier" ], [ "torch.log", "torch.pow", "torch.nn.functional.l1_loss" ], [ "numpy.dot", "numpy.sqrt", "torch.cat", "numpy.concatenate", "numpy.exp", "numpy.partition", "numpy.clip", "numpy.stack", "numpy.sin", "numpy.finfo", "numpy.float32", "torch.arange", "torch.nn.functional.max_pool2d", "numpy.zeros", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Conv2d", "numpy.array", "numpy.maximum", "numpy.cos", "torch.meshgrid" ], [ "torch.cat", "torch.sqrt", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.arange", "torch.stack", "torch.meshgrid" ], [ "torch.log", "torch.pow" ], [ "numpy.dot", "numpy.sqrt", "torch.cat", "numpy.concatenate", "numpy.exp", "numpy.partition", "numpy.clip", "numpy.stack", "numpy.sin", "numpy.finfo", "numpy.float32", "torch.arange", "torch.nn.functional.max_pool2d", "numpy.zeros", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Conv2d", "numpy.array", "numpy.maximum", "numpy.cos", "torch.meshgrid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Livioni/Cloud-Workflow-Scheduling-base-on-Deep-Reinforcement-Learning
[ "eb246ebba160567277c9c1aa226e359f48629dac" ]
[ "AblationExperiment.py" ]
[ "import gym, torch, copy, os, xlwt, random\nimport torch.nn as nn\nfrom datetime import datetime\nimport numpy as np\n\nenv = gym.make(\"clusterEnv-v0\").unwrapped\nstate_dim, action_dim = env.return_dim_info()\n\n####### initialize environment hyperparameters ######\nmax_ep_len = 1000 # max timesteps in one episode\nauto_save = 1\ntotal_test_episodes = 100 * auto_save # total num of testing episodes\n\n\ndef initial_excel():\n global worksheet, workbook\n # xlwt 库将数据导入Excel并设置默认字符编码为ascii\n workbook = xlwt.Workbook(encoding='ascii')\n # 添加一个表 参数为表名\n worksheet = workbook.add_sheet('makespan')\n # 生成单元格样式的方法\n # 设置列宽, 3为列的数目, 12为列的宽度, 256为固定值\n for i in range(3):\n worksheet.col(i).width = 256 * 12\n # 设置单元格行高, 25为行高, 20为固定值\n worksheet.row(1).height_mismatch = True\n worksheet.row(1).height = 20 * 25\n # 保存excel文件\n workbook.save('data/makespan_MCTSAE.xls')\n\n\ndef read_current_state():\n '''\n 读取当前env的状态\n :return: 当前env的状态\n '''\n state = copy.deepcopy(env.state)\n ready_list = copy.deepcopy(env.ready_list)\n done_job = copy.deepcopy(env.done_job)\n tasks = copy.deepcopy(env.tasks)\n wait_duration = copy.deepcopy(env.wait_duration)\n cpu_demand = copy.deepcopy(env.cpu_demand)\n memory_demand = copy.deepcopy(env.memory_demand)\n tasks_remaing_time = copy.deepcopy(env.tasks_remaing_time)\n time = env.time\n cpu_res = env.cpu_res\n memory_res = env.memory_res\n return state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand, tasks_remaing_time, cpu_res, memory_res, time\n\n\ndef load_current_state(state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand, tasks_remaing_time,\n cpu_res, memory_res, time):\n env.set_state(state[:])\n env.set_ready_list(ready_list[:])\n env.set_done_job(done_job[:])\n env.set_tasks(tasks[:])\n env.set_wait_duration(wait_duration[:])\n env.set_cpu_demand(cpu_demand[:])\n env.set_memory_demand(memory_demand[:])\n env.set_tasks_remaing_time(tasks_remaing_time)\n env.set_cpu_res(cpu_res)\n env.set_memory_res(memory_res)\n env.set_time(time)\n return\n\n\nclass TreeNode(object):\n def __init__(self, parent, state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand,\n tasks_remaing_time, cpu_res, memory_res, time):\n self._parent = parent\n self._children = {} # a map from action to TreeNode\n self._n_visits = 0\n self._makespan = 0\n self._total_makespan = 0\n self._state = state\n self._ready_list = ready_list\n self._done_job = done_job\n self._tasks = tasks\n self._wait_duration = wait_duration\n self._cpu_demand = cpu_demand\n self._memory_demand = memory_demand\n self._tasks_remaing_time = tasks_remaing_time\n self._cpu_res = cpu_res\n self._memory_res = memory_res\n self._time = time\n self._c = 40\n self._value = 0\n if self._parent != None:\n self.get_value()\n\n def expand(self):\n '''\n 扩展树\n '''\n load_current_state(self._state, self._ready_list, self._done_job, self._tasks, self._wait_duration,\n self._cpu_demand, self._memory_demand, self._tasks_remaing_time, self._cpu_res,\n self._memory_res, self._time)\n available_action = env.return_action_list()\n if available_action:\n for action in available_action:\n load_current_state(self._state, self._ready_list, self._done_job, self._tasks, self._wait_duration,\n self._cpu_demand, self._memory_demand, self._tasks_remaing_time, self._cpu_res,\n self._memory_res, self._time)\n if action not in self._children:\n env.step(action)\n state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand, tasks_remaing_time, cpu_res, memory_res, 
time = read_current_state()\n self._children[action] = TreeNode(self, state, ready_list, done_job, tasks, wait_duration,\n cpu_demand, memory_demand, tasks_remaing_time, cpu_res,\n memory_res, time)\n else:\n print(\"done\")\n\n def get_average_makespan(self):\n return self._makespan\n\n def get_value(self):\n self._value = self._makespan + self._c * np.sqrt(np.log(self._parent._n_visits + 1) / (self._n_visits + 1))\n return self._value\n\n def select(self):\n '''\n 在子节中选择具有搜索价值的点\n '''\n return max(self._children.items(), key=lambda act_node: act_node[1].get_value())[1]\n\n def update(self, makespan):\n # Count visit.\n self._n_visits += 1\n if self._makespan == 0:\n self._makespan = -makespan\n else:\n if -makespan > self._makespan:\n self._makespan = -makespan\n if self._parent != None:\n self._value = self.get_value()\n\n def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(leaf_value)\n self.update(leaf_value)\n\n def is_leaf(self):\n return self._children == {}\n\n def is_root(self):\n return self._parent is None\n\n\nclass MCTS(object):\n def __init__(self, state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand, tasks_remaing_time,\n cpu_res, memory_res, time, depth):\n self._root = TreeNode(None, state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand,\n tasks_remaing_time, cpu_res, memory_res, time)\n self._root.expand() # 初始化扩展\n self._initial_buget = 100\n self._min_buget = 10\n self._depth = depth\n\n def playout(self):\n buget = max(self._initial_buget / self._depth, self._min_buget)\n for j in range(int(buget)):\n node = self._root\n while True:\n if node.is_leaf():\n if node._n_visits == 0:\n cur_state, cur_ready_list, cur_done_job, cur_tasks, cur_wait_duration, cur_cpu_demand, cur_memory_demand, cur_tasks_remaing_time, cur_cpu_res, cur_memory_res, cur_time = node._state, node._ready_list, node._done_job, node._tasks, node._wait_duration, node._cpu_demand, node._memory_demand, node._tasks_remaing_time, node._cpu_res, node._memory_res, node._time\n makespan = self._roll_out(cur_state, cur_ready_list, cur_done_job, cur_tasks, cur_wait_duration,\n cur_cpu_demand, cur_memory_demand, cur_tasks_remaing_time,\n cur_cpu_res, cur_memory_res, cur_time)\n node.update_recursive(makespan)\n break\n else:\n node.expand()\n node = node.select()\n else:\n node = node.select()\n node = self._root\n return max(node._children.items(), key=lambda act_node: act_node[1].get_average_makespan())[0]\n\n def _roll_out(self, cur_state, cur_ready_list, cur_done_job, cur_tasks, cur_wait_duration, cur_cpu_demand,\n cur_memory_demand, cur_tasks_remaing_time, cur_cpu_res, cur_memory_res, cur_time):\n load_current_state(cur_state, cur_ready_list, cur_done_job, cur_tasks, cur_wait_duration, cur_cpu_demand,\n cur_memory_demand, cur_tasks_remaing_time, cur_cpu_res, cur_memory_res, cur_time)\n state = cur_state\n max_ep_len = 1000 # max timesteps in one episode\n for t in range(1, max_ep_len + 1):\n action = random.choice(range(action_dim)) - 1\n state, reward, done, info = env.step(action)\n while (info[0] == False):\n action = random.choice(range(action_dim)) - 1\n state, reward, done, info = env.step(action) # 输入step的都是\n next_state, reward, done, _ = state, reward, done, info\n # break; if the episode is over\n state = next_state\n if done:\n makespan = state[0]\n break\n return makespan\n\n\nif __name__ == '__main__':\n initial_excel()\n makespans = []\n line = 
0\n start_time = datetime.now().replace(microsecond=0)\n print(\"Started training at (GMT) : \", start_time)\n print(\"============================================================================================\")\n for ep in range(1, total_test_episodes + 1):\n initial_state = env.reset()\n state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand, tasks_remaing_time, cpu_res, memory_res, time = read_current_state()\n for depth in range(1, max_ep_len + 1):\n tree = MCTS(state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand,\n tasks_remaing_time, cpu_res, memory_res, time, depth=depth)\n best_action = tree.playout()\n load_current_state(tree._root._state, tree._root._ready_list, tree._root._done_job, tree._root._tasks,\n tree._root._wait_duration, tree._root._cpu_demand, tree._root._memory_demand,\n tree._root._tasks_remaing_time, tree._root._cpu_res, tree._root._memory_res,\n tree._root._time)\n observation, reward, done, info = env.step(best_action)\n state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand, tasks_remaing_time, cpu_res, memory_res, time = read_current_state()\n del tree\n if done:\n makespan = observation[0]\n makespans.append(makespan)\n print(\"Episode:\", ep, \"Makespan:\", makespan)\n if ep % auto_save == 0:\n average_makespan = np.mean(makespans)\n worksheet.write(line, 1, float(average_makespan))\n workbook.save('data/makespan_MCTSAE.xls')\n print('MCTS : Episode: {}, Makespan: {:.3f}s'.format((line + 1) * auto_save, average_makespan))\n line += 1\n makespans = []\n end_time = datetime.now().replace(microsecond=0)\n print(\"Finished testing at (GMT) : \", end_time)\n print(\"Total testing time : \", end_time - start_time)\n start_time = end_time\n break\n workbook.save('data/makespan_MCTSAE.xls')\n env.close()\n" ]
[ [ "numpy.log", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
OuyangChao/Paddle
[ "311b3b44fc7d51d4d66d90ab8a3fc0d42231afda", "311b3b44fc7d51d4d66d90ab8a3fc0d42231afda", "311b3b44fc7d51d4d66d90ab8a3fc0d42231afda", "311b3b44fc7d51d4d66d90ab8a3fc0d42231afda", "311b3b44fc7d51d4d66d90ab8a3fc0d42231afda", "311b3b44fc7d51d4d66d90ab8a3fc0d42231afda", "cac9635a6733ffbbd816b33e21c3054e0cd81ab1", "cac9635a6733ffbbd816b33e21c3054e0cd81ab1", "cac9635a6733ffbbd816b33e21c3054e0cd81ab1", "cac9635a6733ffbbd816b33e21c3054e0cd81ab1", "311b3b44fc7d51d4d66d90ab8a3fc0d42231afda" ]
[ "python/paddle/fluid/tests/unittests/test_transpose_op.py", "python/paddle/fluid/tests/unittests/test_pixel_shuffle.py", "python/paddle/fluid/tests/unittests/test_onnx_export.py", "python/paddle/fluid/incubate/fleet/utils/utils.py", "python/paddle/text/datasets/wmt16.py", "python/paddle/fluid/tests/unittests/test_linspace.py", "python/paddle/fluid/tests/unittests/ir/inference/test_trt_conv_pass.py", "python/paddle/fluid/tests/unittests/test_static_save_load_large.py", "python/paddle/fluid/dygraph/base.py", "python/paddle/fluid/tests/custom_op/test_custom_op.py", "python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py" ]
[ "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid import Program, program_guard\n\npaddle.enable_static()\n\nclass TestTransposeOp(OpTest):\n def setUp(self):\n self.init_op_type()\n self.initTestCase()\n self.inputs = {'X': np.random.random(self.shape).astype(\"float64\")}\n self.attrs = {\n 'axis': list(self.axis),\n 'use_mkldnn': self.use_mkldnn,\n }\n self.outputs = {\n 'XShape': np.random.random(self.shape).astype(\"float64\"),\n 'Out': self.inputs['X'].transpose(self.axis)\n }\n\n def init_op_type(self):\n self.op_type = \"transpose2\"\n self.use_mkldnn = False\n\n def test_check_output(self):\n self.check_output(no_check_set=['XShape'])\n\n def test_check_grad(self):\n self.check_grad(['X'], 'Out')\n\n def initTestCase(self):\n self.shape = (3, 40)\n self.axis = (1, 0)\n\n\nclass TestCase0(TestTransposeOp):\n def initTestCase(self):\n self.shape = (100, )\n self.axis = (0, )\n\n\nclass TestCase1(TestTransposeOp):\n def initTestCase(self):\n self.shape = (3, 4, 10)\n self.axis = (0, 2, 1)\n\n\nclass TestCase2(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 3, 4, 5)\n self.axis = (0, 2, 3, 1)\n\n\nclass TestCase3(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 3, 4, 5, 6)\n self.axis = (4, 2, 3, 1, 0)\n\n\nclass TestCase4(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 3, 4, 5, 6, 1)\n self.axis = (4, 2, 3, 1, 0, 5)\n\n\nclass TestCase5(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 16, 96)\n self.axis = (0, 2, 1)\n\n\nclass TestCase6(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 10, 12, 16)\n self.axis = (3, 1, 2, 0)\n\n\nclass TestCase7(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 10, 2, 16)\n self.axis = (0, 1, 3, 2)\n\n\nclass TestCase8(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 3, 2, 3, 2, 4, 3, 3)\n self.axis = (0, 1, 3, 2, 4, 5, 6, 7)\n\n\nclass TestCase9(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 3, 2, 3, 2, 4, 3, 3)\n self.axis = (6, 1, 3, 5, 0, 2, 4, 7)\n\n\nclass TestTransposeOpError(unittest.TestCase):\n def test_errors(self):\n paddle.enable_static()\n with program_guard(Program(), Program()):\n x = fluid.layers.data(name='x', shape=[10, 5, 3], dtype='float64')\n\n def test_x_Variable_check():\n # the Input(x)'s type must be Variable\n fluid.layers.transpose(\"not_variable\", perm=[1, 0, 2])\n\n self.assertRaises(TypeError, test_x_Variable_check)\n\n def test_x_dtype_check():\n # the Input(x)'s dtype must be one of [float16, float32, float64, int32, int64]\n x1 = fluid.layers.data(\n name='x1', shape=[10, 5, 3], dtype='bool')\n fluid.layers.transpose(x1, perm=[1, 0, 2])\n\n self.assertRaises(TypeError, test_x_dtype_check)\n\n def test_perm_list_check():\n # Input(perm)'s type must be list\n 
fluid.layers.transpose(x, perm=\"[1, 0, 2]\")\n\n self.assertRaises(TypeError, test_perm_list_check)\n\n def test_perm_length_and_x_dim_check():\n # Input(perm) is the permutation of dimensions of Input(input)\n # its length should be equal to dimensions of Input(input)\n fluid.layers.transpose(x, perm=[1, 0, 2, 3, 4])\n\n self.assertRaises(ValueError, test_perm_length_and_x_dim_check)\n\n def test_each_elem_value_check():\n # Each element in Input(perm) should be less than Input(x)'s dimension\n fluid.layers.transpose(x, perm=[3, 5, 7])\n\n self.assertRaises(ValueError, test_each_elem_value_check)\n\nclass TestTransposeApi(unittest.TestCase):\n def test_static_out(self):\n paddle.enable_static()\n with paddle.static.program_guard(paddle.static.Program()):\n x = paddle.static.data(name='x', shape=[2, 3, 4], dtype='float32')\n x_trans1 = paddle.transpose(x, perm=[1, 0, 2])\n x_trans2 = paddle.transpose(x, perm=(2, 1, 0))\n place = paddle.CPUPlace()\n exe = paddle.static.Executor(place)\n x_np = np.random.random([2, 3, 4]).astype(\"float32\")\n result1, result2 = exe.run(feed={\"x\": x_np}, fetch_list=[x_trans1, x_trans2])\n expected_result1 = np.transpose(x_np, [1, 0, 2])\n expected_result2 = np.transpose(x_np, (2, 1, 0))\n \n np.testing.assert_array_equal(result1, expected_result1)\n np.testing.assert_array_equal(result2, expected_result2)\n\n def test_dygraph_out(self):\n # This is an old test before 2.0 API so we need to disable static\n # to trigger dygraph\n paddle.disable_static()\n x = paddle.randn([2, 3, 4])\n x_trans1 = paddle.transpose(x, perm=[1, 0, 2])\n x_trans2 = paddle.transpose(x, perm=(2, 1, 0))\n x_np = x.numpy()\n expected_result1 = np.transpose(x_np, [1, 0, 2])\n expected_result2 = np.transpose(x_np, (2, 1, 0))\n\n np.testing.assert_array_equal(x_trans1.numpy(), expected_result1)\n np.testing.assert_array_equal(x_trans2.numpy(), expected_result2)\n # This is an old test before 2.0 API so we enable static again after\n # dygraph test\n paddle.enable_static()\n\nclass TestTAPI(unittest.TestCase):\n def test_out(self):\n with fluid.program_guard(fluid.Program()):\n data = fluid.data(shape=[10], dtype=\"float64\", name=\"data\")\n data_t = paddle.t(data)\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n data_np = np.random.random([10]).astype(\"float64\")\n result, = exe.run(feed={\"data\": data_np}, fetch_list=[data_t])\n expected_result = np.transpose(data_np)\n self.assertEqual((result == expected_result).all(), True)\n\n with fluid.program_guard(fluid.Program()):\n data = fluid.data(shape=[10, 5], dtype=\"float64\", name=\"data\")\n data_t = paddle.t(data)\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n data_np = np.random.random([10, 5]).astype(\"float64\")\n result, = exe.run(feed={\"data\": data_np}, fetch_list=[data_t])\n expected_result = np.transpose(data_np)\n self.assertEqual((result == expected_result).all(), True)\n\n with fluid.program_guard(fluid.Program()):\n data = fluid.data(shape=[1, 5], dtype=\"float64\", name=\"data\")\n data_t = paddle.t(data)\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n data_np = np.random.random([1, 5]).astype(\"float64\")\n result, = exe.run(feed={\"data\": data_np}, fetch_list=[data_t])\n expected_result = np.transpose(data_np)\n self.assertEqual((result == expected_result).all(), True)\n\n with fluid.dygraph.guard():\n np_x = np.random.random([10]).astype(\"float64\")\n data = fluid.dygraph.to_variable(np_x)\n z = paddle.t(data)\n np_z = z.numpy()\n z_expected = np.array(np.transpose(np_x))\n 
self.assertEqual((np_z == z_expected).all(), True)\n\n with fluid.dygraph.guard():\n np_x = np.random.random([10, 5]).astype(\"float64\")\n data = fluid.dygraph.to_variable(np_x)\n z = paddle.t(data)\n np_z = z.numpy()\n z_expected = np.array(np.transpose(np_x))\n self.assertEqual((np_z == z_expected).all(), True)\n\n with fluid.dygraph.guard():\n np_x = np.random.random([1, 5]).astype(\"float64\")\n data = fluid.dygraph.to_variable(np_x)\n z = paddle.t(data)\n np_z = z.numpy()\n z_expected = np.array(np.transpose(np_x))\n self.assertEqual((np_z == z_expected).all(), True)\n\n def test_errors(self):\n with fluid.program_guard(fluid.Program()):\n x = fluid.data(name='x', shape=[10, 5, 3], dtype='float64')\n\n def test_x_dimension_check():\n paddle.t(x)\n\n self.assertRaises(ValueError, test_x_dimension_check)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\n\nfrom op_test import OpTest\nimport paddle\nimport paddle.nn.functional as F\nimport paddle.fluid.core as core\nimport paddle.fluid as fluid\n\n\ndef pixel_shuffle_np(x, up_factor, data_format=\"NCHW\"):\n if data_format == \"NCHW\":\n n, c, h, w = x.shape\n new_shape = (n, c // (up_factor * up_factor), up_factor, up_factor, h,\n w)\n # reshape to (num,output_channel,upscale_factor,upscale_factor,h,w)\n npresult = np.reshape(x, new_shape)\n # transpose to (num,output_channel,h,upscale_factor,w,upscale_factor)\n npresult = npresult.transpose(0, 1, 4, 2, 5, 3)\n oshape = [n, c // (up_factor * up_factor), h * up_factor, w * up_factor]\n npresult = np.reshape(npresult, oshape)\n return npresult\n else:\n n, h, w, c = x.shape\n new_shape = (n, h, w, c // (up_factor * up_factor), up_factor,\n up_factor)\n # reshape to (num,h,w,output_channel,upscale_factor,upscale_factor)\n npresult = np.reshape(x, new_shape)\n # transpose to (num,h,upscale_factor,w,upscale_factor,output_channel)\n npresult = npresult.transpose(0, 1, 4, 2, 5, 3)\n oshape = [n, h * up_factor, w * up_factor, c // (up_factor * up_factor)]\n npresult = np.reshape(npresult, oshape)\n return npresult\n\n\nclass TestPixelShuffleOp(OpTest):\n def setUp(self):\n self.op_type = \"pixel_shuffle\"\n self.init_data_format()\n n, c, h, w = 2, 9, 4, 4\n\n if self.format == \"NCHW\":\n shape = [n, c, h, w]\n if self.format == \"NHWC\":\n shape = [n, h, w, c]\n\n up_factor = 3\n\n x = np.random.random(shape).astype(\"float64\")\n npresult = pixel_shuffle_np(x, up_factor, self.format)\n\n self.inputs = {'X': x}\n self.outputs = {'Out': npresult}\n self.attrs = {'upscale_factor': up_factor, \"data_format\": self.format}\n\n def init_data_format(self):\n self.format = \"NCHW\"\n\n def test_check_output(self):\n self.check_output()\n\n def test_check_grad(self):\n self.check_grad(['X'], 'Out')\n\n\nclass TestChannelLast(TestPixelShuffleOp):\n def init_data_format(self):\n self.format = 
\"NHWC\"\n\n\nclass TestPixelShuffleAPI(unittest.TestCase):\n def setUp(self):\n self.x_1_np = np.random.random([2, 9, 4, 4]).astype(\"float64\")\n self.x_2_np = np.random.random([2, 4, 4, 9]).astype(\"float64\")\n self.out_1_np = pixel_shuffle_np(self.x_1_np, 3)\n self.out_2_np = pixel_shuffle_np(self.x_2_np, 3, \"NHWC\")\n\n def test_static_graph_functional(self):\n for use_cuda in ([False, True]\n if core.is_compiled_with_cuda() else [False]):\n place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()\n\n paddle.enable_static()\n x_1 = paddle.fluid.data(name=\"x\", shape=[2, 9, 4, 4], dtype=\"float64\")\n x_2 = paddle.fluid.data(name=\"x2\", shape=[2, 4, 4, 9], dtype=\"float64\")\n out_1 = F.pixel_shuffle(x_1, 3)\n out_2 = F.pixel_shuffle(x_2, 3, \"NHWC\")\n\n exe = paddle.static.Executor(place=place)\n res_1 = exe.run(fluid.default_main_program(),\n feed={\"x\": self.x_1_np},\n fetch_list=out_1,\n use_prune=True)\n\n res_2 = exe.run(fluid.default_main_program(),\n feed={\"x2\": self.x_2_np},\n fetch_list=out_2,\n use_prune=True)\n\n assert np.allclose(res_1, self.out_1_np)\n assert np.allclose(res_2, self.out_2_np)\n\n # same test between layer and functional in this op.\n def test_static_graph_layer(self):\n for use_cuda in ([False, True]\n if core.is_compiled_with_cuda() else [False]):\n place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()\n\n paddle.enable_static()\n x_1 = paddle.fluid.data(name=\"x\", shape=[2, 9, 4, 4], dtype=\"float64\")\n x_2 = paddle.fluid.data(name=\"x2\", shape=[2, 4, 4, 9], dtype=\"float64\")\n # init instance\n ps_1 = paddle.nn.PixelShuffle(3)\n ps_2 = paddle.nn.PixelShuffle(3, \"NHWC\")\n out_1 = ps_1(x_1)\n out_2 = ps_2(x_2)\n out_1_np = pixel_shuffle_np(self.x_1_np, 3)\n out_2_np = pixel_shuffle_np(self.x_2_np, 3, \"NHWC\")\n\n exe = paddle.static.Executor(place=place)\n res_1 = exe.run(fluid.default_main_program(),\n feed={\"x\": self.x_1_np},\n fetch_list=out_1,\n use_prune=True)\n\n res_2 = exe.run(fluid.default_main_program(),\n feed={\"x2\": self.x_2_np},\n fetch_list=out_2,\n use_prune=True)\n\n assert np.allclose(res_1, out_1_np)\n assert np.allclose(res_2, out_2_np)\n\n def run_dygraph(self, up_factor, data_format):\n\n n, c, h, w = 2, 9, 4, 4\n\n if data_format == \"NCHW\":\n shape = [n, c, h, w]\n if data_format == \"NHWC\":\n shape = [n, h, w, c]\n\n x = np.random.random(shape).astype(\"float64\")\n\n npresult = pixel_shuffle_np(x, up_factor, data_format)\n\n for use_cuda in ([False, True]\n if core.is_compiled_with_cuda() else [False]):\n place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()\n\n paddle.disable_static(place=place)\n\n pixel_shuffle = paddle.nn.PixelShuffle(\n up_factor, data_format=data_format)\n result = pixel_shuffle(paddle.to_tensor(x))\n\n self.assertTrue(np.allclose(result.numpy(), npresult))\n\n result_functional = F.pixel_shuffle(\n paddle.to_tensor(x), 3, data_format)\n self.assertTrue(np.allclose(result_functional.numpy(), npresult))\n\n def test_dygraph1(self):\n self.run_dygraph(3, \"NCHW\")\n\n def test_dygraph2(self):\n self.run_dygraph(3, \"NHWC\")\n\n\nclass TestPixelShuffleError(unittest.TestCase):\n def test_error_functional(self):\n def error_upscale_factor():\n with paddle.fluid.dygraph.guard():\n x = np.random.random([2, 9, 4, 4]).astype(\"float64\")\n pixel_shuffle = F.pixel_shuffle(paddle.to_tensor(x), 3.33)\n\n self.assertRaises(TypeError, error_upscale_factor)\n\n def error_data_format():\n with paddle.fluid.dygraph.guard():\n x = np.random.random([2, 9, 4, 
4]).astype(\"float64\")\n pixel_shuffle = F.pixel_shuffle(paddle.to_tensor(x), 3, \"WOW\")\n\n self.assertRaises(ValueError, error_data_format)\n\n def test_error_layer(self):\n def error_upscale_factor_layer():\n with paddle.fluid.dygraph.guard():\n x = np.random.random([2, 9, 4, 4]).astype(\"float64\")\n ps = paddle.nn.PixelShuffle(3.33)\n\n self.assertRaises(TypeError, error_upscale_factor_layer)\n\n def error_data_format_layer():\n with paddle.fluid.dygraph.guard():\n x = np.random.random([2, 9, 4, 4]).astype(\"float64\")\n ps = paddle.nn.PixelShuffle(3, \"MEOW\")\n\n self.assertRaises(ValueError, error_data_format_layer)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport os\nimport pickle\nimport unittest\nimport numpy as np\nimport paddle\nfrom paddle.static import InputSpec\n\n\nclass LinearNet(paddle.nn.Layer):\n def __init__(self):\n super(LinearNet, self).__init__()\n self._linear = paddle.nn.Linear(128, 10)\n\n def forward(self, x):\n return self._linear(x)\n\n\nclass Logic(paddle.nn.Layer):\n def __init__(self):\n super(Logic, self).__init__()\n\n def forward(self, x, y, z):\n if z:\n return x\n else:\n return y\n\n\nclass TestExportWithTensor(unittest.TestCase):\n def setUp(self):\n self.x_spec = paddle.static.InputSpec(\n shape=[None, 128], dtype='float32')\n\n def test_with_tensor():\n model = LinearNet()\n paddle.onnx.export(model, 'linear_net', input_spec=[self.x_spec])\n\n\nclass TestExportWithTensor(unittest.TestCase):\n def setUp(self):\n self.x = paddle.to_tensor(np.random.random((1, 128)))\n\n def test_with_tensor(self):\n model = LinearNet()\n paddle.onnx.export(model, 'linear_net', input_spec=[self.x])\n\n\nclass TestExportPrunedGraph(unittest.TestCase):\n def setUp(self):\n self.x = paddle.to_tensor(np.array([1]))\n self.y = paddle.to_tensor(np.array([-1]))\n\n def test_prune_graph(self):\n model = Logic()\n paddle.jit.to_static(model)\n out = model(self.x, self.y, z=True)\n paddle.onnx.export(\n model, 'pruned', input_spec=[self.x], output_spec=[out])\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function, absolute_import\nimport os\nimport sys\nimport logging\nimport subprocess\nimport numpy as np\nfrom collections import OrderedDict\nimport paddle.fluid as fluid\nfrom paddle.fluid import core\nfrom paddle.fluid.log_helper import get_logger\n\nfrom google.protobuf import text_format\nfrom paddle.fluid import debugger\nfrom paddle.fluid.framework import Program\nfrom paddle.fluid.proto import framework_pb2\n\n__all__ = [\n \"load_program\", \"save_program\", \"program_type_trans\",\n \"check_saved_vars_try_dump\", \"parse_program\", \"check_pruned_program_vars\",\n \"graphviz\"\n]\n\nlogging.basicConfig(\n format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\npersistable_vars_out_fn = \"vars_persistable.log\"\nall_vars_out_fn = \"vars_all.log\"\nops_out_fn = \"ops.log\"\n\nfeed_fetch_type_list = [\n core.VarDesc.VarType.FEED_MINIBATCH, core.VarDesc.VarType.FETCH_LIST\n]\nnot_expected_op_types = [\"lookup_table\"]\n\n\ndef load_program(model_filename, is_text=False):\n if is_text:\n return load_program_text(model_filename)\n return load_program_binary(model_filename)\n\n\ndef load_program_binary(model_filename):\n \"\"\"load program from binary string file\"\"\"\n with open(model_filename, \"rb\") as f:\n program_desc_str = f.read()\n return Program.parse_from_string(program_desc_str)\n\n\ndef load_program_text(model_filename):\n \"\"\"load program from human-readable text file\"\"\"\n with open(model_filename, \"r\") as f:\n program_desc_text = f.read()\n\n prog_desc = framework_pb2.ProgramDesc()\n text_format.Merge(program_desc_text, prog_desc)\n return Program.parse_from_string(prog_desc.SerializeToString())\n\n\ndef save_program(program, model_filename='__model__', is_text=False):\n if is_text:\n with open(model_filename, \"w\") as f:\n f.write(str(program))\n else:\n with open(model_filename, \"wb\") as f:\n f.write(program.desc.serialize_to_string())\n\n\ndef check_pruned_program_vars(train_prog, pruned_prog):\n is_match = True\n\n pruned_vars = [(v.name, v) for v in pruned_prog.list_vars()\n if fluid.io.is_persistable(v)]\n pruned_vars = OrderedDict(pruned_vars)\n pruned_vars_name = [name for name in pruned_vars]\n logger.info(\"persistable vars in pruned program: {}\".format(\n pruned_vars_name))\n\n for var_name in pruned_vars:\n var = pruned_vars[var_name]\n # feed and fetch op is added in pruned program when pruning, not need to be found in train program\n if var.type in feed_fetch_type_list:\n break\n try:\n train_prog_var = train_prog.global_block().var(var_name)\n except ValueError as e:\n logger.error(\n \"not find variable '%s' in train program. please check pruning.\"\n % var_name)\n logger.error(e)\n continue\n if var.shape != train_prog_var.shape or var.dtype != train_prog_var.dtype:\n logger.error(\n \"variable: {} not match. 
in pruned program shape: {} dtype:{}, in train program shape: {} dtype: {}\".\n format(var_name, var.shape, var.dtype, train_prog_var.shape,\n train_prog_var.dtype))\n is_match = False\n return is_match\n\n\ndef graphviz(block, output_dir=\"\", filename='debug'):\n dot_path = os.path.join(output_dir, filename + '.dot')\n pdf_path = os.path.join(output_dir, filename + '.pdf')\n debugger.draw_block_graphviz(block, path=dot_path)\n cmd = [\"dot\", \"-Tpdf\", dot_path, \"-o\", pdf_path]\n p = subprocess.Popen(\n cmd,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n p.wait()\n\n\ndef program_type_trans(prog_dir, prog_fn, is_text):\n prog = load_program(os.path.join(prog_dir, prog_fn), is_text)\n prog_out_fn = prog_fn + \".bin\" if is_text else prog_fn + \".pbtxt\"\n save_program(prog, os.path.join(prog_dir, prog_out_fn), 1 - is_text)\n return prog_out_fn\n\n\ndef append_save_op(block, var, path):\n block.append_op(\n type='save', inputs={'X': [var]}, outputs={},\n attrs={'file_path': path})\n\n\ndef append_load_op(block, var, path):\n block.append_op(\n type='load',\n inputs={},\n outputs={'Out': [var]},\n attrs={'file_path': path})\n\n\ndef save_var(np_array, var_name, shape_list, dtype, save_path):\n program = fluid.Program()\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n with fluid.program_guard(program):\n d0_data = fluid.layers.data(var_name, shape=shape_list, dtype=dtype)\n append_save_op(program.global_block(), d0_data, save_path)\n exe.run(feed={var_name: np_array}, fetch_list=[])\n\n\ndef load_var(var_name, shape_list, dtype, save_path):\n program = fluid.Program()\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n with fluid.program_guard(program):\n d0_data = fluid.layers.data(var_name, shape=shape_list, dtype=dtype)\n append_load_op(program.global_block(), d0_data, save_path)\n outs = exe.run(feed={}, fetch_list=[d0_data])\n return outs\n\n\ndef reader(batch_size, fn, dim):\n data = []\n if isinstance(dim, list) or isinstance(dim, tuple):\n shape = list(dim)\n _temp = 1\n for x in dim:\n _temp = _temp * x\n dim = _temp\n else:\n shape = [dim]\n\n shape = [batch_size] + shape\n dim = dim * batch_size\n\n for line in open(fn, 'r'):\n fields = line.strip().split(' ')\n fields = [float(d) for d in fields]\n while len(fields) >= dim:\n tmp = fields[:dim]\n fields = fields[dim:]\n data.append(np.array(tmp).reshape(shape))\n return data\n\n\ndef feed_gen(batch_size, feeded_vars_dims, feeded_vars_filelist):\n batch_feed = []\n for i, fn in enumerate(feeded_vars_filelist):\n batch_feed.append(reader(batch_size, fn, feeded_vars_dims[i]))\n return batch_feed\n\n\ndef try_load_model_vars(dump_dir, dump_prog_fn, is_text_dump_program,\n batch_size, feed_config, fetch_config, save_filename,\n saved_params):\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n scope = fluid.core.Scope()\n with fluid.scope_guard(scope):\n if is_text_dump_program:\n dump_prog_fn = program_type_trans(dump_dir, dump_prog_fn,\n is_text_dump_program)\n inference_program, feed_target_names, fetch_targets = \\\n fluid.io.load_inference_model(dump_dir, exe, model_filename=dump_prog_fn,\n params_filename=save_filename)\n\n # check program vars and saved vars shape\n orig_para_shape = {\n each_var.name: tuple(each_var.desc.shape())\n for each_var in saved_params\n }\n for each_var in saved_params:\n var_temp = fluid.global_scope().find_var(each_var.name)\n assert var_temp != None, \"can't not find var: \" + each_var.name\n new_shape = 
(np.array(var_temp.get_tensor())).shape\n assert each_var.name in orig_para_shape, each_var.name + \"MUST in var list\"\n orig_shape = orig_para_shape.get(each_var.name)\n if new_shape != orig_shape:\n raise RuntimeError(\n \"Shape not matching: the Program requires a parameter with a shape of ({}), \"\n \"while the loaded parameter (namely [ {} ]) has a shape of ({}).\".\n format(orig_shape, each_var.name, new_shape))\n\n # check feed/fetch vars in program and config\n fetch_targets_names = [v.name for v in fetch_targets]\n if not feed_target_names:\n logger.warning(\"no feed targets in program.\")\n if not fetch_targets_names:\n logger.warning(\"no fetch targets in program.\")\n fetch_list = fetch_targets\n feed_name_list = feed_target_names\n if feed_config.feeded_vars_names is not None and feed_target_names != feed_config.feeded_vars_names:\n logger.warning(\n \"feed vars in program and config are diff: feed in program: {}. feed in config {}.\".\n format(feed_target_names, feed_config.feeded_vars_names))\n feed_name_list = feed_config.feeded_vars_names\n # remove feed op in inference_program. new feed op will be added in exe.run\n global_block = inference_program.global_block()\n need_to_remove_op_index = []\n for i, op in enumerate(global_block.ops):\n op.desc.set_is_target(False)\n if op.type == \"feed\": # only remove feed op here\n need_to_remove_op_index.append(i)\n for index in need_to_remove_op_index[::-1]:\n global_block._remove_op(index)\n if fetch_config.fetch_vars_names is not None and fetch_targets_names != fetch_config.fetch_vars_names:\n logger.warning(\n \"fetch vars in program and config are diff: fetch in program: {}. fetch in config {}.\".\n format(fetch_targets_names, fetch_config.fetch_vars_names))\n fetch_list = [\n inference_program.global_block().var(i)\n for i in fetch_config.fetch_vars_names\n ]\n # remove fetch op in inference_program. new fetch op will be added in exe.run\n global_block = inference_program.global_block()\n need_to_remove_op_index = []\n for i, op in enumerate(global_block.ops):\n op.desc.set_is_target(False)\n if op.type == \"fetch\": # only remove fetch op here\n need_to_remove_op_index.append(i)\n for index in need_to_remove_op_index[::-1]:\n global_block._remove_op(index)\n\n # if fetch_list have lod tensor\n return_numpy = all([v.lod_level == 0 for v in fetch_list])\n\n # try dump fetch_targets\n feed_tensors = []\n assert len(feed_config.feeded_vars_names) == len(\n feed_config.feeded_vars_dims) == len(feed_config.feeded_vars_types)\n # check program vars and feed tensor shape in config\n for i in range(len(feed_config.feeded_vars_names)):\n var = inference_program.global_block().var(\n feed_config.feeded_vars_names[i])\n if not isinstance(feed_config.feeded_vars_dims[i], (list, tuple)):\n tensor_shape = (feed_config.feeded_vars_dims[i], )\n else:\n tensor_shape = tuple(feed_config.feeded_vars_dims[i])\n feed_config.feeded_vars_dims[i] = tensor_shape\n var_shape = var.shape[1:]\n if tensor_shape != var_shape:\n raise RuntimeError(\n \"feed variable '{}' shape not match. infer program shape: {}. feed tensor shape: {}\".\n format(feed_config.feeded_vars_names[i], var_shape,\n tensor_shape))\n\n if not feed_config.feeded_vars_filelist:\n logger.info(\"generate random feed vars.\")\n for i in range(len(feed_config.feeded_vars_names)):\n var = inference_program.global_block().var(\n feed_config.feeded_vars_names[i])\n # create fake feed tensor. 
if lod_level > 1, should create_lod_tensor()\n if var.lod_level == 0:\n feed_tensors.append(\n np.array(\n np.random.random(\n tuple([batch_size] + list(\n feed_config.feeded_vars_dims[i]))),\n dtype=feed_config.feeded_vars_types[i]))\n elif var.lod_level == 1:\n t = np.array(\n np.random.random(\n tuple([batch_size] + list(\n feed_config.feeded_vars_dims[i]))),\n dtype=feed_config.feeded_vars_types[i])\n feed_tensors.append(\n fluid.create_lod_tensor(t, [[1] * batch_size], place))\n else:\n raise RuntimeError(\n \"vars with lod_level >= 2 is not supported now in this infer program check tool.\"\n )\n results = exe.run(inference_program,\n feed={\n name: feed_tensors[i]\n for i, name in enumerate(feed_name_list)\n },\n fetch_list=fetch_list,\n return_numpy=return_numpy)\n else:\n logger.info(\"load feed vars from files: {}.\".format(\n feed_config.feeded_vars_filelist))\n feed_vars = [\n inference_program.global_block().var(\n feed_config.feeded_vars_names[i])\n for i in range(len(feed_config.feeded_vars_names))\n ]\n feeder = fluid.DataFeeder(feed_list=feed_vars, place=place)\n batch_feed = feed_gen(batch_size, feed_config.feeded_vars_dims,\n feed_config.feeded_vars_filelist)\n slots = [batch_feed]\n results = exe.run(inference_program,\n feed=feeder.feed(slots),\n fetch_list=fetch_list,\n return_numpy=return_numpy)\n for i, v in enumerate(fetch_list):\n logger.info(\"fetch_targets name: %s\" % v.name)\n logger.info(\"fetch_targets: {}\".format(results[i]))\n return results\n\n\ndef check_not_expected_ops(prog):\n op_types_set = set()\n for op in prog.global_block().ops:\n if op.type in not_expected_op_types and op.type not in op_types_set:\n logger.warning(\n \"find op type '{}' in program, please check if your program is pruned correctly !\".\n format(op.type))\n op_types_set.add(op.type)\n\n\ndef check_saved_vars_try_dump(dump_dir,\n dump_prog_fn,\n is_text_dump_program,\n feed_config,\n fetch_config,\n batch_size=1,\n save_filename=None):\n dump_prog = load_program(\n os.path.join(dump_dir, dump_prog_fn), is_text_dump_program)\n saved_params = [\n v for v in dump_prog.list_vars() if fluid.io.is_persistable(v)\n ]\n logger.info(\"persistable vars in dump program: {}\".format(\n [v.name for v in saved_params]))\n\n check_not_expected_ops(dump_prog)\n\n return try_load_model_vars(dump_dir, dump_prog_fn, is_text_dump_program,\n batch_size, feed_config, fetch_config,\n save_filename, saved_params)\n\n\ndef parse_program(program, output_dir):\n # persistable vars\n output = {}\n persistable_vars = [\n v for v in program.list_vars() if fluid.io.is_persistable(v)\n ]\n output[\"persistable_vars\"] = [{\n 'name': str(v.name),\n 'shape': str(v.shape),\n 'lod_level': int(v.lod_level),\n 'dtype': str(v.dtype),\n 'type': str(v.type)\n } for v in persistable_vars]\n with open(os.path.join(output_dir, persistable_vars_out_fn), 'w') as f:\n f.write(\"persistable vars:\\n\")\n for var in output[\"persistable_vars\"]:\n f.write(str(var))\n f.write(\"\\n\")\n\n # all vars\n all_vars = [v for v in program.list_vars()]\n output[\"all_vars\"] = [{\n 'name': str(v.name),\n 'shape': str(v.shape),\n 'lod_level': int(v.lod_level),\n 'dtype': str(v.dtype)\n } if v.type not in feed_fetch_type_list else {\n 'name': str(v.name),\n 'type': str(v.type)\n } for v in all_vars]\n with open(os.path.join(output_dir, all_vars_out_fn), 'w') as f:\n f.write(\"all vars:\\n\")\n for var in output[\"all_vars\"]:\n f.write(str(var))\n f.write(\"\\n\")\n\n # ops\n ops = program.global_block().ops\n output[\"ops\"] = [{\n 'type': 
op.type,\n 'input_arg_names': str(op.input_arg_names),\n 'output_arg_names': str(op.output_arg_names)\n } for op in ops]\n with open(os.path.join(output_dir, ops_out_fn), 'w') as f:\n f.write(\"ops:\\n\")\n for op in output[\"ops\"]:\n f.write(str(op))\n f.write(\"\\n\")\n", "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport six\nimport tarfile\nimport numpy as np\nfrom collections import defaultdict\n\nimport paddle\nfrom paddle.io import Dataset\nimport paddle.compat as cpt\nfrom paddle.dataset.common import _check_exists_and_download\n\n__all__ = ['WMT16']\n\nDATA_URL = (\"http://paddlemodels.bj.bcebos.com/wmt/wmt16.tar.gz\")\nDATA_MD5 = \"0c38be43600334966403524a40dcd81e\"\n\nTOTAL_EN_WORDS = 11250\nTOTAL_DE_WORDS = 19220\n\nSTART_MARK = \"<s>\"\nEND_MARK = \"<e>\"\nUNK_MARK = \"<unk>\"\n\n\nclass WMT16(Dataset):\n \"\"\"\n Implementation of `WMT16 <http://www.statmt.org/wmt16/>`_ test dataset.\n ACL2016 Multimodal Machine Translation. Please see this website for more\n details: http://www.statmt.org/wmt16/multimodal-task.html#task1\n\n If you use the dataset created for your task, please cite the following paper:\n Multi30K: Multilingual English-German Image Descriptions.\n\n .. code-block:: text\n\n @article{elliott-EtAl:2016:VL16,\n author = {{Elliott}, D. and {Frank}, S. and {Sima\"an}, K. and {Specia}, L.},\n title = {Multi30K: Multilingual English-German Image Descriptions},\n booktitle = {Proceedings of the 6th Workshop on Vision and Language},\n year = {2016},\n pages = {70--74},\n year = 2016\n }\n\n Args:\n data_file(str): path to data tar file, can be set None if\n :attr:`download` is True. Default None\n mode(str): 'train', 'test' or 'val'. Default 'train'\n src_dict_size(int): word dictionary size for source language word. Default -1.\n trg_dict_size(int): word dictionary size for target language word. Default -1.\n lang(str): source language, 'en' or 'de'. Default 'en'.\n download(bool): whether to download dataset automatically if\n :attr:`data_file` is not set. Default True\n\n Returns:\n Dataset: instance of WMT16 dataset\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle\n from paddle.text.datasets import WMT16\n\n class SimpleNet(paddle.nn.Layer):\n def __init__(self):\n super(SimpleNet, self).__init__()\n\n def forward(self, src_ids, trg_ids, trg_ids_next):\n return paddle.sum(src_ids), paddle.sum(trg_ids), paddle.sum(trg_ids_next)\n\n paddle.disable_static()\n\n wmt16 = WMT16(mode='train', src_dict_size=50, trg_dict_size=50)\n\n for i in range(10):\n src_ids, trg_ids, trg_ids_next = wmt16[i]\n src_ids = paddle.to_tensor(src_ids)\n trg_ids = paddle.to_tensor(trg_ids)\n trg_ids_next = paddle.to_tensor(trg_ids_next)\n\n model = SimpleNet()\n src_ids, trg_ids, trg_ids_next = model(src_ids, trg_ids, trg_ids_next)\n print(src_ids.numpy(), trg_ids.numpy(), trg_ids_next.numpy())\n\n \"\"\"\n\n def __init__(self,\n data_file=None,\n mode='train',\n src_dict_size=-1,\n trg_dict_size=-1,\n lang='en',\n download=True):\n assert mode.lower() in ['train', 'test', 'val'], \\\n \"mode should be 'train', 'test' or 'val', but got {}\".format(mode)\n self.mode = mode.lower()\n\n self.data_file = data_file\n if self.data_file is None:\n assert download, \"data_file is not set and downloading automatically is disabled\"\n self.data_file = _check_exists_and_download(\n data_file, DATA_URL, DATA_MD5, 'wmt16', download)\n\n self.lang = lang\n assert src_dict_size > 0, \"dict_size should be set as positive number\"\n assert trg_dict_size > 0, \"dict_size should be set as positive number\"\n self.src_dict_size = min(src_dict_size, (TOTAL_EN_WORDS if lang == \"en\"\n else TOTAL_DE_WORDS))\n self.trg_dict_size = min(trg_dict_size, (TOTAL_DE_WORDS if lang == \"en\"\n else TOTAL_EN_WORDS))\n\n # load source and target word dict\n self.src_dict = self._load_dict(lang, src_dict_size)\n self.trg_dict = self._load_dict(\"de\" if lang == \"en\" else \"en\",\n trg_dict_size)\n\n # load data\n self.data = self._load_data()\n\n def _load_dict(self, lang, dict_size, reverse=False):\n dict_path = os.path.join(paddle.dataset.common.DATA_HOME,\n \"wmt16/%s_%d.dict\" % (lang, dict_size))\n dict_found = False\n if os.path.exists(dict_path):\n with open(dict_path, \"rb\") as d:\n dict_found = len(d.readlines()) == dict_size\n if not dict_found:\n self._build_dict(dict_path, dict_size, lang)\n\n word_dict = {}\n with open(dict_path, \"rb\") as fdict:\n for idx, line in enumerate(fdict):\n if reverse:\n word_dict[idx] = cpt.to_text(line.strip())\n else:\n word_dict[cpt.to_text(line.strip())] = idx\n return word_dict\n\n def _build_dict(self, dict_path, dict_size, lang):\n word_dict = defaultdict(int)\n with tarfile.open(self.data_file, mode=\"r\") as f:\n for line in f.extractfile(\"wmt16/train\"):\n line = cpt.to_text(line)\n line_split = line.strip().split(\"\\t\")\n if len(line_split) != 2: continue\n sen = line_split[0] if self.lang == \"en\" else line_split[1]\n for w in sen.split():\n word_dict[w] += 1\n\n with open(dict_path, \"wb\") as fout:\n fout.write(\n cpt.to_bytes(\"%s\\n%s\\n%s\\n\" % (START_MARK, END_MARK, UNK_MARK)))\n for idx, word in enumerate(\n sorted(\n six.iteritems(word_dict),\n key=lambda x: x[1],\n reverse=True)):\n if idx + 3 == dict_size: break\n fout.write(cpt.to_bytes(word[0]))\n fout.write(cpt.to_bytes('\\n'))\n\n def _load_data(self):\n # the index for start mark, end mark, and unk are the same in source\n # language and target language. 
Here uses the source language\n # dictionary to determine their indices.\n start_id = self.src_dict[START_MARK]\n end_id = self.src_dict[END_MARK]\n unk_id = self.src_dict[UNK_MARK]\n\n src_col = 0 if self.lang == \"en\" else 1\n trg_col = 1 - src_col\n\n self.src_ids = []\n self.trg_ids = []\n self.trg_ids_next = []\n with tarfile.open(self.data_file, mode=\"r\") as f:\n for line in f.extractfile(\"wmt16/{}\".format(self.mode)):\n line = cpt.to_text(line)\n line_split = line.strip().split(\"\\t\")\n if len(line_split) != 2:\n continue\n src_words = line_split[src_col].split()\n src_ids = [start_id] + [\n self.src_dict.get(w, unk_id) for w in src_words\n ] + [end_id]\n\n trg_words = line_split[trg_col].split()\n trg_ids = [self.trg_dict.get(w, unk_id) for w in trg_words]\n\n trg_ids_next = trg_ids + [end_id]\n trg_ids = [start_id] + trg_ids\n\n self.src_ids.append(src_ids)\n self.trg_ids.append(trg_ids)\n self.trg_ids_next.append(trg_ids_next)\n\n def __getitem__(self, idx):\n return (np.array(self.src_ids[idx]), np.array(self.trg_ids[idx]),\n np.array(self.trg_ids_next[idx]))\n\n def __len__(self):\n return len(self.src_ids)\n\n def get_dict(self, lang, reverse=False):\n \"\"\"\n return the word dictionary for the specified language.\n\n Args:\n lang(string): A string indicating which language is the source\n language. Available options are: \"en\" for English\n and \"de\" for Germany.\n reverse(bool): If reverse is set to False, the returned python\n dictionary will use word as key and use index as value.\n If reverse is set to True, the returned python\n dictionary will use index as key and word as value.\n\n Returns:\n dict: The word dictionary for the specific language.\n\n Examples:\n \n .. code-block:: python\n \n from paddle.text.datasets import WMT16\n wmt16 = WMT16(mode='train', src_dict_size=50, trg_dict_size=50)\n en_dict = wmt16.get_dict('en')\n\n \"\"\"\n dict_size = self.src_dict_size if lang == self.lang else self.trg_dict_size\n\n dict_path = os.path.join(paddle.dataset.common.DATA_HOME,\n \"wmt16/%s_%d.dict\" % (lang, dict_size))\n assert os.path.exists(dict_path), \"Word dictionary does not exist. \"\n \"Please invoke paddle.dataset.wmt16.train/test/validation first \"\n \"to build the dictionary.\"\n return self._load_dict(lang, dict_size)\n", "# Copyright (c) 2018 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid import compiler, Program, program_guard\nfrom paddle.fluid import core\n\n\nclass TestLinspaceOpCommonCase(OpTest):\n def setUp(self):\n self.op_type = \"linspace\"\n dtype = 'float32'\n self.inputs = {\n 'Start': np.array([0]).astype(dtype),\n 'Stop': np.array([10]).astype(dtype),\n 'Num': np.array([11]).astype('int32')\n }\n self.attrs = {'dtype': int(core.VarDesc.VarType.FP32)}\n\n self.outputs = {'Out': np.arange(0, 11).astype(dtype)}\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestLinspaceOpReverseCase(OpTest):\n def setUp(self):\n self.op_type = \"linspace\"\n dtype = 'float32'\n self.inputs = {\n 'Start': np.array([10]).astype(dtype),\n 'Stop': np.array([0]).astype(dtype),\n 'Num': np.array([11]).astype('int32')\n }\n self.attrs = {'dtype': int(core.VarDesc.VarType.FP32)}\n\n self.outputs = {'Out': np.arange(10, -1, -1).astype(dtype)}\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestLinspaceOpNumOneCase(OpTest):\n def setUp(self):\n self.op_type = \"linspace\"\n dtype = 'float32'\n self.inputs = {\n 'Start': np.array([10]).astype(dtype),\n 'Stop': np.array([0]).astype(dtype),\n 'Num': np.array([1]).astype('int32')\n }\n self.attrs = {'dtype': int(core.VarDesc.VarType.FP32)}\n\n self.outputs = {'Out': np.array(10, dtype=dtype)}\n\n def test_check_output(self):\n self.check_output()\n\n\nclass TestLinspaceAPI(unittest.TestCase):\n def test_variable_input1(self):\n start = paddle.full(shape=[1], fill_value=0, dtype='float32')\n stop = paddle.full(shape=[1], fill_value=10, dtype='float32')\n num = paddle.full(shape=[1], fill_value=5, dtype='int32')\n out = paddle.linspace(start, stop, num, dtype='float32')\n exe = fluid.Executor(place=fluid.CPUPlace())\n res = exe.run(fluid.default_main_program(), fetch_list=[out])\n np_res = np.linspace(0, 10, 5, dtype='float32')\n self.assertEqual((res == np_res).all(), True)\n\n def test_variable_input2(self):\n paddle.disable_static()\n start = paddle.full(shape=[1], fill_value=0, dtype='float32')\n stop = paddle.full(shape=[1], fill_value=10, dtype='float32')\n num = paddle.full(shape=[1], fill_value=5, dtype='int32')\n out = paddle.linspace(start, stop, num, dtype='float32')\n np_res = np.linspace(0, 10, 5, dtype='float32')\n self.assertEqual((out.numpy() == np_res).all(), True)\n paddle.enable_static()\n\n def test_dtype(self):\n out_1 = paddle.linspace(0, 10, 5, dtype='float32')\n out_2 = paddle.linspace(0, 10, 5, dtype=np.float32)\n out_3 = paddle.linspace(0, 10, 5, dtype=core.VarDesc.VarType.FP32)\n exe = fluid.Executor(place=fluid.CPUPlace())\n res_1, res_2, res_3 = exe.run(fluid.default_main_program(),\n fetch_list=[out_1, out_2, out_3])\n assert np.array_equal(res_1, res_2)\n\n def test_name(self):\n with paddle.static.program_guard(paddle.static.Program()):\n 
out = paddle.linspace(\n 0, 10, 5, dtype='float32', name='linspace_res')\n assert 'linspace_res' in out.name\n\n def test_imperative(self):\n paddle.disable_static()\n out1 = paddle.linspace(0, 10, 5, dtype='float32')\n np_out1 = np.linspace(0, 10, 5, dtype='float32')\n out2 = paddle.linspace(0, 10, 5, dtype='int32')\n np_out2 = np.linspace(0, 10, 5, dtype='int32')\n out3 = paddle.linspace(0, 10, 200, dtype='int32')\n np_out3 = np.linspace(0, 10, 200, dtype='int32')\n paddle.enable_static()\n self.assertEqual((out1.numpy() == np_out1).all(), True)\n self.assertEqual((out2.numpy() == np_out2).all(), True)\n self.assertEqual((out3.numpy() == np_out3).all(), True)\n\n\nclass TestLinspaceOpError(unittest.TestCase):\n def test_errors(self):\n with program_guard(Program(), Program()):\n\n def test_dtype():\n fluid.layers.linspace(0, 10, 1, dtype=\"int8\")\n\n self.assertRaises(TypeError, test_dtype)\n\n def test_dtype():\n fluid.layers.linspace(0, 10, 1.33, dtype=\"int32\")\n\n self.assertRaises(TypeError, test_dtype)\n\n def test_start_type():\n fluid.layers.linspace([0], 10, 1, dtype=\"float32\")\n\n self.assertRaises(TypeError, test_start_type)\n\n def test_end_dtype():\n fluid.layers.linspace(0, [10], 1, dtype=\"float32\")\n\n self.assertRaises(TypeError, test_end_dtype)\n\n def test_step_dtype():\n fluid.layers.linspace(0, 10, [0], dtype=\"float32\")\n\n self.assertRaises(TypeError, test_step_dtype)\n\n def test_start_dtype():\n start = fluid.data(shape=[1], dtype=\"float64\", name=\"start\")\n fluid.layers.linspace(start, 10, 1, dtype=\"float32\")\n\n self.assertRaises(ValueError, test_start_dtype)\n\n def test_end_dtype():\n end = fluid.data(shape=[1], dtype=\"float64\", name=\"end\")\n fluid.layers.linspace(0, end, 1, dtype=\"float32\")\n\n self.assertRaises(ValueError, test_end_dtype)\n\n def test_num_dtype():\n num = fluid.data(shape=[1], dtype=\"int32\", name=\"step\")\n fluid.layers.linspace(0, 10, num, dtype=\"float32\")\n\n self.assertRaises(TypeError, test_step_dtype)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\nimport unittest\nimport numpy as np\nfrom inference_pass_test import InferencePassTest\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nfrom paddle.fluid.core import PassVersionChecker\nfrom paddle.fluid.core import AnalysisConfig\n\n\nclass TensorRTSubgraphPassConvTest(InferencePassTest):\n def setUp(self):\n self.set_params()\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(\n name=\"data\", shape=[-1, 6, 64, 64], dtype=\"float32\")\n conv_out = fluid.layers.conv2d(\n input=data,\n num_filters=self.conv_num_filters,\n filter_size=self.conv_filter_size,\n groups=self.conv_groups,\n padding=self.conv_padding,\n bias_attr=False,\n act=None)\n self.feeds = {\n \"data\": np.random.random([1, 6, 64, 64]).astype(\"float32\"),\n }\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassConvTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)\n self.fetch_list = [conv_out]\n\n def set_params(self):\n self.conv_num_filters = 6\n self.conv_filter_size = 6\n self.conv_groups = 3\n self.conv_padding = [1, 1]\n\n def test_check_output(self):\n if core.is_compiled_with_cuda():\n use_gpu = True\n self.check_output_with_option(use_gpu)\n self.assertTrue(\n PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))\n\n\nclass TensorRTSubgraphPassConvValidPaddingTest(TensorRTSubgraphPassConvTest):\n def set_params(self):\n self.conv_num_filters = 6\n self.conv_filter_size = 6\n self.conv_groups = 3\n self.conv_padding = 'VALID'\n\n\nclass TensorRTSubgraphPassConvSamePaddingTest(InferencePassTest):\n def set_params(self):\n self.conv_num_filters = 6\n self.conv_filter_size = 6\n self.conv_groups = 3\n self.conv_padding = 'SAME'\n\n\nclass TensorRTSubgraphPassDepthwiseConvTest(TensorRTSubgraphPassConvTest):\n def set_params(self):\n self.conv_num_filters = 6\n self.conv_filter_size = 6\n self.conv_groups = 6\n self.conv_padding = [1, 1]\n\n\nclass TensorRTSubgraphPassConvTransposeTest(InferencePassTest):\n def setUp(self):\n self.set_params()\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(\n name=\"data\", shape=[-1, 6, 64, 64], dtype=\"float32\")\n conv_out = fluid.layers.conv2d_transpose(\n input=data,\n num_filters=self.conv_num_filters,\n filter_size=self.conv_filter_size,\n groups=self.conv_groups,\n padding=self.conv_padding,\n bias_attr=False,\n act=None)\n self.feeds = {\n \"data\": np.random.random([1, 6, 64, 64]).astype(\"float32\"),\n }\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassConvTransposeTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)\n self.fetch_list = [conv_out]\n\n def set_params(self):\n self.conv_num_filters = 6\n self.conv_filter_size = 6\n self.conv_groups = 1\n self.conv_padding = [1, 1]\n\n def test_check_output(self):\n if core.is_compiled_with_cuda():\n use_gpu = True\n 
self.check_output_with_option(use_gpu)\n self.assertTrue(\n PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))\n\n\nclass TensorRTSubgraphPassConvTransposeValidPaddingTest(\n TensorRTSubgraphPassConvTransposeTest):\n def set_params(self):\n self.conv_num_filters = 6\n self.conv_filter_size = 6\n self.conv_groups = 1\n self.conv_padding = 'VALID'\n\n\nclass TensorRTSubgraphPassConvTransposeSamePaddingTest(\n TensorRTSubgraphPassConvTransposeTest):\n def set_params(self):\n self.conv_num_filters = 6\n self.conv_filter_size = 6\n self.conv_groups = 1\n self.conv_padding = 'SAME'\n\n\nclass TensorRTSubgraphPassDepthwiseConvTransposeTest(\n TensorRTSubgraphPassConvTransposeTest):\n def set_params(self):\n self.conv_num_filters = 6\n self.conv_filter_size = 6\n self.conv_groups = 1\n self.conv_padding = [1, 1]\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.framework as framework\nfrom test_imperative_base import new_program_scope\n\nimport numpy as np\nimport six\nimport pickle\nimport os\n\n\nclass TestStaticSaveLoadLargeParameters(unittest.TestCase):\n def test_large_parameters_static_save(self):\n # enable static mode\n paddle.enable_static()\n LARGE_PARAM = 2**26\n with new_program_scope():\n # create network\n x = paddle.static.data(\n name=\"static_save_load_large_x\",\n shape=[None, 10],\n dtype='float32')\n z = paddle.static.nn.fc(x, LARGE_PARAM, bias_attr=False)\n place = paddle.CPUPlace()\n exe = paddle.static.Executor(place)\n exe.run(paddle.static.default_startup_program())\n prog = paddle.static.default_main_program()\n\n base_map = {}\n for var in prog.list_vars():\n if isinstance(var, framework.Parameter) or var.persistable:\n t = np.array(fluid.global_scope().find_var(var.name)\n .get_tensor())\n # make sure all the paramerter or optimizer var have been update\n self.assertTrue(np.sum(np.abs(t)) != 0)\n base_map[var.name] = t\n\n path = os.path.join(\"test_static_save_load_large_param\",\n \"static_save\")\n paddle.fluid.save(prog, path)\n # set var to zero\n for var in prog.list_vars():\n if isinstance(var, framework.Parameter) or var.persistable:\n ten = fluid.global_scope().find_var(var.name).get_tensor()\n ten.set(np.zeros_like(np.array(ten)), place)\n\n new_t = np.array(fluid.global_scope().find_var(var.name)\n .get_tensor())\n self.assertTrue(np.sum(np.abs(new_t)) == 0)\n\n paddle.fluid.load(prog, path)\n\n for var in prog.list_vars():\n if isinstance(var, framework.Parameter) or var.persistable:\n new_t = np.array(fluid.global_scope().find_var(var.name)\n .get_tensor())\n base_t = base_map[var.name]\n self.assertTrue(np.array_equal(new_t, base_t))\n\n # set var to zero\n for var in prog.list_vars():\n if isinstance(var, framework.Parameter) or var.persistable:\n ten = fluid.global_scope().find_var(var.name).get_tensor()\n 
ten.set(np.zeros_like(np.array(ten)), place)\n\n new_t = np.array(fluid.global_scope().find_var(var.name)\n .get_tensor())\n self.assertTrue(np.sum(np.abs(new_t)) == 0)\n\n program_state = fluid.load_program_state(path)\n fluid.set_program_state(prog, program_state)\n for var in prog.list_vars():\n if isinstance(var, framework.Parameter) or var.persistable:\n new_t = np.array(fluid.global_scope().find_var(var.name)\n .get_tensor())\n base_t = base_map[var.name]\n self.assertTrue(np.array_equal(new_t, base_t))\n", "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom ..wrapped_decorator import signature_safe_contextmanager, wrap_decorator\nimport decorator\nimport contextlib\nimport functools\nimport inspect\nimport sys\nimport numpy as np\nfrom paddle.fluid import core\nfrom paddle.fluid import framework\nfrom paddle.fluid.multiprocess_utils import CleanupFuncRegistrar\nfrom .tracer import Tracer\nimport logging\nfrom ..data_feeder import convert_dtype\nimport warnings\nfrom ..framework import _get_paddle_place\n\n__all__ = [\n 'no_grad', 'no_grad_', 'grad', 'guard', 'enable_dygraph', 'disable_dygraph',\n 'enabled', 'to_variable'\n]\n\n\ndef _switch_to_static_graph_(func):\n def __impl__(*args, **kwargs):\n with framework._dygraph_guard(None):\n return func(*args, **kwargs)\n\n return __impl__\n\n\nswitch_to_static_graph = wrap_decorator(_switch_to_static_graph_)\n\n\n@signature_safe_contextmanager\ndef program_desc_tracing_guard(enable):\n tracer = framework._dygraph_tracer()\n if tracer:\n original_val = tracer._enable_program_desc_tracing\n tracer._enable_program_desc_tracing = enable\n try:\n yield\n finally:\n if tracer:\n tracer._enable_program_desc_tracing = original_val\n\n\n_functional_dygraph_context_manager = None\n\n\n@signature_safe_contextmanager\ndef param_guard(parameters):\n # Note: parameters is a reference of self._parameters or self._buffers\n if not framework.in_dygraph_mode() and parameters:\n origin_parameters = parameters.copy()\n for name, var_base in parameters.items():\n if isinstance(var_base, core.VarBase):\n # Convert ParamBase into Parameter with same attributes in dy2stat.\n if isinstance(var_base, framework.ParamBase):\n new_var = var_base._to_static_var(to_parameter=True)\n else:\n # Check whether has been created before.\n if var_base.name in var_base.block.vars:\n new_var = var_base.block.vars[var_base.name]\n # Note(Aurelius84): Convert VarBase in self._buffers into Variabe with\n # same attributes and set persistable=True to allow saving this var.\n # Because users can create a VarBase in `__init__` like a\n # `mask` Tensor or `hidden_0` in RNN layers, which is equivalent to a Parameter\n # and necessary for inferring. It will be pruned if it's not necessary for inferring.\n else:\n # But if its shape is empty while created from `create_variable()`, we consider this buffer\n # non-persistable. 
See case of `drop_state` in lstm api.\n is_persistable = len(var_base.shape) > 0\n\n new_var = var_base._to_static_var(\n to_parameter=False, persistable=is_persistable)\n parameters[name] = new_var\n yield\n parameters.update(origin_parameters)\n else:\n yield\n\n\ndef enabled():\n \"\"\"\n This function checks whether the program runs in dynamic graph mode or not.\n You can enter dynamic graph mode with :ref:`api_fluid_dygraph_guard` api,\n or enable and disable dynamic graph mode with :ref:`api_fluid_dygraph_enable_dygraph`\n and :ref:`api_fluid_dygraph_disable_dygraph` api .\n\n **Note**:\n ``fluid.dygraph.enabled`` is the alias of ``fluid.in_dygraph_mode``, and\n ``fluid.in_dygraph_mode`` is recommended to use.\n\n Returns:\n bool: Whether the program is running in dynamic graph mode.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n fluid.enable_dygraph() # Now we are in dygragh mode\n print(fluid.dygraph.enabled()) # True\n fluid.disable_dygraph()\n print(fluid.dygraph.enabled()) # False\n \"\"\"\n return framework.in_dygraph_mode()\n\n\ndef enable_dygraph(place=None):\n \"\"\"\n\n .. note::\n Dynamic graph mode is turn ON by default since paddle 2.0.0\n\n This API turn OFF static graph mode. You can turn ON static graph mode by `enable_static <./disable_dygraph_en.html>`_ .\n\n Parameters:\n place(paddle.CPUPlace|paddle.CUDAPlace|str, optional): Place to run dynamic graph. Default: None. Which means that the running place will be \n determined according to the way of paddle compilation. If ``place`` is string, It can be ``cpu``, and ``gpu:x``, where ``x`` is the\n index of the GPUs.\n\n return:\n None\n\n Examples:\n .. code-block:: python\n\n import paddle\n print(paddle.in_dynamic_mode()) # True, dynamic mode is turn ON by default since paddle 2.0.0\n\n paddle.enable_static()\n print(paddle.in_dynamic_mode()) # False, Now we are in static mode\n\n paddle.disable_static()\n print(paddle.in_dynamic_mode()) # True, Now we are in dynamic mode\n\n \"\"\"\n global _functional_dygraph_context_manager\n if _functional_dygraph_context_manager is None:\n _functional_dygraph_context_manager = guard(\n place=_get_paddle_place(place))\n _functional_dygraph_context_manager.__enter__()\n\n # call disable_dygraph when Python exit\n CleanupFuncRegistrar.register(disable_dygraph)\n\n\ndef disable_dygraph():\n \"\"\"\n\n .. note::\n Dynamic graph mode is turn ON by default since paddle 2.0.0\n\n This API turn ON static graph mode. You can turn ON static graph mode by `disable_static <./enable_dygraph_en.html>`_ .\n\n return:\n None\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n print(paddle.in_dynamic_mode()) # True, dynamic mode is turn ON by default since paddle 2.0.0\n\n paddle.enable_static()\n print(paddle.in_dynamic_mode()) # False, Now we are in static mode\n\n paddle.disable_static()\n print(paddle.in_dynamic_mode()) # True, Now we are in dynamic mode\n\n \"\"\"\n global _functional_dygraph_context_manager\n if _functional_dygraph_context_manager is not None:\n _functional_dygraph_context_manager.__exit__(*sys.exc_info())\n _functional_dygraph_context_manager = None\n\n\n@signature_safe_contextmanager\ndef _switch_tracer_mode_guard_(is_train=True):\n tracer = framework._dygraph_tracer()\n if tracer:\n has_grad = tracer._has_grad\n tracer._has_grad = is_train\n try:\n yield\n finally:\n tracer._has_grad = has_grad\n else:\n yield\n\n\ndef no_grad(func=None):\n \"\"\"\n :api_attr: imperative\n\n Create a context which disables dygraph gradient calculation.\n In this mode, the result of every computation will have `stop_gradient=True`.\n\n Also functions as a decorator. (Make sure to instantiate without parenthesis.)\n\n Examples:\n\n .. code-block:: python\n\n import numpy as np\n import paddle.fluid as fluid\n\n # use as generator\n\n data = np.array([[2, 3], [4, 5]]).astype('float32')\n with fluid.dygraph.guard():\n l0 = fluid.Linear(2, 2) # l0.weight.gradient() is None\n l1 = fluid.Linear(2, 2)\n with fluid.dygraph.no_grad():\n # l1.weight.stop_gradient is False\n tmp = l1.weight * 2 # tmp.stop_gradient is True\n x = fluid.dygraph.to_variable(data)\n y = l0(x) + tmp\n o = l1(y)\n o.backward()\n print(tmp.gradient() is None) # True\n print(l0.weight.gradient() is None) # False\n\n # use as decorator\n\n @fluid.dygraph.no_grad\n def test_layer():\n with fluid.dygraph.guard():\n inp = np.ones([3, 1024], dtype='float32')\n t = fluid.dygraph.base.to_variable(inp)\n linear1 = fluid.Linear(1024, 4, bias_attr=False)\n linear2 = fluid.Linear(4, 4)\n ret = linear1(t)\n dy_ret = linear2(ret)\n\n test_layer()\n\n \"\"\"\n if func is None:\n return _switch_tracer_mode_guard_(is_train=False)\n else:\n\n @decorator.decorator\n def __impl__(func, *args, **kwargs):\n with _switch_tracer_mode_guard_(is_train=False):\n return func(*args, **kwargs)\n\n return __impl__(func)\n\n\nclass no_grad_:\n \"\"\"\n :api_attr: imperative\n\n Create a context which disables dygraph gradient calculation.\n In this mode, the result of every computation will have `stop_gradient` set\n to `True`.\n\n Also functions as a decorator. (Make sure to use an instance.)\n\n Examples:\n\n .. 
code-block:: python\n\n import numpy as np\n import paddle\n\n # use as generator\n\n data = np.array([[2, 3], [4, 5]]).astype('float32')\n l0 = paddle.nn.Linear(2, 2) # l0.weight.gradient() is None\n l1 = paddle.nn.Linear(2, 2)\n with paddle.no_grad():\n # l1.weight.stop_gradient is False\n tmp = l1.weight * 2 # tmp.stop_gradient is True\n x = paddle.to_tensor(data)\n y = l0(x) + tmp\n o = l1(y)\n o.backward()\n print(tmp.gradient() is None) # True\n print(l0.weight.gradient() is None) # False\n\n # use as decorator\n\n @paddle.no_grad()\n def test_layer():\n inp = np.ones([3, 1024], dtype='float32')\n t = paddle.to_tensor(inp)\n linear1 = paddle.nn.Linear(1024, 4, bias_attr=False)\n linear2 = paddle.nn.Linear(4, 4)\n ret = linear1(t)\n dy_ret = linear2(ret)\n\n test_layer()\n \"\"\"\n\n def __call__(self, func):\n @decorator.decorator\n def _decorate_function(func, *args, **kwargs):\n with self:\n return func(*args, **kwargs)\n\n @decorator.decorator\n def _decorate_generator(func, *args, **kwargs):\n gen = func(*args, **kwargs)\n with self:\n for x in gen:\n yield x\n\n if inspect.isgeneratorfunction(func):\n return _decorate_generator(func)\n else:\n return _decorate_function(func)\n\n def __enter__(self):\n tracer = framework._dygraph_tracer()\n if tracer:\n self.orig = tracer._has_grad\n tracer._has_grad = False\n\n def __exit__(self, *args):\n tracer = framework._dygraph_tracer()\n if tracer:\n tracer._has_grad = self.orig\n\n\n@signature_safe_contextmanager\ndef guard(place=None):\n \"\"\"\n :api_attr: imperative\n\n This context will create a dygraph context for dygraph to run, using python ``with`` statement.\n\n Parameters:\n place(fluid.CPUPlace| fluid.CUDAPlace|str, optional): Place to execute dygraph. \n If None, the running place will be determined according to the way of paddle compilation.\n If ``place`` is string, It can be ``cpu``, ``gpu:x`` and ``xpu:x``, where ``x`` is the\n index of the GPUs or XPUs. Default: None\n\n return:\n None\n\n Examples:\n\n .. code-block:: python\n\n import numpy as np\n import paddle.fluid as fluid\n\n with fluid.dygraph.guard():\n inp = np.ones([3, 1024], dtype='float32')\n t = fluid.dygraph.base.to_variable(inp)\n linear1 = fluid.Linear(1024, 4, bias_attr=False)\n linear2 = fluid.Linear(4, 4)\n ret = linear1(t)\n dy_ret = linear2(ret)\n\n \"\"\"\n train = framework.Program()\n startup = framework.Program()\n tracer = Tracer()\n VarBase = core.VarBase\n\n if place is not None:\n expected_place = _get_paddle_place(place)\n else:\n expected_place = framework._current_expected_place()\n\n with framework.program_guard(train, startup):\n with framework.unique_name.guard():\n with framework._dygraph_guard(tracer):\n with framework._dygraph_place_guard(expected_place):\n yield\n\n\[email protected]_only\ndef grad(outputs,\n inputs,\n grad_outputs=None,\n retain_graph=None,\n create_graph=False,\n only_inputs=True,\n allow_unused=False,\n no_grad_vars=None):\n ''' \n .. note::\n **This API is ONLY available in Dygraph mode.**\n\n This API computes the sum of gradients of `outputs` with respect to each `inputs` .\n\n Parameters:\n outputs (Tensor|list(Tensor)|tuple(Tensor)): the output Tensor or \n Tensor list/tuple of the graph to compute gradients.\n inputs (Tensor|list(Tensor)|tuple(Tensor)): the input Tensor or \n Tensor list/tuple of the graph to compute gradients. The returned\n values of this API are the gradients of `inputs` . \n grad_outputs (Tensor|list(Tensor|None)|tuple(Tensor|None), optional): \n initial gradient values of `outputs` . 
If `grad_outputs` is None, \n the initial gradient values of `outputs` would be Tensors filled with 1; \n if `grad_outputs` is not None, it must have the same length as `outputs` , \n and in this case, the initial gradient value of the i-th `outputs` would\n be: (1) a Tensor filled with 1 when the i-th element of `grad_outputs` \n is None; (2) the i-th element of `grad_outputs` when the i-th element of\n `grad_outputs` is a Tensor. Default None.\n retain_graph (bool, optional): whether to retain the forward graph which \n is used to calculate the gradient. When it is True, the graph would \n be retained, in which way users can calculate backward twice for the \n same graph. When it is False, the graph would be freed. Default None,\n which means it is equal to `create_graph` . \n create_graph (bool, optional): whether to create the gradient graphs of\n the computing process. When it is True, higher order derivatives are\n supported to compute; when it is False, the gradient graphs of the\n computing process would be discarded. Default False.\n only_inputs (bool, optional): whether to only compute the gradients of\n `inputs` . If it is False, the gradients of all remaining leaf \n Tensors in the graph would be also computed and accumulated. \n If it is True, only the gradients of `inputs` would be computed.\n Default True. only_inputs=False is under development, and it is\n not supported yet. \n allow_unused (bool, optional): whether to raise error or return None if some \n Tensors of `inputs` are unreachable in the graph. If some Tensors of \n `inputs` are unreachable in the graph (i.e., their gradients are None), \n error would be raised if allow_unused=False, or None would be returned as\n their gradients if allow_unused=True. Default False.\n no_grad_vars (Tensor|list(Tensor)|tuple(Tensor)|set(Tensor), optional): \n the Tensors whose gradients are not needed to compute. Default None.\n\n Returns:\n tuple: a tuple of Tensors, whose length is the same as the Tensor number \n inside `inputs`, and the i-th returned Tensor is the sum of gradients of \n `outputs` with respect to the i-th `inputs`.\n\n Examples 1:\n .. code-block:: python\n\n import paddle\n\n def test_dygraph_grad(create_graph):\n x = paddle.ones(shape=[1], dtype='float32')\n x.stop_gradient = False\n y = x * x\n\n # Since y = x * x, dx = 2 * x\n dx = paddle.grad(\n outputs=[y],\n inputs=[x],\n create_graph=create_graph,\n retain_graph=True)[0]\n\n z = y + dx\n\n # If create_graph = False, the gradient of dx\n # would not be backpropagated. Therefore,\n # z = x * x + dx, and x.gradient() = 2 * x = 2.0\n\n # If create_graph = True, the gradient of dx\n # would be backpropagated. Therefore,\n # z = x * x + dx = x * x + 2 * x, and\n # x.gradient() = 2 * x + 2 = 4.0\n\n z.backward()\n return x.gradient()\n\n print(test_dygraph_grad(create_graph=False)) # [2.]\n print(test_dygraph_grad(create_graph=True)) # [4.]\n\n Examples 2:\n .. 
code-block:: python\n\n import paddle\n\n def test_dygraph_grad(grad_outputs=None):\n x = paddle.to_tensor(2.0)\n x.stop_gradient = False\n\n y1 = x * x\n y2 = x * 3 \n\n # If grad_outputs=None, dy1 = [1], dy2 = [1].\n # If grad_outputs=[g1, g2], then:\n # - dy1 = [1] if g1 is None else g1\n # - dy2 = [1] if g2 is None else g2\n\n # Since y1 = x * x, dx = 2 * x * dy1.\n # Since y2 = x * 3, dx = 3 * dy2.\n # Therefore, the final result would be:\n # dx = 2 * x * dy1 + 3 * dy2 = 4 * dy1 + 3 * dy2.\n\n dx = paddle.grad(\n outputs=[y1, y2], \n inputs=[x],\n grad_outputs=grad_outputs)[0]\n\n return dx.numpy()\n\n grad_value = paddle.to_tensor(4.0)\n # dy1 = [1], dy2 = [1]\n print(test_dygraph_grad(None)) # [7.]\n\n # dy1 = [1], dy2 = [4]\n print(test_dygraph_grad([None, grad_value])) # [16.]\n\n # dy1 = [4], dy2 = [1]\n print(test_dygraph_grad([grad_value, None])) # [19.]\n\n # dy1 = [3], dy2 = [4]\n grad_y1 = paddle.to_tensor(3.0)\n print(test_dygraph_grad([grad_y1, grad_value])) # [24.]\n\t'''\n\n def check_in_out(in_out_list, name):\n assert in_out_list is not None, \"{} should not be None\".format(name)\n\n if isinstance(in_out_list, (list, tuple)):\n assert len(in_out_list) > 0, \"{} cannot be empty\".format(name)\n for each_var in in_out_list:\n assert isinstance(\n each_var,\n core.VarBase), \"Elements of {} must be Variable\".format(\n name)\n return in_out_list\n else:\n assert isinstance(\n in_out_list,\n core.VarBase), \"{} must be Variable or list of Variable\".format(\n name)\n return [in_out_list]\n\n outputs = check_in_out(outputs, 'outputs')\n inputs = check_in_out(inputs, 'inputs')\n\n if grad_outputs is not None:\n if not isinstance(grad_outputs, (list, tuple)):\n grad_outputs = [grad_outputs]\n\n for each_var in grad_outputs:\n if each_var is not None:\n assert isinstance(\n each_var, core.VarBase\n ), \"grad_outputs must be None, a Variable or a list containing None or Variables\"\n else:\n grad_outputs = []\n\n if len(grad_outputs) > 0:\n assert len(grad_outputs) == len(\n outputs), \"The length of grad_outputs must be equal to outputs\"\n\n if no_grad_vars is None:\n no_grad_vars = []\n elif isinstance(no_grad_vars, core.VarBase):\n no_grad_vars = [no_grad_vars]\n elif isinstance(no_grad_vars, (list, tuple, set)):\n no_grad_vars = list(no_grad_vars)\n for var in no_grad_vars:\n assert isinstance(\n var, core.VarBase), \"no_grad_vars can only contains Variable\"\n else:\n raise AssertionError(\n \"no_grad_vars must be None, Variable or list/tuple/set of Variables\")\n\n assert isinstance(create_graph, bool), \"create_graph must be True or False\"\n\n if retain_graph is None:\n retain_graph = create_graph\n\n assert isinstance(retain_graph,\n bool), \"retain_graph must be None, True or False\"\n\n assert isinstance(allow_unused, bool), \"allow_unused must be True or False\"\n\n assert isinstance(only_inputs, bool), \"only_inputs must be True or False\"\n assert only_inputs, \"only_inputs=False is not supported yet\"\n\n place = core.Place()\n place.set_place(framework._current_expected_place())\n return core.dygraph_partial_grad(inputs, outputs, grad_outputs,\n no_grad_vars, place, create_graph,\n retain_graph, allow_unused, only_inputs)\n\n\[email protected]_only\ndef to_variable(value, name=None, zero_copy=None, dtype=None):\n r\"\"\"\n :api_attr: imperative\n\n The API will create a ``Variable`` object from \n tuple, list, numpy\\.ndarray or Variable object.\n\n Parameters:\n value(tuple|list|ndarray|Variable|Tensor): Initial data. 
\n Can be a list, tuple, NumPy ndarray, Variable, Tensor.\n The shape can be multi-dimensional. The data type is one of \n numpy\\.{float16, float32, float64, int16, int32, int64, \n uint8, uint16, complex64, complex128}.\n name(str, optional): The default value is None. Normally there is no \n need for user to set this property. For more information, please \n refer to :ref:`api_guide_Name` . \n zero_copy(bool, optional): Whether to share memory with the input numpy \n array. This parameter only works with CPUPlace and will be set to \n True when it is None. Default: None. (Note: zero_copy is discarded temporally for some reason.)\n dtype(str, optional): The desired data type of returned ``Variable`` .\n Can be 'bool' , 'float16' , 'float32' , 'float64' , 'int8' , 'int16' , \n 'int32' , 'int64' , 'uint8' . Default: None.\n\n Returns:\n Variable : If ``value`` is a tuple/list/numpy\\.ndarray object, \n return ``Tensor`` created from the corresponding numpy\\.ndarray object, which has \n same data type and shape with ``value``. \n\n\n Examples:\n\n .. code-block:: python\n\n import numpy as np\n import paddle.fluid as fluid\n\n with fluid.dygraph.guard(fluid.CPUPlace()):\n x = np.ones([2, 2], np.float32)\n y = fluid.dygraph.to_variable(x, zero_copy=False)\n x[0][0] = -1\n y[0][0].numpy() # array([1.], dtype=float32)\n y = fluid.dygraph.to_variable(x)\n x[0][0] = 0\n y[0][0].numpy() # array([0.], dtype=float32)\n c = np.array([2+1j, 2])\n z = fluid.dygraph.to_variable(c)\n z.numpy() # array([2.+1.j, 2.+0.j])\n z.dtype # 'complex128'\n\n y = fluid.dygraph.to_variable([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])\n y.shape # [3L, 2L]\n\n y = fluid.dygraph.to_variable(((0.1, 1.2), (2.2, 3.1), (4.9, 5.2)), dtype='int32')\n y.shape # [3L, 2L]\n\n \"\"\"\n support_type = (list, tuple, np.ndarray, core.VarBase, framework.Variable,\n core.Tensor, core.LoDTensor)\n if not isinstance(value, support_type):\n raise TypeError(\n \"The type of 'value' in fluid.dygraph.to_variable must be %s, but received %s.\"\n % (support_type, type(value)))\n if isinstance(value, (core.VarBase, framework.Variable)):\n return value\n elif isinstance(value, (core.Tensor, core.LoDTensor)):\n return core.VarBase(value)\n else:\n if isinstance(framework._current_expected_place(),\n framework.core.CPUPlace):\n #TODO(zhiqiu): we found two problems when enable zero_copy on CPUPlace.\n # (1): eigen requires 16-bytes alignments, but the data of numpy array may not statisfy. \n # Details: https://eigen.tuxfamily.org/dox/group__TopicUnalignedArrayAssert.html\n # (2): when used in flask framework, it may result in hang.\n # Details: https://github.com/PaddlePaddle/Paddle/issues/26635\n # So, we temporally diable the zero_copy strategy.\n if zero_copy == True:\n warnings.warn(\n \"Currently, zero_copy is not supported, and it will be discarded.\"\n )\n zero_copy = False\n else:\n assert not zero_copy, \"zero_copy mode can only be used with CPUPlace\"\n\n if not isinstance(value, np.ndarray):\n value = np.array(value)\n\n if dtype is not None:\n dtype = convert_dtype(dtype)\n if value.dtype != dtype:\n value = value.astype(dtype)\n\n py_var = core.VarBase(\n value=value,\n place=framework._current_expected_place(),\n persistable=False,\n zero_copy=zero_copy,\n name=name if name else '')\n return py_var\n", "# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport numpy as np\nimport unittest\nimport contextlib\n\nimport paddle\nimport paddle.fluid as fluid\npaddle.enable_static()\n\n\ndef load_so(so_name):\n \"\"\"\n Load .so file and parse custom op into OpInfoMap.\n \"\"\"\n file_dir = os.path.dirname(os.path.abspath(__file__))\n fluid.load_op_library(os.path.join(file_dir, so_name))\n\n\nfrom paddle.fluid.layer_helper import LayerHelper\n\n\ndef relu2(x, name=None):\n helper = LayerHelper(\"relu2\", **locals())\n out = helper.create_variable(\n type=x.type, name=name, dtype=x.dtype, persistable=False)\n helper.append_op(type=\"relu2\", inputs={\"X\": x}, outputs={\"Y\": out})\n return out\n\n\[email protected]\ndef scope_prog_guard():\n prog = fluid.Program()\n startup_prog = fluid.Program()\n scope = fluid.core.Scope()\n with fluid.scope_guard(scope):\n with fluid.program_guard(prog, startup_prog):\n yield\n\n\ndef linear_fc(data, label, use_custom_relu):\n hidden = fluid.layers.fc(data, size=128)\n hidden = relu2(hidden) if use_custom_relu else fluid.layers.relu(hidden)\n hidden = fluid.layers.fc(hidden, size=128)\n hidden = fluid.layers.fc(hidden, size=10, act='softmax')\n loss = fluid.layers.cross_entropy(input=hidden, label=label)\n loss = fluid.layers.mean(loss)\n return loss\n\n\ndef custom_op_test(use_gpu=True, use_custom_relu=True):\n with scope_prog_guard():\n np.random.seed(0)\n fluid.default_startup_program().random_seed = 10\n fluid.default_main_program().random_seed = 10\n\n data = fluid.layers.data(\n name='data', shape=[1, 28, 28], dtype='float32')\n label = fluid.layers.data(name='label', shape=[1], dtype='int64')\n loss = linear_fc(data, label, use_custom_relu)\n\n optimizer = fluid.optimizer.Momentum(learning_rate=0.1, momentum=0.9)\n optimizer.minimize(loss)\n\n place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n\n compile_program = fluid.compiler.CompiledProgram(\n fluid.default_main_program()).with_data_parallel(\n loss_name=loss.name)\n\n reader = paddle.batch(paddle.dataset.mnist.train(), batch_size=32)\n feeder = fluid.DataFeeder(feed_list=[data, label], place=place)\n\n num = 4\n for i, data in enumerate(reader()):\n outs, = exe.run(compile_program,\n feed=feeder.feed(data),\n fetch_list=[loss])\n if i == num:\n break\n return outs\n\n\nclass CustomOpTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n os.environ['CPU_NUM'] = str(2)\n\n def test_cpu(self):\n actual = custom_op_test(False, True)\n expect = custom_op_test(False, False)\n self.assertEqual(actual.all(), expect.all())\n\n def test_gpu(self):\n if not fluid.core.is_compiled_with_cuda():\n return\n actual = custom_op_test(True, True)\n expect = custom_op_test(True, False)\n self.assertEqual(actual.all(), expect.all())\n\n\nif __name__ == '__main__':\n load_so(so_name='librelu2_op.so')\n unittest.main()\n", "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport sys\nsys.path.append(\"..\")\nimport unittest\nimport numpy as np\nimport paddle.fluid.core as core\nfrom op_test import OpTest\nfrom scipy.special import expit, erf\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nfrom paddle.fluid import compiler, Program, program_guard\n\n\ndef ref_batch_norm_infer(x, scale, bias, mean, variance, momentum, epsilon,\n data_layout):\n if data_layout == \"NCHW\":\n n, c, h, w = x.shape\n mean_tile = np.reshape(mean, (1, c, 1, 1))\n mean_tile = np.tile(mean_tile, (n, 1, h, w))\n variance_tile = np.reshape(variance, (1, c, 1, 1))\n variance_tile = np.tile(variance_tile, (n, 1, h, w))\n normalized_x = (x - mean_tile) / np.sqrt(variance_tile + epsilon)\n scale_tile = np.reshape(scale, (1, c, 1, 1))\n scale_tile = np.tile(scale_tile, (n, 1, h, w))\n bias_tile = np.reshape(bias, (1, c, 1, 1))\n bias_tile = np.reshape(bias_tile, (1, c, 1, 1))\n y = normalized_x * scale_tile + bias_tile\n elif data_layout == \"NHWC\":\n normalized_x = (x - mean) / np.sqrt(variance + epsilon)\n y = normalized_x * scale + bias\n else:\n raise ValueError(\n \"Unsupported data layout! Only NCHW and NHWC is supported, but received \"\n + data_layout)\n return y\n\n\ndef ref_batch_norm_train(x, y_grad, scale, bias, mean, variance, momentum,\n epsilon, data_layout):\n # Forward\n if data_layout == \"NCHW\":\n n, c, h, w = x.shape\n x_square = x * x\n x_square_sum = np.sum(x_square, (0, 2, 3))\n x_sum = np.sum(x, axis=(0, 2, 3))\n element_count = np.size(x) / int(np.shape(x)[1])\n saved_mean = x_sum / element_count\n saved_variance = x_square_sum / element_count - saved_mean * saved_mean\n saved_mean_tile = np.reshape(saved_mean, (1, c, 1, 1))\n saved_mean_tile = np.tile(saved_mean_tile, (n, 1, h, w))\n saved_variance_tile = np.reshape(saved_variance, (1, c, 1, 1))\n saved_variance_tile = np.tile(saved_variance_tile, (n, 1, h, w))\n normalized_x = (\n x - saved_mean_tile) / np.sqrt(saved_variance_tile + epsilon)\n scale_tile = np.reshape(scale, (1, c, 1, 1))\n scale_tile = np.tile(scale_tile, (n, 1, h, w))\n bias_tile = np.reshape(bias, (1, c, 1, 1))\n bias_tile = np.reshape(bias_tile, (1, c, 1, 1))\n y = normalized_x * scale_tile + bias_tile\n elif data_layout == \"NHWC\":\n x_square = x * x\n x_square_sum = np.sum(x_square, (0, 1, 2))\n x_sum = np.sum(x, axis=(0, 1, 2))\n element_count = np.size(x) / int(np.shape(x)[-1])\n saved_mean = x_sum / element_count\n saved_variance = x_square_sum / element_count - saved_mean * saved_mean\n normalized_x = (x - saved_mean) / np.sqrt(saved_variance + epsilon)\n y = normalized_x * scale + bias\n else:\n raise ValueError(\n \"Unsupported data layout! Only NCHW and NHWC is supported, but received \"\n + data_layout)\n mean_out = saved_mean * (1. - momentum) + momentum * mean\n variance_out = saved_variance * (1. - momentum) + momentum * variance\n saved_inv_std = 1. 
/ np.sqrt(saved_variance + epsilon)\n # Backward\n # Use the following formulas to calculate gradients:\n # grad_scale =\n # sum(grad_y * (x - mean)) * rsqrt(variance + epsilon)\n #\n # grad_bias = sum(y)\n #\n # x_grad =\n # 1/N * scale * rsqrt(variance + epsilon) * (N * grad_y - sum(grad_y) -\n # (x - mean) * sum(grad_y * (x - mean)) / (variance + epsilon))\n # Transfer from (N, C, H, W) to (N, H, W, C) to simplify computation\n if data_layout == \"NCHW\":\n x = np.transpose(x, (0, 2, 3, 1))\n y_grad = np.transpose(y_grad, (0, 2, 3, 1))\n x_grad = scale * (\n y_grad - np.mean(\n y_grad, axis=(0, 1, 2)) - (x - saved_mean) * np.mean(\n y_grad * (x - saved_mean), axis=(0, 1, 2)) /\n (saved_variance + epsilon)) / np.sqrt(saved_variance + epsilon)\n scale_grad = np.sum(y_grad * (x - saved_mean) /\n np.sqrt(saved_variance + epsilon),\n axis=(0, 1, 2))\n bias_grad = np.sum(y_grad, axis=(0, 1, 2))\n # Transfer back to N, C, H, W\n if data_layout == \"NCHW\":\n x_grad = np.transpose(x_grad, (0, 3, 1, 2))\n x = np.transpose(x, (0, 3, 1, 2))\n y_grad = np.transpose(y_grad, (0, 3, 1, 2))\n return y, mean_out, variance_out, saved_mean, saved_inv_std, x_grad, scale_grad, bias_grad\n\n\[email protected](not paddle.is_compiled_with_xpu(),\n \"core is not compiled with XPU\")\nclass TestXPUBatchNormOp(unittest.TestCase):\n def setUp(self):\n self.place = paddle.XPUPlace(0)\n self.op_type = \"batch_norm\"\n self.dtype = np.float32\n self.shape = [2, 3, 4, 5]\n self.data_layout = \"NCHW\"\n self.epsilon = 1e-05\n self.momentum = 0.9\n self.set_attrs()\n\n if self.data_layout == \"NHWC\":\n channel_size = self.shape[3]\n elif self.data_layout == \"NCHW\":\n channel_size = self.shape[1]\n else:\n raise ValueError(\n \"Unsupported data layout! Only NCHW and NHWC is supported, but received \"\n + data_layout)\n np.random.seed(1024)\n self.x_np = np.random.random_sample(self.shape).astype(self.dtype)\n self.scale_np = np.random.random_sample(\n [channel_size]).astype(self.dtype)\n self.bias_np = np.random.random_sample(\n [channel_size]).astype(self.dtype)\n self.mean_np = np.zeros([channel_size]).astype(self.dtype)\n self.variance_np = np.ones([channel_size]).astype(self.dtype)\n self.saved_mean_np = np.zeros([channel_size]).astype(self.dtype)\n self.saved_variance_np = np.ones([channel_size]).astype(self.dtype)\n\n def set_attrs(self):\n pass\n\n def test_infer(self):\n paddle.enable_static()\n with paddle.static.program_guard(paddle.static.Program()):\n x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)\n scale = paddle.fluid.data('Scale', self.scale_np.shape,\n self.scale_np.dtype)\n bias = paddle.fluid.data('Bias', self.bias_np.shape,\n self.bias_np.dtype)\n mean = paddle.fluid.data('Mean', self.mean_np.shape,\n self.mean_np.dtype)\n variance = paddle.fluid.data('Variance', self.variance_np.shape,\n self.variance_np.dtype)\n y = F.batch_norm(x, mean, variance, scale, bias, False,\n self.momentum, self.epsilon, self.data_layout)\n exe = paddle.static.Executor(self.place)\n [y_np] = exe.run(feed={\n 'X': self.x_np,\n 'Scale': self.scale_np,\n 'Bias': self.bias_np,\n 'Mean': self.mean_np,\n 'Variance': self.variance_np\n },\n fetch_list=[y])\n y_np_ref = ref_batch_norm_infer(\n self.x_np, self.scale_np, self.bias_np, self.mean_np,\n self.variance_np, self.momentum, self.epsilon, self.data_layout)\n self.assertEqual(np.allclose(y_np_ref, y_np), True)\n\n def test_train(self):\n y_grad_np = np.random.random_sample(self.shape).astype(self.dtype)\n y_np, mean_out_np, variance_out_np, saved_mean_np, 
saved_variance_np, x_grad_np, scale_grad_np, bias_grad_np = ref_batch_norm_train(\n self.x_np, y_grad_np, self.scale_np, self.bias_np, self.mean_np,\n self.variance_np, self.momentum, self.epsilon, self.data_layout)\n inputs = {\n 'X': self.x_np,\n 'Scale': self.scale_np,\n 'Bias': self.bias_np,\n 'Mean': self.mean_np,\n 'Variance': self.variance_np,\n 'Y@GRAD': y_grad_np\n }\n outputs = {\n 'Y': y_np,\n 'Mean': mean_out_np,\n 'Variance': variance_out_np,\n 'SavedMean': saved_mean_np,\n 'SavedVariance': saved_variance_np,\n 'X@GRAD': x_grad_np,\n 'Scale@GRAD': scale_grad_np,\n 'Bias@GRAD': bias_grad_np\n }\n attrs = {\n 'momentum': self.momentum,\n 'epsilon': self.epsilon,\n 'is_test': False,\n 'data_layout': self.data_layout,\n 'use_mkldnn': False,\n 'fuse_with_relu': False,\n 'use_global_stats': False,\n }\n paddle.enable_static()\n program = paddle.static.Program()\n with paddle.static.program_guard(program):\n block = program.global_block()\n # Set inputs, outputs and attributes to the forward op of batch_norm \n input_vars = {}\n for var_name in inputs:\n arg_name = var_name\n np_value = inputs[var_name]\n if not block.has_var(var_name):\n block.create_var(\n name=var_name,\n shape=np_value.shape,\n dtype=np_value.dtype)\n input_vars[arg_name] = block.var(var_name)\n fetch_list = []\n output_vars = {}\n for var_name in outputs:\n arg_name = var_name\n np_value = outputs[var_name]\n if not block.has_var(var_name):\n block.create_var(\n name=var_name,\n shape=np_value.shape,\n dtype=np_value.dtype)\n if var_name == 'Mean':\n arg_name = 'MeanOut' # Share memory\n if var_name == 'Variance':\n arg_name = 'VarianceOut' # Share memory\n output_vars[arg_name] = block.var(var_name)\n fetch_list.append(var_name)\n batch_norm_op = block.append_op(\n type=\"batch_norm\",\n inputs=input_vars,\n outputs=output_vars,\n attrs=attrs)\n # Generate the backward op_desc of batch_norm\n grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(\n batch_norm_op.desc, set(), [])\n grad_op_desc = grad_op_desc_list[0]\n new_op_desc = block.desc.append_op()\n new_op_desc.copy_from(grad_op_desc)\n program._sync_with_cpp()\n exe = paddle.static.Executor(self.place)\n outs = exe.run(program, feed=inputs, fetch_list=fetch_list)\n for id, name in enumerate(fetch_list):\n self.assertEqual(\n np.allclose(\n outputs[name], outs[id], atol=1e-4), True)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.random.random", "numpy.transpose" ], [ "numpy.reshape", "numpy.random.random", "numpy.allclose" ], [ "numpy.array", "numpy.random.random" ], [ "numpy.array" ], [ "numpy.array" ], [ "numpy.arange", "numpy.array_equal", "numpy.array", "numpy.linspace" ], [ "numpy.random.random" ], [ "numpy.array", "numpy.abs", "numpy.array_equal" ], [ "numpy.array" ], [ "numpy.random.seed" ], [ "numpy.sqrt", "numpy.random.seed", "numpy.allclose", "numpy.reshape", "numpy.tile", "numpy.random.random_sample", "numpy.ones", "numpy.size", "numpy.shape", "numpy.mean", "numpy.transpose", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BreastGAN/augmentation
[ "0e1bcb7175e2b2a45cd8084bb14521e26b68caea", "0e1bcb7175e2b2a45cd8084bb14521e26b68caea" ]
[ "models/breast_cycle_gan/custom/conv/contrib.py", "models/rcnn/eval.py" ]
[ "# Copyright 2019 Lukas Jendele and Ondrej Skopek.\n# Adapted from The TensorFlow Authors, under the ASL 2.0.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# This part is copied from:\n# https://github.com/tensorflow/tensorflow/blob/r1.11/tensorflow/contrib/layers/python/layers/layers.py\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.framework.python.ops import add_arg_scope\n# from tensorflow.contrib.framework.python.ops import variables\nfrom tensorflow.contrib.layers.python.layers import initializers\nfrom tensorflow.contrib.layers.python.layers import utils\n# from tensorflow.python.eager import context\n# from tensorflow.python.framework import constant_op\n# from tensorflow.python.framework import dtypes\n# from tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\n# from tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.layers import convolutional as convolutional_layers\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import variable_scope\n\n# My imports\nfrom tensorflow.contrib.layers.python.layers.layers import _build_variable_getter, _add_variable_to_collections\nfrom models.breast_cycle_gan.custom.conv.layers import MyConv2D\nimport tensorflow as tf\n# This part is copied from:\n# https://github.com/tensorflow/tensorflow/blob/r1.11/tensorflow/contrib/layers/python/layers/layers.py\n\n\n@add_arg_scope\ndef convolution2d(inputs,\n num_outputs,\n kernel_size,\n stride=1,\n padding='SAME',\n data_format=None,\n rate=1,\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer(),\n biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n use_spectral_norm=False,\n is_training=False,\n self_attention=False,\n scope=None):\n h = convolution(\n inputs,\n num_outputs,\n kernel_size,\n stride,\n padding,\n data_format,\n rate,\n activation_fn,\n normalizer_fn,\n normalizer_params,\n weights_initializer,\n weights_regularizer,\n biases_initializer,\n biases_regularizer,\n reuse,\n variables_collections,\n outputs_collections,\n trainable,\n use_spectral_norm,\n is_training,\n scope,\n conv_dims=2)\n if not self_attention:\n return h\n with tf.variable_scope(\"self_attention\"):\n with tf.variable_scope(\"f\"):\n f = convolution(\n inputs,\n num_outputs // 8,\n kernel_size,\n stride,\n padding,\n data_format,\n rate,\n activation_fn,\n normalizer_fn,\n normalizer_params,\n weights_initializer,\n weights_regularizer,\n biases_initializer,\n biases_regularizer,\n reuse,\n variables_collections,\n outputs_collections,\n trainable,\n use_spectral_norm,\n is_training,\n None,\n conv_dims=2)\n 
with tf.variable_scope(\"g\"):\n g = convolution(\n inputs,\n num_outputs // 8,\n kernel_size,\n stride,\n padding,\n data_format,\n rate,\n activation_fn,\n normalizer_fn,\n normalizer_params,\n weights_initializer,\n weights_regularizer,\n biases_initializer,\n biases_regularizer,\n reuse,\n variables_collections,\n outputs_collections,\n trainable,\n use_spectral_norm,\n is_training,\n None,\n conv_dims=2)\n\n def hw_flatten(x):\n return tf.reshape(x, shape=[x.shape[0], -1, x.shape[-1]])\n\n # N = h * w\n s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True) # # [bs, N, N]\n\n beta = tf.nn.softmax(s, axis=-1) # attention map\n\n o = tf.matmul(beta, hw_flatten(h)) # [bs, N, C]\n gamma = tf.get_variable(\"gamma\", [1], initializer=tf.constant_initializer(0.0))\n\n o = tf.reshape(o, shape=inputs.shape) # [bs, h, w, C]\n x = gamma * o + inputs\n\n return x\n\n\n@add_arg_scope\ndef convolution(inputs,\n num_outputs,\n kernel_size,\n stride=1,\n padding='SAME',\n data_format=None,\n rate=1,\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer(),\n biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n use_spectral_norm=False,\n is_training=False,\n scope=None,\n conv_dims=None):\n \"\"\"Adds an N-D convolution followed by an optional batch_norm layer.\n It is required that 1 <= N <= 3.\n `convolution` creates a variable called `weights`, representing the\n convolutional kernel, that is convolved (actually cross-correlated) with the\n `inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is\n provided (such as `batch_norm`), it is then applied. Otherwise, if\n `normalizer_fn` is None and a `biases_initializer` is provided then a `biases`\n variable would be created and added the activations. Finally, if\n `activation_fn` is not `None`, it is applied to the activations as well.\n Performs atrous convolution with input stride/dilation rate equal to `rate`\n if a value > 1 for any dimension of `rate` is specified. In this case\n `stride` values != 1 are not supported.\n Args:\n inputs: A Tensor of rank N+2 of shape\n `[batch_size] + input_spatial_shape + [in_channels]` if data_format does\n not start with \"NC\" (default), or\n `[batch_size, in_channels] + input_spatial_shape` if data_format starts\n with \"NC\".\n num_outputs: Integer, the number of output filters.\n kernel_size: A sequence of N positive integers specifying the spatial\n dimensions of the filters. Can be a single integer to specify the same\n value for all spatial dimensions.\n stride: A sequence of N positive integers specifying the stride at which to\n compute output. Can be a single integer to specify the same value for all\n spatial dimensions. Specifying any `stride` value != 1 is incompatible\n with specifying any `rate` value != 1.\n padding: One of `\"VALID\"` or `\"SAME\"`.\n data_format: A string or None. Specifies whether the channel dimension of\n the `input` and output is the last dimension (default, or if `data_format`\n does not start with \"NC\"), or the second dimension (if `data_format`\n starts with \"NC\"). For N=1, the valid values are \"NWC\" (default) and\n \"NCW\". 
For N=2, the valid values are \"NHWC\" (default) and \"NCHW\".\n For N=3, the valid values are \"NDHWC\" (default) and \"NCDHW\".\n rate: A sequence of N positive integers specifying the dilation rate to use\n for atrous convolution. Can be a single integer to specify the same\n value for all spatial dimensions. Specifying any `rate` value != 1 is\n incompatible with specifying any `stride` value != 1.\n activation_fn: Activation function. The default value is a ReLU function.\n Explicitly set it to None to skip it and maintain a linear activation.\n normalizer_fn: Normalization function to use instead of `biases`. If\n `normalizer_fn` is provided then `biases_initializer` and\n `biases_regularizer` are ignored and `biases` are not created nor added.\n default set to None for no normalizer function\n normalizer_params: Normalization function parameters.\n weights_initializer: An initializer for the weights.\n weights_regularizer: Optional regularizer for the weights.\n biases_initializer: An initializer for the biases. If None skip biases.\n biases_regularizer: Optional regularizer for the biases.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for all the variables or\n a dictionary containing a different list of collection per variable.\n outputs_collections: Collection to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n scope: Optional scope for `variable_scope`.\n conv_dims: Optional convolution dimensionality, when set it would use the\n corresponding convolution (e.g. 2 for Conv 2D, 3 for Conv 3D, ..). When\n leaved to None it would select the convolution dimensionality based on\n the input rank (i.e. 
Conv ND, with N = input_rank - 2).\n Returns:\n A tensor representing the output of the operation.\n Raises:\n ValueError: If `data_format` is invalid.\n ValueError: Both 'rate' and `stride` are not uniformly 1.\n \"\"\"\n if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:\n raise ValueError('Invalid data_format: %r' % (data_format,))\n\n layer_variable_getter = _build_variable_getter({'bias': 'biases', 'kernel': 'weights'})\n\n with variable_scope.variable_scope(scope, 'Conv', [inputs], reuse=reuse, custom_getter=layer_variable_getter) as sc:\n inputs = ops.convert_to_tensor(inputs)\n input_rank = inputs.get_shape().ndims\n\n if conv_dims is not None and conv_dims + 2 != input_rank:\n raise ValueError('Convolution expects input with rank %d, got %d' % (conv_dims + 2, input_rank))\n if input_rank == 3:\n layer_class = convolutional_layers.Convolution1D\n elif input_rank == 4:\n layer_class = MyConv2D\n elif input_rank == 5:\n layer_class = convolutional_layers.Convolution3D\n else:\n raise ValueError('Convolution not supported for input with rank', input_rank)\n\n df = ('channels_first' if data_format and data_format.startswith('NC') else 'channels_last')\n layer = layer_class(\n filters=num_outputs,\n kernel_size=kernel_size,\n strides=stride,\n padding=padding,\n data_format=df,\n dilation_rate=rate,\n activation=None,\n use_bias=not normalizer_fn and biases_initializer,\n kernel_initializer=weights_initializer,\n bias_initializer=biases_initializer,\n kernel_regularizer=weights_regularizer,\n bias_regularizer=biases_regularizer,\n activity_regularizer=None,\n use_spectral_norm=use_spectral_norm,\n is_training=is_training,\n trainable=trainable,\n name=sc.name,\n dtype=inputs.dtype.base_dtype,\n _scope=sc,\n _reuse=reuse)\n outputs = layer.apply(inputs)\n\n # Add variables to collections.\n _add_variable_to_collections(layer.kernel, variables_collections, 'weights')\n if layer.use_bias:\n _add_variable_to_collections(layer.bias, variables_collections, 'biases')\n\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections, sc.name, outputs)\n", "# -*- coding: utf-8 -*-\n# File: eval.py\n\nimport tqdm\nimport os\nfrom collections import namedtuple, defaultdict\nfrom contextlib import ExitStack\nimport numpy as np\nimport cv2\nimport json\n\nfrom tensorpack.utils.utils import get_tqdm_kwargs\n\nfrom models.rcnn.common import CustomResize, clip_boxes\nfrom models.rcnn.config import config as cfg\n\nDetectionResult = namedtuple('DetectionResult', ['box', 'score', 'class_id', 'mask'])\n\"\"\"\nbox: 4 float\nscore: float\nclass_id: int, 1~NUM_CLASS\nmask: None, or a binary image of the original image shape\n\"\"\"\n\n\ndef fill_full_mask(box, mask, shape):\n \"\"\"\n Args:\n box: 4 float\n mask: MxM floats\n shape: h,w\n \"\"\"\n # int() is floor\n # box fpcoor=0.0 -> intcoor=0.0\n x0, y0 = list(map(int, box[:2] + 0.5))\n # box fpcoor=h -> intcoor=h-1, inclusive\n x1, y1 = list(map(int, box[2:] - 0.5)) # inclusive\n x1 = max(x0, x1) # require at least 1x1\n y1 = max(y0, y1)\n\n w = x1 + 1 - x0\n h = y1 + 1 - y0\n\n # rounding errors could happen here, because masks were not originally computed for this shape.\n # but it's hard to do better, because the network does not know the \"original\" scale\n mask = (cv2.resize(mask, (w, h)) > 0.5).astype('uint8')\n ret = 
np.zeros(shape, dtype='uint8')\n ret[y0:y1 + 1, x0:x1 + 1] = mask\n return ret\n\n\ndef detect_one_image(img, model_func):\n \"\"\"\n Run detection on one image, using the TF callable.\n This function should handle the preprocessing internally.\n\n Args:\n img: an image\n model_func: a callable from TF model,\n takes image and returns (boxes, probs, labels, [masks])\n\n Returns:\n [DetectionResult]\n \"\"\"\n\n orig_shape = img.shape[:2]\n resizer = CustomResize(cfg.PREPROC.TEST_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE)\n resized_img = resizer.augment(img)\n scale = np.sqrt(resized_img.shape[0] * 1.0 / img.shape[0] * resized_img.shape[1] / img.shape[1])\n boxes, probs, labels, *masks = model_func(resized_img)\n boxes = boxes / scale\n # boxes are already clipped inside the graph, but after the floating point scaling, this may not be true any more.\n boxes = clip_boxes(boxes, orig_shape)\n\n if masks:\n # has mask\n full_masks = [fill_full_mask(box, mask, orig_shape) for box, mask in zip(boxes, masks[0])]\n masks = full_masks\n else:\n # fill with none\n masks = [None] * len(boxes)\n\n results = [DetectionResult(*args) for args in zip(boxes, probs, labels, masks)]\n return results\n\n\ndef eval_coco_old(df, detect_func, tqdm_bar=None):\n \"\"\"\n Args:\n df: a DataFlow which produces (image, image_id)\n detect_func: a callable, takes [image] and returns [DetectionResult]\n tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,\n will create a new one.\n\n Returns:\n list of dict, to be dumped to COCO json format\n \"\"\"\n # lazy import\n import pycocotools.mask as cocomask\n from coco import COCOMeta\n\n df.reset_state()\n all_results = []\n # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323\n with ExitStack() as stack:\n if tqdm_bar is None:\n tqdm_bar = stack.enter_context(tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()))\n for img, img_id in df:\n results = detect_func(img)\n for r in results:\n box = r.box\n cat_id = COCOMeta.class_id_to_category_id[r.class_id]\n box[2] -= box[0]\n box[3] -= box[1]\n\n res = {\n 'image_id': img_id,\n 'category_id': cat_id,\n 'bbox': list(map(lambda x: round(float(x), 3), box)),\n 'score': round(float(r.score), 4),\n }\n\n # also append segmentation to results\n if r.mask is not None:\n rle = cocomask.encode(np.array(r.mask[:, :, None], order='F'))[0]\n rle['counts'] = rle['counts'].decode('ascii')\n res['segmentation'] = rle\n all_results.append(res)\n tqdm_bar.update(1)\n return all_results\n\n\ndef eval_coco(df, detect_func, tqdm_bar=None):\n \"\"\"\n Args:\n df: a DataFlow which produces (image, image_id)\n detect_func: a callable, takes [image] and returns [DetectionResult]\n tqdm_bar: a tqdm object to be shared among multiple evaluation instances. 
If None,\n will create a new one.\n\n Returns:\n list of dict, to be dumped to COCO json format\n \"\"\"\n from models.rcnn.breasts import CLASS_NAMES\n df.reset_state()\n all_results = []\n # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323\n with ExitStack() as stack:\n if tqdm_bar is None:\n tqdm_bar = stack.enter_context(tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()))\n for img, img_id in df:\n results = detect_func(img)\n for r in results:\n box = r.box\n cat_id = CLASS_NAMES[r.class_id]\n box[2] -= box[0]\n box[3] -= box[1]\n\n res = {\n 'image_id': img_id,\n 'category_id': cat_id,\n 'bbox': list(map(lambda x: round(float(x), 3), box)),\n 'score': round(float(r.score), 4),\n }\n\n # also append segmentation to results\n assert r.mask is None\n # if r.mask is not None:\n # rle = cocomask.encode(\n # np.array(r.mask[:, :, None], order='F'))[0]\n # rle['counts'] = rle['counts'].decode('ascii')\n # res['segmentation'] = rle\n all_results.append(res)\n tqdm_bar.update(1)\n return all_results\n\n\n# https://github.com/pdollar/coco/blob/master/PythonAPI/pycocoEvalDemo.ipynb\ndef print_evaluation_scores_old(json_file):\n import pycocotools.mask as COCOeval\n from coco import COCO\n\n ret = {}\n assert cfg.DATA.BASEDIR and os.path.isdir(cfg.DATA.BASEDIR)\n annofile = os.path.join(cfg.DATA.BASEDIR, 'annotations', 'instances_{}.json'.format(cfg.DATA.VAL))\n coco = COCO(annofile)\n cocoDt = coco.loadRes(json_file)\n cocoEval = COCOeval(coco, cocoDt, 'bbox')\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n fields = ['IoU=0.5:0.95', 'IoU=0.5', 'IoU=0.75', 'small', 'medium', 'large']\n for k in range(6):\n ret['mAP(bbox)/' + fields[k]] = cocoEval.stats[k]\n\n if cfg.MODE_MASK:\n cocoEval = COCOeval(coco, cocoDt, 'segm')\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n for k in range(6):\n ret['mAP(segm)/' + fields[k]] = cocoEval.stats[k]\n return ret\n\n\n# https://github.com/riblidezso/frcnn_cad/blob/master/demo.ipynb\ndef print_evaluation_scores(json_file,\n include_fooling_stats=cfg.BREASTS.CALC_FOOLING_STATS,\n confidence_score=cfg.BREASTS.MALIGNANT_CONFIDENCE):\n from models.rcnn.breasts import BreastDetection\n from sklearn import metrics\n with open(json_file, 'r') as f:\n results = json.load(f)\n breast_metadata = BreastDetection.load_many(cfg.DATA.VAL_PATTERN)\n breast_metadata = {m['id']: m for m in breast_metadata}\n\n def get_predictions(results, annotations, include_gen):\n preds = defaultdict(set)\n for result in results:\n preds[result['image_id']].add((result['category_id'], result['score']))\n output = {}\n scores = {}\n for id, findings in preds.items():\n if (not include_gen) and id.endswith(\"_gen\"):\n continue\n malignant_scores = [score for klass, score in findings if klass == cfg.DATA.CLASS_NAMES[2]]\n if not malignant_scores:\n scores[id] = 0.0\n else:\n scores[id] = max(malignant_scores)\n\n if malignant_scores and max(malignant_scores) >= confidence_score:\n output[id] = 1\n else:\n output[id] = 0\n\n # Handle cases when no bbox is found.\n for key, value in annotations.items():\n if (not include_gen) and key.endswith(\"_gen\"):\n continue\n if key not in output:\n output[key] = 0\n scores[key] = 0.0\n return output, scores\n\n def to_numpy(preds, annotations, dtype=np.int32):\n pred = []\n truth = []\n for id, lbl in preds.items():\n assert id in annotations\n pred.append(lbl)\n truth.append(annotations[id]['label'])\n return np.asarray(pred, dtype=dtype), np.asarray(truth, dtype=np.int32)\n\n preds, 
scores = get_predictions(results, breast_metadata, False)\n pred, truth = to_numpy(preds, breast_metadata)\n scores, truth_scores = to_numpy(scores, breast_metadata, dtype=np.float32)\n\n def get_fooling_stats(preds, annotations):\n total = wrong_clf_H = wrong_clf_C = fooled_H2C = fooled_C2H = 0\n inference_not_found = 0\n for id, pred_lbl in preds.items():\n assert id in annotations\n if id.endswith(\"_gen\"):\n continue\n total += 1\n lbl = annotations[id]['label']\n if pred_lbl == lbl:\n # Correctly classified.\n # t = list([k for k in preds.keys() if k.startswith(id)])\n # print(t)\n # print(t[0])\n # print(t[1])\n if (id + \"_gen\") not in preds:\n inference_not_found += 1\n continue\n gen_pred_lbl = preds[id + \"_gen\"]\n if lbl == 1:\n if gen_pred_lbl == 0:\n fooled_C2H += 1\n else:\n assert lbl == 0\n if gen_pred_lbl == 1:\n fooled_H2C += 1\n else:\n if lbl == 1:\n wrong_clf_C += 1\n else:\n wrong_clf_H += 1\n return {\n 'fooling/total_num': total,\n 'fooling/inference_not_found': inference_not_found,\n 'fooling/wrong_clf_H': wrong_clf_H,\n 'fooling/wrong_clf_C': wrong_clf_C,\n 'fooling/correct_clf': total - wrong_clf_H - wrong_clf_C,\n 'fooling/fooled': fooled_H2C + fooled_C2H,\n 'fooling/fooled_H2C': fooled_H2C,\n 'fooling/fooled_C2H': fooled_C2H,\n }\n\n ret = {\n 'acc': metrics.accuracy_score(truth, pred),\n 'roc_auc': metrics.roc_auc_score(truth_scores, scores),\n 'f1': metrics.f1_score(truth, pred),\n 'recall': metrics.recall_score(truth, pred),\n 'precision': metrics.precision_score(truth, pred),\n # 'roc': metrics.roc_curve(truth_scores, scores),\n }\n\n if include_fooling_stats:\n preds, scores = get_predictions(results, breast_metadata, True)\n pred, truth = to_numpy(preds, breast_metadata)\n scores, truth_scores = to_numpy(scores, breast_metadata, dtype=np.float32)\n ret2 = {\n 'all/acc': metrics.accuracy_score(truth, pred),\n 'all/roc_auc': metrics.roc_auc_score(truth_scores, scores),\n 'all/f1': metrics.f1_score(truth, pred),\n 'all/recall': metrics.recall_score(truth, pred),\n 'all/precision': metrics.precision_score(truth, pred),\n # 'all/roc': metrics.roc_curve(truth_scores, scores),\n }\n ret.update(get_fooling_stats(preds, breast_metadata))\n ret.update(ret2)\n\n return ret\n" ]
[ [ "tensorflow.nn.softmax", "tensorflow.contrib.layers.python.layers.initializers.xavier_initializer", "tensorflow.reshape", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.constant_initializer", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.variable_scope", "tensorflow.contrib.layers.python.layers.layers._build_variable_getter", "tensorflow.contrib.layers.python.layers.layers._add_variable_to_collections", "tensorflow.contrib.layers.python.layers.utils.collect_named_outputs" ], [ "sklearn.metrics.roc_auc_score", "sklearn.metrics.recall_score", "numpy.sqrt", "numpy.asarray", "sklearn.metrics.precision_score", "sklearn.metrics.f1_score", "numpy.array", "numpy.zeros", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
csp-inc/fluvius
[ "8eb8c3caee2b98720ae17bef384302d6fa88c828" ]
[ "bin/02-preprocess-data.py" ]
[ "import os\nimport pandas as pd\nimport fsspec\nimport argparse\nfrom src.defaults import args_info\n\nenv_vars = open(\"/content/credentials\",\"r\").read().split('\\n')\n\nfor var in env_vars[:-1]:\n key, value = var.split(' = ')\n os.environ[key] = value\n\nstorage_options={'account_name':os.environ['ACCOUNT_NAME'],\\\n 'account_key':os.environ['BLOB_KEY']}\nfs = fsspec.filesystem('az', account_name=storage_options['account_name'], account_key=storage_options['account_key'])\n\n##env data acquired\n\ndef return_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data-src',\n type=args_info[\"data_src\"][\"type\"],\n help=args_info[\"data_src\"][\"help\"])\n parser.add_argument('--write-to-csv',\n action=args_info[\"write_to_csv\"][\"action\"],\n help=args_info[\"write_to_csv\"][\"help\"])\n return parser\n\nif __name__ == \"__main__\":\n\n args = return_parser().parse_args()\n\n if args.data_src == 'usgs':\n #USGS DATA PROCESS\n data_src = 'usgs'\n container = 'usgs-data'\n\n station_url = f'az://{container}/{args.data_src}_station_metadata_raw.csv'\n station_df = pd.read_csv(station_url, storage_options=storage_options)\n\n sites_str = [str(f).zfill(8) for f in station_df.site_no]\n station_df['sites_str'] = sites_str\n\n query = []\n for f in fs.ls(f'{container}/stations'):\n station = os.path.basename(f).split('_')[0]\n query.append(station)\n q = pd.DataFrame({'sites_str':query})\n out = station_df.merge(q, on='sites_str')\n out['site_no'] = out['sites_str']\n out = out[['site_no','site_name', 'Latitude', 'Longitude','geometry']]\n if args.write_to_csv:\n out.to_csv(f'az://{container}/usgs_station_metadata.csv',index=False, storage_options=storage_options)\n\n if args.data_src == 'ana':\n container = 'ana-data'\n station_url = f'az://{container}/ana_station_metadata.csv'\n station_df = pd.read_csv(station_url, storage_options=storage_options)\n for site_no in station_df.site_no:\n station_url = f'az://{container}/{site_no}.csv'\n station_url2 = f'az://{container}/{site_no}_2.csv'\n site_df1_raw = pd.read_csv(station_url, delimiter=',', skiprows=10, storage_options=storage_options)\n translation = pd.read_csv(f'az://{container}/ana_translations.csv', storage_options=storage_options)\n trans = {p:e for p,e in zip(translation.Portuguese, translation.English)}\n site_df1 = site_df1_raw.rename(columns=trans)\n site_df1 = site_df1.dropna(subset=['Date'])\n site_df1['TimeL'] = site_df1['TimeL'].fillna('01/01/1900 01:00')\n site_df1['Date-Time'] = [d for d in site_df1['Date']]\n site_df1['Date-Time'] = pd.to_datetime(site_df1['Date-Time'],\\\n format='%d/%m/%Y')\n\n site_df2_raw = pd.read_csv(station_url2, delimiter=',', skiprows=14, storage_options=storage_options)\n site_df2_raw = site_df2_raw.replace('01/01/1900', '01/01/1900 01:00')\n translation2 = {'Data':'Date','Hora':'Hour','Turbidez':'Turbidity'}\n site_df2 = site_df2_raw.rename(columns=translation2)\n site_df2 = site_df2.dropna(subset=['Date'])\n site_df2['Date-Time-HM'] = [f\"{d} {t.split(' ')[1]}\" for d,t in zip(site_df2['Date'],site_df2['Hour'])]\n site_df2['Date-Time'] = [d for d in site_df2['Date']]\n site_df2['Date-Time'] = pd.to_datetime(site_df2['Date-Time'],\\\n format='%d/%m/%Y')\n site_df2 = site_df2[['Date', 'Hour', 'Date-Time','Turbidity']]\n\n selection = ['Date-Time', 'Discharge', 'Suspended Sediment Concentration (mg/L)', 'Turbidity']\n site_df = site_df1.merge(site_df2, on='Date', how='outer', suffixes=('_',''))\n site_df['Date-Time'] = site_df['Date-Time'].fillna(site_df['Date-Time_'])\n 
#site_df['Hour'] = site_df['Hour'].fillna(site_df['Hour_'])\n site_df = site_df[selection]\n s = str(site_no).zfill(8)\n write_filename = f'az://{container}/stations/{str(site_no)}.csv'\n print(f'writing to {write_filename}')\n if args.write_to_csv:\n site_df.to_csv(write_filename, index=False, storage_options=storage_options)\n \n if args.data_src == 'itv':\n container = 'itv-data'\n station_url = f'az://{container}/itv_station_metadata.csv'\n station_df = pd.read_csv(station_url, storage_options=storage_options)\n for site_no in station_df.site_no:\n station_url = f'az://{container}/{site_no}.csv'\n site_df = pd.read_csv(station_url,\\\n storage_options=storage_options,\\\n delimiter=',')\n\n site_df['Date-Time'] = pd.to_datetime(site_df['Campaign Date'], \\\n format='%d/%m/%Y')\n\n if args.write_to_csv:\n write_filename = f'az://{container}/stations/{site_no}.csv'\n site_df.to_csv(write_filename, storage_options=storage_options,\\\n index=False)\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
tianyapiaozi/tensorflow
[ "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a", "fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a", "fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae" ]
[ "tensorflow/contrib/training/python/training/training.py", "tensorflow/contrib/autograph/utils/multiple_dispatch_test.py", "tensorflow/contrib/eager/python/examples/revnet/cifar_tfrecords.py", "tensorflow/compiler/tests/while_test.py", "tensorflow/python/data/kernel_tests/list_files_dataset_op_test.py", "tensorflow/examples/adding_an_op/fact_test.py", "tensorflow/contrib/training/python/training/evaluation.py", "tensorflow/contrib/lite/python/op_hint.py", "tensorflow/contrib/eager/python/evaluator.py", "tensorflow/contrib/distributions/python/kernel_tests/bijectors/sinh_arcsinh_bijector_test.py", "tensorflow/examples/tutorials/mnist/mnist_with_summaries.py", "tensorflow/contrib/coder/python/layers/entropybottleneck_test.py", "tensorflow/contrib/learn/__init__.py", "tensorflow/contrib/tensorrt/python/trt_convert.py", "tensorflow/contrib/bayesflow/python/ops/monte_carlo_impl.py", "tensorflow/compiler/tests/categorical_op_test.py", "tensorflow/compiler/tests/extract_image_patches_op_test.py", "tensorflow/contrib/receptive_field/python/util/graph_compute_order.py", "tensorflow/contrib/kfac/python/ops/optimizer.py", "tensorflow/compiler/tests/depthwise_conv_op_test.py", "tensorflow/python/data/util/nest_test.py", "tensorflow/python/debug/lib/session_debug_grpc_test.py", "tensorflow/contrib/distributions/python/kernel_tests/mixture_same_family_test.py", "tensorflow/python/keras/applications/nasnet.py", "tensorflow/python/keras/layers/embeddings_test.py", "tensorflow/contrib/util/__init__.py", "tensorflow/python/training/training_util.py", "tensorflow/python/training/slot_creator_test.py", "tensorflow/python/kernel_tests/scan_ops_test.py", "tensorflow/python/estimator/canned/head_test.py", "tensorflow/contrib/opt/python/training/addsign_test.py", "tensorflow/python/estimator/model_fn_test.py", "tensorflow/python/ops/linalg/linear_operator_low_rank_update.py", "tensorflow/contrib/cluster_resolver/python/training/cluster_resolver.py", "tensorflow/contrib/gan/python/eval/python/classifier_metrics.py", "tensorflow/contrib/tensor_forest/python/tensor_forest_test.py", "tensorflow/contrib/tensor_forest/hybrid/python/models/stochastic_hard_decisions_to_data_then_nn.py", "tensorflow/tools/api/lib/python_object_to_proto_visitor.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains various routines and helper functions for training models.\n\nThis script contains various functions for training models. These include\nmanipulating gradients, creating a `train_op` (an operation that computes the\nloss and applies the gradients) and a training loop function. The training loop\nallows the user to pass in the `train_op` and runs the optimization according\nto user-specified arguments.\n\n************************************\n* A simple working training script *\n************************************\n\n # Load data and create the model:\n images, labels = LoadData(...)\n predictions = MyModel(images)\n\n # Define the loss:\n tf.contrib.losses.log_loss(predictions, labels)\n total_loss = tf.contrib.losses.get_total_loss()\n\n # Define the optimizer:\n optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)\n\n # Create the train_op\n train_op = tf.contrib.training.create_train_op(total_loss, optimizer)\n\n # Run training.\n tf.contrib.training.train(train_op, my_log_dir)\n\n*************************\n* Creating the train_op *\n*************************\n\nIn order to use the `train` function, one needs a train_op: an `Operation` that\n(a) computes the loss, (b) applies the gradients to update the weights and\n(c) returns the value of the loss. tf.contrib.training.create_train_op creates\nsuch an `Operation`. This function also provides the ability to manipulate\nthe gradients using a few arguments:\n\n # Create the train_op and clip the gradient norms:\n train_op = tf.contrib.training.create_train_op(\n total_loss,\n optimizer,\n transform_grads_fn=clip_gradient_norms_fn(3))\n\n # Create the train_op and scale the gradients by providing a map from variable\n # name (or variable) to a scaling coefficient:\n def transform_grads_fn(grads):\n gradient_multipliers = {\n 'conv0/weights': 1.2,\n 'fc8/weights': 3.4,\n }\n return tf.contrib.training.multiply_gradients(\n grads, gradient_multipliers)\n\n train_op = tf.contrib.training.create_train_op(\n total_loss,\n optimizer,\n transform_grads_fn=transform_grads_fn)\n\n****************************************************************\n* Performing additional (non-gradient) updates during training *\n****************************************************************\n\nMany networks utilize modules, like BatchNorm, that require performing a series\nof non-gradient updates during training. tf.contrib.training.create_train_op\nallows a user to pass in a list of update_ops to call along with the gradient\nupdates.\n\n train_op = tf.contrib.training.create_train_op(\n total_loss, optimizer, update_ops)\n\nBy default, tf.contrib.training.create_train_op includes all update ops that are\npart of the `tf.GraphKeys.UPDATE_OPS` collection. 
Additionally, the\ntf.contrib.layers.batch_norm function adds the moving mean and moving variance\nupdates to this collection. Consequently, users who want to use\ntf.contrib.layers.batch_norm will not need to take any additional steps in order\nto have the moving mean and moving variance updates be computed.\n\nHowever, users with additional, specialized updates can either override the\ndefault update ops or simply add additional update ops to the\n`tf.GraphKeys.UPDATE_OPS` collection:\n\n # Force `create_train_op` to NOT use ANY update_ops:\n train_op = tf.contrib.training.create_train_op(\n total_loss,\n optimizer,\n update_ops=[])\n\n # Use an alternative set of update ops:\n train_op = tf.contrib.training.create_train_op(\n total_loss,\n optimizer,\n update_ops=my_other_update_ops)\n\n # Use a set of update ops in addition to the default updates:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update0)\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update1)\n\n train_op = tf.contrib.training.create_train_op(\n total_loss,\n optimizer)\n\n # Which is the same as:\n train_op = tf.contrib.training.create_train_op(\n total_loss,\n optimizer,\n update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS))\n\n******************************************\n* Initializing a model from a checkpoint *\n******************************************\n\nIt is common to want to 'warm-start' a model from a pre-trained checkpoint.\nOne can use a tf.Scaffold and an initializing function to do so.\n\n ...\n\n # Create the train_op\n train_op = tf.contrib.training.create_train_op(total_loss, optimizer)\n\n # Create the initial assignment op\n checkpoint_path = '/path/to/old_model_checkpoint'\n variables_to_restore = tf.contrib.framework.get_model_variables()\n init_fn = tf.contrib.framework.assign_from_checkpoint_fn(\n checkpoint_path, variables_to_restore)\n\n # Run training.\n scaffold = tf.Scaffold(init_fn=init_fn)\n tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)\n\n***************************************************************************\n* Initializing a model from a checkpoint whose variable names don't match *\n***************************************************************************\n\nAt times, a user may want to initialize a new model with values from a\ncheckpoint whose variable names do not match those of the current model. In this\ncase, one needs to create a mapping from the checkpoint variable names to the\ncurrent model variables. This requires only a small modification of the code\nabove:\n ...\n # Creates a model with two variables, var0 and var1\n predictions = MyModel(images)\n ...\n\n # Create the train_op\n train_op = tf.contrib.training.create_train_op(total_loss, optimizer)\n\n checkpoint_path = '/path/to/old_model_checkpoint'\n\n # Create the mapping:\n variables_to_restore = {\n 'name_var_0_in_checkpoint':\n tf.contrib.framework.get_unique_variable('var0'),\n 'name_var_1_in_checkpoint':\n tf.contrib.framework.get_unique_variable('var1')\n }\n init_fn = tf.contrib.framework.assign_from_checkpoint_fn(\n checkpoint_path, variables_to_restore)\n scaffold = tf.Scaffold(init_fn=init_fn)\n\n # Run training.\n tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)\n\n\n*************************************************\n* Fine-Tuning Part of a model from a checkpoint *\n*************************************************\n\nRather than initializing all of the weights of a given model, we sometimes\nonly want to restore some of the weights from a checkpoint. 
To do this, one\nneed only filter those variables to initialize as follows:\n\n ...\n\n # Create the train_op\n train_op = tf.contrib.training.create_train_op(total_loss, optimizer)\n\n checkpoint_path = '/path/to/old_model_checkpoint'\n\n # Specify the variables to restore via a list of inclusion or exclusion\n # patterns:\n variables_to_restore = tf.contrib.framework.get_variables_to_restore(\n include=[\"conv\"], exclude=[\"fc8\", \"fc9])\n # or\n variables_to_restore = tf.contrib.framework.get_variables_to_restore(\n exclude=[\"conv\"])\n\n init_fn = tf.contrib.framework.assign_from_checkpoint_fn(\n checkpoint_path, variables_to_restore)\n scaffold = tf.Scaffold(init_fn=init_fn)\n\n # Run training.\n tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)\n\n******************************************************\n* Initializing model variables from values in memory *\n******************************************************\n\nOne may want to initialize the weights of a model from values coming from an\narbitrary source (a text document, matlab file, etc). While this is technically\nfeasible using assign operations, this strategy results in the values of your\nweights being stored in the graph. For large models, this becomes prohibitively\nlarge. However, it's possible to perform this initial assignment without having\nto store the values of the initial model in the graph itself by using\nplaceholders and a feed dictionary:\n\n ...\n\n # Create the train_op\n train_op = tf.contrib.training.create_train_op(total_loss, optimizer)\n\n # Create the mapping from variable names to values:\n var0_initial_value = ReadFromDisk(...)\n var1_initial_value = ReadFromDisk(...)\n\n var_names_to_values = {\n 'var0': var0_initial_value,\n 'var1': var1_initial_value,\n }\n\n init_fn = tf.contrib.framework.assign_from_values_fn(var_names_to_values)\n scaffold = tf.Scaffold(init_fn=init_fn)\n\n # Run training.\n tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import optimizer as tf_optimizer\nfrom tensorflow.python.training import training_util\n\n# TODO(nsilberman): move add_gradients_summaries, clip_gradient_norms and\n# multiply_gradients into contrib/summaries and contrib/optimizers.py\n__all__ = [\n 'add_gradients_summaries',\n 'clip_gradient_norms',\n 'clip_gradient_norms_fn',\n 'create_train_op',\n 'multiply_gradients',\n 'train',\n]\n\n\ndef add_gradients_summaries(grads_and_vars):\n \"\"\"Add summaries to gradients.\n\n Args:\n grads_and_vars: A list of gradient to variable pairs (tuples).\n\n Returns:\n The list of created summaries.\n \"\"\"\n summaries = []\n for grad, var in grads_and_vars:\n if grad is not None:\n if isinstance(grad, ops.IndexedSlices):\n grad_values = grad.values\n else:\n grad_values = grad\n summaries.append(\n summary.histogram(var.op.name + '_gradient', grad_values))\n summaries.append(\n summary.scalar(var.op.name + 
'_gradient_norm',\n clip_ops.global_norm([grad_values])))\n else:\n logging.info('Var %s has no gradient', var.op.name)\n\n return summaries\n\n\ndef clip_gradient_norms(gradients_to_variables, max_norm):\n \"\"\"Clips the gradients by the given value.\n\n Args:\n gradients_to_variables: A list of gradient to variable pairs (tuples).\n max_norm: the maximum norm value.\n\n Returns:\n A list of clipped gradient to variable pairs.\n \"\"\"\n clipped_grads_and_vars = []\n for grad, var in gradients_to_variables:\n if grad is not None:\n if isinstance(grad, ops.IndexedSlices):\n tmp = clip_ops.clip_by_norm(grad.values, max_norm)\n grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)\n else:\n grad = clip_ops.clip_by_norm(grad, max_norm)\n clipped_grads_and_vars.append((grad, var))\n return clipped_grads_and_vars\n\n\ndef clip_gradient_norms_fn(max_norm):\n \"\"\"Returns a `transform_grads_fn` function for gradient clipping.\"\"\"\n def clip_norms(gradients_to_variables):\n return clip_gradient_norms(gradients_to_variables, max_norm)\n return clip_norms\n\n\ndef multiply_gradients(grads_and_vars, gradient_multipliers):\n \"\"\"Multiply specified gradients.\n\n Args:\n grads_and_vars: A list of gradient to variable pairs (tuples).\n gradient_multipliers: A map from either `Variables` or `Variable` op names\n to the coefficient by which the associated gradient should be scaled.\n\n Returns:\n The updated list of gradient to variable pairs.\n\n Raises:\n ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`\n is empty or None or if `gradient_multipliers` is not a dictionary.\n \"\"\"\n if not isinstance(grads_and_vars, list):\n raise ValueError('`grads_and_vars` must be a list.')\n if not gradient_multipliers:\n raise ValueError('`gradient_multipliers` is empty.')\n if not isinstance(gradient_multipliers, dict):\n raise ValueError('`gradient_multipliers` must be a dict.')\n\n multiplied_grads_and_vars = []\n for grad, var in grads_and_vars:\n if var in gradient_multipliers or var.op.name in gradient_multipliers:\n key = var if var in gradient_multipliers else var.op.name\n if grad is None:\n raise ValueError('Requested multiple of `None` gradient.')\n\n if isinstance(grad, ops.IndexedSlices):\n tmp = grad.values * constant_op.constant(\n gradient_multipliers[key], dtype=grad.dtype)\n grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)\n else:\n grad *= constant_op.constant(\n gradient_multipliers[key], dtype=grad.dtype)\n multiplied_grads_and_vars.append((grad, var))\n return multiplied_grads_and_vars\n\n\n_USE_GLOBAL_STEP = 0\n\n\ndef create_train_op(total_loss,\n optimizer,\n global_step=_USE_GLOBAL_STEP,\n update_ops=None,\n variables_to_train=None,\n transform_grads_fn=None,\n summarize_gradients=False,\n gate_gradients=tf_optimizer.Optimizer.GATE_OP,\n aggregation_method=None,\n colocate_gradients_with_ops=False,\n check_numerics=True):\n \"\"\"Creates an `Operation` that evaluates the gradients and returns the loss.\n\n Args:\n total_loss: A `Tensor` representing the total loss.\n optimizer: A tf.Optimizer to use for computing the gradients.\n global_step: A `Tensor` representing the global step variable. If left as\n `_USE_GLOBAL_STEP`, then tf.contrib.framework.global_step() is used.\n update_ops: An optional list of updates to execute. If `update_ops` is\n `None`, then the update ops are set to the contents of the\n `tf.GraphKeys.UPDATE_OPS` collection. 
If `update_ops` is not `None`, but\n it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`,\n a warning will be displayed.\n variables_to_train: an optional list of variables to train. If None, it will\n default to all tf.trainable_variables().\n transform_grads_fn: A function which takes a single argument, a list of\n gradient to variable pairs (tuples), performs any requested gradient\n updates, such as gradient clipping or multipliers, and returns the updated\n list.\n summarize_gradients: Whether or not add summaries for each gradient.\n gate_gradients: How to gate the computation of gradients. See tf.Optimizer.\n aggregation_method: Specifies the method used to combine gradient terms.\n Valid values are defined in the class `AggregationMethod`.\n colocate_gradients_with_ops: Whether or not to try colocating the gradients\n with the ops that generated them.\n check_numerics: Whether or not we apply check_numerics.\n\n Returns:\n A `Tensor` that when evaluated, computes the gradients and returns the total\n loss value.\n \"\"\"\n if global_step is _USE_GLOBAL_STEP:\n global_step = training_util.get_or_create_global_step()\n\n # Update ops use GraphKeys.UPDATE_OPS collection if update_ops is None.\n global_update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))\n if update_ops is None:\n update_ops = global_update_ops\n else:\n update_ops = set(update_ops)\n if not global_update_ops.issubset(update_ops):\n logging.warning('update_ops in create_train_op does not contain all the '\n ' update_ops in GraphKeys.UPDATE_OPS')\n\n # Make sure update_ops are computed before total_loss.\n if update_ops:\n with ops.control_dependencies(update_ops):\n barrier = control_flow_ops.no_op(name='update_barrier')\n total_loss = control_flow_ops.with_dependencies([barrier], total_loss)\n\n if variables_to_train is None:\n # Default to tf.trainable_variables()\n variables_to_train = tf_variables.trainable_variables()\n else:\n # Make sure that variables_to_train are in tf.trainable_variables()\n for v in variables_to_train:\n assert v in tf_variables.trainable_variables()\n\n assert variables_to_train\n\n # Create the gradients. 
Note that apply_gradients adds the gradient\n # computation to the current graph.\n grads = optimizer.compute_gradients(\n total_loss,\n variables_to_train,\n gate_gradients=gate_gradients,\n aggregation_method=aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops)\n\n if transform_grads_fn:\n grads = transform_grads_fn(grads)\n\n # Summarize gradients.\n if summarize_gradients:\n with ops.name_scope('summarize_grads'):\n add_gradients_summaries(grads)\n\n # Create gradient updates.\n grad_updates = optimizer.apply_gradients(grads, global_step=global_step)\n\n with ops.name_scope('train_op'):\n # Make sure total_loss is valid.\n if check_numerics:\n total_loss = array_ops.check_numerics(total_loss,\n 'LossTensor is inf or nan')\n\n # Ensure the train_tensor computes grad_updates.\n train_op = control_flow_ops.with_dependencies([grad_updates], total_loss)\n\n # Add the operation used for training to the 'train_op' collection\n train_ops = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)\n if train_op not in train_ops:\n train_ops.append(train_op)\n\n return train_op\n\n\ndef train(train_op,\n logdir,\n master='',\n is_chief=True,\n scaffold=None,\n hooks=None,\n chief_only_hooks=None,\n save_checkpoint_secs=600,\n save_summaries_steps=100,\n config=None,\n max_wait_secs=7200):\n \"\"\"Runs the training loop.\n\n Args:\n train_op: A `Tensor` that, when executed, will apply the gradients and\n return the loss value.\n logdir: The directory where the graph and checkpoints are saved.\n master: The URL of the master.\n is_chief: Specifies whether or not the training is being run by the primary\n replica during replica training.\n scaffold: An tf.train.Scaffold instance.\n hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the\n training loop.\n chief_only_hooks: List of `tf.train.SessionRunHook` instances which are run\n inside the training loop for the chief trainer only.\n save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved\n using a default checkpoint saver. If `save_checkpoint_secs` is set to\n `None`, then the default checkpoint saver isn't used.\n save_summaries_steps: The frequency, in number of global steps, that the\n summaries are written to disk using a default summary saver. If\n `save_summaries_steps` is set to `None`, then the default summary saver\n isn't used.\n config: An instance of `tf.ConfigProto`.\n max_wait_secs: Maximum time workers should wait for the session to\n become available. 
This should be kept relatively short to help detect\n incorrect code, but sometimes may need to be increased if the chief takes\n a while to start up.\n\n Returns:\n the value of the loss function after training.\n\n Raises:\n ValueError: if `logdir` is `None` and either `save_checkpoint_secs` or\n `save_summaries_steps` are `None.\n \"\"\"\n if logdir is None and is_chief:\n if save_summaries_steps:\n raise ValueError(\n 'logdir cannot be None when save_summaries_steps is not None')\n\n if save_checkpoint_secs:\n raise ValueError(\n 'logdir cannot be None when save_checkpoint_secs is not None')\n\n with monitored_session.MonitoredTrainingSession(\n master=master,\n is_chief=is_chief,\n checkpoint_dir=logdir,\n scaffold=scaffold,\n hooks=hooks,\n chief_only_hooks=chief_only_hooks,\n save_checkpoint_secs=save_checkpoint_secs,\n save_summaries_steps=save_summaries_steps,\n config=config,\n max_wait_secs=max_wait_secs) as session:\n loss = None\n while not session.should_stop():\n loss = session.run(train_op)\n return loss\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for multiple_dispatch.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.autograph.utils import multiple_dispatch\nfrom tensorflow.python.client.session import Session\nfrom tensorflow.python.framework.constant_op import constant\nfrom tensorflow.python.platform import test\n\n\nclass MultipleDispatchTest(test.TestCase):\n\n def test_dynamic_is_python(self):\n a = np.eye(3)\n also_a = a\n not_actually_a = np.eye(3)\n should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)\n should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)\n should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)\n should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)\n self.assertTrue(should_be_true1)\n self.assertTrue(should_be_true2)\n self.assertFalse(should_be_false1)\n self.assertFalse(should_be_false2)\n\n def test_dynamic_is_tf(self):\n with Session().as_default():\n a = constant([2.0])\n also_a = a\n not_actually_a = constant([2.0])\n should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)\n should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)\n should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)\n should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)\n self.assertTrue(should_be_true1)\n self.assertTrue(should_be_true2)\n self.assertFalse(should_be_false1)\n self.assertFalse(should_be_false2)\n\n def test_run_cond_python(self):\n true_fn = lambda: (2,)\n false_fn = lambda: (3,)\n self.assertEqual(multiple_dispatch.run_cond(True, true_fn, false_fn), 2)\n self.assertEqual(multiple_dispatch.run_cond(False, true_fn, false_fn), 3)\n\n def test_run_cond_tf(self):\n true_fn = lambda: 
(constant(2),)\n false_fn = lambda: (constant(3),)\n with Session() as sess:\n out = multiple_dispatch.run_cond(constant(True), true_fn, false_fn)\n self.assertEqual(sess.run(out), 2)\n out = multiple_dispatch.run_cond(constant(False), true_fn, false_fn)\n self.assertEqual(sess.run(out), 3)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Read CIFAR-10 data from pickled numpy arrays and writes TFRecords.\n\nGenerates tf.train.Example protos and writes them to TFRecord files from the\npython version of the CIFAR-10 dataset downloaded from\nhttps://www.cs.toronto.edu/~kriz/cifar.html.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport tarfile\n\nfrom absl import flags\nfrom six.moves import cPickle as pickle\nfrom six.moves import urllib\nimport tensorflow as tf\n\nCIFAR_FILENAME = 'cifar-10-python.tar.gz'\nCIFAR_DOWNLOAD_URL = 'https://www.cs.toronto.edu/~kriz/' + CIFAR_FILENAME\nCIFAR_LOCAL_FOLDER = 'cifar-10-batches-py'\n\n\ndef download_and_extract(data_dir):\n \"\"\"Download CIFAR-10 if not already downloaded.\"\"\"\n filepath = os.path.join(data_dir, CIFAR_FILENAME)\n if tf.gfile.Exists(filepath):\n return filepath\n if not tf.gfile.Exists(data_dir):\n tf.gfile.MakeDirs(data_dir)\n\n urllib.request.urlretrieve(CIFAR_DOWNLOAD_URL, filepath)\n tarfile.open(os.path.join(filepath), 'r:gz').extractall(data_dir)\n return filepath\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _get_file_names():\n \"\"\"Returns the file names expected to exist in the input_dir.\"\"\"\n file_names = {}\n file_names['train'] = ['data_batch_%d' % i for i in range(1, 5)]\n file_names['validation'] = ['data_batch_5']\n file_names['test'] = ['test_batch']\n return file_names\n\n\ndef read_pickle_from_file(filename):\n with tf.gfile.Open(filename, 'rb') as f:\n if sys.version_info >= (3, 0):\n data_dict = pickle.load(f, encoding='bytes')\n else:\n data_dict = pickle.load(f)\n return data_dict\n\n\ndef convert_to_tfrecord(input_files, output_file):\n \"\"\"Converts files with pickled data to TFRecords.\"\"\"\n print('Generating %s' % output_file)\n with tf.python_io.TFRecordWriter(output_file) as record_writer:\n for input_file in input_files:\n data_dict = read_pickle_from_file(input_file)\n data = data_dict[b'data']\n labels = data_dict[b'labels']\n num_entries_in_batch = len(labels)\n\n for i in range(num_entries_in_batch):\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'image': _bytes_feature(data[i].tobytes()),\n 'label': _int64_feature(labels[i])\n }))\n record_writer.write(example.SerializeToString())\n\n\ndef 
main(_):\n print('Download from {} and extract.'.format(CIFAR_DOWNLOAD_URL))\n download_and_extract(FLAGS.data_dir)\n file_names = _get_file_names()\n input_dir = os.path.join(FLAGS.data_dir, CIFAR_LOCAL_FOLDER)\n\n for mode, files in file_names.items():\n input_files = [os.path.join(input_dir, f) for f in files]\n output_file = os.path.join(FLAGS.data_dir, mode + '.tfrecords')\n try:\n os.remove(output_file)\n except OSError:\n pass\n convert_to_tfrecord(input_files, output_file)\n print('Done!')\n\n\nif __name__ == '__main__':\n FLAGS = flags.FLAGS\n flags.DEFINE_string(\n 'data_dir',\n default=None,\n help='Directory to download and extract CIFAR-10 to.')\n\n tf.app.run(main)\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for while loops in XLA.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.compiler.tests.xla_test import XLATestCase\nfrom tensorflow.compiler.tf2xla.python import xla\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\n\n\nclass WhileTest(XLATestCase):\n\n def testSingletonLoopHandrolled(self):\n # Define a function for the loop body\n @function.Defun(dtypes.int32)\n def loop_body(step):\n step_out = step + constant_op.constant(1, dtype=dtypes.int32)\n return step_out\n\n # Define a function for the loop condition\n @function.Defun(dtypes.int32)\n def loop_cond(step):\n return step < 10\n\n with self.test_session() as sess:\n init_index = array_ops.placeholder(dtypes.int32, [])\n with self.test_scope():\n loop_outputs = xla.while_loop([init_index], loop_cond, loop_body)\n\n result = sess.run(loop_outputs, {init_index: 0})\n self.assertAllClose(result, [10], rtol=1e-3)\n\n def testCountingLoopHandrolled(self):\n # Define a function for the loop body\n @function.Defun(dtypes.int32, dtypes.float32)\n def loop_body(step, rsum):\n step_out = step + constant_op.constant(1, dtype=dtypes.int32)\n sum_out = rsum + constant_op.constant(1.5, dtype=dtypes.float32)\n return step_out, sum_out\n\n # Define a function for the loop condition\n @function.Defun(dtypes.int32, dtypes.float32)\n def loop_cond(step, rsum):\n del rsum\n return step < 10\n\n with self.test_session() as sess:\n init_index = array_ops.placeholder(dtypes.int32, [])\n init_sum = array_ops.placeholder(dtypes.float32, [])\n with self.test_scope():\n loop_outputs = xla.while_loop([init_index, init_sum], loop_cond,\n loop_body)\n\n result = sess.run(loop_outputs, {init_index: 0, init_sum: 0.0})\n self.assertAllClose(result, [10, 15.0], rtol=1e-3)\n no_iters_result = sess.run(loop_outputs, {init_index: 10, init_sum: 0.0})\n self.assertAllClose(no_iters_result, 
[10, 0.0], rtol=1e-3)\n\n def testCountingLoopHandrolledC64(self):\n # Define a function for the loop body\n @function.Defun(dtypes.int32, dtypes.complex64)\n def loop_body(step, rsum):\n step_out = step + constant_op.constant(1, dtype=dtypes.int32)\n sum_out = rsum + constant_op.constant(1.5 + 2j, dtype=dtypes.complex64)\n return step_out, sum_out\n\n # Define a function for the loop condition\n @function.Defun(dtypes.int32, dtypes.complex64)\n def loop_cond(step, rsum):\n del rsum\n return step < 10\n\n with self.test_session() as sess:\n init_index = array_ops.placeholder(dtypes.int32, [])\n init_sum = array_ops.placeholder(dtypes.complex64, [])\n with self.test_scope():\n loop_outputs = xla.while_loop([init_index, init_sum], loop_cond,\n loop_body)\n\n result = sess.run(loop_outputs, {init_index: 0, init_sum: 0.0})\n self.assertAllClose(result[1], np.complex64(15 + 20j), rtol=1e-3)\n no_iters_result = sess.run(loop_outputs, {init_index: 10, init_sum: 0.0})\n self.assertAllClose(no_iters_result[1], np.complex64(0), rtol=1e-3)\n\n def testLoopWithConstantOutput(self):\n # Define a function for the loop body\n @function.Defun(dtypes.int32, dtypes.int32)\n def loop_body(step, x):\n del x\n step_out = step + constant_op.constant(1, dtype=dtypes.int32)\n return (step_out, 7)\n\n # Define a function for the loop condition\n @function.Defun(dtypes.int32, dtypes.int32)\n def loop_cond(step, x):\n del x\n return step < 10\n\n with self.test_session() as sess:\n init_index = array_ops.placeholder(dtypes.int32, [])\n with self.test_scope():\n loop_outputs = xla.while_loop([init_index, 42], loop_cond, loop_body)\n\n result = sess.run(loop_outputs, {init_index: 0})\n self.assertAllClose(result, [10, 7], rtol=1e-3)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the experimental input pipeline ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom os import path\nimport shutil\nimport tempfile\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import compat\n\n\nclass ListFilesDatasetOpTest(test.TestCase):\n\n def setUp(self):\n self.tmp_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.tmp_dir, ignore_errors=True)\n\n def _touchTempFiles(self, filenames):\n for filename in filenames:\n open(path.join(self.tmp_dir, filename), 'a').close()\n\n def testEmptyDirectory(self):\n dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))\n with self.test_session() as sess:\n itr = dataset.make_one_shot_iterator()\n next_element = itr.get_next()\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n def testSimpleDirectory(self):\n filenames = ['a', 'b', 'c']\n self._touchTempFiles(filenames)\n\n dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))\n with self.test_session() as sess:\n itr = dataset.make_one_shot_iterator()\n next_element = itr.get_next()\n\n full_filenames = []\n produced_filenames = []\n for filename in filenames:\n full_filenames.append(\n compat.as_bytes(path.join(self.tmp_dir, filename)))\n produced_filenames.append(compat.as_bytes(sess.run(next_element)))\n self.assertItemsEqual(full_filenames, produced_filenames)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(itr.get_next())\n\n def testSimpleDirectoryNotShuffled(self):\n filenames = ['b', 'c', 'a']\n self._touchTempFiles(filenames)\n\n dataset = dataset_ops.Dataset.list_files(\n path.join(self.tmp_dir, '*'), shuffle=False)\n with self.test_session() as sess:\n itr = dataset.make_one_shot_iterator()\n next_element = itr.get_next()\n\n for filename in sorted(filenames):\n self.assertEqual(compat.as_bytes(path.join(self.tmp_dir, filename)),\n sess.run(next_element))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(itr.get_next())\n\n def testFixedSeedResultsInRepeatableOrder(self):\n filenames = ['a', 'b', 'c']\n self._touchTempFiles(filenames)\n\n dataset = dataset_ops.Dataset.list_files(\n path.join(self.tmp_dir, '*'), shuffle=True, seed=37)\n with self.test_session() as sess:\n itr = dataset.make_initializable_iterator()\n next_element = itr.get_next()\n\n full_filenames = [compat.as_bytes(path.join(self.tmp_dir, filename))\n for filename in filenames]\n\n all_produced_filenames = []\n for _ in range(3):\n produced_filenames = []\n sess.run(itr.initializer)\n try:\n while True:\n 
produced_filenames.append(sess.run(next_element))\n except errors.OutOfRangeError:\n pass\n all_produced_filenames.append(produced_filenames)\n\n # Each run should produce the same set of filenames, which may be\n # different from the order of `full_filenames`.\n self.assertItemsEqual(full_filenames, all_produced_filenames[0])\n # However, the different runs should produce filenames in the same order\n # as each other.\n self.assertEqual(all_produced_filenames[0], all_produced_filenames[1])\n self.assertEqual(all_produced_filenames[0], all_produced_filenames[2])\n\n def testEmptyDirectoryInitializer(self):\n filename_placeholder = array_ops.placeholder(dtypes.string, shape=[])\n dataset = dataset_ops.Dataset.list_files(filename_placeholder)\n\n with self.test_session() as sess:\n itr = dataset.make_initializable_iterator()\n next_element = itr.get_next()\n sess.run(\n itr.initializer,\n feed_dict={filename_placeholder: path.join(self.tmp_dir, '*')})\n\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n def testSimpleDirectoryInitializer(self):\n filenames = ['a', 'b', 'c']\n self._touchTempFiles(filenames)\n\n filename_placeholder = array_ops.placeholder(dtypes.string, shape=[])\n dataset = dataset_ops.Dataset.list_files(filename_placeholder)\n\n with self.test_session() as sess:\n itr = dataset.make_initializable_iterator()\n next_element = itr.get_next()\n sess.run(\n itr.initializer,\n feed_dict={filename_placeholder: path.join(self.tmp_dir, '*')})\n\n full_filenames = []\n produced_filenames = []\n for filename in filenames:\n full_filenames.append(\n compat.as_bytes(path.join(self.tmp_dir, filename)))\n produced_filenames.append(compat.as_bytes(sess.run(next_element)))\n\n self.assertItemsEqual(full_filenames, produced_filenames)\n\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(itr.get_next())\n\n def testFileSuffixes(self):\n filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']\n self._touchTempFiles(filenames)\n\n filename_placeholder = array_ops.placeholder(dtypes.string, shape=[])\n dataset = dataset_ops.Dataset.list_files(filename_placeholder)\n\n with self.test_session() as sess:\n itr = dataset.make_initializable_iterator()\n next_element = itr.get_next()\n sess.run(\n itr.initializer,\n feed_dict={filename_placeholder: path.join(self.tmp_dir, '*.py')})\n\n full_filenames = []\n produced_filenames = []\n for filename in filenames[1:-1]:\n full_filenames.append(\n compat.as_bytes(path.join(self.tmp_dir, filename)))\n produced_filenames.append(compat.as_bytes(sess.run(next_element)))\n self.assertItemsEqual(full_filenames, produced_filenames)\n\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(itr.get_next())\n\n def testFileMiddles(self):\n filenames = ['a.txt', 'b.py', 'c.pyc']\n self._touchTempFiles(filenames)\n\n filename_placeholder = array_ops.placeholder(dtypes.string, shape=[])\n dataset = dataset_ops.Dataset.list_files(filename_placeholder)\n\n with self.test_session() as sess:\n itr = dataset.make_initializable_iterator()\n next_element = itr.get_next()\n sess.run(\n itr.initializer,\n feed_dict={filename_placeholder: path.join(self.tmp_dir, '*.py*')})\n\n full_filenames = []\n produced_filenames = []\n for filename in filenames[1:]:\n full_filenames.append(\n compat.as_bytes(path.join(self.tmp_dir, filename)))\n produced_filenames.append(compat.as_bytes(sess.run(next_element)))\n\n self.assertItemsEqual(full_filenames, produced_filenames)\n\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(itr.get_next())\n\n def 
testNoShuffle(self):\n filenames = ['a', 'b', 'c']\n self._touchTempFiles(filenames)\n\n # Repeat the list twice and ensure that the order is the same each time.\n # NOTE(mrry): This depends on an implementation detail of `list_files()`,\n # which is that the list of files is captured when the iterator is\n # initialized. Otherwise, or if e.g. the iterator were initialized more than\n # once, it's possible that the non-determinism of `tf.matching_files()`\n # would cause this test to fail. However, it serves as a useful confirmation\n # that the `shuffle=False` argument is working as intended.\n # TODO(b/73959787): Provide some ordering guarantees so that this test is\n # more meaningful.\n dataset = dataset_ops.Dataset.list_files(\n path.join(self.tmp_dir, '*'), shuffle=False).repeat(2)\n with self.test_session() as sess:\n itr = dataset.make_one_shot_iterator()\n next_element = itr.get_next()\n\n full_filenames = []\n produced_filenames = []\n for filename in filenames * 2:\n full_filenames.append(\n compat.as_bytes(path.join(self.tmp_dir, filename)))\n produced_filenames.append(compat.as_bytes(sess.run(next_element)))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(itr.get_next())\n self.assertItemsEqual(full_filenames, produced_filenames)\n self.assertEqual(produced_filenames[:len(filenames)],\n produced_filenames[len(filenames):])\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Test that user ops can be used as expected.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\nclass FactTest(tf.test.TestCase):\n\n def test(self):\n with self.test_session():\n print(tf.user_ops.my_fact().eval())\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains functions for evaluation and summarization of metrics.\n\nThe evaluation.py module contains helper functions for evaluating TensorFlow\nmodules using a variety of metrics and summarizing the results.\n\n****************************************\n* Evaluating a Checkpointed Model Once *\n****************************************\n\nOnce we've trained a model, we'll want to evaluate it. The simplest way to do\nthis is to evaluate the performance of a saved model a single time. In order\nto do this, we can specify a number of metrics we'll want to evaluate as well\nas specify the summaries we want to save to disk. Furthermore, we can print\nout the metrics values to stdout:\n\n # Specify where the checkpoint is stored:\n checkpoint_path = ...\n\n # Create model and obtain the predictions:\n images, labels = LoadData(...)\n predictions = MyModel(images)\n\n # Choose the metrics to compute:\n names_to_values, names_to_updates = tf.contrib.metrics.aggregate_metric_map({\n \"accuracy\": tf.metrics.accuracy(labels, predictions),\n \"mse\": tf.metrics.mean_squared_error(labels, predictions),\n })\n\n # Define the summaries to write:\n for metric_name, metric_value in metrics_to_values.iteritems():\n tf.summary.scalar(metric_name, metric_value)\n\n checkpoint_dir = '/tmp/my_model_dir/'\n log_dir = '/tmp/my_model_eval/'\n\n # We'll evaluate 1000 batches:\n num_evals = 1000\n\n names_to_values = evaluate_once(\n checkpoint_path=checkpoint_path,\n eval_ops=names_to_updates.values(),\n final_ops=names_to_values,\n hooks=[\n tf.contrib.training.StopAfterNEvalsHook(num_evals),\n tf.contrib.training.SummaryAtEndHook(logdir),\n ],\n config=None)\n\n for name in names_to_values:\n print('Metric %s has value %f.' % (name, names_to_values[name]))\n\n\n************************************************\n* Evaluating a Checkpointed Model with Metrics *\n************************************************\n\nOften, one wants to evaluate a model checkpoint saved on disk. 
This can be\nperformed once or repeatedly on a set schedule.\n\nTo evaluate a particular model, users define zero or more metrics and zero or\nmore summaries and call the evaluate_repeatedly method:\n\n # Create model and obtain the predictions:\n images, labels = LoadData(...)\n predictions = MyModel(images)\n\n # Choose the metrics to compute:\n names_to_values, names_to_updates = tf.contrib.metrics.aggregate_metric_map({\n \"accuracy\": tf.metrics.accuracy(labels, predictions),\n \"mse\": tf.metrics.mean_squared_error(labels, predictions),\n })\n\n # Define the summaries to write:\n for metric_name, metric_value in metrics_to_values.iteritems():\n tf.summary.scalar(metric_name, metric_value)\n\n checkpoint_dir = '/tmp/my_model_dir/'\n log_dir = '/tmp/my_model_eval/'\n\n # We'll evaluate 1000 batches:\n num_evals = 1000\n\n # Evaluate every 10 minutes:\n tf.contrib.training.evaluate_repeatedly(\n checkpoint_dir,\n eval_ops=names_to_updates.values(),\n hooks=[\n tf.contrib.training.StopAfterNEvalsHook(num_evals),\n tf.contrib.training.SummaryAtEndHook(logdir),\n ],\n eval_interval_secs=600)\n\n*******************************************************\n* Evaluating a Checkpointed Model with Summaries Only *\n*******************************************************\n\nAt times, an evaluation can be performed without metrics at all but rather\nwith only summaries. The user need only leave out the 'eval_ops' argument:\n\n # Create model and obtain the predictions:\n images, labels = LoadData(...)\n predictions = MyModel(images)\n\n # Define the summaries to write:\n tf.summary.scalar(...)\n tf.summary.histogram(...)\n\n checkpoint_dir = '/tmp/my_model_dir/'\n log_dir = '/tmp/my_model_eval/'\n\n # Evaluate once every 10 minutes.\n tf.contrib.training.evaluate_repeatedly(\n checkpoint_dir,\n hooks=[\n tf.contrib.training.SummaryAtEndHook(logdir),\n ],\n eval_interval_secs=600)\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import basic_session_run_hooks\nfrom tensorflow.python.training import evaluation\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import saver as tf_saver\nfrom tensorflow.python.training import session_run_hook\nfrom tensorflow.python.training import training_util\n\n__all__ = [\n 'StopAfterNEvalsHook',\n 'SummaryAtEndHook',\n 'checkpoints_iterator',\n 'evaluate_once',\n 'evaluate_repeatedly',\n 'get_or_create_eval_step',\n 'wait_for_new_checkpoint',\n]\n\n# pylint: disable=protected-access\n# pylint: disable=invalid-name\nStopAfterNEvalsHook = evaluation._StopAfterNEvalsHook\nevaluate_once = evaluation._evaluate_once\nget_or_create_eval_step = evaluation._get_or_create_eval_step\n\n# pylint: enable=invalid-name\n# pylint: enable=protected-access\n\n\ndef wait_for_new_checkpoint(checkpoint_dir,\n last_checkpoint=None,\n seconds_to_sleep=1,\n timeout=None):\n \"\"\"Waits until a new checkpoint file is found.\n\n Args:\n checkpoint_dir: The directory in which checkpoints are saved.\n last_checkpoint: The last checkpoint path used or `None` if we're expecting\n a checkpoint for the first time.\n seconds_to_sleep: The number of seconds to sleep for before looking for a\n new checkpoint.\n timeout: The maximum amount of time to wait. 
If left as `None`, then the\n process will wait indefinitely.\n\n Returns:\n a new checkpoint path, or None if the timeout was reached.\n \"\"\"\n logging.info('Waiting for new checkpoint at %s', checkpoint_dir)\n stop_time = time.time() + timeout if timeout is not None else None\n while True:\n checkpoint_path = tf_saver.latest_checkpoint(checkpoint_dir)\n if checkpoint_path is None or checkpoint_path == last_checkpoint:\n if stop_time is not None and time.time() + seconds_to_sleep > stop_time:\n return None\n time.sleep(seconds_to_sleep)\n else:\n logging.info('Found new checkpoint at %s', checkpoint_path)\n return checkpoint_path\n\n\ndef checkpoints_iterator(checkpoint_dir,\n min_interval_secs=0,\n timeout=None,\n timeout_fn=None):\n \"\"\"Continuously yield new checkpoint files as they appear.\n\n The iterator only checks for new checkpoints when control flow has been\n reverted to it. This means it can miss checkpoints if your code takes longer\n to run between iterations than `min_interval_secs` or the interval at which\n new checkpoints are written.\n\n The `timeout` argument is the maximum number of seconds to block waiting for\n a new checkpoint. It is used in combination with the `timeout_fn` as\n follows:\n\n * If the timeout expires and no `timeout_fn` was specified, the iterator\n stops yielding.\n * If a `timeout_fn` was specified, that function is called and if it returns\n a true boolean value the iterator stops yielding.\n * If the function returns a false boolean value then the iterator resumes the\n wait for new checkpoints. At this point the timeout logic applies again.\n\n This behavior gives control to callers on what to do if checkpoints do not\n come fast enough or stop being generated. For example, if callers have a way\n to detect that the training has stopped and know that no new checkpoints\n will be generated, they can provide a `timeout_fn` that returns `True` when\n the training has stopped. If they know that the training is still going on\n they return `False` instead.\n\n Args:\n checkpoint_dir: The directory in which checkpoints are saved.\n min_interval_secs: The minimum number of seconds between yielding\n checkpoints.\n timeout: The maximum amount of time to wait between checkpoints. If left as\n `None`, then the process will wait indefinitely.\n timeout_fn: Optional function to call after a timeout. If the function\n returns True, then it means that no new checkpoints will be generated and\n the iterator will exit. 
The function is called with no arguments.\n\n Yields:\n String paths to latest checkpoint files as they arrive.\n \"\"\"\n checkpoint_path = None\n while True:\n new_checkpoint_path = wait_for_new_checkpoint(\n checkpoint_dir, checkpoint_path, timeout=timeout)\n if new_checkpoint_path is None:\n if not timeout_fn:\n # timed out\n logging.info('Timed-out waiting for a checkpoint.')\n return\n if timeout_fn():\n # The timeout_fn indicated that we are truly done.\n return\n else:\n # The timeout_fn indicated that more checkpoints may come.\n continue\n start = time.time()\n checkpoint_path = new_checkpoint_path\n yield checkpoint_path\n time_to_next_eval = start + min_interval_secs - time.time()\n if time_to_next_eval > 0:\n time.sleep(time_to_next_eval)\n\n\nclass SummaryAtEndHook(session_run_hook.SessionRunHook):\n \"\"\"A run hook that saves a summary with the results of evaluation.\"\"\"\n\n def __init__(self,\n log_dir=None,\n summary_writer=None,\n summary_op=None,\n feed_dict=None):\n \"\"\"Constructs the Summary Hook.\n\n Args:\n log_dir: The directory where the summary events are saved to. Used only\n when `summary_writer` is not specified.\n summary_writer: A `tf.summary.FileWriter` to write summary events with.\n summary_op: The summary op to run. If left as `None`, then all summaries\n in the tf.GraphKeys.SUMMARIES collection are used.\n feed_dict: An optional feed dictionary to use when evaluating the\n summaries.\n\n Raises:\n ValueError: If both `log_dir` and `summary_writer` are `None`.\n \"\"\"\n self._summary_op = summary_op\n self._replace_summary_op = summary_op is None\n self._feed_dict = feed_dict\n self._summary_writer = summary_writer\n self._log_dir = log_dir\n if self._log_dir is None and self._summary_writer is None:\n raise ValueError('One of log_dir or summary_writer should be used.')\n\n def begin(self):\n if self._replace_summary_op:\n self._summary_op = summary.merge_all()\n self._global_step = training_util.get_or_create_global_step()\n\n def after_create_session(self, session, coord):\n if self._summary_writer is None and self._log_dir:\n self._summary_writer = summary.FileWriterCache.get(self._log_dir)\n\n def end(self, session):\n global_step = training_util.global_step(session, self._global_step)\n summary_str = session.run(self._summary_op, self._feed_dict)\n if self._summary_writer:\n self._summary_writer.add_summary(summary_str, global_step)\n self._summary_writer.flush()\n\n\ndef _scaffold_with_init(scaffold, saver, checkpoint_path):\n \"\"\"Creates a scaffold that loads the given checkpoint using an init_fn.\n\n Args:\n scaffold: The scaffold to copy.\n saver: The saver to use when restoring the checkpoint.\n checkpoint_path: An absolute path to a checkpoint.\n\n Returns:\n A scaffold with an init_fn that loads the given checkpoint. 
If the scaffold\n provided already has an init_fn, the scaffold is returned unchanged.\n \"\"\"\n\n def restore_checkpoint(_, session):\n saver.restore(session, checkpoint_path)\n\n if not scaffold.init_fn:\n scaffold = monitored_session.Scaffold(\n init_op=scaffold.init_op,\n init_feed_dict=scaffold.init_feed_dict,\n init_fn=restore_checkpoint,\n ready_op=scaffold.ready_op,\n local_init_op=scaffold.local_init_op,\n summary_op=scaffold.summary_op,\n saver=scaffold.saver)\n return scaffold\n\n\ndef evaluate_repeatedly(checkpoint_dir,\n master='',\n scaffold=None,\n eval_ops=None,\n feed_dict=None,\n final_ops=None,\n final_ops_feed_dict=None,\n eval_interval_secs=60,\n hooks=None,\n config=None,\n max_number_of_evaluations=None,\n timeout=None,\n timeout_fn=None):\n \"\"\"Repeatedly searches for a checkpoint in `checkpoint_dir` and evaluates it.\n\n During a single evaluation, the `eval_ops` is run until the session is\n interrupted or requested to finish. This is typically requested via a\n `tf.contrib.training.StopAfterNEvalsHook` which results in `eval_ops` running\n the requested number of times.\n\n Optionally, a user can pass in `final_ops`, a single `Tensor`, a list of\n `Tensors` or a dictionary from names to `Tensors`. The `final_ops` is\n evaluated a single time after `eval_ops` has finished running and the fetched\n values of `final_ops` are returned. If `final_ops` is left as `None`, then\n `None` is returned.\n\n One may also consider using a `tf.contrib.training.SummaryAtEndHook` to record\n summaries after the `eval_ops` have run. If `eval_ops` is `None`, the\n summaries run immediately after the model checkpoint has been restored.\n\n Note that `evaluate_once` creates a local variable used to track the number of\n evaluations run via `tf.contrib.training.get_or_create_eval_step`.\n Consequently, if a custom local init op is provided via a `scaffold`, the\n caller should ensure that the local init op also initializes the eval step.\n\n Args:\n checkpoint_dir: The directory where checkpoints are stored.\n master: The address of the TensorFlow master.\n scaffold: An tf.train.Scaffold instance for initializing variables and\n restoring variables. Note that `scaffold.init_fn` is used by the function\n to restore the checkpoint. If you supply a custom init_fn, then it must\n also take care of restoring the model from its checkpoint.\n eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names\n to `Tensors`, which is run until the session is requested to stop,\n commonly done by a `tf.contrib.training.StopAfterNEvalsHook`.\n feed_dict: The feed dictionary to use when executing the `eval_ops`.\n final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names\n to `Tensors`.\n final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`.\n eval_interval_secs: The minimum number of seconds between evaluations.\n hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the\n evaluation loop.\n config: An instance of `tf.ConfigProto` that will be used to\n configure the `Session`. If left as `None`, the default will be used.\n max_number_of_evaluations: The maximum times to run the evaluation. If left\n as `None`, then evaluation runs indefinitely.\n timeout: The maximum amount of time to wait between checkpoints. If left as\n `None`, then the process will wait indefinitely.\n timeout_fn: Optional function to call after a timeout. 
If the function\n returns True, then it means that no new checkpoints will be generated and\n the iterator will exit. The function is called with no arguments.\n\n Returns:\n The fetched values of `final_ops` or `None` if `final_ops` is `None`.\n \"\"\"\n eval_step = get_or_create_eval_step()\n\n # Prepare the run hooks.\n hooks = hooks or []\n\n if eval_ops is not None:\n update_eval_step = state_ops.assign_add(eval_step, 1)\n\n for h in hooks:\n if isinstance(h, StopAfterNEvalsHook):\n h._set_evals_completed_tensor(update_eval_step) # pylint: disable=protected-access\n\n if isinstance(eval_ops, dict):\n eval_ops['update_eval_step'] = update_eval_step\n elif isinstance(eval_ops, (tuple, list)):\n eval_ops = list(eval_ops) + [update_eval_step]\n else:\n eval_ops = [eval_ops, update_eval_step]\n\n final_ops_hook = basic_session_run_hooks.FinalOpsHook(final_ops,\n final_ops_feed_dict)\n hooks.append(final_ops_hook)\n\n num_evaluations = 0\n for checkpoint_path in checkpoints_iterator(\n checkpoint_dir,\n min_interval_secs=eval_interval_secs,\n timeout=timeout,\n timeout_fn=timeout_fn):\n\n session_creator = monitored_session.ChiefSessionCreator(\n scaffold=scaffold,\n checkpoint_filename_with_path=checkpoint_path,\n master=master,\n config=config)\n\n with monitored_session.MonitoredSession(\n session_creator=session_creator, hooks=hooks) as session:\n logging.info('Starting evaluation at ' + time.strftime(\n '%Y-%m-%d-%H:%M:%S', time.gmtime()))\n if eval_ops is not None:\n while not session.should_stop():\n session.run(eval_ops, feed_dict)\n\n logging.info('Finished evaluation at ' + time.strftime(\n '%Y-%m-%d-%H:%M:%S', time.gmtime()))\n num_evaluations += 1\n\n if (max_number_of_evaluations is not None and\n num_evaluations >= max_number_of_evaluations):\n return final_ops_hook.final_ops_values\n\n return final_ops_hook.final_ops_values\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Define tflite op hints (intrinsic operations).\n\nThis essentially allows defining a TensorFlow API for tflite operations in\nPython with hints on how they are represented in TensorFlow Lite. This basically\nis a form of tflite intrinsic. 
It wraps a subpart of a TensorFlow execution\ngraph and is useful for LSTMs and other complicated TensorFlow constructions\nthat are difficult to pattern match in TOCO, but are represented by a single\naccelerated tflite op.\n\nExample:\n  def tflite_cool_activation(input):\n    # A cool activation function.\n    custom = tf.contrib.lite.OpHint(\"cool_activation\")\n    input = custom.add_inputs(input)\n    output = tf.sigmoid(input) * input\n    custom.add_outputs(output)\n    return output\n\n  image = tf.placeholder(tf.float32, (1, 16, 16, 1))\n  output = tf.identity(tflite_cool_activation(image))\n\n  session = tf.Session()\n\n  graphdef_to_convert = tf.contrib.lite.convert_op_hints_to_stubs(session)\n  tflite_graph = tf.contrib.lite.toco_convert(graphdef_to_convert,\n                                              [image], [output])\n  with open(\"/tmp/graph.fb\", \"wb\") as fp:\n    fp.write(tflite_graph)\n\nHow does it work?:\n\nOpHint is a helper that you use when defining a vanilla python function.\nIt allows you to wrap arguments with tf.identities with some custom attributes.\nThese attributes allow you to find the original block of ops that was created.\nFor example, if you use cool_activation above you essentially get:\n\na_input = tf.identity()\nresult = tf.multiply(tf.sigmoid(a_input), a_input)\noutput = tf.identity()\n\na_input, output are identities that have parameters representing\nwhat argument they are, what the name of the function they should turn into\nin tf lite as well as a guid that uniquely identifies a particular invocation.\n\nOnce you have built your whole tensorflow graph, you can run it and train it\nas usual, but after you have done that, you need to convert the graph into\na form that replaces these subgraphs wrapped in identities to stub ops. These\nops don't actually exist in the normal TensorFlow runtime, but will be\nunderstood by toco later.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections as _collections\nimport itertools as _itertools\nimport uuid as _uuid\n\nfrom tensorflow.contrib import framework as _framework\nfrom tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.ops import array_ops as _array_ops\nfrom tensorflow.python.util.all_util import remove_undocumented\n\n\nclass OpHint(object):\n  \"\"\"A class that helps build tflite function invocations.\n\n  It allows you to take a bunch of TensorFlow ops and annotate the construction\n  such that toco knows how to convert it to tflite. This embeds a pseudo\n  function in a TensorFlow graph. This allows embedding high-level API usage\n  information in a lower level TensorFlow implementation so that an alternative\n  implementation can be substituted later.\n\n  Essentially, any \"input\" into this pseudo op is fed into an identity, and\n  attributes are added to that input before being used by the constituent ops\n  that make up the pseudo op. 
A similar process is done to any output that\n  is to be exported from the current op.\n\n  TODO(aselle): When TensorFlow functions functionality works for arbitrary\n  constructs, this mechanism can be retired and changed to use python defun's.\n  \"\"\"\n\n  # Attr constants that are used for representation in the GraphDef\n  FUNCTION_NAME_ATTR = \"_tflite_function_name\"\n  FUNCTION_UUID_ATTR = \"_tflite_function_uuid\"\n  FUNCTION_INPUT_INDEX_ATTR = \"_tflite_function_input_index\"\n  FUNCTION_OUTPUT_INDEX_ATTR = \"_tflite_function_output_index\"\n\n  def __init__(self, function_name, **kwargs):\n    \"\"\"Create an OpHint.\n\n    Args:\n      function_name: Name of the function (the custom op name in tflite)\n      **kwargs: Keyword arguments of any constant attributes for the function.\n    \"\"\"\n    self._function_name = function_name\n    self._unique_function_id = _uuid.uuid1().hex  # TODO(aselle): Unique enough?\n    self._curr_input_index = 0\n    self._curr_output_index = 0\n    self._attrs_to_store_later = kwargs\n    self._stored_attrs = False\n\n  def _setattr(self, dest_op, name, value):\n    tensor_value = _ops.convert_to_tensor(value)\n    # pylint: disable=protected-access\n    dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(\n        tensor=tensor_value.op.node_def.attr[\"value\"].tensor))\n    # pylint: enable=protected-access\n\n  def add_inputs(self, *args):\n    \"\"\"Add a sequence of inputs to the function invocation.\n\n    Args:\n      *args: List of inputs to be converted (should be tf.Tensor).\n    Returns:\n      Wrapped inputs (identity standins that have additional metadata). These\n      are also tf.Tensor's.\n    \"\"\"\n\n    def augmented_identity(arg):\n      identity_op = _array_ops.identity(arg)\n      # pylint: disable=protected-access\n      identity_op.op._set_attr(\n          OpHint.FUNCTION_NAME_ATTR,\n          _attr_value_pb2.AttrValue(s=self._function_name))\n      identity_op.op._set_attr(\n          OpHint.FUNCTION_UUID_ATTR,\n          _attr_value_pb2.AttrValue(s=self._unique_function_id))\n      identity_op.op._set_attr(\n          OpHint.FUNCTION_INPUT_INDEX_ATTR,\n          _attr_value_pb2.AttrValue(i=self._curr_input_index))\n      # pylint: enable=protected-access\n      self._curr_input_index += 1\n      return identity_op\n\n    return [augmented_identity(arg) for arg in args]\n\n  def add_outputs(self, *args):\n    \"\"\"Add a sequence of outputs to the function invocation.\n\n    Args:\n      *args: List of outputs to be converted (should be tf.Tensor).\n    Returns:\n      Wrapped outputs (identity standins that have additional metadata). 
These\n      are also tf.Tensor's.\n    \"\"\"\n\n    def augmented_identity(arg):\n      identity_op = _array_ops.identity(arg)\n      # pylint: disable=protected-access\n      identity_op.op._set_attr(\n          OpHint.FUNCTION_NAME_ATTR,\n          _attr_value_pb2.AttrValue(s=self._function_name))\n      identity_op.op._set_attr(\n          OpHint.FUNCTION_UUID_ATTR,\n          _attr_value_pb2.AttrValue(s=self._unique_function_id))\n      identity_op.op._set_attr(\n          OpHint.FUNCTION_OUTPUT_INDEX_ATTR,\n          _attr_value_pb2.AttrValue(i=self._curr_output_index))\n      # pylint: enable=protected-access\n      self._curr_output_index += 1\n      return identity_op\n\n    wrapped_outputs = [augmented_identity(arg) for arg in args]\n\n    if not self._stored_attrs:\n      for key, value in self._attrs_to_store_later.iteritems():\n        self._setattr(wrapped_outputs[0], \"_tflite_attr_\" + key, value)\n      self._stored_attrs = True\n\n    return wrapped_outputs\n\n\nclass _LiteFuncCall(object):\n  \"\"\"Represent a TensorFlow Lite custom function.\n\n  This is used to accumulate found hints in the graphdef into a single\n  conceptual unit.\n\n  Properties:\n    self.inputs: inputs to the op (hash from index # to argument)\n    self.outputs: outputs to the op (hash from index # to argument)\n    self.function_name: the tflite custom op name to use\n    self.uuid: a unique call id for this particular call (i.e.\n      multiple function calls would have the same function_name but different\n      uuids).\n    self.params: A param name to key value for op constant data. I.e. for\n      axis on a reduction, strides on a convolution, etc.\n  \"\"\"\n\n  def __init__(self):\n    self.inputs = {}\n    self.outputs = {}\n    self.function_name = None\n    self.uuid = None\n    self.params = {}\n\n  def __str__(self):\n    return \"tflite function %s call %s\\n\\tinputs: %r\\n\\toutputs: %r\" % (\n        self.function_name, self.uuid, self.inputs, self.outputs)\n\n\ndef _find_all_hints_in_graph_def(session):\n  \"\"\"Look at the current default graph and return a dict of LiteFuncCall objs.\n\n  Args:\n    session: A TensorFlow session that contains the graph to convert.\n  Returns:\n    a dictionary mapping each function invocation uuid to a `_LiteFuncCall`\n    object.\n  \"\"\"\n  func_calls = _collections.defaultdict(_LiteFuncCall)\n  seen_ops = set()\n\n  for op in session.graph.get_operations():\n    for operand in _itertools.chain(op.inputs, op.outputs):\n      if operand in seen_ops:\n        continue\n      seen_ops.add(operand)\n      attr = operand.op.node_def.attr\n      uuid = attr[OpHint.FUNCTION_UUID_ATTR].s\n      if OpHint.FUNCTION_UUID_ATTR not in attr:\n        continue\n      call_def = func_calls[uuid]\n      call_def.uuid = uuid\n      if OpHint.FUNCTION_UUID_ATTR in attr:\n        call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s\n      if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:\n        call_def.inputs[attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i] = operand\n      if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:\n        call_def.outputs[attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i] = operand\n\n      for a in attr:\n        if a.startswith(\"_tflite_attr_\"):\n          # TODO(aselle): Remember the attribute tensors so we can put them\n          # in collapse.\n          call_def.params[a.replace(\"_tflite_attr_\", \"\")] = attr[a].tensor\n\n  return func_calls\n\n\ndef _tensor_name_base(full_tensor_name):\n  \"\"\"Removes the device assignment code from a tensor.\n\n  e.g. 
_tensor_name_base(\"foo:3\") => \"foo\"\n\n Args:\n full_tensor_name: A tensor name that is annotated with a device placement\n (this is what tensor flow introspection gives).\n Returns:\n A name without any device assignment.\n \"\"\"\n return full_tensor_name.name.split(\":\")[0]\n\n\ndef convert_op_hints_to_stubs(session):\n \"\"\"Converts a graphdef with LiteOp hints into stub operations.\n\n This is used to prepare for toco conversion of complex intrinsic usages.\n\n Args:\n session: A TensorFlow session that contains the graph to convert.\n Returns:\n A new graphdef with all ops contained in OpHints being replaced by\n a single op call with the right parameters.\n \"\"\"\n hints = _find_all_hints_in_graph_def(session)\n current_graph_def = session.graph_def\n for call in hints.values():\n input_names = [None] * len(call.inputs)\n output_names = [None] * len(call.outputs)\n output_dtypes = [None] * len(call.outputs)\n output_quantized = False\n for input_index, tensor in call.inputs.items():\n input_names[input_index] = _tensor_name_base(tensor)\n for output_index, tensor in call.outputs.items():\n output_names[output_index] = _tensor_name_base(tensor)\n output_dtypes[output_index] = tensor.dtype.as_datatype_enum\n # TODO(aselle): Support quantized flag properly\n current_graph_def = _framework.fuse_op(\n current_graph_def, input_names, output_names, output_dtypes,\n output_quantized, call.uuid, call.function_name)\n for node in current_graph_def.node:\n if node.name == call.uuid:\n for param, tensor in call.params.items():\n node.attr[param].tensor.CopyFrom(tensor)\n return current_graph_def\n\n\n_allowed_symbols = [\"OpHint\", \"convert_op_hints_to_stubs\"]\nremove_undocumented(__name__, _allowed_symbols)\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Class Evaluator holds Metrics for the duration of an evaluation run.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nfrom tensorflow.contrib.eager.python import datasets\nfrom tensorflow.contrib.eager.python import metrics\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import summary_ops_v2 as summary_ops\n\n\nclass Evaluator(object):\n \"\"\"This holds and updates Metrics for the duration of a single eval run.\n\n Usage:\n evaluator = my_model.evaluator() # or MyEvaluator(my_model)\n for example_batch in ...:\n evaluator(example_batch)\n results = evaluator.all_metric_results(optional_summary_logdir)\n\n Or, if you are getting your examples from a tf.data.Dataset, you can use\n the evaluate_on_dataset() method.\n\n Implementers of Evaluators should\n (a) Call `track_metric()` and/or `track_evaluator()` in __init__().\n (b) Override the `call()` method. It will be passed the output of the\n model's `eval_data()` method, and should call its contained metrics\n (treating them as callables) and any child Evaluators (using their\n call() method to avoid calling eval_data() again).\n\n Args:\n model: A `Model` object with an `eval_data()` method.\n \"\"\"\n\n def __init__(self, model):\n self._model = model\n self._metrics = {}\n self._evaluators = {}\n if not context.executing_eagerly():\n self.call = function.defun(self.call)\n\n # ---- API for users ----\n def __call__(self, *args, **kwargs):\n \"\"\"Update metrics with a minibatch of input examples.\n\n Args:\n *args:\n **kwargs: Arguments representing an input mini-batch of examples to\n pass to self.model.eval_data().\n\n Returns:\n The op to execute or None if executing eagerly.\n \"\"\"\n return self.call(self._model.eval_data(*args, **kwargs))\n\n def init_variables(self):\n \"\"\"Return an op for initializing all contained uninitialized variables.\n\n Only for graph execution. Should be called after variables are created\n in the first execution of __call__().\n\n Returns:\n An op.\n\n Raises:\n RuntimeError: if eager execution is enabled.\n\n @compatibility(eager)\n Only for graph execution.\n @end_compatibility\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"Evaluator.init_variables() not needed when \"\n \"eager execution is enabled.\")\n return control_flow_ops.group([m.init_variables() for _, m in self.metrics])\n\n def all_metric_results(self, summary_logdir=None):\n \"\"\"Computes results for all contained metrics.\n\n Args:\n summary_logdir: An optional string. 
If specified, metric results\n will be written as summaries to this directory.\n\n Returns:\n A `dict` mapping string names to tensors.\n \"\"\"\n if summary_logdir is None:\n with summary_ops.never_record_summaries():\n return self._all_metric_results()\n else:\n def f():\n with summary_ops.create_file_writer(\n summary_logdir).as_default(), summary_ops.always_record_summaries():\n return self._all_metric_results()\n\n if context.executing_eagerly():\n return f()\n else:\n return function.defun(f)()\n\n def _all_metric_results(self):\n \"\"\"Implementation of `all_metric_results` in the summary context.\"\"\"\n results = {}\n for name, metric in six.iteritems(self._metrics):\n results[name] = metric.result()\n for prefix, evaluator in six.iteritems(self._evaluators):\n for name, metric in six.iteritems(evaluator._metrics): # pylint: disable=protected-access\n results[prefix + \"/\" + name] = metric.result()\n return results\n\n def evaluate_on_dataset(self, dataset, *args, **kwargs):\n \"\"\"Convenience method for performing an eval on a Dataset.\n\n Args:\n dataset: Dataset object with the input data to evaluate on.\n *args:\n **kwargs: Optional additional arguments to __call__(), except\n `summary_logdir`: if specified, metrics will be written as summaries\n to this directory.\n\n Returns:\n @compatibility(eager)\n When eager execution is enabled, this returns the result of performing\n an evaluation as a dictionary. With graph execution, this returns a tuple\n (init_op, call_op, results_op) which may be executed using this code:\n ```python\n sess.run(init_op)\n try:\n while True:\n sess.run(call_op)\n except tf.errors.OutOfRangeError:\n pass\n return sess.run(results_op) # A dictionary\n\n # equivalently:\n return evaluator.run_evaluation(init_op, call_op, results_op, sess=sess)\n ```\n @end_compatibility\n \"\"\"\n summary_logdir = kwargs.pop(\"summary_logdir\", None)\n if context.executing_eagerly():\n for example in datasets.Iterator(dataset):\n self.__call__(example, *args, **kwargs)\n return self.all_metric_results(summary_logdir)\n # Graph construction\n call_op = self.__call__(dataset.make_one_shot_iterator().get_next(), *args,\n **kwargs)\n init_op = self.init_variables()\n results_op = self.all_metric_results(summary_logdir)\n return (init_op, call_op, results_op)\n\n @staticmethod\n def run_evaluation(init_op, call_op, results_op, sess=None):\n \"\"\"Convenience method for running the ops returned by evaluate_on_dataset.\n\n Args:\n init_op: An op that initializes/resets evaluation state.\n call_op: An op that updates evaluation state on a mini-batch of examples.\n Must generate an tf.errors.OutOfRangeError when done.\n results_op: A dictionary of tensors that compute the final evaluation\n results from the evaluation state.\n sess: The Session to run the evaluation in. 
Defaults to the default\n Session.\n\n Returns:\n A dictionary of values, parallel to results_op.\n\n Raises:\n RuntimeError: if eager execution is enabled.\n\n @compatibility(eager)\n Only for graph execution.\n @end_compatibility\n \"\"\"\n if context.executing_eagerly():\n raise RuntimeError(\"Evaluator.run_evaluation() not supported when \"\n \"eager execution is enabled.\")\n sess = sess or ops.get_default_session()\n sess.run(init_op)\n try:\n while True:\n sess.run(call_op)\n except errors_impl.OutOfRangeError:\n pass\n return sess.run(results_op)\n\n # ---- To be implemented by descendants ---\n def call(self, eval_data):\n \"\"\"Update metrics using the output of self.model.\n\n Note: This function is executed as a graph function in graph mode.\n This means:\n a) Operations on the same resource are executed in textual order.\n This should make it easier to do things like add the updated\n value of a variable to another, for example.\n b) You don't need to worry about collecting the update ops to execute.\n All update ops added to the graph by this function will be executed.\n As a result, code should generally work the same way with graph or\n eager execution.\n\n Args:\n eval_data: The output of self.model.eval_data() on a mini-batch of\n examples.\n \"\"\"\n raise NotImplementedError(\"Evaluators must define a call member function.\")\n\n # ---- For use by descendants ---\n @property\n def model(self):\n return self._model\n\n def track_metric(self, metric):\n \"\"\"Add a Metric to be tracked.\n\n Metrics can only be tracked by one `Evaluator`. Metrics must be\n tracked or they will not appear in `all_metric_results()`.\n\n Args:\n metric: A `Metric` object.\n\n Returns:\n The `metric` passed into this function.\n\n Raises:\n RuntimeError: If called before __init__.\n TypeError: If `metric` is not of the correct type.\n ValueError: If there is a name collision between Metrics or `metric`\n has already been added to another `Evaluator`.\n \"\"\"\n if not hasattr(self, \"_metrics\"):\n raise RuntimeError(\n \"Need to call Evaluator.__init__ before adding metrics\")\n if not isinstance(metric, metrics.Metric):\n raise TypeError(\n \"Evaluator.track_metric() passed type %s, not a tfe.metrics.Metric\" %\n (type(metric),))\n if metric.name in self._metrics:\n if metric is self._metrics[metric.name]:\n return metric\n raise ValueError(\n \"Attempt to add two Metrics with the name '%s' to the same Evaluator \"\n \"'%s'\" % (metric.name, self.name))\n # pylint: disable=protected-access\n if hasattr(metric, \"_added_to_an_evaluator\"):\n raise ValueError(\"Metric %s already added to Evaluator %s\" %\n (metric.name, metric._added_to_an_evaluator))\n metric._added_to_an_evaluator = self.__class__.__name__\n # pylint: enable=protected-access\n self._metrics[metric.name] = metric\n return metric\n\n def track_evaluator(self, prefix, evaluator):\n \"\"\"Add a contained `Evaluator`.\n\n This is for delegating to another `Evaluator`, e.g. for when you have a\n model with multiple heads. Users should manually invoke the child\n `Evaluator`'s `call` method from their `call` method.\n\n Args:\n prefix: A string. 
Metrics from `evaluator` are exported with this\n prefix and a '/'.\n evaluator: An `Evaluator` object.\n\n Returns:\n The value of `evaluator` passed into this function.\n\n Raises:\n RuntimeError: If called before __init__.\n TypeError: If `evaluator` is not of the correct type.\n ValueError: If an `Evaluator` has already been added with that `prefix`.\n \"\"\"\n if not hasattr(self, \"_evaluators\"):\n raise RuntimeError(\n \"Need to call Evaluator.__init__ before adding evaluators\")\n if not isinstance(evaluator, Evaluator):\n raise TypeError(\n \"Evaluator.track_evaluator() passed type %s, not a tfe.Evaluator.\" %\n (type(evaluator),))\n if prefix in self._evaluators:\n if evaluator is self._evaluators[prefix]:\n return evaluator\n raise RuntimeError(\n \"Attempt to add two Evaluators with the same prefix '%s'.\" % prefix)\n self._evaluators[prefix] = evaluator\n return evaluator\n\n @property\n def metric_variables(self):\n v = []\n for metric in six.itervalues(self._metrics):\n v += metric.variables\n for evaluator in six.itervalues(self._evaluators):\n v += evaluator.metric_variables\n return v\n\n @property\n def metrics(self):\n \"\"\"Returns a list of (prefix, metric) pairs.\"\"\"\n m = []\n for metric in six.itervalues(self._metrics):\n m.append((\"\", metric))\n for prefix, evaluator in six.iteritems(self._evaluators):\n m += [(prefix + \"/\" + p, m) for p, m in evaluator.metrics]\n return m\n\n\nclass SparseSoftmaxEvaluator(Evaluator):\n \"\"\"Evaluator for a sparse softmax model.\n\n Computes a standard set of metrics for single-label, multi-class\n models.\n\n Args:\n model: A `SparseSoftmaxModel` object or a `Model` whose `eval_data()`\n method produces a `dict` containing values for the loss, true\n label, predicted class, and optional weights.\n loss_key: Optional key for looking up the value of the loss in the\n `eval_data()` dict. Defaults to \"loss\".\n label_key: Optional key for looking up the value of the label in the\n `eval_data()` dict. Defaults to \"label\".\n predicted_class_key: Optional key for looking up the value of the\n predicted class in the `eval_data()` dict. Defaults to \"predicted_class\".\n weights_key: Optional key for looking up the value of the weights\n in the `eval_data()` dict. Defaults to \"weights\". Note that weights\n are optional, and default to 1 if not present in `eval_data`.\n \"\"\"\n\n def __init__(self, model, loss_key=\"loss\", label_key=\"label\",\n predicted_class_key=\"predicted_class\", weights_key=\"weights\"):\n super(SparseSoftmaxEvaluator, self).__init__(model)\n # TODO(josh11b): Expand this to include everything from the standard\n # SparseSoftmax Head.\n self.avg_loss = self.track_metric(metrics.Mean(\"Avg Loss\"))\n self.accuracy = self.track_metric(metrics.Accuracy())\n self.loss_key = loss_key\n self.label_key = label_key\n self.predicted_class_key = predicted_class_key\n self.weights_key = weights_key\n\n def call(self, eval_data):\n \"\"\"Update metrics for `eval_data` dict (described above).\"\"\"\n weights = eval_data.get(self.weights_key, None)\n if weights is None:\n self.avg_loss(eval_data[self.loss_key])\n self.accuracy(eval_data[self.label_key],\n eval_data[self.predicted_class_key])\n else:\n self.avg_loss(eval_data[self.loss_key], weights=weights)\n self.accuracy(eval_data[self.label_key],\n eval_data[self.predicted_class_key],\n weights=weights)\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for SinhArcsinh Bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\n# pylint: disable=g-importing-member\nfrom tensorflow.contrib.distributions.python.ops.bijectors.sinh_arcsinh import SinhArcsinh\nfrom tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite\nfrom tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency\nfrom tensorflow.python.platform import test\n\n# pylint: enable=g-importing-member\n\n\nclass SinhArcsinhBijectorTest(test.TestCase):\n \"\"\"Tests correctness of the power transformation.\"\"\"\n\n def testBijectorVersusNumpyRewriteOfBasicFunctions(self):\n with self.test_session():\n skewness = 0.2\n tailweight = 2.0\n bijector = SinhArcsinh(\n skewness=skewness,\n tailweight=tailweight,\n validate_args=True)\n self.assertEqual(\"SinhArcsinh\", bijector.name)\n x = np.array([[[-2.01], [2.], [1e-4]]]).astype(np.float32)\n y = np.sinh((np.arcsinh(x) + skewness) * tailweight)\n self.assertAllClose(y, bijector.forward(x).eval())\n self.assertAllClose(x, bijector.inverse(y).eval())\n self.assertAllClose(\n np.sum(\n np.log(np.cosh(np.arcsinh(y) / tailweight - skewness)) -\n np.log(tailweight) - np.log(np.sqrt(y**2 + 1)),\n axis=-1),\n bijector.inverse_log_det_jacobian(y, event_ndims=1).eval())\n self.assertAllClose(\n -bijector.inverse_log_det_jacobian(y, event_ndims=1).eval(),\n bijector.forward_log_det_jacobian(x, event_ndims=1).eval(),\n rtol=1e-4,\n atol=0.)\n\n def testLargerTailWeightPutsMoreWeightInTails(self):\n with self.test_session():\n # Will broadcast together to shape [3, 2].\n x = [-1., 1.]\n tailweight = [[0.5], [1.0], [2.0]]\n bijector = SinhArcsinh(tailweight=tailweight, validate_args=True)\n y = bijector.forward(x).eval()\n\n # x = -1, 1 should be mapped to points symmetric about 0\n self.assertAllClose(y[:, 0], -1. 
* y[:, 1])\n\n # forward(1) should increase as tailweight increases, since higher\n # tailweight should map 1 to a larger number.\n forward_1 = y[:, 1] # The positive values of y.\n self.assertLess(forward_1[0], forward_1[1])\n self.assertLess(forward_1[1], forward_1[2])\n\n def testSkew(self):\n with self.test_session():\n # Will broadcast together to shape [3, 2].\n x = [-1., 1.]\n skewness = [[-1.], [0.], [1.]]\n bijector = SinhArcsinh(skewness=skewness, validate_args=True)\n y = bijector.forward(x).eval()\n\n # For skew < 0, |forward(-1)| > |forward(1)|\n self.assertGreater(np.abs(y[0, 0]), np.abs(y[0, 1]))\n\n # For skew = 0, |forward(-1)| = |forward(1)|\n self.assertAllClose(np.abs(y[1, 0]), np.abs(y[1, 1]))\n\n # For skew > 0, |forward(-1)| < |forward(1)|\n self.assertLess(np.abs(y[2, 0]), np.abs(y[2, 1]))\n\n def testScalarCongruencySkewness1Tailweight0p5(self):\n with self.test_session():\n bijector = SinhArcsinh(skewness=1.0, tailweight=0.5, validate_args=True)\n assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.0, rtol=0.05)\n\n def testScalarCongruencySkewnessNeg1Tailweight1p5(self):\n with self.test_session():\n bijector = SinhArcsinh(skewness=-1.0, tailweight=1.5, validate_args=True)\n assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.0, rtol=0.05)\n\n def testBijectiveAndFiniteSkewnessNeg1Tailweight0p5(self):\n with self.test_session():\n bijector = SinhArcsinh(skewness=-1., tailweight=0.5, validate_args=True)\n x = np.concatenate((-np.logspace(-2, 10, 1000), [0], np.logspace(\n -2, 10, 1000))).astype(np.float32)\n assert_bijective_and_finite(bijector, x, x, event_ndims=0, rtol=1e-3)\n\n def testBijectiveAndFiniteSkewness1Tailweight3(self):\n with self.test_session():\n bijector = SinhArcsinh(skewness=1., tailweight=3., validate_args=True)\n x = np.concatenate((-np.logspace(-2, 5, 1000), [0], np.logspace(\n -2, 5, 1000))).astype(np.float32)\n assert_bijective_and_finite(\n bijector, x, x, event_ndims=0, rtol=1e-3)\n\n def testBijectorEndpoints(self):\n with self.test_session():\n for dtype in (np.float32, np.float64):\n bijector = SinhArcsinh(\n skewness=dtype(0.), tailweight=dtype(1.), validate_args=True)\n bounds = np.array(\n [np.finfo(dtype).min, np.finfo(dtype).max], dtype=dtype)\n # Note that the above bijector is the identity bijector. Hence, the\n # log_det_jacobian will be 0. 
Because of this we use atol.\n assert_bijective_and_finite(\n bijector, bounds, bounds, event_ndims=0, atol=2e-6)\n\n def testBijectorOverRange(self):\n with self.test_session():\n for dtype in (np.float32, np.float64):\n skewness = np.array([1.2, 5.], dtype=dtype)\n tailweight = np.array([2., 10.], dtype=dtype)\n # The inverse will be defined up to where sinh is valid, which is\n # arcsinh(np.finfo(dtype).max).\n log_boundary = np.log(\n np.sinh(np.arcsinh(np.finfo(dtype).max) / tailweight - skewness))\n x = np.array([\n np.logspace(-2, log_boundary[0], base=np.e, num=1000),\n np.logspace(-2, log_boundary[1], base=np.e, num=1000)\n ], dtype=dtype)\n # Ensure broadcasting works.\n x = np.swapaxes(x, 0, 1)\n\n y = np.sinh((np.arcsinh(x) + skewness) * tailweight)\n bijector = SinhArcsinh(\n skewness=skewness, tailweight=tailweight, validate_args=True)\n\n self.assertAllClose(y, bijector.forward(x).eval(), rtol=1e-4, atol=0.)\n self.assertAllClose(x, bijector.inverse(y).eval(), rtol=1e-4, atol=0.)\n\n # On IBM PPC systems, longdouble (np.float128) is same as double except that it can have more precision.\n # Type double being of 8 bytes, can't hold square of max of float64 (which is also 8 bytes) and\n # below test fails due to overflow error giving inf. So this check avoids that error by skipping square\n # calculation and corresponding assert.\n\n if np.amax(y) <= np.sqrt(np.finfo(np.float128).max) and \\\n np.fabs(np.amin(y)) <= np.sqrt(np.fabs(np.finfo(np.float128).min)):\n\n # Do the numpy calculation in float128 to avoid inf/nan.\n y_float128 = np.float128(y)\n self.assertAllClose(\n np.log(np.cosh(\n np.arcsinh(y_float128) / tailweight - skewness) / np.sqrt(\n y_float128**2 + 1)) -\n np.log(tailweight),\n bijector.inverse_log_det_jacobian(y, event_ndims=0).eval(),\n rtol=1e-4,\n atol=0.)\n self.assertAllClose(\n -bijector.inverse_log_det_jacobian(y, event_ndims=0).eval(),\n bijector.forward_log_det_jacobian(x, event_ndims=0).eval(),\n rtol=1e-4,\n atol=0.)\n\n def testZeroTailweightRaises(self):\n with self.test_session():\n with self.assertRaisesOpError(\"not positive\"):\n SinhArcsinh(tailweight=0., validate_args=True).forward(1.0).eval()\n\n def testDefaultDtypeIsFloat32(self):\n with self.test_session():\n bijector = SinhArcsinh()\n self.assertEqual(bijector.tailweight.dtype, np.float32)\n self.assertEqual(bijector.skewness.dtype, np.float32)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A simple MNIST classifier which displays summaries in TensorBoard.\n\nThis is an unimpressive MNIST model, but it is a good example of using\ntf.name_scope to make a graph legible in the TensorBoard graph explorer, and of\nnaming summary tags so that they are grouped meaningfully in TensorBoard.\n\nIt demonstrates the functionality of every TensorBoard dashboard.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport sys\n\nimport tensorflow as tf\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nFLAGS = None\n\n\ndef train():\n # Import data\n mnist = input_data.read_data_sets(FLAGS.data_dir,\n fake_data=FLAGS.fake_data)\n\n sess = tf.InteractiveSession()\n # Create a multilayer model.\n\n # Input placeholders\n with tf.name_scope('input'):\n x = tf.placeholder(tf.float32, [None, 784], name='x-input')\n y_ = tf.placeholder(tf.int64, [None], name='y-input')\n\n with tf.name_scope('input_reshape'):\n image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])\n tf.summary.image('input', image_shaped_input, 10)\n\n # We can't initialize these variables to 0 - the network will get stuck.\n def weight_variable(shape):\n \"\"\"Create a weight variable with appropriate initialization.\"\"\"\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n def bias_variable(shape):\n \"\"\"Create a bias variable with appropriate initialization.\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n def variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):\n \"\"\"Reusable code for making a simple neural net layer.\n\n It does a matrix multiply, bias add, and then uses ReLU to nonlinearize.\n It also sets up name scoping so that the resultant graph is easy to read,\n and adds a number of summary ops.\n \"\"\"\n # Adding a name scope ensures logical grouping of the layers in the graph.\n with tf.name_scope(layer_name):\n # This Variable will hold the state of the weights for the layer\n with tf.name_scope('weights'):\n weights = weight_variable([input_dim, output_dim])\n variable_summaries(weights)\n with tf.name_scope('biases'):\n biases = bias_variable([output_dim])\n variable_summaries(biases)\n with tf.name_scope('Wx_plus_b'):\n preactivate = tf.matmul(input_tensor, weights) + biases\n 
tf.summary.histogram('pre_activations', preactivate)\n activations = act(preactivate, name='activation')\n tf.summary.histogram('activations', activations)\n return activations\n\n hidden1 = nn_layer(x, 784, 500, 'layer1')\n\n with tf.name_scope('dropout'):\n keep_prob = tf.placeholder(tf.float32)\n tf.summary.scalar('dropout_keep_probability', keep_prob)\n dropped = tf.nn.dropout(hidden1, keep_prob)\n\n # Do not apply softmax activation yet, see below.\n y = nn_layer(dropped, 500, 10, 'layer2', act=tf.identity)\n\n with tf.name_scope('cross_entropy'):\n # The raw formulation of cross-entropy,\n #\n # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.softmax(y)),\n # reduction_indices=[1]))\n #\n # can be numerically unstable.\n #\n # So here we use tf.losses.sparse_softmax_cross_entropy on the\n # raw logit outputs of the nn_layer above, and then average across\n # the batch.\n with tf.name_scope('total'):\n cross_entropy = tf.losses.sparse_softmax_cross_entropy(\n labels=y_, logits=y)\n tf.summary.scalar('cross_entropy', cross_entropy)\n\n with tf.name_scope('train'):\n train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(\n cross_entropy)\n\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(tf.argmax(y, 1), y_)\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', accuracy)\n\n # Merge all the summaries and write them out to\n # /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default)\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)\n test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')\n tf.global_variables_initializer().run()\n\n # Train the model, and also write summaries.\n # Every 10th step, measure test-set accuracy, and write test summaries\n # All other steps, run train_step on training data, & add training summaries\n\n def feed_dict(train):\n \"\"\"Make a TensorFlow feed_dict: maps data onto Tensor placeholders.\"\"\"\n if train or FLAGS.fake_data:\n xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)\n k = FLAGS.dropout\n else:\n xs, ys = mnist.test.images, mnist.test.labels\n k = 1.0\n return {x: xs, y_: ys, keep_prob: k}\n\n for i in range(FLAGS.max_steps):\n if i % 10 == 0: # Record summaries and test-set accuracy\n summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))\n test_writer.add_summary(summary, i)\n print('Accuracy at step %s: %s' % (i, acc))\n else: # Record train set summaries, and train\n if i % 100 == 99: # Record execution stats\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n summary, _ = sess.run([merged, train_step],\n feed_dict=feed_dict(True),\n options=run_options,\n run_metadata=run_metadata)\n train_writer.add_run_metadata(run_metadata, 'step%03d' % i)\n train_writer.add_summary(summary, i)\n print('Adding run metadata for', i)\n else: # Record a summary\n summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True))\n train_writer.add_summary(summary, i)\n train_writer.close()\n test_writer.close()\n\n\ndef main(_):\n if tf.gfile.Exists(FLAGS.log_dir):\n tf.gfile.DeleteRecursively(FLAGS.log_dir)\n tf.gfile.MakeDirs(FLAGS.log_dir)\n train()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--fake_data', nargs='?', const=True, type=bool,\n default=False,\n help='If true, uses fake data for unit 
testing.')\n parser.add_argument('--max_steps', type=int, default=1000,\n help='Number of steps to run trainer.')\n parser.add_argument('--learning_rate', type=float, default=0.001,\n help='Initial learning rate')\n parser.add_argument('--dropout', type=float, default=0.9,\n help='Keep probability for training dropout.')\n parser.add_argument(\n '--data_dir',\n type=str,\n default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),\n 'tensorflow/mnist/input_data'),\n help='Directory for storing input data')\n parser.add_argument(\n '--log_dir',\n type=str,\n default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),\n 'tensorflow/mnist/logs/mnist_with_summaries'),\n help='Summaries log directory')\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n", "# -*- coding: utf-8 -*-\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests of EntropyBottleneck class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.coder.python.layers import entropybottleneck\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import gradient_descent\n\n\nclass EntropyBottleneckTest(test.TestCase):\n\n def test_noise(self):\n # Tests that the noise added is uniform noise between -0.5 and 0.5.\n inputs = array_ops.placeholder(dtypes.float32, (None, 1))\n layer = entropybottleneck.EntropyBottleneck()\n noisy, _ = layer(inputs, training=True)\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n values = np.linspace(-50, 50, 100)[:, None]\n noisy, = sess.run([noisy], {inputs: values})\n self.assertFalse(np.allclose(values, noisy, rtol=0, atol=.49))\n self.assertAllClose(values, noisy, rtol=0, atol=.5)\n\n def test_quantization(self):\n # Tests that inputs are quantized to full integer values, even after\n # quantiles have been updated.\n inputs = array_ops.placeholder(dtypes.float32, (None, 1))\n layer = entropybottleneck.EntropyBottleneck(optimize_integer_offset=False)\n quantized, _ = layer(inputs, training=False)\n opt = gradient_descent.GradientDescentOptimizer(learning_rate=1)\n self.assertTrue(len(layer.losses) == 1)\n step = opt.minimize(layer.losses[0])\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n sess.run(step)\n values = np.linspace(-50, 50, 100)[:, None]\n quantized, = sess.run([quantized], {inputs: values})\n self.assertAllClose(np.around(values), quantized, rtol=0, atol=1e-6)\n\n def test_quantization_optimized_offset(self):\n # Tests that inputs are not quantized to full integer values after quantiles\n # have 
been updated. However, the difference between input and output should\n # be between -0.5 and 0.5, and the offset must be consistent.\n inputs = array_ops.placeholder(dtypes.float32, (None, 1))\n layer = entropybottleneck.EntropyBottleneck(optimize_integer_offset=True)\n quantized, _ = layer(inputs, training=False)\n opt = gradient_descent.GradientDescentOptimizer(learning_rate=1)\n self.assertTrue(len(layer.losses) == 1)\n step = opt.minimize(layer.losses[0])\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n sess.run(step)\n values = np.linspace(-50, 50, 100)[:, None]\n quantized, = sess.run([quantized], {inputs: values})\n self.assertAllClose(values, quantized, rtol=0, atol=.5)\n diff = np.ravel(np.around(values) - quantized) % 1\n self.assertAllClose(diff, np.full_like(diff, diff[0]), rtol=0, atol=5e-6)\n self.assertNotEqual(diff[0], 0)\n\n def test_codec(self):\n # Tests that inputs are compressed and decompressed correctly, and quantized\n # to full integer values, even after quantiles have been updated.\n inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))\n layer = entropybottleneck.EntropyBottleneck(\n data_format=\"channels_last\", init_scale=60,\n optimize_integer_offset=False)\n bitstrings = layer.compress(inputs)\n decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])\n opt = gradient_descent.GradientDescentOptimizer(learning_rate=1)\n self.assertTrue(len(layer.losses) == 1)\n step = opt.minimize(layer.losses[0])\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n sess.run(step)\n self.assertTrue(len(layer.updates) == 1)\n sess.run(layer.updates[0])\n values = np.linspace(-50, 50, 100)[None, :, None]\n decoded, = sess.run([decoded], {inputs: values})\n self.assertAllClose(np.around(values), decoded, rtol=0, atol=1e-6)\n\n def test_codec_optimized_offset(self):\n # Tests that inputs are compressed and decompressed correctly, and not\n # quantized to full integer values after quantiles have been updated.\n # However, the difference between input and output should be between -0.5\n # and 0.5, and the offset must be consistent.\n inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))\n layer = entropybottleneck.EntropyBottleneck(\n data_format=\"channels_last\", init_scale=60,\n optimize_integer_offset=True)\n bitstrings = layer.compress(inputs)\n decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])\n opt = gradient_descent.GradientDescentOptimizer(learning_rate=1)\n self.assertTrue(len(layer.losses) == 1)\n step = opt.minimize(layer.losses[0])\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n sess.run(step)\n self.assertTrue(len(layer.updates) == 1)\n sess.run(layer.updates[0])\n values = np.linspace(-50, 50, 100)[None, :, None]\n decoded, = sess.run([decoded], {inputs: values})\n self.assertAllClose(values, decoded, rtol=0, atol=.5)\n diff = np.ravel(np.around(values) - decoded) % 1\n self.assertAllClose(diff, np.full_like(diff, diff[0]), rtol=0, atol=5e-6)\n self.assertNotEqual(diff[0], 0)\n\n def test_codec_clipping(self):\n # Tests that inputs are compressed and decompressed correctly, and clipped\n # to the expected range.\n inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))\n layer = entropybottleneck.EntropyBottleneck(\n data_format=\"channels_last\", init_scale=40)\n bitstrings = layer.compress(inputs)\n decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])\n with self.test_session() as 
sess:\n sess.run(variables.global_variables_initializer())\n self.assertTrue(len(layer.updates) == 1)\n sess.run(layer.updates[0])\n values = np.linspace(-50, 50, 100)[None, :, None]\n decoded, = sess.run([decoded], {inputs: values})\n expected = np.clip(np.around(values), -40, 40)\n self.assertAllClose(expected, decoded, rtol=0, atol=1e-6)\n\n def test_channels_last(self):\n # Test the layer with more than one channel and multiple input dimensions,\n # with the channels in the last dimension.\n inputs = array_ops.placeholder(dtypes.float32, (None, None, None, 2))\n layer = entropybottleneck.EntropyBottleneck(\n data_format=\"channels_last\", init_scale=50)\n noisy, _ = layer(inputs, training=True)\n quantized, _ = layer(inputs, training=False)\n bitstrings = layer.compress(inputs)\n decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n self.assertTrue(len(layer.updates) == 1)\n sess.run(layer.updates[0])\n values = 5 * np.random.normal(size=(7, 5, 3, 2))\n noisy, quantized, decoded = sess.run(\n [noisy, quantized, decoded], {inputs: values})\n self.assertAllClose(values, noisy, rtol=0, atol=.5)\n self.assertAllClose(values, quantized, rtol=0, atol=.5)\n self.assertAllClose(values, decoded, rtol=0, atol=.5)\n\n def test_channels_first(self):\n # Test the layer with more than one channel and multiple input dimensions,\n # with the channel dimension right after the batch dimension.\n inputs = array_ops.placeholder(dtypes.float32, (None, 3, None, None))\n layer = entropybottleneck.EntropyBottleneck(\n data_format=\"channels_first\", init_scale=50)\n noisy, _ = layer(inputs, training=True)\n quantized, _ = layer(inputs, training=False)\n bitstrings = layer.compress(inputs)\n decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n self.assertTrue(len(layer.updates) == 1)\n sess.run(layer.updates[0])\n values = 5 * np.random.normal(size=(2, 3, 5, 7))\n noisy, quantized, decoded = sess.run(\n [noisy, quantized, decoded], {inputs: values})\n self.assertAllClose(values, noisy, rtol=0, atol=.5)\n self.assertAllClose(values, quantized, rtol=0, atol=.5)\n self.assertAllClose(values, decoded, rtol=0, atol=.5)\n\n def test_compress(self):\n # Test compression and decompression, and produce test data for\n # `test_decompress`. 
If you set the constant at the end to `True`, this test\n # will fail and the log will contain the new test data.\n inputs = array_ops.placeholder(dtypes.float32, (2, 3, 10))\n layer = entropybottleneck.EntropyBottleneck(\n data_format=\"channels_first\", filters=(), init_scale=2)\n bitstrings = layer.compress(inputs)\n decoded = layer.decompress(bitstrings, array_ops.shape(inputs)[1:])\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n self.assertTrue(len(layer.updates) == 1)\n sess.run(layer.updates[0])\n values = 5 * np.random.uniform(size=(2, 3, 10)) - 2.5\n bitstrings, quantized_cdf, decoded = sess.run(\n [bitstrings, layer._quantized_cdf, decoded], {inputs: values})\n self.assertAllClose(values, decoded, rtol=0, atol=.5)\n # Set this constant to `True` to log new test data for `test_decompress`.\n if False: # pylint:disable=using-constant-test\n assert False, (bitstrings, quantized_cdf, decoded)\n\n # Data generated by `test_compress`.\n # pylint:disable=g-inconsistent-quotes,bad-whitespace\n bitstrings = np.array([\n b'\\x1e\\xbag}\\xc2\\xdaN\\x8b\\xbd.',\n b'\\x8dF\\xf0%\\x1cv\\xccllW'\n ], dtype=object)\n\n quantized_cdf = np.array([\n [ 0, 15636, 22324, 30145, 38278, 65536],\n [ 0, 19482, 26927, 35052, 42904, 65535],\n [ 0, 21093, 28769, 36919, 44578, 65536]\n ], dtype=np.int32)\n\n expected = np.array([\n [[-2., 1., 0., -2., -1., -2., -2., -2., 2., -1.],\n [ 1., 2., 1., 0., -2., -2., 1., 2., 0., 1.],\n [ 2., 0., -2., 2., 0., -1., -2., 0., 2., 0.]],\n [[ 1., 2., 0., -1., 1., 2., 1., 1., 2., -2.],\n [ 2., -1., -1., 0., -1., 2., 0., 2., -2., 2.],\n [ 2., -2., -2., -1., -2., 1., -2., 0., 0., 0.]]\n ], dtype=np.float32)\n # pylint:enable=g-inconsistent-quotes,bad-whitespace\n\n def test_decompress(self):\n # Test that decompression of values compressed with a previous version\n # works, i.e. 
that the file format doesn't change across revisions.\n bitstrings = array_ops.placeholder(dtypes.string)\n input_shape = array_ops.placeholder(dtypes.int32)\n quantized_cdf = array_ops.placeholder(dtypes.int32)\n layer = entropybottleneck.EntropyBottleneck(\n data_format=\"channels_first\", filters=(), dtype=dtypes.float32)\n layer.build(self.expected.shape)\n layer._quantized_cdf = quantized_cdf\n decoded = layer.decompress(bitstrings, input_shape[1:])\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n decoded, = sess.run([decoded], {\n bitstrings: self.bitstrings, input_shape: self.expected.shape,\n quantized_cdf: self.quantized_cdf})\n self.assertAllClose(self.expected, decoded, rtol=0, atol=1e-6)\n\n def test_build_decompress(self):\n # Test that layer can be built when `decompress` is the first call to it.\n bitstrings = array_ops.placeholder(dtypes.string)\n input_shape = array_ops.placeholder(dtypes.int32, shape=[3])\n layer = entropybottleneck.EntropyBottleneck(dtype=dtypes.float32)\n layer.decompress(bitstrings, input_shape[1:], channels=5)\n self.assertTrue(layer.built)\n\n def test_pmf_normalization(self):\n # Test that probability mass functions are normalized correctly.\n layer = entropybottleneck.EntropyBottleneck(dtype=dtypes.float32)\n layer.build((None, 10))\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n pmf, = sess.run([layer._pmf])\n self.assertAllClose(np.ones(10), np.sum(pmf, axis=-1), rtol=0, atol=1e-6)\n\n def test_visualize(self):\n # Test that summary op can be constructed.\n layer = entropybottleneck.EntropyBottleneck(dtype=dtypes.float32)\n layer.build((None, 10))\n summary = layer.visualize()\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n sess.run([summary])\n\n def test_normalization(self):\n # Test that densities are normalized correctly.\n inputs = array_ops.placeholder(dtypes.float32, (None, 1))\n layer = entropybottleneck.EntropyBottleneck(filters=(2,))\n _, likelihood = layer(inputs, training=True)\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n x = np.repeat(np.arange(-200, 201), 1000)[:, None]\n likelihood, = sess.run([likelihood], {inputs: x})\n self.assertEqual(x.shape, likelihood.shape)\n integral = np.sum(likelihood) * .001\n self.assertAllClose(1, integral, rtol=0, atol=1e-4)\n\n def test_entropy_estimates(self):\n # Test that entropy estimates match actual range coding.\n inputs = array_ops.placeholder(dtypes.float32, (1, None, 1))\n layer = entropybottleneck.EntropyBottleneck(\n filters=(2, 3), data_format=\"channels_last\")\n _, likelihood = layer(inputs, training=True)\n diff_entropy = math_ops.reduce_sum(math_ops.log(likelihood)) / -np.log(2)\n _, likelihood = layer(inputs, training=False)\n disc_entropy = math_ops.reduce_sum(math_ops.log(likelihood)) / -np.log(2)\n bitstrings = layer.compress(inputs)\n with self.test_session() as sess:\n sess.run(variables.global_variables_initializer())\n self.assertTrue(len(layer.updates) == 1)\n sess.run(layer.updates[0])\n diff_entropy, disc_entropy, bitstrings = sess.run(\n [diff_entropy, disc_entropy, bitstrings],\n {inputs: np.random.normal(size=(1, 10000, 1))})\n codelength = 8 * sum(len(bitstring) for bitstring in bitstrings)\n self.assertAllClose(diff_entropy, disc_entropy, rtol=5e-3, atol=0)\n self.assertAllClose(disc_entropy, codelength, rtol=5e-3, atol=0)\n self.assertGreater(codelength, disc_entropy)\n\n\nif __name__ == 
\"__main__\":\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"High level API for learning (DEPRECATED).\n\nThis module and all its submodules are deprecated. See\n[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)\nfor migration instructions.\n\nSee the @{$python/contrib.learn} guide.\n\n@@BaseEstimator\n@@Estimator\n@@Trainable\n@@Evaluable\n@@KMeansClustering\n@@ModeKeys\n@@ModelFnOps\n@@MetricSpec\n@@PredictionKey\n@@DNNClassifier\n@@DNNEstimator\n@@DNNRegressor\n@@DNNLinearCombinedRegressor\n@@DNNLinearCombinedEstimator\n@@DNNLinearCombinedClassifier\n@@DynamicRnnEstimator\n@@LinearClassifier\n@@LinearEstimator\n@@LinearRegressor\n@@LogisticRegressor\n@@StateSavingRnnEstimator\n@@SVM\n@@SKCompat\n\n@@Head\n@@multi_class_head\n@@multi_label_head\n@@binary_svm_head\n@@regression_head\n@@poisson_regression_head\n@@multi_head\n@@no_op_train_fn\n\n@@Experiment\n@@ExportStrategy\n@@TaskType\n\n@@NanLossDuringTrainingError\n@@RunConfig\n@@evaluate\n@@infer\n@@run_feeds\n@@run_n\n@@train\n\n@@extract_dask_data\n@@extract_dask_labels\n@@extract_pandas_data\n@@extract_pandas_labels\n@@extract_pandas_matrix\n@@infer_real_valued_columns_from_input\n@@infer_real_valued_columns_from_input_fn\n@@read_batch_examples\n@@read_batch_features\n@@read_batch_record_features\n@@read_keyed_batch_examples\n@@read_keyed_batch_examples_shared_queue\n@@read_keyed_batch_features\n@@read_keyed_batch_features_shared_queue\n\n@@InputFnOps\n@@ProblemType\n@@build_parsing_serving_input_fn\n@@make_export_strategy\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint: disable=wildcard-import\nfrom tensorflow.contrib.learn.python.learn import *\n# pylint: enable=wildcard-import\n\nfrom tensorflow.contrib.learn.python.learn import learn_runner_lib as learn_runner\n\nfrom tensorflow.python.util.all_util import remove_undocumented\n\n_allowed_symbols = ['datasets', 'head', 'io', 'learn_runner', 'models',\n 'monitors', 'NotFittedError', 'ops', 'preprocessing',\n 'utils', 'graph_actions']\n\nremove_undocumented(__name__, _allowed_symbols)\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Exposes the Python wrapper conversion to trt_graph.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint: disable=unused-import,line-too-long\nimport six as _six\nfrom tensorflow.contrib.tensorrt.wrap_conversion import calib_convert\nfrom tensorflow.contrib.tensorrt.wrap_conversion import get_linked_tensorrt_version\nfrom tensorflow.contrib.tensorrt.wrap_conversion import get_loaded_tensorrt_version\nfrom tensorflow.contrib.tensorrt.wrap_conversion import trt_convert\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import errors_impl as _impl\nfrom tensorflow.python.framework import meta_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.grappler import tf_optimizer\nfrom tensorflow.python.platform import tf_logging\nfrom tensorflow.python.util import compat\n\n# pylint: enable=unused-import,line-too-long\n\n\n# TODO(skama): get outputs from session when implemented as c++\n# optimization pass\ndef create_inference_graph(input_graph_def,\n outputs,\n max_batch_size=1,\n max_workspace_size_bytes=2 << 20,\n precision_mode=\"FP32\",\n minimum_segment_size=3,\n is_dynamic_op=False,\n maximum_cached_engines=1,\n cached_engine_batches=[]):\n \"\"\"Python wrapper for the TRT transformation.\n\n Args:\n input_graph_def: GraphDef object containing a model to be transformed.\n outputs: list of tensors or node names for the model outputs.\n max_batch_size: max size for the input batch\n max_workspace_size_bytes: parameter to control memory allocation (in Bytes)\n precision_mode: one of 'FP32', 'FP16' and 'INT8'\n minimum_segment_size: the minimum number of nodes required for a subgraph to\n be replaced by TRTEngineOp.\n is_dynamic_op: whether to generate dynamic TRT ops which will build the TRT\n network and engine at run time.\n maximum_cached_engines: max number of cached TRT engines in dynamic TRT ops.\n cached_engine_batches: batch sizes used to pre-create cached engines.\n\n Returns:\n New GraphDef with TRTEngineOps placed in graph replacing subgraphs.\n\n Raises:\n ValueError: if the provided precision mode is invalid.\n RuntimeError: if the returned status message is malformed.\n \"\"\"\n supported_precision_modes = {\"FP32\": 0, \"FP16\": 1, \"INT8\": 2}\n if precision_mode.upper() not in supported_precision_modes:\n raise ValueError((\"precision mode '{}' is not supported.\"\n \"It should be one of {}\").format(\n precision_mode, \"{'FP32', 'FP16', 'INT8'}\"))\n mode = supported_precision_modes[precision_mode.upper()]\n compiled_version = get_linked_tensorrt_version()\n loaded_version = get_loaded_tensorrt_version()\n version_mismatch = False\n if loaded_version[0] < compiled_version[0]:\n 
tf_logging.error(\n \"TensorRT version mismatch. Tensorflow was compiled against \" +\n \"TensorRT %s but library loaded from environment is TensorRT %s\" %\n (\".\".join([str(x) for x in compiled_version]),\n \".\".join([str(x) for x in loaded_version])) +\n \". Please make sure that correct version of TensorRT \" +\n \"is available in the system and added to ldconfig or LD_LIBRARY_PATH\"\n )\n raise RuntimeError(\"Incompatible TensorRT library version\")\n for i in zip(loaded_version, compiled_version):\n if i[0] != i[1]:\n tf_logging.warn(\"TensorRT mismatch. Compiled against version \" +\n \"%s, but loaded %s. Things may not work\" %\n (\".\".join([str(x) for x in compiled_version]),\n \".\".join([str(x) for x in loaded_version])))\n version_mismatch = True\n break\n if not version_mismatch:\n tf_logging.info(\"Running against TensorRT version %s\" % \".\".join(\n [str(x) for x in loaded_version]))\n\n def py2bytes(inp):\n return inp\n\n def py3bytes(inp):\n return inp.encode(\"utf-8\", errors=\"surrogateescape\")\n\n def py2string(inp):\n return inp\n\n def py3string(inp):\n return inp.decode(\"utf-8\")\n\n if _six.PY2:\n to_bytes = py2bytes\n to_string = py2string\n else:\n to_bytes = py3bytes\n to_string = py3string\n\n out_names = []\n for i in outputs:\n if isinstance(i, ops.Tensor):\n out_names.append(to_bytes(i.name))\n else:\n out_names.append(to_bytes(i))\n\n input_graph_def_str = input_graph_def.SerializeToString()\n\n # TODO(sami): Fix this when we can return status from C++ library\n # There is a problem with the TF internal library setup that doesn't\n # allow us to return a status object from C++. Thus we return a\n # pair or strings where first one is encoded status and the second\n # one is the transformed graphs protobuf string.\n out = trt_convert(input_graph_def_str, out_names, max_batch_size,\n max_workspace_size_bytes, mode, minimum_segment_size,\n is_dynamic_op, maximum_cached_engines,\n cached_engine_batches)\n status = to_string(out[0])\n output_graph_def_string = out[1]\n del input_graph_def_str # Save some memory\n if len(status) < 2:\n raise _impl.UnknownError(None, None, status)\n if status[:2] != \"OK\":\n msg = status.split(\";\")\n if len(msg) == 1:\n raise RuntimeError(\"Status message is malformed {}\".format(status))\n # pylint: disable=protected-access\n raise _impl._make_specific_exception(None, None, \";\".join(msg[1:]),\n int(msg[0]))\n # pylint: enable=protected-access\n output_graph_def = graph_pb2.GraphDef()\n output_graph_def.ParseFromString(output_graph_def_string)\n del output_graph_def_string # Save some memory\n return output_graph_def\n\n\ndef calib_graph_to_infer_graph(calibration_graph_def, is_dynamic_op=False):\n \"\"\"Convert an existing calibration graph to inference graph.\n\n Args:\n calibration_graph_def: the calibration GraphDef object with calibration data\n is_dynamic_op: whether to create dynamic static engines from calibration\n Returns:\n New GraphDef with TRTEngineOps placed in graph replacing calibration nodes.\n Raises:\n RuntimeError: if the returned status message is malformed.\n \"\"\"\n\n def py2string(inp):\n return inp\n\n def py3string(inp):\n return inp.decode(\"utf-8\")\n\n if _six.PY2:\n to_string = py2string\n else:\n to_string = py3string\n is_calib_graph = False\n for n in calibration_graph_def.node:\n if n.op == \"TRTEngineOp\":\n is_calib_graph = is_calib_graph or not n.attr[\"calibration_data\"].s\n if not is_calib_graph:\n tf_logging.error(\n \"Not a calib graph. 
Doesn't seem to contain any calibration nodes.\")\n return None\n graph_str = calibration_graph_def.SerializeToString()\n out = calib_convert(graph_str, is_dynamic_op)\n status = to_string(out[0])\n output_graph_def_string = out[1]\n del graph_str # Save some memory\n if len(status) < 2:\n raise _impl.UnknownError(None, None, status)\n if status[:2] != \"OK\":\n msg = status.split(\";\")\n if len(msg) == 1:\n raise RuntimeError(\"Status message is malformed {}\".format(status))\n # pylint: disable=protected-access\n raise _impl._make_specific_exception(None, None, \";\".join(msg[1:]),\n int(msg[0]))\n # pylint: enable=protected-access\n output_graph_def = graph_pb2.GraphDef()\n output_graph_def.ParseFromString(output_graph_def_string)\n del output_graph_def_string # Save some memory\n return output_graph_def\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Monte Carlo integration and helpers.\n\nSee the @{$python/contrib.bayesflow.monte_carlo} guide.\n\n@@expectation\n@@expectation_importance_sampler\n@@expectation_importance_sampler_logspace\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\n\n__all__ = [\n 'expectation',\n 'expectation_importance_sampler',\n 'expectation_importance_sampler_logspace',\n]\n\n\ndef expectation_importance_sampler(f,\n log_p,\n sampling_dist_q,\n z=None,\n n=None,\n seed=None,\n name='expectation_importance_sampler'):\n r\"\"\"Monte Carlo estimate of \\\\(E_p[f(Z)] = E_q[f(Z) p(Z) / q(Z)]\\\\).\n\n With \\\\(p(z) := exp^{log_p(z)}\\\\), this `Op` returns\n\n \\\\(n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ], z_i ~ q,\\\\)\n \\\\(\\approx E_q[ f(Z) p(Z) / q(Z) ]\\\\)\n \\\\(= E_p[f(Z)]\\\\)\n\n This integral is done in log-space with max-subtraction to better handle the\n often extreme values that `f(z) p(z) / q(z)` can take on.\n\n If `f >= 0`, it is up to 2x more efficient to exponentiate the result of\n `expectation_importance_sampler_logspace` applied to `Log[f]`.\n\n User supplies either `Tensor` of samples `z`, or number of samples to draw `n`\n\n Args:\n f: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape\n broadcastable to `q.batch_shape`.\n For example, `f` works \"just like\" `q.log_prob`.\n log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with\n shape broadcastable to `q.batch_shape`.\n For example, `log_p` works \"just like\" `sampling_dist_q.log_prob`.\n sampling_dist_q: The sampling distribution.\n `tf.contrib.distributions.Distribution`.\n `float64` `dtype` recommended.\n `log_p` and `q` should be supported on the same set.\n z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`.\n n: Integer 
`Tensor`. Number of samples to generate if `z` is not provided.\n seed: Python integer to seed the random number generator.\n name: A name to give this `Op`.\n\n Returns:\n The importance sampling estimate. `Tensor` with `shape` equal\n to batch shape of `q`, and `dtype` = `q.dtype`.\n \"\"\"\n q = sampling_dist_q\n with ops.name_scope(name, values=[z, n]):\n z = _get_samples(q, z, n, seed)\n\n log_p_z = log_p(z)\n q_log_prob_z = q.log_prob(z)\n\n def _importance_sampler_positive_f(log_f_z):\n # Same as expectation_importance_sampler_logspace, but using Tensors\n # rather than samples and functions. Allows us to sample once.\n log_values = log_f_z + log_p_z - q_log_prob_z\n return _logspace_mean(log_values)\n\n # With \\\\(f_{plus}(z) = max(0, f(z)), f_{minus}(z) = max(0, -f(z))\\\\),\n # \\\\(E_p[f(Z)] = E_p[f_{plus}(Z)] - E_p[f_{minus}(Z)]\\\\)\n # \\\\( = E_p[f_{plus}(Z) + 1] - E_p[f_{minus}(Z) + 1]\\\\)\n # Without incurring bias, 1 is added to each to prevent zeros in logspace.\n # The logarithm is approximately linear around 1 + epsilon, so this is good\n # for small values of 'z' as well.\n f_z = f(z)\n log_f_plus_z = math_ops.log(nn.relu(f_z) + 1.)\n log_f_minus_z = math_ops.log(nn.relu(-1. * f_z) + 1.)\n\n log_f_plus_integral = _importance_sampler_positive_f(log_f_plus_z)\n log_f_minus_integral = _importance_sampler_positive_f(log_f_minus_z)\n\n return math_ops.exp(log_f_plus_integral) - math_ops.exp(log_f_minus_integral)\n\n\ndef expectation_importance_sampler_logspace(\n log_f,\n log_p,\n sampling_dist_q,\n z=None,\n n=None,\n seed=None,\n name='expectation_importance_sampler_logspace'):\n r\"\"\"Importance sampling with a positive function, in log-space.\n\n With \\\\(p(z) := exp^{log_p(z)}\\\\), and \\\\(f(z) = exp{log_f(z)}\\\\),\n this `Op` returns\n\n \\\\(Log[ n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ] ], z_i ~ q,\\\\)\n \\\\(\\approx Log[ E_q[ f(Z) p(Z) / q(Z) ] ]\\\\)\n \\\\(= Log[E_p[f(Z)]]\\\\)\n\n This integral is done in log-space with max-subtraction to better handle the\n often extreme values that `f(z) p(z) / q(z)` can take on.\n\n In contrast to `expectation_importance_sampler`, this `Op` returns values in\n log-space.\n\n\n User supplies either `Tensor` of samples `z`, or number of samples to draw `n`\n\n Args:\n log_f: Callable mapping samples from `sampling_dist_q` to `Tensors` with\n shape broadcastable to `q.batch_shape`.\n For example, `log_f` works \"just like\" `sampling_dist_q.log_prob`.\n log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with\n shape broadcastable to `q.batch_shape`.\n For example, `log_p` works \"just like\" `q.log_prob`.\n sampling_dist_q: The sampling distribution.\n `tf.contrib.distributions.Distribution`.\n `float64` `dtype` recommended.\n `log_p` and `q` should be supported on the same set.\n z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`.\n n: Integer `Tensor`. Number of samples to generate if `z` is not provided.\n seed: Python integer to seed the random number generator.\n name: A name to give this `Op`.\n\n Returns:\n Logarithm of the importance sampling estimate. 
`Tensor` with `shape` equal\n to batch shape of `q`, and `dtype` = `q.dtype`.\n \"\"\"\n q = sampling_dist_q\n with ops.name_scope(name, values=[z, n]):\n z = _get_samples(q, z, n, seed)\n log_values = log_f(z) + log_p(z) - q.log_prob(z)\n return _logspace_mean(log_values)\n\n\ndef _logspace_mean(log_values):\n \"\"\"Evaluate `Log[E[values]]` in a stable manner.\n\n Args:\n log_values: `Tensor` holding `Log[values]`.\n\n Returns:\n `Tensor` of same `dtype` as `log_values`, reduced across dim 0.\n `Log[Mean[values]]`.\n \"\"\"\n # center = Max[Log[values]], with stop-gradient\n # The center hopefully keep the exponentiated term small. It is canceled\n # from the final result, so putting stop gradient on it will not change the\n # final result. We put stop gradient on to eliminate unnecessary computation.\n center = array_ops.stop_gradient(_sample_max(log_values))\n\n # centered_values = exp{Log[values] - E[Log[values]]}\n centered_values = math_ops.exp(log_values - center)\n\n # log_mean_of_values = Log[ E[centered_values] ] + center\n # = Log[ E[exp{log_values - E[log_values]}] ] + center\n # = Log[E[values]] - E[log_values] + center\n # = Log[E[values]]\n log_mean_of_values = math_ops.log(_sample_mean(centered_values)) + center\n\n return log_mean_of_values\n\n\ndef expectation(f, samples, log_prob=None, use_reparametrization=True,\n axis=0, keep_dims=False, name=None):\n \"\"\"Computes the Monte-Carlo approximation of \\\\(E_p[f(X)]\\\\).\n\n This function computes the Monte-Carlo approximation of an expectation, i.e.,\n\n \\\\(E_p[f(X)] \\approx= m^{-1} sum_i^m f(x_j), x_j\\ ~iid\\ p(X)\\\\)\n\n where:\n\n - `x_j = samples[j, ...]`,\n - `log(p(samples)) = log_prob(samples)` and\n - `m = prod(shape(samples)[axis])`.\n\n Tricks: Reparameterization and Score-Gradient\n\n When p is \"reparameterized\", i.e., a diffeomorphic transformation of a\n parameterless distribution (e.g.,\n `Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`), we can swap gradient and\n expectation, i.e.,\n grad[ Avg{ \\\\(s_i : i=1...n\\\\) } ] = Avg{ grad[\\\\(s_i\\\\)] : i=1...n } where\n S_n = Avg{\\\\(s_i\\\\)}` and `\\\\(s_i = f(x_i), x_i ~ p\\\\).\n\n However, if p is not reparameterized, TensorFlow's gradient will be incorrect\n since the chain-rule stops at samples of non-reparameterized distributions.\n (The non-differentiated result, `approx_expectation`, is the same regardless\n of `use_reparametrization`.) 
In this circumstance using the Score-Gradient\n trick results in an unbiased gradient, i.e.,\n\n ```none\n grad[ E_p[f(X)] ]\n = grad[ int dx p(x) f(x) ]\n = int dx grad[ p(x) f(x) ]\n = int dx [ p'(x) f(x) + p(x) f'(x) ]\n = int dx p(x) [p'(x) / p(x) f(x) + f'(x) ]\n = int dx p(x) grad[ f(x) p(x) / stop_grad[p(x)] ]\n = E_p[ grad[ f(x) p(x) / stop_grad[p(x)] ] ]\n ```\n\n Unless p is not reparametrized, it is usually preferable to\n `use_reparametrization = True`.\n\n Warning: users are responsible for verifying `p` is a \"reparameterized\"\n distribution.\n\n Example Use:\n\n ```python\n bf = tf.contrib.bayesflow\n ds = tf.contrib.distributions\n\n # Monte-Carlo approximation of a reparameterized distribution, e.g., Normal.\n\n num_draws = int(1e5)\n p = ds.Normal(loc=0., scale=1.)\n q = ds.Normal(loc=1., scale=2.)\n exact_kl_normal_normal = ds.kl_divergence(p, q)\n # ==> 0.44314718\n approx_kl_normal_normal = bf.expectation(\n f=lambda x: p.log_prob(x) - q.log_prob(x),\n samples=p.sample(num_draws, seed=42),\n log_prob=p.log_prob,\n use_reparametrization=(p.reparameterization_type\n == distribution.FULLY_REPARAMETERIZED))\n # ==> 0.44632751\n # Relative Error: <1%\n\n # Monte-Carlo approximation of non-reparameterized distribution, e.g., Gamma.\n\n num_draws = int(1e5)\n p = ds.Gamma(concentration=1., rate=1.)\n q = ds.Gamma(concentration=2., rate=3.)\n exact_kl_gamma_gamma = ds.kl_divergence(p, q)\n # ==> 0.37999129\n approx_kl_gamma_gamma = bf.expectation(\n f=lambda x: p.log_prob(x) - q.log_prob(x),\n samples=p.sample(num_draws, seed=42),\n log_prob=p.log_prob,\n use_reparametrization=(p.reparameterization_type\n == distribution.FULLY_REPARAMETERIZED))\n # ==> 0.37696719\n # Relative Error: <1%\n\n # For comparing the gradients, see `monte_carlo_test.py`.\n ```\n\n Note: The above example is for illustration only. To compute approximate\n KL-divergence, the following is preferred:\n\n ```python\n approx_kl_p_q = bf.monte_carlo_csiszar_f_divergence(\n f=bf.kl_reverse,\n p_log_prob=q.log_prob,\n q=p,\n num_draws=num_draws)\n ```\n\n Args:\n f: Python callable which can return `f(samples)`.\n samples: `Tensor` of samples used to form the Monte-Carlo approximation of\n \\\\(E_p[f(X)]\\\\). A batch of samples should be indexed by `axis`\n dimensions.\n log_prob: Python callable which can return `log_prob(samples)`. Must\n correspond to the natural-logarithm of the pdf/pmf of each sample. Only\n required/used if `use_reparametrization=False`.\n Default value: `None`.\n use_reparametrization: Python `bool` indicating that the approximation\n should use the fact that the gradient of samples is unbiased. Whether\n `True` or `False`, this arg only affects the gradient of the resulting\n `approx_expectation`.\n Default value: `True`.\n axis: The dimensions to average. 
If `None`, averages all\n dimensions.\n Default value: `0` (the left-most dimension).\n keep_dims: If True, retains averaged dimensions using size `1`.\n Default value: `False`.\n name: A `name_scope` for operations created by this function.\n Default value: `None` (which implies \"expectation\").\n\n Returns:\n approx_expectation: `Tensor` corresponding to the Monte-Carlo approximation\n of \\\\(E_p[f(X)]\\\\).\n\n Raises:\n ValueError: if `f` is not a Python `callable`.\n ValueError: if `use_reparametrization=False` and `log_prob` is not a Python\n `callable`.\n \"\"\"\n\n with ops.name_scope(name, 'expectation', [samples]):\n if not callable(f):\n raise ValueError('`f` must be a callable function.')\n if use_reparametrization:\n return math_ops.reduce_mean(f(samples), axis=axis, keepdims=keep_dims)\n else:\n if not callable(log_prob):\n raise ValueError('`log_prob` must be a callable function.')\n stop = array_ops.stop_gradient # For readability.\n x = stop(samples)\n logpx = log_prob(x)\n fx = f(x) # Call `f` once in case it has side-effects.\n # We now rewrite f(x) so that:\n # `grad[f(x)] := grad[f(x)] + f(x) * grad[logqx]`.\n # To achieve this, we use a trick that\n # `h(x) - stop(h(x)) == zeros_like(h(x))`\n # but its gradient is grad[h(x)].\n # Note that IEEE754 specifies that `x - x == 0.` and `x + 0. == x`, hence\n # this trick loses no precision. For more discussion regarding the\n # relevant portions of the IEEE754 standard, see the StackOverflow\n # question,\n # \"Is there a floating point value of x, for which x-x == 0 is false?\"\n # http://stackoverflow.com/q/2686644\n fx += stop(fx) * (logpx - stop(logpx)) # Add zeros_like(logpx).\n return math_ops.reduce_mean(fx, axis=axis, keepdims=keep_dims)\n\n\ndef _sample_mean(values):\n \"\"\"Mean over sample indices. In this module this is always [0].\"\"\"\n return math_ops.reduce_mean(values, reduction_indices=[0])\n\n\ndef _sample_max(values):\n \"\"\"Max over sample indices. In this module this is always [0].\"\"\"\n return math_ops.reduce_max(values, reduction_indices=[0])\n\n\ndef _get_samples(dist, z, n, seed):\n \"\"\"Check args and return samples.\"\"\"\n with ops.name_scope('get_samples', values=[z, n]):\n if (n is None) == (z is None):\n raise ValueError(\n 'Must specify exactly one of arguments \"n\" and \"z\". Found: '\n 'n = %s, z = %s' % (n, z))\n if n is not None:\n return dist.sample(n, seed=seed)\n else:\n return ops.convert_to_tensor(z, name='z')\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for multinomial generation ops in the XLA JIT compiler.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport numpy as np\n\nfrom tensorflow.compiler.tests.xla_test import XLATestCase\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.platform import googletest\n\n\n# TODO(srvasude): Merge this with\n# third_party/tensorflow/python/kernel_tests/random/multinomial_op_test.py.\nclass CategoricalTest(XLATestCase):\n \"\"\"Test cases for random-number generating operators.\"\"\"\n\n def output_dtypes(self):\n return set(self.int_types).intersection([np.int32, np.int64])\n\n def _chi2(self, expected, actual):\n \"\"\"Returns Chi2 GOF statistic.\"\"\"\n actual = np.asarray(actual)\n expected = np.asarray(expected)\n diff = actual - expected\n chi2 = np.sum(diff * diff / expected)\n return chi2\n\n def _do_sampling(self, logits, num_samples):\n \"\"\"Categorical samples from given input.\n\n Args:\n logits: Numpy ndarray of shape [batch_size, num_classes].\n num_samples: Int; number of samples to draw.\n\n Returns:\n Frequencies from sampled classes; shape [batch_size, num_classes].\n \"\"\"\n with self.test_session() as sess, self.test_scope():\n random_seed.set_random_seed(1618)\n op = random_ops.multinomial(logits, num_samples,\n output_dtype=dtypes.int32)\n d = sess.run(op)\n\n batch_size, num_classes = logits.shape\n freqs_mat = []\n for i in range(batch_size):\n cnts = dict(collections.Counter(d[i, :]))\n\n # Requires drawn class labels be in range.\n self.assertLess(max(cnts.keys()), num_classes)\n self.assertGreaterEqual(min(cnts.keys()), 0)\n\n freqs = [(cnts[k] * 1. / num_samples if k in cnts else 0)\n for k in range(num_classes)]\n freqs_mat.append(freqs)\n\n return freqs_mat\n\n def _testRngIsNotConstant(self, rng, dtype, output_dtype):\n # Tests that 'rng' does not always return the same value.\n with self.test_session() as sess:\n with self.test_scope():\n x = rng(dtype, output_dtype)\n\n # The random-number generator, if working correctly, should produce the\n # same output multiple times with low probability.\n y = sess.run(x)\n z = sess.run(x)\n w = sess.run(x)\n\n # We use exact equality here. 
If the random-number generator is producing\n # deterministic output, all three outputs will be bitwise identical.\n self.assertTrue((not np.array_equal(y, z)) or\n (not np.array_equal(z, w)) or\n (not np.array_equal(y, w)))\n\n def testCategoricalIsNotConstant(self):\n def rng(dtype, output_dtype):\n return random_ops.multinomial(np.array([[1., 1., 1.]], dtype=dtype), 10,\n output_dtype=output_dtype)\n\n dtype = np.float32\n for output_dtype in self.output_dtypes():\n self._testRngIsNotConstant(rng, dtype, output_dtype)\n\n def testCategoricalIsInRange(self):\n for dtype in self.float_types:\n for output_dtype in self.output_dtypes():\n with self.test_session() as sess:\n with self.test_scope():\n x = random_ops.multinomial(\n array_ops.ones(shape=[1, 20], dtype=dtype), 1000,\n output_dtype=output_dtype)\n y = sess.run(x)\n self.assertTrue((y >= 0).sum() == 1000)\n self.assertTrue((y < 20).sum() == 1000)\n\n def testSamplingCorrectness(self):\n np.random.seed(1618) # Make it reproducible.\n num_samples = 21000\n\n rand_probs = np.random.dirichlet([1., 1., 2., 3.])\n rand_probs2 = np.random.dirichlet([1., 4., 5.], size=3) # batched\n for probs in [[.5, .5], [.85, .05, .1], rand_probs, rand_probs2]:\n probs = np.asarray(probs)\n if len(probs.shape) == 1:\n probs = probs.reshape(1, probs.size) # singleton batch\n\n logits = np.log(probs).astype(np.float32)\n freqs = self._do_sampling(logits, num_samples)\n\n # the test here is similar to\n # python/kernel_tests/random/multinomial_op_test.py\n # Note that df >= 1 in all these cases. Choosing a cutoff of 1e-3\n # corresponds to an alpha value of 2.5% for df = 1, and smaller for larger\n # df.\n chi2 = self._chi2(probs, freqs)\n self.assertLess(chi2, 1e-3)\n\n\nif __name__ == '__main__':\n googletest.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for ExtractImagePatches op.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.compiler.tests.xla_test import XLATestCase\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\n\n\nclass ExtractImagePatches(XLATestCase):\n \"\"\"Functional tests for ExtractImagePatches op.\"\"\"\n\n def _VerifyValues(self, image, ksizes, strides, rates, padding, patches):\n \"\"\"Tests input-output pairs for the ExtractImagePatches op.\n\n Args:\n image: Input tensor with shape: [batch, in_rows, in_cols, depth].\n ksizes: Patch size specified as: [ksize_rows, ksize_cols].\n strides: Output strides, specified as [stride_rows, stride_cols].\n rates: Atrous rates, specified as [rate_rows, rate_cols].\n padding: Padding type.\n patches: Expected output.\n \"\"\"\n ksizes = [1] + ksizes + [1]\n strides = [1] + strides + [1]\n rates = [1] + rates + [1]\n\n with self.test_session():\n image_placeholder = array_ops.placeholder(dtypes.float32)\n with self.test_scope():\n out_tensor = array_ops.extract_image_patches(\n image_placeholder,\n ksizes=ksizes,\n strides=strides,\n rates=rates,\n padding=padding,\n name=\"im2col\")\n feed_dict = {image_placeholder: image}\n self.assertAllClose(patches, out_tensor.eval(feed_dict=feed_dict))\n\n def testKsize1x1Stride1x1Rate1x1(self):\n \"\"\"Verifies that for 1x1 kernel the output equals the input.\"\"\"\n # [2, 3, 4, 5]\n image = np.reshape(range(120), [2, 3, 4, 5])\n # [2, 3, 4, 5]\n patches = np.reshape(range(120), [2, 3, 4, 5])\n for padding in [\"VALID\", \"SAME\"]:\n self._VerifyValues(\n image,\n ksizes=[1, 1],\n strides=[1, 1],\n rates=[1, 1],\n padding=padding,\n patches=patches)\n\n def testKsize1x1Stride2x3Rate1x1(self):\n \"\"\"Test for 1x1 kernel and strides.\"\"\"\n # [2, 4, 5, 3]\n image = np.reshape(range(120), [2, 4, 5, 3])\n # [2, 2, 2, 3]\n patches = image[:, ::2, ::3, :]\n for padding in [\"VALID\", \"SAME\"]:\n self._VerifyValues(\n image,\n ksizes=[1, 1],\n strides=[2, 3],\n rates=[1, 1],\n padding=padding,\n patches=patches)\n\n def testKsize2x2Stride1x1Rate1x1Valid(self):\n \"\"\"Test for 2x2 kernel with VALID padding.\"\"\"\n # [1, 2, 2, 1]\n image = [[[[1], [2]], [[3], [4]]]]\n # [1, 1, 1, 4]\n patches = [[[[1, 2, 3, 4]]]]\n self._VerifyValues(\n image,\n ksizes=[2, 2],\n strides=[1, 1],\n rates=[1, 1],\n padding=\"VALID\",\n patches=patches)\n\n def testKsize2x2Stride1x1Rate1x1Same(self):\n \"\"\"Test for 2x2 kernel with SAME padding.\"\"\"\n # [1, 2, 2, 1]\n image = [[[[1], [2]], [[3], [4]]]]\n # [1, 2, 2, 4]\n patches = [[[[1, 2, 3, 4], [2, 0, 4, 0]], [[3, 4, 0, 0], [4, 0, 0, 0]]]]\n self._VerifyValues(\n image,\n ksizes=[2, 2],\n strides=[1, 1],\n rates=[1, 1],\n padding=\"SAME\",\n 
patches=patches)\n\n def testKsize2x2Stride1x1Rate2x2Valid(self):\n \"\"\"Test for 2x2 kernel with 2x2 dilation.\"\"\"\n # [1, 2, 2, 1]\n image = np.arange(16).reshape(1, 4, 4, 1).astype(np.float32)\n # [1, 2, 2, 4]\n patches = [[[[0, 2, 8, 10], [1, 3, 9, 11]],\n [[4, 6, 12, 14], [5, 7, 13, 15]]]]\n self._VerifyValues(\n image,\n ksizes=[2, 2],\n strides=[1, 1],\n rates=[2, 2],\n padding=\"VALID\",\n patches=patches)\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Library to compute order of computations in a graph.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\nfrom tensorflow.contrib.receptive_field.python.util import parse_layer_parameters\nfrom tensorflow.python.platform import tf_logging as logging\n\n\ndef parse_graph_nodes(graph_def):\n \"\"\"Helper function to parse GraphDef's nodes.\n\n It returns a dict mapping from node name to NodeDef.\n\n Args:\n graph_def: A GraphDef object.\n\n Returns:\n name_to_node: Dict keyed by node name, each entry containing the node's\n NodeDef.\n \"\"\"\n name_to_node = {}\n for node_def in graph_def.node:\n name_to_node[node_def.name] = node_def\n return name_to_node\n\n\n# Named tuple used to collect information from each node in a computation graph.\n_node_info = collections.namedtuple(\n 'NodeInfo', field_names=['order', 'node', 'input_size', 'output_size'])\n\n\ndef _compute_output_resolution(input_spatial_resolution, kernel_size, stride,\n total_padding):\n \"\"\"Computes output resolution, given input resolution and layer parameters.\n\n Note that this computation is done only over one dimension (eg, x or y).\n If any of the inputs is None, returns None.\n\n Args:\n input_spatial_resolution: Input spatial resolution (int).\n kernel_size: Kernel size (int).\n stride: Stride (int).\n total_padding: Total padding to be applied (int).\n Returns:\n output_resolution: Output dimension (int) or None.\n \"\"\"\n if (input_spatial_resolution is None) or (kernel_size is None) or (\n stride is None) or (total_padding is None):\n return None\n return int(\n math.ceil((\n input_spatial_resolution + total_padding - kernel_size + 1) / stride))\n\n\ndef _get_computed_nodes(name_to_node,\n current,\n node_info,\n input_node_name='',\n input_node_size=None):\n \"\"\"Traverses the graph recursively to compute its topological order.\n\n Optionally, the function may also compute the input and output feature map\n resolutions at each node. In this case, input_node_name and input_node_size\n must be set. 
Note that if a node's op type is unknown, the input and output\n resolutions are ignored and set to None.\n\n Args:\n name_to_node: Dict keyed by node name, each entry containing the node's\n NodeDef.\n current: Current node name.\n node_info: Map of nodes we've already traversed, containing their _node_info\n information.\n input_node_name: Name of node with fixed input resolution (optional).\n input_node_size: Fixed input resolution to use (optional).\n Returns:\n order: Order in topological sort for 'current'.\n input_size: Tensor spatial resolution at input of current node.\n output_size: Tensor spatial resolution at output of current node.\n \"\"\"\n if current in node_info:\n return (node_info[current].order, node_info[current].input_size,\n node_info[current].output_size)\n\n node_def = name_to_node[current]\n\n if current == input_node_name:\n order = 0\n input_size = None\n output_size = input_node_size\n node_info[current] = _node_info(order, node_def, input_size, output_size)\n return (order, input_size, output_size)\n\n input_size = None\n output_size = None\n\n order = 0\n number_inputs = 0\n for each in node_def.input:\n # Parses name of input node.\n if each.startswith('^'):\n # The character '^' denotes a control dependency, so this input node can\n # be safely ignored.\n continue\n each = each.split(':')[0]\n # Recursively computes ordering.\n (parent_order, _, parent_output_size) = _get_computed_nodes(\n name_to_node, each, node_info, input_node_name, input_node_size)\n order = max(order, parent_order + 1)\n if number_inputs == 0:\n # For all the types of nodes we consider, the first input corresponds to\n # the feature map.\n input_size = parent_output_size\n number_inputs += 1\n\n # Figure out output size for this layer.\n logging.vlog(3, 'input_size = %s', input_size)\n if input_size is None:\n output_size = None\n else:\n (kernel_size_x, kernel_size_y, stride_x, stride_y, _, _, total_padding_x,\n total_padding_y) = (\n parse_layer_parameters.get_layer_params(\n node_def, name_to_node, input_size, force=True))\n logging.vlog(3, 'kernel_size_x = %s, kernel_size_y = %s, '\n 'stride_x = %s, stride_y = %s, '\n 'total_padding_x = %s, total_padding_y = %s' %\n (kernel_size_x, kernel_size_y, stride_x, stride_y,\n total_padding_x, total_padding_y))\n output_size = [None] * 2\n output_size[0] = _compute_output_resolution(input_size[0], kernel_size_x,\n stride_x, total_padding_x)\n output_size[1] = _compute_output_resolution(input_size[1], kernel_size_y,\n stride_y, total_padding_y)\n\n logging.vlog(3, 'output_size = %s', output_size)\n node_info[current] = _node_info(order, node_def, input_size, output_size)\n\n return order, input_size, output_size\n\n\ndef get_compute_order(graph_def, input_node_name='', input_node_size=None):\n \"\"\"Computes order of computation for a given CNN graph.\n\n Optionally, the function may also compute the input and output feature map\n resolutions at each node. In this case, input_node_name and input_node_size\n must be set. Note that if a node's op type is unknown, the input and output\n resolutions are ignored and set to None.\n\n Args:\n graph_def: GraphDef object.\n input_node_name: Name of node with fixed input resolution (optional). This\n is usually the node name for the input image in a CNN.\n input_node_size: 2D list of integers, fixed input resolution to use\n (optional). 
This is usually the input resolution used for the input image\n in a CNN (common examples are: [224, 224], [299, 299], [321, 321]).\n Returns:\n node_info: Default dict keyed by node name, mapping to a named tuple with\n the following fields:\n - order: Integer denoting topological order;\n - node: NodeDef for the given node;\n - input_size: 2D list of integers, denoting the input spatial resolution\n to the node;\n - output_size: 2D list of integers, denoting the output spatial resolution\n of the node.\n name_to_node: Dict keyed by node name, each entry containing the node's\n NodeDef.\n \"\"\"\n name_to_node = parse_graph_nodes(graph_def)\n node_info = collections.defaultdict(_node_info)\n for each in graph_def.node:\n _get_computed_nodes(name_to_node, each.name, node_info, input_node_name,\n input_node_size)\n return node_info, name_to_node\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The KFAC optimizer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport warnings\n\n# pylint disable=long-line\nfrom tensorflow.contrib.kfac.python.ops import curvature_matrix_vector_products as cmvp\nfrom tensorflow.contrib.kfac.python.ops import estimator as est\n# pylint enable=long-line\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.training import gradient_descent\n\n\nclass KfacOptimizer(gradient_descent.GradientDescentOptimizer):\n \"\"\"The KFAC Optimizer (https://arxiv.org/abs/1503.05671).\"\"\"\n\n def __init__(self,\n learning_rate,\n cov_ema_decay,\n damping,\n layer_collection,\n var_list=None,\n momentum=0.9,\n momentum_type=\"regular\",\n norm_constraint=None,\n name=\"KFAC\",\n estimation_mode=\"gradients\",\n colocate_gradients_with_ops=True,\n batch_size=None,\n placement_strategy=None,\n **kwargs):\n \"\"\"Initializes the KFAC optimizer with the given settings.\n\n Args:\n learning_rate: The base learning rate for the optimizer. 
Should probably\n be set to 1.0 when using momentum_type = 'qmodel', but can still be\n set lowered if desired (effectively lowering the trust in the\n quadratic model.)\n cov_ema_decay: The decay factor used when calculating the covariance\n estimate moving averages.\n damping: The damping factor used to stabilize training due to errors in\n the local approximation with the Fisher information matrix, and to\n regularize the update direction by making it closer to the gradient.\n If damping is adapted during training then this value is used for\n initializing damping variable.\n (Higher damping means the update looks more like a standard gradient\n update - see Tikhonov regularization.)\n layer_collection: The layer collection object, which holds the fisher\n blocks, kronecker factors, and losses associated with the\n graph. The layer_collection cannot be modified after KfacOptimizer's\n initialization.\n var_list: Optional list or tuple of variables to train. Defaults to the\n list of variables collected in the graph under the key\n `GraphKeys.TRAINABLE_VARIABLES`.\n momentum: The momentum decay constant to use. Only applies when\n momentum_type is 'regular' or 'adam'. (Default: 0.9)\n momentum_type: The type of momentum to use in this optimizer, one of\n 'regular', 'adam', or 'qmodel'. (Default: 'regular')\n norm_constraint: float or Tensor. If specified, the update is scaled down\n so that its approximate squared Fisher norm v^T F v is at most the\n specified value. May only be used with momentum type 'regular'.\n (Default: None)\n name: The name for this optimizer. (Default: 'KFAC')\n estimation_mode: The type of estimator to use for the Fishers. Can be\n 'gradients', 'empirical', 'curvature_propagation', or 'exact'.\n (Default: 'gradients'). See the doc-string for FisherEstimator for\n more a more detailed description of these options.\n colocate_gradients_with_ops: Whether we should request gradients we\n compute in the estimator be colocated with their respective ops.\n (Default: True)\n batch_size: The size of the mini-batch. Only needed when momentum_type\n == 'qmodel' or when automatic adjustment is used. (Default: None)\n placement_strategy: string, Device placement strategy used when creating\n covariance variables, covariance ops, and inverse ops.\n (Default: `None`)\n **kwargs: Arguments to be passesd to specific placement\n strategy mixin. Check `placement.RoundRobinPlacementMixin` for example.\n\n Raises:\n ValueError: If the momentum type is unsupported.\n ValueError: If clipping is used with momentum type other than 'regular'.\n ValueError: If no losses have been registered with layer_collection.\n ValueError: If momentum is non-zero and momentum_type is not 'regular'\n or 'adam'.\n \"\"\"\n warnings.warn(\n \"third_party.tensorflow.contrib.kfac is deprecated.\"\n \"This will be removed on 15-07-2018. Check README for further details.\",\n DeprecationWarning)\n # Parameters to be passed to the Fisher estimator:\n self._variables = var_list or tf_variables.trainable_variables\n self._cov_ema_decay = cov_ema_decay\n self._layers = layer_collection\n self._estimation_mode = estimation_mode\n self._colocate_gradients_with_ops = colocate_gradients_with_ops\n\n # The below parameters are required only if damping needs to be adapated.\n # These parameters can be set by calling\n # set_damping_adaptation_params() explicitly.\n self._damping_adaptation_decay = 0.95\n self._damping_adaptation_interval = 5\n # Check section 6.5 KFAC paper. 
omega(1) = pow(damping decay, interval)\n self._omega = (\n self._damping_adaptation_decay**self._damping_adaptation_interval)\n self._adapt_damping = False\n self._min_damping = 1e-5\n self._prev_train_batch = None\n self._is_chief = False\n self._loss_fn = None\n self._damping_constant = damping\n self._damping = None\n self._rho = None\n self._prev_loss = None\n self._q_model_change = None\n self._update_damping_op = None\n\n momentum_type = momentum_type.lower()\n legal_momentum_types = [\"regular\", \"adam\", \"qmodel\"]\n\n if momentum_type not in legal_momentum_types:\n raise ValueError(\"Unsupported momentum type {}. Must be one of {}.\"\n .format(momentum_type, legal_momentum_types))\n if momentum_type != \"regular\" and norm_constraint is not None:\n raise ValueError(\"Update clipping is only supported with momentum \"\n \"type 'regular'.\")\n if momentum_type not in [\"regular\", \"adam\"] and momentum != 0:\n raise ValueError(\"Momentum must be unspecified if using a momentum_type \"\n \"other than 'regular' or 'adam'.\")\n\n # Extra parameters of the optimizer\n self._momentum = momentum\n self._momentum_type = momentum_type\n self._norm_constraint = norm_constraint\n self._batch_size = batch_size\n self._placement_strategy = placement_strategy\n\n with variable_scope.variable_scope(name):\n self._fisher_est = est.make_fisher_estimator(\n placement_strategy=placement_strategy,\n variables=self._variables,\n cov_ema_decay=self._cov_ema_decay,\n damping=self.damping,\n layer_collection=self._layers,\n exps=(-1,),\n estimation_mode=self._estimation_mode,\n colocate_gradients_with_ops=self._colocate_gradients_with_ops,\n **kwargs)\n\n super(KfacOptimizer, self).__init__(learning_rate, name=name)\n\n def set_damping_adaptation_params(self,\n is_chief,\n prev_train_batch,\n loss_fn,\n min_damping=1e-5,\n damping_adaptation_decay=0.99,\n damping_adaptation_interval=5):\n \"\"\"Sets parameters required to adapt damping during training.\n\n When called, enables damping adaptation according to the Levenberg-Marquardt\n style rule described in Section 6.5 of \"Optimizing Neural Networks with\n Kronecker-factored Approximate Curvature\".\n\n Note that this function creates Tensorflow variables which store a few\n scalars and are accessed by the ops which update the damping (as part\n of the training op returned by the minimize() method).\n\n Args:\n is_chief: `Boolean`, `True` if the worker is chief.\n prev_train_batch: Training data used to minimize loss in the previous\n step. This will be used to evaluate loss by calling\n `loss_fn(prev_train_batch)`.\n loss_fn: `function` that takes as input training data tensor and returns\n a scalar loss.\n min_damping: `float`(Optional), Minimum value the damping parameter\n can take. Default value 1e-5.\n damping_adaptation_decay: `float`(Optional), The `damping` parameter is\n multiplied by the `damping_adaptation_decay` every\n `damping_adaptation_interval` number of iterations. Default value 0.99.\n damping_adaptation_interval: `int`(Optional), Number of steps in between\n updating the `damping` parameter. 
Default value 5.\n\n Raises:\n ValueError: If `set_damping_adaptation_params` is already called and the\n the `adapt_damping` is `True`.\n \"\"\"\n if self._adapt_damping:\n raise ValueError(\"Damping adaptation parameters already set.\")\n\n with variable_scope.variable_scope(self.get_name()):\n self._adapt_damping = True\n self._is_chief = is_chief\n self._prev_train_batch = prev_train_batch\n self._loss_fn = loss_fn\n self._damping_adaptation_decay = damping_adaptation_decay\n self._damping_adaptation_interval = damping_adaptation_interval\n self._omega = (\n self._damping_adaptation_decay**self._damping_adaptation_interval)\n self._min_damping = min_damping\n\n self._rho = variable_scope.get_variable(\n \"rho\", shape=(), dtype=dtypes.float32, trainable=False) # LM ratio.\n self._prev_loss = variable_scope.get_variable(\n \"prev_loss\", shape=(), dtype=dtypes.float32, trainable=False)\n self._q_model_change = variable_scope.get_variable(\n \"q_model_change\", shape=(), dtype=dtypes.float32, trainable=False)\n self._damping = variable_scope.get_variable(\n \"damping\", initializer=self._damping_constant, trainable=False)\n\n @property\n def variables(self):\n return self._fisher_est.variables\n\n @property\n def damping(self):\n if self._damping:\n return self._damping\n else:\n return self._damping_constant\n\n @property\n def damping_adaptation_interval(self):\n return self._damping_adaptation_interval\n\n def make_vars_and_create_op_thunks(self):\n \"\"\"Make vars and create op thunks.\n\n Returns:\n cov_update_thunks: List of cov update thunks. Corresponds one-to-one with\n the list of factors given by the \"factors\" property.\n inv_update_thunks: List of inv update thunks. Corresponds one-to-one with\n the list of factors given by the \"factors\" property.\n \"\"\"\n scope = self.get_name() + \"/\" + self._fisher_est.name\n return self._fisher_est.make_vars_and_create_op_thunks(scope=scope)\n\n def create_ops_and_vars_thunks(self):\n \"\"\"Create thunks that make the ops and vars on demand.\n\n This function returns 4 lists of thunks: cov_variable_thunks,\n cov_update_thunks, inv_variable_thunks, and inv_update_thunks.\n\n The length of each list is the number of factors and the i-th element of\n each list corresponds to the i-th factor (given by the \"factors\" property).\n\n Note that the execution of these thunks must happen in a certain\n partial order. The i-th element of cov_variable_thunks must execute\n before the i-th element of cov_update_thunks (and also the i-th element\n of inv_update_thunks). Similarly, the i-th element of inv_variable_thunks\n must execute before the i-th element of inv_update_thunks.\n\n TL;DR (oversimplified): Execute the thunks according to the order that\n they are returned.\n\n Returns:\n cov_variable_thunks: A list of thunks that make the cov variables.\n cov_update_thunks: A list of thunks that make the cov update ops.\n inv_variable_thunks: A list of thunks that make the inv variables.\n inv_update_thunks: A list of thunks that make the inv update ops.\n \"\"\"\n scope = self.get_name() + \"/\" + self._fisher_est.name\n return self._fisher_est.create_ops_and_vars_thunks(scope=scope)\n\n def minimize(self, *args, **kwargs):\n # Should this variable scope encompass everything below? 
Or will the super-\n # class make another copy of the same name scope?\n with variable_scope.variable_scope(self.get_name()):\n kwargs[\"var_list\"] = kwargs.get(\"var_list\") or self.variables\n if set(kwargs[\"var_list\"]) != set(self.variables):\n raise ValueError(\"var_list doesn't match with set of Fisher-estimating \"\n \"variables.\")\n if self._adapt_damping and self._is_chief:\n global_step = kwargs.get(\"global_step\", None)\n if not global_step:\n raise KeyError(\"global_step needs to be passed to optimizer.minimize \"\n \"if damping parameter is adapted.\")\n update_damping_op = self._update_damping(self._prev_train_batch,\n global_step)\n with ops.control_dependencies([update_damping_op]):\n loss = args[0]\n loss_assign_op = state_ops.assign(self._prev_loss, loss)\n train_op = super(KfacOptimizer, self).minimize(*args, **kwargs)\n return control_flow_ops.group(loss_assign_op, train_op)\n else:\n return super(KfacOptimizer, self).minimize(*args, **kwargs)\n\n def compute_gradients(self, *args, **kwargs):\n # args[1] could be our var_list\n if len(args) > 1:\n var_list = args[1]\n else:\n kwargs[\"var_list\"] = kwargs.get(\"var_list\") or self.variables\n var_list = kwargs[\"var_list\"]\n\n if set(var_list) != set(self.variables):\n raise ValueError(\"var_list doesn't match with set of Fisher-estimating \"\n \"variables.\")\n return super(KfacOptimizer, self).compute_gradients(*args, **kwargs)\n\n def apply_gradients(self, grads_and_vars, *args, **kwargs):\n \"\"\"Applies gradients to variables.\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs.\n *args: Additional arguments for super.apply_gradients.\n **kwargs: Additional keyword arguments for super.apply_gradients.\n\n Returns:\n An `Operation` that applies the specified gradients.\n \"\"\"\n # In Python 3, grads_and_vars can be a zip() object which can only be\n # iterated over once. 
By converting it to a list, we ensure that it can be\n # iterated over more than once.\n grads_and_vars = list(grads_and_vars)\n\n # Compute step.\n steps_and_vars = self._compute_update_steps(grads_and_vars)\n\n # Update trainable variables with this step.\n return super(KfacOptimizer, self).apply_gradients(steps_and_vars, *args,\n **kwargs)\n\n def _squared_fisher_norm(self, grads_and_vars, precon_grads_and_vars):\n \"\"\"Computes the squared (approximate) Fisher norm of the updates.\n\n This is defined as v^T F v, where F is the approximate Fisher matrix\n as computed by the estimator, and v = F^{-1} g, where g is the gradient.\n This is computed efficiently as v^T g.\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs.\n precon_grads_and_vars: List of (preconditioned gradient, variable) pairs.\n Must be the result of calling `self._fisher_est.multiply_inverse`\n on `grads_and_vars`.\n\n Returns:\n Scalar representing the squared norm.\n\n Raises:\n ValueError: if the two list arguments do not contain the same variables,\n in the same order.\n \"\"\"\n for (_, gvar), (_, pgvar) in zip(grads_and_vars, precon_grads_and_vars):\n if gvar is not pgvar:\n raise ValueError(\"The variables referenced by the two arguments \"\n \"must match.\")\n terms = [\n math_ops.reduce_sum(grad * pgrad)\n for (grad, _), (pgrad, _) in zip(grads_and_vars, precon_grads_and_vars)\n ]\n return math_ops.reduce_sum(terms)\n\n def _update_clip_coeff(self, grads_and_vars, precon_grads_and_vars):\n \"\"\"Computes the scale factor for the update to satisfy the norm constraint.\n\n Defined as min(1, sqrt(c / r^T F r)), where c is the norm constraint,\n F is the approximate Fisher matrix, and r is the update vector, i.e.\n -alpha * v, where alpha is the learning rate, and v is the preconditioned\n gradient.\n\n This is based on Section 5 of Ba et al., Distributed Second-Order\n Optimization using Kronecker-Factored Approximations. Note that they\n absorb the learning rate alpha (which they denote eta_max) into the formula\n for the coefficient, while in our implementation, the rescaling is done\n before multiplying by alpha. Hence, our formula differs from theirs by a\n factor of alpha.\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs.\n precon_grads_and_vars: List of (preconditioned gradient, variable) pairs.\n Must be the result of calling `self._fisher_est.multiply_inverse`\n on `grads_and_vars`.\n\n Returns:\n Scalar representing the coefficient which should be applied to the\n preconditioned gradients to satisfy the norm constraint.\n \"\"\"\n sq_norm_grad = self._squared_fisher_norm(grads_and_vars,\n precon_grads_and_vars)\n sq_norm_up = sq_norm_grad * self._learning_rate**2\n return math_ops.minimum(1.,\n math_ops.sqrt(self._norm_constraint / sq_norm_up))\n\n def _clip_updates(self, grads_and_vars, precon_grads_and_vars):\n \"\"\"Rescales the preconditioned gradients to satisfy the norm constraint.\n\n Rescales the preconditioned gradients such that the resulting update r\n (after multiplying by the learning rate) will satisfy the norm constraint.\n This constraint is that r^T F r <= C, where F is the approximate Fisher\n matrix, and C is the norm_constraint attribute. 
See Section 5 of\n Ba et al., Distributed Second-Order Optimization using Kronecker-Factored\n Approximations.\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs.\n precon_grads_and_vars: List of (preconditioned gradient, variable) pairs.\n Must be the result of calling `self._fisher_est.multiply_inverse`\n on `grads_and_vars`.\n\n Returns:\n List of (rescaled preconditioned gradient, variable) pairs.\n \"\"\"\n coeff = self._update_clip_coeff(grads_and_vars, precon_grads_and_vars)\n return [(pgrad * coeff, var) for pgrad, var in precon_grads_and_vars]\n\n def _compute_prev_updates(self, variables):\n \"\"\"Computes previous updates as negative velocities scaled by learning rate.\n\n Args:\n variables: List of variables in the graph that the update will be\n applied to.\n\n Returns:\n List of previous updates applied to the `variables`.\n \"\"\"\n return list(\n -1 * self._learning_rate * self._zeros_slot(var, \"velocity\", self._name)\n for var in variables)\n\n def _compute_qmodel_hyperparams(self, precon_grads, prev_updates, grads,\n variables):\n \"\"\"Compute optimal update hyperparameters from the quadratic model.\n\n More specifically, if L is the loss we minimize a quadratic approximation\n of L(theta + d) which we denote by qmodel(d) with\n d = alpha*precon_grad + mu*prev_update with respect to alpha and mu, where\n\n qmodel(d) = (1/2) * d^T * B * d + grad^T*d + L(theta) .\n\n Unlike in the KL clipping approach we use the non-approximated quadratic\n model where the curvature matrix C is the true Fisher on the current\n mini-batch (computed without any approximations beyond mini-batch sampling),\n with the usual Tikhonov damping/regularization applied,\n\n C = F + damping * I\n\n See Section 7 of https://arxiv.org/abs/1503.05671 for a derivation of\n the formula. See Appendix C for a discussion of the trick of using\n a factorized Fisher matrix to more efficiently compute the required\n vector-matrix-vector products.\n\n Note that the elements of all 4 lists passed to this function must\n be in correspondence with each other.\n\n Args:\n precon_grads: List of preconditioned gradients.\n prev_updates: List of updates computed at the previous iteration.\n grads: List of gradients.\n variables: List of variables in the graph that the update will be\n applied to. 
(Note that this function doesn't actually apply the\n update.)\n\n Returns:\n (alpha, mu, qmodel_change), where alpha and mu are chosen to optimize the\n quadratic model, and\n qmodel_change = qmodel(alpha*precon_grad + mu*prev_update) - qmodel(0)\n = qmodel(alpha*precon_grad + mu*prev_update) - L(theta).\n \"\"\"\n\n cmvpc = cmvp.CurvatureMatrixVectorProductComputer(self._layers.losses,\n variables)\n\n # compute the matrix-vector products with the transposed Fisher factor\n fft_precon_grads = cmvpc.multiply_fisher_factor_transpose(precon_grads)\n fft_prev_updates = cmvpc.multiply_fisher_factor_transpose(prev_updates)\n batch_size = math_ops.cast(\n self._batch_size, dtype=fft_precon_grads[0].dtype)\n\n # compute the entries of the 2x2 matrix\n m_11 = (\n _inner_product_list(fft_precon_grads, fft_precon_grads) / batch_size +\n self.damping * _inner_product_list(precon_grads, precon_grads))\n\n m_21 = (\n _inner_product_list(fft_prev_updates, fft_precon_grads) / batch_size +\n self.damping * _inner_product_list(prev_updates, precon_grads))\n\n m_22 = (\n _inner_product_list(fft_prev_updates, fft_prev_updates) / batch_size +\n self.damping * _inner_product_list(prev_updates, prev_updates))\n\n def non_zero_prevupd_case():\n r\"\"\"Computes optimal (alpha, mu) given non-zero previous update.\n\n We solve the full 2x2 linear system. See Martens & Grosse (2015),\n Section 7, definition of $\\alpha^*$ and $\\mu^*$.\n\n Returns:\n (alpha, mu, qmodel_change), where alpha and mu are chosen to optimize\n the quadratic model, and\n qmodel_change = qmodel(alpha*precon_grad + mu*prev_update) - qmodel(0).\n \"\"\"\n m = ops.convert_to_tensor([[m_11, m_21], [m_21, m_22]])\n\n c = ops.convert_to_tensor([[_inner_product_list(grads, precon_grads)],\n [_inner_product_list(grads, prev_updates)]])\n\n sol = -1. * _two_by_two_solve(m, c)\n alpha = sol[0]\n mu = sol[1]\n qmodel_change = 0.5 * math_ops.reduce_sum(sol * c)\n\n return alpha, mu, qmodel_change\n\n def zero_prevupd_case():\n r\"\"\"Computes optimal (alpha, mu) given all-zero previous update.\n\n The linear system reduces to 1x1. See Martens & Grosse (2015),\n Section 6.4, definition of $\\alpha^*$.\n\n Returns:\n (alpha, 0.0, qmodel_change), where alpha is chosen to optimize the\n quadratic model, and\n qmodel_change = qmodel(alpha*precon_grad) - qmodel(0)\n \"\"\"\n m = m_11\n c = _inner_product_list(grads, precon_grads)\n\n alpha = -c / m\n mu = 0.0\n qmodel_change = 0.5 * alpha * c\n\n return alpha, mu, qmodel_change\n\n return control_flow_ops.cond(\n math_ops.equal(m_22, 0.0), zero_prevupd_case, non_zero_prevupd_case)\n\n def _assign_q_model_change(self, q_model_change):\n \"\"\"Assigns `q_model_change` to `self._q_model_change` if damping is adapted.\n\n Note only the chief worker does the assignment.\n\n Args:\n q_model_change: Scalar tensor of type `float32`.\n\n Returns:\n If `adapt_damping` is `True` then returns an assign op, Otherwise returns\n a no_op().\n \"\"\"\n if self._adapt_damping and self._is_chief:\n q_model_assign_op = state_ops.assign(self._q_model_change, q_model_change)\n else:\n q_model_assign_op = control_flow_ops.no_op()\n return q_model_assign_op\n\n def _compute_qmodel_hyperparams_wrapper(self, grads_and_vars,\n precon_grads_and_vars):\n \"\"\"Wrapper function for `self._compute_qmodel_hyperparams`.\n\n Constructs a list of preconditioned gradients and variables. 
Also creates a\n op to asssign the computed q model change to `self._q_model_change`.\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs.\n precon_grads_and_vars: List of (preconditioned gradients, variable)\n pairs.\n\n Returns:\n (alpha, mu, q_model_assign_op), where alpha and mu are chosen to optimize\n the quadratic model, `q_model_assign_op` assigns the computed q model\n change to `self._q_model_change`.\n \"\"\"\n precon_grads = list(\n precon_grad for (precon_grad, _) in precon_grads_and_vars)\n grads = list(grad for (grad, _) in grads_and_vars)\n variables = list(var for (_, var) in grads_and_vars)\n prev_updates = self._compute_prev_updates(variables)\n # Compute optimal velocity update parameters according to quadratic model\n alpha, mu, q_model_change = self._compute_qmodel_hyperparams(\n precon_grads, prev_updates, grads, variables)\n\n return alpha, mu, self._assign_q_model_change(q_model_change)\n\n def _compute_update_steps(self, grads_and_vars):\n \"\"\"Computes the update steps for the variables given the gradients.\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs.\n\n Returns:\n A list of tuple (assign_op ,var) where `assign_op` assigns the update\n steps to `var`.\n \"\"\"\n\n if self._momentum_type == \"regular\":\n # Compute \"preconditioned\" gradient.\n precon_grads_and_vars = self._fisher_est.multiply_inverse(grads_and_vars)\n\n # Apply \"KL clipping\" if asked for.\n if self._norm_constraint is not None:\n precon_grads_and_vars = self._clip_updates(grads_and_vars,\n precon_grads_and_vars)\n\n # Update the velocity with this and return it as the step.\n if self._adapt_damping and self._is_chief:\n _, _, q_model_assign_op = self._compute_qmodel_hyperparams_wrapper(\n grads_and_vars, precon_grads_and_vars)\n with ops.control_dependencies([q_model_assign_op]):\n return self._update_velocities(precon_grads_and_vars, self._momentum)\n else:\n return self._update_velocities(precon_grads_and_vars, self._momentum)\n elif self._momentum_type == \"adam\":\n # Update velocity.\n velocities_and_vars = self._update_velocities(grads_and_vars,\n self._momentum)\n # Return \"preconditioned\" velocity vector as the step.\n return self._fisher_est.multiply_inverse(velocities_and_vars)\n\n elif self._momentum_type == \"qmodel\":\n # Compute \"preconditioned\" gradient.\n precon_grads_and_vars = self._fisher_est.multiply_inverse(grads_and_vars)\n\n # Compute optimal velocity update parameters according to quadratic model\n alpha, mu, q_model_assign_op = self._compute_qmodel_hyperparams_wrapper(\n grads_and_vars, precon_grads_and_vars)\n\n with ops.control_dependencies([q_model_assign_op]):\n return self._update_velocities(\n precon_grads_and_vars, mu, vec_coeff=-alpha)\n\n def _update_velocities(self, vecs_and_vars, decay, vec_coeff=1.0):\n \"\"\"Updates the velocities of the variables with the given vectors.\n\n Args:\n vecs_and_vars: List of (vector, variable) pairs.\n decay: How much to decay the old velocity by. 
This is often referred to\n as the 'momentum constant'.\n vec_coeff: Coefficient to apply to the vectors before adding them to the\n velocity.\n\n Returns:\n A list of (velocity, var) indicating the new velocity for each var.\n \"\"\"\n\n def _update_velocity(vec, var):\n velocity = self._zeros_slot(var, \"velocity\", self._name)\n with ops.colocate_with(velocity):\n # NOTE(mattjj): read/modify/write race condition not suitable for async.\n\n # Compute the new velocity for this variable.\n new_velocity = decay * velocity + vec_coeff * vec\n\n # Save the updated velocity.\n return (array_ops.identity(velocity.assign(new_velocity)), var)\n\n # Go through variable and update its associated part of the velocity vector.\n return [_update_velocity(vec, var) for vec, var in vecs_and_vars]\n\n def _update_damping(self, prev_batch, global_step):\n \"\"\"Adapts damping parameter. Check KFAC (Section 6.5) for the details.\n\n The damping parameter is updated according to the Levenberg-Marquardt rule\n every `self._damping_adaptation_interval` iterations.\n\n Args:\n prev_batch: Tensor or tuple of tensors which can be passed to\n `self._loss_fn` to evaluate loss.\n global_step: `Variable` which keeps track of number of times the training\n variables have been updated.\n Returns:\n A `tf.cond` op which updates the damping parameter.\n \"\"\"\n def compute_damping():\n \"\"\"\"Adapts damping parameter based on \"reduction ratio\".\n\n Reduction ratio captures how closely the quadratic approximation to the\n loss function approximates the actual loss within a trust region. The\n damping update tries to make the damping as small as possible while\n maintaining the property that the quadratic model remains a good local\n approximation to the loss function.\n\n Returns:\n An Op to assign newly computed damping value to `self._damping`.\n \"\"\"\n prev_batch_loss = self._loss_fn(prev_batch)\n with ops.control_dependencies([prev_batch_loss]):\n rho_assign = self._rho.assign(\n (prev_batch_loss - self._prev_loss) / self._q_model_change)\n with ops.control_dependencies([rho_assign]):\n new_damping = control_flow_ops.case(\n [(self._rho < 0.25, lambda: self.damping / self._omega),\n (self._rho > 0.75, lambda: self.damping * self._omega)],\n lambda: self.damping)\n with ops.control_dependencies([new_damping]):\n new_damping_min = math_ops.maximum(new_damping, self._min_damping)\n return control_flow_ops.group(self._damping.assign(new_damping_min))\n\n return control_flow_ops.cond(\n math_ops.equal(\n math_ops.mod(global_step + 1, self._damping_adaptation_interval),\n 0), compute_damping, control_flow_ops.no_op)\n\n\ndef _inner_product_list(list1, list2):\n return math_ops.add_n(\n [math_ops.reduce_sum(elt1 * elt2) for elt1, elt2 in zip(list1, list2)])\n\n\ndef _two_by_two_solve(m, c):\n # it might be better just to crank out the exact formula for 2x2 inverses\n return math_ops.matmul(linalg_ops.matrix_inverse(m), c)\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for depthwise convolutional operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.compiler.tests.xla_test import XLATestCase\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import nn_ops\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\n\n\n# Reference implementation of depthwise_conv2d\ndef ReferenceDepthwiseConv2D(input_tensor, filter_tensor, strides, padding,\n data_format=None):\n # Reference implementation of depthwise convolution that uses regular\n # convolution.\n convs = []\n in_channels = filter_tensor.shape[2]\n # Use a custom implementation of depthwise conv2d using slicing.\n for channel in xrange(in_channels):\n # Slice the input along channel\n if data_format == \"NCHW\":\n input_slice = input_tensor[:, channel:channel+1, :, :]\n else:\n input_slice = input_tensor[:, :, :, channel:channel+1]\n\n # Slice the filters. Filters are H, W, InC, DepthMultiplier\n filter_slice = filter_tensor[:, :, channel:channel+1, :]\n # Do conv\n convs.append(nn_ops.conv2d(input_slice, filter_slice,\n strides, padding,\n data_format=data_format,\n name=\"depthwise_slice_%d\" % channel))\n\n # Concat along dimension.\n if data_format == \"NCHW\":\n return array_ops.concat(convs, 1)\n else:\n return array_ops.concat(convs, 3)\n\n\ndef ConfigsToTest():\n \"\"\"Iterator for different convolution shapes, strides and paddings.\n\n Yields:\n Tuple (input_size, filter_size, out_size, stride, padding), the depthwise\n convolution parameters.\n \"\"\"\n input_sizes = [[4, 5, 5, 48], [4, 8, 8, 84], [4, 17, 17, 48], [4, 9, 27, 8],\n [4, 31, 31, 7], [4, 35, 35, 2], [4, 147, 147, 2],\n [3, 299, 299, 3], [5, 183, 183, 1]]\n filter_sizes = [[1, 1, 48, 2], [1, 3, 84, 1], [3, 1, 48, 4], [3, 3, 8, 1],\n [3, 3, 7, 1], [5, 5, 2, 1], [3, 3, 2, 8], [2, 2, 3,\n 8], [5, 5, 1, 2]]\n out_sizes = [[4, 5, 5, 96], [4, 8, 8, 84], [4, 17, 17, 192], [4, 9, 27, 8],\n [4, 31, 31, 7], [4, 35, 35, 2], [4, 49, 49, 16],\n [3, 150, 150, 24], [5, 92, 92, 2]]\n strides = [1, 1, 1, 1, 1, 1, 3, 2, 2]\n # pylint: disable=invalid-name\n VALID = \"VALID\"\n SAME = \"SAME\"\n # pylint: enable=invalid-name\n paddings = [SAME, SAME, SAME, SAME, SAME, SAME, VALID, SAME, SAME, SAME]\n for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,\n paddings):\n yield i, f, o, s, p\n\n\ndef CheckGradConfigsToTest():\n \"\"\"Iterator for different convolution shapes, strides and paddings.\n\n compute_gradient_error() is very expensive. 
So the configs should be\n relatively small.\n\n Yields:\n Tuple (input_size, filter_size, out_size, stride, padding), the depthwise\n convolution parameters.\n \"\"\"\n input_sizes = [[2, 5, 8, 1], [4, 5, 5, 1], [2, 4, 4, 2], [1, 15, 15, 2],\n [2, 15, 16, 1]]\n filter_sizes = [[4, 4, 1, 2], [2, 2, 1, 2], [3, 1, 2, 2], [1, 3, 2, 1],\n [3, 3, 1, 2]]\n out_sizes = [[2, 5, 8, 2], [4, 2, 2, 2], [2, 4, 4, 4], [1, 15, 15, 2],\n [2, 5, 5, 2]]\n strides = [1, 2, 1, 1, 3]\n # pylint: disable=invalid-name\n VALID = \"VALID\"\n SAME = \"SAME\"\n # pylint: enable=invalid-name\n paddings = [SAME, VALID, SAME, SAME, VALID]\n for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,\n paddings):\n yield i, f, o, s, p\n\n\nclass DepthwiseConv2DTest(XLATestCase):\n\n # This is testing that depthwise_conv2d and depthwise_conv2d_native\n # produce the same results. It also tests that NCHW and NWHC\n # formats agree, by comparing the depthwise_conv2d_native with\n # 'NCHW' format (with transposition) matches the 'NHWC' format using\n # the higher level interface.\n def _VerifyValues(self,\n tensor_in_sizes,\n filter_in_sizes,\n stride,\n padding,\n data_type,\n data_format=\"NHWC\"):\n \"\"\"Verifies the output values of the convolution function.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in\n [batch, input_rows, input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in\n [filter_rows, filter_cols, input_depth, depth_multiplier].\n stride: Stride.\n padding: Padding type.\n data_type: The data type to use.\n data_format: The data_format of the input. \"NHWC\" or \"NCHW\".\n \"\"\"\n total_size_1 = 1\n total_size_2 = 1\n for s in tensor_in_sizes:\n total_size_1 *= s\n for s in filter_in_sizes:\n total_size_2 *= s\n # Initializes the input and filter tensor with numbers incrementing from 1.\n x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],\n dtype=data_type).reshape(tensor_in_sizes)\n x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],\n dtype=data_type).reshape(filter_in_sizes)\n with self.test_session() as sess:\n if data_type == np.float32:\n tolerance = 1e-4\n else:\n self.assertEqual(data_type, np.float64)\n tolerance = 1e-8\n\n t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=data_type)\n t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=data_type)\n\n native_t1 = t1\n strides = [1, stride, stride, 1]\n if data_format == \"NCHW\":\n # Transpose from NWHC input to NCHW\n # Ex. 
[4, 5, 5, 48] to [4, 48, 5, 5]\n native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])\n strides = [1, 1, stride, stride]\n\n with self.test_scope():\n conv_native = nn_ops.depthwise_conv2d_native(\n native_t1,\n t2,\n strides=strides,\n data_format=data_format,\n padding=padding)\n\n if data_format == \"NCHW\":\n # Transpose back from NCHW to NHWC\n conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])\n\n with ops.device(\"CPU\"):\n conv_interface = ReferenceDepthwiseConv2D(\n t1, t2, strides=[1, stride, stride, 1], padding=padding)\n\n native_result = sess.run(conv_native, {t1: x1, t2: x2})\n interface_result = sess.run(conv_interface, {t1: x1, t2: x2})\n\n print(\"data_type:\", data_type, \"max diff = \",\n np.amax(np.absolute(native_result - interface_result)))\n self.assertAllClose(\n np.ravel(native_result), np.ravel(interface_result), rtol=tolerance)\n\n def testDepthwiseConv2D(self):\n for index, (input_size, filter_size, _, stride,\n padding) in enumerate(ConfigsToTest()):\n print(\"Testing DepthwiseConv2D,\", index, \"th config:\", input_size, \"*\",\n filter_size, \"stride:\", stride, \"padding:\", padding)\n for data_type in self.float_types:\n # TODO(phawkins): the reference implementation only supports float32.\n if data_type == np.float32:\n self._VerifyValues(\n input_size, filter_size, stride, padding, data_type)\n\n def testDepthwiseConv2DFormat(self):\n for index, (input_size, filter_size, _, stride,\n padding) in enumerate(ConfigsToTest()):\n print(\"Testing DepthwiseConv2DFormat,\", index, \"th config:\", input_size,\n \"*\", filter_size, \"stride:\", stride, \"padding:\", padding)\n for data_type in self.float_types:\n # TODO(phawkins): the reference implementation only supports float32.\n if data_type == np.float32:\n self._VerifyValues(\n input_size,\n filter_size,\n stride,\n padding,\n data_type,\n data_format=\"NCHW\")\n\n# This is testing against hand calculated results.\n\n def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,\n expected):\n \"\"\"Verifies the output values of the depthwise convolution function.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in\n [batch, input_rows, input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in\n [filter_rows, filter_cols, input_depth, depth_multiplier].\n stride: Stride.\n padding: Padding type.\n expected: An array containing the expected operation outputs.\n \"\"\"\n total_size_1 = 1\n total_size_2 = 1\n for s in tensor_in_sizes:\n total_size_1 *= s\n for s in filter_in_sizes:\n total_size_2 *= s\n # Initializes the input tensor with array containing incrementing\n # numbers from 1.\n x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],\n dtype=np.float32).reshape(tensor_in_sizes)\n x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],\n dtype=np.float32).reshape(filter_in_sizes)\n with self.test_session() as sess:\n t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=np.float32)\n t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=np.float32)\n with self.test_scope():\n conv = nn_ops.depthwise_conv2d_native(\n t1, t2, strides=[1, stride, stride, 1], padding=padding)\n value = sess.run(conv, {t1: x1, t2: x2})\n print(\"value = \", value)\n self.assertArrayNear(expected, np.ravel(value), 1e-5)\n self.assertShapeEqual(value, conv)\n\n def testConv2D2x2Filter(self):\n # The inputs look like this (it's a 3 x 2 matrix, each of depth 2):\n #\n # [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]\n # [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]\n # We can view 
this as two inputs\n #\n # input depth 0:\n #\n # [ 1.0, 3.0, 5.0 ]\n # [ 7.0, 9.0, 11.0 ]\n #\n # input depth 1:\n #\n # [ 2.0, 4.0, 6.0 ]\n # [ 8.0, 10.0, 12.0 ]\n #\n # The filter looks like this (it has two 2 x 2 patches, each generating 2\n # depths):\n #\n # filter #0:\n #\n # [ (1.0, 3.0), ( 5.0, 7.0)]\n # [ (9.0, 11.0), (13.0, 15.0)]\n #\n # filter #1:\n #\n # [ ( 2.0, 4.0), ( 6.0, 8.0)]\n # [ (10.0, 12.0), (14.0, 16.0)]\n #\n # So the outputs are:\n #\n # (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)\n # 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196\n # (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)\n # 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216\n # (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)\n # 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272\n # (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)\n # 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296\n #\n # (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)\n # 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252\n # (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)\n # 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280\n # (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)\n # 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344\n # (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)\n # 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376\n expected_output = [196, 216, 272, 296, 252, 280, 344, 376]\n self._VerifyHandValues(\n tensor_in_sizes=[1, 2, 3, 2],\n filter_in_sizes=[2, 2, 2, 2],\n stride=1,\n padding=\"VALID\",\n expected=expected_output)\n\n def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,\n stride, padding):\n x1 = np.random.rand(*filter_sizes).astype(np.float32)\n x2 = np.random.rand(*output_sizes).astype(np.float32)\n\n def _GetVal(use_xla):\n with self.test_session():\n t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])\n t1 = array_ops.placeholder(np.float32, shape=filter_sizes)\n t2 = array_ops.placeholder(np.float32, shape=output_sizes)\n if use_xla:\n with self.test_scope():\n backprop = nn_ops.depthwise_conv2d_native_backprop_input(\n t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)\n else:\n backprop = nn_ops.depthwise_conv2d_native_backprop_input(\n t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)\n\n ret = backprop.eval({t1: x1, t2: x2})\n self.assertShapeEqual(ret, backprop)\n return ret\n\n gpu_value = _GetVal(use_xla=True)\n cpu_value = _GetVal(use_xla=False)\n self.assertAllClose(cpu_value, gpu_value, rtol=1e-3, atol=1e-3)\n\n def testDepthwiseConv2DInputGradCompare(self):\n for index, (input_size, filter_size, output_size, stride,\n padding) in enumerate(ConfigsToTest()):\n print(\"Testing DepthwiseConv2DInputGradCompare,\", index, \"th config:\",\n input_size, \"*\", filter_size, \"stride:\", stride, \"padding:\",\n padding)\n self._CompareBackpropInput(input_size, filter_size, output_size, stride,\n padding)\n\n def _CompareBackpropFilter(self, input_sizes, filter_sizes, output_sizes,\n stride, padding):\n x0 = np.random.rand(*input_sizes).astype(np.float32)\n x2 = np.random.rand(*output_sizes).astype(np.float32)\n\n def _GetVal(use_xla):\n with self.test_session():\n t0 = array_ops.placeholder(np.float32, shape=input_sizes)\n t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])\n t2 = array_ops.placeholder(np.float32, shape=output_sizes)\n if use_xla:\n with 
self.test_scope():\n backprop = nn_ops.depthwise_conv2d_native_backprop_filter(\n t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)\n else:\n backprop = nn_ops.depthwise_conv2d_native_backprop_filter(\n t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)\n ret = backprop.eval({t0: x0, t2: x2})\n self.assertShapeEqual(ret, backprop)\n return ret\n\n gpu_value = _GetVal(use_xla=True)\n cpu_value = _GetVal(use_xla=False)\n self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)\n\n def testDepthwiseConv2DFilterGradCompare(self):\n for index, (input_size, filter_size, output_size, stride,\n padding) in enumerate(ConfigsToTest()):\n print(\"Testing DepthwiseConv2DFilterGradCompare,\", index, \"th config:\",\n input_size, \"*\", filter_size, \"stride:\", stride, \"padding:\",\n padding)\n self._CompareBackpropFilter(input_size, filter_size, output_size,\n stride, padding)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for utilities working with arbitrarily nested structures.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport numpy as np\n\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass NestTest(test.TestCase):\n\n def testFlattenAndPack(self):\n structure = ((3, 4), 5, (6, 7, (9, 10), 8))\n flat = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\"]\n self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])\n self.assertEqual(\n nest.pack_sequence_as(structure, flat), ((\"a\", \"b\"), \"c\",\n (\"d\", \"e\", (\"f\", \"g\"), \"h\")))\n point = collections.namedtuple(\"Point\", [\"x\", \"y\"])\n structure = (point(x=4, y=2), ((point(x=1, y=0),),))\n flat = [4, 2, 1, 0]\n self.assertEqual(nest.flatten(structure), flat)\n restructured_from_flat = nest.pack_sequence_as(structure, flat)\n self.assertEqual(restructured_from_flat, structure)\n self.assertEqual(restructured_from_flat[0].x, 4)\n self.assertEqual(restructured_from_flat[0].y, 2)\n self.assertEqual(restructured_from_flat[1][0][0].x, 1)\n self.assertEqual(restructured_from_flat[1][0][0].y, 0)\n\n self.assertEqual([5], nest.flatten(5))\n self.assertEqual([np.array([5])], nest.flatten(np.array([5])))\n\n self.assertEqual(\"a\", nest.pack_sequence_as(5, [\"a\"]))\n self.assertEqual(\n np.array([5]), nest.pack_sequence_as(\"scalar\", [np.array([5])]))\n\n with self.assertRaisesRegexp(ValueError, \"Structure is a scalar\"):\n nest.pack_sequence_as(\"scalar\", [4, 5])\n\n with self.assertRaisesRegexp(TypeError, \"flat_sequence\"):\n 
nest.pack_sequence_as([4, 5], \"bad_sequence\")\n\n with self.assertRaises(ValueError):\n nest.pack_sequence_as([5, 6, [7, 8]], [\"a\", \"b\", \"c\"])\n\n def testFlattenDictOrder(self):\n \"\"\"`flatten` orders dicts by key, including OrderedDicts.\"\"\"\n ordered = collections.OrderedDict([(\"d\", 3), (\"b\", 1), (\"a\", 0), (\"c\", 2)])\n plain = {\"d\": 3, \"b\": 1, \"a\": 0, \"c\": 2}\n ordered_flat = nest.flatten(ordered)\n plain_flat = nest.flatten(plain)\n self.assertEqual([0, 1, 2, 3], ordered_flat)\n self.assertEqual([0, 1, 2, 3], plain_flat)\n\n def testPackDictOrder(self):\n \"\"\"Packing orders dicts by key, including OrderedDicts.\"\"\"\n ordered = collections.OrderedDict([(\"d\", 0), (\"b\", 0), (\"a\", 0), (\"c\", 0)])\n plain = {\"d\": 0, \"b\": 0, \"a\": 0, \"c\": 0}\n seq = [0, 1, 2, 3]\n ordered_reconstruction = nest.pack_sequence_as(ordered, seq)\n plain_reconstruction = nest.pack_sequence_as(plain, seq)\n self.assertEqual(\n collections.OrderedDict([(\"d\", 3), (\"b\", 1), (\"a\", 0), (\"c\", 2)]),\n ordered_reconstruction)\n self.assertEqual({\"d\": 3, \"b\": 1, \"a\": 0, \"c\": 2}, plain_reconstruction)\n\n def testFlattenAndPackWithDicts(self):\n # A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.\n named_tuple = collections.namedtuple(\"A\", (\"b\", \"c\"))\n mess = (\n \"z\",\n named_tuple(3, 4),\n {\n \"c\": (\n 1,\n collections.OrderedDict([\n (\"b\", 3),\n (\"a\", 2),\n ]),\n ),\n \"b\": 5\n },\n 17\n )\n\n flattened = nest.flatten(mess)\n self.assertEqual(flattened, [\"z\", 3, 4, 5, 1, 2, 3, 17])\n\n structure_of_mess = (\n 14,\n named_tuple(\"a\", True),\n {\n \"c\": (\n 0,\n collections.OrderedDict([\n (\"b\", 9),\n (\"a\", 8),\n ]),\n ),\n \"b\": 3\n },\n \"hi everybody\",\n )\n\n unflattened = nest.pack_sequence_as(structure_of_mess, flattened)\n self.assertEqual(unflattened, mess)\n\n # Check also that the OrderedDict was created, with the correct key order.\n unflattened_ordered_dict = unflattened[2][\"c\"][1]\n self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)\n self.assertEqual(list(unflattened_ordered_dict.keys()), [\"b\", \"a\"])\n\n def testFlattenSparseValue(self):\n st = sparse_tensor.SparseTensorValue([[0]], [0], [1])\n single_value = st\n list_of_values = [st, st, st]\n nest_of_values = ((st), ((st), (st)))\n dict_of_values = {\"foo\": st, \"bar\": st, \"baz\": st}\n self.assertEqual([st], nest.flatten(single_value))\n self.assertEqual([[st, st, st]], nest.flatten(list_of_values))\n self.assertEqual([st, st, st], nest.flatten(nest_of_values))\n self.assertEqual([st, st, st], nest.flatten(dict_of_values))\n\n def testIsSequence(self):\n self.assertFalse(nest.is_sequence(\"1234\"))\n self.assertFalse(nest.is_sequence([1, 3, [4, 5]]))\n self.assertTrue(nest.is_sequence(((7, 8), (5, 6))))\n self.assertFalse(nest.is_sequence([]))\n self.assertFalse(nest.is_sequence(set([1, 2])))\n ones = array_ops.ones([2, 3])\n self.assertFalse(nest.is_sequence(ones))\n self.assertFalse(nest.is_sequence(math_ops.tanh(ones)))\n self.assertFalse(nest.is_sequence(np.ones((4, 5))))\n self.assertTrue(nest.is_sequence({\"foo\": 1, \"bar\": 2}))\n self.assertFalse(\n nest.is_sequence(sparse_tensor.SparseTensorValue([[0]], [0], [1])))\n\n def testAssertSameStructure(self):\n structure1 = (((1, 2), 3), 4, (5, 6))\n structure2 = (((\"foo1\", \"foo2\"), \"foo3\"), \"foo4\", (\"foo5\", \"foo6\"))\n structure_different_num_elements = (\"spam\", \"eggs\")\n structure_different_nesting = (((1, 2), 3), 4, 5, (6,))\n 
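# assert_same_structure compares only the nesting (the arity at each level);\n    # scalars, strings and numpy arrays all count as single leaves, which is why\n    # \"abc\" vs 1.0 below agree while (0, 1) vs np.array([0, 1]) do not.\n    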
nest.assert_same_structure(structure1, structure2)\n nest.assert_same_structure(\"abc\", 1.0)\n nest.assert_same_structure(\"abc\", np.array([0, 1]))\n nest.assert_same_structure(\"abc\", constant_op.constant([0, 1]))\n\n with self.assertRaisesRegexp(ValueError,\n \"don't have the same number of elements\"):\n nest.assert_same_structure(structure1, structure_different_num_elements)\n\n with self.assertRaisesRegexp(ValueError,\n \"don't have the same number of elements\"):\n nest.assert_same_structure((0, 1), np.array([0, 1]))\n\n with self.assertRaisesRegexp(ValueError,\n \"don't have the same number of elements\"):\n nest.assert_same_structure(0, (0, 1))\n\n with self.assertRaisesRegexp(ValueError,\n \"don't have the same nested structure\"):\n nest.assert_same_structure(structure1, structure_different_nesting)\n\n named_type_0 = collections.namedtuple(\"named_0\", (\"a\", \"b\"))\n named_type_1 = collections.namedtuple(\"named_1\", (\"a\", \"b\"))\n self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),\n named_type_0(\"a\", \"b\"))\n\n nest.assert_same_structure(named_type_0(3, 4), named_type_0(\"a\", \"b\"))\n\n self.assertRaises(TypeError, nest.assert_same_structure,\n named_type_0(3, 4), named_type_1(3, 4))\n\n with self.assertRaisesRegexp(ValueError,\n \"don't have the same nested structure\"):\n nest.assert_same_structure(named_type_0(3, 4), named_type_0((3,), 4))\n\n with self.assertRaisesRegexp(ValueError,\n \"don't have the same nested structure\"):\n nest.assert_same_structure(((3,), 4), (3, (4,)))\n\n structure1_list = {\"a\": ((1, 2), 3), \"b\": 4, \"c\": (5, 6)}\n with self.assertRaisesRegexp(TypeError,\n \"don't have the same sequence type\"):\n nest.assert_same_structure(structure1, structure1_list)\n nest.assert_same_structure(structure1, structure2, check_types=False)\n nest.assert_same_structure(structure1, structure1_list, check_types=False)\n\n def testMapStructure(self):\n structure1 = (((1, 2), 3), 4, (5, 6))\n structure2 = (((7, 8), 9), 10, (11, 12))\n structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)\n nest.assert_same_structure(structure1, structure1_plus1)\n self.assertAllEqual(\n [2, 3, 4, 5, 6, 7],\n nest.flatten(structure1_plus1))\n structure1_plus_structure2 = nest.map_structure(\n lambda x, y: x + y, structure1, structure2)\n self.assertEqual(\n (((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),\n structure1_plus_structure2)\n\n self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))\n\n self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))\n\n with self.assertRaisesRegexp(TypeError, \"callable\"):\n nest.map_structure(\"bad\", structure1_plus1)\n\n with self.assertRaisesRegexp(ValueError, \"same nested structure\"):\n nest.map_structure(lambda x, y: None, 3, (3,))\n\n with self.assertRaisesRegexp(TypeError, \"same sequence type\"):\n nest.map_structure(lambda x, y: None, ((3, 4), 5), {\"a\": (3, 4), \"b\": 5})\n\n with self.assertRaisesRegexp(ValueError, \"same nested structure\"):\n nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))\n\n with self.assertRaisesRegexp(ValueError, \"same nested structure\"):\n nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),\n check_types=False)\n\n with self.assertRaisesRegexp(ValueError, \"Only valid keyword argument\"):\n nest.map_structure(lambda x: None, structure1, foo=\"a\")\n\n with self.assertRaisesRegexp(ValueError, \"Only valid keyword argument\"):\n nest.map_structure(lambda x: None, structure1, check_types=False, foo=\"a\")\n\n def 
testAssertShallowStructure(self):\n inp_ab = (\"a\", \"b\")\n inp_abc = (\"a\", \"b\", \"c\")\n expected_message = (\n \"The two structures don't have the same sequence length. Input \"\n \"structure has length 2, while shallow structure has length 3.\")\n with self.assertRaisesRegexp(ValueError, expected_message):\n nest.assert_shallow_structure(inp_abc, inp_ab)\n\n inp_ab1 = ((1, 1), (2, 2))\n inp_ab2 = {\"a\": (1, 1), \"b\": (2, 2)}\n expected_message = (\n \"The two structures don't have the same sequence type. Input structure \"\n \"has type <(type|class) 'tuple'>, while shallow structure has type \"\n \"<(type|class) 'dict'>.\")\n with self.assertRaisesRegexp(TypeError, expected_message):\n nest.assert_shallow_structure(inp_ab2, inp_ab1)\n nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)\n\n inp_ab1 = {\"a\": (1, 1), \"b\": {\"c\": (2, 2)}}\n inp_ab2 = {\"a\": (1, 1), \"b\": {\"d\": (2, 2)}}\n expected_message = (\n r\"The two structures don't have the same keys. Input \"\n r\"structure has keys \\['c'\\], while shallow structure has \"\n r\"keys \\['d'\\].\")\n with self.assertRaisesRegexp(ValueError, expected_message):\n nest.assert_shallow_structure(inp_ab2, inp_ab1)\n\n inp_ab = collections.OrderedDict([(\"a\", 1), (\"b\", (2, 3))])\n inp_ba = collections.OrderedDict([(\"b\", (2, 3)), (\"a\", 1)])\n nest.assert_shallow_structure(inp_ab, inp_ba)\n\n def testFlattenUpTo(self):\n input_tree = (((2, 2), (3, 3)), ((4, 9), (5, 5)))\n shallow_tree = ((True, True), (False, True))\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)])\n self.assertEqual(flattened_shallow_tree, [True, True, False, True])\n\n input_tree = (((\"a\", 1), ((\"b\", 2), ((\"c\", 3), ((\"d\", 4))))))\n shallow_tree = ((\"level_1\", (\"level_2\", (\"level_3\", (\"level_4\")))))\n input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,\n input_tree)\n input_tree_flattened = nest.flatten(input_tree)\n self.assertEqual(input_tree_flattened_as_shallow_tree,\n [(\"a\", 1), (\"b\", 2), (\"c\", 3), (\"d\", 4)])\n self.assertEqual(input_tree_flattened, [\"a\", 1, \"b\", 2, \"c\", 3, \"d\", 4])\n\n ## Shallow non-list edge-case.\n # Using iterable elements.\n input_tree = [\"input_tree\"]\n shallow_tree = \"shallow_tree\"\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n input_tree = (\"input_tree_0\", \"input_tree_1\")\n shallow_tree = \"shallow_tree\"\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n # Using non-iterable elements.\n input_tree = (0,)\n shallow_tree = 9\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n input_tree = (0, 1)\n shallow_tree = 9\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n 
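# flatten_up_to only descends as far as the shallow tree, so each leaf of\n    # shallow_tree is paired with the corresponding subtree of input_tree\n    # (here the four inner 2-tuples) rather than with individual scalars.\n    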
self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n ## Both non-list edge-case.\n # Using iterable elements.\n input_tree = \"input_tree\"\n shallow_tree = \"shallow_tree\"\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n # Using non-iterable elements.\n input_tree = 0\n shallow_tree = 0\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n ## Input non-list edge-case.\n # Using iterable elements.\n input_tree = \"input_tree\"\n shallow_tree = (\"shallow_tree\",)\n expected_message = (\"If shallow structure is a sequence, input must also \"\n \"be a sequence. Input has type: <(type|class) 'str'>.\")\n with self.assertRaisesRegexp(TypeError, expected_message):\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_shallow_tree, list(shallow_tree))\n\n input_tree = \"input_tree\"\n shallow_tree = (\"shallow_tree_9\", \"shallow_tree_8\")\n with self.assertRaisesRegexp(TypeError, expected_message):\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_shallow_tree, list(shallow_tree))\n\n # Using non-iterable elements.\n input_tree = 0\n shallow_tree = (9,)\n expected_message = (\"If shallow structure is a sequence, input must also \"\n \"be a sequence. 
Input has type: <(type|class) 'int'>.\")\n with self.assertRaisesRegexp(TypeError, expected_message):\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_shallow_tree, list(shallow_tree))\n\n input_tree = 0\n shallow_tree = (9, 8)\n with self.assertRaisesRegexp(TypeError, expected_message):\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_shallow_tree, list(shallow_tree))\n\n # Using dict.\n input_tree = {\"a\": ((2, 2), (3, 3)), \"b\": ((4, 9), (5, 5))}\n shallow_tree = {\"a\": (True, True), \"b\": (False, True)}\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)])\n self.assertEqual(flattened_shallow_tree, [True, True, False, True])\n\n def testMapStructureUpTo(self):\n ab_tuple = collections.namedtuple(\"ab_tuple\", \"a, b\")\n op_tuple = collections.namedtuple(\"op_tuple\", \"add, mul\")\n inp_val = ab_tuple(a=2, b=3)\n inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))\n out = nest.map_structure_up_to(\n inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)\n self.assertEqual(out.a, 6)\n self.assertEqual(out.b, 15)\n\n data_list = ((2, 4, 6, 8), ((1, 3, 5, 7, 9), (3, 5, 7)))\n name_list = (\"evens\", (\"odds\", \"primes\"))\n out = nest.map_structure_up_to(\n name_list, lambda name, sec: \"first_{}_{}\".format(len(sec), name),\n name_list, data_list)\n self.assertEqual(out, (\"first_4_evens\", (\"first_5_odds\", \"first_3_primes\")))\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for debugger functionalities in tf.Session with grpc:// URLs.\n\nThis test file focuses on the grpc:// debugging of local (non-distributed)\ntf.Sessions.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.debug.lib import debug_data\nfrom tensorflow.python.debug.lib import debug_utils\nfrom tensorflow.python.debug.lib import grpc_debug_test_server\nfrom tensorflow.python.debug.lib import session_debug_testlib\nfrom tensorflow.python.debug.wrappers import framework\nfrom tensorflow.python.debug.wrappers import grpc_wrapper\nfrom tensorflow.python.debug.wrappers import hooks\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.training import monitored_session\n\n\nclass GrpcDebugServerTest(test_util.TensorFlowTestCase):\n\n def testRepeatedRunServerRaisesException(self):\n (_, _, _, server_thread,\n server) = grpc_debug_test_server.start_server_on_separate_thread(\n poll_server=True)\n # The server is started asynchronously. 
It needs to be polled till its state\n # has become started.\n\n with self.assertRaisesRegexp(\n ValueError, \"Server has already started running\"):\n server.run_server()\n\n server.stop_server().wait()\n server_thread.join()\n\n def testRepeatedStopServerRaisesException(self):\n (_, _, _, server_thread,\n server) = grpc_debug_test_server.start_server_on_separate_thread(\n poll_server=True)\n server.stop_server().wait()\n server_thread.join()\n\n with self.assertRaisesRegexp(ValueError, \"Server has already stopped\"):\n server.stop_server().wait()\n\n def testRunServerAfterStopRaisesException(self):\n (_, _, _, server_thread,\n server) = grpc_debug_test_server.start_server_on_separate_thread(\n poll_server=True)\n server.stop_server().wait()\n server_thread.join()\n\n with self.assertRaisesRegexp(ValueError, \"Server has already stopped\"):\n server.run_server()\n\n def testStartServerWithoutBlocking(self):\n (_, _, _, server_thread,\n server) = grpc_debug_test_server.start_server_on_separate_thread(\n poll_server=True, blocking=False)\n # The thread that starts the server shouldn't block, so we should be able to\n # join it before stopping the server.\n server_thread.join()\n server.stop_server().wait()\n\n\nclass SessionDebugGrpcTest(session_debug_testlib.SessionDebugTestBase):\n\n @classmethod\n def setUpClass(cls):\n session_debug_testlib.SessionDebugTestBase.setUpClass()\n (cls._server_port, cls._debug_server_url, cls._server_dump_dir,\n cls._server_thread,\n cls._server) = grpc_debug_test_server.start_server_on_separate_thread()\n\n @classmethod\n def tearDownClass(cls):\n # Stop the test server and join the thread.\n cls._server.stop_server().wait()\n cls._server_thread.join()\n\n session_debug_testlib.SessionDebugTestBase.tearDownClass()\n\n def setUp(self):\n # Override the dump root as the test server's dump directory.\n self._dump_root = self._server_dump_dir\n\n def tearDown(self):\n if os.path.isdir(self._server_dump_dir):\n shutil.rmtree(self._server_dump_dir)\n session_debug_testlib.SessionDebugTestBase.tearDown(self)\n\n def _debug_urls(self, run_number=None):\n return [\"grpc://localhost:%d\" % self._server_port]\n\n def _debug_dump_dir(self, run_number=None):\n if run_number is None:\n return self._dump_root\n else:\n return os.path.join(self._dump_root, \"run_%d\" % run_number)\n\n def testConstructGrpcDebugWrapperSessionWithInvalidTypeRaisesException(self):\n sess = session.Session(\n config=session_debug_testlib.no_rewrite_session_config())\n with self.assertRaisesRegexp(\n TypeError, \"Expected type str or list in grpc_debug_server_addresses\"):\n grpc_wrapper.GrpcDebugWrapperSession(sess, 1337)\n\n def testConstructGrpcDebugWrapperSessionWithInvalidTypeRaisesException2(self):\n sess = session.Session(\n config=session_debug_testlib.no_rewrite_session_config())\n with self.assertRaisesRegexp(\n TypeError, \"Expected type str in list grpc_debug_server_addresses\"):\n grpc_wrapper.GrpcDebugWrapperSession(sess, [\"localhost:1337\", 1338])\n\n def testUseInvalidWatchFnTypeWithGrpcDebugWrapperSessionRaisesException(self):\n sess = session.Session(\n config=session_debug_testlib.no_rewrite_session_config())\n with self.assertRaises(TypeError):\n grpc_wrapper.GrpcDebugWrapperSession(\n sess, \"localhost:%d\" % self._server_port, watch_fn=\"foo\")\n\n def testGrpcDebugWrapperSessionWithoutWatchFnWorks(self):\n u = variables.Variable(2.1, name=\"u\")\n v = variables.Variable(20.0, name=\"v\")\n w = math_ops.multiply(u, v, name=\"w\")\n\n sess = session.Session(\n 
config=session_debug_testlib.no_rewrite_session_config())\n sess.run(u.initializer)\n sess.run(v.initializer)\n\n sess = grpc_wrapper.GrpcDebugWrapperSession(\n sess, \"localhost:%d\" % self._server_port)\n w_result = sess.run(w)\n self.assertAllClose(42.0, w_result)\n\n dump = debug_data.DebugDumpDir(self._dump_root)\n self.assertEqual(5, dump.size)\n self.assertAllClose([2.1], dump.get_tensors(\"u\", 0, \"DebugIdentity\"))\n self.assertAllClose([2.1], dump.get_tensors(\"u/read\", 0, \"DebugIdentity\"))\n self.assertAllClose([20.0], dump.get_tensors(\"v\", 0, \"DebugIdentity\"))\n self.assertAllClose([20.0], dump.get_tensors(\"v/read\", 0, \"DebugIdentity\"))\n self.assertAllClose([42.0], dump.get_tensors(\"w\", 0, \"DebugIdentity\"))\n\n def testGrpcDebugWrapperSessionWithWatchFnWorks(self):\n def watch_fn(feeds, fetch_keys):\n del feeds, fetch_keys\n return [\"DebugIdentity\", \"DebugNumericSummary\"], r\".*/read\", None\n\n u = variables.Variable(2.1, name=\"u\")\n v = variables.Variable(20.0, name=\"v\")\n w = math_ops.multiply(u, v, name=\"w\")\n\n sess = session.Session(\n config=session_debug_testlib.no_rewrite_session_config())\n sess.run(u.initializer)\n sess.run(v.initializer)\n\n sess = grpc_wrapper.GrpcDebugWrapperSession(\n sess, \"localhost:%d\" % self._server_port, watch_fn=watch_fn)\n w_result = sess.run(w)\n self.assertAllClose(42.0, w_result)\n\n dump = debug_data.DebugDumpDir(self._dump_root)\n self.assertEqual(4, dump.size)\n self.assertAllClose([2.1], dump.get_tensors(\"u/read\", 0, \"DebugIdentity\"))\n self.assertEqual(\n 14, len(dump.get_tensors(\"u/read\", 0, \"DebugNumericSummary\")[0]))\n self.assertAllClose([20.0], dump.get_tensors(\"v/read\", 0, \"DebugIdentity\"))\n self.assertEqual(\n 14, len(dump.get_tensors(\"v/read\", 0, \"DebugNumericSummary\")[0]))\n\n def testGrpcDebugHookWithStatelessWatchFnWorks(self):\n # Perform some set up. Specifically, construct a simple TensorFlow graph and\n # create a watch function for certain ops.\n def watch_fn(feeds, fetch_keys):\n del feeds, fetch_keys\n return framework.WatchOptions(\n debug_ops=[\"DebugIdentity\", \"DebugNumericSummary\"],\n node_name_regex_whitelist=r\".*/read\",\n op_type_regex_whitelist=None,\n tolerate_debug_op_creation_failures=True)\n\n u = variables.Variable(2.1, name=\"u\")\n v = variables.Variable(20.0, name=\"v\")\n w = math_ops.multiply(u, v, name=\"w\")\n\n sess = session.Session(\n config=session_debug_testlib.no_rewrite_session_config())\n sess.run(u.initializer)\n sess.run(v.initializer)\n\n # Create a hook. One could use this hook with say a tflearn Estimator.\n # However, we use a HookedSession in this test to avoid depending on the\n # internal implementation of Estimators.\n grpc_debug_hook = hooks.GrpcDebugHook(\n [\"localhost:%d\" % self._server_port], watch_fn=watch_fn)\n sess = monitored_session._HookedSession(sess, [grpc_debug_hook])\n\n # Run the hooked session. 
This should stream tensor data to the GRPC\n # endpoints.\n w_result = sess.run(w)\n\n # Verify that the hook monitored the correct tensors.\n self.assertAllClose(42.0, w_result)\n dump = debug_data.DebugDumpDir(self._dump_root)\n self.assertEqual(4, dump.size)\n self.assertAllClose([2.1], dump.get_tensors(\"u/read\", 0, \"DebugIdentity\"))\n self.assertEqual(\n 14, len(dump.get_tensors(\"u/read\", 0, \"DebugNumericSummary\")[0]))\n self.assertAllClose([20.0], dump.get_tensors(\"v/read\", 0, \"DebugIdentity\"))\n self.assertEqual(\n 14, len(dump.get_tensors(\"v/read\", 0, \"DebugNumericSummary\")[0]))\n\n def testTensorBoardDebugHookWorks(self):\n u = variables.Variable(2.1, name=\"u\")\n v = variables.Variable(20.0, name=\"v\")\n w = math_ops.multiply(u, v, name=\"w\")\n\n sess = session.Session(\n config=session_debug_testlib.no_rewrite_session_config())\n sess.run(u.initializer)\n sess.run(v.initializer)\n\n grpc_debug_hook = hooks.TensorBoardDebugHook(\n [\"localhost:%d\" % self._server_port])\n sess = monitored_session._HookedSession(sess, [grpc_debug_hook])\n\n # Activate watch point on a tensor before calling sess.run().\n self._server.request_watch(\"u/read\", 0, \"DebugIdentity\")\n self.assertAllClose(42.0, sess.run(w))\n\n # self.assertAllClose(42.0, sess.run(w))\n dump = debug_data.DebugDumpDir(self._dump_root)\n self.assertAllClose([2.1], dump.get_tensors(\"u/read\", 0, \"DebugIdentity\"))\n\n # Check that the server has received the stack trace.\n self.assertTrue(self._server.query_op_traceback(\"u\"))\n self.assertTrue(self._server.query_op_traceback(\"u/read\"))\n self.assertTrue(self._server.query_op_traceback(\"v\"))\n self.assertTrue(self._server.query_op_traceback(\"v/read\"))\n self.assertTrue(self._server.query_op_traceback(\"w\"))\n\n # Check that the server has received the python file content.\n # Query an arbitrary line to make sure that is the case.\n with open(__file__, \"rt\") as this_source_file:\n first_line = this_source_file.readline().strip()\n self.assertEqual(\n first_line, self._server.query_source_file_line(__file__, 1))\n\n self._server.clear_data()\n # Call sess.run() again, and verify that this time the traceback and source\n # code is not sent, because the graph version is not newer.\n self.assertAllClose(42.0, sess.run(w))\n with self.assertRaises(ValueError):\n self._server.query_op_traceback(\"delta_1\")\n with self.assertRaises(ValueError):\n self._server.query_source_file_line(__file__, 1)\n\n def testTensorBoardDebugHookDisablingTracebackSourceCodeSendingWorks(self):\n u = variables.Variable(2.1, name=\"u\")\n v = variables.Variable(20.0, name=\"v\")\n w = math_ops.multiply(u, v, name=\"w\")\n\n sess = session.Session(\n config=session_debug_testlib.no_rewrite_session_config())\n sess.run(variables.global_variables_initializer())\n\n grpc_debug_hook = hooks.TensorBoardDebugHook(\n [\"localhost:%d\" % self._server_port],\n send_traceback_and_source_code=False)\n sess = monitored_session._HookedSession(sess, [grpc_debug_hook])\n\n # Activate watch point on a tensor before calling sess.run().\n self._server.request_watch(\"u/read\", 0, \"DebugIdentity\")\n self.assertAllClose(42.0, sess.run(w))\n\n # Check that the server has _not_ received any tracebacks, as a result of\n # the disabling above.\n with self.assertRaisesRegexp(\n ValueError, r\"Op .*u/read.* does not exist\"):\n self.assertTrue(self._server.query_op_traceback(\"u/read\"))\n with self.assertRaisesRegexp(\n ValueError, r\".* has not received any source file\"):\n 
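# Source-file content was never sent because send_traceback_and_source_code\n      # is set to False above, so querying any line of this file raises.\n      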
self._server.query_source_file_line(__file__, 1)\n\n def testConstructGrpcDebugHookWithOrWithouGrpcInUrlWorks(self):\n hooks.GrpcDebugHook([\"grpc://foo:42424\"])\n hooks.GrpcDebugHook([\"foo:42424\"])\n\n\nclass SessionDebugConcurrentTest(\n session_debug_testlib.DebugConcurrentRunCallsTest):\n\n @classmethod\n def setUpClass(cls):\n session_debug_testlib.SessionDebugTestBase.setUpClass()\n (cls._server_port, cls._debug_server_url, cls._server_dump_dir,\n cls._server_thread,\n cls._server) = grpc_debug_test_server.start_server_on_separate_thread()\n\n @classmethod\n def tearDownClass(cls):\n # Stop the test server and join the thread.\n cls._server.stop_server().wait()\n cls._server_thread.join()\n session_debug_testlib.SessionDebugTestBase.tearDownClass()\n\n def setUp(self):\n self._num_concurrent_runs = 3\n self._dump_roots = []\n for i in range(self._num_concurrent_runs):\n self._dump_roots.append(\n os.path.join(self._server_dump_dir, \"thread%d\" % i))\n\n def tearDown(self):\n ops.reset_default_graph()\n if os.path.isdir(self._server_dump_dir):\n shutil.rmtree(self._server_dump_dir)\n\n def _get_concurrent_debug_urls(self):\n urls = []\n for i in range(self._num_concurrent_runs):\n urls.append(self._debug_server_url + \"/thread%d\" % i)\n return urls\n\n\nclass SessionDebugGrpcGatingTest(test_util.TensorFlowTestCase):\n \"\"\"Test server gating of debug ops.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n (cls._server_port_1, cls._debug_server_url_1, _, cls._server_thread_1,\n cls._server_1) = grpc_debug_test_server.start_server_on_separate_thread(\n dump_to_filesystem=False)\n (cls._server_port_2, cls._debug_server_url_2, _, cls._server_thread_2,\n cls._server_2) = grpc_debug_test_server.start_server_on_separate_thread(\n dump_to_filesystem=False)\n cls._servers_and_threads = [(cls._server_1, cls._server_thread_1),\n (cls._server_2, cls._server_thread_2)]\n\n @classmethod\n def tearDownClass(cls):\n for server, thread in cls._servers_and_threads:\n server.stop_server().wait()\n thread.join()\n\n def tearDown(self):\n ops.reset_default_graph()\n self._server_1.clear_data()\n self._server_2.clear_data()\n\n def testToggleEnableTwoDebugWatchesNoCrosstalkBetweenDebugNodes(self):\n with session.Session(\n config=session_debug_testlib.no_rewrite_session_config()) as sess:\n v_1 = variables.Variable(50.0, name=\"v_1\")\n v_2 = variables.Variable(-50.0, name=\"v_1\")\n delta_1 = constant_op.constant(5.0, name=\"delta_1\")\n delta_2 = constant_op.constant(-5.0, name=\"delta_2\")\n inc_v_1 = state_ops.assign_add(v_1, delta_1, name=\"inc_v_1\")\n inc_v_2 = state_ops.assign_add(v_2, delta_2, name=\"inc_v_2\")\n\n sess.run([v_1.initializer, v_2.initializer])\n\n run_metadata = config_pb2.RunMetadata()\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity(gated_grpc=true)\",\n \"DebugNumericSummary(gated_grpc=true)\"],\n debug_urls=[self._debug_server_url_1])\n\n for i in xrange(4):\n self._server_1.clear_data()\n\n if i % 2 == 0:\n self._server_1.request_watch(\"delta_1\", 0, \"DebugIdentity\")\n self._server_1.request_watch(\"delta_2\", 0, \"DebugIdentity\")\n self._server_1.request_unwatch(\"delta_1\", 0, \"DebugNumericSummary\")\n self._server_1.request_unwatch(\"delta_2\", 0, \"DebugNumericSummary\")\n else:\n self._server_1.request_unwatch(\"delta_1\", 0, \"DebugIdentity\")\n self._server_1.request_unwatch(\"delta_2\", 0, \"DebugIdentity\")\n self._server_1.request_watch(\"delta_1\", 0, 
\"DebugNumericSummary\")\n self._server_1.request_watch(\"delta_2\", 0, \"DebugNumericSummary\")\n\n sess.run([inc_v_1, inc_v_2],\n options=run_options, run_metadata=run_metadata)\n\n # Watched debug tensors are:\n # Run 0: delta_[1,2]:0:DebugIdentity\n # Run 1: delta_[1,2]:0:DebugNumericSummary\n # Run 2: delta_[1,2]:0:DebugIdentity\n # Run 3: delta_[1,2]:0:DebugNumericSummary\n self.assertEqual(2, len(self._server_1.debug_tensor_values))\n if i % 2 == 0:\n self.assertAllClose(\n [5.0],\n self._server_1.debug_tensor_values[\"delta_1:0:DebugIdentity\"])\n self.assertAllClose(\n [-5.0],\n self._server_1.debug_tensor_values[\"delta_2:0:DebugIdentity\"])\n else:\n self.assertAllClose(\n [[1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 5.0, 5.0, 5.0,\n 0.0, 1.0, 0.0]],\n self._server_1.debug_tensor_values[\n \"delta_1:0:DebugNumericSummary\"])\n self.assertAllClose(\n [[1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, -5.0, -5.0, -5.0,\n 0.0, 1.0, 0.0]],\n self._server_1.debug_tensor_values[\n \"delta_2:0:DebugNumericSummary\"])\n\n def testToggleWatchesOnCoreMetadata(self):\n (_, debug_server_url, _, server_thread,\n server) = grpc_debug_test_server.start_server_on_separate_thread(\n dump_to_filesystem=False,\n toggle_watch_on_core_metadata=[(\"toggled_1\", 0, \"DebugIdentity\"),\n (\"toggled_2\", 0, \"DebugIdentity\")])\n self._servers_and_threads.append((server, server_thread))\n\n with session.Session(\n config=session_debug_testlib.no_rewrite_session_config()) as sess:\n v_1 = variables.Variable(50.0, name=\"v_1\")\n v_2 = variables.Variable(-50.0, name=\"v_1\")\n # These two nodes have names that match those in the\n # toggle_watch_on_core_metadata argument used when calling\n # start_server_on_separate_thread().\n toggled_1 = constant_op.constant(5.0, name=\"toggled_1\")\n toggled_2 = constant_op.constant(-5.0, name=\"toggled_2\")\n inc_v_1 = state_ops.assign_add(v_1, toggled_1, name=\"inc_v_1\")\n inc_v_2 = state_ops.assign_add(v_2, toggled_2, name=\"inc_v_2\")\n\n sess.run([v_1.initializer, v_2.initializer])\n\n run_metadata = config_pb2.RunMetadata()\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity(gated_grpc=true)\"],\n debug_urls=[debug_server_url])\n\n for i in xrange(4):\n server.clear_data()\n\n sess.run([inc_v_1, inc_v_2],\n options=run_options, run_metadata=run_metadata)\n\n if i % 2 == 0:\n self.assertEqual(2, len(server.debug_tensor_values))\n self.assertAllClose(\n [5.0],\n server.debug_tensor_values[\"toggled_1:0:DebugIdentity\"])\n self.assertAllClose(\n [-5.0],\n server.debug_tensor_values[\"toggled_2:0:DebugIdentity\"])\n else:\n self.assertEqual(0, len(server.debug_tensor_values))\n\n def testToggleEnableTwoDebugWatchesNoCrosstalkBetweenServers(self):\n with session.Session(\n config=session_debug_testlib.no_rewrite_session_config()) as sess:\n v = variables.Variable(50.0, name=\"v\")\n delta = constant_op.constant(5.0, name=\"delta\")\n inc_v = state_ops.assign_add(v, delta, name=\"inc_v\")\n\n sess.run(v.initializer)\n\n run_metadata = config_pb2.RunMetadata()\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity(gated_grpc=true)\"],\n debug_urls=[self._debug_server_url_1, self._debug_server_url_2])\n\n for i in xrange(4):\n self._server_1.clear_data()\n self._server_2.clear_data()\n\n if i % 2 == 0:\n self._server_1.request_watch(\"delta\", 0, \"DebugIdentity\")\n 
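# Server 2 watches a different tensor than server 1; the assertions below\n          # verify that toggling watches on one server does not affect the other.\n          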
self._server_2.request_watch(\"v\", 0, \"DebugIdentity\")\n else:\n self._server_1.request_unwatch(\"delta\", 0, \"DebugIdentity\")\n self._server_2.request_unwatch(\"v\", 0, \"DebugIdentity\")\n\n sess.run(inc_v, options=run_options, run_metadata=run_metadata)\n\n if i % 2 == 0:\n self.assertEqual(1, len(self._server_1.debug_tensor_values))\n self.assertEqual(1, len(self._server_2.debug_tensor_values))\n self.assertAllClose(\n [5.0],\n self._server_1.debug_tensor_values[\"delta:0:DebugIdentity\"])\n self.assertAllClose(\n [50 + 5.0 * i],\n self._server_2.debug_tensor_values[\"v:0:DebugIdentity\"])\n else:\n self.assertEqual(0, len(self._server_1.debug_tensor_values))\n self.assertEqual(0, len(self._server_2.debug_tensor_values))\n\n def testToggleBreakpointsWorks(self):\n with session.Session(\n config=session_debug_testlib.no_rewrite_session_config()) as sess:\n v_1 = variables.Variable(50.0, name=\"v_1\")\n v_2 = variables.Variable(-50.0, name=\"v_2\")\n delta_1 = constant_op.constant(5.0, name=\"delta_1\")\n delta_2 = constant_op.constant(-5.0, name=\"delta_2\")\n inc_v_1 = state_ops.assign_add(v_1, delta_1, name=\"inc_v_1\")\n inc_v_2 = state_ops.assign_add(v_2, delta_2, name=\"inc_v_2\")\n\n sess.run([v_1.initializer, v_2.initializer])\n\n run_metadata = config_pb2.RunMetadata()\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity(gated_grpc=true)\"],\n debug_urls=[self._debug_server_url_1])\n\n for i in xrange(4):\n self._server_1.clear_data()\n\n if i in (0, 2):\n # Enable breakpoint at delta_[1,2]:0:DebugIdentity in runs 0 and 2.\n self._server_1.request_watch(\n \"delta_1\", 0, \"DebugIdentity\", breakpoint=True)\n self._server_1.request_watch(\n \"delta_2\", 0, \"DebugIdentity\", breakpoint=True)\n else:\n # Disable the breakpoint in runs 1 and 3.\n self._server_1.request_unwatch(\"delta_1\", 0, \"DebugIdentity\")\n self._server_1.request_unwatch(\"delta_2\", 0, \"DebugIdentity\")\n\n output = sess.run([inc_v_1, inc_v_2],\n options=run_options, run_metadata=run_metadata)\n self.assertAllClose([50.0 + 5.0 * (i + 1), -50 - 5.0 * (i + 1)], output)\n\n if i in (0, 2):\n # During runs 0 and 2, the server should have received the published\n # debug tensor delta:0:DebugIdentity. 
The breakpoint should have been\n # unblocked by EventReply reponses from the server.\n self.assertAllClose(\n [5.0],\n self._server_1.debug_tensor_values[\"delta_1:0:DebugIdentity\"])\n self.assertAllClose(\n [-5.0],\n self._server_1.debug_tensor_values[\"delta_2:0:DebugIdentity\"])\n # After the runs, the server should have properly registered the\n # breakpoints due to the request_unwatch calls.\n self.assertSetEqual({(\"delta_1\", 0, \"DebugIdentity\"),\n (\"delta_2\", 0, \"DebugIdentity\")},\n self._server_1.breakpoints)\n else:\n # After the end of runs 1 and 3, the server has received the requests\n # to disable the breakpoint at delta:0:DebugIdentity.\n self.assertSetEqual(set(), self._server_1.breakpoints)\n\n def testTensorBoardDebuggerWrapperToggleBreakpointsWorks(self):\n with session.Session(\n config=session_debug_testlib.no_rewrite_session_config()) as sess:\n v_1 = variables.Variable(50.0, name=\"v_1\")\n v_2 = variables.Variable(-50.0, name=\"v_2\")\n delta_1 = constant_op.constant(5.0, name=\"delta_1\")\n delta_2 = constant_op.constant(-5.0, name=\"delta_2\")\n inc_v_1 = state_ops.assign_add(v_1, delta_1, name=\"inc_v_1\")\n inc_v_2 = state_ops.assign_add(v_2, delta_2, name=\"inc_v_2\")\n\n sess.run([v_1.initializer, v_2.initializer])\n\n # The TensorBoardDebugWrapperSession should add a DebugIdentity debug op\n # with attribute gated_grpc=True for every tensor in the graph.\n sess = grpc_wrapper.TensorBoardDebugWrapperSession(\n sess, self._debug_server_url_1)\n\n for i in xrange(4):\n self._server_1.clear_data()\n\n if i in (0, 2):\n # Enable breakpoint at delta_[1,2]:0:DebugIdentity in runs 0 and 2.\n self._server_1.request_watch(\n \"delta_1\", 0, \"DebugIdentity\", breakpoint=True)\n self._server_1.request_watch(\n \"delta_2\", 0, \"DebugIdentity\", breakpoint=True)\n else:\n # Disable the breakpoint in runs 1 and 3.\n self._server_1.request_unwatch(\"delta_1\", 0, \"DebugIdentity\")\n self._server_1.request_unwatch(\"delta_2\", 0, \"DebugIdentity\")\n\n output = sess.run([inc_v_1, inc_v_2])\n self.assertAllClose([50.0 + 5.0 * (i + 1), -50 - 5.0 * (i + 1)], output)\n\n if i in (0, 2):\n # During runs 0 and 2, the server should have received the published\n # debug tensor delta:0:DebugIdentity. 
The breakpoint should have been\n # unblocked by EventReply reponses from the server.\n self.assertAllClose(\n [5.0],\n self._server_1.debug_tensor_values[\"delta_1:0:DebugIdentity\"])\n self.assertAllClose(\n [-5.0],\n self._server_1.debug_tensor_values[\"delta_2:0:DebugIdentity\"])\n # After the runs, the server should have properly registered the\n # breakpoints.\n else:\n # After the end of runs 1 and 3, the server has received the requests\n # to disable the breakpoint at delta:0:DebugIdentity.\n self.assertSetEqual(set(), self._server_1.breakpoints)\n\n if i == 0:\n # Check that the server has received the stack trace.\n self.assertTrue(self._server_1.query_op_traceback(\"delta_1\"))\n self.assertTrue(self._server_1.query_op_traceback(\"delta_2\"))\n self.assertTrue(self._server_1.query_op_traceback(\"inc_v_1\"))\n self.assertTrue(self._server_1.query_op_traceback(\"inc_v_2\"))\n # Check that the server has received the python file content.\n # Query an arbitrary line to make sure that is the case.\n with open(__file__, \"rt\") as this_source_file:\n first_line = this_source_file.readline().strip()\n self.assertEqual(\n first_line, self._server_1.query_source_file_line(__file__, 1))\n else:\n # In later Session.run() calls, the traceback shouldn't have been sent\n # because it is already sent in the 1st call. So calling\n # query_op_traceback() should lead to an exception, because the test\n # debug server clears the data at the beginning of every iteration.\n with self.assertRaises(ValueError):\n self._server_1.query_op_traceback(\"delta_1\")\n with self.assertRaises(ValueError):\n self._server_1.query_source_file_line(__file__, 1)\n\n def testTensorBoardDebuggerWrapperDisablingTracebackSourceSendingWorks(self):\n with session.Session(\n config=session_debug_testlib.no_rewrite_session_config()) as sess:\n v_1 = variables.Variable(50.0, name=\"v_1\")\n v_2 = variables.Variable(-50.0, name=\"v_2\")\n delta_1 = constant_op.constant(5.0, name=\"delta_1\")\n delta_2 = constant_op.constant(-5.0, name=\"delta_2\")\n inc_v_1 = state_ops.assign_add(v_1, delta_1, name=\"inc_v_1\")\n inc_v_2 = state_ops.assign_add(v_2, delta_2, name=\"inc_v_2\")\n\n sess.run(variables.global_variables_initializer())\n\n # Disable the sending of traceback and source code.\n sess = grpc_wrapper.TensorBoardDebugWrapperSession(\n sess, self._debug_server_url_1, send_traceback_and_source_code=False)\n\n for i in xrange(4):\n self._server_1.clear_data()\n\n if i == 0:\n self._server_1.request_watch(\n \"delta_1\", 0, \"DebugIdentity\", breakpoint=True)\n\n output = sess.run([inc_v_1, inc_v_2])\n self.assertAllClose([50.0 + 5.0 * (i + 1), -50 - 5.0 * (i + 1)], output)\n\n # No op traceback or source code should have been received by the debug\n # server due to the disabling above.\n with self.assertRaisesRegexp(\n ValueError, r\"Op .*delta_1.* does not exist\"):\n self.assertTrue(self._server_1.query_op_traceback(\"delta_1\"))\n with self.assertRaisesRegexp(\n ValueError, r\".* has not received any source file\"):\n self._server_1.query_source_file_line(__file__, 1)\n\n def testGetGrpcDebugWatchesReturnsCorrectAnswer(self):\n with session.Session() as sess:\n v = variables.Variable(50.0, name=\"v\")\n delta = constant_op.constant(5.0, name=\"delta\")\n inc_v = state_ops.assign_add(v, delta, name=\"inc_v\")\n\n sess.run(v.initializer)\n\n # Before any debugged runs, the server should be aware of no debug\n # watches.\n self.assertEqual([], self._server_1.gated_grpc_debug_watches())\n\n run_metadata = 
config_pb2.RunMetadata()\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.add_debug_tensor_watch(\n run_options, \"delta\", output_slot=0,\n debug_ops=[\"DebugNumericSummary(gated_grpc=true)\"],\n debug_urls=[self._debug_server_url_1])\n debug_utils.add_debug_tensor_watch(\n run_options, \"v\", output_slot=0,\n debug_ops=[\"DebugIdentity\"],\n debug_urls=[self._debug_server_url_1])\n sess.run(inc_v, options=run_options, run_metadata=run_metadata)\n\n # After the first run, the server should have noted the debug watches\n # for which gated_grpc == True, but not the ones with gated_grpc == False.\n self.assertEqual(1, len(self._server_1.gated_grpc_debug_watches()))\n debug_watch = self._server_1.gated_grpc_debug_watches()[0]\n self.assertEqual(\"delta\", debug_watch.node_name)\n self.assertEqual(0, debug_watch.output_slot)\n self.assertEqual(\"DebugNumericSummary\", debug_watch.debug_op)\n\n\nclass DelayedDebugServerTest(test_util.TensorFlowTestCase):\n\n def testDebuggedSessionRunWorksWithDelayedDebugServerStartup(self):\n \"\"\"Test debugged Session.run() tolerates delayed debug server startup.\"\"\"\n ops.reset_default_graph()\n\n # Start a debug server asynchronously, with a certain amount of delay.\n (debug_server_port, _, _, server_thread,\n debug_server) = grpc_debug_test_server.start_server_on_separate_thread(\n server_start_delay_sec=2.0, dump_to_filesystem=False)\n\n with self.test_session() as sess:\n a_init = constant_op.constant(42.0, name=\"a_init\")\n a = variables.Variable(a_init, name=\"a\")\n\n def watch_fn(fetches, feeds):\n del fetches, feeds\n return framework.WatchOptions(debug_ops=[\"DebugIdentity\"])\n\n sess = grpc_wrapper.GrpcDebugWrapperSession(\n sess, \"localhost:%d\" % debug_server_port, watch_fn=watch_fn)\n sess.run(a.initializer)\n self.assertAllClose(\n [42.0], debug_server.debug_tensor_values[\"a_init:0:DebugIdentity\"])\n\n debug_server.stop_server().wait()\n server_thread.join()\n\n\nif __name__ == \"__main__\":\n googletest.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for MixtureSameFamily distribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.distributions.python.ops import mixture_same_family as mixture_same_family_lib\nfrom tensorflow.contrib.distributions.python.ops import mvn_diag as mvn_diag_lib\nfrom tensorflow.contrib.distributions.python.ops import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.distributions import bernoulli as bernoulli_lib\nfrom tensorflow.python.ops.distributions import categorical as categorical_lib\nfrom tensorflow.python.ops.distributions import normal as normal_lib\nfrom tensorflow.python.platform import test\n\n\nclass MixtureSameFamilyTest(test_util.VectorDistributionTestHelpers,\n test.TestCase):\n\n def testSampleAndLogProbUnivariateShapes(self):\n with self.test_session():\n gm = mixture_same_family_lib.MixtureSameFamily(\n mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]),\n components_distribution=normal_lib.Normal(\n loc=[-1., 1], scale=[0.1, 0.5]))\n x = gm.sample([4, 5], seed=42)\n log_prob_x = gm.log_prob(x)\n self.assertEqual([4, 5], x.shape)\n self.assertEqual([4, 5], log_prob_x.shape)\n\n def testSampleAndLogProbBatch(self):\n with self.test_session():\n gm = mixture_same_family_lib.MixtureSameFamily(\n mixture_distribution=categorical_lib.Categorical(probs=[[0.3, 0.7]]),\n components_distribution=normal_lib.Normal(\n loc=[[-1., 1]], scale=[[0.1, 0.5]]))\n x = gm.sample([4, 5], seed=42)\n log_prob_x = gm.log_prob(x)\n self.assertEqual([4, 5, 1], x.shape)\n self.assertEqual([4, 5, 1], log_prob_x.shape)\n\n def testSampleAndLogProbShapesBroadcastMix(self):\n mix_probs = np.float32([.3, .7])\n bern_probs = np.float32([[.4, .6], [.25, .75]])\n with self.test_session():\n bm = mixture_same_family_lib.MixtureSameFamily(\n mixture_distribution=categorical_lib.Categorical(probs=mix_probs),\n components_distribution=bernoulli_lib.Bernoulli(probs=bern_probs))\n x = bm.sample([4, 5], seed=42)\n log_prob_x = bm.log_prob(x)\n x_ = x.eval()\n self.assertEqual([4, 5, 2], x.shape)\n self.assertEqual([4, 5, 2], log_prob_x.shape)\n self.assertAllEqual(\n np.ones_like(x_, dtype=np.bool), np.logical_or(x_ == 0., x_ == 1.))\n\n def testSampleAndLogProbMultivariateShapes(self):\n with self.test_session():\n gm = mixture_same_family_lib.MixtureSameFamily(\n mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]),\n components_distribution=mvn_diag_lib.MultivariateNormalDiag(\n loc=[[-1., 1], [1, -1]], scale_identity_multiplier=[1., 0.5]))\n x = gm.sample([4, 5], seed=42)\n log_prob_x = gm.log_prob(x)\n self.assertEqual([4, 5, 2], x.shape)\n self.assertEqual([4, 5], log_prob_x.shape)\n\n def 
testSampleAndLogProbBatchMultivariateShapes(self):\n with self.test_session():\n gm = mixture_same_family_lib.MixtureSameFamily(\n mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]),\n components_distribution=mvn_diag_lib.MultivariateNormalDiag(\n loc=[[[-1., 1],\n [1, -1]],\n [[0., 1],\n [1, 0]]],\n scale_identity_multiplier=[1., 0.5]))\n x = gm.sample([4, 5], seed=42)\n log_prob_x = gm.log_prob(x)\n self.assertEqual([4, 5, 2, 2], x.shape)\n self.assertEqual([4, 5, 2], log_prob_x.shape)\n\n def testSampleConsistentLogProb(self):\n with self.test_session() as sess:\n gm = mixture_same_family_lib.MixtureSameFamily(\n mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]),\n components_distribution=mvn_diag_lib.MultivariateNormalDiag(\n loc=[[-1., 1], [1, -1]], scale_identity_multiplier=[1., 0.5]))\n # Ball centered at component0's mean.\n self.run_test_sample_consistent_log_prob(\n sess.run, gm, radius=1., center=[-1., 1], rtol=0.02)\n # Larger ball centered at component1's mean.\n self.run_test_sample_consistent_log_prob(\n sess.run, gm, radius=1., center=[1., -1], rtol=0.02)\n\n def testLogCdf(self):\n with self.test_session() as sess:\n gm = mixture_same_family_lib.MixtureSameFamily(\n mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]),\n components_distribution=normal_lib.Normal(\n loc=[-1., 1], scale=[0.1, 0.5]))\n x = gm.sample(10, seed=42)\n actual_log_cdf = gm.log_cdf(x)\n expected_log_cdf = math_ops.reduce_logsumexp(\n (gm.mixture_distribution.logits +\n gm.components_distribution.log_cdf(x[..., array_ops.newaxis])),\n axis=1)\n actual_log_cdf_, expected_log_cdf_ = sess.run([\n actual_log_cdf, expected_log_cdf])\n self.assertAllClose(actual_log_cdf_, expected_log_cdf_,\n rtol=1e-6, atol=0.0)\n\n def testSampleConsistentMeanCovariance(self):\n with self.test_session() as sess:\n gm = mixture_same_family_lib.MixtureSameFamily(\n mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]),\n components_distribution=mvn_diag_lib.MultivariateNormalDiag(\n loc=[[-1., 1], [1, -1]], scale_identity_multiplier=[1., 0.5]))\n self.run_test_sample_consistent_mean_covariance(sess.run, gm)\n\n def testVarianceConsistentCovariance(self):\n with self.test_session() as sess:\n gm = mixture_same_family_lib.MixtureSameFamily(\n mixture_distribution=categorical_lib.Categorical(probs=[0.3, 0.7]),\n components_distribution=mvn_diag_lib.MultivariateNormalDiag(\n loc=[[-1., 1], [1, -1]], scale_identity_multiplier=[1., 0.5]))\n cov_, var_ = sess.run([gm.covariance(), gm.variance()])\n self.assertAllClose(cov_.diagonal(), var_, atol=0.)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=line-too-long\n# pylint: disable=invalid-name\n# pylint: disable=unused-import\n\"\"\"NASNet-A models for Keras.\n\nNASNet refers to Neural Architecture Search Network, a family of models\nthat were designed automatically by learning the model architectures\ndirectly on the dataset of interest.\n\nHere we consider NASNet-A, the highest performance model that was found\nfor the CIFAR-10 dataset, and then extended to ImageNet 2012 dataset,\nobtaining state of the art performance on CIFAR-10 and ImageNet 2012.\nOnly the NASNet-A models, and their respective weights, which are suited\nfor ImageNet 2012 are provided.\n\nThe below table describes the performance on ImageNet 2012:\n--------------------------------------------------------------------------------\n Architecture | Top-1 Acc | Top-5 Acc | Multiply-Adds | Params (M)\n--------------------------------------------------------------------------------\n| NASNet-A (4 @ 1056) | 74.0 % | 91.6 % | 564 M | 5.3 |\n| NASNet-A (6 @ 4032) | 82.7 % | 96.2 % | 23.8 B | 88.9 |\n--------------------------------------------------------------------------------\n\nReferences:\n - [Learning Transferable Architectures for Scalable Image Recognition]\n (https://arxiv.org/abs/1707.07012)\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.applications.imagenet_utils import _obtain_input_shape\nfrom tensorflow.python.keras.applications.imagenet_utils import decode_predictions\nfrom tensorflow.python.keras.applications.inception_v3 import preprocess_input\nfrom tensorflow.python.keras.layers import Activation\nfrom tensorflow.python.keras.layers import add\nfrom tensorflow.python.keras.layers import AveragePooling2D\nfrom tensorflow.python.keras.layers import BatchNormalization\nfrom tensorflow.python.keras.layers import concatenate\nfrom tensorflow.python.keras.layers import Conv2D\nfrom tensorflow.python.keras.layers import Cropping2D\nfrom tensorflow.python.keras.layers import Dense\nfrom tensorflow.python.keras.layers import GlobalAveragePooling2D\nfrom tensorflow.python.keras.layers import GlobalMaxPooling2D\nfrom tensorflow.python.keras.layers import Input\nfrom tensorflow.python.keras.layers import MaxPooling2D\nfrom tensorflow.python.keras.layers import SeparableConv2D\nfrom tensorflow.python.keras.layers import ZeroPadding2D\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.utils import layer_utils\nfrom tensorflow.python.keras.utils.data_utils import get_file\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util.tf_export import tf_export\n\n\nNASNET_MOBILE_WEIGHT_PATH = 
'https://github.com/fchollet/deep-learning-models/releases/download/v0.8/NASNet-mobile.h5'\nNASNET_MOBILE_WEIGHT_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.8/NASNet-mobile-no-top.h5'\nNASNET_LARGE_WEIGHT_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.8/NASNet-large.h5'\nNASNET_LARGE_WEIGHT_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.8/NASNet-large-no-top.h5'\n\n\ndef NASNet(input_shape=None,\n penultimate_filters=4032,\n num_blocks=6,\n stem_block_filters=96,\n skip_reduction=True,\n filter_multiplier=2,\n include_top=True,\n weights=None,\n input_tensor=None,\n pooling=None,\n classes=1000,\n default_size=None):\n \"\"\"Instantiates a NASNet model.\n\n Note that only TensorFlow is supported for now,\n therefore it only works with the data format\n `image_data_format='channels_last'` in your Keras config\n at `~/.keras/keras.json`.\n\n Arguments:\n input_shape: Optional shape tuple, the input shape\n is by default `(331, 331, 3)` for NASNetLarge and\n `(224, 224, 3)` for NASNetMobile.\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 32.\n E.g. `(224, 224, 3)` would be one valid value.\n penultimate_filters: Number of filters in the penultimate layer.\n NASNet models use the notation `NASNet (N @ P)`, where:\n - N is the number of blocks\n - P is the number of penultimate filters\n num_blocks: Number of repeated blocks of the NASNet model.\n NASNet models use the notation `NASNet (N @ P)`, where:\n - N is the number of blocks\n - P is the number of penultimate filters\n stem_block_filters: Number of filters in the initial stem block\n skip_reduction: Whether to skip the reduction step at the tail\n end of the network. Set to `False` for CIFAR models.\n filter_multiplier: Controls the width of the network.\n - If `filter_multiplier` < 1.0, proportionally decreases the number\n of filters in each layer.\n - If `filter_multiplier` > 1.0, proportionally increases the number\n of filters in each layer.\n - If `filter_multiplier` = 1, default number of filters from the\n paper are used at each layer.\n include_top: Whether to include the fully-connected\n layer at the top of the network.\n weights: `None` (random initialization) or\n `imagenet` (ImageNet weights)\n input_tensor: Optional Keras tensor (i.e. 
output of\n `layers.Input()`)\n to use as image input for the model.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model\n will be the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a\n 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: Optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n default_size: Specifies the default image size of the model\n\n Returns:\n A Keras model instance.\n\n Raises:\n ValueError: In case of invalid argument for `weights`,\n invalid input shape or invalid `penultimate_filters` value.\n RuntimeError: If attempting to run this model with a\n backend that does not support separable convolutions.\n \"\"\"\n if K.backend() != 'tensorflow':\n raise RuntimeError('Only Tensorflow backend is currently supported, '\n 'as other backends do not support '\n 'separable convolution.')\n\n if not (weights in {'imagenet', None} or os.path.exists(weights)):\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization), `imagenet` '\n '(pre-training on ImageNet), '\n 'or the path to the weights file to be loaded.')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as ImageNet with `include_top` '\n 'as true, `classes` should be 1000')\n\n if (isinstance(input_shape, tuple) and None in input_shape and\n weights == 'imagenet'):\n raise ValueError('When specifying the input shape of a NASNet'\n ' and loading `ImageNet` weights, '\n 'the input_shape argument must be static '\n '(no None entries). Got: `input_shape=' +\n str(input_shape) + '`.')\n\n if default_size is None:\n default_size = 331\n\n # Determine proper input shape and default size.\n input_shape = _obtain_input_shape(\n input_shape,\n default_size=default_size,\n min_size=32,\n data_format=K.image_data_format(),\n require_flatten=False,\n weights=weights)\n\n if K.image_data_format() != 'channels_last':\n logging.warning('The NASNet family of models is only available '\n 'for the input data format \"channels_last\" '\n '(width, height, channels). '\n 'However your settings specify the default '\n 'data format \"channels_first\" (channels, width, height).'\n ' You should set `image_data_format=\"channels_last\"` '\n 'in your Keras config located at ~/.keras/keras.json. '\n 'The model being returned right now will expect inputs '\n 'to follow the \"channels_last\" data format.')\n K.set_image_data_format('channels_last')\n old_data_format = 'channels_first'\n else:\n old_data_format = None\n\n if input_tensor is None:\n img_input = Input(shape=input_shape)\n else:\n if not K.is_keras_tensor(input_tensor):\n img_input = Input(tensor=input_tensor, shape=input_shape)\n else:\n img_input = input_tensor\n\n if penultimate_filters % 24 != 0:\n raise ValueError(\n 'For NASNet-A models, the value of `penultimate_filters` '\n 'needs to be divisible by 24. 
Current value: %d' % penultimate_filters)\n\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n filters = penultimate_filters // 24\n\n if not skip_reduction:\n x = Conv2D(\n stem_block_filters, (3, 3),\n strides=(2, 2),\n padding='valid',\n use_bias=False,\n name='stem_conv1',\n kernel_initializer='he_normal')(\n img_input)\n else:\n x = Conv2D(\n stem_block_filters, (3, 3),\n strides=(1, 1),\n padding='same',\n use_bias=False,\n name='stem_conv1',\n kernel_initializer='he_normal')(\n img_input)\n\n x = BatchNormalization(\n axis=channel_dim, momentum=0.9997, epsilon=1e-3, name='stem_bn1')(\n x)\n\n p = None\n if not skip_reduction: # imagenet / mobile mode\n x, p = _reduction_a_cell(\n x, p, filters // (filter_multiplier**2), block_id='stem_1')\n x, p = _reduction_a_cell(\n x, p, filters // filter_multiplier, block_id='stem_2')\n\n for i in range(num_blocks):\n x, p = _normal_a_cell(x, p, filters, block_id='%d' % (i))\n\n x, p0 = _reduction_a_cell(\n x, p, filters * filter_multiplier, block_id='reduce_%d' % (num_blocks))\n\n p = p0 if not skip_reduction else p\n\n for i in range(num_blocks):\n x, p = _normal_a_cell(\n x, p, filters * filter_multiplier, block_id='%d' % (num_blocks + i + 1))\n\n x, p0 = _reduction_a_cell(\n x,\n p,\n filters * filter_multiplier**2,\n block_id='reduce_%d' % (2 * num_blocks))\n\n p = p0 if not skip_reduction else p\n\n for i in range(num_blocks):\n x, p = _normal_a_cell(\n x,\n p,\n filters * filter_multiplier**2,\n block_id='%d' % (2 * num_blocks + i + 1))\n\n x = Activation('relu')(x)\n\n if include_top:\n x = GlobalAveragePooling2D()(x)\n x = Dense(classes, activation='softmax', name='predictions')(x)\n else:\n if pooling == 'avg':\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = layer_utils.get_source_inputs(input_tensor)\n else:\n inputs = img_input\n\n model = Model(inputs, x, name='NASNet')\n\n # load weights\n if weights == 'imagenet':\n if default_size == 224: # mobile version\n if include_top:\n weight_path = NASNET_MOBILE_WEIGHT_PATH\n model_name = 'nasnet_mobile.h5'\n else:\n weight_path = NASNET_MOBILE_WEIGHT_PATH_NO_TOP\n model_name = 'nasnet_mobile_no_top.h5'\n\n weights_file = get_file(model_name, weight_path, cache_subdir='models')\n model.load_weights(weights_file)\n\n elif default_size == 331: # large version\n if include_top:\n weight_path = NASNET_LARGE_WEIGHT_PATH\n model_name = 'nasnet_large.h5'\n else:\n weight_path = NASNET_LARGE_WEIGHT_PATH_NO_TOP\n model_name = 'nasnet_large_no_top.h5'\n\n weights_file = get_file(model_name, weight_path, cache_subdir='models')\n model.load_weights(weights_file)\n else:\n raise ValueError('ImageNet weights can only be loaded with NASNetLarge'\n ' or NASNetMobile')\n elif weights is not None:\n model.load_weights(weights)\n\n if old_data_format:\n K.set_image_data_format(old_data_format)\n\n return model\n\n\n@tf_export('keras.applications.NASNetLarge',\n 'keras.applications.nasnet.NASNetLarge')\ndef NASNetLarge(input_shape=None,\n include_top=True,\n weights='imagenet',\n input_tensor=None,\n pooling=None,\n classes=1000):\n \"\"\"Instantiates a NASNet model in ImageNet mode.\n\n Note that only TensorFlow is supported for now,\n therefore it only works with the data format\n `image_data_format='channels_last'` in your Keras config\n at `~/.keras/keras.json`.\n\n Arguments:\n input_shape: Optional shape 
tuple, only to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(331, 331, 3)` for NASNetLarge.\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 32.\n E.g. `(224, 224, 3)` would be one valid value.\n include_top: Whether to include the fully-connected\n layer at the top of the network.\n weights: `None` (random initialization) or\n `imagenet` (ImageNet weights)\n input_tensor: Optional Keras tensor (i.e. output of\n `layers.Input()`)\n to use as image input for the model.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model\n will be the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a\n 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: Optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n\n Returns:\n A Keras model instance.\n\n Raises:\n ValueError: in case of invalid argument for `weights`,\n or invalid input shape.\n RuntimeError: If attempting to run this model with a\n backend that does not support separable convolutions.\n \"\"\"\n return NASNet(\n input_shape,\n penultimate_filters=4032,\n num_blocks=6,\n stem_block_filters=96,\n skip_reduction=False,\n filter_multiplier=2,\n include_top=include_top,\n weights=weights,\n input_tensor=input_tensor,\n pooling=pooling,\n classes=classes,\n default_size=331)\n\n\n@tf_export('keras.applications.NASNetMobile',\n 'keras.applications.nasnet.NASNetMobile')\ndef NASNetMobile(input_shape=None,\n include_top=True,\n weights='imagenet',\n input_tensor=None,\n pooling=None,\n classes=1000):\n \"\"\"Instantiates a Mobile NASNet model in ImageNet mode.\n\n Note that only TensorFlow is supported for now,\n therefore it only works with the data format\n `image_data_format='channels_last'` in your Keras config\n at `~/.keras/keras.json`.\n\n Arguments:\n input_shape: Optional shape tuple, only to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(224, 224, 3)` for NASNetMobile\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 32.\n E.g. `(224, 224, 3)` would be one valid value.\n include_top: Whether to include the fully-connected\n layer at the top of the network.\n weights: `None` (random initialization) or\n `imagenet` (ImageNet weights)\n input_tensor: Optional Keras tensor (i.e. 
output of\n `layers.Input()`)\n to use as image input for the model.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model\n will be the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a\n 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: Optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n\n Returns:\n A Keras model instance.\n\n Raises:\n ValueError: In case of invalid argument for `weights`,\n or invalid input shape.\n RuntimeError: If attempting to run this model with a\n backend that does not support separable convolutions.\n \"\"\"\n return NASNet(\n input_shape,\n penultimate_filters=1056,\n num_blocks=4,\n stem_block_filters=32,\n skip_reduction=False,\n filter_multiplier=2,\n include_top=include_top,\n weights=weights,\n input_tensor=input_tensor,\n pooling=pooling,\n classes=classes,\n default_size=224)\n\n\ndef _separable_conv_block(ip,\n filters,\n kernel_size=(3, 3),\n strides=(1, 1),\n block_id=None):\n \"\"\"Adds 2 blocks of [relu-separable conv-batchnorm].\n\n Arguments:\n ip: Input tensor\n filters: Number of output filters per layer\n kernel_size: Kernel size of separable convolutions\n strides: Strided convolution for downsampling\n block_id: String block_id\n\n Returns:\n A Keras tensor\n \"\"\"\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n\n with K.name_scope('separable_conv_block_%s' % block_id):\n x = Activation('relu')(ip)\n x = SeparableConv2D(\n filters,\n kernel_size,\n strides=strides,\n name='separable_conv_1_%s' % block_id,\n padding='same',\n use_bias=False,\n kernel_initializer='he_normal')(\n x)\n x = BatchNormalization(\n axis=channel_dim,\n momentum=0.9997,\n epsilon=1e-3,\n name='separable_conv_1_bn_%s' % (block_id))(\n x)\n x = Activation('relu')(x)\n x = SeparableConv2D(\n filters,\n kernel_size,\n name='separable_conv_2_%s' % block_id,\n padding='same',\n use_bias=False,\n kernel_initializer='he_normal')(\n x)\n x = BatchNormalization(\n axis=channel_dim,\n momentum=0.9997,\n epsilon=1e-3,\n name='separable_conv_2_bn_%s' % (block_id))(\n x)\n return x\n\n\ndef _adjust_block(p, ip, filters, block_id=None):\n \"\"\"Adjusts the input `previous path` to match the shape of the `input`.\n\n Used in situations where the output number of filters needs to be changed.\n\n Arguments:\n p: Input tensor which needs to be modified\n ip: Input tensor whose shape needs to be matched\n filters: Number of output filters to be matched\n block_id: String block_id\n\n Returns:\n Adjusted Keras tensor\n \"\"\"\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n img_dim = 2 if K.image_data_format() == 'channels_first' else -2\n\n ip_shape = K.int_shape(ip)\n\n if p is not None:\n p_shape = K.int_shape(p)\n\n with K.name_scope('adjust_block'):\n if p is None:\n p = ip\n\n elif p_shape[img_dim] != ip_shape[img_dim]:\n with K.name_scope('adjust_reduction_block_%s' % block_id):\n p = Activation('relu', name='adjust_relu_1_%s' % block_id)(p)\n\n p1 = AveragePooling2D(\n (1, 1),\n strides=(2, 2),\n padding='valid',\n name='adjust_avg_pool_1_%s' % block_id)(\n p)\n p1 = Conv2D(\n filters // 2, (1, 1),\n padding='same',\n use_bias=False,\n name='adjust_conv_1_%s' % block_id,\n 
kernel_initializer='he_normal')(\n p1)\n\n p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)\n p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)\n p2 = AveragePooling2D(\n (1, 1),\n strides=(2, 2),\n padding='valid',\n name='adjust_avg_pool_2_%s' % block_id)(\n p2)\n p2 = Conv2D(\n filters // 2, (1, 1),\n padding='same',\n use_bias=False,\n name='adjust_conv_2_%s' % block_id,\n kernel_initializer='he_normal')(\n p2)\n\n p = concatenate([p1, p2], axis=channel_dim)\n p = BatchNormalization(\n axis=channel_dim,\n momentum=0.9997,\n epsilon=1e-3,\n name='adjust_bn_%s' % block_id)(\n p)\n\n elif p_shape[channel_dim] != filters:\n with K.name_scope('adjust_projection_block_%s' % block_id):\n p = Activation('relu')(p)\n p = Conv2D(\n filters, (1, 1),\n strides=(1, 1),\n padding='same',\n name='adjust_conv_projection_%s' % block_id,\n use_bias=False,\n kernel_initializer='he_normal')(\n p)\n p = BatchNormalization(\n axis=channel_dim,\n momentum=0.9997,\n epsilon=1e-3,\n name='adjust_bn_%s' % block_id)(\n p)\n return p\n\n\ndef _normal_a_cell(ip, p, filters, block_id=None):\n \"\"\"Adds a Normal cell for NASNet-A (Fig. 4 in the paper).\n\n Arguments:\n ip: Input tensor `x`\n p: Input tensor `p`\n filters: Number of output filters\n block_id: String block_id\n\n Returns:\n A Keras tensor\n \"\"\"\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n\n with K.name_scope('normal_A_block_%s' % block_id):\n p = _adjust_block(p, ip, filters, block_id)\n\n h = Activation('relu')(ip)\n h = Conv2D(\n filters, (1, 1),\n strides=(1, 1),\n padding='same',\n name='normal_conv_1_%s' % block_id,\n use_bias=False,\n kernel_initializer='he_normal')(\n h)\n h = BatchNormalization(\n axis=channel_dim,\n momentum=0.9997,\n epsilon=1e-3,\n name='normal_bn_1_%s' % block_id)(\n h)\n\n with K.name_scope('block_1'):\n x1_1 = _separable_conv_block(\n h, filters, kernel_size=(5, 5), block_id='normal_left1_%s' % block_id)\n x1_2 = _separable_conv_block(\n p, filters, block_id='normal_right1_%s' % block_id)\n x1 = add([x1_1, x1_2], name='normal_add_1_%s' % block_id)\n\n with K.name_scope('block_2'):\n x2_1 = _separable_conv_block(\n p, filters, (5, 5), block_id='normal_left2_%s' % block_id)\n x2_2 = _separable_conv_block(\n p, filters, (3, 3), block_id='normal_right2_%s' % block_id)\n x2 = add([x2_1, x2_2], name='normal_add_2_%s' % block_id)\n\n with K.name_scope('block_3'):\n x3 = AveragePooling2D(\n (3, 3),\n strides=(1, 1),\n padding='same',\n name='normal_left3_%s' % (block_id))(\n h)\n x3 = add([x3, p], name='normal_add_3_%s' % block_id)\n\n with K.name_scope('block_4'):\n x4_1 = AveragePooling2D(\n (3, 3),\n strides=(1, 1),\n padding='same',\n name='normal_left4_%s' % (block_id))(\n p)\n x4_2 = AveragePooling2D(\n (3, 3),\n strides=(1, 1),\n padding='same',\n name='normal_right4_%s' % (block_id))(\n p)\n x4 = add([x4_1, x4_2], name='normal_add_4_%s' % block_id)\n\n with K.name_scope('block_5'):\n x5 = _separable_conv_block(\n h, filters, block_id='normal_left5_%s' % block_id)\n x5 = add([x5, h], name='normal_add_5_%s' % block_id)\n\n x = concatenate(\n [p, x1, x2, x3, x4, x5],\n axis=channel_dim,\n name='normal_concat_%s' % block_id)\n return x, ip\n\n\ndef _reduction_a_cell(ip, p, filters, block_id=None):\n \"\"\"Adds a Reduction cell for NASNet-A (Fig. 
4 in the paper).\n\n Arguments:\n ip: Input tensor `x`\n p: Input tensor `p`\n filters: Number of output filters\n block_id: String block_id\n\n Returns:\n A Keras tensor\n \"\"\"\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n\n with K.name_scope('reduction_A_block_%s' % block_id):\n p = _adjust_block(p, ip, filters, block_id)\n\n h = Activation('relu')(ip)\n h = Conv2D(\n filters, (1, 1),\n strides=(1, 1),\n padding='same',\n name='reduction_conv_1_%s' % block_id,\n use_bias=False,\n kernel_initializer='he_normal')(\n h)\n h = BatchNormalization(\n axis=channel_dim,\n momentum=0.9997,\n epsilon=1e-3,\n name='reduction_bn_1_%s' % block_id)(\n h)\n\n with K.name_scope('block_1'):\n x1_1 = _separable_conv_block(\n h,\n filters, (5, 5),\n strides=(2, 2),\n block_id='reduction_left1_%s' % block_id)\n x1_2 = _separable_conv_block(\n p,\n filters, (7, 7),\n strides=(2, 2),\n block_id='reduction_1_%s' % block_id)\n x1 = add([x1_1, x1_2], name='reduction_add_1_%s' % block_id)\n\n with K.name_scope('block_2'):\n x2_1 = MaxPooling2D(\n (3, 3),\n strides=(2, 2),\n padding='same',\n name='reduction_left2_%s' % block_id)(\n h)\n x2_2 = _separable_conv_block(\n p,\n filters, (7, 7),\n strides=(2, 2),\n block_id='reduction_right2_%s' % block_id)\n x2 = add([x2_1, x2_2], name='reduction_add_2_%s' % block_id)\n\n with K.name_scope('block_3'):\n x3_1 = AveragePooling2D(\n (3, 3),\n strides=(2, 2),\n padding='same',\n name='reduction_left3_%s' % block_id)(\n h)\n x3_2 = _separable_conv_block(\n p,\n filters, (5, 5),\n strides=(2, 2),\n block_id='reduction_right3_%s' % block_id)\n x3 = add([x3_1, x3_2], name='reduction_add3_%s' % block_id)\n\n with K.name_scope('block_4'):\n x4 = AveragePooling2D(\n (3, 3),\n strides=(1, 1),\n padding='same',\n name='reduction_left4_%s' % block_id)(\n x1)\n x4 = add([x2, x4])\n\n with K.name_scope('block_5'):\n x5_1 = _separable_conv_block(\n x1, filters, (3, 3), block_id='reduction_left4_%s' % block_id)\n x5_2 = MaxPooling2D(\n (3, 3),\n strides=(2, 2),\n padding='same',\n name='reduction_right5_%s' % block_id)(\n h)\n x5 = add([x5_1, x5_2], name='reduction_add4_%s' % block_id)\n\n x = concatenate(\n [x2, x3, x4, x5],\n axis=channel_dim,\n name='reduction_concat_%s' % block_id)\n return x, ip\n", "# Copyright 2016 The TensorFlow Authors. 
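# Illustrative usage sketch for the NASNet builders defined above (not part of
# the original nasnet.py). It assumes the `tf.keras.applications` export names
# declared by the `tf_export` decorators in this file and a TF 1.x-era Keras;
# the variable names below are hypothetical.
import numpy as np
import tensorflow as tf

# Headless NASNetMobile as a feature extractor: random init (`weights=None`),
# global average pooling so the output is a 2D (batch, features) tensor.
feature_extractor = tf.keras.applications.NASNetMobile(
    input_shape=(224, 224, 3), include_top=False, weights=None, pooling='avg')
dummy_images = np.zeros((1, 224, 224, 3), dtype=np.float32)
features = feature_extractor.predict(dummy_images)
print(features.shape)  # expected (1, 1056), i.e. `penultimate_filters` for the mobile variant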
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for embedding layers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.framework import test_util as tf_test_util\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.platform import test\n\n\nclass EmbeddingTest(test.TestCase):\n\n @tf_test_util.run_in_graph_and_eager_modes(use_gpu=False)\n def test_embedding(self):\n testing_utils.layer_test(\n keras.layers.Embedding,\n kwargs={'output_dim': 4,\n 'input_dim': 10,\n 'input_length': 2},\n input_shape=(3, 2),\n input_dtype='int32',\n expected_output_dtype='float32')\n\n testing_utils.layer_test(\n keras.layers.Embedding,\n kwargs={'output_dim': 4,\n 'input_dim': 10,\n 'mask_zero': True},\n input_shape=(3, 2),\n input_dtype='int32',\n expected_output_dtype='float32')\n\n testing_utils.layer_test(\n keras.layers.Embedding,\n kwargs={'output_dim': 4,\n 'input_dim': 10,\n 'mask_zero': True},\n input_shape=(3, 4, 2),\n input_dtype='int32',\n expected_output_dtype='float32')\n\n testing_utils.layer_test(\n keras.layers.Embedding,\n kwargs={'output_dim': 4,\n 'input_dim': 10,\n 'mask_zero': True,\n 'input_length': (None, 2)},\n input_shape=(3, 4, 2),\n input_dtype='int32',\n expected_output_dtype='float32')\n\n def test_embedding_correctness(self):\n with self.test_session():\n layer = keras.layers.Embedding(output_dim=2, input_dim=2)\n layer.build((None, 2))\n matrix = np.array([[1, 1], [2, 2]])\n layer.set_weights([matrix])\n\n inputs = keras.backend.constant([[0, 1, 0]], dtype='int32')\n outputs = keras.backend.eval(layer(inputs))\n self.assertAllClose(outputs, [[[1, 1], [2, 2], [1, 1]]])\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2015 The TensorFlow Authors. 
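# The `test_embedding_correctness` case above reduces to a plain row gather:
# looking up ids [0, 1, 0] in the 2x2 weight matrix returns rows 0, 1, 0.
# A minimal NumPy sketch of that expectation (illustrative, not part of the
# original test file):
import numpy as np

embedding_matrix = np.array([[1., 1.], [2., 2.]])  # the weights set via set_weights
token_ids = np.array([0, 1, 0])
looked_up = embedding_matrix[token_ids]            # row gather, shape (3, 2)
assert looked_up.tolist() == [[1., 1.], [2., 2.], [1., 1.]]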
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Utilities for dealing with Tensors.\n\nSee @{$python/contrib.util} guide.\n\n@@constant_value\n@@make_tensor_proto\n@@make_ndarray\n@@ops_used_by_graph_def\n@@stripped_op_list_for_graph\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint: disable=unused-import\nfrom tensorflow.python.framework.meta_graph import ops_used_by_graph_def\nfrom tensorflow.python.framework.meta_graph import stripped_op_list_for_graph\nfrom tensorflow.python.framework.tensor_util import constant_value\nfrom tensorflow.python.framework.tensor_util import make_tensor_proto\nfrom tensorflow.python.framework.tensor_util import MakeNdarray as make_ndarray\n# pylint: disable=unused_import\nfrom tensorflow.python.util.all_util import remove_undocumented\nremove_undocumented(__name__)\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Utility functions for training.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import graph_io\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util.tf_export import tf_export\n\n# Picked a long key value to minimize the chance of collision with user defined\n# collection keys.\nGLOBAL_STEP_READ_KEY = 'global_step_read_op_cache'\n\n\n# TODO(drpng): remove this after legacy uses are resolved.\nwrite_graph = graph_io.write_graph\n\n\n@tf_export('train.global_step')\ndef global_step(sess, global_step_tensor):\n \"\"\"Small helper to get the global step.\n\n ```python\n # Creates a variable to hold the global_step.\n global_step_tensor = tf.Variable(10, trainable=False, name='global_step')\n # Creates a session.\n sess = tf.Session()\n # Initializes the variable.\n print('global_step: %s' % 
tf.train.global_step(sess, global_step_tensor))\n\n global_step: 10\n ```\n\n Args:\n sess: A TensorFlow `Session` object.\n global_step_tensor: `Tensor` or the `name` of the operation that contains\n the global step.\n\n Returns:\n The global step value.\n \"\"\"\n if context.executing_eagerly():\n return int(global_step_tensor.numpy())\n return int(sess.run(global_step_tensor))\n\n\n@tf_export('train.get_global_step')\ndef get_global_step(graph=None):\n \"\"\"Get the global step tensor.\n\n The global step tensor must be an integer variable. We first try to find it\n in the collection `GLOBAL_STEP`, or by name `global_step:0`.\n\n Args:\n graph: The graph to find the global step in. If missing, use default graph.\n\n Returns:\n The global step variable, or `None` if none was found.\n\n Raises:\n TypeError: If the global step tensor has a non-integer type, or if it is not\n a `Variable`.\n \"\"\"\n graph = graph or ops.get_default_graph()\n global_step_tensor = None\n global_step_tensors = graph.get_collection(ops.GraphKeys.GLOBAL_STEP)\n if len(global_step_tensors) == 1:\n global_step_tensor = global_step_tensors[0]\n elif not global_step_tensors:\n try:\n global_step_tensor = graph.get_tensor_by_name('global_step:0')\n except KeyError:\n return None\n else:\n logging.error('Multiple tensors in global_step collection.')\n return None\n\n assert_global_step(global_step_tensor)\n return global_step_tensor\n\n\n@tf_export('train.create_global_step')\ndef create_global_step(graph=None):\n \"\"\"Create global step tensor in graph.\n\n Args:\n graph: The graph in which to create the global step tensor. If missing,\n use default graph.\n\n Returns:\n Global step tensor.\n\n Raises:\n ValueError: if global step tensor is already defined.\n \"\"\"\n graph = graph or ops.get_default_graph()\n if get_global_step(graph) is not None:\n raise ValueError('\"global_step\" already exists.')\n if context.executing_eagerly():\n with ops.device('cpu:0'):\n return variable_scope.get_variable(\n ops.GraphKeys.GLOBAL_STEP,\n shape=[],\n dtype=dtypes.int64,\n initializer=init_ops.zeros_initializer(),\n trainable=False,\n collections=[ops.GraphKeys.GLOBAL_VARIABLES,\n ops.GraphKeys.GLOBAL_STEP])\n # Create in proper graph and base name_scope.\n with graph.as_default() as g, g.name_scope(None):\n return variable_scope.get_variable(\n ops.GraphKeys.GLOBAL_STEP,\n shape=[],\n dtype=dtypes.int64,\n initializer=init_ops.zeros_initializer(),\n trainable=False,\n collections=[ops.GraphKeys.GLOBAL_VARIABLES,\n ops.GraphKeys.GLOBAL_STEP])\n\n\n@tf_export('train.get_or_create_global_step')\ndef get_or_create_global_step(graph=None):\n \"\"\"Returns and create (if necessary) the global step tensor.\n\n Args:\n graph: The graph in which to create the global step tensor. If missing, use\n default graph.\n\n Returns:\n The global step tensor.\n \"\"\"\n graph = graph or ops.get_default_graph()\n global_step_tensor = get_global_step(graph)\n if global_step_tensor is None:\n global_step_tensor = create_global_step(graph)\n return global_step_tensor\n\n\n@tf_export('train.assert_global_step')\ndef assert_global_step(global_step_tensor):\n \"\"\"Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.\n\n Args:\n global_step_tensor: `Tensor` to test.\n \"\"\"\n if not (isinstance(global_step_tensor, variables.Variable) or\n isinstance(global_step_tensor, ops.Tensor) or\n resource_variable_ops.is_resource_variable(global_step_tensor)):\n raise TypeError(\n 'Existing \"global_step\" must be a Variable or Tensor: %s.' 
%\n global_step_tensor)\n\n if not global_step_tensor.dtype.base_dtype.is_integer:\n raise TypeError('Existing \"global_step\" does not have integer type: %s' %\n global_step_tensor.dtype)\n\n if (global_step_tensor.get_shape().ndims != 0 and\n global_step_tensor.get_shape().is_fully_defined()):\n raise TypeError('Existing \"global_step\" is not scalar: %s' %\n global_step_tensor.get_shape())\n\n\ndef _get_global_step_read(graph=None):\n \"\"\"Gets global step read tensor in graph.\n\n Args:\n graph: The graph in which to create the global step read tensor. If missing,\n use default graph.\n\n Returns:\n Global step read tensor.\n\n Raises:\n RuntimeError: if multiple items found in collection GLOBAL_STEP_READ_KEY.\n \"\"\"\n graph = graph or ops.get_default_graph()\n global_step_read_tensors = graph.get_collection(GLOBAL_STEP_READ_KEY)\n if len(global_step_read_tensors) > 1:\n raise RuntimeError('There are multiple items in collection {}. '\n 'There should be only one.'.format(GLOBAL_STEP_READ_KEY))\n\n if len(global_step_read_tensors) == 1:\n return global_step_read_tensors[0]\n return None\n\n\ndef _get_or_create_global_step_read(graph=None):\n \"\"\"Gets or creates global step read tensor in graph.\n\n Args:\n graph: The graph in which to create the global step read tensor. If missing,\n use default graph.\n\n Returns:\n Global step read tensor if there is global_step_tensor else return None.\n \"\"\"\n graph = graph or ops.get_default_graph()\n global_step_read_tensor = _get_global_step_read(graph)\n if global_step_read_tensor is not None:\n return global_step_read_tensor\n global_step_tensor = get_global_step(graph)\n if global_step_tensor is None:\n return None\n # add 'zero' so that it will create a copy of variable as Tensor.\n with graph.as_default() as g, g.name_scope(None):\n with g.name_scope(global_step_tensor.op.name + '/'):\n # using initialized_value to ensure that global_step is initialized before\n # this run. This is needed for example Estimator makes all model_fn build\n # under global_step_read_tensor dependency.\n global_step_value = global_step_tensor.initialized_value() if isinstance(\n global_step_tensor, variables.Variable) else global_step_tensor\n global_step_read_tensor = global_step_value + 0\n ops.add_to_collection(GLOBAL_STEP_READ_KEY, global_step_read_tensor)\n return _get_global_step_read(graph)\n\n\ndef _increment_global_step(increment, graph=None):\n graph = graph or ops.get_default_graph()\n global_step_tensor = get_global_step(graph)\n if global_step_tensor is None:\n raise ValueError(\n 'Global step tensor should be created by '\n 'tf.train.get_or_create_global_step before calling increment.')\n global_step_read_tensor = _get_or_create_global_step_read(graph)\n with graph.as_default() as g, g.name_scope(None):\n with g.name_scope(global_step_tensor.op.name + '/'):\n with ops.control_dependencies([global_step_read_tensor]):\n return state_ops.assign_add(global_step_tensor, increment)\n", "# Copyright 2015 The TensorFlow Authors. 
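# Minimal sketch of the global-step helpers defined in training_util above,
# assuming TF 1.x graph mode (the `tf.train.*` surface this module exports);
# illustrative only, variable names are hypothetical.
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    step = tf.train.create_global_step()          # registers GLOBAL_STEP in the graph
    bump = tf.assign_add(step, 1)
    with tf.Session(graph=graph) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(bump)
        # `tf.train.global_step` reads the current value, as in the docstring above.
        print(tf.train.global_step(sess, step))   # -> 1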
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional test for slot_creator.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import slot_creator\n\n\nclass SlotCreatorTest(test.TestCase):\n\n def testCreateSlotFromVariable(self):\n with self.test_session():\n v = variables.Variable([1.0, 2.5], name=\"var\")\n slot = slot_creator.create_slot(v, v.initialized_value(), name=\"slot\")\n\n variables.global_variables_initializer().run()\n\n self.assertEqual(\"var/slot\", slot.op.name)\n self.assertEqual([2], slot.get_shape().as_list())\n self.assertEqual(dtypes.float32, slot.dtype.base_dtype)\n self.assertAllEqual([1.0, 2.5], slot.eval())\n\n def testCreateSlotFromTensor(self):\n with self.test_session():\n v = constant_op.constant([1.0, 2.5], name=\"const\")\n slot = slot_creator.create_slot(v, v * 2, name=\"slot\")\n\n variables.global_variables_initializer().run()\n\n self.assertEqual(\"const/slot\", slot.op.name)\n self.assertEqual([2], slot.get_shape().as_list())\n self.assertEqual(dtypes.float32, slot.dtype.base_dtype)\n self.assertAllEqual([2.0, 5.0], slot.eval())\n\n def testCreateZerosSlotFromVariable(self):\n with self.test_session():\n v = variables.Variable([1.0, 2.5], name=\"var\")\n with ops.control_dependencies(None):\n slot = slot_creator.create_zeros_slot(\n v, name=\"slot\", dtype=dtypes.float64)\n\n variables.global_variables_initializer().run()\n\n self.assertEqual(\"var/slot\", slot.op.name)\n self.assertEqual([2], slot.get_shape().as_list())\n self.assertEqual(dtypes.float64, slot.dtype.base_dtype)\n self.assertAllEqual([0.0, 0.0], slot.eval())\n\n def testCreateZerosSlotFromDynamicShapedVariable(self):\n with self.test_session():\n dyn_shape = constant_op.constant([2], dtype=dtypes.int32)\n dyn_shape = array_ops.placeholder_with_default(dyn_shape,\n shape=[None])\n v = variable_scope.get_variable(\n \"var\",\n initializer=random_ops.random_uniform(dyn_shape,\n dtype=dtypes.float64),\n validate_shape=False)\n with ops.control_dependencies(None):\n slot = slot_creator.create_zeros_slot(\n v, name=\"slot\", dtype=dtypes.float64)\n\n variables.global_variables_initializer().run()\n\n self.assertEqual(\"var/slot\", slot.op.name)\n self.assertEqual([2], array_ops.shape(slot).eval())\n self.assertEqual(dtypes.float64, slot.dtype.base_dtype)\n self.assertAllEqual([0.0, 0.0], slot.eval())\n\n def testCreateZerosSlotFromTensor(self):\n with self.test_session():\n v = constant_op.constant([1.0, 
2.5], name=\"const\")\n with ops.control_dependencies(None):\n slot = slot_creator.create_zeros_slot(v, name=\"slot\")\n\n variables.global_variables_initializer().run()\n\n self.assertEqual(\"const/slot\", slot.op.name)\n self.assertEqual([2], slot.get_shape().as_list())\n self.assertEqual(dtypes.float32, slot.dtype.base_dtype)\n self.assertAllEqual([0.0, 0.0], slot.eval())\n\n def testCreateZerosSlotFromDynamicShapedTensor(self):\n with self.test_session():\n v = random_ops.random_uniform([2], dtype=dtypes.float64)\n v = array_ops.placeholder_with_default(v, shape=[None], name=\"const\")\n with ops.control_dependencies(None):\n slot = slot_creator.create_zeros_slot(\n v, name=\"slot\", dtype=dtypes.float64)\n\n variables.global_variables_initializer().run()\n\n self.assertEqual(\"const/slot\", slot.op.name)\n self.assertEqual([2], array_ops.shape(slot).eval())\n self.assertEqual(dtypes.float64, slot.dtype.base_dtype)\n self.assertAllEqual([0.0, 0.0], slot.eval())\n\n def testCreateSlotFromVariableRespectsScope(self):\n # See discussion on #2740.\n with self.test_session():\n with variable_scope.variable_scope(\"scope\"):\n v = variables.Variable([1.0, 2.5], name=\"var\")\n slot = slot_creator.create_slot(v, v.initialized_value(), name=\"slot\")\n self.assertEqual(\"scope/scope/var/slot\", slot.op.name)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for scan ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\ndef numpy_reverse(x, axis):\n length = len(x.shape)\n if axis < 0:\n axis = length + axis\n\n ix = [\n slice(None, None, -1) if i == axis else slice(None) for i in range(length)\n ]\n return x[ix]\n\n\ndef handle_options(func, x, axis, exclusive, reverse):\n \"\"\"Adds tf options to numpy scan ops.\"\"\"\n length = len(x.shape)\n if axis < 0:\n axis = length + axis\n\n if reverse:\n x = numpy_reverse(x, axis)\n\n if exclusive:\n ix_head = [slice(0, 1) if i == axis else slice(None) for i in range(length)]\n ix_init = [\n slice(0, -1) if i == axis else slice(None) for i in range(length)\n ]\n if func == np.cumsum:\n init = np.zeros_like(x[ix_head])\n elif func == np.cumprod:\n init = np.ones_like(x[ix_head])\n else:\n raise ValueError(\"Unknown scan function.\")\n x = np.concatenate([init, func(x[ix_init], axis)], axis=axis)\n else:\n x = func(x, axis=axis)\n\n if reverse:\n x = numpy_reverse(x, axis)\n return x\n\n\nclass 
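# `handle_options` above emulates the `exclusive` / `reverse` flags of the TF
# scan ops on top of NumPy. A minimal sketch of the same semantics for cumsum
# (illustrative; the input array is arbitrary):
import numpy as np

x = np.array([1., 2., 3., 4.])
inclusive = np.cumsum(x)                               # [ 1.,  3.,  6., 10.]
exclusive = np.concatenate([[0.], np.cumsum(x)[:-1]])  # [ 0.,  1.,  3.,  6.]
reverse = np.cumsum(x[::-1])[::-1]                     # [10.,  9.,  7.,  4.]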
CumsumTest(test.TestCase):\n\n valid_dtypes = [\n np.int32, np.int64, np.float16, np.float32, np.float64, np.complex64,\n np.complex128\n ]\n\n def _compare(self, x, axis, exclusive, reverse):\n np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)\n with self.test_session(use_gpu=True):\n tf_out = math_ops.cumsum(x, axis, exclusive, reverse).eval()\n\n self.assertAllClose(np_out, tf_out)\n\n def _compareAll(self, x, axis):\n for exclusive in [True, False]:\n for reverse in [True, False]:\n self._compare(x, axis, exclusive, reverse)\n\n def testEmpty(self):\n for dtype in self.valid_dtypes:\n x = np.zeros([0]).astype(dtype)\n for axis in (-1, 0):\n self._compareAll(x, axis)\n\n def testAxisType(self):\n for dtype in self.valid_dtypes:\n x = np.arange(1, 6).reshape([5]).astype(dtype)\n for axis_dtype in [dtypes.int64, dtypes.int32]:\n with self.test_session(use_gpu=True):\n axis = constant_op.constant(0, axis_dtype)\n tf_out = math_ops.cumsum(x, axis).eval()\n\n def test1D(self):\n for dtype in self.valid_dtypes:\n x = np.arange(1, 6).reshape([5]).astype(dtype)\n for axis in (-1, 0):\n self._compareAll(x, axis)\n\n def test2D(self):\n for dtype in self.valid_dtypes:\n x = np.arange(0, 10).reshape([2, 5]).astype(dtype)\n for axis in (-2, -1, 0, 1):\n self._compareAll(x, axis)\n\n def test3D(self):\n for dtype in self.valid_dtypes:\n x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)\n for axis in (-3, -2, -1, 0, 1, 2):\n self._compareAll(x, axis)\n\n def test6D(self):\n for dtype in self.valid_dtypes:\n x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)\n for axis in range(-6, 6, 3):\n self._compareAll(x, axis)\n\n def testInvalidAxis(self):\n x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)\n input_tensor = ops.convert_to_tensor(x)\n with self.test_session(use_gpu=True):\n with self.assertRaisesWithPredicateMatch(\n errors_impl.InvalidArgumentError,\n lambda e: \"Expected scan axis in the range [-2, 2)\" in str(e)):\n math_ops.cumsum(input_tensor, -3).eval()\n with self.assertRaisesWithPredicateMatch(\n errors_impl.InvalidArgumentError,\n lambda e: \"Expected scan axis in the range [-2, 2)\" in str(e)):\n math_ops.cumsum(input_tensor, 2).eval()\n with self.assertRaisesWithPredicateMatch(\n errors_impl.InvalidArgumentError,\n lambda e: \"axis must be a scalar\" in str(e)):\n math_ops.cumsum(input_tensor, [0]).eval()\n\n def _compareGradient(self, shape, axis, exclusive, reverse):\n x = np.arange(0, 50).reshape(shape).astype(np.float64)\n with self.test_session(use_gpu=True):\n t = ops.convert_to_tensor(x)\n result = math_ops.cumsum(t, axis, exclusive, reverse)\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n t, shape, result, shape, x_init_value=x, delta=1)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)\n\n def testGradient(self):\n for axis in (-1, 0):\n self._compareGradient([50], axis, False, False)\n\n def testGradientReverse(self):\n for axis in (-1, 0):\n self._compareGradient([50], axis, False, True)\n\n def testGradientExclusive(self):\n for axis in (-1, 0):\n self._compareGradient([50], axis, True, False)\n\n def testGradientExclusiveReverse(self):\n for axis in (-1, 0):\n self._compareGradient([50], axis, True, True)\n\n def testGradient2D(self):\n for axis in (-1, 0, 1):\n for exclusive in [True, False]:\n for reverse in [True, False]:\n self._compareGradient([5, 10], axis, exclusive, reverse)\n\n\nclass CumprodTest(test.TestCase):\n\n valid_dtypes = [\n np.int32, np.int64, np.float16, np.float32, np.float64, np.complex64,\n 
np.complex128\n ]\n\n def _compare(self, x, axis, exclusive, reverse):\n np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)\n with self.test_session(use_gpu=True):\n tf_out = math_ops.cumprod(x, axis, exclusive, reverse).eval()\n\n self.assertAllClose(np_out, tf_out)\n\n def _compareAll(self, x, axis):\n for exclusive in [True, False]:\n for reverse in [True, False]:\n self._compare(x, axis, exclusive, reverse)\n\n def testEmpty(self):\n for dtype in self.valid_dtypes:\n x = np.zeros([0]).astype(dtype)\n for axis in (-1, 0):\n self._compareAll(x, axis)\n\n def testAxisType(self):\n for dtype in self.valid_dtypes:\n x = np.arange(1, 6).reshape([5]).astype(dtype)\n for axis_dtype in [dtypes.int64, dtypes.int32]:\n with self.test_session(use_gpu=True):\n axis = constant_op.constant(0, axis_dtype)\n tf_out = math_ops.cumprod(x, axis).eval()\n\n def test1D(self):\n for dtype in self.valid_dtypes:\n x = np.arange(1, 6).reshape([5]).astype(dtype)\n for axis in (-1, 0):\n self._compareAll(x, axis)\n\n def test2D(self):\n for dtype in self.valid_dtypes:\n x = np.arange(1, 11).reshape([2, 5]).astype(dtype)\n for axis in (-2, -1, 0, 1):\n self._compareAll(x, axis)\n\n def test3D(self):\n for dtype in self.valid_dtypes:\n x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)\n for axis in (-3, -2, -1, 0, 1, 2):\n self._compareAll(x, axis)\n\n def test6D(self):\n for dtype in self.valid_dtypes:\n x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)\n for axis in range(-6, 6, 3):\n self._compareAll(x, axis)\n\n def testInvalidAxis(self):\n x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)\n input_tensor = ops.convert_to_tensor(x)\n with self.test_session(use_gpu=True):\n with self.assertRaisesWithPredicateMatch(\n errors_impl.InvalidArgumentError,\n lambda e: \"Expected scan axis in the range [-2, 2)\" in str(e)):\n math_ops.cumprod(input_tensor, -3).eval()\n with self.assertRaisesWithPredicateMatch(\n errors_impl.InvalidArgumentError,\n lambda e: \"Expected scan axis in the range [-2, 2)\" in str(e)):\n math_ops.cumprod(input_tensor, 2).eval()\n with self.assertRaisesWithPredicateMatch(\n errors_impl.InvalidArgumentError,\n lambda e: \"axis must be a scalar\" in str(e)):\n math_ops.cumprod(input_tensor, [0]).eval()\n\n def _compareGradient(self, shape, axis, exclusive, reverse):\n x = np.arange(1, 9).reshape(shape).astype(np.float64)\n with self.test_session(use_gpu=True):\n t = ops.convert_to_tensor(x)\n result = math_ops.cumprod(t, axis, exclusive, reverse)\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n t, shape, result, shape, x_init_value=x, delta=1)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)\n\n def testGradient(self):\n for axis in (-1, 0):\n self._compareGradient([8], axis, False, False)\n\n def testGradientReverse(self):\n for axis in (-1, 0):\n self._compareGradient([8], axis, False, True)\n\n def testGradientExclusive(self):\n for axis in (-1, 0):\n self._compareGradient([8], axis, True, False)\n\n def testGradientExclusiveReverse(self):\n for axis in (-1, 0):\n self._compareGradient([8], axis, True, True)\n\n def testGradient2D(self):\n for axis in (-2, -1, 0, 1):\n for exclusive in [True, False]:\n for reverse in [True, False]:\n self._compareGradient([2, 4], axis, exclusive, reverse)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for head.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport six\n\nfrom tensorflow.core.framework import summary_pb2\nfrom tensorflow.python.estimator import model_fn\nfrom tensorflow.python.estimator.canned import dnn_testing_utils\nfrom tensorflow.python.estimator.canned import head as head_lib\nfrom tensorflow.python.estimator.canned import metric_keys\nfrom tensorflow.python.estimator.canned import prediction_keys\nfrom tensorflow.python.estimator.inputs import numpy_io\nfrom tensorflow.python.feature_column import feature_column as feature_column_lib\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops.losses import losses\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import queue_runner_impl\n\n\n_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\n\n\ndef _initialize_variables(test_case, scaffold):\n scaffold.finalize()\n test_case.assertIsNone(scaffold.init_feed_dict)\n test_case.assertIsNone(scaffold.init_fn)\n scaffold.init_op.run()\n scaffold.ready_for_local_init_op.eval()\n scaffold.local_init_op.run()\n scaffold.ready_op.eval()\n test_case.assertIsNotNone(scaffold.saver)\n\n\ndef _assert_simple_summaries(test_case, expected_summaries, summary_str,\n tol=1e-6):\n \"\"\"Assert summary the specified simple values.\n\n Args:\n test_case: test case.\n expected_summaries: Dict of expected tags and simple values.\n summary_str: Serialized `summary_pb2.Summary`.\n tol: Tolerance for relative and absolute.\n \"\"\"\n summary = summary_pb2.Summary()\n summary.ParseFromString(summary_str)\n test_case.assertAllClose(expected_summaries, {\n v.tag: v.simple_value for v in summary.value\n }, rtol=tol, atol=tol)\n\n\ndef _assert_no_hooks(test_case, spec):\n test_case.assertAllEqual([], spec.training_chief_hooks)\n test_case.assertAllEqual([], spec.training_hooks)\n\n\ndef _sigmoid(logits):\n return 1 / (1 + np.exp(-logits))\n\n\nclass CreateEstimatorSpecTest(test.TestCase):\n\n class _HeadWithTPUSupport(head_lib._Head):\n \"\"\"Head that overrides _create_tpu_estimator_spec.\"\"\"\n\n def name(self):\n return 'HeadWithTPUSupport'\n\n def logits_dimension(self):\n 
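# `_assert_simple_summaries` above parses a serialized Summary proto and
# compares its tag -> simple_value map. A minimal round-trip sketch using the
# same `summary_pb2` import (illustrative tag and value):
from tensorflow.core.framework import summary_pb2

summary = summary_pb2.Summary()
summary.value.add(tag='loss', simple_value=0.5)
serialized = summary.SerializeToString()

parsed = summary_pb2.Summary()
parsed.ParseFromString(serialized)
assert {v.tag: v.simple_value for v in parsed.value} == {'loss': 0.5}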
return None\n\n def create_loss(self, features, mode, logits, labels):\n return None\n\n def _create_tpu_estimator_spec(self, features, mode, logits, labels=None,\n optimizer=None, train_op_fn=None,\n regularization_losses=None):\n return model_fn._TPUEstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n loss=constant_op.constant(0.0, dtype=dtypes.float32))\n\n class _HeadWithOutTPUSupport(head_lib._Head):\n \"\"\"Head that overrides create_estimator_spec.\"\"\"\n\n def name(self):\n return 'HeadWithOutTPUSupport'\n\n def logits_dimension(self):\n return None\n\n def create_loss(self, features, mode, logits, labels):\n return None\n\n def create_estimator_spec(self, features, mode, logits, labels=None,\n optimizer=None, train_op_fn=None,\n regularization_losses=None):\n return model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n loss=constant_op.constant(0.0, dtype=dtypes.float32))\n\n class _InvalidHead(head_lib._Head):\n \"\"\"Head that overrides neither estimator_spec functions.\"\"\"\n\n def name(self):\n return 'InvalidHead'\n\n def logits_dimension(self):\n return None\n\n def create_loss(self, features, mode, logits, labels):\n return None\n\n def test_head_override_tpu_estimator_spec(self):\n \"\"\"Test for `_Head` that overrides _create_tpu_estimator_spec.\"\"\"\n head = self._HeadWithTPUSupport()\n\n tpu_spec = head._create_tpu_estimator_spec(\n features=None, mode=None, logits=None)\n self.assertTrue(isinstance(tpu_spec, model_fn._TPUEstimatorSpec))\n est_spec = head.create_estimator_spec(\n features=None, mode=None, logits=None)\n self.assertTrue(isinstance(est_spec, model_fn.EstimatorSpec))\n\n def test_head_override_estimator_spec(self):\n \"\"\"Test for `_Head` that overrides create_estimator_spec.\"\"\"\n head = self._HeadWithOutTPUSupport()\n\n with self.assertRaisesRegexp(\n NotImplementedError,\n 'TPUEstimatorSpec not available for this model head.'):\n _ = head._create_tpu_estimator_spec(\n features=None, mode=None, logits=None)\n est_spec = head.create_estimator_spec(\n features=None, mode=None, logits=None)\n self.assertTrue(isinstance(est_spec, model_fn.EstimatorSpec))\n\n def test_invalid_head_class(self):\n head = self._InvalidHead()\n\n with self.assertRaisesRegexp(\n NotImplementedError,\n 'TPUEstimatorSpec not available for this model head.'):\n _ = head._create_tpu_estimator_spec(\n features=None, mode=None, logits=None)\n with self.assertRaisesRegexp(\n NotImplementedError,\n r'Subclasses of _Head must implement `create_estimator_spec\\(\\)` or '\n r'_create_tpu_estimator_spec\\(\\).'):\n _ = head.create_estimator_spec(\n features=None, mode=None, logits=None)\n\n\nclass MultiClassHeadWithSoftmaxCrossEntropyLoss(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def test_n_classes_is_none(self):\n with self.assertRaisesRegexp(ValueError, 'n_classes must be > 2'):\n head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=None)\n\n def test_n_classes_is_2(self):\n with self.assertRaisesRegexp(ValueError, 'n_classes must be > 2'):\n head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=2)\n\n def test_invalid_loss_reduction(self):\n with self.assertRaisesRegexp(\n ValueError, r'Invalid loss_reduction: invalid_loss_reduction'):\n head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, loss_reduction='invalid_loss_reduction')\n with self.assertRaisesRegexp(\n ValueError, r'Invalid loss_reduction: none'):\n head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, 
loss_reduction=losses.Reduction.NONE)\n\n def test_loss_fn_arg_labels_missing(self):\n def _loss_fn(logits):\n del logits # Unused\n with self.assertRaisesRegexp(\n ValueError,\n r'loss_fn must contain argument: labels\\. '\n r'Given arguments: \\(\\'logits\\',\\)'):\n head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, loss_fn=_loss_fn)\n\n def test_loss_fn_arg_logits_missing(self):\n def _loss_fn(labels):\n del labels # unused\n with self.assertRaisesRegexp(\n ValueError,\n r'loss_fn must contain argument: logits\\. '\n r'Given arguments: \\(\\'labels\\',\\)'):\n head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, loss_fn=_loss_fn)\n\n def test_loss_fn_arg_features_ok(self):\n def _loss_fn(labels, logits, features):\n del labels, logits, features # Unused\n head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, loss_fn=_loss_fn)\n\n def test_loss_fn_arg_invalid(self):\n def _loss_fn(labels, logits, name=None):\n del labels, logits, name # Unused\n with self.assertRaisesRegexp(\n ValueError,\n r'loss_fn has unexpected args: \\[\\'name\\'\\]'):\n head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, loss_fn=_loss_fn)\n\n def test_invalid_logits_shape(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)\n self.assertEqual(n_classes, head.logits_dimension)\n\n # Logits should be shape (batch_size, 3).\n logits_2x2 = np.array(((45., 44.), (41., 42.),))\n\n # Static shape.\n with self.assertRaisesRegexp(ValueError, 'logits shape'):\n head.create_estimator_spec(\n features={'x': np.array(((30.,), (42.,),))},\n mode=model_fn.ModeKeys.PREDICT,\n logits=logits_2x2)\n\n # Dynamic shape.\n logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n spec = head.create_estimator_spec(\n features={'x': np.array(((30.,), (42.,),))},\n mode=model_fn.ModeKeys.PREDICT,\n logits=logits_placeholder)\n with self.test_session():\n with self.assertRaisesRegexp(errors.OpError, 'logits shape'):\n spec.predictions[prediction_keys.PredictionKeys.PROBABILITIES].eval({\n logits_placeholder: logits_2x2\n })\n\n def test_invalid_labels_shape(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)\n self.assertEqual(n_classes, head.logits_dimension)\n\n # Logits should be shape (batch_size, 3).\n # Labels should be shape (batch_size, 1).\n labels_2x2 = np.array(((45, 44), (41, 42),), dtype=np.int)\n logits_2x3 = np.array(((1., 2., 3.), (1., 2., 3.),))\n features = {'x': np.array(((42.,),))}\n\n # Static shape.\n with self.assertRaisesRegexp(ValueError, 'Mismatched label shape'):\n head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_2x3,\n labels=labels_2x2)\n\n # Dynamic shape.\n labels_placeholder = array_ops.placeholder(dtype=dtypes.int64)\n logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n training_loss = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_placeholder,\n labels=labels_placeholder)[0]\n with self.test_session():\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[expected_labels_shape: \\] \\[2 1\\] \\[labels_shape: \\] \\[2 2\\]'):\n training_loss.eval({\n logits_placeholder: logits_2x3,\n labels_placeholder: labels_2x2\n })\n\n def test_invalid_labels_type(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)\n self.assertEqual(n_classes, head.logits_dimension)\n\n 
# Logits should be shape (batch_size, 3).\n # Labels should be shape (batch_size, 1).\n labels_2x1 = np.array(((1.,), (1.,),))\n logits_2x3 = np.array(((1., 2., 3.), (1., 2., 3.),))\n features = {'x': np.array(((42.,),))}\n\n # Static shape.\n with self.assertRaisesRegexp(ValueError, 'Labels dtype'):\n head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_2x3,\n labels=labels_2x1)\n\n # Dynamic shape.\n labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n with self.assertRaisesRegexp(ValueError, 'Labels dtype'):\n head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_placeholder,\n labels=labels_placeholder)\n\n def test_invalid_labels_values(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)\n self.assertEqual(n_classes, head.logits_dimension)\n\n labels_2x1_with_large_id = np.array(((45,), (1,),), dtype=np.int)\n labels_2x1_with_negative_id = np.array(((-5,), (1,),), dtype=np.int)\n logits_2x3 = np.array(((1., 2., 4.), (1., 2., 3.),))\n\n labels_placeholder = array_ops.placeholder(dtype=dtypes.int64)\n logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n training_loss = head.create_loss(\n features={'x': np.array(((42.,),))},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_placeholder,\n labels=labels_placeholder)[0]\n with self.test_session():\n with self.assertRaisesOpError('Labels must <= n_classes - 1'):\n training_loss.eval({\n labels_placeholder: labels_2x1_with_large_id,\n logits_placeholder: logits_2x3\n })\n\n with self.test_session():\n with self.assertRaisesOpError('Labels must >= 0'):\n training_loss.eval({\n labels_placeholder: labels_2x1_with_negative_id,\n logits_placeholder: logits_2x3\n })\n\n def test_invalid_labels_sparse_tensor(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)\n self.assertEqual(n_classes, head.logits_dimension)\n\n labels_2x1 = sparse_tensor.SparseTensor(\n values=['english', 'italian'],\n indices=[[0, 0], [1, 0]],\n dense_shape=[2, 1])\n logits_2x3 = np.array(((1., 2., 4.), (1., 2., 3.),))\n\n with self.assertRaisesRegexp(\n ValueError, 'SparseTensor labels are not supported.'):\n head.create_loss(\n features={'x': np.array(((42.,),))},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_2x3,\n labels=labels_2x1)\n\n def test_incompatible_labels_shape(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)\n self.assertEqual(n_classes, head.logits_dimension)\n\n # Logits should be shape (batch_size, 3).\n # Labels should be shape (batch_size, 1).\n # Here batch sizes are different.\n values_3x1 = np.array(((1,), (1,), (1,),))\n values_2x3 = np.array(((1., 2., 3.), (1., 2., 3.),))\n features = {'x': values_2x3}\n\n # Static shape.\n with self.assertRaisesRegexp(\n ValueError,\n r'Shape mismatch: The shape of labels \\(received \\(3,\\)\\) should equal '\n r'the shape of logits except for the last dimension '\n r'\\(received \\(2, 3\\)\\)\\.'\n ):\n head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=values_2x3,\n labels=values_3x1)\n\n # Dynamic shape.\n labels_placeholder = array_ops.placeholder(dtype=dtypes.int64)\n logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n training_loss = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_placeholder,\n 
labels=labels_placeholder)[0]\n with self.test_session():\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[expected_labels_shape: \\] \\[2 1\\] \\[labels_shape: \\] \\[3 1\\]'):\n training_loss.eval({\n labels_placeholder: values_3x1,\n logits_placeholder: values_2x3\n })\n\n def test_name(self):\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, name='foo')\n self.assertEqual('foo', head.name)\n\n def test_predict(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)\n self.assertEqual(n_classes, head.logits_dimension)\n\n logits = [[1., 0., 0.], [0., 0., 1.]]\n expected_probabilities = [[0.576117, 0.2119416, 0.2119416],\n [0.2119416, 0.2119416, 0.576117]]\n expected_class_ids = [[0], [2]]\n expected_classes = [[b'0'], [b'2']]\n expected_export_classes = [[b'0', b'1', b'2']] * 2\n\n spec = head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.PREDICT,\n logits=logits)\n\n self.assertItemsEqual(\n (_DEFAULT_SERVING_KEY, 'predict', 'classification'),\n spec.export_outputs.keys())\n\n # Assert predictions and export_outputs.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n predictions = sess.run(spec.predictions)\n self.assertAllClose(logits,\n predictions[prediction_keys.PredictionKeys.LOGITS])\n self.assertAllClose(\n expected_probabilities,\n predictions[prediction_keys.PredictionKeys.PROBABILITIES])\n self.assertAllClose(expected_class_ids,\n predictions[prediction_keys.PredictionKeys.CLASS_IDS])\n self.assertAllEqual(expected_classes,\n predictions[prediction_keys.PredictionKeys.CLASSES])\n\n self.assertAllClose(\n expected_probabilities,\n sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))\n self.assertAllEqual(\n expected_export_classes,\n sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].classes))\n\n def test_predict_with_vocabulary_list(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])\n\n logits = [[1., 0., 0.], [0., 0., 1.]]\n expected_classes = [[b'aang'], [b'zuko']]\n expected_export_classes = [[b'aang', b'iroh', b'zuko']] * 2\n\n spec = head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.PREDICT,\n logits=logits)\n\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertAllEqual(\n expected_classes,\n sess.run(spec.predictions[prediction_keys.PredictionKeys.CLASSES]))\n self.assertAllEqual(\n expected_export_classes,\n sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].classes))\n\n def test_weight_should_not_impact_prediction(self):\n n_classes = 3\n logits = [[1., 0., 0.], [0., 0., 1.]]\n expected_probabilities = [[0.576117, 0.2119416, 0.2119416],\n [0.2119416, 0.2119416, 0.576117]]\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes, weight_column='label_weights')\n\n weights_2x1 = [[1.], [2.]]\n spec = head.create_estimator_spec(\n features={\n 'x': np.array(((42,),), dtype=np.int32),\n 'label_weights': weights_2x1,\n },\n mode=model_fn.ModeKeys.PREDICT,\n logits=logits)\n\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n predictions = sess.run(spec.predictions)\n self.assertAllClose(logits,\n predictions[prediction_keys.PredictionKeys.LOGITS])\n self.assertAllClose(\n 
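# The `expected_probabilities` / `expected_class_ids` in `test_predict` above
# are just the softmax of the logits [[1, 0, 0], [0, 0, 1]]. A NumPy check of
# that arithmetic (illustrative, not part of the original test file):
import numpy as np

logits = np.array([[1., 0., 0.], [0., 0., 1.]])
shifted = np.exp(logits - logits.max(axis=1, keepdims=True))
probabilities = shifted / shifted.sum(axis=1, keepdims=True)
# rows ~ [0.576117, 0.211942, 0.211942] and [0.211942, 0.211942, 0.576117]
class_ids = probabilities.argmax(axis=1)   # [0, 2], matching expected_class_ids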
expected_probabilities,\n predictions[prediction_keys.PredictionKeys.PROBABILITIES])\n\n def test_eval_create_loss(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)\n\n logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)\n labels = np.array(((1,), (1,)), dtype=np.int64)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # loss = cross_entropy(labels, logits) = [10, 0].\n expected_training_loss = 10.\n # Create loss.\n training_loss = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(\n expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)\n\n def test_eval_create_loss_loss_fn(self):\n \"\"\"Tests head.create_loss for eval mode and custom loss_fn.\"\"\"\n loss = np.array([[1.], [2.]], dtype=np.float32)\n logits_input = np.array([[-10., 10., 0.], [-15., 10., 0]], dtype=np.float32)\n labels_input = np.array([[1], [2]], dtype=np.int64)\n def _loss_fn(labels, logits):\n check_labels = control_flow_ops.Assert(\n math_ops.reduce_all(math_ops.equal(labels, labels_input)),\n data=[labels])\n check_logits = control_flow_ops.Assert(\n math_ops.reduce_all(math_ops.equal(logits, logits_input)),\n data=[logits])\n with ops.control_dependencies([check_labels, check_logits]):\n return constant_op.constant(loss)\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, loss_fn=_loss_fn)\n\n actual_training_loss = head.create_loss(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_input,\n labels=labels_input)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(np.sum(loss), actual_training_loss.eval())\n\n def test_eval_create_loss_loss_fn_wrong_shape(self):\n \"\"\"Tests custom loss_fn that returns Tensor of unexpected shape.\"\"\"\n loss = np.array([1., 2.], dtype=np.float32)\n def _loss_fn(labels, logits):\n del labels, logits # Unused\n return constant_op.constant(loss)\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, loss_fn=_loss_fn)\n\n logits = np.array([[-10., 10., 0.], [-15., 10., 0.]], dtype=np.float32)\n labels = np.array([[1], [2]], dtype=np.int64)\n actual_training_loss = head.create_loss(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[loss_fn must return Tensor of shape \\[D0, D1, ... DN, 1\\]\\. \\] '\n r'\\[logits_shape: \\] \\[2 3\\] \\[loss_shape: \\] \\[2\\]'):\n actual_training_loss.eval()\n\n def test_eval_labels_none(self):\n \"\"\"Tests that error is raised when labels is None.\"\"\"\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3)\n\n with self.assertRaisesRegexp(\n ValueError, r'You must provide a labels Tensor\\. 
Given: None\\.'):\n head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.EVAL,\n logits=np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32),\n labels=None)\n\n def test_eval(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)\n logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)\n labels = np.array(((1,), (1,)), dtype=np.int64)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.\n expected_loss = 10.\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)\n\n keys = metric_keys.MetricKeys\n expected_metrics = {\n keys.LOSS_MEAN: expected_loss / 2,\n keys.ACCURACY: 0.5, # 1 of 2 labels is correct.\n }\n\n # Assert spec contains expected tensors.\n self.assertIsNotNone(spec.loss)\n self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())\n self.assertIsNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n _assert_no_hooks(self, spec)\n\n # Assert predictions, loss, and metrics.\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}\n update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n loss, metrics = sess.run((spec.loss, update_ops))\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n # Check results of both update (in `metrics`) and value ops.\n self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)\n self.assertAllClose(\n expected_metrics, {k: value_ops[k].eval()\n for k in value_ops},\n rtol=tol,\n atol=tol)\n\n def test_eval_metric_ops_with_head_name(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes, name='some_multiclass_head')\n logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)\n labels = np.array(((1,), (1,)), dtype=np.int64)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)\n\n expected_metric_keys = [\n '{}/some_multiclass_head'.format(metric_keys.MetricKeys.LOSS_MEAN),\n '{}/some_multiclass_head'.format(metric_keys.MetricKeys.ACCURACY)\n ]\n self.assertItemsEqual(expected_metric_keys, spec.eval_metric_ops.keys())\n\n def test_eval_with_regularization_losses(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes, loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)\n logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)\n labels = np.array(((1,), (1,)), dtype=np.int64)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n regularization_losses = [1.5, 0.5]\n expected_regularization_loss = 2.\n # unregularized_loss = sum(cross_entropy(labels, logits)) / batch_size\n # = sum(10, 0) / 2 = 5.\n expected_unregularized_loss = 5.\n expected_regularized_loss = (\n expected_unregularized_loss + expected_regularization_loss)\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels,\n regularization_losses=regularization_losses)\n\n keys = metric_keys.MetricKeys\n expected_metrics = {\n keys.LOSS_MEAN: 
expected_unregularized_loss,\n keys.LOSS_REGULARIZATION: expected_regularization_loss,\n keys.ACCURACY: 0.5, # 1 of 2 labels is correct.\n }\n\n # Assert predictions, loss, and metrics.\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}\n update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n loss, metrics = sess.run((spec.loss, update_ops))\n self.assertAllClose(expected_regularized_loss, loss, rtol=tol, atol=tol)\n # Check results of both update (in `metrics`) and value ops.\n self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)\n self.assertAllClose(\n expected_metrics, {k: value_ops[k].eval()\n for k in value_ops},\n rtol=tol,\n atol=tol)\n\n def test_eval_with_label_vocabulary_create_loss(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])\n logits = [[10., 0, 0], [0, 10, 0]]\n labels = [[b'iroh'], [b'iroh']]\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # loss = cross_entropy(labels, logits) = [10, 0].\n expected_training_loss = 10.\n training_loss = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(\n expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)\n\n def test_eval_with_label_vocabulary(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])\n\n logits = [[10., 0, 0], [0, 10, 0]]\n labels = [[b'iroh'], [b'iroh']]\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.\n expected_loss = 10.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)\n\n keys = metric_keys.MetricKeys\n expected_metrics = {\n keys.LOSS_MEAN: expected_loss / 2,\n keys.ACCURACY: 0.5, # 1 of 2 labels is correct.\n }\n\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}\n update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n loss, metrics = sess.run((spec.loss, update_ops))\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n # Check results of both update (in `metrics`) and value ops.\n self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)\n self.assertAllClose(\n expected_metrics, {k: value_ops[k].eval() for k in value_ops},\n rtol=tol, atol=tol)\n\n def test_weighted_multi_example_eval(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes, weight_column='label_weights')\n\n # Create estimator spec.\n logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)\n labels = np.array(((1,), (2,), (2,)), dtype=np.int64)\n weights_3x1 = np.array(((1.,), (2.,), (3.,)), dtype=np.float64)\n # loss = sum(cross_entropy(labels, logits) * [1, 2, 3])\n # = sum([10, 10, 0] * [1, 2, 3]) = 30\n expected_loss = 30.\n spec = head.create_estimator_spec(\n features={\n 'x': np.array(((42,),), dtype=np.int32),\n 'label_weights': weights_3x1,\n },\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n 
labels=labels)\n\n keys = metric_keys.MetricKeys\n expected_metrics = {\n keys.LOSS_MEAN: expected_loss / np.sum(weights_3x1),\n # Weighted accuracy is 1 * 3.0 / sum weights = 0.5\n keys.ACCURACY: 0.5,\n }\n\n # Assert spec contains expected tensors.\n self.assertIsNotNone(spec.loss)\n self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())\n self.assertIsNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n _assert_no_hooks(self, spec)\n\n # Assert loss, and metrics.\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}\n update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n loss, metrics = sess.run((spec.loss, update_ops))\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n # Check results of both update (in `metrics`) and value ops.\n self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)\n self.assertAllClose(\n expected_metrics, {k: value_ops[k].eval() for k in value_ops},\n rtol=tol, atol=tol)\n\n def test_train_create_loss(self):\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3)\n\n logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)\n labels = np.array(((1,), (1,)), dtype=np.int64)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n\n # unreduced_loss = cross_entropy(labels, logits) = [10, 0].\n expected_unreduced_loss = [[10.], [0.]]\n # Weights default to 1.\n expected_weights = 1.\n # training_loss = 1 * 10 + 1 * 0\n expected_training_loss = 10.\n training_loss, unreduced_loss, actual_weights, _ = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels)\n tol = 1e-2\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(\n expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)\n self.assertAllClose(\n expected_unreduced_loss, unreduced_loss.eval(), rtol=tol, atol=tol)\n self.assertAllClose(expected_weights, actual_weights)\n\n def test_train_create_loss_loss_reduction(self):\n \"\"\"Tests create_loss with loss_reduction.\"\"\"\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, loss_reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS)\n\n logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)\n labels = np.array(((1,), (1,)), dtype=np.int64)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n\n # unreduced_loss = cross_entropy(labels, logits) = [10, 0].\n expected_unreduced_loss = [[10.], [0.]]\n # Weights default to 1.\n expected_weights = 1.\n # training_loss = 1 * 10 + 1 * 0 / num_nonzero_weights\n expected_training_loss = 10. 
/ 2.\n training_loss, unreduced_loss, actual_weights, _ = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels)\n tol = 1e-2\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(\n expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)\n self.assertAllClose(\n expected_unreduced_loss, unreduced_loss.eval(), rtol=tol, atol=tol)\n self.assertAllClose(expected_weights, actual_weights)\n\n def test_train_labels_none(self):\n \"\"\"Tests that error is raised when labels is None.\"\"\"\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3)\n def _no_op_train_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n with self.assertRaisesRegexp(\n ValueError, r'You must provide a labels Tensor\\. Given: None\\.'):\n head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.TRAIN,\n logits=np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32),\n labels=None,\n train_op_fn=_no_op_train_fn)\n\n def test_train(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)\n\n logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)\n labels = np.array(((1,), (1,)), dtype=np.int64)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n expected_train_result = 'my_train_op'\n def _train_op_fn(loss):\n return string_ops.string_join(\n [constant_op.constant(expected_train_result),\n string_ops.as_string(loss, precision=2)])\n\n # loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.\n expected_loss = 10.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n\n self.assertIsNotNone(spec.loss)\n self.assertEqual({}, spec.eval_metric_ops)\n self.assertIsNotNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n _assert_no_hooks(self, spec)\n\n # Assert predictions, loss, train_op, and summaries.\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,\n spec.scaffold.summary_op))\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n self.assertEqual(\n six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),\n train_result)\n _assert_simple_summaries(self, {\n metric_keys.MetricKeys.LOSS: expected_loss,\n metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,\n }, summary_str, tol)\n\n def test_train_with_optimizer(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)\n\n logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)\n labels = np.array(((1,), (1,)), dtype=np.int64)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n expected_train_result = 'my_train_op'\n\n class _Optimizer(object):\n\n def minimize(self, loss, global_step):\n del global_step\n return string_ops.string_join(\n [constant_op.constant(expected_train_result),\n string_ops.as_string(loss, precision=2)])\n\n # loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.\n expected_loss = 10.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n optimizer=_Optimizer())\n\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n loss, 
train_result = sess.run((spec.loss, spec.train_op))\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n self.assertEqual(\n six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),\n train_result)\n\n def test_train_with_update_ops(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(n_classes)\n\n with ops.Graph().as_default():\n w = variables.Variable(1)\n update_op = w.assign_add(1)\n ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, update_op)\n\n t = variables.Variable('')\n expected_train_result = b'my_train_op'\n def _train_op_fn(loss):\n del loss\n return t.assign(expected_train_result)\n\n spec = head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.TRAIN,\n logits=np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32),\n labels=np.array(((1,), (1,)), dtype=np.int64),\n train_op_fn=_train_op_fn)\n\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n sess.run(spec.train_op)\n w_value, t_value = sess.run([w, t])\n self.assertEqual(2, w_value)\n self.assertEqual(expected_train_result, t_value)\n\n def test_train_summaries_with_head_name(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes, name='some_multiclass_head')\n\n logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)\n labels = np.array(((1,), (1,)), dtype=np.int64)\n # loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.\n expected_loss = 10.\n features = {'x': np.array(((42,),), dtype=np.int32)}\n\n def _train_op_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n\n # Assert summaries.\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n summary_str = sess.run(spec.scaffold.summary_op)\n _assert_simple_summaries(self, {\n '{}/some_multiclass_head'.format(metric_keys.MetricKeys.LOSS):\n expected_loss,\n '{}/some_multiclass_head'.format(metric_keys.MetricKeys.LOSS_MEAN):\n expected_loss / 2,\n }, summary_str, tol)\n\n def test_train_with_regularization_losses(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes, loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)\n\n logits = np.array(((10, 0, 0), (0, 10, 0),), dtype=np.float32)\n labels = np.array(((1,), (1,)), dtype=np.int64)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n expected_train_result = 'my_train_op'\n def _train_op_fn(loss):\n return string_ops.string_join(\n [constant_op.constant(expected_train_result),\n string_ops.as_string(loss, precision=2)])\n\n regularization_losses = [1.5, 0.5]\n expected_regularization_loss = 2.\n # unregularized_loss = sum(cross_entropy(labels, logits)) / batch_size\n # = sum(10, 0) / 2 = 5.\n # loss = unregularized_loss + regularization_loss = 7.\n expected_loss = 7.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn,\n regularization_losses=regularization_losses)\n\n # Assert predictions, loss, train_op, and summaries.\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n loss, train_result, summary_str = sess.run((spec.loss, 
spec.train_op,\n spec.scaffold.summary_op))\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n self.assertEqual(\n six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),\n train_result)\n _assert_simple_summaries(self, {\n metric_keys.MetricKeys.LOSS: expected_loss,\n metric_keys.MetricKeys.LOSS_REGULARIZATION: (\n expected_regularization_loss),\n }, summary_str, tol)\n\n def test_train_one_dim_create_loss(self):\n \"\"\"Tests create_loss with 1D labels and weights (shape [batch_size]).\"\"\"\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, weight_column='label_weights')\n\n logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)\n labels_rank_1 = np.array((1, 2, 2,), dtype=np.int64)\n weights_rank_1 = np.array((1., 2., 3.,), dtype=np.float64)\n features = {\n 'x': np.array(((42,),), dtype=np.float32),\n 'label_weights': weights_rank_1\n }\n\n # unreduced_loss = cross_entropy(labels, logits) = [10, 10, 0].\n expected_unreduced_loss = [[10.], [10.], [0.]]\n # weights are reshaped to [3, 1] to match logits.\n expected_weights = [[1.], [2.], [3.]]\n # training_loss = 1 * 10 + 2 * 10 + 3 * 0 = 30.\n expected_training_loss = 30.\n training_loss, unreduced_loss, actual_weights, _ = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels_rank_1)\n tol = 1e-2\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(\n expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)\n self.assertAllClose(\n expected_unreduced_loss, unreduced_loss.eval(), rtol=tol, atol=tol)\n self.assertAllClose(expected_weights, actual_weights.eval())\n\n def test_train_one_dim(self):\n \"\"\"Tests train with 1D labels and weights (shape [batch_size]).\"\"\"\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, weight_column='label_weights')\n\n logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)\n labels_rank_1 = np.array((1, 2, 2,), dtype=np.int64)\n weights_rank_1 = np.array((1., 2., 3.,), dtype=np.float64)\n\n self.assertEqual((3,), labels_rank_1.shape)\n self.assertEqual((3,), weights_rank_1.shape)\n\n expected_train_result = 'my_train_op'\n def _train_op_fn(loss):\n return string_ops.string_join(\n [constant_op.constant(expected_train_result),\n string_ops.as_string(loss, precision=2)])\n\n # loss = sum(cross_entropy(labels, logits) * [1, 2, 3])\n # = sum([10, 10, 0] * [1, 2, 3]) = 30\n expected_loss = 30.\n\n features = {\n 'x': np.array(((42,),), dtype=np.float32),\n 'label_weights': weights_rank_1\n }\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels_rank_1,\n train_op_fn=_train_op_fn)\n\n self.assertIsNotNone(spec.loss)\n self.assertEqual({}, spec.eval_metric_ops)\n self.assertIsNotNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n _assert_no_hooks(self, spec)\n\n # Assert predictions, loss, train_op, and summaries.\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,\n spec.scaffold.summary_op))\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n self.assertEqual(\n six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),\n train_result)\n _assert_simple_summaries(self, {\n metric_keys.MetricKeys.LOSS: 
expected_loss,\n metric_keys.MetricKeys.LOSS_MEAN: (\n expected_loss / np.sum(weights_rank_1)),\n }, summary_str, tol)\n\n def test_train_with_vocabulary_create_loss(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])\n\n logits = [[10., 0, 0], [0, 10, 0]]\n labels = [[b'iroh'], [b'iroh']]\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # loss = cross_entropy(labels, logits) = [10, 0].\n expected_training_loss = 10.\n training_loss = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(\n expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)\n\n def test_train_with_vocabulary(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes, label_vocabulary=['aang', 'iroh', 'zuko'])\n\n logits = [[10., 0, 0], [0, 10, 0]]\n labels = [[b'iroh'], [b'iroh']]\n features = {'x': np.array(((42,),), dtype=np.int32)}\n\n def _train_op_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n # loss = sum(cross_entropy(labels, logits)) = sum(10, 0) = 10.\n expected_loss = 10.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n loss = sess.run(spec.loss)\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n\n def test_weighted_multi_example_train(self):\n n_classes = 3\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes, weight_column='label_weights')\n\n # Create estimator spec.\n logits = np.array(((10, 0, 0), (0, 10, 0), (0, 0, 10),), dtype=np.float32)\n labels = np.array(((1,), (2,), (2,)), dtype=np.int64)\n weights_3x1 = np.array(((1.,), (2.,), (3.,)), dtype=np.float64)\n expected_train_result = 'my_train_op'\n # loss = sum(cross_entropy(labels, logits) * [1, 2, 3])\n # = sum([10, 10, 0] * [1, 2, 3]) = 30\n expected_loss = 30.\n\n def _train_op_fn(loss):\n return string_ops.string_join(\n [constant_op.constant(expected_train_result),\n string_ops.as_string(loss, precision=2)])\n\n spec = head.create_estimator_spec(\n features={\n 'x': np.array(((42,),), dtype=np.float32),\n 'label_weights': weights_3x1,\n },\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n\n self.assertIsNotNone(spec.loss)\n self.assertEqual({}, spec.eval_metric_ops)\n self.assertIsNotNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n _assert_no_hooks(self, spec)\n\n # Assert predictions, loss, train_op, and summaries.\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,\n spec.scaffold.summary_op))\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n self.assertEqual(\n six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),\n train_result)\n _assert_simple_summaries(self, {\n metric_keys.MetricKeys.LOSS: expected_loss,\n # loss mean = sum(cross_entropy(labels, logits) * [1,2,3]) / (1+2+3)\n # = sum([10, 10, 0] * [1, 2, 3]) / 6 = 30 / 6\n metric_keys.MetricKeys.LOSS_MEAN:\n expected_loss / np.sum(weights_3x1),\n }, summary_str, tol)\n\n 
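# A hand-worked sketch of the weighted softmax cross-entropy sums asserted in the\n  # surrounding tests; `per_example_xent` is only an illustrative NumPy helper and is\n  # not part of head_lib or of this test file:\n  #   import numpy as np\n  #   def per_example_xent(logits, label):\n  #     shifted = logits - np.max(logits)  # numerically stable log-softmax\n  #     return np.log(np.sum(np.exp(shifted))) - shifted[label]\n  #   # Logits (10,0,0), (0,10,0), (0,0,10) with labels 1, 2, 2 give losses ~[10, 10, 0];\n  #   # weights [1, 2, 3] then yield 1*10 + 2*10 + 3*0 = 30, the expected_loss above.\n  #   # The same arithmetic with losses [[0, 12], [0, 15]] and weights [[1, 1.5], [2, 2.5]]\n  #   # gives 1*0 + 1.5*12 + 2*0 + 2.5*15 = 55.5, used in the multi-dim tests below.\n\n  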
def test_multi_dim_weighted_train_create_loss(self):\n \"\"\"Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 2].\"\"\"\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, weight_column='weights')\n\n logits = np.array([[[10, 0, 0], [12, 0, 0]],\n [[0, 10, 0], [0, 15, 0]]], dtype=np.float32)\n labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)\n weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)\n\n # unreduced_loss = cross_entropy(labels, logits) = [[0, 12], [0, 15]].\n expected_unreduced_loss = [[[0.], [12.]], [[0.], [15.]]]\n # weights are reshaped to [2, 2, 1] to match logits.\n expected_weights = [[[1.], [1.5]], [[2.], [2.5]]]\n # training_loss = 1*0 + 1.5*12 + 2*0 + 2.5*15 = 55.5\n expected_training_loss = 55.5\n training_loss, unreduced_loss, actual_weights, _ = head.create_loss(\n features={'weights': weights},\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels)\n tol = 1e-2\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(\n expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)\n self.assertAllClose(\n expected_unreduced_loss, unreduced_loss.eval(), rtol=tol, atol=tol)\n self.assertAllClose(expected_weights, actual_weights.eval())\n\n def test_multi_dim_weighted_train(self):\n \"\"\"Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 2].\"\"\"\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, weight_column='weights')\n\n logits = np.array([[[10, 0, 0], [12, 0, 0]],\n [[0, 10, 0], [0, 15, 0]]], dtype=np.float32)\n labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)\n weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)\n expected_train_result = 'my_train_op'\n def _train_op_fn(loss):\n return string_ops.string_join(\n [constant_op.constant(expected_train_result),\n string_ops.as_string(loss, precision=2)])\n\n # loss = cross_entropy(labels, logits) = [[0, 12], [0, 15]].\n # weighted_sum_loss = 1*0 + 1.5*12 + 2*0 + 2.5*15 = 55.5\n expected_loss = 55.5\n spec = head.create_estimator_spec(\n features={'weights': weights},\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n\n # Assert predictions, loss, train_op, and summaries.\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n loss, train_result = sess.run((spec.loss, spec.train_op))\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n self.assertEqual(\n six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),\n train_result)\n\n def test_multi_dim_train_weights_wrong_inner_dim(self):\n \"\"\"Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 1].\"\"\"\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, weight_column='weights')\n logits = np.array([[[10, 0, 0], [12, 0, 0]],\n [[0, 10, 0], [0, 15, 0]]], dtype=np.float32)\n labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)\n weights = np.array([[1.], [2.]], dtype=np.float32)\n def _no_op_train_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n spec = head.create_estimator_spec(\n features={'weights': weights},\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_no_op_train_fn)\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[logits_shape: \\] \\[2 2 3\\] \\[weights_shape: \\] \\[2 
1\\]'):\n spec.loss.eval()\n\n def test_multi_dim_train_weights_wrong_outer_dim(self):\n \"\"\"Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 2, 3].\"\"\"\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, weight_column='weights')\n logits = np.array([[[10, 0, 0], [12, 0, 0]],\n [[0, 10, 0], [0, 15, 0]]], dtype=np.float32)\n labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)\n weights = np.array([[[1., 1.1, 1.2], [1.5, 1.6, 1.7]],\n [[2., 2.1, 2.2], [2.5, 2.6, 2.7]]])\n weights_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n def _no_op_train_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n spec = head.create_estimator_spec(\n features={'weights': weights_placeholder},\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_no_op_train_fn)\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[logits_shape: \\]\\s\\[2 2 3\\]\\s\\[weights_shape: \\]\\s\\[2 2 3\\]'):\n spec.loss.eval({weights_placeholder: weights})\n\n def test_multi_dim_weighted_eval(self):\n \"\"\"Logits of shape [2, 2, 2], labels [2, 2, 1], weights [2, 2].\"\"\"\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(\n n_classes=3, weight_column='weights')\n logits = np.array([[[10, 0, 0], [12, 0, 0]],\n [[0, 10, 0], [0, 15, 0]]], dtype=np.float32)\n labels = np.array([[[0], [1]], [[1], [2]]], dtype=np.int64)\n weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)\n # loss = cross_entropy(labels, logits) = [[0, 12], [0, 15]].\n # weighted_sum_loss = 1*0 + 1.5*12 + 2*0 + 2.5*15 = 55.5\n expected_loss = 55.5\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features={'weights': weights},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)\n\n keys = metric_keys.MetricKeys\n expected_metrics = {\n keys.LOSS_MEAN: expected_loss / np.sum(weights),\n keys.ACCURACY: (1.*1. + 1.5*0. + 2.*1. + 2.5*0.) 
/ np.sum(weights),\n }\n\n # Assert predictions, loss, and metrics.\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}\n update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n loss, metrics = sess.run((spec.loss, update_ops))\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n # Check results of both update (in `metrics`) and value ops.\n self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)\n self.assertAllClose(\n expected_metrics, {k: value_ops[k].eval() for k in value_ops},\n rtol=tol, atol=tol)\n\n\nclass BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def test_threshold_too_small(self):\n with self.assertRaisesRegexp(ValueError, r'thresholds not in \\(0, 1\\)'):\n head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n thresholds=(0., 0.5))\n\n def test_threshold_too_large(self):\n with self.assertRaisesRegexp(ValueError, r'thresholds not in \\(0, 1\\)'):\n head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n thresholds=(0.5, 1.))\n\n def test_invalid_loss_reduction(self):\n with self.assertRaisesRegexp(\n ValueError, r'Invalid loss_reduction: invalid_loss_reduction'):\n head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n loss_reduction='invalid_loss_reduction')\n with self.assertRaisesRegexp(\n ValueError, r'Invalid loss_reduction: none'):\n head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n loss_reduction=losses.Reduction.NONE)\n\n def test_loss_fn_arg_labels_missing(self):\n def _loss_fn(logits):\n del logits # Unused\n with self.assertRaisesRegexp(\n ValueError,\n r'loss_fn must contain argument: labels\\. '\n r'Given arguments: \\(\\'logits\\',\\)'):\n head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n loss_fn=_loss_fn)\n\n def test_loss_fn_arg_logits_missing(self):\n def _loss_fn(labels):\n del labels # unused\n with self.assertRaisesRegexp(\n ValueError,\n r'loss_fn must contain argument: logits\\. 
'\n r'Given arguments: \\(\\'labels\\',\\)'):\n head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n loss_fn=_loss_fn)\n\n def test_loss_fn_arg_features_ok(self):\n def _loss_fn(labels, logits, features):\n del labels, logits, features # Unused\n head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n loss_fn=_loss_fn)\n\n def test_loss_fn_arg_invalid(self):\n def _loss_fn(labels, logits, name=None):\n del labels, logits, name # Unused\n with self.assertRaisesRegexp(\n ValueError,\n r'loss_fn has unexpected args: \\[\\'name\\'\\]'):\n head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n loss_fn=_loss_fn)\n\n def test_invalid_logits_shape(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n self.assertEqual(1, head.logits_dimension)\n\n # Logits should be shape (batch_size, 1).\n logits_2x2 = np.array(((45., 44.), (41., 42.),))\n\n # Static shape.\n with self.assertRaisesRegexp(ValueError, 'logits shape'):\n head.create_estimator_spec(\n features={'x': np.array(((42.,),))},\n mode=model_fn.ModeKeys.PREDICT,\n logits=logits_2x2)\n\n # Dynamic shape.\n logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n spec = head.create_estimator_spec(\n features={'x': np.array(((42.,),))},\n mode=model_fn.ModeKeys.PREDICT,\n logits=logits_placeholder)\n with self.test_session():\n with self.assertRaisesRegexp(errors.OpError, 'logits shape'):\n spec.predictions[prediction_keys.PredictionKeys.PROBABILITIES].eval({\n logits_placeholder: logits_2x2\n })\n\n def test_invalid_labels_shape(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n self.assertEqual(1, head.logits_dimension)\n\n # Labels and logits should be shape (batch_size, 1).\n labels_2x2 = np.array(((45., 44.), (41., 42.),))\n logits_2x1 = np.array(((45.,), (41.,),))\n\n # Static shape.\n with self.assertRaisesRegexp(ValueError, 'Mismatched label shape'):\n head.create_loss(\n features={'x': np.array(((42.,),))},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_2x1,\n labels=labels_2x2)\n\n # Dynamic shape.\n labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n training_loss = head.create_loss(\n features={'x': np.array(((42.,),))},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_placeholder,\n labels=labels_placeholder)[0]\n with self.test_session():\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[expected_labels_shape: \\] \\[2 1\\] \\[labels_shape: \\] \\[2 2\\]'):\n training_loss.eval({\n logits_placeholder: logits_2x1,\n labels_placeholder: labels_2x2\n })\n\n def test_incompatible_labels_shape(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n self.assertEqual(1, head.logits_dimension)\n\n # Both logits and labels should be shape (batch_size, 1).\n values_2x1 = np.array(((0.,), (1.,),))\n values_3x1 = np.array(((0.,), (1.,), (0.,),))\n\n # Static shape.\n with self.assertRaisesRegexp(\n ValueError, 'logits and labels must have the same shape'):\n head.create_loss(\n features={'x': values_2x1},\n mode=model_fn.ModeKeys.EVAL,\n logits=values_2x1,\n labels=values_3x1)\n with self.assertRaisesRegexp(\n ValueError, 'logits and labels must have the same shape'):\n head.create_loss(\n features={'x': values_2x1},\n mode=model_fn.ModeKeys.EVAL,\n logits=values_3x1,\n labels=values_2x1)\n\n # Dynamic shape.\n labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n logits_placeholder = 
array_ops.placeholder(dtype=dtypes.float32)\n training_loss = head.create_loss(\n features={'x': values_2x1},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_placeholder,\n labels=labels_placeholder)[0]\n with self.test_session():\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[expected_labels_shape: \\] \\[3 1\\] \\[labels_shape: \\] \\[2 1\\]'):\n training_loss.eval({\n labels_placeholder: values_2x1,\n logits_placeholder: values_3x1\n })\n with self.test_session():\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[expected_labels_shape: \\] \\[2 1\\] \\[labels_shape: \\] \\[3 1\\]'):\n training_loss.eval({\n labels_placeholder: values_3x1,\n logits_placeholder: values_2x1\n })\n\n def test_name(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n name='foo')\n self.assertEqual('foo', head.name)\n\n def test_predict(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n self.assertEqual(1, head.logits_dimension)\n\n # Create estimator spec.\n logits = [[0.3], [-0.4]]\n expected_logistics = [[0.574443], [0.401312]]\n expected_probabilities = [[0.425557, 0.574443], [0.598688, 0.401312]]\n expected_class_ids = [[1], [0]]\n expected_classes = [[b'1'], [b'0']]\n expected_export_classes = [[b'0', b'1']] * 2\n spec = head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.PREDICT,\n logits=logits)\n\n # Assert spec contains expected tensors.\n self.assertIsNone(spec.loss)\n self.assertEqual({}, spec.eval_metric_ops)\n self.assertIsNone(spec.train_op)\n self.assertItemsEqual(('classification', 'regression', 'predict',\n _DEFAULT_SERVING_KEY), spec.export_outputs.keys())\n _assert_no_hooks(self, spec)\n\n # Assert predictions.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n predictions = sess.run(spec.predictions)\n self.assertAllClose(logits,\n predictions[prediction_keys.PredictionKeys.LOGITS])\n self.assertAllClose(expected_logistics,\n predictions[prediction_keys.PredictionKeys.LOGISTIC])\n self.assertAllClose(\n expected_probabilities,\n predictions[prediction_keys.PredictionKeys.PROBABILITIES])\n self.assertAllClose(expected_class_ids,\n predictions[prediction_keys.PredictionKeys.CLASS_IDS])\n self.assertAllEqual(expected_classes,\n predictions[prediction_keys.PredictionKeys.CLASSES])\n self.assertAllClose(\n expected_probabilities,\n sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))\n self.assertAllEqual(\n expected_export_classes,\n sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].classes))\n self.assertAllClose(expected_logistics,\n sess.run(spec.export_outputs['regression'].value))\n\n def test_predict_with_vocabulary_list(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n label_vocabulary=['aang', 'iroh'])\n\n logits = [[1.], [0.]]\n expected_classes = [[b'iroh'], [b'aang']]\n\n spec = head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.PREDICT,\n logits=logits)\n\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertAllEqual(\n expected_classes,\n sess.run(spec.predictions[prediction_keys.PredictionKeys.CLASSES]))\n\n def test_eval_create_loss(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n logits = np.array(((45,), (-41,),), dtype=np.float32)\n labels = np.array(((1,), (1,),), 
dtype=np.int32)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n\n # loss = cross_entropy(labels, logits) = [0, 41].\n expected_training_loss = 41.\n # Create loss.\n training_loss = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(\n expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)\n\n def test_eval_labels_none(self):\n \"\"\"Tests that error is raised when labels is None.\"\"\"\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n\n with self.assertRaisesRegexp(\n ValueError, r'You must provide a labels Tensor\\. Given: None\\.'):\n head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.EVAL,\n logits=np.array(((45,), (-41,),), dtype=np.float32),\n labels=None)\n\n def test_eval(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n logits = np.array(((45,), (-41,),), dtype=np.float32)\n labels = np.array(((1,), (1,),), dtype=np.int32)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)\n\n keys = metric_keys.MetricKeys\n expected_metrics = {\n # loss = sum(cross_entropy(labels, logits)) = sum(0, 41) = 41\n # loss_mean = loss/2 = 41./2 = 20.5\n keys.LOSS_MEAN: 20.5,\n keys.ACCURACY: 1./2,\n keys.PRECISION: 1.,\n keys.RECALL: 1./2,\n keys.PREDICTION_MEAN: 1./2,\n keys.LABEL_MEAN: 2./2,\n keys.ACCURACY_BASELINE: 2./2,\n keys.AUC: 0.,\n keys.AUC_PR: 1.,\n }\n\n # Assert spec contains expected tensors.\n self.assertIsNotNone(spec.loss)\n self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())\n self.assertIsNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n _assert_no_hooks(self, spec)\n\n # Assert predictions, loss, and metrics.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}\n update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n loss, metrics = sess.run((spec.loss, update_ops))\n self.assertAllClose(41., loss)\n # Check results of both update (in `metrics`) and value ops.\n self.assertAllClose(expected_metrics, metrics)\n self.assertAllClose(\n expected_metrics, {k: value_ops[k].eval() for k in value_ops})\n\n def test_eval_metric_ops_with_head_name(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n name='some_binary_head')\n logits = np.array(((45,), (-41,),), dtype=np.float32)\n labels = np.array(((1,), (1,),), dtype=np.int32)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)\n\n expected_metric_keys = [\n '{}/some_binary_head'.format(metric_keys.MetricKeys.LOSS_MEAN),\n '{}/some_binary_head'.format(metric_keys.MetricKeys.ACCURACY),\n '{}/some_binary_head'.format(metric_keys.MetricKeys.PRECISION),\n '{}/some_binary_head'.format(metric_keys.MetricKeys.RECALL),\n '{}/some_binary_head'.format(metric_keys.MetricKeys.PREDICTION_MEAN),\n '{}/some_binary_head'.format(metric_keys.MetricKeys.LABEL_MEAN),\n 
'{}/some_binary_head'.format(metric_keys.MetricKeys.ACCURACY_BASELINE),\n '{}/some_binary_head'.format(metric_keys.MetricKeys.AUC),\n '{}/some_binary_head'.format(metric_keys.MetricKeys.AUC_PR),\n ]\n self.assertItemsEqual(expected_metric_keys, spec.eval_metric_ops.keys())\n\n def test_eval_with_regularization_losses(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)\n logits = np.array(((45,), (-41,),), dtype=np.float32)\n labels = np.array(((1,), (1,),), dtype=np.int32)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n regularization_losses = [1.5, 0.5]\n expected_regularization_loss = 2.\n # unregularized_loss = sum(cross_entropy(labels, logits)) / batch_size\n # = sum(0, 41) / 2 = 20.5\n expected_unregularized_loss = 20.5\n expected_regularized_loss = (\n expected_unregularized_loss + expected_regularization_loss)\n\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels,\n regularization_losses=regularization_losses)\n\n keys = metric_keys.MetricKeys\n expected_metrics = {\n keys.LOSS_MEAN: expected_unregularized_loss,\n keys.LOSS_REGULARIZATION: expected_regularization_loss,\n keys.ACCURACY: 1./2,\n keys.PRECISION: 1.,\n keys.RECALL: 1./2,\n keys.PREDICTION_MEAN: 1./2,\n keys.LABEL_MEAN: 2./2,\n keys.ACCURACY_BASELINE: 2./2,\n keys.AUC: 0.,\n keys.AUC_PR: 1.,\n }\n\n # Assert predictions, loss, and metrics.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}\n update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n loss, metrics = sess.run((spec.loss, update_ops))\n self.assertAllClose(expected_regularized_loss, loss)\n # Check results of both update (in `metrics`) and value ops.\n self.assertAllClose(expected_metrics, metrics)\n self.assertAllClose(\n expected_metrics, {k: value_ops[k].eval() for k in value_ops})\n\n def test_eval_with_vocabulary_list_create_loss(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n label_vocabulary=['aang', 'iroh'])\n logits = np.array(((45,), (-41,),), dtype=np.float32)\n labels = [[b'iroh'], [b'iroh']]\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # Create loss.\n training_loss = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(41., training_loss.eval())\n\n def test_eval_with_vocabulary_list(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n label_vocabulary=['aang', 'iroh'])\n logits = np.array(((45,), (-41,),), dtype=np.float32)\n labels = [[b'iroh'], [b'iroh']]\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)\n\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}\n update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n sess.run(update_ops)\n self.assertAllClose(1. 
/ 2,\n value_ops[metric_keys.MetricKeys.ACCURACY].eval())\n\n def test_eval_with_thresholds_create_loss(self):\n thresholds = [0.25, 0.5, 0.75]\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n thresholds=thresholds)\n logits = np.array(((-1,), (1,),), dtype=np.float32)\n labels = np.array(((1,), (1,),), dtype=np.int32)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # probabilities[i] = 1/(1 + exp(-logits[i])) =>\n # probabilities = [1/(1 + exp(1)), 1/(1 + exp(-1))] = [0.269, 0.731]\n # loss = -ln(probabilities[label[i]])) = [-ln(0.269), -ln(0.731)]\n # = [1.31304389, 0.31334182]\n # weighted sum loss = 1.62638571\n expected_training_loss = 1.62638571\n # Create loss.\n training_loss = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(\n expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)\n\n def test_eval_with_thresholds(self):\n thresholds = [0.25, 0.5, 0.75]\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n thresholds=thresholds)\n logits = np.array(((-1,), (1,),), dtype=np.float32)\n labels = np.array(((1,), (1,),), dtype=np.int32)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)\n\n # probabilities[i] = 1/(1 + exp(-logits[i])) =>\n # probabilities = [1/(1 + exp(1)), 1/(1 + exp(-1))] = [0.269, 0.731]\n # loss = -sum(ln(probabilities[label[i]])) = -ln(0.269) -ln(0.731)\n # = 1.62652338\n keys = metric_keys.MetricKeys\n expected_metrics = {\n keys.LOSS_MEAN: 1.62652338 / 2.,\n keys.ACCURACY: 1./2,\n keys.PRECISION: 1.,\n keys.RECALL: .5,\n keys.PREDICTION_MEAN: 1./2,\n keys.LABEL_MEAN: 2./2,\n keys.ACCURACY_BASELINE: 2./2,\n keys.AUC: 0.,\n keys.AUC_PR: 1.,\n keys.ACCURACY_AT_THRESHOLD % thresholds[0]: 1.,\n keys.PRECISION_AT_THRESHOLD % thresholds[0]: 1.,\n keys.RECALL_AT_THRESHOLD % thresholds[0]: 1.,\n keys.ACCURACY_AT_THRESHOLD % thresholds[1]: .5,\n keys.PRECISION_AT_THRESHOLD % thresholds[1]: 1.,\n keys.RECALL_AT_THRESHOLD % thresholds[1]: .5,\n keys.ACCURACY_AT_THRESHOLD % thresholds[2]: 0.,\n keys.PRECISION_AT_THRESHOLD % thresholds[2]: 0.,\n keys.RECALL_AT_THRESHOLD % thresholds[2]: 0.,\n }\n self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}\n update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n loss, metrics = sess.run((spec.loss, update_ops))\n self.assertAllClose(1.62652338, loss)\n # Check results of both update (in `metrics`) and value ops.\n self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)\n self.assertAllClose(\n expected_metrics, {k: value_ops[k].eval()\n for k in value_ops},\n atol=tol,\n rtol=tol)\n\n def test_train_create_loss(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n\n logits = np.array(((45,), (-41,),), dtype=np.float32)\n labels = np.array(((1,), (1,),), dtype=np.float64)\n features = {'x': np.array(((42,),), dtype=np.float32)}\n # unreduced_loss = cross_entropy(labels, logits) = [0, 41]\n expected_unreduced_loss = [[0.], [41.]]\n # weights default to 
1.\n expected_weights = 1.\n # training loss = 1 * 0 + 1 * 41\n expected_training_loss = 41.\n # Create loss.\n training_loss, unreduced_loss, actual_weights, _ = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels)\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(expected_training_loss, training_loss.eval())\n self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())\n self.assertAllClose(expected_weights, actual_weights)\n\n def test_train_create_loss_loss_reduction(self):\n \"\"\"Tests create_loss with loss_reduction.\"\"\"\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n loss_reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS)\n\n logits = np.array(((45,), (-41,),), dtype=np.float32)\n labels = np.array(((1,), (1,),), dtype=np.float64)\n features = {'x': np.array(((42,),), dtype=np.float32)}\n # unreduced_loss = cross_entropy(labels, logits) = [0, 41]\n expected_unreduced_loss = [[0.], [41.]]\n # weights default to 1.\n expected_weights = 1.\n # training loss = (1 * 0 + 1 * 41) / num_nonzero_weights\n expected_training_loss = 41. / 2.\n # Create loss.\n training_loss, unreduced_loss, actual_weights, _ = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels)\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(expected_training_loss, training_loss.eval())\n self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())\n self.assertAllClose(expected_weights, actual_weights)\n\n def test_eval_create_loss_loss_fn(self):\n \"\"\"Tests head.create_loss for eval mode and custom loss_fn.\"\"\"\n loss = np.array([[1.], [2.]], dtype=np.float32)\n logits_input = np.array([[-10.], [10.]], dtype=np.float32)\n labels_input = np.array([[1], [0]], dtype=np.int64)\n def _loss_fn(labels, logits):\n check_labels = control_flow_ops.Assert(\n math_ops.reduce_all(math_ops.equal(labels, labels_input)),\n data=[labels])\n check_logits = control_flow_ops.Assert(\n math_ops.reduce_all(math_ops.equal(logits, logits_input)),\n data=[logits])\n with ops.control_dependencies([check_labels, check_logits]):\n return constant_op.constant(loss)\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n loss_fn=_loss_fn)\n\n actual_training_loss = head.create_loss(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_input,\n labels=labels_input)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(np.sum(loss), actual_training_loss.eval())\n\n def test_eval_create_loss_loss_fn_wrong_shape(self):\n \"\"\"Tests custom loss_fn that returns Tensor of unexpected shape.\"\"\"\n loss = np.array([1., 2.], dtype=np.float32)\n def _loss_fn(labels, logits):\n del labels, logits # Unused\n return constant_op.constant(loss)\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n loss_fn=_loss_fn)\n\n logits = np.array([[-10.], [10.]], dtype=np.float32)\n labels = np.array([[1], [0]], dtype=np.int64)\n actual_training_loss = head.create_loss(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[loss_fn must return 
Tensor of shape \\[D0, D1, ... DN, 1\\]\\. \\] '\n r'\\[logits_shape: \\] \\[2 1\\] \\[loss_shape: \\] \\[2\\]'):\n actual_training_loss.eval()\n\n def test_train_labels_none(self):\n \"\"\"Tests that error is raised when labels is None.\"\"\"\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n def _no_op_train_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n with self.assertRaisesRegexp(\n ValueError, r'You must provide a labels Tensor\\. Given: None\\.'):\n head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.TRAIN,\n logits=np.array(((45,), (-41,),), dtype=np.float32),\n labels=None,\n train_op_fn=_no_op_train_fn)\n\n def test_train(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n\n logits = np.array(((45,), (-41,),), dtype=np.float32)\n labels = np.array(((1,), (1,),), dtype=np.float64)\n expected_train_result = b'my_train_op'\n features = {'x': np.array(((42,),), dtype=np.float32)}\n # loss = sum(cross_entropy(labels, logits)) = sum(0, 41) = 41\n expected_loss = 41.\n def _train_op_fn(loss):\n with ops.control_dependencies((check_ops.assert_equal(\n math_ops.to_float(expected_loss), math_ops.to_float(loss),\n name='assert_loss'),)):\n return constant_op.constant(expected_train_result)\n\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n\n # Assert spec contains expected tensors.\n self.assertIsNotNone(spec.loss)\n self.assertEqual({}, spec.eval_metric_ops)\n self.assertIsNotNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n _assert_no_hooks(self, spec)\n\n # Assert predictions, loss, train_op, and summaries.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,\n spec.scaffold.summary_op))\n self.assertAllClose(expected_loss, loss)\n self.assertEqual(expected_train_result, train_result)\n _assert_simple_summaries(self, {\n metric_keys.MetricKeys.LOSS: expected_loss,\n # loss_mean = loss/2 = 41/2 = 20.5\n metric_keys.MetricKeys.LOSS_MEAN: 20.5,\n }, summary_str)\n\n def test_train_with_optimizer(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n\n logits = np.array(((45,), (-41,),), dtype=np.float32)\n labels = np.array(((1,), (1,),), dtype=np.float64)\n expected_train_result = b'my_train_op'\n features = {'x': np.array(((42,),), dtype=np.float32)}\n # loss = sum(cross_entropy(labels, logits)) = sum(0, 41) = 41\n expected_loss = 41.\n\n class _Optimizer(object):\n\n def minimize(self, loss, global_step):\n del global_step\n with ops.control_dependencies((check_ops.assert_equal(\n math_ops.to_float(expected_loss), math_ops.to_float(loss),\n name='assert_loss'),)):\n return constant_op.constant(expected_train_result)\n\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n optimizer=_Optimizer())\n\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n loss, train_result = sess.run((spec.loss, spec.train_op))\n self.assertAllClose(expected_loss, loss)\n self.assertEqual(expected_train_result, train_result)\n\n def test_train_with_update_ops(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n\n with 
ops.Graph().as_default():\n w = variables.Variable(1)\n update_op = w.assign_add(1)\n ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, update_op)\n\n t = variables.Variable('')\n expected_train_result = b'my_train_op'\n def _train_op_fn(loss):\n del loss\n return t.assign(expected_train_result)\n\n spec = head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.TRAIN,\n logits=np.array(((45,), (-41,),), dtype=np.float32),\n labels=np.array(((1,), (1,),), dtype=np.float64),\n train_op_fn=_train_op_fn)\n\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n sess.run(spec.train_op)\n w_value, t_value = sess.run([w, t])\n self.assertEqual(2, w_value)\n self.assertEqual(expected_train_result, t_value)\n\n def test_train_summaries_with_head_name(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n name='some_binary_head')\n\n logits = np.array(((45,), (-41,),), dtype=np.float32)\n labels = np.array(((1,), (1,),), dtype=np.float64)\n features = {'x': np.array(((42,),), dtype=np.float32)}\n # loss = sum(cross_entropy(labels, logits)) = sum(0, 41) = 41\n expected_loss = 41.\n\n def _train_op_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n # Assert summaries.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n summary_str = sess.run(spec.scaffold.summary_op)\n _assert_simple_summaries(\n self,\n {\n '{}/some_binary_head'.format(metric_keys.MetricKeys.LOSS):\n expected_loss,\n # loss_mean = loss/2 = 41/2 = 20.5\n '{}/some_binary_head'.format(metric_keys.MetricKeys.LOSS_MEAN):\n 20.5,\n },\n summary_str)\n\n def test_train_with_regularization_losses(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)\n\n logits = np.array(((45,), (-41,),), dtype=np.float32)\n labels = np.array(((1,), (1,),), dtype=np.float64)\n expected_train_result = b'my_train_op'\n features = {'x': np.array(((42,),), dtype=np.float32)}\n regularization_losses = [1.5, 0.5]\n expected_regularization_loss = 2.\n # unregularized_loss = sum(cross_entropy(labels, logits)) / batch_size\n # = sum(0, 41) / 2 = 20.5\n # loss = unregularized_loss + regularization_loss = 7.\n expected_loss = 22.5\n def _train_op_fn(loss):\n with ops.control_dependencies((check_ops.assert_equal(\n math_ops.to_float(expected_loss), math_ops.to_float(loss),\n name='assert_loss'),)):\n return constant_op.constant(expected_train_result)\n\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn,\n regularization_losses=regularization_losses)\n\n # Assert predictions, loss, train_op, and summaries.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,\n spec.scaffold.summary_op))\n self.assertAllClose(expected_loss, loss)\n self.assertEqual(expected_train_result, train_result)\n _assert_simple_summaries(self, {\n metric_keys.MetricKeys.LOSS: expected_loss,\n metric_keys.MetricKeys.LOSS_REGULARIZATION: (\n expected_regularization_loss),\n }, 
summary_str)\n\n def test_float_labels_invalid_values(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n\n logits = np.array([[0.5], [-0.3]], dtype=np.float32)\n labels = np.array([[1.2], [0.4]], dtype=np.float32)\n features = {'x': np.array([[42]], dtype=np.float32)}\n training_loss = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels)[0]\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'Labels must <= n_classes - 1'):\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n training_loss.eval()\n\n def test_float_labels_train_create_loss(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n\n logits = np.array([[0.5], [-0.3]], dtype=np.float32)\n labels = np.array([[0.8], [0.4]], dtype=np.float32)\n features = {'x': np.array([[42]], dtype=np.float32)}\n # loss = cross_entropy(labels, logits)\n # = -label[i]*sigmoid(logit[i]) -(1-label[i])*sigmoid(-logit[i])\n # = [-0.8 * log(sigmoid(0.5)) -0.2 * log(sigmoid(-0.5)),\n # -0.4 * log(sigmoid(-0.3)) -0.6 * log(sigmoid(0.3))]\n # = [0.57407698418, 0.67435524446]\n # weighted sum loss = 0.57407698418 + 0.67435524446\n expected_training_loss = 1.24843222864\n # Create loss.\n training_loss = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(\n expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)\n\n def test_float_labels_train(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n\n logits = np.array([[0.5], [-0.3]], dtype=np.float32)\n labels = np.array([[0.8], [0.4]], dtype=np.float32)\n expected_train_result = b'my_train_op'\n features = {'x': np.array([[42]], dtype=np.float32)}\n # loss = sum(cross_entropy(labels, logits))\n # = sum(-label[i]*sigmoid(logit[i]) -(1-label[i])*sigmoid(-logit[i]))\n # = -0.8 * log(sigmoid(0.5)) -0.2 * log(sigmoid(-0.5))\n # -0.4 * log(sigmoid(-0.3)) -0.6 * log(sigmoid(0.3))\n # = 1.2484322\n expected_loss = 1.2484322\n def _train_op_fn(loss):\n with ops.control_dependencies((dnn_testing_utils.assert_close(\n math_ops.to_float(expected_loss), math_ops.to_float(loss)),)):\n return constant_op.constant(expected_train_result)\n\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n\n # Assert predictions, loss, train_op, and summaries.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n loss, train_result = sess.run((spec.loss, spec.train_op))\n self.assertAlmostEqual(expected_loss, loss, delta=1.e-5)\n self.assertEqual(expected_train_result, train_result)\n\n def test_float_labels_eval_create_loss(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n\n logits = np.array([[0.5], [-0.3]], dtype=np.float32)\n labels = np.array([[0.8], [0.4]], dtype=np.float32)\n features = {'x': np.array([[42]], dtype=np.float32)}\n # loss = cross_entropy(labels, logits)\n # = -label[i]*sigmoid(logit[i]) -(1-label[i])*sigmoid(-logit[i])\n # = [-0.8 * log(sigmoid(0.5)) -0.2 * log(sigmoid(-0.5)),\n # -0.4 * log(sigmoid(-0.3)) -0.6 * log(sigmoid(0.3))]\n # = [0.57407698418, 0.67435524446]\n # weighted sum loss = 0.57407698418 + 0.67435524446\n expected_training_loss = 1.24843222864\n # 
Create loss.\n training_loss = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(\n expected_training_loss, training_loss.eval(), rtol=1e-2, atol=1e-2)\n\n def test_float_labels_eval(self):\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss()\n\n logits = np.array([[0.5], [-0.3]], dtype=np.float32)\n labels = np.array([[0.8], [0.4]], dtype=np.float32)\n features = {'x': np.array([[42]], dtype=np.float32)}\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)\n\n # loss = sum(cross_entropy(labels, logits))\n # = sum(-label[i]*sigmoid(logit[i]) -(1-label[i])*sigmoid(-logit[i]))\n # = -0.8 * log(sigmoid(0.5)) -0.2 * log(sigmoid(-0.5))\n # -0.4 * log(sigmoid(-0.3)) -0.6 * log(sigmoid(0.3))\n # = 1.2484322\n expected_loss = 1.2484322\n\n # Assert loss.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n loss, metrics = sess.run((spec.loss, update_ops))\n self.assertAlmostEqual(expected_loss, loss, delta=1.e-5)\n self.assertAlmostEqual(\n expected_loss / 2., metrics[metric_keys.MetricKeys.LOSS_MEAN])\n\n def test_weighted_multi_example_predict(self):\n \"\"\"3 examples, 1 batch.\"\"\"\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n weight_column='label_weights')\n\n # Create estimator spec.\n logits = np.array(((45,), (-41,), (44,)), dtype=np.int32)\n spec = head.create_estimator_spec(\n features={\n 'x': np.array(((42,), (43,), (44,)), dtype=np.int32),\n 'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float32),\n },\n mode=model_fn.ModeKeys.PREDICT,\n logits=logits)\n\n # Assert predictions, loss, and metrics.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n predictions = sess.run(spec.predictions)\n self.assertAllClose(\n logits.astype(np.float32),\n predictions[prediction_keys.PredictionKeys.LOGITS])\n self.assertAllClose(\n _sigmoid(logits).astype(np.float32),\n predictions[prediction_keys.PredictionKeys.LOGISTIC])\n self.assertAllClose(\n [[0., 1.], [1., 0.],\n [0., 1.]], predictions[prediction_keys.PredictionKeys.PROBABILITIES])\n self.assertAllClose([[1], [0], [1]],\n predictions[prediction_keys.PredictionKeys.CLASS_IDS])\n self.assertAllEqual([[b'1'], [b'0'], [b'1']],\n predictions[prediction_keys.PredictionKeys.CLASSES])\n\n def test_weighted_multi_example_eval(self):\n \"\"\"3 examples, 1 batch.\"\"\"\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n weight_column='label_weights')\n\n # Create estimator spec.\n logits = np.array(((45,), (-41,), (44,)), dtype=np.int32)\n spec = head.create_estimator_spec(\n features={\n 'x': np.array(((42,), (43,), (44,)), dtype=np.int32),\n 'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float32),\n },\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=np.array(((1,), (1,), (0,)), dtype=np.int32))\n\n # label_mean = (1*1 + .1*1 + 1.5*0)/(1 + .1 + 1.5) = 1.1/2.6\n # = .42307692307\n expected_label_mean = .42307692307\n keys = metric_keys.MetricKeys\n expected_metrics = {\n # losses = label_weights*cross_entropy(labels, logits)\n # = (1*0 + .1*41 + 1.5*44) = (1, 4.1, 66)\n # loss = sum(losses) = 1 + 4.1 + 66 = 
70.1\n # loss_mean = loss/sum(label_weights) = 70.1/(1 + .1 + 1.5)\n # = 70.1/2.6 = 26.9615384615\n keys.LOSS_MEAN: 26.9615384615,\n # accuracy = (1*1 + .1*0 + 1.5*0)/(1 + .1 + 1.5) = 1/2.6 = .38461538461\n keys.ACCURACY: .38461538461,\n keys.PRECISION: 1./2.5,\n keys.RECALL: 1./1.1,\n # prediction_mean = (1*1 + .1*0 + 1.5*1)/(1 + .1 + 1.5) = 2.5/2.6\n # = .96153846153\n keys.PREDICTION_MEAN: .96153846153,\n keys.LABEL_MEAN: expected_label_mean,\n keys.ACCURACY_BASELINE: 1 - expected_label_mean,\n keys.AUC: .45454565,\n keys.AUC_PR: .6737757325172424,\n }\n\n # Assert spec contains expected tensors.\n self.assertIsNotNone(spec.loss)\n self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())\n\n # Assert predictions, loss, and metrics.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}\n update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n loss, metrics = sess.run((spec.loss, update_ops))\n self.assertAllClose(70.1, loss)\n # Check results of both update (in `metrics`) and value ops.\n self.assertAllClose(expected_metrics, metrics)\n self.assertAllClose(\n expected_metrics, {k: value_ops[k].eval() for k in value_ops})\n\n def test_train_one_dim_create_loss(self):\n \"\"\"Tests create_loss with 1D labels and weights (shape [batch_size]).\"\"\"\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n weight_column='label_weights')\n\n # Create estimator spec.\n logits = np.array(((45,), (-41,), (44,)), dtype=np.float32)\n labels_rank_1 = np.array((1., 1., 0.,))\n weights_rank_1 = np.array(((1., .1, 1.5,)), dtype=np.float64)\n features = {\n 'x': np.array(((42.,), (43.,), (44.,)), dtype=np.float32),\n 'label_weights': weights_rank_1,\n }\n # unreduced_loss = cross_entropy(labels, logits) = [0, 41, 44]\n expected_unreduced_loss = [[0.], [41.], [44.]]\n # weights are reshaped to [3, 1] to match logits.\n expected_weights = [[1.], [.1], [1.5]]\n # training loss = 1 * 0 + .1 * 41 + 1.5 * 44\n expected_training_loss = 70.1\n # Create loss.\n training_loss, unreduced_loss, actual_weights, _ = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels_rank_1)\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(\n expected_training_loss, training_loss.eval(),\n rtol=1e-2, atol=1e-2)\n self.assertAllClose(\n expected_unreduced_loss, unreduced_loss.eval(),\n rtol=1e-2, atol=1e-2)\n self.assertAllClose(expected_weights, actual_weights.eval())\n\n def test_train_one_dim(self):\n \"\"\"Tests train with 1D labels and weights (shape [batch_size]).\"\"\"\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n weight_column='label_weights')\n\n # Create estimator spec.\n logits = np.array(((45,), (-41,), (44,)), dtype=np.float32)\n labels_rank_1 = np.array((1., 1., 0.,))\n weights_rank_1 = np.array(((1., .1, 1.5,)), dtype=np.float64)\n self.assertEqual((3,), labels_rank_1.shape)\n self.assertEqual((3,), weights_rank_1.shape)\n features = {\n 'x': np.array(((42.,), (43.,), (44.,)), dtype=np.float32),\n 'label_weights': weights_rank_1,\n }\n expected_train_result = b'my_train_op'\n # losses = label_weights*cross_entropy(labels, logits)\n # = (1*0 + .1*41 + 1.5*44) = (1, 4.1, 66)\n # loss = sum(losses) = 1 + 4.1 + 66 = 70.1\n expected_loss = 70.1\n def _train_op_fn(loss):\n with ops.control_dependencies((check_ops.assert_equal(\n 
math_ops.to_float(expected_loss), math_ops.to_float(loss),\n name='assert_loss'),)):\n return constant_op.constant(expected_train_result)\n\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels_rank_1,\n train_op_fn=_train_op_fn)\n\n # Assert spec contains expected tensors.\n self.assertIsNotNone(spec.loss)\n self.assertIsNotNone(spec.train_op)\n\n # Assert predictions, loss, and metrics.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n loss, train_result, summary_str = sess.run((\n spec.loss, spec.train_op, spec.scaffold.summary_op))\n self.assertAllClose(expected_loss, loss)\n self.assertEqual(expected_train_result, train_result)\n _assert_simple_summaries(self, {\n metric_keys.MetricKeys.LOSS: expected_loss,\n # loss_mean = loss/sum(label_weights) = 70.1/(1 + .1 + 1.5)\n # = 70.1/2.6 = 26.9615384615\n metric_keys.MetricKeys.LOSS_MEAN: 26.9615384615,\n }, summary_str)\n\n def test_weighted_multi_example_train(self):\n \"\"\"3 examples, 1 batch.\"\"\"\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n weight_column='label_weights')\n\n # Create estimator spec.\n logits = np.array(((45,), (-41,), (44,)), dtype=np.float32)\n expected_train_result = b'my_train_op'\n # losses = label_weights*cross_entropy(labels, logits)\n # = (1*0 + .1*41 + 1.5*44) = (1, 4.1, 66)\n # loss = sum(losses) = 1 + 4.1 + 66 = 70.1\n expected_loss = 70.1\n def _train_op_fn(loss):\n with ops.control_dependencies((check_ops.assert_equal(\n math_ops.to_float(expected_loss), math_ops.to_float(loss),\n name='assert_loss'),)):\n return constant_op.constant(expected_train_result)\n spec = head.create_estimator_spec(\n features={\n 'x': np.array(((42.,), (43.,), (44.,)), dtype=np.float32),\n 'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float64),\n },\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=np.array(((1.,), (1.,), (0.,))),\n train_op_fn=_train_op_fn)\n\n # Assert spec contains expected tensors.\n self.assertIsNotNone(spec.loss)\n self.assertIsNotNone(spec.train_op)\n\n # Assert predictions, loss, and metrics.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n loss, train_result, summary_str = sess.run((\n spec.loss, spec.train_op, spec.scaffold.summary_op))\n self.assertAllClose(expected_loss, loss)\n self.assertEqual(expected_train_result, train_result)\n _assert_simple_summaries(self, {\n metric_keys.MetricKeys.LOSS: expected_loss,\n # loss_mean = loss/sum(label_weights) = 70.1/(1 + .1 + 1.5)\n # = 70.1/2.6 = 26.9615384615\n metric_keys.MetricKeys.LOSS_MEAN: 26.9615384615,\n }, summary_str)\n\n def test_multi_dim_weighted_train_create_loss(self):\n \"\"\"Logits and labels of shape [2, 2, 1], weights [2, 2].\"\"\"\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n weight_column='weights')\n\n logits = np.array([[[10], [-10]], [[12], [-12]]], dtype=np.float32)\n labels = np.array([[[0], [0]], [[1], [1]]], dtype=np.float64)\n weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)\n # unreduced_loss = cross_entropy(labels, logits) = [[10, 0], [0, 12]].\n expected_unreduced_loss = [[[10.], [0.]], [[0.], [12.]]]\n # Weights are reshaped to [2, 2, 1] to match logits.\n expected_weights = [[[1.], [1.5]], [[2.], [2.5]]]\n # training_loss = 1*10 + 1.5*0 + 2*0 + 2.5*12 = 40\n expected_training_loss = 40.\n # Create 
loss.\n training_loss, unreduced_loss, actual_weights, _ = head.create_loss(\n features={'weights': weights},\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels)\n tol = 1e-2\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(\n expected_training_loss, training_loss.eval(),\n rtol=tol, atol=tol)\n self.assertAllClose(\n expected_unreduced_loss, unreduced_loss.eval(),\n rtol=tol, atol=tol)\n self.assertAllClose(expected_weights, actual_weights.eval())\n\n def test_multi_dim_weighted_train(self):\n \"\"\"Logits and labels of shape [2, 2, 1], weights [2, 2].\"\"\"\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n weight_column='weights')\n\n logits = np.array([[[10], [-10]], [[12], [-12]]], dtype=np.float32)\n labels = np.array([[[0], [0]], [[1], [1]]], dtype=np.float64)\n weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)\n # loss = cross_entropy(labels, logits) = [[10, 0], [0, 12]].\n # weighted_sum_loss = 1*10 + 1.5*0 + 2*0 + 2.5*12 = 40\n expected_loss = 40.\n expected_train_result = 'my_train_op'\n def _train_op_fn(loss):\n return string_ops.string_join(\n [constant_op.constant(expected_train_result),\n string_ops.as_string(loss, precision=2)])\n\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features={'weights': weights},\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n\n # Assert predictions, loss, train_op, and summaries.\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n loss, train_result = sess.run((spec.loss, spec.train_op))\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n self.assertEqual(\n six.b('{0:s}{1:.2f}'.format(expected_train_result, expected_loss)),\n train_result)\n\n def test_multi_dim_train_weights_wrong_inner_dim(self):\n \"\"\"Logits and labels of shape [2, 2, 1], weights [2, 1].\"\"\"\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n weight_column='weights')\n\n logits = np.array([[[10], [-10]], [[12], [-12]]], dtype=np.float32)\n labels = np.array([[[0], [0]], [[1], [1]]], dtype=np.float64)\n weights = np.array([[1.], [2.]], dtype=np.float32)\n def _no_op_train_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n spec = head.create_estimator_spec(\n features={'weights': weights},\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_no_op_train_fn)\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[logits_shape: \\] \\[2 2 1\\] \\[weights_shape: \\] \\[2 1\\]'):\n spec.loss.eval()\n\n def test_multi_dim_train_weights_wrong_outer_dim(self):\n \"\"\"Logits and labels of shape [2, 2, 1], weights [2, 2, 2].\"\"\"\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n weight_column='weights')\n\n logits = np.array([[[10], [-10]], [[12], [-12]]], dtype=np.float32)\n labels = np.array([[[0], [0]], [[1], [1]]], dtype=np.float64)\n weights_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n def _no_op_train_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n spec = head.create_estimator_spec(\n features={'weights': weights_placeholder},\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_no_op_train_fn)\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n with 
self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[logits_shape: \\]\\s\\[2 2 1\\]\\s\\[weights_shape: \\]\\s\\[2 2 2\\]'):\n spec.loss.eval({\n weights_placeholder: np.array([[[1., 1.1], [1.5, 1.6]],\n [[2., 2.1], [2.5, 2.6]]])})\n\n def test_multi_dim_weighted_eval(self):\n \"\"\"Logits and labels of shape [2, 2, 1], weights [2, 2].\"\"\"\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(\n weight_column='weights')\n\n logits = np.array([[[10], [-10]], [[12], [-12]]], dtype=np.float32)\n labels = np.array([[[0], [0]], [[1], [1]]], dtype=np.float64)\n weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)\n # loss = cross_entropy(labels, logits) = [[10, 0], [0, 12]].\n # weighted_sum_loss = 1*10 + 1.5*0 + 2*0 + 2.5*12 = 40\n expected_loss = 40.\n\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features={'weights': weights},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)\n\n keys = metric_keys.MetricKeys\n expected_metrics = {\n keys.LOSS_MEAN: expected_loss / np.sum(weights),\n keys.ACCURACY: (1.*0. + 1.5*1. + 2.*1. + 2.5*0.) / np.sum(weights),\n keys.PRECISION: 2.0/3.0,\n keys.RECALL: 2.0/4.5,\n keys.PREDICTION_MEAN: (1.*1 + 1.5*0 + 2.*1 + 2.5*0) / np.sum(weights),\n keys.LABEL_MEAN: (1.*0 + 1.5*0 + 2.*1 + 2.5*1) / np.sum(weights),\n keys.ACCURACY_BASELINE: (1.*0 + 1.5*0 + 2.*1 + 2.5*1) / np.sum(weights),\n # We cannot reliably calculate AUC with only 4 data points, but the\n # values should not change because of backwards-compatibility.\n keys.AUC: 0.5222,\n keys.AUC_PR: 0.7341,\n }\n\n tol = 1e-2\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}\n update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n loss, metrics = sess.run((spec.loss, update_ops))\n self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)\n # Check results of both update (in `metrics`) and value ops.\n self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)\n self.assertAllClose(\n expected_metrics, {k: value_ops[k].eval() for k in value_ops},\n rtol=tol, atol=tol)\n\n\nclass RegressionHead(test.TestCase):\n\n def setUp(self):\n ops.reset_default_graph()\n\n def test_invalid_label_dimension(self):\n with self.assertRaisesRegexp(ValueError, r'Invalid label_dimension'):\n head_lib._regression_head(label_dimension=-1)\n with self.assertRaisesRegexp(ValueError, r'Invalid label_dimension'):\n head_lib._regression_head(label_dimension=0)\n\n def test_invalid_loss_reduction(self):\n with self.assertRaisesRegexp(\n ValueError, r'Invalid loss_reduction: invalid_loss_reduction'):\n head_lib._regression_head(loss_reduction='invalid_loss_reduction')\n with self.assertRaisesRegexp(\n ValueError, r'Invalid loss_reduction: none'):\n head_lib._regression_head(loss_reduction=losses.Reduction.NONE)\n\n def test_loss_fn_arg_labels_missing(self):\n def _loss_fn(logits):\n del logits # Unused\n with self.assertRaisesRegexp(\n ValueError,\n r'loss_fn must contain argument: labels\\. '\n r'Given arguments: \\(\\'logits\\',\\)'):\n head_lib._regression_head(loss_fn=_loss_fn)\n\n def test_loss_fn_arg_logits_missing(self):\n def _loss_fn(labels):\n del labels # unused\n with self.assertRaisesRegexp(\n ValueError,\n r'loss_fn must contain argument: logits\\. 
'\n r'Given arguments: \\(\\'labels\\',\\)'):\n head_lib._regression_head(loss_fn=_loss_fn)\n\n def test_loss_fn_arg_features_ok(self):\n def _loss_fn(labels, logits, features):\n del labels, logits, features # Unused\n head_lib._regression_head(loss_fn=_loss_fn)\n\n def test_loss_fn_arg_invalid(self):\n def _loss_fn(labels, logits, name=None):\n del labels, logits, name # Unused\n with self.assertRaisesRegexp(\n ValueError,\n r'loss_fn has unexpected args: \\[\\'name\\'\\]'):\n head_lib._regression_head(loss_fn=_loss_fn)\n\n def test_invalid_logits(self):\n head = head_lib._regression_head(label_dimension=3)\n self.assertEqual(3, head.logits_dimension)\n logits_1d = np.array(((45.,), (41.,),))\n\n # Static shape.\n with self.assertRaisesRegexp(ValueError, 'logits shape'):\n head.create_estimator_spec(\n features={'x': np.array(((42.,),))},\n mode=model_fn.ModeKeys.PREDICT,\n logits=logits_1d)\n\n # Dynamic shape.\n logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n spec = head.create_estimator_spec(\n features={'x': np.array(((42.,),))},\n mode=model_fn.ModeKeys.PREDICT,\n logits=logits_placeholder)\n with self.test_session():\n with self.assertRaisesRegexp(errors.OpError, 'logits shape'):\n spec.predictions[prediction_keys.PredictionKeys.PREDICTIONS].eval({\n logits_placeholder: logits_1d\n })\n\n def test_incompatible_labels_eval(self):\n head = head_lib._regression_head(label_dimension=3)\n self.assertEqual(3, head.logits_dimension)\n values_3d = np.array(((45., 46., 47.), (41., 42., 43.),))\n values_1d = np.array(((43.,), (44.,),))\n\n # Static shape.\n with self.assertRaisesRegexp(ValueError, 'Mismatched label shape'):\n head.create_loss(\n features={'x': values_1d},\n mode=model_fn.ModeKeys.EVAL,\n logits=values_3d,\n labels=values_1d)\n with self.assertRaisesRegexp(ValueError, 'logits shape'):\n head.create_estimator_spec(\n features={'x': values_3d}, labels=values_3d,\n mode=model_fn.ModeKeys.EVAL, logits=values_1d, train_op_fn=None)\n\n # Dynamic shape.\n labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n spec = head.create_estimator_spec(\n features={'x': values_1d},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_placeholder,\n labels=labels_placeholder)\n with self.test_session():\n with self.assertRaisesRegexp(errors.OpError, 'logits shape'):\n spec.loss.eval({\n labels_placeholder: values_3d,\n logits_placeholder: values_1d\n })\n training_loss = head.create_loss(\n features={'x': values_1d},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_placeholder,\n labels=labels_placeholder)[0]\n with self.test_session():\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[expected_labels_shape: \\] \\[2 3\\] \\[labels_shape: \\] \\[2 1\\]'):\n training_loss.eval({\n labels_placeholder: values_1d,\n logits_placeholder: values_3d\n })\n\n def test_incompatible_labels_train(self):\n head = head_lib._regression_head(label_dimension=3)\n self.assertEqual(3, head.logits_dimension)\n values_3d = np.array(((45., 46., 47.), (41., 42., 43.),))\n values_1d = np.array(((43.,), (44.,),))\n\n # Static shape.\n with self.assertRaisesRegexp(ValueError, 'Mismatched label shape'):\n head.create_loss(\n features={'x': values_1d},\n mode=model_fn.ModeKeys.TRAIN,\n logits=values_3d,\n labels=values_1d)\n\n with self.assertRaisesRegexp(ValueError, 'logits shape'):\n head.create_estimator_spec(\n features={'x': values_3d},\n mode=model_fn.ModeKeys.TRAIN,\n logits=values_1d,\n 
labels=values_3d,\n train_op_fn=lambda x: x)\n\n # Dynamic shape.\n labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n logits_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n spec = head.create_estimator_spec(\n features={'x': values_1d},\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits_placeholder,\n labels=labels_placeholder,\n train_op_fn=lambda x: x)\n with self.test_session():\n with self.assertRaisesRegexp(errors.OpError, 'logits shape'):\n spec.loss.eval({\n labels_placeholder: values_3d,\n logits_placeholder: values_1d\n })\n training_loss = head.create_loss(\n features={'x': values_1d},\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits_placeholder,\n labels=labels_placeholder)[0]\n with self.test_session():\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[expected_labels_shape: \\] \\[2 3\\] \\[labels_shape: \\] \\[2 1\\]'):\n training_loss.eval({\n labels_placeholder: values_1d,\n logits_placeholder: values_3d\n })\n\n def test_name(self):\n head = head_lib._regression_head(name='foo')\n self.assertEqual('foo', head.name)\n\n def test_predict(self):\n head = head_lib._regression_head()\n self.assertEqual(1, head.logits_dimension)\n\n # Create estimator spec.\n logits = np.array(((45,), (41,),), dtype=np.int32)\n spec = head.create_estimator_spec(\n features={'x': np.array(((42.,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.PREDICT,\n logits=logits)\n\n # Assert spec contains expected tensors.\n prediction_key = prediction_keys.PredictionKeys.PREDICTIONS\n self.assertItemsEqual((prediction_key,), spec.predictions.keys())\n self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)\n self.assertIsNone(spec.loss)\n self.assertEqual({}, spec.eval_metric_ops)\n self.assertIsNone(spec.train_op)\n default_serving_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\n self.assertItemsEqual(\n (default_serving_key, 'predict', 'regression'),\n spec.export_outputs.keys())\n _assert_no_hooks(self, spec)\n\n # Assert predictions.\n with self.test_session():\n _initialize_variables(self, spec.scaffold)\n self.assertAllClose(logits, spec.predictions[prediction_key].eval())\n self.assertAllClose(\n logits, spec.export_outputs[default_serving_key].value.eval())\n self.assertAllClose(\n logits, spec.export_outputs['regression'].value.eval())\n self.assertAllClose(\n logits, spec.export_outputs['predict'].outputs['predictions'].eval())\n\n def test_predict_with_inverse_link_fn(self):\n def _inverse_link_fn(logits):\n return logits - 10.\n head = head_lib._regression_head(inverse_link_fn=_inverse_link_fn)\n\n # Create estimator spec.\n logits = np.array(((45,), (41,),), dtype=np.int32)\n expected_predictions = np.array(((35,), (31,),), dtype=np.int32)\n spec = head.create_estimator_spec(\n features={'x': np.array(((42.,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.PREDICT,\n logits=logits)\n\n # Assert spec contains expected tensors.\n keys = prediction_keys.PredictionKeys\n self.assertItemsEqual(\n (keys.PREDICTIONS, keys.LOGITS), spec.predictions.keys())\n self.assertEqual(dtypes.float32, spec.predictions[keys.PREDICTIONS].dtype)\n self.assertEqual(dtypes.float32, spec.predictions[keys.LOGITS].dtype)\n default_serving_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\n self.assertItemsEqual(\n (default_serving_key, 'predict', 'regression'),\n spec.export_outputs.keys())\n\n # Assert predictions.\n with self.test_session():\n _initialize_variables(self, spec.scaffold)\n self.assertAllClose(\n expected_predictions, 
spec.predictions[keys.PREDICTIONS].eval())\n self.assertAllClose(logits, spec.predictions[keys.LOGITS].eval())\n self.assertAllClose(\n expected_predictions,\n spec.export_outputs[default_serving_key].value.eval())\n self.assertAllClose(\n expected_predictions, spec.export_outputs['regression'].value.eval())\n self.assertAllClose(\n expected_predictions,\n spec.export_outputs['predict'].outputs['predictions'].eval())\n self.assertAllClose(\n logits, spec.export_outputs['predict'].outputs['logits'].eval())\n\n def test_eval_create_loss(self):\n head = head_lib._regression_head()\n logits = np.array(((45,), (41,),), dtype=np.float32)\n labels = np.array(((43,), (44,),), dtype=np.int32)\n features = {'x': np.array(((42,),), dtype=np.float32)}\n # Create loss.\n training_loss = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n # loss = [(43-45)^2, (44-41)] = [4, 9]\n self.assertAllClose(13., training_loss.eval())\n\n def test_eval_create_loss_loss_fn(self):\n \"\"\"Tests head.create_loss for eval mode and custom loss_fn.\"\"\"\n loss = np.array([[0., 1.], [2., 3.]], dtype=np.float32)\n logits_input = np.array([[-1., 1.], [-2., 2.]], dtype=np.float32)\n labels_input = np.array([[1., 0.], [2., -1.]], dtype=np.float32)\n def _loss_fn(labels, logits):\n check_labels = control_flow_ops.Assert(\n math_ops.reduce_all(math_ops.equal(labels, labels_input)),\n data=[labels])\n check_logits = control_flow_ops.Assert(\n math_ops.reduce_all(math_ops.equal(logits, logits_input)),\n data=[logits])\n with ops.control_dependencies([check_labels, check_logits]):\n return constant_op.constant(loss)\n head = head_lib._regression_head(label_dimension=2, loss_fn=_loss_fn)\n\n actual_training_loss = head.create_loss(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits_input,\n labels=labels_input)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(np.sum(loss), actual_training_loss.eval())\n\n def test_eval_create_loss_loss_fn_wrong_shape(self):\n \"\"\"Tests custom loss_fn that returns Tensor of unexpected shape.\"\"\"\n loss = np.array([[1.], [2.]], dtype=np.float32)\n def _loss_fn(labels, logits):\n del labels, logits # Unused\n return constant_op.constant(loss)\n head = head_lib._regression_head(label_dimension=2, loss_fn=_loss_fn)\n\n logits = np.array([[-1., 1.], [-2., 2.]], dtype=np.float32)\n labels = np.array([[1., 0.], [2., -1.]], dtype=np.float32)\n actual_training_loss = head.create_loss(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[loss_fn must return Tensor of shape \\[D0, D1, ... DN, 2\\]\\. \\] '\n r'\\[logits_shape: \\] \\[2 2\\] \\[loss_shape: \\] \\[2 1\\]'):\n actual_training_loss.eval()\n\n def test_eval_labels_none(self):\n \"\"\"Tests that error is raised when labels is None.\"\"\"\n head = head_lib._regression_head()\n\n with self.assertRaisesRegexp(\n ValueError, r'You must provide a labels Tensor\\. 
Given: None\\.'):\n head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.EVAL,\n logits=np.array(((45,), (41,),), dtype=np.float32),\n labels=None)\n\n def test_eval(self):\n head = head_lib._regression_head()\n self.assertEqual(1, head.logits_dimension)\n\n logits = np.array(((45,), (41,),), dtype=np.float32)\n labels = np.array(((43,), (44,),), dtype=np.int32)\n features = {'x': np.array(((42,),), dtype=np.float32)}\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)\n\n # Assert spec contains expected tensors.\n prediction_key = prediction_keys.PredictionKeys.PREDICTIONS\n self.assertItemsEqual((prediction_key,), spec.predictions.keys())\n self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)\n self.assertEqual(dtypes.float32, spec.loss.dtype)\n self.assertItemsEqual(\n (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())\n self.assertIsNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n _assert_no_hooks(self, spec)\n\n # Assert predictions, loss, and metrics.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[\n metric_keys.MetricKeys.LOSS_MEAN]\n predictions, loss, loss_mean = sess.run((\n spec.predictions[prediction_key], spec.loss, loss_mean_update_op))\n self.assertAllClose(logits, predictions)\n # loss = (43-45)^2 + (44-41)^2 = 4+9 = 13\n self.assertAllClose(13., loss)\n # loss_mean = loss/2 = 13/2 = 6.5\n expected_loss_mean = 6.5\n # Check results of both update (in `loss_mean`) and value ops.\n self.assertAllClose(expected_loss_mean, loss_mean)\n self.assertAllClose(expected_loss_mean, loss_mean_value_op.eval())\n\n def test_eval_metric_ops_with_head_name_for_regression(self):\n head = head_lib._regression_head(name='some_regression_head')\n logits = np.array(((1,), (9,)), dtype=np.float32)\n labels = np.array(((1,), (1,)), dtype=np.int64)\n features = {'x': np.array(((42,),), dtype=np.int32)}\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)\n\n expected_metric_keys = [\n '{}/some_regression_head'.format(metric_keys.MetricKeys.LOSS_MEAN),\n ]\n self.assertItemsEqual(expected_metric_keys, spec.eval_metric_ops.keys())\n\n def test_eval_with_regularization_losses(self):\n head = head_lib._regression_head(\n loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)\n self.assertEqual(1, head.logits_dimension)\n\n logits = np.array(((45,), (41,),), dtype=np.float32)\n labels = np.array(((43,), (44,),), dtype=np.int32)\n features = {'x': np.array(((42,),), dtype=np.float32)}\n regularization_losses = [1.5, 0.5]\n expected_regularization_loss = 2.\n # unregularized_loss = ((43-45)^2 + (44-41)^2) / batch_size\n # = (4 + 9) / 2 = 6.5\n expected_unregularized_loss = 6.5\n expected_regularized_loss = (\n expected_unregularized_loss + expected_regularization_loss)\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels,\n regularization_losses=regularization_losses)\n\n keys = metric_keys.MetricKeys\n expected_metrics = {\n keys.LOSS_MEAN: expected_unregularized_loss,\n keys.LOSS_REGULARIZATION: expected_regularization_loss,\n }\n\n # Assert predictions, loss, and metrics.\n 
with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}\n update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n prediction_key = prediction_keys.PredictionKeys.PREDICTIONS\n predictions, loss, metrics = sess.run((\n spec.predictions[prediction_key], spec.loss, update_ops))\n self.assertAllClose(logits, predictions)\n self.assertAllClose(expected_regularized_loss, loss)\n # Check results of both update (in `metrics`) and value ops.\n self.assertAllClose(expected_metrics, metrics)\n self.assertAllClose(\n expected_metrics, {k: value_ops[k].eval() for k in value_ops})\n\n def test_train_create_loss(self):\n head = head_lib._regression_head()\n logits = np.array(((45,), (41,),), dtype=np.float32)\n labels = np.array(((43,), (44,),), dtype=np.int32)\n features = {'x': np.array(((42,),), dtype=np.float32)}\n # unreduced_loss = [(43-45)^2, (44-41)] = [4, 9]\n expected_unreduced_loss = [[4.], [9.]]\n # weights default to 1.\n expected_weights = 1\n # training_loss = 1 * 4 + 1 * 9 = 13\n expected_training_loss = 13.\n # Create loss.\n training_loss, unreduced_loss, actual_weights, _ = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels)\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(expected_training_loss, training_loss.eval())\n self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())\n self.assertAllClose(expected_weights, actual_weights)\n\n def test_train_create_loss_loss_reduction(self):\n \"\"\"Tests create_loss with loss_reduction.\"\"\"\n head = head_lib._regression_head(\n loss_reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS)\n logits = np.array(((45,), (41,),), dtype=np.float32)\n labels = np.array(((43,), (44,),), dtype=np.int32)\n features = {'x': np.array(((42,),), dtype=np.float32)}\n # unreduced_loss = [(43-45)^2, (44-41)] = [4, 9]\n expected_unreduced_loss = [[4.], [9.]]\n # weights default to 1.\n expected_weights = 1\n # training_loss = (1 * 4 + 1 * 9) / num_nonzero_weights\n expected_training_loss = 13. / 2.\n # Create loss.\n training_loss, unreduced_loss, actual_weights, _ = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels)\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(expected_training_loss, training_loss.eval())\n self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())\n self.assertAllClose(expected_weights, actual_weights)\n\n def test_train_labels_none(self):\n \"\"\"Tests that error is raised when labels is None.\"\"\"\n head = head_lib._regression_head()\n def _no_op_train_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n with self.assertRaisesRegexp(\n ValueError, r'You must provide a labels Tensor\\. 
Given: None\\.'):\n head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.TRAIN,\n logits=np.array(((45,), (41,),), dtype=np.float32),\n labels=None,\n train_op_fn=_no_op_train_fn)\n\n def test_train(self):\n head = head_lib._regression_head()\n self.assertEqual(1, head.logits_dimension)\n\n # Create estimator spec.\n logits = np.array(((45,), (41,),), dtype=np.float32)\n labels = np.array(((43.,), (44.,),), dtype=np.float64)\n expected_train_result = b'my_train_op'\n features = {'x': np.array(((42.,),), dtype=np.float32)}\n # loss = (43-45)^2 + (44-41)^2 = 4 + 9 = 13\n expected_loss = 13\n def _train_op_fn(loss):\n with ops.control_dependencies((check_ops.assert_equal(\n math_ops.to_float(expected_loss), math_ops.to_float(loss),\n name='assert_loss'),)):\n return constant_op.constant(expected_train_result)\n\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n\n # Assert spec contains expected tensors.\n prediction_key = prediction_keys.PredictionKeys.PREDICTIONS\n self.assertItemsEqual((prediction_key,), spec.predictions.keys())\n self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)\n self.assertEqual(dtypes.float32, spec.loss.dtype)\n self.assertEqual({}, spec.eval_metric_ops)\n self.assertIsNotNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n _assert_no_hooks(self, spec)\n\n # Assert predictions, loss, train_op, and summaries.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n predictions, loss, train_result, summary_str = sess.run((\n spec.predictions[prediction_key], spec.loss, spec.train_op,\n spec.scaffold.summary_op))\n self.assertAllClose(logits, predictions)\n self.assertAllClose(expected_loss, loss)\n self.assertEqual(expected_train_result, train_result)\n _assert_simple_summaries(self, {\n metric_keys.MetricKeys.LOSS: expected_loss,\n # loss_mean = loss/2 = 13/2 = 6.5\n metric_keys.MetricKeys.LOSS_MEAN: 6.5,\n }, summary_str)\n\n def test_train_with_optimizer(self):\n head = head_lib._regression_head()\n self.assertEqual(1, head.logits_dimension)\n\n # Create estimator spec.\n logits = np.array(((45,), (41,),), dtype=np.float32)\n labels = np.array(((43.,), (44.,),), dtype=np.float64)\n expected_train_result = b'my_train_op'\n features = {'x': np.array(((42.,),), dtype=np.float32)}\n # loss = (43-45)^2 + (44-41)^2 = 4 + 9 = 13\n expected_loss = 13\n\n class _Optimizer(object):\n\n def minimize(self, loss, global_step):\n del global_step\n with ops.control_dependencies((check_ops.assert_equal(\n math_ops.to_float(expected_loss), math_ops.to_float(loss),\n name='assert_loss'),)):\n return constant_op.constant(expected_train_result)\n\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n optimizer=_Optimizer())\n\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n loss, train_result = sess.run((spec.loss, spec.train_op))\n self.assertAllClose(expected_loss, loss)\n self.assertEqual(expected_train_result, train_result)\n\n def test_train_with_update_ops(self):\n head = head_lib._regression_head()\n\n with ops.Graph().as_default():\n w = variables.Variable(1)\n update_op = w.assign_add(1)\n ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, update_op)\n\n t = variables.Variable('')\n expected_train_result = 
b'my_train_op'\n def _train_op_fn(loss):\n del loss\n return t.assign(expected_train_result)\n\n spec = head.create_estimator_spec(\n features={'x': np.array(((42,),), dtype=np.int32)},\n mode=model_fn.ModeKeys.TRAIN,\n logits=np.array(((45,), (41,),), dtype=np.float32),\n labels=np.array(((43.,), (44.,),), dtype=np.float64),\n train_op_fn=_train_op_fn)\n\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n sess.run(spec.train_op)\n w_value, t_value = sess.run([w, t])\n self.assertEqual(2, w_value)\n self.assertEqual(expected_train_result, t_value)\n\n def test_train_summaries_with_head_name(self):\n head = head_lib._regression_head(name='some_regression_head')\n self.assertEqual(1, head.logits_dimension)\n\n # Create estimator spec.\n logits = np.array(((45,), (41,),), dtype=np.float32)\n labels = np.array(((43.,), (44.,),), dtype=np.float64)\n features = {'x': np.array(((42.,),), dtype=np.float32)}\n # loss = (43-45)^2 + (44-41)^2 = 4 + 9 = 13\n expected_loss = 13\n\n def _train_op_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n\n # Assert summaries.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n summary_str = sess.run(spec.scaffold.summary_op)\n _assert_simple_summaries(\n self,\n {\n '{}/some_regression_head'.format(metric_keys.MetricKeys.LOSS):\n expected_loss,\n # loss_mean = loss/2 = 13/2 = 6.5\n '{}/some_regression_head'\n .format(metric_keys.MetricKeys.LOSS_MEAN):\n 6.5,\n },\n summary_str)\n\n def test_train_with_regularization_losses(self):\n head = head_lib._regression_head(\n loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)\n self.assertEqual(1, head.logits_dimension)\n\n # Create estimator spec.\n logits = np.array(((45,), (41,),), dtype=np.float32)\n labels = np.array(((43.,), (44.,),), dtype=np.float64)\n expected_train_result = b'my_train_op'\n features = {'x': np.array(((42.,),), dtype=np.float32)}\n regularization_losses = [1.5, 0.5]\n expected_regularization_loss = 2.\n # unregularized_loss = ((43-45)^2 + (44-41)^2) / batch_size\n # = (4 + 9) / 2 = 6.5\n # loss = unregularized_loss + regularization_loss = 8.5\n expected_loss = 8.5\n def _train_op_fn(loss):\n with ops.control_dependencies((check_ops.assert_equal(\n math_ops.to_float(expected_loss), math_ops.to_float(loss),\n name='assert_loss'),)):\n return constant_op.constant(expected_train_result)\n\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn,\n regularization_losses=regularization_losses)\n\n # Assert predictions, loss, train_op, and summaries.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n prediction_key = prediction_keys.PredictionKeys.PREDICTIONS\n predictions, loss, train_result, summary_str = sess.run((\n spec.predictions[prediction_key], spec.loss, spec.train_op,\n spec.scaffold.summary_op))\n self.assertAllClose(logits, predictions)\n self.assertAllClose(expected_loss, loss)\n self.assertEqual(expected_train_result, train_result)\n _assert_simple_summaries(self, {\n metric_keys.MetricKeys.LOSS: expected_loss,\n metric_keys.MetricKeys.LOSS_REGULARIZATION: (\n expected_regularization_loss),\n }, summary_str)\n\n def 
test_weighted_multi_example_eval(self):\n \"\"\"1d label, 3 examples, 1 batch.\"\"\"\n head = head_lib._regression_head(weight_column='label_weights')\n self.assertEqual(1, head.logits_dimension)\n\n # Create estimator spec.\n logits = np.array(((45,), (41,), (44,)), dtype=np.int32)\n spec = head.create_estimator_spec(\n features={\n 'x': np.array(((42,), (43,), (44,)), dtype=np.int32),\n 'label_weights': np.array(((1.,), (.1,), (1.5,)), dtype=np.float32),\n },\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=np.array(((35,), (42,), (45,)), dtype=np.int32))\n\n # Assert spec contains expected tensors.\n prediction_key = prediction_keys.PredictionKeys.PREDICTIONS\n self.assertItemsEqual((prediction_key,), spec.predictions.keys())\n self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)\n self.assertEqual(dtypes.float32, spec.loss.dtype)\n self.assertItemsEqual(\n (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())\n self.assertIsNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n _assert_no_hooks(self, spec)\n\n # Assert predictions, loss, and metrics.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[\n metric_keys.MetricKeys.LOSS_MEAN]\n predictions, loss, loss_mean = sess.run((\n spec.predictions[prediction_key], spec.loss, loss_mean_update_op))\n self.assertAllClose(logits, predictions)\n # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6\n self.assertAllClose(101.6, loss)\n # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.0769231\n expected_loss_mean = 39.0769231\n # Check results of both update (in `loss_mean`) and value ops.\n self.assertAllClose(expected_loss_mean, loss_mean)\n self.assertAllClose(expected_loss_mean, loss_mean_value_op.eval())\n\n def test_weight_with_numeric_column(self):\n \"\"\"1d label, 3 examples, 1 batch.\"\"\"\n head = head_lib._regression_head(\n weight_column=feature_column_lib.numeric_column(\n 'label_weights', normalizer_fn=lambda x: x + 1.))\n\n # Create estimator spec.\n logits = np.array(((45,), (41,), (44,)), dtype=np.int32)\n spec = head.create_estimator_spec(\n features={\n 'x':\n np.array(((42,), (43,), (44,)), dtype=np.int32),\n 'label_weights':\n np.array(((0.,), (-0.9,), (0.5,)), dtype=np.float32),\n },\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=np.array(((35,), (42,), (45,)), dtype=np.int32))\n\n # Assert loss.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n loss = sess.run(spec.loss)\n # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6\n self.assertAllClose(101.6, loss)\n\n def test_weighted_multi_example_train(self):\n \"\"\"1d label, 3 examples, 1 batch.\"\"\"\n head = head_lib._regression_head(weight_column='label_weights')\n self.assertEqual(1, head.logits_dimension)\n\n # Create estimator spec.\n logits = np.array(((45,), (41,), (44,)), dtype=np.float32)\n expected_train_result = b'my_train_op'\n # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6\n expected_loss = 101.6\n def _train_op_fn(loss):\n with ops.control_dependencies((check_ops.assert_equal(\n math_ops.to_float(expected_loss), math_ops.to_float(loss),\n name='assert_loss'),)):\n return constant_op.constant(expected_train_result)\n spec = head.create_estimator_spec(\n features={\n 'x': np.array(((42,), (43,), (44,)), dtype=np.float32),\n 'label_weights': np.array(((1.,), (.1,), 
(1.5,)), dtype=np.float64),\n },\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=np.array(((35.,), (42.,), (45.,)), dtype=np.float32),\n train_op_fn=_train_op_fn)\n\n # Assert spec contains expected tensors.\n prediction_key = prediction_keys.PredictionKeys.PREDICTIONS\n self.assertItemsEqual((prediction_key,), spec.predictions.keys())\n self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)\n self.assertEqual(dtypes.float32, spec.loss.dtype)\n self.assertEqual({}, spec.eval_metric_ops)\n self.assertIsNotNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n _assert_no_hooks(self, spec)\n\n # Assert predictions, loss, train_op, and summaries.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n predictions, loss, train_result, summary_str = sess.run((\n spec.predictions[prediction_key], spec.loss, spec.train_op,\n spec.scaffold.summary_op))\n self.assertAllClose(logits, predictions)\n self.assertAllClose(expected_loss, loss)\n self.assertEqual(expected_train_result, train_result)\n _assert_simple_summaries(self, {\n metric_keys.MetricKeys.LOSS: expected_loss,\n # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.0769231\n metric_keys.MetricKeys.LOSS_MEAN: 39.0769231,\n }, summary_str)\n\n def test_train_one_dim_create_loss(self):\n \"\"\"Tests create_loss with 1D labels and weights (shape [batch_size]).\"\"\"\n head = head_lib._regression_head(weight_column='label_weights')\n logits = np.array(((45,), (41,), (44,)), dtype=np.float32)\n x_feature_rank_1 = np.array((42., 43., 44.,), dtype=np.float32)\n weight_rank_1 = np.array((1., .1, 1.5,), dtype=np.float64)\n labels_rank_1 = np.array((35., 42., 45.,))\n # unreduced_loss = [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1].\n expected_unreduced_loss = [[100.], [1.], [1.]]\n # weights are reshaped to [3, 1] to match logits.\n expected_weights = [[1.], [.1], [1.5]]\n # training_loss = 100 * 1 + 1 * .1 + 1.5 * 1 = 101.6\n expected_training_loss = 101.6\n features = {'x': x_feature_rank_1, 'label_weights': weight_rank_1}\n # Create loss.\n training_loss, unreduced_loss, actual_weights, _ = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels_rank_1)\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(expected_training_loss, training_loss.eval())\n self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())\n self.assertAllClose(expected_weights, actual_weights.eval())\n\n def test_train_one_dim(self):\n \"\"\"Tests train with 1D labels and weights (shape [batch_size]).\"\"\"\n head = head_lib._regression_head(weight_column='label_weights')\n self.assertEqual(1, head.logits_dimension)\n\n # Create estimator spec.\n logits = np.array(((45,), (41,), (44,)), dtype=np.float32)\n expected_train_result = b'my_train_op'\n # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6\n expected_loss = 101.6\n def _train_op_fn(loss):\n with ops.control_dependencies((check_ops.assert_equal(\n math_ops.to_float(expected_loss), math_ops.to_float(loss),\n name='assert_loss'),)):\n return constant_op.constant(expected_train_result)\n\n x_feature_rank_1 = np.array((42., 43., 44.,), dtype=np.float32)\n weight_rank_1 = np.array((1., .1, 1.5,), dtype=np.float64)\n labels_rank_1 = np.array((35., 42., 45.,))\n features = {'x': x_feature_rank_1, 'label_weights': weight_rank_1}\n self.assertEqual((3,), x_feature_rank_1.shape)\n 
self.assertEqual((3,), weight_rank_1.shape)\n self.assertEqual((3,), labels_rank_1.shape)\n\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels_rank_1,\n train_op_fn=_train_op_fn)\n\n # Assert spec contains expected tensors.\n prediction_key = prediction_keys.PredictionKeys.PREDICTIONS\n self.assertItemsEqual((prediction_key,), spec.predictions.keys())\n self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)\n self.assertEqual(dtypes.float32, spec.loss.dtype)\n self.assertEqual({}, spec.eval_metric_ops)\n self.assertIsNotNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n _assert_no_hooks(self, spec)\n\n # Assert predictions, loss, train_op, and summaries.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n predictions, loss, train_result, summary_str = sess.run((\n spec.predictions[prediction_key], spec.loss, spec.train_op,\n spec.scaffold.summary_op))\n self.assertAllClose(logits, predictions)\n self.assertAllClose(expected_loss, loss)\n self.assertEqual(expected_train_result, train_result)\n _assert_simple_summaries(self, {\n metric_keys.MetricKeys.LOSS: expected_loss,\n # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.0769231\n metric_keys.MetricKeys.LOSS_MEAN: 39.0769231,\n }, summary_str)\n\n def test_weighted_multi_value_eval_create_loss(self):\n \"\"\"3d label, 1 example, 1 batch.\"\"\"\n head = head_lib._regression_head(\n weight_column='label_weights', label_dimension=3)\n logits = np.array(((45., 41., 44.),))\n labels = np.array(((35., 42., 45.),))\n features = {\n 'x': np.array(((42., 43., 44.),)),\n 'label_weights': np.array(((1., .1, 1.5),))\n }\n # Create loss.\n training_loss = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n # loss = [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1].\n # weighted sum loss = 1 * 100 + .1 * 1 + 1.5 * 1 = 101.6\n self.assertAllClose(101.6, training_loss.eval())\n\n def test_weighted_multi_value_eval(self):\n \"\"\"3d label, 1 example, 1 batch.\"\"\"\n head = head_lib._regression_head(\n weight_column='label_weights', label_dimension=3)\n self.assertEqual(3, head.logits_dimension)\n\n logits = np.array(((45., 41., 44.),))\n labels = np.array(((35., 42., 45.),))\n features = {\n 'x': np.array(((42., 43., 44.),)),\n 'label_weights': np.array(((1., .1, 1.5),))\n }\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.EVAL,\n logits=logits,\n labels=labels)\n\n # Assert spec contains expected tensors.\n prediction_key = prediction_keys.PredictionKeys.PREDICTIONS\n self.assertItemsEqual((prediction_key,), spec.predictions.keys())\n self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)\n self.assertEqual(dtypes.float32, spec.loss.dtype)\n self.assertItemsEqual(\n (metric_keys.MetricKeys.LOSS_MEAN,), spec.eval_metric_ops.keys())\n self.assertIsNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n _assert_no_hooks(self, spec)\n\n # Assert predictions, loss, and metrics.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNone(spec.scaffold.summary_op)\n loss_mean_value_op, loss_mean_update_op = spec.eval_metric_ops[\n metric_keys.MetricKeys.LOSS_MEAN]\n predictions, loss, loss_mean = sess.run((\n 
spec.predictions[prediction_key], spec.loss, loss_mean_update_op))\n self.assertAllClose(logits, predictions)\n # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6\n self.assertAllClose(101.6, loss)\n # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923\n expected_loss_mean = 39.076923\n # Check results of both update (in `loss_mean`) and value ops.\n self.assertAllClose(expected_loss_mean, loss_mean)\n self.assertAllClose(expected_loss_mean, loss_mean_value_op.eval())\n\n def test_weighted_multi_value_train_create_loss(self):\n \"\"\"3d label, 1 example, 1 batch.\"\"\"\n head = head_lib._regression_head(\n weight_column='label_weights', label_dimension=3)\n logits = np.array(((45., 41., 44.),))\n labels = np.array(((35., 42., 45.),))\n features = {\n 'x': np.array(((42., 43., 44.),)),\n 'label_weights': np.array(((1., .1, 1.5),))\n }\n # Create loss.\n training_loss = head.create_loss(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels)[0]\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n # loss = [(35-45)^2, (42-41)^2, (45-44)^2] = [100, 1, 1].\n # weighted sum loss = 1 * 100 + .1 * 1 + 1.5 * 1 = 101.6\n self.assertAllClose(101.6, training_loss.eval())\n\n def test_weighted_multi_value_train(self):\n \"\"\"3d label, 1 example, 1 batch.\"\"\"\n head = head_lib._regression_head(\n weight_column='label_weights', label_dimension=3)\n self.assertEqual(3, head.logits_dimension)\n\n logits = np.array(((45., 41., 44.),))\n labels = np.array(((35., 42., 45.),))\n expected_train_result = b'my_train_op'\n # loss = 1*(35-45)^2 + .1*(42-41)^2 + 1.5*(45-44)^2 = 100+.1+1.5 = 101.6\n expected_loss = 101.6\n def _train_op_fn(loss):\n with ops.control_dependencies((check_ops.assert_equal(\n math_ops.to_float(expected_loss), math_ops.to_float(loss),\n name='assert_loss'),)):\n return constant_op.constant(expected_train_result)\n\n features = {\n 'x': np.array(((42., 43., 44.),)),\n 'label_weights': np.array(((1., .1, 1.5),)),\n }\n # Create estimator spec.\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n\n # Assert spec contains expected tensors.\n prediction_key = prediction_keys.PredictionKeys.PREDICTIONS\n self.assertItemsEqual((prediction_key,), spec.predictions.keys())\n self.assertEqual(dtypes.float32, spec.predictions[prediction_key].dtype)\n self.assertEqual(dtypes.float32, spec.loss.dtype)\n self.assertEqual({}, spec.eval_metric_ops)\n self.assertIsNotNone(spec.train_op)\n self.assertIsNone(spec.export_outputs)\n _assert_no_hooks(self, spec)\n\n # Evaluate predictions, loss, train_op, and summaries.\n with self.test_session() as sess:\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n predictions, loss, train_result, summary_str = sess.run((\n spec.predictions[prediction_key], spec.loss, spec.train_op,\n spec.scaffold.summary_op))\n self.assertAllClose(logits, predictions)\n self.assertAllClose(expected_loss, loss)\n self.assertEqual(expected_train_result, train_result)\n _assert_simple_summaries(self, {\n metric_keys.MetricKeys.LOSS: expected_loss,\n # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923\n metric_keys.MetricKeys.LOSS_MEAN: 39.076923,\n }, summary_str)\n\n def test_weighted_multi_batch_eval(self):\n \"\"\"1d label, 1 example, 3 batches.\"\"\"\n head = head_lib._regression_head(weight_column='label_weights')\n self.assertEqual(1, 
head.logits_dimension)\n\n # Create estimator spec.\n logits = np.array(((45.,), (41.,), (44.,)))\n input_fn = numpy_io.numpy_input_fn(\n x={\n 'x': np.array(((42.,), (43.,), (44.,))),\n 'label_weights': np.array(((1.,), (.1,), (1.5,))),\n # 'logits' is not a feature, but we use `numpy_input_fn` to make a\n # batched version of it, and pop it off before passing to\n # `create_estimator_spec`.\n 'logits': logits,\n },\n y=np.array(((35.,), (42.,), (45.,))),\n batch_size=1,\n num_epochs=1,\n shuffle=False)\n batched_features, batched_labels = input_fn()\n batched_logits = batched_features.pop('logits')\n spec = head.create_estimator_spec(\n features=batched_features,\n mode=model_fn.ModeKeys.EVAL,\n logits=batched_logits,\n labels=batched_labels,\n train_op_fn=None)\n\n # losses = [1*(35-45)^2, .1*(42-41)^2, 1.5*(45-44)^2] = [100, .1, 1.5]\n # loss = sum(losses) = 100+.1+1.5 = 101.6\n # loss_mean = loss/(1+.1+1.5) = 101.6/2.6 = 39.076923\n expected_metrics = {metric_keys.MetricKeys.LOSS_MEAN: 39.076923}\n\n # Assert spec contains expected tensors.\n self.assertEqual(dtypes.float32, spec.loss.dtype)\n self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())\n self.assertIsNone(spec.train_op)\n _assert_no_hooks(self, spec)\n\n with self.test_session() as sess:\n # Finalize graph and initialize variables.\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n queue_runner_impl.start_queue_runners()\n\n # Run tensors for `steps` steps.\n steps = len(logits)\n results = tuple([\n sess.run((\n spec.loss,\n # The `[1]` gives us the metric update op.\n {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}\n )) for _ in range(steps)\n ])\n\n # Assert losses and metrics.\n self.assertAllClose((100, .1, 1.5), [r[0] for r in results])\n # For metrics, check results of both update (in `results`) and value ops.\n # Note: we only check the result of the last step for streaming metrics.\n self.assertAllClose(expected_metrics, results[steps - 1][1])\n self.assertAllClose(expected_metrics, {\n k: spec.eval_metric_ops[k][0].eval() for k in spec.eval_metric_ops\n })\n\n def test_weighted_multi_batch_train(self):\n \"\"\"1d label, 1 example, 3 batches.\"\"\"\n head = head_lib._regression_head(weight_column='label_weights')\n self.assertEqual(1, head.logits_dimension)\n\n # Create estimator spec.\n logits = np.array(((45.,), (41.,), (44.,)))\n input_fn = numpy_io.numpy_input_fn(\n x={\n 'x': np.array(((42.,), (43.,), (44.,))),\n 'label_weights': np.array(((1.,), (.1,), (1.5,))),\n # 'logits' is not a feature, but we use `numpy_input_fn` to make a\n # batched version of it, and pop it off before passing to\n # `create_estimator_spec`.\n 'logits': logits,\n },\n y=np.array(((35.,), (42.,), (45.,))),\n batch_size=1,\n num_epochs=1,\n shuffle=False)\n batched_features, batched_labels = input_fn()\n batched_logits = batched_features.pop('logits')\n spec = head.create_estimator_spec(\n features=batched_features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=batched_logits,\n labels=batched_labels,\n train_op_fn=lambda loss: loss * -7.)\n\n # Assert spec contains expected tensors.\n self.assertEqual(dtypes.float32, spec.loss.dtype)\n self.assertIsNotNone(spec.train_op)\n\n with self.test_session() as sess:\n # Finalize graph and initialize variables.\n _initialize_variables(self, spec.scaffold)\n self.assertIsNotNone(spec.scaffold.summary_op)\n queue_runner_impl.start_queue_runners()\n\n results = tuple([\n sess.run((spec.loss, spec.train_op)) for _ in 
range(len(logits))\n ])\n\n # losses = [1*(35-45)^2, .1*(42-41)^2, 1.5*(45-44)^2] = [100, .1, 1.5]\n expected_losses = np.array((100, .1, 1.5))\n self.assertAllClose(expected_losses, [r[0] for r in results])\n self.assertAllClose(expected_losses * -7., [r[1] for r in results])\n\n def test_multi_dim_weighted_train_create_loss(self):\n \"\"\"Logits, labels of shape [2, 2, 3], weight shape [2, 2].\"\"\"\n label_dimension = 3\n head = head_lib._regression_head(\n weight_column='label_weights', label_dimension=label_dimension)\n logits = np.array([[[00., 01., 02.], [10., 11., 12.]],\n [[20., 21., 22.], [30., 31., 32.]]])\n labels = np.array([[[01., 02., 03.], [12., 13., 14.]],\n [[23., 24., 25.], [34., 35., 36.]]])\n weights = np.array([[1., 1.5], [2., 2.5]])\n expected_unreduced_loss = [[[1., 1., 1.], [4., 4., 4.]],\n [[9., 9., 9.], [16., 16., 16.]]]\n expected_training_loss = np.sum(\n np.array([[[1. * x for x in [1., 1., 1.]],\n [1.5 * x for x in [4., 4., 4.]]],\n [[2. * x for x in [9., 9., 9.]],\n [2.5 * x for x in [16., 16., 16.]]]]))\n # Weights are expanded to [2, 2, 1] to match logits.\n expected_weights = [[[1.], [1.5]], [[2.], [2.5]]]\n # Create loss.\n training_loss, unreduced_loss, actual_weights, _ = head.create_loss(\n features={'label_weights': weights},\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels)\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(expected_training_loss, training_loss.eval())\n self.assertAllClose(expected_unreduced_loss, unreduced_loss.eval())\n self.assertAllClose(expected_weights, actual_weights.eval())\n\n def test_multi_dim_weighted_train(self):\n \"\"\"Logits, labels of shape [2, 2, 3], weight shape [2, 2].\"\"\"\n head = head_lib._regression_head(\n weight_column='label_weights', label_dimension=3)\n logits = np.array([[[00., 01., 02.], [10., 11., 12.]],\n [[20., 21., 22.], [30., 31., 32.]]])\n labels = np.array([[[01., 02., 03.], [12., 13., 14.]],\n [[23., 24., 25.], [34., 35., 36.]]])\n expected_train_result = b'my_train_op'\n features = {\n 'label_weights': np.array([[1., 1.5], [2., 2.5]]),\n }\n # loss = 1*3*1^2 + 1.5*3*2^2 + 2*3*3^2 +2.5*3*4^2 = 195\n expected_loss = 195.\n # Create estimator spec.\n def _train_op_fn(loss):\n with ops.control_dependencies((check_ops.assert_equal(\n math_ops.to_float(expected_loss), math_ops.to_float(loss),\n name='assert_loss'),)):\n return constant_op.constant(expected_train_result)\n\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_train_op_fn)\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n self.assertAllClose(expected_loss, spec.loss.eval())\n\n def test_multi_dim_train_weights_wrong_inner_dim(self):\n \"\"\"Logits, labels of shape [2, 2, 3], weight shape [2, 1].\"\"\"\n head = head_lib._regression_head(\n weight_column='label_weights', label_dimension=3)\n logits = np.array([[[00., 01., 02.], [10., 11., 12.]],\n [[20., 21., 22.], [30., 31., 32.]]])\n labels = np.array([[[01., 02., 03.], [12., 13., 14.]],\n [[23., 24., 25.], [34., 35., 36.]]])\n features = {\n 'label_weights': np.array([[1.], [2]]),\n }\n def _no_op_train_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_no_op_train_fn)\n with self.test_session():\n _initialize_variables(self, 
monitored_session.Scaffold())\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[logits_shape: \\] \\[2 2 3\\] \\[weights_shape: \\] \\[2 1\\]'):\n spec.loss.eval()\n\n def test_multi_dim_train_weights_wrong_outer_dim(self):\n \"\"\"Logits, labels of shape [2, 2, 3], weight shape [2, 2, 2].\"\"\"\n head = head_lib._regression_head(\n weight_column='label_weights', label_dimension=3)\n logits = np.array([[[00., 01., 02.], [10., 11., 12.]],\n [[20., 21., 22.], [30., 31., 32.]]])\n labels = np.array([[[01., 02., 03.], [12., 13., 14.]],\n [[23., 24., 25.], [34., 35., 36.]]])\n weights_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n features = {\n 'label_weights': weights_placeholder,\n }\n def _no_op_train_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n spec = head.create_estimator_spec(\n features=features,\n mode=model_fn.ModeKeys.TRAIN,\n logits=logits,\n labels=labels,\n train_op_fn=_no_op_train_fn)\n with self.test_session():\n _initialize_variables(self, monitored_session.Scaffold())\n with self.assertRaisesRegexp(\n errors.InvalidArgumentError,\n r'\\[logits_shape: \\]\\s\\[2 2 3\\]\\s\\[weights_shape: \\]\\s\\[2 2 2\\]'):\n spec.loss.eval({\n weights_placeholder: np.array([[[1., 1.1], [1.5, 1.6]],\n [[2., 2.1], [2.5, 2.6]]])})\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for AddSign.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.opt.python.training import addsign\nfrom tensorflow.contrib.opt.python.training import sign_decay\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\ndef py_linear_decay_fn(decay_steps):\n def linear_decay(step):\n step = min(step, decay_steps)\n return float(decay_steps - step) / decay_steps\n return linear_decay\n\n\ndef addsign_update_numpy(params,\n g_t,\n m,\n lr,\n alpha=1.0,\n beta=0.9,\n py_sign_decay_fn=None,\n t=None):\n m_t = beta * m + (1 - beta) * g_t\n if py_sign_decay_fn is None:\n sign_decayed = 1.0\n else:\n sign_decayed = py_sign_decay_fn(t-1)\n multiplier = alpha + sign_decayed * np.sign(g_t) * np.sign(m_t)\n params_t = params - lr * multiplier * g_t\n return params_t, m_t\n\n\nclass AddSignTest(test.TestCase):\n\n def _testDense(self,\n use_resource=False,\n learning_rate=0.1,\n sign_decay_fn=None,\n py_sign_decay_fn=None,\n alpha=1.0,\n beta=0.9):\n for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:\n with self.test_session(use_gpu=True):\n # Initialize variables for numpy 
implementation.\n m0, m1 = 0.0, 0.0\n var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)\n grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)\n var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)\n grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)\n\n if use_resource:\n var0 = resource_variable_ops.ResourceVariable(var0_np)\n var1 = resource_variable_ops.ResourceVariable(var1_np)\n global_step = resource_variable_ops.ResourceVariable(\n 0, trainable=False)\n else:\n var0 = variables.Variable(var0_np)\n var1 = variables.Variable(var1_np)\n global_step = variables.Variable(\n 0, trainable=False)\n grads0 = constant_op.constant(grads0_np)\n grads1 = constant_op.constant(grads1_np)\n\n opt = addsign.AddSignOptimizer(\n learning_rate=learning_rate,\n alpha=alpha,\n beta=beta,\n sign_decay_fn=sign_decay_fn,\n )\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),\n global_step=global_step)\n neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),\n global_step=global_step)\n if not context.executing_eagerly():\n self.evaluate(variables.global_variables_initializer())\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], self.evaluate(var0))\n self.assertAllClose([3.0, 4.0], self.evaluate(var1))\n\n # Run 7 steps of AddSign\n # first 4 steps with positive gradient\n # last 3 steps with negative gradient (sign(gm) should be -1)\n for t in range(1, 8):\n if t < 5:\n if not context.executing_eagerly():\n self.evaluate(update)\n elif t > 1:\n opt.apply_gradients(zip([grads0, grads1], [var0, var1]),\n global_step=global_step)\n else:\n if not context.executing_eagerly():\n self.evaluate(neg_update)\n elif t > 1:\n opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),\n global_step=global_step)\n\n var0_np, m0 = addsign_update_numpy(\n var0_np,\n grads0_np if t < 5 else -grads0_np,\n m0,\n learning_rate,\n alpha=alpha,\n beta=beta,\n py_sign_decay_fn=py_sign_decay_fn,\n t=t,\n )\n var1_np, m1 = addsign_update_numpy(\n var1_np,\n grads1_np if t < 5 else -grads1_np,\n m1,\n learning_rate,\n alpha=alpha,\n beta=beta,\n py_sign_decay_fn=py_sign_decay_fn,\n t=t,\n )\n\n # Validate updated params\n self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))\n self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))\n\n def testDense(self):\n decay_steps = 10\n sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)\n py_sign_decay_fn = py_linear_decay_fn(decay_steps)\n self._testDense(use_resource=False)\n self._testDense(use_resource=False, learning_rate=0.01, alpha=0.1, beta=0.8)\n self._testDense(use_resource=False,\n sign_decay_fn=sign_decay_fn,\n py_sign_decay_fn=py_sign_decay_fn)\n\n self._testDense(use_resource=True)\n self._testDense(use_resource=True, learning_rate=0.01, alpha=0.1, beta=0.8)\n self._testDense(use_resource=True,\n sign_decay_fn=sign_decay_fn,\n py_sign_decay_fn=py_sign_decay_fn)\n\n def _testSparse(self,\n use_resource=False,\n learning_rate=0.1,\n sign_decay_fn=None,\n py_sign_decay_fn=None,\n alpha=1.0,\n beta=0.9):\n for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:\n with self.test_session(use_gpu=True):\n # Initialize variables for numpy implementation.\n m0, m1 = 0.0, 0.0\n var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)\n grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)\n var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)\n grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)\n\n if use_resource:\n var0 = 
resource_variable_ops.ResourceVariable(var0_np)\n var1 = resource_variable_ops.ResourceVariable(var1_np)\n global_step = resource_variable_ops.ResourceVariable(\n 0, trainable=False)\n else:\n var0 = variables.Variable(var0_np)\n var1 = variables.Variable(var1_np)\n global_step = variables.Variable(\n 0, trainable=False)\n grads0_np_indices = np.array([0, 1], dtype=np.int32)\n grads0 = ops.IndexedSlices(\n constant_op.constant(grads0_np),\n constant_op.constant(grads0_np_indices), constant_op.constant([2]))\n grads1_np_indices = np.array([0, 1], dtype=np.int32)\n grads1 = ops.IndexedSlices(\n constant_op.constant(grads1_np),\n constant_op.constant(grads1_np_indices), constant_op.constant([2]))\n opt = addsign.AddSignOptimizer(\n learning_rate=learning_rate,\n alpha=alpha,\n beta=beta,\n sign_decay_fn=sign_decay_fn,\n )\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),\n global_step=global_step)\n neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),\n global_step=global_step)\n variables.global_variables_initializer().run()\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], var0.eval())\n self.assertAllClose([3.0, 4.0], var1.eval())\n\n # Run 7 steps of AddSign\n # first 4 steps with positive gradient\n # last 3 steps with negative gradient (sign(gm) should be -1)\n for t in range(1, 4):\n if t < 5:\n update.run()\n else:\n neg_update.run()\n\n var0_np, m0 = addsign_update_numpy(\n var0_np,\n grads0_np,\n m0,\n learning_rate,\n alpha=alpha,\n beta=beta,\n py_sign_decay_fn=py_sign_decay_fn,\n t=t,\n )\n var1_np, m1 = addsign_update_numpy(\n var1_np,\n grads1_np,\n m1,\n learning_rate,\n alpha=alpha,\n beta=beta,\n py_sign_decay_fn=py_sign_decay_fn,\n t=t,\n )\n\n # Validate updated params\n self.assertAllCloseAccordingToType(var0_np, var0.eval())\n self.assertAllCloseAccordingToType(var1_np, var1.eval())\n\n def testSparse(self):\n decay_steps = 10\n sign_decay_fn = sign_decay.get_linear_decay_fn(decay_steps)\n py_sign_decay_fn = py_linear_decay_fn(decay_steps)\n self._testSparse(use_resource=False)\n self._testSparse(use_resource=False,\n learning_rate=0.01,\n alpha=0.1,\n beta=0.8)\n self._testSparse(use_resource=False,\n sign_decay_fn=sign_decay_fn,\n py_sign_decay_fn=py_sign_decay_fn)\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for model_fn.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.estimator import model_fn\nfrom tensorflow.python.estimator.export import export_output\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import session_run_hook\n\n\nclass _FakeHook(session_run_hook.SessionRunHook):\n \"\"\"Fake implementation of `SessionRunHook`.\"\"\"\n\n\nclass _InvalidHook(object):\n \"\"\"Invalid hook (not a subclass of `SessionRunHook`).\"\"\"\n\n\nclass _InvalidScaffold(object):\n \"\"\"Invalid scaffold (not a subclass of `Scaffold`).\"\"\"\n\n\nclass EstimatorSpecTrainTest(test.TestCase):\n \"\"\"Tests EstimatorSpec in train mode.\"\"\"\n\n def testRequiredArgumentsSet(self):\n \"\"\"Tests that no errors are raised when all required arguments are set.\"\"\"\n with ops.Graph().as_default(), self.test_session():\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n loss=constant_op.constant(1.),\n train_op=control_flow_ops.no_op())\n\n def testAllArgumentsSet(self):\n \"\"\"Tests that no errors are raised when all arguments are set.\"\"\"\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant(1.)\n predictions = {'loss': loss}\n classes = constant_op.constant('hello')\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n predictions=predictions,\n loss=loss,\n train_op=control_flow_ops.no_op(),\n eval_metric_ops={'loss': (control_flow_ops.no_op(), loss)},\n export_outputs={\n 'head_name': export_output.ClassificationOutput(classes=classes)\n },\n training_chief_hooks=[_FakeHook()],\n training_hooks=[_FakeHook()],\n scaffold=monitored_session.Scaffold(),\n evaluation_hooks=[_FakeHook()],\n prediction_hooks=[_FakeHook()])\n\n def testLossNumber(self):\n \"\"\"Tests that error is raised when loss is a number (not Tensor).\"\"\"\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(TypeError, 'loss must be Tensor'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n loss=1.,\n train_op=control_flow_ops.no_op())\n\n def testLoss1DTensor(self):\n \"\"\"Tests that no errors are raised when loss is 1D tensor.\"\"\"\n with ops.Graph().as_default(), self.test_session():\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n loss=constant_op.constant([1.]),\n train_op=control_flow_ops.no_op())\n\n def testLossMissing(self):\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(ValueError, 
'Missing loss'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN, train_op=control_flow_ops.no_op())\n\n def testLossNotScalar(self):\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(ValueError, 'Loss must be scalar'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n loss=constant_op.constant([1., 2.]),\n train_op=control_flow_ops.no_op())\n\n def testLossSparseTensor(self):\n with ops.Graph().as_default(), self.test_session():\n loss = sparse_tensor.SparseTensor(\n indices=[[0]],\n values=[0.],\n dense_shape=[1])\n with self.assertRaisesRegexp(TypeError, 'loss must be Tensor'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n loss=loss,\n train_op=control_flow_ops.no_op())\n\n def testLossFromDifferentGraph(self):\n with ops.Graph().as_default():\n loss = constant_op.constant(1.)\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(\n ValueError, 'must be from the default graph'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n loss=loss,\n train_op=control_flow_ops.no_op())\n\n def testTrainOpMissing(self):\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(ValueError, 'Missing train_op'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN, loss=constant_op.constant(1.))\n\n def testTrainOpNotOperationAndTensor(self):\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(TypeError,\n 'train_op must be Operation or Tensor'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n loss=constant_op.constant(1.),\n train_op='Not an Operation or Tensor')\n\n def testTrainOpFromDifferentGraph(self):\n with ops.Graph().as_default():\n train_op = control_flow_ops.no_op()\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(\n ValueError, 'must be from the default graph'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n loss=constant_op.constant(1.),\n train_op=train_op)\n\n def testTrainingChiefHookInvalid(self):\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(\n TypeError, 'All hooks must be SessionRunHook instances'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n loss=constant_op.constant(1.),\n train_op=control_flow_ops.no_op(),\n training_chief_hooks=[_InvalidHook()])\n\n def testTrainingHookInvalid(self):\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(\n TypeError, 'All hooks must be SessionRunHook instances'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n loss=constant_op.constant(1.),\n train_op=control_flow_ops.no_op(),\n training_hooks=[_InvalidHook()])\n\n def testScaffoldInvalid(self):\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(\n TypeError, r'scaffold must be tf\\.train\\.Scaffold'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n loss=constant_op.constant(1.),\n train_op=control_flow_ops.no_op(),\n scaffold=_InvalidScaffold())\n\n def testReturnDefaultScaffold(self):\n with ops.Graph().as_default(), self.test_session():\n estimator_spec = model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n loss=constant_op.constant(1.),\n train_op=control_flow_ops.no_op())\n self.assertIsNotNone(estimator_spec.scaffold)\n\n\nclass EstimatorSpecEvalTest(test.TestCase):\n \"\"\"Tests EstimatorSpec in eval mode.\"\"\"\n\n def testRequiredArgumentsSet(self):\n \"\"\"Tests that no errors are raised when all required 
arguments are set.\"\"\"\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant(1.)\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions={'loss': loss},\n loss=loss)\n\n def testAllArgumentsSet(self):\n \"\"\"Tests that no errors are raised when all arguments are set.\"\"\"\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant(1.)\n predictions = {'loss': loss}\n classes = constant_op.constant('hello')\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions=predictions,\n loss=loss,\n train_op=control_flow_ops.no_op(),\n eval_metric_ops={'loss': (control_flow_ops.no_op(), loss)},\n export_outputs={\n 'head_name': export_output.ClassificationOutput(classes=classes)\n },\n training_chief_hooks=[_FakeHook()],\n training_hooks=[_FakeHook()],\n scaffold=monitored_session.Scaffold(),\n evaluation_hooks=[_FakeHook()])\n\n def testEvaluationHookInvalid(self):\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(\n TypeError, 'All hooks must be SessionRunHook instances'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n loss=constant_op.constant(1.),\n evaluation_hooks=[_InvalidHook()])\n\n def testTupleMetric(self):\n \"\"\"Tests that no errors are raised when a metric is tuple-valued.\"\"\"\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant(1.)\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n loss=loss,\n eval_metric_ops={\n 'some_metric': ((loss, loss, (constant_op.constant(2), loss)),\n control_flow_ops.no_op())})\n\n def testLoss1DTensor(self):\n \"\"\"Tests that no errors are raised when loss is 1D tensor.\"\"\"\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant([1.])\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions={'loss': loss},\n loss=loss)\n\n def testLossNumber(self):\n \"\"\"Tests that error is raised when loss is a number (not Tensor).\"\"\"\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(TypeError, 'loss must be Tensor'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions={'loss': constant_op.constant(1.)},\n loss=1.)\n\n def testLossMissing(self):\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(ValueError, 'Missing loss'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions={'loss': constant_op.constant(1.)})\n\n def testLossNotScalar(self):\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant([1., 2.])\n with self.assertRaisesRegexp(ValueError, 'Loss must be scalar'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions={'loss': loss},\n loss=loss)\n\n def testLossSparseTensor(self):\n with ops.Graph().as_default(), self.test_session():\n loss = sparse_tensor.SparseTensor(\n indices=[[0]],\n values=[0.],\n dense_shape=[1])\n with self.assertRaisesRegexp(\n TypeError, 'loss must be Tensor'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions={'prediction': constant_op.constant(1.)},\n loss=loss)\n\n def testLossFromDifferentGraph(self):\n with ops.Graph().as_default():\n loss = constant_op.constant(1.)\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(\n ValueError, 'must be from the default graph'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions={'prediction': constant_op.constant(1.)},\n loss=loss)\n\n def 
testReplaceRaisesConstructorChecks(self):\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant(1.)\n spec = model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL, predictions={'loss': loss}, loss=loss)\n with self.assertRaisesRegexp(ValueError, 'Loss must be scalar'):\n spec._replace(loss=constant_op.constant([1., 2.]))\n\n def testReplaceDoesReplace(self):\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant(1.)\n spec = model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL, predictions={'loss': loss}, loss=loss)\n new_spec = spec._replace(predictions={'m': loss})\n self.assertEqual(['m'], list(new_spec.predictions.keys()))\n\n def testReplaceNotAllowModeChange(self):\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant(1.)\n spec = model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL, predictions={'loss': loss}, loss=loss)\n spec._replace(mode=model_fn.ModeKeys.EVAL)\n with self.assertRaisesRegexp(ValueError,\n 'mode of EstimatorSpec cannot be changed'):\n spec._replace(mode=model_fn.ModeKeys.TRAIN)\n\n def testPredictionsMissingIsOkay(self):\n with ops.Graph().as_default(), self.test_session():\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL, loss=constant_op.constant(1.))\n\n def testPredictionsTensor(self):\n \"\"\"Tests that no error is raised when predictions is Tensor (not dict).\"\"\"\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant(1.)\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions=loss,\n loss=loss)\n\n def testPredictionsNumber(self):\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(\n TypeError, r'predictions\\[number\\] must be Tensor'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions={'number': 1.},\n loss=constant_op.constant(1.))\n\n def testPredictionsSparseTensor(self):\n with ops.Graph().as_default(), self.test_session():\n predictions = {\n 'sparse': sparse_tensor.SparseTensor(\n indices=[[0]],\n values=[0.],\n dense_shape=[1])}\n with self.assertRaisesRegexp(\n TypeError, r'predictions\\[sparse\\] must be Tensor'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions=predictions,\n loss=constant_op.constant(1.))\n\n def testPredictionsFromDifferentGraph(self):\n with ops.Graph().as_default():\n predictions = {'loss': constant_op.constant(1.)}\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(\n ValueError, 'must be from the default graph'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions=predictions,\n loss=constant_op.constant(1.))\n\n def testEvalMetricOpsNoDict(self):\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant(1.)\n with self.assertRaisesRegexp(\n TypeError, 'eval_metric_ops must be a dict'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions={'loss': loss},\n loss=loss,\n eval_metric_ops=loss)\n\n def testEvalMetricOpsNoTuple(self):\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant(1.)\n with self.assertRaisesRegexp(\n TypeError,\n (r'Values of eval_metric_ops must be \\(metric_value, update_op\\) '\n 'tuples')):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions={'loss': loss},\n loss=loss,\n eval_metric_ops={'loss': loss})\n\n def testEvalMetricOpsNoTensorOrOperation(self):\n with ops.Graph().as_default(), self.test_session():\n loss = 
constant_op.constant(1.)\n with self.assertRaisesRegexp(TypeError, 'must be Operation or Tensor'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions={'loss': loss},\n loss=loss,\n eval_metric_ops={'loss': ('NonTensor', loss)})\n\n def testEvalMetricNestedNoTensorOrOperation(self):\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant(1.)\n with self.assertRaisesRegexp(TypeError, 'must be Operation or Tensor'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions={'loss': loss},\n loss=loss,\n eval_metric_ops={'loss': ((('NonTensor',),),\n control_flow_ops.no_op())})\n\n def testEvalMetricOpsFromDifferentGraph(self):\n with ops.Graph().as_default():\n eval_metric_ops = {\n 'loss': (control_flow_ops.no_op(), constant_op.constant(1.))}\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant(1.)\n with self.assertRaisesRegexp(\n ValueError, 'must be from the default graph'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions={'loss': loss},\n loss=loss,\n eval_metric_ops=eval_metric_ops)\n\n\nclass EstimatorSpecInferTest(test.TestCase):\n \"\"\"Tests EstimatorSpec in infer mode.\"\"\"\n\n def testRequiredArgumentsSet(self):\n \"\"\"Tests that no errors are raised when all required arguments are set.\"\"\"\n with ops.Graph().as_default(), self.test_session():\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT,\n predictions={'loss': constant_op.constant(1.)})\n\n def testAllArgumentsSet(self):\n \"\"\"Tests that no errors are raised when all arguments are set.\"\"\"\n with ops.Graph().as_default(), self.test_session():\n loss = constant_op.constant(1.)\n predictions = {'loss': loss}\n classes = constant_op.constant('hello')\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT,\n predictions=predictions,\n loss=loss,\n train_op=control_flow_ops.no_op(),\n eval_metric_ops={'loss': (control_flow_ops.no_op(), loss)},\n export_outputs={\n 'head_name': export_output.ClassificationOutput(classes=classes)\n },\n training_chief_hooks=[_FakeHook()],\n training_hooks=[_FakeHook()],\n scaffold=monitored_session.Scaffold(),\n evaluation_hooks=[_FakeHook()],\n prediction_hooks=[_FakeHook()])\n\n def testPredictionHookInvalid(self):\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(\n TypeError, 'All hooks must be SessionRunHook instances'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT,\n predictions=constant_op.constant(1.),\n prediction_hooks=[_InvalidHook()])\n\n def testPredictionsMissing(self):\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(ValueError, 'Missing predictions'):\n model_fn.EstimatorSpec(mode=model_fn.ModeKeys.PREDICT)\n\n def testPredictionsTensor(self):\n \"\"\"Tests that no error is raised when predictions is Tensor (not dict).\"\"\"\n with ops.Graph().as_default(), self.test_session():\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT, predictions=constant_op.constant(1.))\n\n def testPredictionsNumber(self):\n with ops.Graph().as_default(), self.test_session():\n with self.assertRaisesRegexp(\n TypeError, r'predictions\\[number\\] must be Tensor'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT, predictions={'number': 1.})\n\n def testPredictionsSparseTensor(self):\n with ops.Graph().as_default(), self.test_session():\n predictions = {\n 'sparse': sparse_tensor.SparseTensor(\n indices=[[0]],\n values=[0.],\n dense_shape=[1])}\n with 
self.assertRaisesRegexp(\n TypeError, r'predictions\\[sparse\\] must be Tensor'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT, predictions=predictions)\n\n def testExportOutputsNoDict(self):\n with ops.Graph().as_default(), self.test_session():\n predictions = {'loss': constant_op.constant(1.)}\n classes = constant_op.constant('hello')\n with self.assertRaisesRegexp(\n TypeError, 'export_outputs must be dict'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT,\n predictions=predictions,\n export_outputs=export_output.ClassificationOutput(classes=classes))\n\n def testExportOutputsValueNotExportOutput(self):\n with ops.Graph().as_default(), self.test_session():\n predictions = {'loss': constant_op.constant(1.)}\n with self.assertRaisesRegexp(\n TypeError,\n r\"Values in export_outputs must be ExportOutput objects. \"\n r\"Given: {'head_name': {'loss': <tf.Tensor 'Const:0' shape=\\(\\) \"\n r\"dtype=float32>}}\"):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT,\n predictions=predictions,\n export_outputs={'head_name': predictions})\n\n def testExportOutputsSingleheadMissingDefault(self):\n with ops.Graph().as_default(), self.test_session():\n predictions = {'loss': constant_op.constant(1.)}\n output_1 = constant_op.constant([1.])\n regression_output = export_output.RegressionOutput(value=output_1)\n export_outputs = {\n 'head-1': regression_output,\n }\n estimator_spec = model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT,\n predictions=predictions,\n export_outputs=export_outputs)\n expected_export_outputs = {\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n regression_output,\n 'head-1': regression_output,\n }\n self.assertEqual(expected_export_outputs, estimator_spec.export_outputs)\n\n def testExportOutputsMultiheadWithDefault(self):\n with ops.Graph().as_default(), self.test_session():\n predictions = {'loss': constant_op.constant(1.)}\n output_1 = constant_op.constant([1.])\n output_2 = constant_op.constant(['2'])\n output_3 = constant_op.constant(['3'])\n export_outputs = {\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n export_output.RegressionOutput(value=output_1),\n 'head-2': export_output.ClassificationOutput(classes=output_2),\n 'head-3': export_output.PredictOutput(outputs={\n 'some_output_3': output_3\n })}\n estimator_spec = model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT,\n predictions=predictions,\n export_outputs=export_outputs)\n self.assertEqual(export_outputs, estimator_spec.export_outputs)\n\n def testExportOutputsMultiheadMissingDefault(self):\n with ops.Graph().as_default(), self.test_session():\n predictions = {'loss': constant_op.constant(1.)}\n output_1 = constant_op.constant([1.])\n output_2 = constant_op.constant(['2'])\n output_3 = constant_op.constant(['3'])\n export_outputs = {\n 'head-1': export_output.RegressionOutput(value=output_1),\n 'head-2': export_output.ClassificationOutput(classes=output_2),\n 'head-3': export_output.PredictOutput(outputs={\n 'some_output_3': output_3\n })}\n with self.assertRaisesRegexp(\n ValueError,\n 'Multiple export_outputs were provided, but none of them is '\n 'specified as the default. 
Do this by naming one of them with '\n 'signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.'):\n model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT,\n predictions=predictions,\n export_outputs=export_outputs)\n\n def testDefaultExportOutputCreated(self):\n \"\"\"Ensure that a default PredictOutput is created for export.\"\"\"\n with ops.Graph().as_default(), self.test_session():\n predictions = constant_op.constant(1.)\n self._assertDefaultExportOutputForPredictions(predictions)\n\n def testDefaultExportOutputCreatedDict(self):\n \"\"\"Ensure that a default PredictOutput is created for export for dicts.\"\"\"\n with ops.Graph().as_default(), self.test_session():\n predictions = {'loss': constant_op.constant(1.),\n 'score': constant_op.constant(10.)}\n self._assertDefaultExportOutputForPredictions(predictions)\n\n def _assertDefaultExportOutputForPredictions(self, predictions):\n spec = model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT, predictions=predictions)\n\n expected = export_output.PredictOutput(predictions).outputs\n serving_output = spec.export_outputs[\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n self.assertEqual(serving_output.outputs, expected)\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Perturb a `LinearOperator` with a rank `K` update.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.linalg import linear_operator\nfrom tensorflow.python.ops.linalg import linear_operator_diag\nfrom tensorflow.python.ops.linalg import linear_operator_identity\nfrom tensorflow.python.ops.linalg import linear_operator_util\nfrom tensorflow.python.util.tf_export import tf_export\n\n__all__ = [\n \"LinearOperatorLowRankUpdate\",\n]\n\n\n@tf_export(\"linalg.LinearOperatorLowRankUpdate\")\nclass LinearOperatorLowRankUpdate(linear_operator.LinearOperator):\n \"\"\"Perturb a `LinearOperator` with a rank `K` update.\n\n This operator acts like a [batch] matrix `A` with shape\n `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a\n batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is\n an `M x N` matrix.\n\n `LinearOperatorLowRankUpdate` represents `A = L + U D V^H`, where\n\n ```\n L, is a LinearOperator representing [batch] M x N matrices\n U, is a [batch] M x K matrix. Typically K << M.\n D, is a [batch] K x K matrix.\n V, is a [batch] N x K matrix. 
Typically K << N.\n V^H is the Hermitian transpose (adjoint) of V.\n ```\n\n If `M = N`, determinants and solves are done using the matrix determinant\n lemma and Woodbury identities, and thus require L and D to be non-singular.\n\n Solves and determinants will be attempted unless the \"is_non_singular\"\n property of L and D is False.\n\n In the event that L and D are positive-definite, and U = V, solves and\n determinants can be done using a Cholesky factorization.\n\n ```python\n # Create a 3 x 3 diagonal linear operator.\n diag_operator = LinearOperatorDiag(\n diag_update=[1., 2., 3.], is_non_singular=True, is_self_adjoint=True,\n is_positive_definite=True)\n\n # Perturb with a rank 2 perturbation\n operator = LinearOperatorLowRankUpdate(\n operator=diag_operator,\n u=[[1., 2.], [-1., 3.], [0., 0.]],\n diag_update=[11., 12.],\n v=[[1., 2.], [-1., 3.], [10., 10.]])\n\n operator.shape\n ==> [3, 3]\n\n operator.log_abs_determinant()\n ==> scalar Tensor\n\n x = ... Shape [3, 4] Tensor\n operator.matmul(x)\n ==> Shape [3, 4] Tensor\n ```\n\n ### Shape compatibility\n\n This operator acts on [batch] matrix with compatible shape.\n `x` is a batch matrix with compatible shape for `matmul` and `solve` if\n\n ```\n operator.shape = [B1,...,Bb] + [M, N], with b >= 0\n x.shape = [B1,...,Bb] + [N, R], with R >= 0.\n ```\n\n ### Performance\n\n Suppose `operator` is a `LinearOperatorLowRankUpdate` of shape `[M, N]`,\n made from a rank `K` update of `base_operator` which performs `.matmul(x)` on\n `x` having `x.shape = [N, R]` with `O(L_matmul*N*R)` complexity (and similarly\n for `solve`, `determinant`. Then, if `x.shape = [N, R]`,\n\n * `operator.matmul(x)` is `O(L_matmul*N*R + K*N*R)`\n\n and if `M = N`,\n\n * `operator.solve(x)` is `O(L_matmul*N*R + N*K*R + K^2*R + K^3)`\n * `operator.determinant()` is `O(L_determinant + L_solve*N*K + K^2*N + K^3)`\n\n If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and\n `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.\n\n #### Matrix property hints\n\n This `LinearOperator` is initialized with boolean flags of the form `is_X`,\n for `X = non_singular`, `self_adjoint`, `positive_definite`,\n `diag_update_positive` and `square`. These have the following meaning:\n\n * If `is_X == True`, callers should expect the operator to have the\n property `X`. This is a promise that should be fulfilled, but is *not* a\n runtime assert. For example, finite floating point precision may result\n in these promises being violated.\n * If `is_X == False`, callers should expect the operator to not have `X`.\n * If `is_X == None` (the default), callers should have no expectation either\n way.\n \"\"\"\n\n def __init__(self,\n base_operator,\n u,\n diag_update=None,\n v=None,\n is_diag_update_positive=None,\n is_non_singular=None,\n is_self_adjoint=None,\n is_positive_definite=None,\n is_square=None,\n name=\"LinearOperatorLowRankUpdate\"):\n \"\"\"Initialize a `LinearOperatorLowRankUpdate`.\n\n This creates a `LinearOperator` of the form `A = L + U D V^H`, with\n `L` a `LinearOperator`, `U, V` both [batch] matrices, and `D` a [batch]\n diagonal matrix.\n\n If `L` is non-singular, solves and determinants are available.\n Solves/determinants both involve a solve/determinant of a `K x K` system.\n In the event that L and D are self-adjoint positive-definite, and U = V,\n this can be done using a Cholesky factorization. 
The user should set the\n `is_X` matrix property hints, which will trigger the appropriate code path.\n\n Args:\n base_operator: Shape `[B1,...,Bb, M, N]` real `float16`, `float32` or\n `float64` `LinearOperator`. This is `L` above.\n u: Shape `[B1,...,Bb, M, K]` `Tensor` of same `dtype` as `base_operator`.\n This is `U` above.\n diag_update: Optional shape `[B1,...,Bb, K]` `Tensor` with same `dtype`\n as `base_operator`. This is the diagonal of `D` above.\n Defaults to `D` being the identity operator.\n v: Optional `Tensor` of same `dtype` as `u` and shape `[B1,...,Bb, N, K]`\n Defaults to `v = u`, in which case the perturbation is symmetric.\n If `M != N`, then `v` must be set since the perturbation is not square.\n is_diag_update_positive: Python `bool`.\n If `True`, expect `diag_update > 0`.\n is_non_singular: Expect that this operator is non-singular.\n Default is `None`, unless `is_positive_definite` is auto-set to be\n `True` (see below).\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose. Default is `None`, unless `base_operator` is self-adjoint\n and `v = None` (meaning `u=v`), in which case this defaults to `True`.\n is_positive_definite: Expect that this operator is positive definite.\n Default is `None`, unless `base_operator` is positive-definite\n `v = None` (meaning `u=v`), and `is_diag_update_positive`, in which case\n this defaults to `True`.\n Note that we say an operator is positive definite when the quadratic\n form `x^H A x` has positive real part for all nonzero `x`.\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`.\n\n Raises:\n ValueError: If `is_X` flags are set in an inconsistent way.\n \"\"\"\n # TODO(langmore) support complex types.\n # Complex types are not allowed due to tf.cholesky() requiring float.\n # If complex dtypes are allowed, we update the following\n # 1. is_diag_update_positive should still imply that `diag > 0`, but we need\n # to remind the user that this implies diag is real. This is needed\n # because if diag has non-zero imaginary part, it will not be\n # self-adjoint positive definite.\n dtype = base_operator.dtype\n allowed_dtypes = [\n dtypes.float16,\n dtypes.float32,\n dtypes.float64,\n ]\n if dtype not in allowed_dtypes:\n raise TypeError(\n \"Argument matrix must have dtype in %s. Found: %s\"\n % (allowed_dtypes, dtype))\n\n if diag_update is None:\n if is_diag_update_positive is False:\n raise ValueError(\n \"Default diagonal is the identity, which is positive. However, \"\n \"user set 'is_diag_update_positive' to False.\")\n is_diag_update_positive = True\n\n # In this case, we can use a Cholesky decomposition to help us solve/det.\n self._use_cholesky = (\n base_operator.is_positive_definite and base_operator.is_self_adjoint\n and is_diag_update_positive\n and v is None)\n\n # Possibly auto-set some characteristic flags from None to True.\n # If the Flags were set (by the user) incorrectly to False, then raise.\n if base_operator.is_self_adjoint and v is None and not dtype.is_complex:\n if is_self_adjoint is False:\n raise ValueError(\n \"A = L + UDU^H, with L self-adjoint and D real diagonal. Since\"\n \" UDU^H is self-adjoint, this must be a self-adjoint operator.\")\n is_self_adjoint = True\n\n # The condition for using a cholesky is sufficient for SPD, and\n # we no weaker choice of these hints leads to SPD. 
Therefore,\n # the following line reads \"if hints indicate SPD...\"\n if self._use_cholesky:\n if (\n is_positive_definite is False\n or is_self_adjoint is False\n or is_non_singular is False):\n raise ValueError(\n \"Arguments imply this is self-adjoint positive-definite operator.\")\n is_positive_definite = True\n is_self_adjoint = True\n\n values = base_operator.graph_parents + [u, diag_update, v]\n with ops.name_scope(name, values=values):\n\n # Create U and V.\n self._u = ops.convert_to_tensor(u, name=\"u\")\n if v is None:\n self._v = self._u\n else:\n self._v = ops.convert_to_tensor(v, name=\"v\")\n\n if diag_update is None:\n self._diag_update = None\n else:\n self._diag_update = ops.convert_to_tensor(\n diag_update, name=\"diag_update\")\n\n # Create base_operator L.\n self._base_operator = base_operator\n graph_parents = base_operator.graph_parents + [\n self.u, self._diag_update, self.v]\n graph_parents = [p for p in graph_parents if p is not None]\n\n super(LinearOperatorLowRankUpdate, self).__init__(\n dtype=self._base_operator.dtype,\n graph_parents=graph_parents,\n is_non_singular=is_non_singular,\n is_self_adjoint=is_self_adjoint,\n is_positive_definite=is_positive_definite,\n is_square=is_square,\n name=name)\n\n # Create the diagonal operator D.\n self._set_diag_operators(diag_update, is_diag_update_positive)\n self._is_diag_update_positive = is_diag_update_positive\n\n check_ops.assert_same_float_dtype((base_operator, self.u, self.v,\n self._diag_update))\n self._check_shapes()\n\n # Pre-compute the so-called \"capacitance\" matrix\n # C := D^{-1} + V^H L^{-1} U\n self._capacitance = self._make_capacitance()\n if self._use_cholesky:\n self._chol_capacitance = linalg_ops.cholesky(self._capacitance)\n\n def _check_shapes(self):\n \"\"\"Static check that shapes are compatible.\"\"\"\n # Broadcast shape also checks that u and v are compatible.\n uv_shape = array_ops.broadcast_static_shape(\n self.u.get_shape(), self.v.get_shape())\n\n batch_shape = array_ops.broadcast_static_shape(\n self.base_operator.batch_shape, uv_shape[:-2])\n\n self.base_operator.domain_dimension.assert_is_compatible_with(\n uv_shape[-2])\n\n if self._diag_update is not None:\n uv_shape[-1].assert_is_compatible_with(self._diag_update.get_shape()[-1])\n array_ops.broadcast_static_shape(\n batch_shape, self._diag_update.get_shape()[:-1])\n\n def _set_diag_operators(self, diag_update, is_diag_update_positive):\n \"\"\"Set attributes self._diag_update and self._diag_operator.\"\"\"\n if diag_update is not None:\n self._diag_operator = linear_operator_diag.LinearOperatorDiag(\n self._diag_update, is_positive_definite=is_diag_update_positive)\n self._diag_inv_operator = linear_operator_diag.LinearOperatorDiag(\n 1. 
/ self._diag_update, is_positive_definite=is_diag_update_positive)\n else:\n if self.u.get_shape()[-1].value is not None:\n r = self.u.get_shape()[-1].value\n else:\n r = array_ops.shape(self.u)[-1]\n self._diag_operator = linear_operator_identity.LinearOperatorIdentity(\n num_rows=r, dtype=self.dtype)\n self._diag_inv_operator = self._diag_operator\n\n @property\n def u(self):\n \"\"\"If this operator is `A = L + U D V^H`, this is the `U`.\"\"\"\n return self._u\n\n @property\n def v(self):\n \"\"\"If this operator is `A = L + U D V^H`, this is the `V`.\"\"\"\n return self._v\n\n @property\n def is_diag_update_positive(self):\n \"\"\"If this operator is `A = L + U D V^H`, this hints `D > 0` elementwise.\"\"\"\n return self._is_diag_update_positive\n\n @property\n def diag_update(self):\n \"\"\"If this operator is `A = L + U D V^H`, this is the diagonal of `D`.\"\"\"\n return self._diag_update\n\n @property\n def diag_operator(self):\n \"\"\"If this operator is `A = L + U D V^H`, this is `D`.\"\"\"\n return self._diag_operator\n\n @property\n def base_operator(self):\n \"\"\"If this operator is `A = L + U D V^H`, this is the `L`.\"\"\"\n return self._base_operator\n\n def _shape(self):\n batch_shape = array_ops.broadcast_static_shape(\n self.base_operator.batch_shape,\n self.u.get_shape()[:-2])\n return batch_shape.concatenate(self.base_operator.shape[-2:])\n\n def _shape_tensor(self):\n batch_shape = array_ops.broadcast_dynamic_shape(\n self.base_operator.batch_shape_tensor(),\n array_ops.shape(self.u)[:-2])\n return array_ops.concat(\n [batch_shape, self.base_operator.shape_tensor()[-2:]], axis=0)\n\n def _matmul(self, x, adjoint=False, adjoint_arg=False):\n u = self.u\n v = self.v\n l = self.base_operator\n d = self.diag_operator\n\n leading_term = l.matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)\n\n if adjoint:\n uh_x = linear_operator_util.matmul_with_broadcast(\n u, x, adjoint_a=True, adjoint_b=adjoint_arg)\n d_uh_x = d.matmul(uh_x, adjoint=adjoint)\n v_d_uh_x = linear_operator_util.matmul_with_broadcast(\n v, d_uh_x)\n return leading_term + v_d_uh_x\n else:\n vh_x = linear_operator_util.matmul_with_broadcast(\n v, x, adjoint_a=True, adjoint_b=adjoint_arg)\n d_vh_x = d.matmul(vh_x, adjoint=adjoint)\n u_d_vh_x = linear_operator_util.matmul_with_broadcast(u, d_vh_x)\n return leading_term + u_d_vh_x\n\n def _determinant(self):\n if self.is_positive_definite:\n return math_ops.exp(self.log_abs_determinant())\n # The matrix determinant lemma gives\n # https://en.wikipedia.org/wiki/Matrix_determinant_lemma\n # det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)\n # = det(C) det(D) det(L)\n # where C is sometimes known as the capacitance matrix,\n # C := D^{-1} + V^H L^{-1} U\n det_c = linalg_ops.matrix_determinant(self._capacitance)\n det_d = self.diag_operator.determinant()\n det_l = self.base_operator.determinant()\n return det_c * det_d * det_l\n\n def _log_abs_determinant(self):\n # Recall\n # det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)\n # = det(C) det(D) det(L)\n log_abs_det_d = self.diag_operator.log_abs_determinant()\n log_abs_det_l = self.base_operator.log_abs_determinant()\n\n if self._use_cholesky:\n chol_cap_diag = array_ops.matrix_diag_part(self._chol_capacitance)\n log_abs_det_c = 2 * math_ops.reduce_sum(\n math_ops.log(chol_cap_diag), reduction_indices=[-1])\n else:\n det_c = linalg_ops.matrix_determinant(self._capacitance)\n log_abs_det_c = math_ops.log(math_ops.abs(det_c))\n\n return log_abs_det_c + log_abs_det_d + log_abs_det_l\n\n def 
_solve(self, rhs, adjoint=False, adjoint_arg=False):\n if self.base_operator.is_non_singular is False:\n raise ValueError(\n \"Solve not implemented unless this is a perturbation of a \"\n \"non-singular LinearOperator.\")\n # The Woodbury formula gives:\n # https://en.wikipedia.org/wiki/Woodbury_matrix_identity\n # (L + UDV^H)^{-1}\n # = L^{-1} - L^{-1} U (D^{-1} + V^H L^{-1} U)^{-1} V^H L^{-1}\n # = L^{-1} - L^{-1} U C^{-1} V^H L^{-1}\n # where C is the capacitance matrix, C := D^{-1} + V^H L^{-1} U\n # Note also that, with ^{-H} being the inverse of the adjoint,\n # (L + UDV^H)^{-H}\n # = L^{-H} - L^{-H} V C^{-H} U^H L^{-H}\n l = self.base_operator\n if adjoint:\n v = self.u\n u = self.v\n else:\n v = self.v\n u = self.u\n\n # L^{-1} rhs\n linv_rhs = l.solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)\n # V^H L^{-1} rhs\n vh_linv_rhs = linear_operator_util.matmul_with_broadcast(\n v, linv_rhs, adjoint_a=True)\n # C^{-1} V^H L^{-1} rhs\n if self._use_cholesky:\n capinv_vh_linv_rhs = linear_operator_util.cholesky_solve_with_broadcast(\n self._chol_capacitance, vh_linv_rhs)\n else:\n capinv_vh_linv_rhs = linear_operator_util.matrix_solve_with_broadcast(\n self._capacitance, vh_linv_rhs, adjoint=adjoint)\n # U C^{-1} V^H M^{-1} rhs\n u_capinv_vh_linv_rhs = linear_operator_util.matmul_with_broadcast(\n u, capinv_vh_linv_rhs)\n # L^{-1} U C^{-1} V^H L^{-1} rhs\n linv_u_capinv_vh_linv_rhs = l.solve(u_capinv_vh_linv_rhs, adjoint=adjoint)\n\n # L^{-1} - L^{-1} U C^{-1} V^H L^{-1}\n return linv_rhs - linv_u_capinv_vh_linv_rhs\n\n def _make_capacitance(self):\n # C := D^{-1} + V^H L^{-1} U\n # which is sometimes known as the \"capacitance\" matrix.\n\n # L^{-1} U\n linv_u = self.base_operator.solve(self.u)\n # V^H L^{-1} U\n vh_linv_u = linear_operator_util.matmul_with_broadcast(\n self.v, linv_u, adjoint_a=True)\n\n # D^{-1} + V^H L^{-1} V\n capacitance = self._diag_inv_operator.add_to_tensor(vh_linv_u)\n return capacitance\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Cluster Resolvers are used for dynamic cluster IP/hostname resolution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nfrom tensorflow.python.training.server_lib import ClusterSpec\n\n\nclass ClusterResolver(object):\n \"\"\"Abstract class for all implementations of ClusterResolvers.\n\n This defines the skeleton for all implementations of ClusterResolvers.\n ClusterResolvers are a way for TensorFlow to communicate with various cluster\n management systems (e.g. GCE, AWS, etc...).\n\n By letting TensorFlow communicate with these systems, we will be able to\n automatically discover and resolve IP addresses for various TensorFlow\n workers. 
This will eventually allow us to automatically recover from\n underlying machine failures and scale TensorFlow worker clusters up and down.\n \"\"\"\n\n @abc.abstractmethod\n def cluster_spec(self):\n \"\"\"Retrieve the current state of the cluster and returns a ClusterSpec.\n\n Returns:\n A ClusterSpec representing the state of the cluster at the moment this\n function is called.\n\n Implementors of this function must take care in ensuring that the\n ClusterSpec returned is up-to-date at the time of calling this function.\n This usually means retrieving the information from the underlying cluster\n management system every time this function is invoked and reconstructing\n a cluster_spec, rather than attempting to cache anything.\n \"\"\"\n raise NotImplementedError(\n 'cluster_spec is not implemented for {}.'.format(self))\n\n @abc.abstractmethod\n def master(self):\n \"\"\"...\"\"\"\n raise NotImplementedError('master is not implemented for {}.'.format(self))\n\n\nclass SimpleClusterResolver(ClusterResolver):\n \"\"\"Simple implementation of ClusterResolver that accepts a ClusterSpec.\"\"\"\n\n def __init__(self, cluster_spec, master=''):\n \"\"\"Creates a SimpleClusterResolver from a ClusterSpec.\"\"\"\n super(SimpleClusterResolver, self).__init__()\n\n if not isinstance(cluster_spec, ClusterSpec):\n raise TypeError('cluster_spec must be a ClusterSpec.')\n self._cluster_spec = cluster_spec\n\n if not isinstance(master, str):\n raise TypeError('master must be a string.')\n self._master = master\n\n def cluster_spec(self):\n \"\"\"Returns the ClusterSpec passed into the constructor.\"\"\"\n return self._cluster_spec\n\n def master(self):\n \"\"\"Returns the master address to use when creating a session.\"\"\"\n return self._master\n\n\nclass UnionClusterResolver(ClusterResolver):\n \"\"\"Performs a union on underlying ClusterResolvers.\n\n This class performs a union given two or more existing ClusterResolvers. It\n merges the underlying ClusterResolvers, and returns one unified ClusterSpec\n when cluster_spec is called. 
The details of the merge function is\n documented in the cluster_spec function.\n \"\"\"\n\n def __init__(self, *args):\n \"\"\"Initializes a UnionClusterResolver with other ClusterResolvers.\n\n Args:\n *args: `ClusterResolver` objects to be unionized.\n\n Raises:\n TypeError: If any argument is not a subclass of `ClusterResolvers`.\n ValueError: If there are no arguments passed.\n \"\"\"\n super(UnionClusterResolver, self).__init__()\n\n if not args:\n raise ValueError('At least one ClusterResolver is required.')\n\n for cluster_resolver in args:\n if not isinstance(cluster_resolver, ClusterResolver):\n raise TypeError('All arguments must be a sub-class of '\n '`ClusterResolver.`')\n self._cluster_resolvers = args\n\n def cluster_spec(self):\n \"\"\"Returns a union of all the ClusterSpecs from the ClusterResolvers.\n\n Returns:\n A ClusterSpec containing host information merged from all the underlying\n ClusterResolvers.\n\n Raises:\n KeyError: If there are conflicting keys detected when merging two or\n more dictionaries, this exception is raised.\n\n Note: If there are multiple ClusterResolvers exposing ClusterSpecs with the\n same job name, we will merge the list/dict of workers.\n\n If *all* underlying ClusterSpecs expose the set of workers as lists, we will\n concatenate the lists of workers, starting with the list of workers from\n the first ClusterResolver passed into the constructor.\n\n If *any* of the ClusterSpecs expose the set of workers as a dict, we will\n treat all the sets of workers as dicts (even if they are returned as lists)\n and will only merge them into a dict if there is no conflicting keys. If\n there is a conflicting key, we will raise a `KeyError`.\n \"\"\"\n\n merged_cluster = {}\n\n # We figure out whether it is all lists for a particular job, or whether\n # there are dicts inside.\n for cluster_resolver in self._cluster_resolvers:\n cluster_spec = cluster_resolver.cluster_spec()\n cluster_dict = cluster_spec.as_dict()\n\n for job_name, tasks in cluster_dict.items():\n if job_name in merged_cluster:\n # If we see a dict, then we write a dict out regardless.\n if isinstance(tasks, dict):\n merged_cluster[job_name] = {}\n else:\n # We take whichever type is present.\n if isinstance(tasks, list):\n merged_cluster[job_name] = []\n else:\n merged_cluster[job_name] = {}\n\n # We then do the merge as appropriate in merged_cluster[job].\n for cluster_resolver in self._cluster_resolvers:\n cluster_spec = cluster_resolver.cluster_spec()\n cluster_dict = cluster_spec.as_dict()\n\n for job_name, tasks in cluster_dict.items():\n if isinstance(merged_cluster[job_name], list):\n # We all have lists, we can just concatenate and be done.\n merged_cluster[job_name].extend(tasks)\n else:\n if isinstance(tasks, list):\n # We convert to a dictionary if the type is a list.\n task_dict = dict(zip(range(0, len(tasks)), tasks))\n else:\n # We can simply make a copy (for update) and be done.\n task_dict = tasks.copy()\n\n # We detect if there are duplicates, and raise an error if so.\n task_keys = set(task_dict)\n merged_keys = set(merged_cluster[job_name].keys())\n intersected_keys = task_keys.intersection(merged_keys)\n if intersected_keys:\n raise KeyError('Duplicate keys detected when merging two '\n 'ClusterSpecs: %s' % repr(intersected_keys))\n\n # We do the merge after all the processing.\n merged_cluster[job_name].update(task_dict)\n\n return ClusterSpec(merged_cluster)\n\n def master(self):\n \"\"\"master returns the master address from the first cluster resolver.\"\"\"\n 
return self._cluster_resolvers[0].master()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Model evaluation tools for TFGAN.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.gan.python.eval.python import classifier_metrics_impl\n# pylint: disable=wildcard-import\nfrom tensorflow.contrib.gan.python.eval.python.classifier_metrics_impl import *\n# pylint: enable=wildcard-import\nfrom tensorflow.python.util.all_util import remove_undocumented\n\n__all__ = classifier_metrics_impl.__all__\nremove_undocumented(__name__, __all__)\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.contrib.tensor_forest.ops.tensor_forest.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom google.protobuf.json_format import ParseDict\nfrom tensorflow.contrib.decision_trees.proto import generic_tree_model_pb2 as _tree_proto\nfrom tensorflow.contrib.tensor_forest.python import tensor_forest\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import resources\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\n\n\nclass TensorForestTest(test_util.TensorFlowTestCase):\n\n def testForestHParams(self):\n hparams = tensor_forest.ForestHParams(\n num_classes=2,\n num_trees=100,\n max_nodes=1000,\n split_after_samples=25,\n num_features=60).fill()\n self.assertEquals(2, hparams.num_classes)\n self.assertEquals(3, hparams.num_output_columns)\n self.assertEquals(10, hparams.num_splits_to_consider)\n # Default value of valid_leaf_threshold\n self.assertEquals(1, hparams.valid_leaf_threshold)\n self.assertEquals(0, hparams.base_random_seed)\n\n def testForestHParamsBigTree(self):\n hparams = tensor_forest.ForestHParams(\n num_classes=2,\n num_trees=100,\n max_nodes=1000000,\n split_after_samples=25,\n num_features=1000).fill()\n self.assertEquals(31, hparams.num_splits_to_consider)\n\n def testForestHParamsStringParams(self):\n hparams = tensor_forest.ForestHParams(\n 
num_classes=2,\n num_trees=100,\n max_nodes=1000000,\n split_after_samples=\"25\",\n num_splits_to_consider=\"1000000\",\n num_features=1000).fill()\n self.assertEquals(\"1000000\", hparams.num_splits_to_consider)\n\n def testTrainingConstructionClassification(self):\n input_data = [[-1., 0.], [-1., 2.], # node 1\n [1., 0.], [1., -2.]] # node 2\n input_labels = [0, 1, 2, 3]\n\n params = tensor_forest.ForestHParams(\n num_classes=4,\n num_features=2,\n num_trees=10,\n max_nodes=1000,\n split_after_samples=25).fill()\n\n graph_builder = tensor_forest.RandomForestGraphs(params)\n graph = graph_builder.training_graph(input_data, input_labels)\n self.assertTrue(isinstance(graph, ops.Operation))\n\n def testTrainingConstructionRegression(self):\n input_data = [[-1., 0.], [-1., 2.], # node 1\n [1., 0.], [1., -2.]] # node 2\n input_labels = [0, 1, 2, 3]\n\n params = tensor_forest.ForestHParams(\n num_classes=4,\n num_features=2,\n num_trees=10,\n max_nodes=1000,\n split_after_samples=25,\n regression=True).fill()\n\n graph_builder = tensor_forest.RandomForestGraphs(params)\n graph = graph_builder.training_graph(input_data, input_labels)\n self.assertTrue(isinstance(graph, ops.Operation))\n\n def testInferenceConstruction(self):\n input_data = [[-1., 0.], [-1., 2.], # node 1\n [1., 0.], [1., -2.]] # node 2\n\n params = tensor_forest.ForestHParams(\n num_classes=4,\n num_features=2,\n num_trees=10,\n max_nodes=1000,\n split_after_samples=25).fill()\n\n graph_builder = tensor_forest.RandomForestGraphs(params)\n probs, paths, var = graph_builder.inference_graph(input_data)\n self.assertTrue(isinstance(probs, ops.Tensor))\n self.assertTrue(isinstance(paths, ops.Tensor))\n self.assertTrue(isinstance(var, ops.Tensor))\n\n def testInfrenceFromRestoredModel(self):\n input_data = [[-1., 0.], [-1., 2.], # node 1\n [1., 0.], [1., -2.]] # node 2\n expected_prediction = [[0.0, 1.0], [0.0, 1.0],\n [0.0, 1.0], [0.0, 1.0]]\n hparams = tensor_forest.ForestHParams(\n num_classes=2,\n num_features=2,\n num_trees=1,\n max_nodes=1000,\n split_after_samples=25).fill()\n tree_weight = {'decisionTree':\n {'nodes':\n [{'binaryNode':\n {'rightChildId': 2,\n 'leftChildId': 1,\n 'inequalityLeftChildTest':\n {'featureId': {'id': '0'},\n 'threshold': {'floatValue': 0}}}},\n {'leaf': {'vector':\n {'value': [{'floatValue': 0.0},\n {'floatValue': 1.0}]}},\n 'nodeId': 1},\n {'leaf': {'vector':\n {'value': [{'floatValue': 0.0},\n {'floatValue': 1.0}]}},\n 'nodeId': 2}]}}\n restored_tree_param = ParseDict(tree_weight,\n _tree_proto.Model()).SerializeToString()\n graph_builder = tensor_forest.RandomForestGraphs(hparams,\n [restored_tree_param])\n probs, paths, var = graph_builder.inference_graph(input_data)\n self.assertTrue(isinstance(probs, ops.Tensor))\n self.assertTrue(isinstance(paths, ops.Tensor))\n self.assertTrue(isinstance(var, ops.Tensor))\n with self.test_session():\n variables.global_variables_initializer().run()\n resources.initialize_resources(resources.shared_resources()).run()\n self.assertEquals(probs.eval().shape, (4, 2))\n self.assertEquals(probs.eval().tolist(), expected_prediction)\n\n def testTrainingConstructionClassificationSparse(self):\n input_data = sparse_tensor.SparseTensor(\n indices=[[0, 0], [0, 3], [1, 0], [1, 7], [2, 1], [3, 9]],\n values=[-1.0, 0.0, -1., 2., 1., -2.0],\n dense_shape=[4, 10])\n input_labels = [0, 1, 2, 3]\n\n params = tensor_forest.ForestHParams(\n num_classes=4,\n num_features=10,\n num_trees=10,\n max_nodes=1000,\n split_after_samples=25).fill()\n\n graph_builder = 
tensor_forest.RandomForestGraphs(params)\n graph = graph_builder.training_graph(input_data, input_labels)\n self.assertTrue(isinstance(graph, ops.Operation))\n\n def testInferenceConstructionSparse(self):\n input_data = sparse_tensor.SparseTensor(\n indices=[[0, 0], [0, 3],\n [1, 0], [1, 7],\n [2, 1],\n [3, 9]],\n values=[-1.0, 0.0,\n -1., 2.,\n 1.,\n -2.0],\n dense_shape=[4, 10])\n\n params = tensor_forest.ForestHParams(\n num_classes=4,\n num_features=10,\n num_trees=10,\n max_nodes=1000,\n regression=True,\n split_after_samples=25).fill()\n\n graph_builder = tensor_forest.RandomForestGraphs(params)\n probs, paths, var = graph_builder.inference_graph(input_data)\n self.assertTrue(isinstance(probs, ops.Tensor))\n self.assertTrue(isinstance(paths, ops.Tensor))\n self.assertTrue(isinstance(var, ops.Tensor))\n\n\nif __name__ == \"__main__\":\n googletest.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A hybrid model that samples paths when training.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data\nfrom tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected\nfrom tensorflow.contrib.tensor_forest.hybrid.python.models import hard_decisions_to_data_then_nn\nfrom tensorflow.python.training import adagrad\n\n\nclass StochasticHardDecisionsToDataThenNN(\n hard_decisions_to_data_then_nn.HardDecisionsToDataThenNN):\n \"\"\"A hybrid model that samples paths when training.\"\"\"\n\n def __init__(self,\n params,\n device_assigner=None,\n optimizer_class=adagrad.AdagradOptimizer,\n **kwargs):\n\n super(StochasticHardDecisionsToDataThenNN, self).__init__(\n params,\n device_assigner=device_assigner,\n optimizer_class=optimizer_class,\n **kwargs)\n\n self.layers = [decisions_to_data.StochasticHardDecisionsToDataLayer(\n params, 0, device_assigner),\n fully_connected.FullyConnectedLayer(\n params, 1, device_assigner=device_assigner)]\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ==============================================================================\n\"\"\"A visitor class that generates protobufs for each python object.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom google.protobuf import message\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.tools.api.lib import api_objects_pb2\n\n# Following object need to be handled individually.\n_CORNER_CASES = {\n '': {'tools': {}},\n 'test.TestCase': {},\n 'test.TestCase.failureException': {},\n}\n\n\ndef _SanitizedArgSpec(obj):\n \"\"\"Get an ArgSpec string that is free of addresses.\n\n We have callables as function arg defaults. This results in addresses in\n getargspec output. This function returns a sanitized string list of base\n classes.\n\n Args:\n obj: A python routine for us the create the sanitized arspec of.\n\n Returns:\n string, a string representation of the argspec.\n \"\"\"\n output_string = ''\n unsanitized_arg_spec = tf_inspect.getargspec(obj)\n\n for clean_attr in ('args', 'varargs', 'keywords'):\n output_string += '%s=%s, ' % (clean_attr,\n getattr(unsanitized_arg_spec, clean_attr))\n\n if unsanitized_arg_spec.defaults:\n sanitized_defaults = []\n for val in unsanitized_arg_spec.defaults:\n str_val = str(val)\n # Sanitize argspecs that have hex code in them.\n if ' at 0x' in str_val:\n sanitized_defaults.append('%s instance>' % str_val.split(' at ')[0])\n else:\n sanitized_defaults.append(str_val)\n\n output_string += 'defaults=%s, ' % sanitized_defaults\n\n else:\n output_string += 'defaults=None'\n\n return output_string\n\n\ndef _SanitizedMRO(obj):\n \"\"\"Get a list of superclasses with minimal amount of non-TF classes.\n\n Based on many parameters like python version, OS, protobuf implementation\n or changes in google core libraries the list of superclasses of a class\n can change. We only return the first non-TF class to be robust to non API\n affecting changes. The Method Resolution Order returned by `tf_inspect.getmro`\n is still maintained in the return value.\n\n Args:\n obj: A python routine for us the create the sanitized arspec of.\n\n Returns:\n list of strings, string representation of the class names.\n \"\"\"\n return_list = []\n for cls in tf_inspect.getmro(obj):\n str_repr = str(cls)\n return_list.append(str_repr)\n if 'tensorflow' not in str_repr:\n break\n\n # Hack - tensorflow.test.StubOutForTesting may or may not be type <object>\n # depending on the environment. 
To avoid inconsistency, break after we add\n # StubOutForTesting to the return_list.\n if 'StubOutForTesting' in str_repr:\n break\n\n return return_list\n\n\ndef _IsProtoClass(obj):\n \"\"\"Returns whether the passed obj is a Protocol Buffer class.\"\"\"\n return isinstance(obj, type) and issubclass(obj, message.Message)\n\n\nclass PythonObjectToProtoVisitor(object):\n \"\"\"A visitor that summarizes given python objects as protobufs.\"\"\"\n\n def __init__(self):\n # A dict to store all protocol buffers.\n # Keyed by \"path\" to the object.\n self._protos = {}\n\n def GetProtos(self):\n \"\"\"Return the list of protos stored.\"\"\"\n return self._protos\n\n def __call__(self, path, parent, children):\n # The path to the object.\n lib_path = 'tensorflow.%s' % path if path else 'tensorflow'\n\n # A small helper method to construct members(children) protos.\n def _AddMember(member_name, member_obj, proto):\n \"\"\"Add the child object to the object being constructed.\"\"\"\n _, member_obj = tf_decorator.unwrap(member_obj)\n if member_name == '__init__' or not member_name.startswith('_'):\n if tf_inspect.isroutine(member_obj):\n new_method = proto.member_method.add()\n new_method.name = member_name\n # If member_obj is a python builtin, there is no way to get its\n # argspec, because it is implemented on the C side. It also has no\n # func_code.\n if getattr(member_obj, 'func_code', None):\n new_method.argspec = _SanitizedArgSpec(member_obj)\n else:\n new_member = proto.member.add()\n new_member.name = member_name\n new_member.mtype = str(type(member_obj))\n\n parent_corner_cases = _CORNER_CASES.get(path, {})\n\n if path not in _CORNER_CASES or parent_corner_cases:\n # Decide if we have a module or a class.\n if tf_inspect.ismodule(parent):\n # Create a module object.\n module_obj = api_objects_pb2.TFAPIModule()\n for name, child in children:\n if name in parent_corner_cases:\n # If we have an empty entry, skip this object.\n if parent_corner_cases[name]:\n module_obj.member.add(**(parent_corner_cases[name]))\n else:\n _AddMember(name, child, module_obj)\n\n # Store the constructed module object.\n self._protos[lib_path] = api_objects_pb2.TFAPIObject(\n path=lib_path, tf_module=module_obj)\n elif _IsProtoClass(parent):\n proto_obj = api_objects_pb2.TFAPIProto()\n parent.DESCRIPTOR.CopyToProto(proto_obj.descriptor)\n\n # Store the constructed proto object.\n self._protos[lib_path] = api_objects_pb2.TFAPIObject(\n path=lib_path, tf_proto=proto_obj)\n elif tf_inspect.isclass(parent):\n # Construct a class.\n class_obj = api_objects_pb2.TFAPIClass()\n class_obj.is_instance.extend(_SanitizedMRO(parent))\n for name, child in children:\n if name in parent_corner_cases:\n # If we have an empty entry, skip this object.\n if parent_corner_cases[name]:\n class_obj.member.add(**(parent_corner_cases[name]))\n else:\n _AddMember(name, child, class_obj)\n\n # Store the constructed class object.\n self._protos[lib_path] = api_objects_pb2.TFAPIObject(\n path=lib_path, tf_class=class_obj)\n else:\n logging.error('Illegal call to ApiProtoDump::_py_obj_to_proto.'\n 'Object is neither a module nor a class: %s', path)\n" ]
[ [ "tensorflow.python.framework.ops.get_collection_ref", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.ops.control_flow_ops.with_dependencies", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.training.training_util.get_or_create_global_step", "tensorflow.python.ops.array_ops.check_numerics", "tensorflow.python.ops.clip_ops.global_norm", "tensorflow.python.framework.ops.IndexedSlices", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.training.monitored_session.MonitoredTrainingSession", "tensorflow.python.summary.summary.histogram", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.clip_ops.clip_by_norm", "tensorflow.python.ops.variables.trainable_variables", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.framework.constant_op.constant" ], [ "numpy.eye", "tensorflow.contrib.autograph.utils.multiple_dispatch.dynamic_is", "tensorflow.contrib.autograph.utils.multiple_dispatch.run_cond", "tensorflow.python.client.session.Session", "tensorflow.python.platform.test.main", "tensorflow.contrib.autograph.utils.multiple_dispatch.dynamic_is_not", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.train.Int64List", "tensorflow.gfile.Open", "tensorflow.gfile.Exists", "tensorflow.python_io.TFRecordWriter", "tensorflow.gfile.MakeDirs", "tensorflow.train.BytesList", "tensorflow.app.run" ], [ "tensorflow.compiler.tf2xla.python.xla.while_loop", "tensorflow.python.framework.function.Defun", "numpy.complex64", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.platform.test.main", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.platform.test.main", "tensorflow.python.data.ops.dataset_ops.Dataset.list_files" ], [ "tensorflow.test.main", "tensorflow.user_ops.my_fact" ], [ "tensorflow.python.training.saver.latest_checkpoint", "tensorflow.python.training.monitored_session.Scaffold", "tensorflow.python.training.monitored_session.MonitoredSession", "tensorflow.python.training.monitored_session.ChiefSessionCreator", "tensorflow.python.training.training_util.get_or_create_global_step", "tensorflow.python.ops.state_ops.assign_add", "tensorflow.python.summary.summary.FileWriterCache.get", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.training.basic_session_run_hooks.FinalOpsHook", "tensorflow.python.training.training_util.global_step", "tensorflow.python.summary.summary.merge_all" ], [ "tensorflow.contrib.framework.fuse_op", "tensorflow.python.util.all_util.remove_undocumented", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.core.framework.attr_value_pb2.AttrValue", "tensorflow.python.ops.array_ops.identity" ], [ "tensorflow.contrib.eager.python.metrics.Mean", "tensorflow.python.ops.summary_ops_v2.always_record_summaries", "tensorflow.contrib.eager.python.datasets.Iterator", "tensorflow.python.eager.function.defun", "tensorflow.python.framework.ops.get_default_session", "tensorflow.contrib.eager.python.metrics.Accuracy", "tensorflow.python.ops.summary_ops_v2.create_file_writer", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.summary_ops_v2.never_record_summaries" ], [ "numpy.swapaxes", "numpy.amax", "numpy.log", "numpy.abs", "numpy.sqrt", "numpy.logspace", "numpy.amin", "numpy.float128", "numpy.finfo", "tensorflow.python.platform.test.main", 
"tensorflow.python.ops.distributions.bijector_test_util.assert_scalar_congruency", "tensorflow.contrib.distributions.python.ops.bijectors.sinh_arcsinh.SinhArcsinh", "numpy.array", "tensorflow.python.ops.distributions.bijector_test_util.assert_bijective_and_finite", "numpy.arcsinh" ], [ "tensorflow.gfile.DeleteRecursively", "tensorflow.gfile.Exists", "tensorflow.RunMetadata", "tensorflow.cast", "tensorflow.gfile.MakeDirs", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.Variable", "tensorflow.summary.image", "tensorflow.name_scope", "tensorflow.square", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.nn.dropout", "tensorflow.app.run", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.InteractiveSession", "tensorflow.losses.sparse_softmax_cross_entropy", "tensorflow.RunOptions", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.summary.histogram", "tensorflow.reduce_max", "tensorflow.summary.FileWriter", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.reduce_min" ], [ "tensorflow.python.ops.math_ops.log", "numpy.log", "numpy.sum", "numpy.allclose", "numpy.linspace", "tensorflow.python.ops.array_ops.shape", "numpy.around", "numpy.arange", "numpy.ones", "tensorflow.python.ops.array_ops.placeholder", "numpy.full_like", "numpy.random.normal", "tensorflow.python.platform.test.main", "tensorflow.contrib.coder.python.layers.entropybottleneck.EntropyBottleneck", "numpy.random.uniform", "tensorflow.python.ops.variables.global_variables_initializer", "numpy.array", "tensorflow.python.training.gradient_descent.GradientDescentOptimizer" ], [ "tensorflow.python.util.all_util.remove_undocumented" ], [ "tensorflow.contrib.tensorrt.wrap_conversion.get_linked_tensorrt_version", "tensorflow.contrib.tensorrt.wrap_conversion.calib_convert", "tensorflow.contrib.tensorrt.wrap_conversion.get_loaded_tensorrt_version", "tensorflow.python.platform.tf_logging.error", "tensorflow.python.framework.errors_impl.UnknownError", "tensorflow.core.framework.graph_pb2.GraphDef", "tensorflow.contrib.tensorrt.wrap_conversion.trt_convert" ], [ "tensorflow.python.ops.math_ops.reduce_max", "tensorflow.python.ops.nn.relu", "tensorflow.python.ops.math_ops.exp", "tensorflow.python.ops.math_ops.reduce_mean", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.framework.ops.convert_to_tensor" ], [ "numpy.log", "numpy.sum", "numpy.random.seed", "numpy.array_equal", "numpy.asarray", "tensorflow.python.platform.googletest.main", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.ops.random_ops.multinomial", "numpy.array", "tensorflow.python.framework.random_seed.set_random_seed", "numpy.random.dirichlet" ], [ "tensorflow.python.ops.array_ops.placeholder", "numpy.arange", "tensorflow.python.ops.array_ops.extract_image_patches", "tensorflow.python.platform.test.main" ], [ "tensorflow.contrib.receptive_field.python.util.parse_layer_parameters.get_layer_params", "tensorflow.python.platform.tf_logging.vlog" ], [ "tensorflow.contrib.kfac.python.ops.curvature_matrix_vector_products.CurvatureMatrixVectorProductComputer", "tensorflow.python.ops.control_flow_ops.case", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.ops.math_ops.equal", "tensorflow.python.ops.math_ops.mod", "tensorflow.python.ops.math_ops.maximum", 
"tensorflow.python.ops.math_ops.sqrt", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.contrib.kfac.python.ops.estimator.make_fisher_estimator", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.state_ops.assign", "tensorflow.python.ops.linalg_ops.matrix_inverse", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.ops.math_ops.cast" ], [ "tensorflow.python.ops.array_ops.transpose", "numpy.absolute", "tensorflow.python.ops.nn_ops.conv2d", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.nn_ops.depthwise_conv2d_native", "tensorflow.python.ops.nn_ops.depthwise_conv2d_native_backprop_input", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.platform.test.main", "numpy.random.rand", "tensorflow.python.framework.ops.device", "numpy.ravel", "tensorflow.python.ops.nn_ops.depthwise_conv2d_native_backprop_filter" ], [ "tensorflow.python.data.util.nest.map_structure_up_to", "tensorflow.python.framework.sparse_tensor.SparseTensorValue", "tensorflow.python.data.util.nest.assert_same_structure", "tensorflow.python.data.util.nest.is_sequence", "tensorflow.python.data.util.nest.flatten_up_to", "numpy.ones", "tensorflow.python.data.util.nest.pack_sequence_as", "tensorflow.python.ops.math_ops.tanh", "tensorflow.python.platform.test.main", "tensorflow.python.data.util.nest.map_structure", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.data.util.nest.assert_shallow_structure", "tensorflow.python.data.util.nest.flatten", "numpy.array", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.debug.lib.grpc_debug_test_server.start_server_on_separate_thread", "tensorflow.core.protobuf.config_pb2.RunMetadata", "tensorflow.python.debug.lib.session_debug_testlib.no_rewrite_session_config", "tensorflow.python.ops.state_ops.assign_add", "tensorflow.python.debug.wrappers.grpc_wrapper.GrpcDebugWrapperSession", "tensorflow.core.protobuf.config_pb2.RunOptions", "tensorflow.python.ops.variables.Variable", "tensorflow.python.debug.lib.debug_utils.add_debug_tensor_watch", "tensorflow.python.debug.lib.session_debug_testlib.SessionDebugTestBase.tearDownClass", "tensorflow.python.platform.googletest.main", "tensorflow.python.debug.lib.debug_data.DebugDumpDir", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.python.debug.wrappers.hooks.GrpcDebugHook", "tensorflow.python.debug.wrappers.grpc_wrapper.TensorBoardDebugWrapperSession", "tensorflow.python.debug.lib.session_debug_testlib.SessionDebugTestBase.tearDown", "tensorflow.python.client.session.Session", "tensorflow.python.debug.lib.session_debug_testlib.SessionDebugTestBase.setUpClass", "tensorflow.python.debug.lib.debug_utils.watch_graph", "tensorflow.python.ops.math_ops.multiply", "tensorflow.python.debug.wrappers.hooks.TensorBoardDebugHook", "tensorflow.python.debug.wrappers.framework.WatchOptions", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.training.monitored_session._HookedSession", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.contrib.distributions.python.ops.mvn_diag.MultivariateNormalDiag", "numpy.ones_like", "tensorflow.python.ops.distributions.normal.Normal", "numpy.logical_or", "tensorflow.python.platform.test.main", "numpy.float32", "tensorflow.python.ops.distributions.bernoulli.Bernoulli", "tensorflow.python.ops.distributions.categorical.Categorical" ], [ 
"tensorflow.python.keras.backend.name_scope", "tensorflow.python.keras.layers.MaxPooling2D", "tensorflow.python.keras.layers.Dense", "tensorflow.python.keras.backend.int_shape", "tensorflow.python.keras.layers.Conv2D", "tensorflow.python.keras.layers.BatchNormalization", "tensorflow.python.keras.layers.Activation", "tensorflow.python.keras.backend.image_data_format", "tensorflow.python.keras.layers.concatenate", "tensorflow.python.keras.backend.is_keras_tensor", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.keras.layers.add", "tensorflow.python.keras.layers.AveragePooling2D", "tensorflow.python.keras.layers.Cropping2D", "tensorflow.python.keras.utils.data_utils.get_file", "tensorflow.python.keras.layers.SeparableConv2D", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.keras.utils.layer_utils.get_source_inputs", "tensorflow.python.keras.layers.Input", "tensorflow.python.keras.layers.ZeroPadding2D", "tensorflow.python.keras.layers.GlobalMaxPooling2D", "tensorflow.python.keras.backend.set_image_data_format", "tensorflow.python.keras.models.Model", "tensorflow.python.keras.backend.backend", "tensorflow.python.keras.layers.GlobalAveragePooling2D" ], [ "tensorflow.python.framework.test_util.run_in_graph_and_eager_modes", "tensorflow.python.keras.layers.Embedding", "tensorflow.python.keras.backend.constant", "tensorflow.python.keras.testing_utils.layer_test", "tensorflow.python.platform.test.main", "numpy.array" ], [ "tensorflow.python.util.all_util.remove_undocumented" ], [ "tensorflow.python.ops.state_ops.assign_add", "tensorflow.python.platform.tf_logging.error", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.add_to_collection", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.resource_variable_ops.is_resource_variable", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.eager.context.executing_eagerly" ], [ "tensorflow.python.training.slot_creator.create_zeros_slot", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.variables.Variable", "tensorflow.python.training.slot_creator.create_slot", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.platform.test.main", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.array_ops.placeholder_with_default", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.gradient_checker.compute_gradient", "numpy.ones_like", "tensorflow.python.ops.math_ops.cumprod", "numpy.arange", "tensorflow.python.ops.math_ops.cumsum", "tensorflow.python.platform.test.main", "numpy.zeros_like", "tensorflow.python.framework.ops.convert_to_tensor", "numpy.zeros", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.training.monitored_session.Scaffold", "tensorflow.python.estimator.canned.head._multi_class_head_with_softmax_cross_entropy_loss", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.variables.Variable", "tensorflow.python.framework.ops.add_to_collection", "tensorflow.python.ops.math_ops.to_float", "tensorflow.python.ops.control_flow_ops.no_op", "numpy.exp", "tensorflow.python.training.queue_runner_impl.start_queue_runners", "tensorflow.python.framework.sparse_tensor.SparseTensor", 
"tensorflow.python.platform.test.main", "tensorflow.python.estimator.canned.head._regression_head", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.python.estimator.canned.head._binary_logistic_head_with_sigmoid_cross_entropy_loss", "tensorflow.python.feature_column.feature_column.numeric_column", "tensorflow.python.ops.math_ops.equal", "numpy.array", "numpy.sum", "tensorflow.python.ops.string_ops.as_string", "tensorflow.python.framework.ops.Graph", "tensorflow.core.framework.summary_pb2.Summary", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.resource_variable_ops.ResourceVariable", "numpy.sign", "tensorflow.python.ops.variables.Variable", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.platform.test.main", "tensorflow.contrib.opt.python.training.addsign.AddSignOptimizer", "tensorflow.contrib.opt.python.training.sign_decay.get_linear_decay_fn", "numpy.array", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.training.monitored_session.Scaffold", "tensorflow.python.estimator.export.export_output.RegressionOutput", "tensorflow.python.estimator.model_fn.EstimatorSpec", "tensorflow.python.framework.ops.Graph", "tensorflow.python.estimator.export.export_output.ClassificationOutput", "tensorflow.python.framework.sparse_tensor.SparseTensor", "tensorflow.python.platform.test.main", "tensorflow.python.estimator.export.export_output.PredictOutput", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.math_ops.log", "tensorflow.python.ops.linalg.linear_operator_diag.LinearOperatorDiag", "tensorflow.python.ops.linalg.linear_operator_util.cholesky_solve_with_broadcast", "tensorflow.python.ops.linalg.linear_operator_util.matrix_solve_with_broadcast", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.linalg.linear_operator_util.matmul_with_broadcast", "tensorflow.python.ops.check_ops.assert_same_float_dtype", "tensorflow.python.ops.linalg_ops.cholesky", "tensorflow.python.ops.math_ops.abs", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.linalg_ops.matrix_determinant", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.linalg.linear_operator_identity.LinearOperatorIdentity", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.array_ops.broadcast_static_shape", "tensorflow.python.ops.array_ops.matrix_diag_part" ], [ "tensorflow.python.training.server_lib.ClusterSpec" ], [ "tensorflow.python.util.all_util.remove_undocumented" ], [ "tensorflow.contrib.tensor_forest.python.tensor_forest.RandomForestGraphs", "tensorflow.contrib.tensor_forest.python.tensor_forest.ForestHParams", "tensorflow.python.ops.resources.shared_resources", "tensorflow.python.framework.sparse_tensor.SparseTensor", "tensorflow.python.platform.googletest.main", "tensorflow.contrib.decision_trees.proto.generic_tree_model_pb2.Model", "tensorflow.python.ops.variables.global_variables_initializer" ], [ "tensorflow.contrib.tensor_forest.hybrid.python.layers.fully_connected.FullyConnectedLayer", "tensorflow.contrib.tensor_forest.hybrid.python.layers.decisions_to_data.StochasticHardDecisionsToDataLayer" ], [ "tensorflow.tools.api.lib.api_objects_pb2.TFAPIProto", "tensorflow.python.util.tf_inspect.isclass", "tensorflow.tools.api.lib.api_objects_pb2.TFAPIModule", 
"tensorflow.python.util.tf_inspect.getmro", "tensorflow.python.platform.tf_logging.error", "tensorflow.python.util.tf_inspect.ismodule", "tensorflow.tools.api.lib.api_objects_pb2.TFAPIClass", "tensorflow.python.util.tf_inspect.isroutine", "tensorflow.python.util.tf_inspect.getargspec", "tensorflow.python.util.tf_decorator.unwrap", "tensorflow.tools.api.lib.api_objects_pb2.TFAPIObject" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.4", "1.5", "1.7", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.10", "1.12" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.7", "1.10", "1.12" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.10", "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], 
"pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.7", "1.10", "1.12" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MathijsMul/babyai-emergent-guidance
[ "9e37535134c89bd019affa51c7f199d1672811b6" ]
[ "babyai/arguments.py" ]
[ "\"\"\"\nCommon arguments for BabyAI training scripts\n\"\"\"\n\nimport os\nimport argparse\nimport numpy as np\n\n\nclass ArgumentParser(argparse.ArgumentParser):\n\n def __init__(self):\n super().__init__()\n\n # Base arguments\n self.add_argument(\"--env\", default=None,\n help=\"name of the environment to train on (REQUIRED)\")\n self.add_argument(\"--model\", default=None,\n help=\"name of the model (default: ENV_ALGO_TIME)\")\n self.add_argument(\"--pretrained-model\", default=None,\n help='If you\\'re using a pre-trained model and want the fine-tuned one to have a new name')\n self.add_argument(\"--seed\", type=int, default=1,\n help=\"random seed; if 0, a random random seed will be used (default: 1)\")\n self.add_argument(\"--task-id-seed\", action='store_true',\n help=\"use the task id within a Slurm job array as the seed\")\n self.add_argument(\"--procs\", type=int, default=64,\n help=\"number of processes (default: 64)\")\n self.add_argument(\"--tb\", action=\"store_true\", default=False,\n help=\"log into Tensorboard\")\n\n # Training arguments\n self.add_argument(\"--log-interval\", type=int, default=1,\n help=\"number of updates between two logs (default(Mathijs): 1, used to be 10)\")\n self.add_argument(\"--save-interval\", type=int, default=1000,\n help=\"number of updates between two saves (default: 1000, 0 means no saving)\")\n self.add_argument(\"--frames\", type=int, default=int(9e10),\n help=\"number of frames of training (default: 9e10)\")\n self.add_argument(\"--patience\", type=int, default=100,\n help=\"patience for early stopping (default: 100)\")\n self.add_argument(\"--epochs\", type=int, default=1000000,\n help=\"maximum number of epochs\")\n self.add_argument(\"--frames-per-proc\", type=int, default=40,\n help=\"number of frames per process before update (default: 40)\")\n self.add_argument(\"--lr\", type=float, default=1e-4,\n help=\"learning rate (default: 1e-4)\")\n self.add_argument(\"--beta1\", type=float, default=0.9,\n help=\"beta1 for Adam (default: 0.9)\")\n self.add_argument(\"--beta2\", type=float, default=0.999,\n help=\"beta2 for Adam (default: 0.999)\")\n self.add_argument(\"--recurrence\", type=int, default=20,\n help=\"number of timesteps gradient is backpropagated (default: 20)\")\n self.add_argument(\"--optim-eps\", type=float, default=1e-5,\n help=\"Adam and RMSprop optimizer epsilon (default: 1e-5)\")\n self.add_argument(\"--optim-alpha\", type=float, default=0.99,\n help=\"RMSprop optimizer apha (default: 0.99)\")\n self.add_argument(\"--batch-size\", type=int, default=1280,\n help=\"batch size for PPO (default: 1280)\")\n self.add_argument(\"--entropy-coef\", type=float, default=0.01,\n help=\"entropy term coefficient (default: 0.01)\")\n self.add_argument(\"--dropout\", type=float, default=0.5,\n help=\"dropout probability for processed corrections (default: 0.5)\")\n\n self.add_argument(\"--save-each-epoch\", action=\"store_true\", default=False,\n help=\"store model at each epoch\")\n self.add_argument(\"--class-weights\", action=\"store_true\", default=False,\n help=\"use class weights in loss function\")\n self.add_argument(\"--compute-cic\", action=\"store_true\", default=False,\n help=\"compute and log causal influence of communication metric after each epoch\")\n\n # Model parameters\n self.add_argument(\"--image-dim\", type=int, default=128,\n help=\"dimensionality of the image embedding\")\n self.add_argument(\"--memory-dim\", type=int, default=128,\n help=\"dimensionality of the memory LSTM\")\n 
self.add_argument(\"--instr-dim\", type=int, default=128,\n help=\"dimensionality of the memory LSTM\")\n self.add_argument(\"--no-instr\", action=\"store_true\", default=False,\n help=\"don't use instructions in the model\")\n self.add_argument(\"--instr-arch\", default=\"gru\",\n help=\"arch to encode instructions, possible values: gru, bigru, conv, bow (default: gru)\")\n self.add_argument(\"--no-mem\", action=\"store_true\", default=False,\n help=\"don't use memory in the model\")\n self.add_argument(\"--arch\", default='expert_filmcnn',\n help=\"image embedding architecture\")\n self.add_argument(\"--learner\", action=\"store_true\", default=False,\n help=\"use ordinary learner\")\n\n # Corrector parameters\n self.add_argument(\"--corrector\", action=\"store_true\", default=False,\n help=\"use correction module\")\n self.add_argument(\"--corr-length\", type=int, default=2,\n help=\"length of correction messages (max length if --var-corr-length true)\")\n self.add_argument(\"--corr-own-vocab\", action=\"store_true\", default=False,\n help=\"corrector uses its own vocabulary instead of instruction vocabulary\")\n self.add_argument(\"--corr-embedding-dim\", type=int, default=0,\n help=\"embedding dimensionality for corrector\")\n self.add_argument(\"--corr-vocab-size\", type=int, default=3,\n help=\"vocabulary size of corrector\")\n self.add_argument(\"--pretrained-corrector\", type=str, default=None,\n help=\"location of pretrained corrector to use and freeze\")\n self.add_argument(\"--show-corrections\", action=\"store_true\", default=False,\n help=\"show correction messages\")\n self.add_argument(\"--corrector-frozen\", action=\"store_true\", default=False,\n help=\"freeze pretrained corrector\")\n self.add_argument(\"--random-corrector\", action=\"store_true\", default=False,\n help=\"randomize correction messages\")\n self.add_argument(\"--var-corr-length\", action=\"store_true\", default=False,\n help=\"variable length correction messages with penalty for longer ones\")\n self.add_argument(\"--corr-loss-coef\", type=float, default=0.1,\n help=\"correction loss coefficient (untested default: 0.1)\")\n self.add_argument(\"--weigh-corrections\", action=\"store_true\", default=False,\n help=\"weigh corrections depending on entropy of previous timestep\")\n self.add_argument(\"--correction-weight-loss-coef\", type=float, default=1.0,\n help=\"coefficient for correction weight loss\")\n\n # Validation parameters\n self.add_argument(\"--val-seed\", type=int, default=0,\n help=\"seed for environment used for validation (default: 0)\")\n self.add_argument(\"--val-interval\", type=int, default=1,\n help=\"number of epochs between two validation checks (default: 1)\")\n self.add_argument(\"--val-episodes\", type=int, default=500,\n help=\"number of episodes used to evaluate the agent, and to evaluate validation accuracy\")\n\n def parse_args(self):\n \"\"\"\n Parse the arguments and perform some basic validation\n \"\"\"\n\n args = super().parse_args()\n\n # Set seed for all randomness sources\n if args.seed == 0:\n args.seed = np.random.randint(10000)\n if args.task_id_seed:\n args.seed = int(os.environ['SLURM_ARRAY_TASK_ID'])\n print('set seed to {}'.format(args.seed))\n\n # TODO: more validation\n\n return args\n" ]
[ [ "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jkxing/pytorch3d
[ "71dbebe8010a0dac3e56be464778aa48fbd3bcd3", "71dbebe8010a0dac3e56be464778aa48fbd3bcd3" ]
[ "tests/test_laplacian_matrices.py", "tests/test_rasterize_meshes.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport unittest\n\nimport torch\nfrom common_testing import TestCaseMixin, get_random_cuda_device\nfrom pytorch3d.ops import cot_laplacian, laplacian, norm_laplacian\nfrom pytorch3d.structures.meshes import Meshes\n\n\nclass TestLaplacianMatrices(TestCaseMixin, unittest.TestCase):\n def setUp(self) -> None:\n super().setUp()\n torch.manual_seed(1)\n\n def init_mesh(self) -> Meshes:\n V, F = 32, 64\n device = get_random_cuda_device()\n # random vertices\n verts = torch.rand((V, 3), dtype=torch.float32, device=device)\n # random valid faces (no self circles, e.g. (v0, v0, v1))\n faces = torch.stack([torch.randperm(V) for f in range(F)], dim=0)[:, :3]\n faces = faces.to(device=device)\n return Meshes(verts=[verts], faces=[faces])\n\n def test_laplacian(self):\n mesh = self.init_mesh()\n verts = mesh.verts_packed()\n edges = mesh.edges_packed()\n V, E = verts.shape[0], edges.shape[0]\n\n L = laplacian(verts, edges)\n\n Lnaive = torch.zeros((V, V), dtype=torch.float32, device=verts.device)\n for e in range(E):\n e0, e1 = edges[e]\n Lnaive[e0, e1] = 1\n # symetric\n Lnaive[e1, e0] = 1\n\n deg = Lnaive.sum(1).view(-1, 1)\n deg[deg > 0] = 1.0 / deg[deg > 0]\n Lnaive = Lnaive * deg\n diag = torch.eye(V, dtype=torch.float32, device=mesh.device)\n Lnaive.masked_fill_(diag > 0, -1)\n\n self.assertClose(L.to_dense(), Lnaive)\n\n def test_cot_laplacian(self):\n mesh = self.init_mesh()\n verts = mesh.verts_packed()\n faces = mesh.faces_packed()\n V = verts.shape[0]\n\n eps = 1e-12\n\n L, inv_areas = cot_laplacian(verts, faces, eps=eps)\n\n Lnaive = torch.zeros((V, V), dtype=torch.float32, device=verts.device)\n inv_areas_naive = torch.zeros((V, 1), dtype=torch.float32, device=verts.device)\n\n for f in faces:\n v0 = verts[f[0], :]\n v1 = verts[f[1], :]\n v2 = verts[f[2], :]\n A = (v1 - v2).norm()\n B = (v0 - v2).norm()\n C = (v0 - v1).norm()\n s = 0.5 * (A + B + C)\n\n face_area = (s * (s - A) * (s - B) * (s - C)).clamp_(min=1e-12).sqrt()\n inv_areas_naive[f[0]] += face_area\n inv_areas_naive[f[1]] += face_area\n inv_areas_naive[f[2]] += face_area\n\n A2, B2, C2 = A * A, B * B, C * C\n cota = (B2 + C2 - A2) / face_area / 4.0\n cotb = (A2 + C2 - B2) / face_area / 4.0\n cotc = (A2 + B2 - C2) / face_area / 4.0\n\n Lnaive[f[1], f[2]] += cota\n Lnaive[f[2], f[0]] += cotb\n Lnaive[f[0], f[1]] += cotc\n # symetric\n Lnaive[f[2], f[1]] += cota\n Lnaive[f[0], f[2]] += cotb\n Lnaive[f[1], f[0]] += cotc\n\n idx = inv_areas_naive > 0\n inv_areas_naive[idx] = 1.0 / inv_areas_naive[idx]\n\n self.assertClose(inv_areas, inv_areas_naive)\n self.assertClose(L.to_dense(), Lnaive)\n\n def test_norm_laplacian(self):\n mesh = self.init_mesh()\n verts = mesh.verts_packed()\n edges = mesh.edges_packed()\n V, E = verts.shape[0], edges.shape[0]\n\n eps = 1e-12\n\n L = norm_laplacian(verts, edges, eps=eps)\n\n Lnaive = torch.zeros((V, V), dtype=torch.float32, device=verts.device)\n for e in range(E):\n e0, e1 = edges[e]\n v0 = verts[e0]\n v1 = verts[e1]\n\n w01 = 1.0 / ((v0 - v1).norm() + eps)\n Lnaive[e0, e1] += w01\n Lnaive[e1, e0] += w01\n\n self.assertClose(L.to_dense(), Lnaive)\n", "# Copyright (c) Facebook, Inc. 
and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport functools\nimport unittest\n\nimport torch\nfrom common_testing import TestCaseMixin, get_random_cuda_device\nfrom pytorch3d import _C\nfrom pytorch3d.renderer import FoVPerspectiveCameras, look_at_view_transform\nfrom pytorch3d.renderer.mesh import MeshRasterizer, RasterizationSettings\nfrom pytorch3d.renderer.mesh.rasterize_meshes import (\n rasterize_meshes,\n rasterize_meshes_python,\n)\nfrom pytorch3d.renderer.mesh.utils import (\n _clip_barycentric_coordinates,\n _interpolate_zbuf,\n)\nfrom pytorch3d.structures import Meshes\nfrom pytorch3d.utils import ico_sphere\n\n\nclass TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):\n def test_simple_python(self):\n device = torch.device(\"cpu\")\n self._simple_triangle_raster(rasterize_meshes_python, device, bin_size=-1)\n self._simple_blurry_raster(rasterize_meshes_python, device, bin_size=-1)\n self._test_behind_camera(rasterize_meshes_python, device, bin_size=-1)\n self._test_perspective_correct(rasterize_meshes_python, device, bin_size=-1)\n self._test_barycentric_clipping(rasterize_meshes_python, device, bin_size=-1)\n self._test_back_face_culling(rasterize_meshes_python, device, bin_size=-1)\n\n def test_simple_cpu_naive(self):\n device = torch.device(\"cpu\")\n self._simple_triangle_raster(rasterize_meshes, device, bin_size=0)\n self._simple_blurry_raster(rasterize_meshes, device, bin_size=0)\n self._test_behind_camera(rasterize_meshes, device, bin_size=0)\n self._test_perspective_correct(rasterize_meshes, device, bin_size=0)\n self._test_back_face_culling(rasterize_meshes, device, bin_size=0)\n\n def test_simple_cuda_naive(self):\n device = get_random_cuda_device()\n self._simple_triangle_raster(rasterize_meshes, device, bin_size=0)\n self._simple_blurry_raster(rasterize_meshes, device, bin_size=0)\n self._test_behind_camera(rasterize_meshes, device, bin_size=0)\n self._test_perspective_correct(rasterize_meshes, device, bin_size=0)\n self._test_back_face_culling(rasterize_meshes, device, bin_size=0)\n\n def test_simple_cuda_binned(self):\n device = get_random_cuda_device()\n self._simple_triangle_raster(rasterize_meshes, device, bin_size=5)\n self._simple_blurry_raster(rasterize_meshes, device, bin_size=5)\n self._test_behind_camera(rasterize_meshes, device, bin_size=5)\n self._test_perspective_correct(rasterize_meshes, device, bin_size=5)\n self._test_back_face_culling(rasterize_meshes, device, bin_size=5)\n\n def test_python_vs_cpu_vs_cuda(self):\n torch.manual_seed(231)\n device = torch.device(\"cpu\")\n image_size = 32\n blur_radius = 0.1 ** 2\n faces_per_pixel = 3\n\n for d in [\"cpu\", get_random_cuda_device()]:\n device = torch.device(d)\n compare_grads = True\n # Mesh with a single face.\n verts1 = torch.tensor(\n [[0.0, 0.6, 0.1], [-0.7, -0.4, 0.5], [0.7, -0.4, 0.7]],\n dtype=torch.float32,\n requires_grad=True,\n device=device,\n )\n faces1 = torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device)\n meshes1 = Meshes(verts=[verts1], faces=[faces1])\n args1 = (meshes1, image_size, blur_radius, faces_per_pixel)\n verts2 = verts1.detach().clone()\n verts2.requires_grad = True\n meshes2 = Meshes(verts=[verts2], faces=[faces1])\n args2 = (meshes2, image_size, blur_radius, faces_per_pixel)\n self._compare_impls(\n rasterize_meshes_python,\n rasterize_meshes,\n args1,\n args2,\n verts1,\n verts2,\n compare_grads=compare_grads,\n )\n\n # 
Mesh with multiple faces.\n # fmt: off\n verts1 = torch.tensor(\n [\n [ -0.5, 0.0, 0.1], # noqa: E241, E201\n [ 0.0, 0.6, 0.5], # noqa: E241, E201\n [ 0.5, 0.0, 0.7], # noqa: E241, E201\n [-0.25, 0.0, 0.9], # noqa: E241, E201\n [ 0.26, 0.5, 0.8], # noqa: E241, E201\n [ 0.76, 0.0, 0.8], # noqa: E241, E201\n [-0.41, 0.0, 0.5], # noqa: E241, E201\n [ 0.61, 0.6, 0.6], # noqa: E241, E201\n [ 0.41, 0.0, 0.5], # noqa: E241, E201\n [ -0.2, 0.0, -0.5], # noqa: E241, E201\n [ 0.3, 0.6, -0.5], # noqa: E241, E201\n [ 0.4, 0.0, -0.5], # noqa: E241, E201\n ],\n dtype=torch.float32,\n device=device,\n requires_grad=True\n )\n faces1 = torch.tensor(\n [\n [ 1, 0, 2], # noqa: E241, E201\n [ 4, 3, 5], # noqa: E241, E201\n [ 7, 6, 8], # noqa: E241, E201\n [10, 9, 11] # noqa: E241, E201\n ],\n dtype=torch.int64,\n device=device,\n )\n # fmt: on\n meshes = Meshes(verts=[verts1], faces=[faces1])\n args1 = (meshes, image_size, blur_radius, faces_per_pixel)\n verts2 = verts1.clone().detach()\n verts2.requires_grad = True\n meshes2 = Meshes(verts=[verts2], faces=[faces1])\n args2 = (meshes2, image_size, blur_radius, faces_per_pixel)\n self._compare_impls(\n rasterize_meshes_python,\n rasterize_meshes,\n args1,\n args2,\n verts1,\n verts2,\n compare_grads=compare_grads,\n )\n\n # Icosphere\n meshes = ico_sphere(device=device)\n verts1, faces1 = meshes.get_mesh_verts_faces(0)\n verts1.requires_grad = True\n meshes = Meshes(verts=[verts1], faces=[faces1])\n args1 = (meshes, image_size, blur_radius, faces_per_pixel)\n verts2 = verts1.detach().clone()\n verts2.requires_grad = True\n meshes2 = Meshes(verts=[verts2], faces=[faces1])\n args2 = (meshes2, image_size, blur_radius, faces_per_pixel)\n self._compare_impls(\n rasterize_meshes_python,\n rasterize_meshes,\n args1,\n args2,\n verts1,\n verts2,\n compare_grads=compare_grads,\n )\n\n def test_cpu_vs_cuda_naive(self):\n \"\"\"\n Compare naive versions of cuda and cpp\n \"\"\"\n\n torch.manual_seed(231)\n image_size = 64\n radius = 0.1 ** 2\n faces_per_pixel = 3\n device = torch.device(\"cpu\")\n meshes_cpu = ico_sphere(0, device)\n verts1, faces1 = meshes_cpu.get_mesh_verts_faces(0)\n verts1.requires_grad = True\n meshes_cpu = Meshes(verts=[verts1], faces=[faces1])\n\n device = get_random_cuda_device()\n meshes_cuda = ico_sphere(0, device)\n verts2, faces2 = meshes_cuda.get_mesh_verts_faces(0)\n verts2.requires_grad = True\n meshes_cuda = Meshes(verts=[verts2], faces=[faces2])\n\n barycentric_clip = True\n args_cpu = (\n meshes_cpu,\n image_size,\n radius,\n faces_per_pixel,\n None,\n None,\n False,\n barycentric_clip,\n False,\n )\n args_cuda = (\n meshes_cuda,\n image_size,\n radius,\n faces_per_pixel,\n 0,\n 0,\n False,\n barycentric_clip,\n False,\n )\n self._compare_impls(\n rasterize_meshes,\n rasterize_meshes,\n args_cpu,\n args_cuda,\n verts1,\n verts2,\n compare_grads=True,\n )\n\n def test_coarse_cpu(self):\n return self._test_coarse_rasterize(torch.device(\"cpu\"))\n\n def test_coarse_cuda(self):\n return self._test_coarse_rasterize(get_random_cuda_device())\n\n def test_cpp_vs_cuda_naive_vs_cuda_binned(self):\n # Make sure that the backward pass runs for all pathways\n image_size = 64 # test is too slow for very large images.\n N = 1\n radius = 0.1 ** 2\n faces_per_pixel = 3\n\n grad_zbuf = torch.randn(N, image_size, image_size, faces_per_pixel)\n grad_dist = torch.randn(N, image_size, image_size, faces_per_pixel)\n grad_bary = torch.randn(N, image_size, image_size, faces_per_pixel, 3)\n\n device = torch.device(\"cpu\")\n meshes = ico_sphere(0, device)\n 
verts, faces = meshes.get_mesh_verts_faces(0)\n verts.requires_grad = True\n meshes = Meshes(verts=[verts], faces=[faces])\n\n # Option I: CPU, naive\n args = (meshes, image_size, radius, faces_per_pixel)\n idx1, zbuf1, bary1, dist1 = rasterize_meshes(*args)\n\n loss = (\n (zbuf1 * grad_zbuf).sum()\n + (dist1 * grad_dist).sum()\n + (bary1 * grad_bary).sum()\n )\n loss.backward()\n idx1 = idx1.data.cpu().clone()\n zbuf1 = zbuf1.data.cpu().clone()\n dist1 = dist1.data.cpu().clone()\n grad1 = verts.grad.data.cpu().clone()\n\n # Option II: CUDA, naive\n device = get_random_cuda_device()\n meshes = ico_sphere(0, device)\n verts, faces = meshes.get_mesh_verts_faces(0)\n verts.requires_grad = True\n meshes = Meshes(verts=[verts], faces=[faces])\n\n args = (meshes, image_size, radius, faces_per_pixel, 0, 0)\n idx2, zbuf2, bary2, dist2 = rasterize_meshes(*args)\n grad_zbuf = grad_zbuf.to(device)\n grad_dist = grad_dist.to(device)\n grad_bary = grad_bary.to(device)\n loss = (\n (zbuf2 * grad_zbuf).sum()\n + (dist2 * grad_dist).sum()\n + (bary2 * grad_bary).sum()\n )\n loss.backward()\n idx2 = idx2.data.cpu().clone()\n zbuf2 = zbuf2.data.cpu().clone()\n dist2 = dist2.data.cpu().clone()\n grad2 = verts.grad.data.cpu().clone()\n\n # Option III: CUDA, binned\n meshes = ico_sphere(0, device)\n verts, faces = meshes.get_mesh_verts_faces(0)\n verts.requires_grad = True\n meshes = Meshes(verts=[verts], faces=[faces])\n\n args = (meshes, image_size, radius, faces_per_pixel, 32, 500)\n idx3, zbuf3, bary3, dist3 = rasterize_meshes(*args)\n\n loss = (\n (zbuf3 * grad_zbuf).sum()\n + (dist3 * grad_dist).sum()\n + (bary3 * grad_bary).sum()\n )\n loss.backward()\n idx3 = idx3.data.cpu().clone()\n zbuf3 = zbuf3.data.cpu().clone()\n dist3 = dist3.data.cpu().clone()\n grad3 = verts.grad.data.cpu().clone()\n\n # Make sure everything was the same\n self.assertTrue((idx1 == idx2).all().item())\n self.assertTrue((idx1 == idx3).all().item())\n self.assertClose(zbuf1, zbuf2, atol=1e-6)\n self.assertClose(zbuf1, zbuf3, atol=1e-6)\n self.assertClose(dist1, dist2, atol=1e-6)\n self.assertClose(dist1, dist3, atol=1e-6)\n\n self.assertClose(grad1, grad2, rtol=5e-3) # flaky test\n self.assertClose(grad1, grad3, rtol=5e-3)\n self.assertClose(grad2, grad3, rtol=5e-3)\n\n def test_compare_coarse_cpu_vs_cuda(self):\n torch.manual_seed(231)\n N = 1\n image_size = (512, 512)\n blur_radius = 0.0\n bin_size = 32\n max_faces_per_bin = 20\n\n device = torch.device(\"cpu\")\n\n meshes = ico_sphere(2, device)\n faces = meshes.faces_packed()\n verts = meshes.verts_packed()\n faces_verts = verts[faces]\n num_faces_per_mesh = meshes.num_faces_per_mesh()\n mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx()\n\n bin_faces_cpu = _C._rasterize_meshes_coarse(\n faces_verts,\n mesh_to_face_first_idx,\n num_faces_per_mesh,\n image_size,\n blur_radius,\n bin_size,\n max_faces_per_bin,\n )\n device = get_random_cuda_device()\n meshes = meshes.clone().to(device)\n\n faces = meshes.faces_packed()\n verts = meshes.verts_packed()\n faces_verts = verts[faces]\n num_faces_per_mesh = meshes.num_faces_per_mesh()\n mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx()\n\n bin_faces_cuda = _C._rasterize_meshes_coarse(\n faces_verts,\n mesh_to_face_first_idx,\n num_faces_per_mesh,\n image_size,\n blur_radius,\n bin_size,\n max_faces_per_bin,\n )\n\n # Bin faces might not be the same: CUDA version might write them in\n # any order. 
But if we sort the non-(-1) elements of the CUDA output\n # then they should be the same.\n\n for n in range(N):\n for by in range(bin_faces_cpu.shape[1]):\n for bx in range(bin_faces_cpu.shape[2]):\n K = (bin_faces_cuda[n, by, bx] != -1).sum().item()\n idxs_cpu = bin_faces_cpu[n, by, bx].tolist()\n idxs_cuda = bin_faces_cuda[n, by, bx].tolist()\n idxs_cuda[:K] = sorted(idxs_cuda[:K])\n self.assertEqual(idxs_cpu, idxs_cuda)\n\n def test_python_vs_cpp_bary_clip(self):\n torch.manual_seed(232)\n N = 2\n V = 10\n F = 5\n verts1 = torch.randn(N, V, 3, requires_grad=True)\n verts2 = verts1.detach().clone().requires_grad_(True)\n faces = torch.randint(V, size=(N, F, 3))\n meshes1 = Meshes(verts1, faces)\n meshes2 = Meshes(verts2, faces)\n\n kwargs = {\"image_size\": 24, \"clip_barycentric_coords\": True}\n fn1 = functools.partial(rasterize_meshes, meshes1, **kwargs)\n fn2 = functools.partial(rasterize_meshes_python, meshes2, **kwargs)\n args = ()\n self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True)\n\n def test_cpp_vs_cuda_bary_clip(self):\n meshes = ico_sphere(2, device=torch.device(\"cpu\"))\n verts1, faces1 = meshes.get_mesh_verts_faces(0)\n verts1.requires_grad = True\n meshes1 = Meshes(verts=[verts1], faces=[faces1])\n device = get_random_cuda_device()\n verts2 = verts1.detach().to(device).requires_grad_(True)\n faces2 = faces1.detach().clone().to(device)\n meshes2 = Meshes(verts=[verts2], faces=[faces2])\n\n kwargs = {\"image_size\": 64, \"clip_barycentric_coords\": True}\n fn1 = functools.partial(rasterize_meshes, meshes1, **kwargs)\n fn2 = functools.partial(rasterize_meshes, meshes2, bin_size=0, **kwargs)\n args = ()\n self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True)\n\n def test_python_vs_cpp_perspective_correct(self):\n torch.manual_seed(232)\n N = 2\n V = 10\n F = 5\n verts1 = torch.randn(N, V, 3, requires_grad=True)\n verts2 = verts1.detach().clone().requires_grad_(True)\n faces = torch.randint(V, size=(N, F, 3))\n meshes1 = Meshes(verts1, faces)\n meshes2 = Meshes(verts2, faces)\n\n kwargs = {\"image_size\": 24, \"perspective_correct\": True}\n fn1 = functools.partial(rasterize_meshes, meshes1, **kwargs)\n fn2 = functools.partial(rasterize_meshes_python, meshes2, **kwargs)\n args = ()\n self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True)\n\n def test_cpp_vs_cuda_perspective_correct(self):\n meshes = ico_sphere(2, device=torch.device(\"cpu\"))\n verts1, faces1 = meshes.get_mesh_verts_faces(0)\n verts1.requires_grad = True\n meshes1 = Meshes(verts=[verts1], faces=[faces1])\n device = get_random_cuda_device()\n verts2 = verts1.detach().to(device).requires_grad_(True)\n faces2 = faces1.detach().clone().to(device)\n meshes2 = Meshes(verts=[verts2], faces=[faces2])\n\n kwargs = {\"image_size\": 64, \"perspective_correct\": True}\n fn1 = functools.partial(rasterize_meshes, meshes1, **kwargs)\n fn2 = functools.partial(rasterize_meshes, meshes2, bin_size=0, **kwargs)\n args = ()\n self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True)\n\n def test_cuda_naive_vs_binned_perspective_correct(self):\n device = get_random_cuda_device()\n meshes = ico_sphere(2, device=device)\n verts1, faces1 = meshes.get_mesh_verts_faces(0)\n verts1.requires_grad = True\n meshes1 = Meshes(verts=[verts1], faces=[faces1])\n verts2 = verts1.detach().clone().requires_grad_(True)\n faces2 = faces1.detach().clone()\n meshes2 = Meshes(verts=[verts2], faces=[faces2])\n\n kwargs = {\"image_size\": 64, 
\"perspective_correct\": True}\n fn1 = functools.partial(rasterize_meshes, meshes1, bin_size=0, **kwargs)\n fn2 = functools.partial(rasterize_meshes, meshes2, bin_size=8, **kwargs)\n args = ()\n self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True)\n\n def test_bin_size_error(self):\n meshes = ico_sphere(2)\n image_size = 1024\n bin_size = 16\n with self.assertRaisesRegex(ValueError, \"bin_size too small\"):\n rasterize_meshes(meshes, image_size, 0.0, 2, bin_size)\n\n def _test_back_face_culling(self, rasterize_meshes_fn, device, bin_size):\n # Square based pyramid mesh.\n # fmt: off\n verts = torch.tensor([\n [-0.5, 0.0, 0.5], # noqa: E241 E201 Front right\n [ 0.5, 0.0, 0.5], # noqa: E241 E201 Front left\n [ 0.5, 0.0, 1.5], # noqa: E241 E201 Back left\n [-0.5, 0.0, 1.5], # noqa: E241 E201 Back right\n [ 0.0, 1.0, 1.0] # noqa: E241 E201 Top point of pyramid\n ], dtype=torch.float32, device=device)\n\n faces = torch.tensor([\n [2, 1, 0], # noqa: E241 E201 Square base\n [3, 2, 0], # noqa: E241 E201 Square base\n [1, 0, 4], # noqa: E241 E201 Triangle on front\n [2, 4, 3], # noqa: E241 E201 Triangle on back\n [3, 4, 0], # noqa: E241 E201 Triangle on left side\n [1, 4, 2] # noqa: E241 E201 Triangle on right side\n ], dtype=torch.int64, device=device)\n # fmt: on\n mesh = Meshes(verts=[verts], faces=[faces])\n kwargs = {\n \"meshes\": mesh,\n \"image_size\": 10,\n \"faces_per_pixel\": 2,\n \"blur_radius\": 0.0,\n \"perspective_correct\": False,\n \"cull_backfaces\": False,\n }\n if bin_size != -1:\n kwargs[\"bin_size\"] = bin_size\n\n # fmt: off\n pix_to_face_frontface = torch.tensor([\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, -1, 2, 2, -1, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, -1, 2, 2, -1, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, 2, 2, 2, 2, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, 2, 2, 2, 2, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1] # noqa: E241 E201\n ], dtype=torch.int64, device=device)\n pix_to_face_backface = torch.tensor([\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, -1, 3, 3, -1, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, -1, 3, 3, -1, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, 3, 3, 3, 3, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, 3, 3, 3, 3, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1] # noqa: E241 E201\n ], dtype=torch.int64, device=device)\n # fmt: on\n\n pix_to_face_padded = -(torch.ones_like(pix_to_face_frontface))\n # Run with and without culling\n # Without culling, for k=0, the front face (i.e. face 2) is\n # rasterized and for k=1, the back face (i.e. face 3) is\n # rasterized.\n idx_f, zbuf_f, bary_f, dists_f = rasterize_meshes_fn(**kwargs)\n self.assertTrue(torch.all(idx_f[..., 0].squeeze() == pix_to_face_frontface))\n self.assertTrue(torch.all(idx_f[..., 1].squeeze() == pix_to_face_backface))\n\n # With culling, for k=0, the front face (i.e. 
face 2) is\n # rasterized and for k=1, there are no faces rasterized\n kwargs[\"cull_backfaces\"] = True\n idx_t, zbuf_t, bary_t, dists_t = rasterize_meshes_fn(**kwargs)\n self.assertTrue(torch.all(idx_t[..., 0].squeeze() == pix_to_face_frontface))\n self.assertTrue(torch.all(idx_t[..., 1].squeeze() == pix_to_face_padded))\n\n def _compare_impls(\n self,\n fn1,\n fn2,\n args1,\n args2,\n grad_var1=None,\n grad_var2=None,\n compare_grads=False,\n ):\n idx1, zbuf1, bary1, dist1 = fn1(*args1)\n idx2, zbuf2, bary2, dist2 = fn2(*args2)\n self.assertTrue((idx1.cpu() == idx2.cpu()).all().item())\n self.assertClose(zbuf1.cpu(), zbuf2.cpu(), rtol=1e-4)\n self.assertClose(dist1.cpu(), dist2.cpu(), rtol=6e-3)\n self.assertClose(bary1.cpu(), bary2.cpu(), rtol=1e-3)\n if not compare_grads:\n return\n\n # Compare gradients.\n torch.manual_seed(231)\n grad_zbuf = torch.randn_like(zbuf1)\n grad_dist = torch.randn_like(dist1)\n grad_bary = torch.randn_like(bary1)\n loss1 = (\n (dist1 * grad_dist).sum()\n + (zbuf1 * grad_zbuf).sum()\n + (bary1 * grad_bary).sum()\n )\n\n # avoid gradient error if rasterize_meshes_python() culls all triangles\n loss1 += grad_var1.sum() * 0.0\n\n loss1.backward()\n grad_verts1 = grad_var1.grad.data.clone().cpu()\n\n grad_zbuf = grad_zbuf.to(zbuf2)\n grad_dist = grad_dist.to(dist2)\n grad_bary = grad_bary.to(bary2)\n loss2 = (\n (dist2 * grad_dist).sum()\n + (zbuf2 * grad_zbuf).sum()\n + (bary2 * grad_bary).sum()\n )\n\n # avoid gradient error if rasterize_meshes_python() culls all triangles\n loss2 += grad_var2.sum() * 0.0\n\n grad_var1.grad.data.zero_()\n loss2.backward()\n grad_verts2 = grad_var2.grad.data.clone().cpu()\n self.assertClose(grad_verts1, grad_verts2, rtol=2e-3)\n\n def _test_perspective_correct(self, rasterize_meshes_fn, device, bin_size=None):\n # fmt: off\n verts = torch.tensor([\n [-0.4, -0.4, 10], # noqa: E241, E201\n [ 0.4, -0.4, 10], # noqa: E241, E201\n [ 0.0, 0.4, 20], # noqa: E241, E201\n ], dtype=torch.float32, device=device)\n # fmt: on\n faces = torch.tensor([[0, 1, 2]], device=device)\n meshes = Meshes(verts=[verts], faces=[faces])\n kwargs = {\n \"meshes\": meshes,\n \"image_size\": 11,\n \"faces_per_pixel\": 1,\n \"blur_radius\": 0.2,\n \"perspective_correct\": False,\n }\n if bin_size != -1:\n kwargs[\"bin_size\"] = bin_size\n\n # Run with and without perspective correction\n idx_f, zbuf_f, bary_f, dists_f = rasterize_meshes_fn(**kwargs)\n\n kwargs[\"perspective_correct\"] = True\n idx_t, zbuf_t, bary_t, dists_t = rasterize_meshes_fn(**kwargs)\n\n # Expected output tensors in the format with axes +X left, +Y up, +Z in\n # idx and dists should be the same with or without perspecitve correction\n # fmt: off\n idx_expected = torch.tensor([\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, 0, 0, 0, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, 0, 0, 0, 0, 0, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, 0, 0, 0, 0, 0, -1, -1, -1], # noqa: E241, E201\n [-1, -1, 0, 0, 0, 0, 0, 0, 0, -1, -1], # noqa: E241, E201\n [-1, -1, 0, 0, 0, 0, 0, 0, 0, -1, -1], # noqa: E241, E201\n [-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], # noqa: E241, E201\n [-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], # noqa: E241, E201\n [-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], # noqa: E241, E201\n [-1, -1, 0, 0, 0, 0, 0, 0, 0, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1] # noqa: E241, E201\n ], dtype=torch.int64, device=device).view(1, 11, 11, 1)\n\n dists_expected = torch.tensor([\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., 
-1.], # noqa: E241, E201, B950\n [-1., -1., -1., -1., 0.1402, 0.1071, 0.1402, -1., -1., -1., -1.], # noqa: E241, E201, B950\n [-1., -1., - 1., 0.1523, 0.0542, 0.0212, 0.0542, 0.1523, -1., -1., -1.], # noqa: E241, E201, B950\n [-1., -1., -1., 0.0955, 0.0214, -0.0003, 0.0214, 0.0955, -1., -1., -1.], # noqa: E241, E201, B950\n [-1., -1., 0.1523, 0.0518, 0.0042, -0.0095, 0.0042, 0.0518, 0.1523, -1., -1.], # noqa: E241, E201, B950\n [-1., -1., 0.0955, 0.0214, -0.0003, -0.032, -0.0003, 0.0214, 0.0955, -1., -1.], # noqa: E241, E201, B950\n [-1., 0.1523, 0.0518, 0.0042, -0.0095, -0.0476, -0.0095, 0.0042, 0.0518, 0.1523, -1.], # noqa: E241, E201, B950\n [-1., 0.1084, 0.0225, -0.0003, -0.0013, -0.0013, -0.0013, -0.0003, 0.0225, 0.1084, -1.], # noqa: E241, E201, B950\n [-1., 0.1283, 0.0423, 0.0212, 0.0212, 0.0212, 0.0212, 0.0212, 0.0423, 0.1283, -1.], # noqa: E241, E201, B950\n [-1., -1., 0.1283, 0.1071, 0.1071, 0.1071, 0.1071, 0.1071, 0.1283, -1., -1.], # noqa: E241, E201, B950\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.] # noqa: E241, E201, B950\n ], dtype=torch.float32, device=device).view(1, 11, 11, 1)\n\n # zbuf and barycentric will be different with perspective correction\n zbuf_f_expected = torch.tensor([\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201, B950\n [-1., -1., -1., -1., 24.0909, 24.0909, 24.0909, -1., -1., -1., -1.], # noqa: E241, E201, B950\n [-1., -1., -1., 21.8182, 21.8182, 21.8182, 21.8182, 21.8182, -1., -1., -1.], # noqa: E241, E201, B950\n [-1., -1., -1., 19.5455, 19.5455, 19.5455, 19.5455, 19.5455, -1., -1., -1.], # noqa: E241, E201, B950\n [-1., -1., 17.2727, 17.2727, 17.2727, 17.2727, 17.2727, 17.2727, 17.2727, -1., -1.], # noqa: E241, E201, B950\n [-1., -1., 15., 15., 15., 15., 15., 15., 15., -1., -1.], # noqa: E241, E201, B950\n [-1., 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, -1.], # noqa: E241, E201, B950\n [-1., 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, -1.], # noqa: E241, E201, B950\n [-1., 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, -1.], # noqa: E241, E201, B950\n [-1., -1., 5.9091, 5.9091, 5.9091, 5.9091, 5.9091, 5.9091, 5.9091, -1., -1.], # noqa: E241, E201, B950\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201, B950\n ], dtype=torch.float32, device=device).view(1, 11, 11, 1)\n\n zbuf_t_expected = torch.tensor([\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201, B950\n [-1., -1., -1., -1., 33.8461, 33.8462, 33.8462, -1., -1., -1., -1.], # noqa: E241, E201, B950\n [-1., -1., -1., 24.4444, 24.4444, 24.4444, 24.4444, 24.4444, -1., -1., -1.], # noqa: E241, E201, B950\n [-1., -1., -1., 19.1304, 19.1304, 19.1304, 19.1304, 19.1304, -1., -1., -1.], # noqa: E241, E201, B950\n [-1., -1., 15.7143, 15.7143, 15.7143, 15.7143, 15.7143, 15.7143, 15.7143, -1., -1.], # noqa: E241, E201, B950\n [-1., -1., 13.3333, 13.3333, 13.3333, 13.3333, 13.3333, 13.3333, 13.3333, -1., -1.], # noqa: E241, E201, B950\n [-1., 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, -1.], # noqa: E241, E201, B950\n [-1., 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, -1.], # noqa: E241, E201, B950\n [-1., 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, -1.], # noqa: E241, E201, B950\n [-1., -1., 8.3019, 8.3019, 8.3019, 8.3019, 8.3019, 8.3019, 8.3019, -1., -1.], # noqa: E241, E201, B950\n [-1., -1., -1., -1., -1., 
-1., -1., -1., -1., -1., -1.] # noqa: E241, E201, B950\n ], dtype=torch.float32, device=device).view(1, 11, 11, 1)\n # fmt: on\n\n self.assertTrue(torch.all(idx_f == idx_expected).item())\n self.assertTrue(torch.all(idx_t == idx_expected).item())\n dists_t_max_diff = (dists_t - dists_expected).abs().max().item()\n dists_f_max_diff = (dists_f - dists_expected).abs().max().item()\n self.assertLess(dists_t_max_diff, 1e-4)\n self.assertLess(dists_f_max_diff, 1e-4)\n zbuf_f_max_diff = (zbuf_f - zbuf_f_expected).abs().max().item()\n zbuf_t_max_diff = (zbuf_t - zbuf_t_expected).abs().max().item()\n self.assertLess(zbuf_f_max_diff, 1e-4)\n self.assertLess(zbuf_t_max_diff, 1e-4)\n\n # Check barycentrics by using them to re-compute zbuf\n z0 = verts[0, 2]\n z1 = verts[1, 2]\n z2 = verts[2, 2]\n w0_f, w1_f, w2_f = bary_f.unbind(dim=4)\n w0_t, w1_t, w2_t = bary_t.unbind(dim=4)\n zbuf_f_bary = w0_f * z0 + w1_f * z1 + w2_f * z2\n zbuf_t_bary = w0_t * z0 + w1_t * z1 + w2_t * z2\n mask = idx_expected != -1\n zbuf_f_bary_diff = (zbuf_f_bary[mask] - zbuf_f_expected[mask]).abs().max()\n zbuf_t_bary_diff = (zbuf_t_bary[mask] - zbuf_t_expected[mask]).abs().max()\n self.assertLess(zbuf_f_bary_diff, 1e-4)\n self.assertLess(zbuf_t_bary_diff, 1e-4)\n\n def _test_barycentric_clipping(self, rasterize_meshes_fn, device, bin_size=None):\n # fmt: off\n verts = torch.tensor([\n [-0.4, -0.4, 10], # noqa: E241, E201\n [ 0.4, -0.4, 10], # noqa: E241, E201\n [ 0.0, 0.4, 20], # noqa: E241, E201\n ], dtype=torch.float32, device=device)\n # fmt: on\n faces = torch.tensor([[0, 1, 2]], device=device)\n meshes = Meshes(verts=[verts], faces=[faces])\n kwargs = {\n \"meshes\": meshes,\n \"image_size\": 5,\n \"faces_per_pixel\": 1,\n \"blur_radius\": 0.2,\n \"perspective_correct\": False,\n \"clip_barycentric_coords\": False, # Initially set this to false\n }\n if bin_size != -1:\n kwargs[\"bin_size\"] = bin_size\n\n # Run with and without perspective correction\n idx_f, zbuf_f, bary_f, dists_f = rasterize_meshes_fn(**kwargs)\n\n # fmt: off\n expected_bary = torch.tensor([\n [\n [-1.0000, -1.0000, -1.0000], # noqa: E241, E201\n [-1.0000, -1.0000, -1.0000], # noqa: E241, E201\n [-0.2500, -0.2500, 1.5000], # noqa: E241, E201\n [-1.0000, -1.0000, -1.0000], # noqa: E241, E201\n [-1.0000, -1.0000, -1.0000] # noqa: E241, E201\n ],\n [\n [-1.0000, -1.0000, -1.0000], # noqa: E241, E201\n [-0.5000, 0.5000, 1.0000], # noqa: E241, E201\n [-0.0000, -0.0000, 1.0000], # noqa: E241, E201\n [ 0.5000, -0.5000, 1.0000], # noqa: E241, E201\n [-1.0000, -1.0000, -1.0000] # noqa: E241, E201\n ],\n [\n [-1.0000, -1.0000, -1.0000], # noqa: E241, E201\n [-0.2500, 0.7500, 0.5000], # noqa: E241, E201\n [ 0.2500, 0.2500, 0.5000], # noqa: E241, E201\n [ 0.7500, -0.2500, 0.5000], # noqa: E241, E201\n [-1.0000, -1.0000, -1.0000] # noqa: E241, E201\n ],\n [\n [-0.5000, 1.5000, -0.0000], # noqa: E241, E201\n [-0.0000, 1.0000, -0.0000], # noqa: E241, E201\n [ 0.5000, 0.5000, -0.0000], # noqa: E241, E201\n [ 1.0000, -0.0000, -0.0000], # noqa: E241, E201\n [ 1.5000, -0.5000, 0.0000] # noqa: E241, E201\n ],\n [\n [-1.0000, -1.0000, -1.0000], # noqa: E241, E201\n [ 0.2500, 1.2500, -0.5000], # noqa: E241, E201\n [ 0.7500, 0.7500, -0.5000], # noqa: E241, E201\n [ 1.2500, 0.2500, -0.5000], # noqa: E241, E201\n [-1.0000, -1.0000, -1.0000] # noqa: E241, E201\n ]\n ], dtype=torch.float32, device=device).view(1, 5, 5, 1, 3)\n # fmt: on\n\n self.assertClose(expected_bary, bary_f, atol=1e-4)\n\n # calculate the expected clipped barycentrics and zbuf\n expected_bary_clipped = 
_clip_barycentric_coordinates(expected_bary)\n expected_z_clipped = _interpolate_zbuf(idx_f, expected_bary_clipped, meshes)\n\n kwargs[\"clip_barycentric_coords\"] = True\n idx_t, zbuf_t, bary_t, dists_t = rasterize_meshes_fn(**kwargs)\n\n self.assertClose(expected_bary_clipped, bary_t, atol=1e-4)\n self.assertClose(expected_z_clipped, zbuf_t, atol=1e-4)\n\n def _test_behind_camera(self, rasterize_meshes_fn, device, bin_size=None):\n \"\"\"\n All verts are behind the camera so nothing should get rasterized.\n \"\"\"\n N = 1\n # fmt: off\n verts = torch.tensor(\n [\n [ -0.5, 0.0, -0.1], # noqa: E241, E201\n [ 0.0, 0.6, -0.1], # noqa: E241, E201\n [ 0.5, 0.0, -0.1], # noqa: E241, E201\n [-0.25, 0.0, -0.9], # noqa: E241, E201\n [ 0.25, 0.5, -0.9], # noqa: E241, E201\n [ 0.75, 0.0, -0.9], # noqa: E241, E201\n [ -0.4, 0.0, -0.5], # noqa: E241, E201\n [ 0.6, 0.6, -0.5], # noqa: E241, E201\n [ 0.8, 0.0, -0.5], # noqa: E241, E201\n [ -0.2, 0.0, -0.5], # noqa: E241, E201\n [ 0.3, 0.6, -0.5], # noqa: E241, E201\n [ 0.4, 0.0, -0.5], # noqa: E241, E201\n ],\n dtype=torch.float32,\n device=device,\n )\n # fmt: on\n faces = torch.tensor(\n [[1, 0, 2], [4, 3, 5], [7, 6, 8], [10, 9, 11]],\n dtype=torch.int64,\n device=device,\n )\n meshes = Meshes(verts=[verts], faces=[faces])\n image_size = 16\n faces_per_pixel = 1\n radius = 0.2\n idx_expected = torch.full(\n (N, image_size, image_size, faces_per_pixel),\n fill_value=-1,\n dtype=torch.int64,\n device=device,\n )\n bary_expected = torch.full(\n (N, image_size, image_size, faces_per_pixel, 3),\n fill_value=-1,\n dtype=torch.float32,\n device=device,\n )\n zbuf_expected = torch.full(\n (N, image_size, image_size, faces_per_pixel),\n fill_value=-1,\n dtype=torch.float32,\n device=device,\n )\n dists_expected = zbuf_expected.clone()\n if bin_size == -1:\n # naive python version with no binning\n idx, zbuf, bary, dists = rasterize_meshes_fn(\n meshes, image_size, radius, faces_per_pixel\n )\n else:\n idx, zbuf, bary, dists = rasterize_meshes_fn(\n meshes, image_size, radius, faces_per_pixel, bin_size\n )\n idx_same = (idx == idx_expected).all().item()\n zbuf_same = (zbuf == zbuf_expected).all().item()\n self.assertTrue(idx_same)\n self.assertTrue(zbuf_same)\n self.assertClose(bary, bary_expected)\n self.assertClose(dists, dists_expected)\n\n def _simple_triangle_raster(self, raster_fn, device, bin_size=None):\n image_size = 10\n\n # Mesh with a single non-symmetrical face - this will help\n # check that the XY directions are correctly oriented.\n verts0 = torch.tensor(\n [[-0.3, -0.4, 0.1], [0.0, 0.6, 0.1], [0.9, -0.4, 0.1]],\n dtype=torch.float32,\n device=device,\n )\n faces0 = torch.tensor([[1, 0, 2]], dtype=torch.int64, device=device)\n\n # Mesh with two overlapping faces.\n # fmt: off\n verts1 = torch.tensor(\n [\n [-0.9, -0.2, 0.1], # noqa: E241, E201\n [ 0.0, 0.6, 0.1], # noqa: E241, E201\n [ 0.7, -0.4, 0.1], # noqa: E241, E201\n [-0.7, 0.4, 0.5], # noqa: E241, E201\n [ 0.0, -0.6, 0.5], # noqa: E241, E201\n [ 0.7, 0.4, 0.5], # noqa: E241, E201\n ],\n dtype=torch.float32,\n device=device,\n )\n # fmt on\n faces1 = torch.tensor(\n [[1, 0, 2], [3, 4, 5]], dtype=torch.int64, device=device\n )\n\n # Expected output tensors in the format with axes +X left, +Y up, +Z in\n # k = 0, closest point.\n # fmt off\n expected_p2face_k0 = torch.tensor(\n [\n [\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, 0, 
-1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, 0, 0, 0, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, 0, 0, 0, 0, -1, -1, -1, -1], # noqa: E241, E201\n [-1, 0, 0, 0, 0, 0, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n ],\n [\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, 1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, 2, 2, 1, 1, 1, 2, -1, -1], # noqa: E241, E201\n [-1, -1, -1, 1, 1, 1, 1, 1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, 1, 1, 1, 1, 1, 1, -1], # noqa: E241, E201\n [-1, -1, 1, 1, 1, 2, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n ],\n ],\n dtype=torch.int64,\n device=device,\n )\n expected_zbuf_k0 = torch.tensor(\n [\n [\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, 0.1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, 0.1, 0.1, 0.1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, 0.1, 0.1, 0.1, 0.1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, 0.1, 0.1, 0.1, 0.1, 0.1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1] # noqa: E241, E201\n ],\n [\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, 0.1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, 0.5, 0.5, 0.1, 0.1, 0.1, 0.5, -1, -1], # noqa: E241, E201\n [-1, -1, -1, 0.1, 0.1, 0.1, 0.1, 0.1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -1], # noqa: E241, E201\n [-1, -1, 0.1, 0.1, 0.1, 0.5, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1] # noqa: E241, E201\n ]\n ],\n device=device,\n )\n # fmt: on\n\n meshes = Meshes(verts=[verts0, verts1], faces=[faces0, faces1])\n\n # k = 1, second closest point.\n expected_p2face_k1 = expected_p2face_k0.clone()\n expected_p2face_k1[0, :] = torch.ones_like(expected_p2face_k1[0, :]) * -1\n\n # fmt: off\n expected_p2face_k1[1, :] = torch.tensor(\n [\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, 2, 2, 2, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, 2, 2, 2, 2, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, 2, 2, 2, 2, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, 2, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1] # noqa: E241, E201\n ],\n dtype=torch.int64,\n device=device,\n )\n expected_zbuf_k1 = expected_zbuf_k0.clone()\n expected_zbuf_k1[0, :] = torch.ones_like(expected_zbuf_k1[0, :]) * -1\n 
expected_zbuf_k1[1, :] = torch.tensor(\n [\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201\n [-1., -1., -1., -1., 0.5, 0.5, 0.5, -1., -1., -1.], # noqa: E241, E201\n [-1., -1., -1., 0.5, 0.5, 0.5, 0.5, -1., -1., -1.], # noqa: E241, E201\n [-1., -1., -1., 0.5, 0.5, 0.5, 0.5, -1., -1., -1.], # noqa: E241, E201\n [-1., -1., -1., -1., 0.5, -1., -1., -1., -1., -1.], # noqa: E241, E201\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.] # noqa: E241, E201\n ],\n dtype=torch.float32,\n device=device,\n )\n # fmt: on\n\n # Coordinate conventions +Y up, +Z in, +X left\n if bin_size == -1:\n # simple python, no bin_size\n p2face, zbuf, bary, pix_dists = raster_fn(meshes, image_size, 0.0, 2)\n else:\n p2face, zbuf, bary, pix_dists = raster_fn(\n meshes, image_size, 0.0, 2, bin_size\n )\n\n self.assertClose(p2face[..., 0], expected_p2face_k0)\n self.assertClose(zbuf[..., 0], expected_zbuf_k0)\n self.assertClose(p2face[..., 1], expected_p2face_k1)\n self.assertClose(zbuf[..., 1], expected_zbuf_k1)\n\n def _simple_blurry_raster(self, raster_fn, device, bin_size=None):\n \"\"\"\n Check that pix_to_face, dist and zbuf values are invariant to the\n ordering of faces.\n \"\"\"\n image_size = 10\n blur_radius = 0.12 ** 2\n faces_per_pixel = 1\n\n # fmt: off\n verts = torch.tensor(\n [\n [ -0.3, 0.0, 0.1], # noqa: E241, E201\n [ 0.0, 0.6, 0.1], # noqa: E241, E201\n [ 0.8, 0.0, 0.1], # noqa: E241, E201\n [-0.25, 0.0, 0.9], # noqa: E241, E201\n [0.25, 0.5, 0.9], # noqa: E241, E201\n [0.75, 0.0, 0.9], # noqa: E241, E201\n [-0.4, 0.0, 0.5], # noqa: E241, E201\n [ 0.6, 0.6, 0.5], # noqa: E241, E201\n [ 0.8, 0.0, 0.5], # noqa: E241, E201\n [-0.2, 0.0, -0.5], # noqa: E241, E201 face behind the camera\n [ 0.3, 0.6, -0.5], # noqa: E241, E201\n [ 0.4, 0.0, -0.5], # noqa: E241, E201\n ],\n dtype=torch.float32,\n device=device,\n )\n # Face with index 0 is non symmetric about the X and Y axis to\n # test that the positive Y and X directions are correct in the output.\n faces_packed = torch.tensor(\n [[1, 0, 2], [4, 3, 5], [7, 6, 8], [10, 9, 11]],\n dtype=torch.int64,\n device=device,\n )\n expected_p2f = torch.tensor(\n [\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, 2, 2, 0, 0, 0, -1, -1, -1, -1], # noqa: E241, E201\n [-1, 2, 0, 0, 0, 0, -1, -1, -1, -1], # noqa: E241, E201\n [-1, 0, 0, 0, 0, 0, 0, -1, -1, -1], # noqa: E241, E201\n [-1, 0, 0, 0, 0, 0, 0, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201\n ],\n dtype=torch.int64,\n device=device,\n )\n expected_zbuf = torch.tensor(\n [\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201\n [-1., 0.5, 0.5, 0.1, 0.1, 0.1, -1., -1., -1., -1.], # noqa: E241, E201\n [-1., 0.5, 0.1, 0.1, 0.1, 0.1, -1., -1., -1., -1.], # noqa: E241, E201\n [-1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -1., -1., -1.], # noqa: E241, E201\n [-1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -1., -1., 
-1.], # noqa: E241, E201\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201\n [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.] # noqa: E241, E201\n ],\n dtype=torch.float32,\n device=device,\n )\n # fmt: on\n\n for i, order in enumerate([[0, 1, 2], [1, 2, 0], [2, 0, 1]]):\n faces = faces_packed[order] # rearrange order of faces.\n mesh = Meshes(verts=[verts], faces=[faces])\n if bin_size == -1:\n # simple python, no bin size arg\n pix_to_face, zbuf, bary_coords, dists = raster_fn(\n mesh, image_size, blur_radius, faces_per_pixel\n )\n else:\n pix_to_face, zbuf, bary_coords, dists = raster_fn(\n mesh, image_size, blur_radius, faces_per_pixel, bin_size\n )\n if i == 0:\n expected_dists = dists\n p2f = expected_p2f.clone()\n p2f[expected_p2f == 0] = order.index(0)\n p2f[expected_p2f == 1] = order.index(1)\n p2f[expected_p2f == 2] = order.index(2)\n self.assertClose(pix_to_face.squeeze(), p2f)\n self.assertClose(zbuf.squeeze(), expected_zbuf, rtol=1e-5)\n self.assertClose(dists, expected_dists)\n\n def _test_coarse_rasterize(self, device):\n image_size = (16, 16)\n # No blurring. This test checks that the XY directions are\n # correctly oriented.\n blur_radius = 0.0\n bin_size = 8\n max_faces_per_bin = 3\n\n # fmt: off\n verts = torch.tensor(\n [\n [-0.5, 0.1, 0.1], # noqa: E241, E201\n [-0.3, 0.6, 0.1], # noqa: E241, E201\n [-0.1, 0.1, 0.1], # noqa: E241, E201\n [-0.3, -0.1, 0.4], # noqa: E241, E201\n [ 0.3, 0.5, 0.4], # noqa: E241, E201\n [0.75, -0.1, 0.4], # noqa: E241, E201\n [ 0.2, -0.3, 0.9], # noqa: E241, E201\n [ 0.3, -0.7, 0.9], # noqa: E241, E201\n [ 0.6, -0.3, 0.9], # noqa: E241, E201\n [-0.4, 0.0, -1.5], # noqa: E241, E201\n [ 0.6, 0.6, -1.5], # noqa: E241, E201\n [ 0.8, 0.0, -1.5], # noqa: E241, E201\n ],\n device=device,\n )\n # Expected faces using axes convention +Y down, + X right, +Z in\n # Non symmetrical triangles i.e face 0 and 3 are in one bin only\n faces = torch.tensor(\n [\n [ 1, 0, 2], # noqa: E241, E201 bin 01 only\n [ 4, 3, 5], # noqa: E241, E201 all bins\n [ 7, 6, 8], # noqa: E241, E201 bin 10 only\n [10, 9, 11], # noqa: E241, E201 negative z, should not appear.\n ],\n dtype=torch.int64,\n device=device,\n )\n # fmt: on\n\n meshes = Meshes(verts=[verts], faces=[faces])\n faces_verts = verts[faces]\n num_faces_per_mesh = meshes.num_faces_per_mesh()\n mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx()\n\n # Expected faces using axes convention +Y down, + X right, + Z in\n bin_faces_expected = (\n torch.ones((1, 2, 2, max_faces_per_bin), dtype=torch.int32, device=device)\n * -1\n )\n bin_faces_expected[0, 1, 1, 0] = torch.tensor([1])\n bin_faces_expected[0, 0, 1, 0:2] = torch.tensor([1, 2])\n bin_faces_expected[0, 1, 0, 0:2] = torch.tensor([0, 1])\n bin_faces_expected[0, 0, 0, 0] = torch.tensor([1])\n\n # +Y up, +X left, +Z in\n bin_faces = _C._rasterize_meshes_coarse(\n faces_verts,\n mesh_to_face_first_idx,\n num_faces_per_mesh,\n image_size,\n blur_radius,\n bin_size,\n max_faces_per_bin,\n )\n\n bin_faces_same = (bin_faces.squeeze() == bin_faces_expected).all()\n self.assertTrue(bin_faces_same.item() == 1)\n\n def test_order_of_ties(self):\n # Tied faces are rasterized in index order\n # We rasterize a mesh with many faces.\n device = torch.device(\"cuda:0\")\n verts = -5 * torch.eye(3, dtype=torch.float32, device=device)[None]\n faces = torch.arange(3, device=device, 
dtype=torch.int64).expand(1, 100, 3)\n mesh = Meshes(verts=verts, faces=faces)\n\n R, T = look_at_view_transform(2.7, 0.0, 0.0)\n cameras = FoVPerspectiveCameras(device=device, R=R, T=T)\n\n raster_settings = RasterizationSettings(\n image_size=28, faces_per_pixel=100, bin_size=0\n )\n rasterizer = MeshRasterizer(raster_settings=raster_settings)\n\n out = rasterizer(mesh, cameras=cameras)\n self.assertClose(\n out.pix_to_face[0, 14:, :14],\n torch.arange(100, device=device).expand(14, 14, 100),\n )\n\n @staticmethod\n def rasterize_meshes_python_with_init(\n num_meshes: int,\n ico_level: int,\n image_size: int,\n blur_radius: float,\n faces_per_pixel: int,\n ):\n device = torch.device(\"cpu\")\n meshes = ico_sphere(ico_level, device)\n meshes_batch = meshes.extend(num_meshes)\n\n def rasterize():\n rasterize_meshes_python(\n meshes_batch, image_size, blur_radius, faces_per_pixel\n )\n\n return rasterize\n\n @staticmethod\n def rasterize_meshes_cpu_with_init(\n num_meshes: int,\n ico_level: int,\n image_size: int,\n blur_radius: float,\n faces_per_pixel: int,\n ):\n meshes = ico_sphere(ico_level, torch.device(\"cpu\"))\n meshes_batch = meshes.extend(num_meshes)\n\n def rasterize():\n rasterize_meshes(\n meshes_batch,\n image_size,\n blur_radius,\n faces_per_pixel=faces_per_pixel,\n bin_size=0,\n )\n\n return rasterize\n\n @staticmethod\n def rasterize_meshes_cuda_with_init(\n num_meshes: int,\n ico_level: int,\n image_size: int,\n blur_radius: float,\n faces_per_pixel: int,\n ):\n device = get_random_cuda_device()\n meshes = ico_sphere(ico_level, device)\n meshes_batch = meshes.extend(num_meshes)\n torch.cuda.synchronize(device)\n\n def rasterize():\n rasterize_meshes(meshes_batch, image_size, blur_radius, faces_per_pixel)\n torch.cuda.synchronize(device)\n\n return rasterize\n\n @staticmethod\n def bm_rasterize_meshes_with_clipping(\n num_meshes: int,\n ico_level: int,\n image_size: int,\n blur_radius: float,\n faces_per_pixel: int,\n dist: float,\n ):\n device = get_random_cuda_device()\n meshes = ico_sphere(ico_level, device)\n meshes_batch = meshes.extend(num_meshes)\n\n settings = RasterizationSettings(\n image_size=image_size,\n blur_radius=blur_radius,\n faces_per_pixel=faces_per_pixel,\n z_clip_value=1e-2,\n perspective_correct=True,\n cull_to_frustum=True,\n )\n\n # The camera is positioned so that the image plane intersects\n # the mesh and some faces are partially behind the image plane.\n R, T = look_at_view_transform(dist, 0, 0)\n cameras = FoVPerspectiveCameras(device=device, R=R, T=T, fov=90)\n rasterizer = MeshRasterizer(raster_settings=settings, cameras=cameras)\n\n # Transform the meshes to projec them onto the image plane\n meshes_screen = rasterizer.transform(meshes_batch)\n torch.cuda.synchronize(device)\n\n def rasterize():\n # Only measure rasterization speed (including clipping)\n rasterize_meshes(\n meshes_screen,\n image_size,\n blur_radius,\n faces_per_pixel,\n z_clip_value=1e-2,\n perspective_correct=True,\n cull_to_frustum=True,\n )\n torch.cuda.synchronize(device)\n\n return rasterize\n" ]
[ [ "torch.zeros", "torch.randperm", "torch.manual_seed", "torch.eye", "torch.rand" ], [ "torch.randn_like", "torch.all", "torch.cuda.synchronize", "torch.randint", "torch.ones", "torch.full", "torch.manual_seed", "torch.randn", "torch.eye", "torch.tensor", "torch.arange", "torch.device", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
leimao/Logistic_Regression_Python
[ "a64ed85d0bea8010d85e9c1e056a3af09b2e43c4" ]
[ "utils.py" ]
[ "\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef train_test_splitter(X, y, ratio = 0.8, random_seed = 0):\n\n assert(len(X) == len(y)), \"The number of points in feature matrix and target vector should be the same.\"\n np.random.seed(random_seed)\n \n n = len(y)\n idx = np.arange(n)\n np.random.shuffle(idx)\n\n train_idx = idx[:int(n * ratio)]\n test_idx = idx[int(n * ratio):]\n\n return X[train_idx,:], X[test_idx,:], y[train_idx], y[test_idx]\n\ndef error_rate(y, y_predicted):\n \n assert len(y) == len(y_predicted), \"The number of targets and predictions should be the same.\"\n assert len(y) != 0, \"The number of targets and predictions should not be zero.\"\n \n return np.sum(np.array(y) != np.array(y_predicted)) / len(y)\n\ndef plot_losses(losses, savefig = False, showfig = False, filename = 'loss.png'):\n\n fig = plt.figure(figsize = (12,8))\n plt.plot(np.arange(len(losses)), losses, color = 'r', marker = 'o', label = 'Loss')\n plt.legend()\n plt.ylabel('Loss')\n plt.xlabel('Number of Iterations')\n\n if savefig:\n fig.savefig(filename, format = 'png', dpi = 600, bbox_inches = 'tight')\n if showfig:\n plt.show()\n plt.close()\n\n return " ]
[ [ "matplotlib.pyplot.legend", "numpy.random.seed", "numpy.arange", "numpy.random.shuffle", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
prkhrv/Python_and_the_Web
[ "6846334c4151ee94107ef393cbb5e8bc8f6a2e4b" ]
[ "Scripts/Web_Scrappers/cricketmonthly_articles/main.py" ]
[ "import pandas as pd\nimport re\nimport requests as rq\nfrom bs4 import BeautifulSoup\n\nheader = {'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'}\nr = rq.get(\"https://www.thecricketmonthly.com/\", headers=header)\nsoup = BeautifulSoup(r.content, 'html.parser')\nmain_sec = soup.find('section', attrs={'class' : re.compile('col-lhs lhs_content')})\narticle = main_sec.find_all('article', attrs={'class' : re.compile('col-1-1 module')})\nabout=[]\nlink=[]\nsummary=[]\nprint('Fetching Latest Articles...')\nfor a in article:\n tag = a.find('h1')\n about.append(tag.text)\n link.append('https://www.thecricketmonthly.com'+tag.a['href'])\n tag = a.find('p')\n summary.append(tag.text)\nprint('Done!')\n\nmain_sec = soup.find('ul', attrs={'class' : re.compile('writer-ul')})\nli = main_sec.find_all('li')\nlinkauth=[]\nauth=[]\nheadline=[]\nsubhead=[]\nprint('Fetching articles of top Writers...')\nfor l in li:\n linkauth.append(l.a['href'])\n spn = l.find('span', attrs={'class' : re.compile('wname')})\n auth.append(spn.text)\n headline.append(l.a.text)\n spn = l.find('span', attrs={'class' : re.compile('subheadline')})\n subhead.append(spn.text)\nprint('Done!')\n\nprint('Processing Data...')\nla = {'About' : about, 'Short Summary' : summary, 'Further Reading' : link}\ntw = {'Writer' : auth, 'Headline' : headline, 'Sub-headline' : subhead, 'Further Reading' : linkauth}\nlatest_articles = pd.DataFrame.from_dict(la)\ntop_writers = pd.DataFrame.from_dict(tw)\nprint('Publishing csv...')\ntop_writers.to_csv('Articles from Top Writers.csv', index=False)\nlatest_articles.to_csv('Latest Articles from Cricket Monthly.csv', index=False)\nprint(\"Your output can be found in form of two files 'Articles from Top Writers.csv' and 'Latest Articles from Cricket Monthly.csv'\")\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
freshjang/MyKiwoom
[ "6342ec7ba8da55194bb473f9052d87f7fa1a640e", "6342ec7ba8da55194bb473f9052d87f7fa1a640e" ]
[ "trader/strategy.py", "trader/collector.py" ]
[ "import os\nimport sys\nimport psutil\nimport numpy as np\nimport pandas as pd\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom utility.setting import ui_num, DICT_SET, columns_gj\nfrom utility.static import now, timedelta_sec, thread_decorator, strf_time, float2str1p6\n\n\nclass Strategy:\n def __init__(self, qlist):\n \"\"\"\n 0 1 2 3 4 5 6 7 8 9 10 11\n windowQ, traderQ, receivQ, stgQ, soundQ, queryQ, teleQ, hoga1Q, hoga2Q, chart1Q, chart2Q, chart3Q,\n chart4Q, chart5Q, chart6Q, chart7Q, chart8Q, chart9Q, chart10Q, tick1Q, tick2Q, tick3Q, tick4Q\n 12 13 14 15 16 17 18 19 20 21 22\n \"\"\"\n self.windowQ = qlist[0]\n self.traderQ = qlist[1]\n self.stgQ = qlist[3]\n\n self.list_buy = [] # 매수주문리스트\n self.list_sell = [] # 매도주문리스트\n self.int_tujagm = 0 # 종목당 투자금\n self.startjjstg = False # 장중전략\n\n self.dict_gsjm = {} # key: 종목코드, value: DataFrame\n self.dict_data = {} # key: 종목코드, value: list\n self.dict_high = {} # key: 종목코드, value: float\n self.dict_time = {\n '관심종목': now(),\n '부가정보': now(),\n '연산시간': now()\n }\n self.dict_intg = {\n '스레드': 0,\n '시피유': 0.,\n '메모리': 0.\n }\n\n self.Start()\n\n def Start(self):\n while True:\n data = self.stgQ.get()\n if type(data) == int:\n self.int_tujagm = data\n elif type(data) == list:\n if len(data) == 2:\n self.UpdateList(data[0], data[1])\n elif len(data) == 38:\n self.BuyStrategy(data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8],\n data[9], data[10], data[11], data[12], data[13], data[14], data[15], data[16],\n data[17], data[18], data[19], data[20], data[21], data[22], data[23], data[24],\n data[25], data[26], data[27], data[28], data[29], data[30], data[31], data[32],\n data[33], data[34], data[35], data[36], data[37])\n elif len(data) == 6:\n self.SellStrategy(data[0], data[1], data[2], data[3], data[4], data[5])\n elif data == '전략프로세스종료':\n break\n\n if now() > self.dict_time['관심종목']:\n self.windowQ.put([ui_num['관심종목'], self.dict_gsjm])\n self.dict_time['관심종목'] = timedelta_sec(1)\n if now() > self.dict_time['부가정보']:\n self.UpdateInfo()\n self.dict_time['부가정보'] = timedelta_sec(2)\n\n self.windowQ.put([1, '시스템 명령 실행 알림 - 전략 연산 프로세스 종료'])\n sys.exit()\n\n def UpdateList(self, gubun, code):\n if '조건진입' in gubun:\n if code not in self.dict_gsjm.keys():\n if int(strf_time('%H%M%S')) < 100000:\n data = np.zeros((DICT_SET['장초평균값계산틱수'] + 2, len(columns_gj))).tolist()\n else:\n data = np.zeros((DICT_SET['장중평균값계산틱수'] + 2, len(columns_gj))).tolist()\n df = pd.DataFrame(data, columns=columns_gj)\n self.dict_gsjm[code] = df.copy()\n elif gubun == '조건이탈':\n if code in self.dict_gsjm.keys():\n del self.dict_gsjm[code]\n elif gubun in ['매수완료', '매수취소']:\n if code in self.list_buy:\n self.list_buy.remove(code)\n elif gubun in ['매도완료', '매도취소']:\n if code in self.list_sell:\n self.list_sell.remove(code)\n if code in self.dict_high.keys():\n del self.dict_high[code]\n\n def BuyStrategy(self, 현재가, 시가, 고가, 저가, 등락율, 당일거래대금, 체결강도,\n 초당매수수량, 초당매도수량, VI해제시간, VI아래5호가, 매도총잔량, 매수총잔량,\n 매도호가5, 매도호가4, 매도호가3, 매도호가2, 매도호가1, 매수호가1, 매수호가2, 매수호가3, 매수호가4, 매수호가5,\n 매도잔량5, 매도잔량4, 매도잔량3, 매도잔량2, 매도잔량1, 매수잔량1, 매수잔량2, 매수잔량3, 매수잔량4, 매수잔량5,\n 종목코드, 체결시간, 틱수신시간, 종목명, 잔고종목):\n if 종목코드 not in self.dict_gsjm.keys():\n return\n\n self.CheckStrategy()\n\n 고저평균 = round((고가 + 저가) / 2)\n 고저평균대비등락율 = round((현재가 / 고저평균 - 1) * 100, 2)\n 직전당일거래대금 = self.dict_gsjm[종목코드]['당일거래대금'][0]\n 초당거래대금 = 0 if 직전당일거래대금 == 0 else int(당일거래대금 - 직전당일거래대금)\n\n 구분 = '장초' if int(strf_time('%H%M%S')) < 100000 else '장중'\n 평균값계산틱수 = DICT_SET[f'{구분}평균값계산틱수']\n 
평균값인덱스 = 평균값계산틱수 + 1\n\n self.dict_gsjm[종목코드] = self.dict_gsjm[종목코드].shift(1)\n self.dict_gsjm[종목코드].at[0] = 등락율, 고저평균대비등락율, 초당거래대금, 당일거래대금, 체결강도, 0.\n if self.dict_gsjm[종목코드]['체결강도'][평균값계산틱수] != 0.:\n 초당거래대금평균 = int(self.dict_gsjm[종목코드]['초당거래대금'][1:평균값인덱스].mean())\n 체결강도평균 = round(self.dict_gsjm[종목코드]['체결강도'][1:평균값인덱스].mean(), 2)\n 최고체결강도 = round(self.dict_gsjm[종목코드]['체결강도'][1:평균값인덱스].max(), 2)\n self.dict_gsjm[종목코드].at[평균값인덱스] = 0., 0., 초당거래대금평균, 0, 체결강도평균, 최고체결강도\n\n 매수 = True\n 직전체결강도 = self.dict_gsjm[종목코드]['체결강도'][1]\n self.dict_data[종목코드] = [\n 현재가, 시가, 고가, 저가, 등락율, 고저평균대비등락율, 당일거래대금, 초당거래대금, 초당거래대금평균, 체결강도,\n 체결강도평균, 최고체결강도, 직전체결강도, 초당매수수량, 초당매도수량, VI해제시간, VI아래5호가, 매도총잔량, 매수총잔량,\n 매도호가5, 매도호가4, 매도호가3, 매도호가2, 매도호가1, 매수호가1, 매수호가2, 매수호가3, 매수호가4, 매수호가5,\n 매도잔량5, 매도잔량4, 매도잔량3, 매도잔량2, 매도잔량1, 매수잔량1, 매수잔량2, 매수잔량3, 매수잔량4, 매수잔량5\n ]\n\n if 잔고종목:\n return\n if 종목코드 in self.list_buy:\n return\n\n # 전략 비공개\n\n if 매수:\n 매수수량 = int(self.int_tujagm / 현재가)\n if 매수수량 > 0:\n 남은수량 = 매수수량\n 직전남은수량 = 매수수량\n 매수금액 = 0\n 호가정보 = {매도호가1: 매도잔량1}\n for 매도호가, 매도잔량 in 호가정보.items():\n 남은수량 -= 매도잔량\n if 남은수량 <= 0:\n 매수금액 += 매도호가 * 직전남은수량\n break\n else:\n 매수금액 += 매도호가 * 매도잔량\n 직전남은수량 = 남은수량\n if 남은수량 <= 0:\n 예상체결가 = round(매수금액 / 매수수량, 2)\n self.list_buy.append(종목코드)\n self.traderQ.put(['매수', 종목코드, 종목명, 예상체결가, 매수수량])\n\n if now() > self.dict_time['연산시간']:\n gap = float2str1p6((now() - 틱수신시간).total_seconds())\n self.windowQ.put([1, f'전략스 연산 시간 알림 - 수신시간과 연산시간의 차이는 [{gap}]초입니다.'])\n self.dict_time['연산시간'] = timedelta_sec(60)\n\n def SellStrategy(self, 종목코드, 종목명, 수익률, 보유수량, 현재가, 매수시간):\n if 종목코드 not in self.dict_gsjm.keys() or 종목코드 not in self.dict_data.keys():\n return\n if 종목코드 in self.list_sell:\n return\n\n 매도 = False\n 구분 = '장초' if int(strf_time('%H%M%S')) < 100000 else '장중'\n 현재가, 시가, 고가, 저가, 등락율, 고저평균대비등락율, 당일거래대금, 초당거래대금, 초당거래대금평균, 체결강도, \\\n 체결강도평균, 최고체결강도, 직전체결강도, 초당매수수량, 초당매도수량, VI해제시간, VI아래5호가, 매도총잔량, 매수총잔량, \\\n 매도호가5, 매도호가4, 매도호가3, 매도호가2, 매도호가1, 매수호가1, 매수호가2, 매수호가3, 매수호가4, 매수호가5, \\\n 매도잔량5, 매도잔량4, 매도잔량3, 매도잔량2, 매도잔량1, 매수잔량1, 매수잔량2, 매수잔량3, 매수잔량4, 매수잔량5 = \\\n self.dict_data[종목코드]\n\n if 종목코드 not in self.dict_high.keys():\n self.dict_high[종목코드] = 수익률\n elif 수익률 > self.dict_high[종목코드]:\n self.dict_high[종목코드] = 수익률\n 최고수익률 = self.dict_high[종목코드]\n\n \"\"\" 매도 조건 예시 \"\"\"\n if 수익률 <= -2 or 수익률 >= 3:\n 매도 = True\n\n # 전략 비공개\n\n if 매도:\n 남은수량 = 보유수량\n 직전남은수량 = 보유수량\n 매도금액 = 0\n 호가정보 = {매수호가1: 매수잔량1, 매수호가2: 매수잔량2, 매수호가3: 매수잔량3, 매수호가4: 매수잔량4, 매수호가5: 매수잔량5}\n for 매수호가, 매수잔량 in 호가정보.items():\n 남은수량 -= 매수잔량\n if 남은수량 <= 0:\n 매도금액 += 매수호가 * 직전남은수량\n break\n else:\n 매도금액 += 매수호가 * 매수잔량\n 직전남은수량 = 남은수량\n if 남은수량 <= 0:\n 예상체결가 = round(매도금액 / 보유수량, 2)\n self.list_sell.append(종목코드)\n self.traderQ.put(['매도', 종목코드, 종목명, 예상체결가, 보유수량])\n\n def CheckStrategy(self):\n if int(strf_time('%H%M%S')) >= 100000 and not self.startjjstg:\n for code in list(self.dict_gsjm.keys()):\n data = np.zeros((DICT_SET['장중평균값계산틱수'] + 2, len(columns_gj))).tolist()\n df = pd.DataFrame(data, columns=columns_gj)\n self.dict_gsjm[code] = df.copy()\n self.startjjstg = True\n\n @thread_decorator\n def UpdateInfo(self):\n info = [6, self.dict_intg['메모리'], self.dict_intg['스레드'], self.dict_intg['시피유']]\n self.windowQ.put(info)\n self.UpdateSysinfo()\n\n def UpdateSysinfo(self):\n p = psutil.Process(os.getpid())\n self.dict_intg['메모리'] = round(p.memory_info()[0] / 2 ** 20.86, 2)\n self.dict_intg['스레드'] = p.num_threads()\n self.dict_intg['시피유'] = round(p.cpu_percent(interval=2) / 2, 2)\n", "import os\nimport sys\nimport psutil\nimport pandas as 
pd\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom utility.static import now, strf_time, timedelta_sec, thread_decorator, float2str1p6\n\nDIVIDE_SAVE = True # 틱데이터 저장방식 선택 - True: 10초에 한번 저장(전체종목저장), False: 장마감 후 저장(저장종목선택)\nDTRADE_SAVE = False # 장마감 후 저장일 경우 - True: 당일거래목록만 저장, False: 전체종목 저장\n\n\nclass Collector:\n def __init__(self, gubun, qlist):\n \"\"\"\n 0 1 2 3 4 5 6 7 8 9 10 11\n windowQ, traderQ, receivQ, stgQ, soundQ, queryQ, teleQ, hoga1Q, hoga2Q, chart1Q, chart2Q, chart3Q,\n chart4Q, chart5Q, chart6Q, chart7Q, chart8Q, chart9Q, chart10Q, tick1Q, tick2Q, tick3Q, tick4Q\n 12 13 14 15 16 17 18 19 20 21 22\n \"\"\"\n self.gubun = gubun\n self.windowQ = qlist[0]\n self.traderQ = qlist[1]\n self.queryQ = qlist[5]\n if self.gubun == 1:\n self.tickQ = qlist[19]\n elif self.gubun == 2:\n self.tickQ = qlist[20]\n elif self.gubun == 3:\n self.tickQ = qlist[21]\n elif self.gubun == 4:\n self.tickQ = qlist[22]\n\n self.dict_df = {}\n self.dict_dm = {}\n self.dict_time = {\n '기록시간': now(),\n '저장시간': now(),\n '부가정보': now()\n }\n self.dict_intg = {\n '스레드': 0,\n '시피유': 0.,\n '메모리': 0.\n }\n self.str_tday = strf_time('%Y%m%d')\n self.Start()\n\n def Start(self):\n self.windowQ.put([1, '시스템 명령 실행 알림 - 콜렉터 시작 완료'])\n while True:\n data = self.tickQ.get()\n if len(data) != 2:\n self.UpdateTickData(data)\n elif data[0] == '콜렉터종료':\n if not DIVIDE_SAVE:\n self.SaveTickData(data[1])\n else:\n self.traderQ.put('틱데이터저장완료')\n break\n\n if now() > self.dict_time['부가정보']:\n self.UpdateInfo()\n self.dict_time['부가정보'] = timedelta_sec(2)\n\n if self.gubun == 4:\n self.windowQ.put([1, '시스템 명령 실행 알림 - 콜렉터 종료'])\n sys.exit()\n\n def UpdateTickData(self, data):\n code = data[-3]\n dt = data[-2]\n receivetime = data[-1]\n del data[-3:]\n\n if code not in self.dict_df.keys():\n columns = [\n '현재가', '시가', '고가', '저가', '등락율', '당일거래대금', '체결강도',\n '초당매수수량', '초당매도수량', 'VI해제시간', 'VI아래5호가', '매도총잔량', '매수총잔량',\n '매도호가5', '매도호가4', '매도호가3', '매도호가2', '매도호가1',\n '매수호가1', '매수호가2', '매수호가3', '매수호가4', '매수호가5',\n '매도잔량5', '매도잔량4', '매도잔량3', '매도잔량2', '매도잔량1',\n '매수잔량1', '매수잔량2', '매수잔량3', '매수잔량4', '매수잔량5'\n ]\n self.dict_df[code] = pd.DataFrame([data], columns=columns, index=[dt])\n else:\n self.dict_df[code].at[dt] = data\n\n if self.gubun == 4 and now() > self.dict_time['기록시간']:\n gap = float2str1p6((now() - receivetime).total_seconds())\n self.windowQ.put([1, f'콜렉터 수신 기록 알림 - 수신시간과 기록시간의 차이는 [{gap}]초입니다.'])\n self.dict_time['기록시간'] = timedelta_sec(60)\n\n if DIVIDE_SAVE and now() > self.dict_time['저장시간']:\n self.queryQ.put([2, self.dict_df])\n self.dict_df = {}\n self.dict_time['저장시간'] = timedelta_sec(10)\n\n def SaveTickData(self, codes):\n if DTRADE_SAVE:\n for code in list(self.dict_df.keys()):\n if code not in codes:\n del self.dict_df[code]\n self.queryQ.put([2, self.dict_df, '장마감후저장'])\n\n @thread_decorator\n def UpdateInfo(self):\n info = [8, self.dict_intg['메모리'], self.dict_intg['스레드'], self.dict_intg['시피유']]\n self.windowQ.put(info)\n self.UpdateSysinfo()\n\n def UpdateSysinfo(self):\n p = psutil.Process(os.getpid())\n self.dict_intg['메모리'] = round(p.memory_info()[0] / 2 ** 20.86, 2)\n self.dict_intg['스레드'] = p.num_threads()\n self.dict_intg['시피유'] = round(p.cpu_percent(interval=2) / 2, 2)\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Khan-Xu/Pyrod
[ "3ee62e3d6037328a010d9340bf1e8ff991f48414", "3ee62e3d6037328a010d9340bf1e8ff991f48414" ]
[ "tool/tools.py", "apply/xrr.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 15 21:50:58 2018\r\n\r\n@author: USER\r\n\"\"\"\r\n\r\n# Codes are free to use. Do whatever you want\r\n\r\nfrom __future__ import absolute_import\r\n\r\n\"\"\"Read raw data\"\"\"\r\n\r\n####################### LIBRARY #############################\r\n\r\n# exceptions library\r\nfrom exceptions import (Data_Format_Exception,\r\n Data_Match_Exception)\r\n\r\n# Python stdlib imports\r\nimport datetime\r\nfrom math import factorial\r\n\r\n# data processing library\r\nimport numpy as np\r\n\r\n# pyrod library\r\n\r\n####################### CONSTANT ############################\r\n\r\n# constant \r\n\r\n####################### FUNCTIONS ###########################\r\n\r\n'.......................optimise.........................'\r\n\r\n# f - fitting data\r\n# y - experiment data\r\n# mask - mask data\r\n\r\ndef R_square(f, y, mask):\r\n \r\n if not len(f) == len(y) == len(mask):\r\n raise Data_Match_Exception('Please input equal length')\r\n \r\n def nplist(data):\r\n \r\n # check and transform data\r\n try:\r\n \r\n # check np array\r\n if isinstance(data, np.ndarray):\r\n pass\r\n # check list\r\n elif isinstance(data, list):\r\n rl = np.array(data)\r\n # check np mat\r\n elif isinstance(data, np.matrix):\r\n rl = np.asarray(data).reshape(-1)\r\n # for other unpoackable datatype\r\n else:\r\n # init a list first\r\n l = []\r\n # unpack raw data with for\r\n for e in data:\r\n l.append(e)\r\n # trans to np array\r\n rl = np.array(l)\r\n \r\n # unknown type\r\n except Data_Format_Exception:\r\n \r\n print('unknown data type')\r\n \r\n return rl\r\n\r\n # tranform to np array; apply mask \r\n rf, ry = nplist(f)*nplist(mask), nplist(y)*nplist(mask)\r\n\r\n # calculate r square\r\n ss_tot = np.sum((ry - np.sum(ry)/len(ry))**2)\r\n ss_res = np.sum((ry - rf)**2)\r\n \r\n r2 = 1 - ss_res/ss_tot\r\n \r\n return r2\r\n\r\n\r\ndef opt_step_brute(func,x0_range,grid_size = 10,step = 2):\r\n \r\n \"\"\"\r\n Brute method is much too slow and big.\r\n However, its usefull and simple. 
To improve it, we try to step it\r\n \r\n x0_range: range of variable, [x1-,x1+],[x2-,x2+]\r\n currently,only two axes are avaialble\r\n \"\"\"\r\n # current step is 3\r\n step = 3\r\n \r\n # grid_size and step have to be integer\r\n try:\r\n grid_size = int(grid_size)\r\n step = int(step)\r\n \r\n except ValueError:\r\n raise ValueError(\"grid_size and step have to be of type int\")\r\n \r\n # one dimensional step brute method\r\n if len(x0_range) == 1:\r\n \r\n # store func(grid_data) result\r\n grid_list0 = []\r\n x0 = np.linspace(x0_range[0][0],x0_range[0][1],grid_size)\r\n \r\n # func(grid_data)\r\n for px in range(grid_size):\r\n grid_list0.append(func(x0[px]))\r\n # store min in step1\r\n min_idx = np.argmin(grid_list0)\r\n \r\n # continue step2\r\n grid_list1 = []\r\n x1 = x0[min_idx]\r\n delta = (abs(x0_range[0][1] - x0_range[0][0]))/grid_size\r\n \r\n x2 = np.linspace(x1-delta,x1+delta,grid_size)\r\n for sx in range(grid_size):\r\n grid_list1.append(func(x2[sx]))\r\n \r\n min_step2 = x2[np.argmin(grid_list1)]\r\n \r\n elif len(x0_range) == 2:\r\n \r\n # step1: grid the x0_range\r\n min_step1 = []\r\n au = np.linspace(x0_range[0][0],x0_range[0][1],grid_size)\r\n av = np.linspace(x0_range[1][0],x0_range[1][1],grid_size)\r\n \r\n # find minimum in xu and xv grid\r\n def grid_min(xu,xv):\r\n \r\n x0_grid = np.meshgrid(xu, xv)\r\n \r\n #grid list\r\n grid_list = np.mat(np.zeros([grid_size**2,3]))\r\n idx = 0\r\n \r\n # pu-- for postion in u axes\r\n for pu in range(grid_size):\r\n # pv--for postion in v axes\r\n for pv in range(grid_size):\r\n \r\n grid_list[idx,0] = x0_grid[0][pu,pv]\r\n grid_list[idx,1] = x0_grid[1][pu,pv]\r\n grid_list[idx,2] = func([x0_grid[0][pu,pv],\r\n x0_grid[1][pu,pv]])\r\n idx = idx + 1\r\n # find the minimum in step1\r\n min_idx = np.argmin(grid_list[:,2])\r\n \r\n return grid_list[min_idx,:]\r\n \r\n # append the firt minimum before rocking\r\n min_step1.append(grid_min(au,av))\r\n \r\n # start rocking, try to avoid local minmum\r\n bu = au - (au[1]-au[0])/2\r\n bv = av - (av[1]-av[0])/2\r\n \r\n min_step1.append(grid_min(bu,bv))\r\n \r\n # step 2\r\n # step 2 new x range\r\n u_min = np.min([min_step1[0][0,0],\r\n min_step1[1][0,0]])\r\n u_max = np.max([min_step1[0][0,0],\r\n min_step1[1][0,0]])\r\n deta_u = u_max - u_min\r\n v_min = np.min([min_step1[0][0,1],\r\n min_step1[1][0,1]])\r\n v_max = np.max([min_step1[0][0,1],\r\n min_step1[1][0,1]])\r\n deta_v = v_max - v_min\r\n # new u and v\r\n cu = np.linspace(u_min-deta_u, u_min+deta_u, grid_size)\r\n cv = np.linspace(v_min-deta_v, v_min+deta_v, grid_size)\r\n \r\n min_step2 = grid_min(cu,cv).tolist()\r\n \r\n return min_step2\r\n \r\n \r\n'......................smooth.........................'\r\n\r\ndef savitzky_golay(y, window_size, order, deriv=0, rate=1):\r\n \r\n \"\"\" \r\n Smooth (and optionally differentiate) data with a Savitzky-Golay filter.\r\n The Savitzky-Golay filter removes high frequency noise from data.\r\n It has the advantage of preserving the original shape and\r\n features of the signal better than other types of filtering\r\n approaches, such as moving averages techniques.\r\n\r\n ----------\r\n .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of\r\n Data by Simplified Least Squares Procedures. Analytical\r\n Chemistry, 1964, 36 (8), pp 1627-1639.\r\n .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing\r\n W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. 
Flannery\r\n Cambridge University Press ISBN-13: 9780521880688\r\n \"\"\"\r\n \r\n # integer value\r\n try:\r\n window_size = np.abs(np.int(window_size))\r\n order = np.abs(np.int(order))\r\n except ValueError:\r\n raise ValueError(\"window_size and order have to be of type int\")\r\n \r\n if window_size % 2 != 1 or window_size < 1:\r\n raise TypeError(\"window_size size must be a positive odd number\")\r\n if window_size < order + 2:\r\n raise TypeError(\"window_size is too small for the polynomials order\")\r\n \r\n order_range = range(order+1)\r\n half_window = (window_size -1) // 2\r\n \r\n # precompute coefficients\r\n \r\n b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])\r\n m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)\r\n \r\n # pad the signal at the extremes with\r\n # values taken from the signal itself\r\n \r\n firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )\r\n lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])\r\n y = np.concatenate((firstvals, y, lastvals))\r\n \r\n return np.convolve( m[::-1], y, mode='valid')\r\n\r\n######################## CLASSS #############################\r\n\r\n ", "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 20 10:03:37 2018\r\n\r\n@author: USER \r\n\"\"\"\r\n\r\n\r\n# Codes are free to use. Do whatever you want\r\n\r\nfrom __future__ import absolute_import \r\n\r\n\"\"\"Read raw data\"\"\"\r\n\r\n####################### LIBRARY #############################\r\n\r\n# exceptions library\r\nfrom exceptions import (Excel_Load_Exception,\r\n Data_Load_Exception,\r\n Data_Format_Exception)\r\n\r\n# Python stdlib imports\r\nimport os\r\nimport pickle\r\n\r\n# data processing library\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n# pyrod library\r\n\r\nfrom read.read_parameters import initialization_parameters\r\nfrom read.read_raw_data import initialization_rhkl\r\n\r\n####################### CONSTANT ############################\r\n\r\n# constant \r\n\r\nR0 = 2.82e-5 # Thompson scattering length, the radi of electron in Angs\r\nNA = 6.022e23 # Avogadros number corresponding to g/cm3 e23\r\n\r\n# atomic_mass.xlsx and atomic_scattering_factor.pickle\r\n\r\nATOMIC_MASS = os.path.abspath(os.path.dirname('atomic_mass.xlsx')) +\\\r\n '/base/atomic_mass.xlsx'\r\nASF = os.path.abspath(os.path.dirname('atomic_scattering_factor.pickle')) +\\\r\n '\\\\base\\\\atomic_scattering_factor.pickle'\r\n\r\ntry:\r\n ATOM_TABLE = pd.read_excel(ATOMIC_MASS,\r\n sheet_name = 'Sheet1',\r\n index_col = 0)\r\n ATOM_LIST = ATOM_TABLE.index.values.tolist()\r\n \r\n f = open(ASF,'rb')\r\n ASF_TABLE = pickle.load(f)\r\n f.close()\r\n \r\nexcept Excel_Load_Exception:\r\n print('load atomic form factor data base load fail')\r\n\r\n# load ctr optimised data\r\n\r\nCTR_PATH = os.path.abspath(os.path.dirname('ctr_optimised_result.pickle')) +\\\r\n '/data/ctr_optimised_result.pickle'\r\n\r\nctr_optimised_result = {}\r\n\r\ntry:\r\n with open(CTR_PATH, 'rb') as f:\r\n unpickler = pickle.Unpickler(f)\r\n ctr_optimised_result = unpickler.load()\r\n \r\nexcept Data_Load_Exception:\r\n print('Please optimise and save the CTR data first!')\r\n\r\n# save xrr optimised data path\r\n\r\nXRR_PATH = os.path.abspath(os.path.dirname('xrr_optimised_result.pickle')) +\\\r\n '/data/xrr_optimised_result.pickle'\r\n \r\n####################### FUNCTIONS ###########################\r\n\r\n# elements = [A,B]\r\n# result density - g/cm3\r\ndef density(elements,lattice):\r\n 
\r\n#elements = ['Sr','Ti']\r\n#lattice = [3.905,3.905,3.905,np.pi/2,np.pi/2,np.pi/2]\r\n \r\n \"\"\"theroy density of ABO3 pervoskite oxide\"\"\"\r\n \r\n try:\r\n \r\n A_mass = ATOM_TABLE.at[elements[0],'mass']\r\n B_mass = ATOM_TABLE.at[elements[1],'mass']\r\n O_mass = ATOM_TABLE.at['O','mass']\r\n \r\n a = lattice[0]\r\n b = lattice[1]\r\n c = lattice[2]\r\n \r\n alpha = lattice[3]*np.pi/180\r\n beta = lattice[4]*np.pi/180\r\n gamma = lattice[5]*np.pi/180\r\n \r\n except Data_Format_Exception:\r\n print('Parameters error!')\r\n \r\n lattice_volume = a*b*c*np.sqrt(1 + 2*np.cos(alpha)*np.cos(beta)*np.cos(gamma)-\\\r\n np.cos(alpha)**2 - \\\r\n np.cos(beta)**2 - \\\r\n np.cos(gamma)**2)\r\n A_unitcell = A_mass\r\n B_unitcell = B_mass\r\n O_unitcell = 3*O_mass\r\n \r\n na = NA/1e23\r\n \r\n density_A = A_unitcell/(lattice_volume*na*0.1)\r\n density_B = B_unitcell/(lattice_volume*na*0.1)\r\n density_O = O_unitcell/(lattice_volume*na*0.1)\r\n \r\n density_pervoskite = density_A + density_B + density_O\r\n \r\n return density_pervoskite, density_A, density_B, density_O\r\n \r\n# wave length should be Angs\r\n# energy is the x-ray energy kev\r\n# elements = [A,B]\r\ndef imag_f(elements, energy):\r\n \r\n \"\"\"imag part of x-ray scattering factor for a certin element\"\"\"\r\n \r\n A_energys = ASF_TABLE[elements[0]]['E(kev)']\r\n B_energys = ASF_TABLE[elements[1]]['E(kev)']\r\n O_energys = ASF_TABLE['O']['E(kev)']\r\n \r\n A_imagf = ASF_TABLE[elements[0]]['imag_f']\r\n B_imagf = ASF_TABLE[elements[1]]['imag_f']\r\n O_imagf = ASF_TABLE['O']['imag_f']\r\n \r\n def f2(energy, energys, imag_f):\r\n \r\n atom_f2 = np.interp(energy, energys, imag_f)\r\n \r\n return atom_f2\r\n \r\n A_f2 = f2(energy, A_energys, A_imagf)\r\n B_f2 = f2(energy, B_energys, B_imagf)\r\n O_f2 = f2(energy, O_energys, O_imagf)\r\n \r\n return A_f2, B_f2, O_f2\r\n\r\n# energy is x-ray energy kev\r\n# elements = [A,B]\r\n# atttenuation coefficiment unit is Angs\r\ndef attenuation_coefficient(elements, densities, f2, energy):\r\n \r\n wave_length = 12.38/energy # unit Angs\r\n na = NA/1e23\r\n \r\n A_ac = (densities[1]*na/ATOM_TABLE.at[elements[0],'mass'])*2*R0*wave_length*f2[0]\r\n B_ac = (densities[2]*na/ATOM_TABLE.at[elements[1],'mass'])*2*R0*wave_length*f2[1]\r\n O_ac = (densities[3]*na/ATOM_TABLE.at['O','mass'])*2*R0*wave_length*f2[2]\r\n \r\n ac = A_ac + B_ac + O_ac\r\n \r\n return ac\r\n\r\n######################## CLASSS #############################\r\n \r\nclass XRR(initialization_parameters, initialization_rhkl):\r\n \r\n def __init__(self, parameters, experiment_data, energy):\r\n \r\n initialization_parameters.__init__(self, parameters)\r\n initialization_parameters._var_list(self)\r\n initialization_parameters._var_table(self)\r\n initialization_rhkl.__init__(self, experiment_data)\r\n \r\n self.energy = energy # unit - kev\r\n self.wave_length = 12.38/energy # unit-Angs\r\n self.coefficient = {}\r\n \r\n self.iq,self.i = self._read_rhkl(self)\r\n self.lattice_c = self.var_list['substrate']['lattice'].at['constant','c']\r\n self.q = self.iq*2*np.pi/self.lattice_c\r\n \r\n for key in self.var_list:\r\n \r\n element = [self.var_list[key]['ions'].columns[3], self.var_list[key]['ions'].columns[0]]\r\n lattice = self.var_list[key]['lattice'].as_matrix().tolist()[0]\r\n \r\n densities = density(element, lattice)\r\n f2 = imag_f(element, energy)\r\n ac = attenuation_coefficient(element, densities, f2, energy)\r\n \r\n self.coefficient[key] = [element,lattice,densities,ac]\r\n \r\n # stack list\r\n \r\n 
self.scattering_factor = [] # density*R0 + 1j*attenuation_coefficient\r\n self.d_space = [] # stack thickness\r\n self.roughness = [] # roughness at each layer\r\n \r\n for layeri in range(len(self.var_table['posz_list'])):\r\n \r\n self.d_space.append(self.var_table['posz_table'][0,0]*self.lattice_c*0.98)\r\n self.roughness.append(0)\r\n \r\n slab = self.var_table['slab_list'][layeri]\r\n dens = self.coefficient[slab][2][0]\r\n ac = self.coefficient[slab][3]\r\n \r\n self.scattering_factor.append(dens*R0+1j*ac)\r\n \r\n self.scattering_factor.append(1e-21) # the scattering factor of vacuum\r\n self.roughness.append(0)\r\n self.roughness[0] = 0\r\n self.thickness = np.sum(self.d_space)\r\n \r\n # rt - ratio\r\n self.rt = 1\r\n \r\n # re initilization the factors-d_space, scattering_factor, roughness, thichness\r\n def re(self):\r\n \r\n # stack list\r\n \r\n self.scattering_factor = [] # density*R0 + 1j*attenuation_coefficient\r\n self.d_space = [] # stack thickness\r\n self.roughness = [] # roughness at each layer\r\n \r\n for layeri in range(len(self.var_table['posz_list'])):\r\n \r\n self.d_space.append(self.var_table['posz_table'][0,0]*self.lattice_c*0.98)\r\n self.roughness.append(0)\r\n \r\n slab = self.var_table['slab_list'][layeri]\r\n dens = self.coefficient[slab][2][0]\r\n ac = self.coefficient[slab][3]\r\n \r\n self.scattering_factor.append(dens*R0+1j*ac)\r\n \r\n self.scattering_factor.append(1e-21) # the scattering factor of vacuum\r\n self.roughness.append(0)\r\n self.roughness[0] = 0\r\n self.thickness = np.sum(self.d_space)\r\n\r\n def disp(self):\r\n \r\n # list all the self parameters\r\n \r\n print(\"--------------------------------------------\")\r\n print(\"densities:\\n\")\r\n for key in self.coefficient:\r\n print(\" \" + key + \": %s\\n\" %self.coefficient[key][2][0])\r\n \r\n print(\"scattering factor: %s - %s\\n\" % (self.scattering_factor[ 0], \r\n self.scattering_factor[-1]))\r\n print(\"roughness: %s - %s\\n\" % (self.roughness[ 0],\r\n self.roughness[-1]))\r\n print(\"d_space: %s - %s Angs\\n\" % (self.d_space[ 0],\r\n self.d_space[-1]))\r\n print(\"thichness: %s Angs\\n\" % np.sum(self.d_space))\r\n print('--------------------------------------------')\r\n \r\n # homogeneous slab reflection\r\n def homo_slab(self):\r\n \r\n slabs = list(self.var_list.keys())\r\n slabs.remove('substrate')\r\n \r\n # check if the slab is homogeneous\r\n if len(slabs) != 1:\r\n raise Data_Format_Exception('Not homogeneous slab!')\r\n \r\n dens = self.coefficient[slabs[0]][2][0]\r\n thickness = np.sum(self.d_space)\r\n \r\n reflect_inten = -1j*(4*np.pi*dens*R0*thickness/self.q)*\\\r\n (np.sin(self.q*thickness/2)/(self.q*thickness/2))*\\\r\n np.exp(1j*self.q*thickness/2)\r\n \r\n return self.iq, reflect_inten\r\n \r\n # Parratt reflectivities \r\n # Maybe a little difficult while optimizing parameter, parratt model is accurate\r\n # Recomed method\r\n def parratt(self):\r\n \r\n \"\"\"\"Elements of Modern X-ray Physics\" by Jens Als-Nielsen and Des McMorrow,\r\n Calculates: Parratt reflectivity of a multilayer\"\"\"\r\n \r\n k = 2*np.pi/self.wave_length # diffraction vector#\r\n layer_num = len(self.scattering_factor) # layer number, vacuum is added\r\n \r\n #----- Calculate refractive index n of each layer\r\n delta = self.wave_length**2*np.real(self.scattering_factor)/(2*np.pi)\r\n beta = self.wave_length*np.imag(self.scattering_factor)/(4*np.pi)\r\n # relfractive index\r\n# nu = 1 - delta + 1j*beta\r\n \r\n #----- Wavevector transfer in each layer\r\n trans_vector = 
np.zeros([layer_num+1, len(self.q)], dtype = np.complex)\r\n trans_vector[0,:] = self.q\r\n \r\n for i in range(layer_num):\r\n trans_vector[i+1,:] = np.sqrt(self.q**2 - 8*k**2*delta[i] + 1j*8*k**2*beta[i])\r\n \r\n #----- Reflection coefficients (no multiple scattering)\r\n reflect_coe = np.zeros([layer_num, len(self.q)], dtype = np.complex)\r\n \r\n for i in range(layer_num):\r\n reflect_coe[i,:] = ((trans_vector[i,:] - trans_vector[i+1,:])/\\\r\n (trans_vector[i,:] + trans_vector[i+1,:]))*\\\r\n np.exp(-0.5*trans_vector[i,:]*trans_vector[i+1,:]*self.roughness[i])\r\n \r\n #----- Reflectivity from first layer\r\n reflectivity = np.zeros([layer_num-1, len(self.q)], dtype = np.complex)\r\n \r\n phase1 = np.exp(1j*trans_vector[layer_num-1]*self.d_space[layer_num-2])\r\n \r\n if layer_num > 1:\r\n reflectivity[0,:] = (reflect_coe[layer_num-2,:] + \\\r\n reflect_coe[layer_num-1,:]*phase1)/\\\r\n (1 + reflect_coe[layer_num-2,:]*\\\r\n reflect_coe[layer_num-1,:]*phase1)\r\n if layer_num > 2:\r\n for i in range(1,layer_num-1):\r\n \r\n phasei = np.exp(1j*trans_vector[layer_num-i-1,:]*self.d_space[layer_num-i-2])\r\n \r\n reflectivity[i,:] = (reflect_coe[layer_num-i-2,:] +\\\r\n reflectivity[i-1,:]*phasei)/\\\r\n (1 + reflect_coe[layer_num-i-2,:]*\\\r\n reflectivity[i-1,:]*phasei)\r\n \r\n #------ Intensity reflectivity\r\n \r\n # should be reminded here! The data is not squared! To keep uniform with ctr fitting\r\n \r\n if layer_num == 1:\r\n reflect_inten = reflect_coe[0,:]\r\n else:\r\n reflect_inten = reflectivity[-1,:]\r\n \r\n return self.iq, reflect_inten\r\n \r\n# def p_xrr(q, inten):\r\n# \r\n# plt.plot(q, np.log(inten))\r\n\r\nclass xr(XRR):\r\n \r\n # the relax of surface, secface and interface\r\n # parratt method only\r\n \r\n def xr_relax(self, interface = 0.9, secface = 1, surface = 1.03, rt = 1):\r\n \r\n XRR.re(self)\r\n \r\n self.rt = rt\r\n self.d_space[ 0] = interface\r\n self.d_space[-2] = secface\r\n self.d_space[-1] = surface\r\n \r\n q, inten = XRR.parratt(self)\r\n \r\n plt.plot(q, np.log(abs(self.rt*inten + ctr_optimised_result['substrate_ctr'])))\r\n plt.plot(q, np.log(abs(ctr_optimised_result['shkl'])))\r\n \r\n return q, inten\r\n \r\n # modulate the roughness at interface and surface\r\n # parratt method only\r\n def xr_roughness(self, interface = 0.1, surface = 0.1, rt = 1):\r\n \r\n XRR.re(self)\r\n \r\n self.rt = rt\r\n self.roughness[ 0] = interface\r\n self.roughness[-1] = surface\r\n \r\n q, inten = XRR.parratt(self)\r\n \r\n plt.plot(q, np.log(abs(rt*inten + ctr_optimised_result['substrate_ctr'])))\r\n plt.plot(q, np.log(abs(ctr_optimised_result['shkl'])))\r\n \r\n return q, inten\r\n \r\n # thickness modulation\r\n # homo slab method or parratt method\r\n def xr_thickness(self, miu = 0, mode = 'parratt', rt = 1):\r\n \r\n XRR.re(self)\r\n \r\n self.rt = rt\r\n self.d_space = (np.array(self.d_space) - miu).tolist()\r\n \r\n if mode == 'homo_slab':\r\n q, inten = XRR.homo_slab(self)\r\n elif mode == 'parratt':\r\n q, inten = XRR.parratt(self)\r\n \r\n plt.plot(q, np.log(abs(rt*inten + ctr_optimised_result['substrate_ctr'])))\r\n plt.plot(q, np.log(abs(ctr_optimised_result['shkl'])))\r\n \r\n return q, inten\r\n \r\n # export optimised xrr data to /data\r\n def save(self, mode = 'parratt'):\r\n \r\n xrr_optimised_result = {}\r\n \r\n if mode == 'homo_slab':\r\n q, inten = XRR.homo_slab(self)\r\n elif mode == 'parratt':\r\n q, inten = XRR.parratt(self)\r\n \r\n xrr_optimised_result['q'] = q\r\n xrr_optimised_result['inten'] = inten\r\n xrr_optimised_result['rt'] 
= self.rt\r\n \r\n f = open(XRR_PATH,'wb')\r\n pickle.dump(xrr_optimised_result, f)\r\n f.close()\r\n \r\n " ]
[ [ "numpy.convolve", "numpy.abs", "numpy.linspace", "numpy.min", "numpy.asarray", "numpy.concatenate", "numpy.max", "numpy.int", "numpy.linalg.pinv", "numpy.argmin", "numpy.array", "numpy.meshgrid", "numpy.zeros", "numpy.sum" ], [ "pandas.read_excel", "numpy.imag", "numpy.sqrt", "numpy.cos", "numpy.sin", "numpy.real", "numpy.interp", "numpy.array", "numpy.exp", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
miguelusque/NVTabular
[ "76e63d9df7b90433d552606e9cf87bd61d7eee3b", "e58d318a64d8c1607e91c10b9b5d4a8b48bcab69", "e58d318a64d8c1607e91c10b9b5d4a8b48bcab69" ]
[ "nvtabular/io/csv.py", "nvtabular/loader/torch.py", "examples/horovod/torch-nvt-horovod.py" ]
[ "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport functools\n\nimport dask.dataframe as dd\nimport dask_cudf\nimport numpy as np\nfrom dask.bytes import read_bytes\nfrom dask.utils import parse_bytes\nfrom fsspec.core import get_fs_token_paths\nfrom fsspec.utils import infer_compression\n\nfrom .dataset_engine import DatasetEngine\n\n\nclass CSVDatasetEngine(DatasetEngine):\n \"\"\"CSVDatasetEngine\n\n Thin wrapper around dask_cudf.read_csv.\n \"\"\"\n\n def __init__(self, paths, part_size, storage_options=None, cpu=False, **kwargs):\n super().__init__(paths, part_size, cpu=cpu, storage_options=storage_options)\n self._meta = {}\n self.csv_kwargs = kwargs\n self.csv_kwargs[\"storage_options\"] = storage_options\n\n # CSV reader needs a list of files\n # (Assume flat directory structure if this is a dir)\n if len(self.paths) == 1 and self.fs.isdir(self.paths[0]):\n self.paths = self.fs.glob(self.fs.sep.join([self.paths[0], \"*\"]))\n\n def to_ddf(self, columns=None, cpu=None):\n\n # Check if we are using cpu\n cpu = self.cpu if cpu is None else cpu\n if cpu:\n ddf = dd.read_csv(self.paths, blocksize=self.part_size, **self.csv_kwargs)\n else:\n ddf = dask_cudf.read_csv(self.paths, chunksize=self.part_size, **self.csv_kwargs)\n if columns:\n ddf = ddf[columns]\n return ddf\n\n @property\n @functools.lru_cache(1)\n def _file_partition_map(self):\n ind = 0\n _pp_map = {}\n for path, blocks in zip(\n *_byte_block_counts(\n self.paths,\n self.part_size,\n **self.csv_kwargs,\n )\n ):\n _pp_map[path.split(self.fs.sep)[-1]] = np.arange(ind, ind + blocks)\n ind += blocks\n return _pp_map\n\n def to_cpu(self):\n self.cpu = True\n\n def to_gpu(self):\n self.cpu = False\n\n\ndef _byte_block_counts(\n urlpath,\n blocksize,\n lineterminator=None,\n compression=\"infer\",\n storage_options=None,\n **kwargs,\n):\n \"\"\"Return a list of paths and block counts.\n\n Logic copied from dask.bytes.read_bytes\n \"\"\"\n\n if lineterminator is not None and len(lineterminator) == 1:\n kwargs[\"lineterminator\"] = lineterminator\n else:\n lineterminator = \"\\n\"\n\n if compression == \"infer\":\n paths = get_fs_token_paths(urlpath, mode=\"rb\", storage_options=storage_options)[2]\n compression = infer_compression(paths[0])\n\n if isinstance(blocksize, str):\n blocksize = parse_bytes(blocksize)\n if blocksize and compression:\n blocksize = None\n\n b_out = read_bytes(\n urlpath,\n delimiter=lineterminator.encode(),\n blocksize=blocksize,\n sample=False,\n compression=compression,\n include_path=True,\n **(storage_options or {}),\n )\n _, values, paths = b_out\n\n if not isinstance(values[0], (tuple, list)):\n values = [values]\n\n return paths, [len(v) for v in values]\n", "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless 
required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport pandas as pd\nimport torch\nfrom torch.utils.dlpack import from_dlpack\n\nfrom .backend import DataLoader\n\n\nclass IterDL(torch.utils.data.IterableDataset):\n def __init__(self, file_paths, batch_size=1, shuffle=False):\n self.file_paths = file_paths\n self.batch_size = batch_size\n self.shuffle = shuffle\n\n def __iter__(self):\n for file_path in self.file_paths:\n pdf = pd.read_parquet(file_path)\n for start in range(0, pdf.shape[0], self.batch_size):\n df = pdf[start : start + self.batch_size]\n if self.shuffle:\n df = df.sample(frac=1).reset_index(drop=True)\n yield df\n\n\nclass TorchAsyncItr(torch.utils.data.IterableDataset, DataLoader):\n \"\"\"This class creates batches of tensor. Each batch size is specified by the user.\n The data input requires an NVTabular dataset. Handles spillover to ensure all\n batches are the specified size until the final batch.\n\n Parameters\n -----------\n dataset : NVTabular dataset\n cats : [str]\n the list of categorical columns in the dataset\n conts : [str]\n the list of continuous columns in the dataset\n labels : [str]\n the list of label columns in the dataset\n batch_size : int\n the size of each batch to supply to the model\n shuffle : bool\n enable/disable shuffling of dataset\n parts_per_chunk : int\n number of partitions from the iterator, an NVTabular Dataset, to concatenate into a \"chunk\"\n devices : [int]\n list representing all available GPU IDs\n \"\"\"\n\n def __init__(\n self,\n dataset,\n cats=None,\n conts=None,\n labels=None,\n batch_size=1,\n shuffle=False,\n seed_fn=None,\n parts_per_chunk=1,\n device=None,\n global_size=None,\n global_rank=None,\n drop_last=False,\n ):\n DataLoader.__init__(\n self,\n dataset,\n cats,\n conts,\n labels,\n batch_size,\n shuffle,\n seed_fn=seed_fn,\n parts_per_chunk=parts_per_chunk,\n device=device,\n global_size=global_size,\n global_rank=global_rank,\n drop_last=drop_last,\n )\n\n def __iter__(self):\n return DataLoader.__iter__(self)\n\n def _get_device_ctx(self, dev):\n return torch.cuda.device(\"cuda:{}\".format(dev))\n\n def _to_tensor(self, gdf, dtype=None):\n dl_pack = gdf.to_dlpack()\n tensor = from_dlpack(dl_pack)\n return tensor.type(dtype)\n\n def _split_fn(self, tensor, idx, axis=0):\n return torch.split(tensor, idx, dim=axis)\n\n @property\n def _LONG_DTYPE(self):\n return torch.long\n\n @property\n def _FLOAT32_DTYPE(self):\n return torch.float32\n\n def _handle_tensors(self, cats, conts, labels):\n if isinstance(conts, torch.Tensor):\n conts = conts.clone()\n return cats, conts, labels\n\n\nclass DLDataLoader(torch.utils.data.DataLoader):\n \"\"\"\n This class is an extension of the torch dataloader.\n It is required to support the FastAI framework.\n \"\"\"\n\n def __len__(self):\n return len(self.dataset)\n", "import argparse\nimport glob\nimport os\nfrom time import time\n\nimport cupy\nimport torch\n\nimport nvtabular as nvt\nfrom nvtabular.framework_utils.torch.models import Model\nfrom nvtabular.framework_utils.torch.utils import process_epoch\nfrom nvtabular.loader.torch import DLDataLoader, TorchAsyncItr\n\n# Horovod must be the last import to avoid conflicts\nimport horovod.torch as hvd # noqa: E402, isort:skip\n\n\nparser = 
argparse.ArgumentParser(description=\"Train a multi-gpu model with Torch and Horovod\")\nparser.add_argument(\"--dir_in\", default=None, help=\"Input directory\")\nparser.add_argument(\"--batch_size\", default=None, help=\"Batch size\")\nparser.add_argument(\"--cats\", default=None, help=\"Categorical columns\")\nparser.add_argument(\"--cats_mh\", default=None, help=\"Categorical multihot columns\")\nparser.add_argument(\"--conts\", default=None, help=\"Continuous columns\")\nparser.add_argument(\"--labels\", default=None, help=\"Label columns\")\nparser.add_argument(\"--epochs\", default=1, help=\"Training epochs\")\nargs = parser.parse_args()\n\nhvd.init()\n\ngpu_to_use = hvd.local_rank()\n\nif torch.cuda.is_available():\n torch.cuda.set_device(gpu_to_use)\n\n\nBASE_DIR = os.path.expanduser(args.dir_in or \"./data/\")\nBATCH_SIZE = int(args.batch_size) or 16384 # Batch Size\nCATEGORICAL_COLUMNS = args.cats or [\"movieId\", \"userId\"] # Single-hot\nCATEGORICAL_MH_COLUMNS = args.cats_mh or [\"genres\"] # Multi-hot\nNUMERIC_COLUMNS = args.conts or []\n\n# Output from ETL-with-NVTabular\nTRAIN_PATHS = sorted(glob.glob(os.path.join(BASE_DIR, \"train\", \"*.parquet\")))\n\nproc = nvt.Workflow.load(os.path.join(BASE_DIR, \"workflow/\"))\n\nEMBEDDING_TABLE_SHAPES = nvt.ops.get_embedding_sizes(proc)\n\n\n# TensorItrDataset returns a single batch of x_cat, x_cont, y.\ndef collate_fn(x):\n return x\n\n\n# Seed with system randomness (or a static seed)\ncupy.random.seed(None)\n\n\ndef seed_fn():\n \"\"\"\n Generate consistent dataloader shuffle seeds across workers\n\n Reseeds each worker's dataloader each epoch to get fresh a shuffle\n that's consistent across workers.\n \"\"\"\n\n max_rand = torch.iinfo(torch.int).max // hvd.size()\n\n # Generate a seed fragment\n seed_fragment = cupy.random.randint(0, max_rand)\n\n # Aggregate seed fragments from all Horovod workers\n seed_tensor = torch.tensor(seed_fragment)\n reduced_seed = hvd.allreduce(seed_tensor, name=\"shuffle_seed\", op=hvd.mpi_ops.Sum) % max_rand\n\n return reduced_seed\n\n\ntrain_dataset = TorchAsyncItr(\n nvt.Dataset(TRAIN_PATHS),\n batch_size=BATCH_SIZE,\n cats=CATEGORICAL_COLUMNS + CATEGORICAL_MH_COLUMNS,\n conts=NUMERIC_COLUMNS,\n labels=[\"rating\"],\n device=gpu_to_use,\n global_size=hvd.size(),\n global_rank=hvd.rank(),\n shuffle=True,\n seed_fn=seed_fn,\n)\ntrain_loader = DLDataLoader(\n train_dataset, batch_size=None, collate_fn=collate_fn, pin_memory=False, num_workers=0\n)\n\n\nEMBEDDING_TABLE_SHAPES_TUPLE = (\n {\n CATEGORICAL_COLUMNS[0]: EMBEDDING_TABLE_SHAPES[CATEGORICAL_COLUMNS[0]],\n CATEGORICAL_COLUMNS[1]: EMBEDDING_TABLE_SHAPES[CATEGORICAL_COLUMNS[1]],\n },\n {CATEGORICAL_MH_COLUMNS[0]: EMBEDDING_TABLE_SHAPES[CATEGORICAL_MH_COLUMNS[0]]},\n)\n\nmodel = Model(\n embedding_table_shapes=EMBEDDING_TABLE_SHAPES_TUPLE,\n num_continuous=0,\n emb_dropout=0.0,\n layer_hidden_dims=[128, 128, 128],\n layer_dropout_rates=[0.0, 0.0, 0.0],\n).cuda()\n\nlr_scaler = hvd.size()\n\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01 * lr_scaler)\n\nhvd.broadcast_parameters(model.state_dict(), root_rank=0)\nhvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\noptimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())\n\nfor epoch in range(args.epochs):\n start = time()\n print(f\"Training epoch {epoch}\")\n train_loss, y_pred, y = process_epoch(train_loader, model, train=True, optimizer=optimizer)\n hvd.join(gpu_to_use)\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n print(f\"Epoch 
{epoch:02d}. Train loss: {train_loss:.4f}.\")\n hvd.join(gpu_to_use)\n t_final = time() - start\n total_rows = train_dataset.num_rows_processed\n print(\n f\"run_time: {t_final} - rows: {total_rows} - \"\n f\"epochs: {epoch} - dl_thru: {total_rows / t_final}\"\n )\n\n\nhvd.join(gpu_to_use)\nif hvd.local_rank() == 0:\n print(\"Training complete\")\n" ]
[ [ "numpy.arange" ], [ "pandas.read_parquet", "torch.utils.dlpack.from_dlpack", "torch.split" ], [ "torch.cuda.set_device", "torch.iinfo", "torch.cuda.is_available", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
raamana/cca_zoo
[ "7137918a6bac098ec20ba998d1774d5335c178dd" ]
[ "cca_zoo/data/simulated.py" ]
[ "import itertools\nfrom typing import List, Union\n\nimport numpy as np\nfrom scipy import linalg\nfrom scipy.linalg import block_diag\n\nfrom ..utils.check_values import _process_parameter\n\n\ndef generate_covariance_data(n: int, view_features: List[int], latent_dims: int = 1,\n view_sparsity: List[Union[int, float]] = None,\n correlation: Union[List[float], float] = 1,\n structure: Union[str, List[str]] = None, sigma: List[float] = None, decay: float = 0.5,\n positive=None):\n \"\"\"\n Function to generate CCA dataset with defined population correlation\n\n :param view_sparsity: level of sparsity in features in each view either as number of active variables or percentage active\n :param view_features: number of features in each view\n :param n: number of samples\n :param latent_dims: number of latent dimensions\n :param signal: correlation\n :param structure: within view covariance structure\n :param sigma: gaussian sigma\n :param decay: ratio of second signal to first signal\n :return: tuple of numpy arrays: view_1, view_2, true weights from view 1, true weights from view 2, overall covariance structure\n\n :Example:\n\n >>> from cca_zoo.data import generate_covariance_data\n >>> [train_view_1,train_view_2],[true_weights_1,true_weights_2]=generate_covariance_data(200,[10,10],latent_dims=1,correlation=1)\n \"\"\"\n structure = _process_parameter('structure', structure, 'identity', len(view_features))\n view_sparsity = _process_parameter('view_sparsity', view_sparsity, 1, len(view_features))\n positive = _process_parameter('positive', positive, False, len(view_features))\n sigma = _process_parameter('sigma', sigma, 0.5, len(view_features))\n completed = False\n while not completed:\n try:\n mean = np.zeros(sum(view_features))\n if not isinstance(correlation, list):\n p = np.arange(0, latent_dims)\n correlation = correlation * decay ** p\n covs = []\n true_features = []\n for view_p, sparsity, view_structure, view_positive, view_sigma in zip(view_features, view_sparsity,\n structure,\n positive, sigma):\n # Covariance Bit\n if view_structure == 'identity':\n cov_ = np.eye(view_p)\n elif view_structure == 'gaussian':\n cov_ = _generate_gaussian_cov(view_p, view_sigma)\n elif view_structure == 'toeplitz':\n cov_ = _generate_toeplitz_cov(view_p, view_sigma)\n elif view_structure == 'random':\n cov_ = _generate_random_cov(view_p)\n else:\n completed = True\n print(\"invalid structure\")\n break\n weights = np.random.normal(size=(view_p, latent_dims))\n if sparsity <= 1:\n sparsity = np.ceil(sparsity * view_p).astype('int')\n if sparsity < view_p:\n mask = np.stack(\n (np.concatenate(([0] * (view_p - sparsity), [1] * sparsity)).astype(bool),) * latent_dims,\n axis=0).T\n np.random.shuffle(mask)\n while np.sum(np.unique(mask, axis=1, return_counts=True)[1] > 1) > 0 or np.sum(\n np.sum(mask, axis=0) == 0) > 0:\n np.random.shuffle(mask)\n weights = weights * mask\n if view_positive:\n weights[weights < 0] = 0\n weights = _decorrelate_dims(weights, cov_)\n weights /= np.sqrt(np.diag((weights.T @ cov_ @ weights)))\n true_features.append(weights)\n covs.append(cov_)\n\n cov = block_diag(*covs)\n\n splits = np.concatenate(([0], np.cumsum(view_features)))\n\n for i, j in itertools.combinations(range(len(splits) - 1), 2):\n cross = np.zeros((view_features[i], view_features[j]))\n for _ in range(latent_dims):\n A = correlation[_] * np.outer(true_features[i][:, _], true_features[j][:, _])\n # Cross Bit\n cross += covs[i] @ A @ covs[j]\n cov[splits[i]: splits[i] + view_features[i], splits[j]: splits[j] 
+ view_features[j]] = cross\n cov[splits[j]: splits[j] + view_features[j], splits[i]: splits[i] + view_features[i]] = cross.T\n\n X = np.zeros((n, sum(view_features)))\n chol = np.linalg.cholesky(cov)\n for _ in range(n):\n X[_, :] = _chol_sample(mean, chol)\n views = np.split(X, np.cumsum(view_features)[:-1], axis=1)\n completed = True\n except:\n completed = False\n return views, true_features\n\n\ndef generate_simple_data(n: int, view_features: List[int], view_sparsity: List[int] = None,\n eps: float = 0):\n \"\"\"\n\n :param n: number of samples\n :param view_features: number of features view 1\n :param view_sparsity: number of features view 2\n :param eps: gaussian noise std\n :return: view1 matrix, view2 matrix, true weights view 1, true weights view 2\n\n :Example:\n\n >>> from cca_zoo.data import generate_simple_data\n >>> [train_view_1,train_view_2],[true_weights_1,true_weights_2]=generate_covariance_data(200,[10,10])\n \"\"\"\n z = np.random.normal(0, 1, n)\n views = []\n true_features = []\n for p, sparsity in zip(view_features, view_sparsity):\n weights = np.random.normal(size=(p, 1))\n if sparsity > 0:\n if sparsity < 1:\n sparsity = np.ceil(sparsity * p).astype('int')\n weights[np.random.choice(np.arange(p), p - sparsity, replace=False)] = 0\n\n gaussian_x = np.random.normal(0, eps, (n, p))\n view = np.outer(z, weights)\n view += gaussian_x\n views.append(view)\n true_features.append(weights)\n return views, true_features\n\n\ndef _decorrelate_dims(up, cov):\n A = up.T @ cov @ up\n for k in range(1, A.shape[0]):\n up[:, k:] -= np.outer(up[:, k - 1], A[k - 1, k:] / A[k - 1, k - 1])\n A = up.T @ cov @ up\n return up\n\n\ndef _chol_sample(mean, chol):\n return mean + chol @ np.random.standard_normal(mean.size)\n\n\ndef _gaussian(x, mu, sig, dn):\n \"\"\"\n Generate a gaussian covariance matrix\n\n :param x:\n :param mu:\n :param sig:\n :param dn:\n \"\"\"\n return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.))) * dn / (np.sqrt(2 * np.pi) * sig)\n\n\ndef _generate_gaussian_cov(p, sigma):\n x = np.linspace(-1, 1, p)\n x_tile = np.tile(x, (p, 1))\n mu_tile = np.transpose(x_tile)\n dn = 2 / (p - 1)\n cov = _gaussian(x_tile, mu_tile, sigma, dn)\n cov /= cov.max()\n return cov\n\n\ndef _generate_toeplitz_cov(p, sigma):\n c = np.arange(0, p)\n c = sigma ** c\n cov = linalg.toeplitz(c, c)\n return cov\n\n\ndef _generate_random_cov(p):\n cov_ = np.random.rand(p, p)\n U, S, Vt = np.linalg.svd(cov_.T @ cov_)\n cov = U @ (1 + np.diag(np.random.rand(p))) @ Vt\n return cov\n" ]
[ [ "numpy.diag", "numpy.sqrt", "numpy.linspace", "numpy.cumsum", "numpy.concatenate", "numpy.linalg.svd", "numpy.unique", "numpy.arange", "numpy.eye", "numpy.ceil", "numpy.outer", "numpy.zeros", "numpy.power", "numpy.random.rand", "numpy.linalg.cholesky", "numpy.transpose", "numpy.sum", "scipy.linalg.toeplitz", "scipy.linalg.block_diag", "numpy.random.standard_normal", "numpy.tile", "numpy.random.shuffle", "numpy.random.normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.12", "0.10" ], "tensorflow": [] } ]
Holldean/Recommender-System
[ "6a93e6ee970b32c76e2f71043383bf24a7e865d5" ]
[ "Recommender_System/algorithm/NeuMF/train.py" ]
[ "from Recommender_System.algorithm.NeuMF.model import NeuMF_model\r\nfrom Recommender_System.algorithm.train import train, test\r\nimport tensorflow as tf\r\n\r\n\r\ndef train_with_pretrain(n_user, n_item, train_data, test_data, topk_data, gmf_dim, mlp_dim, layers, l2):\r\n neumf_model, gmf_model, mlp_model = NeuMF_model(n_user, n_item, gmf_dim=gmf_dim, mlp_dim=mlp_dim, layers=layers, l2=l2)\r\n print('预训练GMF部分')\r\n train(gmf_model, train_data, test_data, topk_data, epochs=10, batch=512)\r\n print('预训练MLP部分')\r\n train(mlp_model, train_data, test_data, topk_data, epochs=10, batch=512)\r\n\r\n out_kernel = tf.concat((gmf_model.get_layer('gmf_out').get_weights()[0], mlp_model.get_layer('mlp_out').get_weights()[0]), 0)\r\n out_bias = gmf_model.get_layer('gmf_out').get_weights()[1] + mlp_model.get_layer('mlp_out').get_weights()[1]\r\n neumf_model.get_layer('out').set_weights([out_kernel * 0.5, out_bias * 0.5])\r\n\r\n test(neumf_model, train_data, test_data, topk_data, batch=512)\r\n train(neumf_model, train_data, test_data, topk_data, optimizer=tf.keras.optimizers.SGD(0.0001), epochs=10, batch=512)\r\n\r\n\r\ndef train_without_pretrain(n_user, n_item, train_data, test_data, topk_data, gmf_dim, mlp_dim, layers, l2):\r\n neumf_model, _, _ = NeuMF_model(n_user, n_item, gmf_dim=gmf_dim, mlp_dim=mlp_dim, layers=layers, l2=l2)\r\n train(neumf_model, train_data, test_data, topk_data, epochs=10, batch=512)\r\n" ]
[ [ "tensorflow.keras.optimizers.SGD" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
dingjr27/nerf
[ "b0e0554022f66d65705d3134c4cfdd71429eb574" ]
[ "test_nerf.py" ]
[ "import os, sys\n# os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\nimport tensorflow as tf\ntf.compat.v1.enable_eager_execution()\n\nsys.path.append(r'/home/luca/Desktop/NERFPosit/Inference')\n\nimport numpy as np\nimport imageio\nimport json\nimport random\nimport time\nimport pprint\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nimport matplotlib.pyplot as plt\n\nimport run_nerf\n\nfrom load_llff import load_llff_data\nfrom load_deepvoxels import load_dv_data\nfrom load_blender import load_blender_data\n\nbasedir = './logs'\nexpname = 'fern_example'\n\nconfig = os.path.join(basedir, expname, 'config.txt')\nprint('Args:')\nprint(open(config, 'r').read())\nparser = run_nerf.config_parser()\n\nargs = parser.parse_args('--config {} --ft_path {}'.format(config, os.path.join(basedir, expname, 'model_200000.npy')))\nprint('loaded args')\n\nimages, poses, bds, render_poses, i_test = load_llff_data(args.datadir, args.factor,\n recenter=True, bd_factor=.75,\n spherify=args.spherify)\nH, W, focal = poses[0,:3,-1].astype(np.float32)\n\nH = int(H)\nW = int(W)\nhwf = [H, W, focal]\n\nimages = images.astype(np.float32)\nposes = poses.astype(np.float32)\n\nif args.no_ndc:\n near = tf.reduce_min(bds) * .9\n far = tf.reduce_max(bds) * 1.\nelse:\n near = 0.\n far = 1.\n\n# Create nerf model\n_, render_kwargs_test, start, grad_vars, models = run_nerf.create_nerf(args)\n\nprint(models['model'].input)\nmodel = models['model']\nprint(model.summary())\n#extractor = keras.Model(inputs=model.inputs,\n # outputs=model.layers[1].output)\n#embed_fn, input_ch = run_nerf.get_embedder(10,1)\n#embed_fn1, input_ch = run_nerf.get_embedder(4,1)\n#a = embed_fn(tf.constant([[0.5,0.5,0.5]]))\n#b = embed_fn1(tf.constant([[0.5,0.5,0.5]]))\n#c = tf.concat([a,b],1)\n#print(c.shape)\n#print(extractor.predict(c))\n#exit(0)\n#features = extractor()\n\nbds_dict = {\n 'near' : tf.cast(near, tf.float32),\n 'far' : tf.cast(far, tf.float32),\n}\nrender_kwargs_test.update(bds_dict)\n\nprint('Render kwargs:')\npprint.pprint(render_kwargs_test)\n\n\ndown = 4\nrender_kwargs_fast = {k : render_kwargs_test[k] for k in render_kwargs_test}\nrender_kwargs_fast['N_importance'] = 0\n\nc2w = np.eye(4)[:3,:4].astype(np.float32) # identity pose matrix\ntest = run_nerf.render(H//down, W//down, focal/down, c2w=c2w, **render_kwargs_fast)\n\nimg = np.clip(test[0],0,1)\nplt.imshow(img)\nplt.show()\n\n" ]
[ [ "matplotlib.pyplot.imshow", "tensorflow.reduce_max", "numpy.clip", "numpy.eye", "tensorflow.cast", "tensorflow.compat.v1.enable_eager_execution", "tensorflow.reduce_min", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.4", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
kyeeh/holbertonschool-machine_learning
[ "8e4894c2b036ec7f4750de5bf99b95aee5b94449", "8e4894c2b036ec7f4750de5bf99b95aee5b94449", "8e4894c2b036ec7f4750de5bf99b95aee5b94449", "8e4894c2b036ec7f4750de5bf99b95aee5b94449", "8e4894c2b036ec7f4750de5bf99b95aee5b94449", "8e4894c2b036ec7f4750de5bf99b95aee5b94449" ]
[ "math/0x06-multivariate_prob/3-main.py", "supervised_learning/0x07-cnn/1-main.py", "supervised_learning/0x0A-object_detection/0-main.py", "supervised_learning/0x00-binary_classification/2-main.py", "supervised_learning/0x06-keras/0-sequential.py", "supervised_learning/0x06-keras/6-train.py" ]
[ "#!/usr/bin/env python3\n\nif __name__ == '__main__':\n import numpy as np\n from multinormal import MultiNormal\n\n np.random.seed(0)\n data = np.random.multivariate_normal([12, 30, 10], [[36, -30, 15], [-30, 100, -20], [15, -20, 25]], 10000).T\n mn = MultiNormal(data)\n x = np.random.multivariate_normal([12, 30, 10], [[36, -30, 15], [-30, 100, -20], [15, -20, 25]], 1).T\n print(x)\n print(mn.pdf(x))\n", "#!/usr/bin/env python3\n\nimport matplotlib.pyplot as plt\nimport numpy as np\npool_forward = __import__('1-pool_forward').pool_forward\n\nif __name__ == \"__main__\":\n np.random.seed(0)\n lib = np.load('../data/MNIST.npz')\n X_train = lib['X_train']\n m, h, w = X_train.shape\n X_train_a = X_train.reshape((-1, h, w, 1))\n X_train_b = 1 - X_train_a\n X_train_c = np.concatenate((X_train_a, X_train_b), axis=3)\n\n print(X_train_c.shape)\n plt.imshow(X_train_c[0, :, :, 0])\n plt.show()\n plt.imshow(X_train_c[0, :, :, 1])\n plt.show()\n A = pool_forward(X_train_c, (2, 2), stride=(2, 2))\n print(A.shape)\n plt.imshow(A[0, :, :, 0])\n plt.show()\n plt.imshow(A[0, :, :, 1])\n plt.show()\n", "#!/usr/bin/env python3\n\nif __name__ == '__main__':\n import numpy as np\n Yolo = __import__('0-yolo').Yolo\n\n np.random.seed(0)\n anchors = np.array([[[116, 90], [156, 198], [373, 326]],\n [[30, 61], [62, 45], [59, 119]],\n [[10, 13], [16, 30], [33, 23]]])\n yolo = Yolo('../data/yolo.h5', '../data/coco_classes.txt', 0.6, 0.5, anchors)\n yolo.model.summary()\n print('Class names:', yolo.class_names)\n print('Class threshold:', yolo.class_t)\n print('NMS threshold:', yolo.nms_t)\n print('Anchor boxes:', yolo.anchors)\n", "#!/usr/bin/env python3\n\nimport numpy as np\n\nNeuron = __import__('2-neuron').Neuron\n\nlib_train = np.load('../data/Binary_Train.npz')\nX_3D, Y = lib_train['X'], lib_train['Y']\nX = X_3D.reshape((X_3D.shape[0], -1)).T\n\nnp.random.seed(0)\nneuron = Neuron(X.shape[0])\nneuron._Neuron__b = 1\nA = neuron.forward_prop(X)\nif (A is neuron.A):\n print(A)\n", "#!/usr/bin/env python3\n\"\"\"\nKeras Module\n\"\"\"\nimport tensorflow.keras as K\n\n\ndef build_model(nx, layers, activations, lambtha, keep_prob):\n \"\"\"\n Builds a neural network with the Keras library\n\n nx is the number of input features to the network\n layers is a list containing the number of nodes in each layer\n activations is a list containing the functions used for each layer\n lambtha is the L2 regularization parameter\n keep_prob is the probability that a node will be kept for dropout\n\n Returns: the keras model\n \"\"\"\n km = K.Sequential()\n for i in range(len(layers)):\n km.add(K.layers.Dense(units=layers[i], activation=activations[i],\n kernel_regularizer=K.regularizers.l2(lambtha),\n input_shape=(nx,)))\n if i != (len(layers) - 1):\n km.add(K.layers.Dropout(1 - keep_prob))\n return km\n", "#!/usr/bin/env python3\n\"\"\"\nKeras Module\n\"\"\"\nimport tensorflow.keras as K\n\n\ndef train_model(network, data, labels, batch_size, epochs,\n validation_data=None, early_stopping=False, patience=0,\n verbose=True, shuffle=False):\n \"\"\"\n Trains a model using mini-batch gradient descent\n\n network is the model to train\n data is a numpy.ndarray of shape (m, nx) containing the input data\n labels is a one-hot numpy.ndarray of shape (m, classes) containing the\n labels of data\n batch_size is the size of the batch used for mini-batch gradient descent\n epochs is the number of passes through data for minibatch gradient descent\n verbose is a boolean that determines if output should be printed during\n training\n 
shuffle is a boolean that determines whether to shuffle the batches every\n epoch. Normally, it is a good idea to shuffle, but for reproducibility, we\n have chosen to set the default to False.\n validation_data is the data to validate the model with, if not None\n early_stopping is a bool that indicates whether early-stop should be used\n early stopping should only be performed if validation_data exists\n early stopping should be based on validation loss\n patience is the patience used for early stopping\n\n Returns: the History object generated after training the model\n \"\"\"\n stop_call = None\n if validation_data:\n stop_call = [K.callbacks.EarlyStopping(patience=patience)]\n return network.fit(x=data, y=labels, batch_size=batch_size,\n callbacks=stop_call, epochs=epochs, shuffle=shuffle,\n validation_data=validation_data, verbose=verbose)\n" ]
[ [ "numpy.random.multivariate_normal", "numpy.random.seed" ], [ "matplotlib.pyplot.imshow", "numpy.random.seed", "numpy.concatenate", "numpy.load", "matplotlib.pyplot.show" ], [ "numpy.array", "numpy.random.seed" ], [ "numpy.load", "numpy.random.seed" ], [ "tensorflow.keras.layers.Dropout", "tensorflow.keras.regularizers.l2", "tensorflow.keras.Sequential" ], [ "tensorflow.keras.callbacks.EarlyStopping" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
mbonnema/SWAV
[ "d5dd4dd1a88de008f27b0232c536491c7dc84623" ]
[ "src/an_FilterS1.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 28 11:35:01 2021\n\n@author: mbonnema\n\"\"\"\n\nimport numpy as np\ndef FilterS1(D,A,WE,LE):\n D_f = {}\n A_f = {}\n WE_f = {}\n LE_f = {}\n for key in D:\n dates = D[key]\n areas = A[key]\n werrors = WE[key]\n lerrors = LE[key]\n \n \n d_f = []\n a_f = []\n we_f = []\n le_f = []\n \n for d,a,we,le in zip(dates,areas,werrors,lerrors):\n #print(a)\n if we < 0:\n we = 0\n if le < 0:\n le = 0\n if a > 0:\n if we/a > 0.1:\n #print('fail 1')\n continue\n if a > 0:\n if le/a > 0.1:\n #print('fail 2')\n continue\n #print('passed')\n d_f.append(d)\n a_f.append(a)\n we_f.append(we)\n le_f.append(le)\n a_std = np.std(np.array(a_f))\n a_mean = np.mean(np.array(a_f))\n d_f = np.array(d_f)[np.array([a_f<=(a_mean+a_std*3),a_f>=(a_mean-a_std*3)]).all(axis=0)]\n we_f = np.array(we_f)[np.array([a_f<=(a_mean+a_std*3),a_f>=(a_mean-a_std*3)]).all(axis=0)]\n le_f = np.array(le_f)[np.array([a_f<=(a_mean+a_std*3),a_f>=(a_mean-a_std*3)]).all(axis=0)]\n a_f = np.array(a_f)[np.array([a_f<=(a_mean+a_std*3),a_f>=(a_mean-a_std*3)]).all(axis=0)]\n D_f[key] = d_f\n A_f[key] = a_f\n WE_f[key] = we_f\n LE_f[key] = le_f\n \n return(D_f,A_f,WE_f,LE_f)\n \n " ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jialuechen/augustus
[ "d4fbda427e3d9c60896b0e22c06cd593b484ef9d" ]
[ "augustus/custom/trade_log_analysis.py" ]
[ "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table_experiments as dt\nimport pandas as pd\nimport plotly\nfrom dash.dependencies import Input, Output, State\nfrom plotly import graph_objs as go\n\nfrom augustus.systemabase_env import augustusEnvBase\n\nTRADE_LOG = augustusEnvBase.full_trade_log\n\nAPP = dash.Dash()\nAPP.scripts.config.serve_locally = True\n\nAPP.layout = html.Div([\n html.H4('augustus Trade Log Analysis'),\n dt.DataTable(\n rows=TRADE_LOG.to_dict('records'),\n\n row_selectable=True,\n filterable=True,\n sortable=True,\n selected_row_indices=[],\n id='trade_log'\n ),\n\n dcc.Graph(\n id='drawdown_pnl'\n ),\n\n dcc.Graph(\n id='run_up_pnl'\n ),\n\n], className=\"container\")\n\n\[email protected](\n Output('trade_log', 'selected_row_indices'),\n [Input('drawdown_pnl', 'clickData')],\n [State('trade_log', 'selected_row_indices')])\ndef update_selected_row_indices(clickData, selected_row_indices):\n if clickData:\n for point in clickData['points']:\n if point['pointNumber'] in selected_row_indices:\n selected_row_indices.remove(point['pointNumber'])\n else:\n selected_row_indices.append(point['pointNumber'])\n\n return selected_row_indices\n\n\[email protected](\n Output('drawdown_pnl', 'figure'),\n [Input('trade_log', 'rows'),\n Input('trade_log', 'selected_row_indices')])\ndef update_run_up_figure(rows, selected_row_indices):\n\n dff = pd.DataFrame(rows)\n profit_diff = dff.loc[dff.returns_diff > 0]\n loss_diff = dff.loc[dff.returns_diff < 0]\n\n fig = plotly.tools.make_subplots(\n rows=1, cols=1,\n shared_xaxes=True)\n\n fig['layout'].update(dict(title='Profit & Loss vs Run-up'))\n fig['layout']['xaxis'].update(dict(title='Run-up(%)'))\n fig['layout']['yaxis'].update(dict(title='Profit & Loss(%)'))\n\n fig.append_trace({\n 'x': profit_diff['run_up']*100,\n 'y': profit_diff['returns_diff']*100,\n 'text': profit_diff.entry_date + ' to ' + profit_diff.exit_date,\n 'type': 'scatter',\n 'marker': dict(color='black'),\n 'mode': 'markers',\n 'name': 'win',\n 'line': {'width': 1}\n }, 1, 1)\n fig.append_trace({\n 'x': loss_diff['run_up']*100,\n 'y': -loss_diff['returns_diff']*100,\n 'type': 'scatter',\n 'text': loss_diff.entry_date + ' to ' + loss_diff.exit_date,\n 'marker': dict(color='red'),\n 'mode': 'markers',\n 'name': 'lose',\n 'line': {'width': 1}\n }, 1, 1)\n\n fig.append_trace({\n 'x': [0, 10],\n 'y': [0, 10],\n 'type': 'scatter',\n 'mode': 'lines',\n 'name': 'Win diagonal',\n 'line': {'width': 1}\n }, 1, 1)\n\n return fig\n\n\[email protected](\n Output('run_up_pnl', 'figure'),\n [Input('trade_log', 'rows'),\n Input('trade_log', 'selected_row_indices')])\ndef update__drawdown_figure(rows, selected_row_indices):\n\n dff = pd.DataFrame(rows)\n profit_diff = dff.loc[dff.returns_diff > 0]\n loss_diff = dff.loc[dff.returns_diff < 0]\n\n fig = plotly.tools.make_subplots(\n rows=1, cols=1,\n shared_xaxes=True)\n fig['layout'].update(dict(title='Profit & Loss vs Drawdown'))\n fig['layout']['xaxis'].update(dict(title='Drawdown(%)'))\n fig['layout']['yaxis'].update(dict(title='Profit & Loss(%)'))\n\n fig.append_trace({\n 'x': profit_diff['drawdown']*100,\n 'y': profit_diff['returns_diff']*100,\n 'type': 'scatter',\n 'marker': dict(color='black'),\n 'text': profit_diff.entry_date + ' to ' + profit_diff.exit_date,\n 'mode': 'markers',\n 'name': 'win',\n 'line': {'width': 1}\n }, 1, 1)\n\n fig.append_trace({\n 'x': loss_diff['drawdown']*100,\n 'y': -loss_diff['returns_diff']*100,\n 'text': loss_diff.entry_date + ' to ' + 
loss_diff.exit_date,\n 'type': 'scatter',\n 'marker': dict(color='red'),\n 'mode': 'markers',\n 'name': 'lose',\n 'line': {'width': 1}\n }, 1, 1)\n\n fig.append_trace({\n 'x': [0, 10],\n 'y': [0, 10],\n 'type': 'scatter',\n 'mode': 'lines',\n 'name': 'Loss diagonal',\n 'line': {'width': 1}\n }, 1, 1)\n\n return fig\n\n\nif __name__ == '__main__':\n APP.run_server(debug=True)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
tjulitianyi1997/mindspore
[ "c802a8c31fe2b51530d932fdd364824e45264b12" ]
[ "tests/ut/python/parallel/test_reshape.py" ]
[ "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom mindspore.train import Model, ParallelMode\nfrom mindspore.nn.loss import SoftmaxCrossEntropyWithLogits\nfrom mindspore.nn.optim.momentum import Momentum\nfrom mindspore import Tensor\nimport mindspore as ms\nimport numpy as np\nfrom mindspore.ops import operations as P\nimport mindspore.nn as nn\nfrom mindspore.common.parameter import Parameter\nfrom tests.dataset_mock import MindData\nfrom mindspore import context\nfrom tests.ut.python.ops.test_math_ops import VirtualLoss\nfrom mindspore.common.api import _executor\nfrom mindspore.ops import composite as C\nfrom mindspore.ops.operations.comm_ops import _VirtualDataset\nfrom mindspore.ops import functional as F\nfrom mindspore.common.parameter import ParameterTuple\nfrom mindspore.common import dtype as mstype\nfrom mindspore.parallel import set_algo_parameters\ncontext.set_context(mode=context.GRAPH_MODE)\ncontext.reset_auto_parallel_context()\n\nclass Dataset(MindData):\n def __init__(self, predict, label, length=3, input_num=2):\n super(Dataset, self).__init__(size=length)\n self.predict = predict\n self.label = label\n self.index = 0\n self.length = length\n self.input_num = input_num\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.length:\n raise StopIteration\n self.index += 1\n if self.input_num == 2:\n return self.predict, self.label\n else:\n return self.predict,\n\n def reset(self):\n self.index = 0\n\n\nclass ReshapeNet(nn.Cell):\n def __init__(self, strategy0, strategy1, strategy2):\n super(ReshapeNet, self).__init__()\n self.relu = P.ReLU().set_strategy(strategy0)\n self.reshape = P.Reshape().set_strategy(strategy1)\n self.matmul = P.MatMul().set_strategy(strategy2)\n self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name=\"weight\")\n\n def construct(self, x):\n x = self.relu(x)\n x = self.reshape(x, (256, 25088))\n x = self.matmul(x, self.matmul_weight)\n return x\n\n\ndef reshape_net(strategy0, strategy1, strategy2):\n return ReshapeNet(strategy0=strategy0, strategy1=strategy1, strategy2=strategy2)\n\n\ndef reshape_common(parallel_mode, strategy0, strategy1, strategy2, strategy_loss):\n batch_size = 32\n learning_rate = 0.1\n momentum = 0.9\n epoch_size = 2\n\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)\n predict = Tensor(np.ones([32, 512, 7, 7]), dtype=ms.float32)\n label = Tensor(np.ones([32]), dtype=ms.int32)\n dataset = Dataset(predict, label, 2)\n net = reshape_net(strategy0, strategy1, strategy2)\n\n loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)\n loss.softmax_cross_entropy.set_strategy(strategy_loss)\n loss.one_hot.set_strategy(((8,1), (), ()))\n opt = Momentum(net.trainable_params(), learning_rate, momentum)\n model = Model(net, loss, opt)\n model.train(epoch_size, dataset, dataset_sink_mode=False)\n\n\ndef test_reshape1():\n strategy0 = ((8, 1, 1, 1), 
)\n strategy1 = None\n strategy2 = ((8, 1), (1, 1))\n strategy_loss = ((8, 1), (8, 1))\n reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n\n\ndef test_reshape1_strategy_1():\n strategy0 = ((8, 1, 1, 1), )\n strategy1 = ((8, 1, 1, 1), )\n strategy2 = ((8, 1), (1, 1))\n strategy_loss = ((8, 1), (8, 1))\n try:\n reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n except:\n pass\n\n\ndef test_reshape1_strategy_2():\n strategy0 = ((8, 1, 1, 1), )\n strategy1 = ((8, 1, 1, 1), )\n strategy2 = ((8, 1), (1, 1))\n strategy_loss = ((8, 1), (8, 1))\n try:\n reshape_common(ParallelMode.AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n except:\n pass\n\n\ndef test_reshape2():\n strategy0 = ((8, 1, 1, 1), )\n strategy1 = None\n strategy2 = ((8, 1), (1, 1))\n strategy_loss = ((8, 1), (8, 1))\n reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n\n\ndef test_reshape3():\n strategy0 = ((2, 1, 1, 1), )\n strategy1 = None\n strategy2 = ((8, 1), (1, 1))\n strategy_loss = ((8, 1), (8, 1))\n reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n\n\ndef test_reshape4():\n strategy0 = ((1, 1, 1, 1), )\n strategy1 = None\n strategy2 = ((8, 1), (1, 1))\n strategy_loss = ((8, 1), (8, 1))\n reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n\n\ndef test_reshape5():\n strategy0 = ((2, 1, 1, 1), )\n strategy1 = None\n strategy2 = ((1, 8), (8, 1))\n strategy_loss = ((8, 1), (8, 1))\n reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n\n\ndef test_reshape_auto():\n strategy0 = None\n strategy1 = None\n strategy2 = None\n strategy_loss = None\n reshape_common(ParallelMode.AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n\n\nclass NetWithLoss(nn.Cell):\n def __init__(self, network):\n super(NetWithLoss, self).__init__()\n self.loss = VirtualLoss()\n self.network = network\n\n def construct(self, x):\n predict = self.network(x)\n return self.loss(predict)\n\n\nclass GradWrap(nn.Cell):\n def __init__(self, network):\n super(GradWrap, self).__init__()\n self.network = network\n\n def construct(self, x):\n return C.grad_all(self.network)(x)\n\n\nclass ReshapeNet1(nn.Cell):\n def __init__(self, strategy0):\n super(ReshapeNet1, self).__init__()\n self.virtual_dataset = _VirtualDataset()\n self.reshape = P.Reshape()\n self.matmul = P.MatMul().set_strategy(strategy0)\n self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name=\"weight\")\n self.reshape2 = P.Reshape()\n\n def construct(self, x):\n x = self.virtual_dataset(x)\n x = self.reshape(x, (256, 25088))\n x = self.matmul(x, self.matmul_weight)\n x = self.reshape2(x, (256 * 256,))\n return x\n\n\nclass ReshapeNet2(nn.Cell):\n def __init__(self, strategy0):\n super(ReshapeNet2, self).__init__()\n self.virtual_dataset = _VirtualDataset()\n self.reshape = P.Reshape()\n self.matmul = P.MatMul().set_strategy(strategy0)\n self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name=\"weight\")\n self.reshape2 = P.Reshape()\n self.reduce_sum = P.ReduceSum(keep_dims=True)\n self.reshape3 = P.Reshape()\n\n def construct(self, x):\n x = self.virtual_dataset(x)\n x = self.reshape(x, (256, 25088))\n x = self.matmul(x, self.matmul_weight)\n x = self.reshape2(x, (256 * 256,))\n x = self.reduce_sum(x, -1)\n x = self.reshape3(x, ())\n return 
x\n\n\nclass ReshapeNet3(nn.Cell):\n def __init__(self, strategy0):\n super(ReshapeNet3, self).__init__()\n self.virtual_dataset = _VirtualDataset()\n self.reshape = P.Reshape()\n self.matmul = P.MatMul().set_strategy(strategy0)\n self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name=\"weight\")\n self.reshape2 = P.Reshape()\n self.reduce_sum = P.ReduceSum(keep_dims=False)\n self.reshape3 = P.Reshape()\n\n def construct(self, x):\n x = self.virtual_dataset(x)\n x = self.reshape(x, (256, 25088))\n x = self.matmul(x, self.matmul_weight)\n x = self.reshape2(x, (256 * 256,))\n x = self.reduce_sum(x, -1)\n x = self.reshape3(x, (1, 1))\n return x\n\n\nclass ReshapeNet4(nn.Cell):\n def __init__(self, strategy0):\n super(ReshapeNet4, self).__init__()\n self.virtual_dataset = _VirtualDataset()\n self.reshape = P.Reshape()\n self.reshape2 = P.Reshape()\n self.matmul = P.MatMul().set_strategy(strategy0)\n self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name=\"weight\")\n\n def construct(self, x):\n x = self.virtual_dataset(x)\n x = self.reshape(x, (256, 25088))\n w = self.reshape2(self.matmul_weight, (25088, 256))\n x = self.matmul(x, w)\n return x\n\n\nclass ReshapeNet5(nn.Cell):\n def __init__(self, strategy0):\n super(ReshapeNet5, self).__init__()\n self.virtual_dataset = _VirtualDataset()\n self.reshape = P.Reshape()\n self.matmul1 = P.MatMul().set_strategy(strategy0)\n self.matmul1_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name=\"weight\")\n self.matmul2 = P.MatMul().set_strategy(strategy0)\n\n def construct(self, x):\n x = self.virtual_dataset(x)\n x = self.reshape(x, (256, 25088))\n matmul1_o = self.matmul1(x, self.matmul1_weight)\n matmul2_o = self.matmul2(matmul1_o, x)\n return matmul2_o\n\n\nclass ReshapeNet6(nn.Cell):\n def __init__(self, strategy0):\n super(ReshapeNet6, self).__init__()\n self.virtual_dataset = _VirtualDataset()\n self.reshape = P.Reshape()\n self.matmul1_1 = P.MatMul().set_strategy(strategy0)\n self.matmul1_2 = P.MatMul().set_strategy(strategy0)\n self.matmul1_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name=\"weight\")\n self.matmul2 = P.MatMul().set_strategy(strategy0)\n self.add = P.TensorAdd()\n\n def construct(self, x):\n x = self.virtual_dataset(x)\n x = self.reshape(x, (256, 25088))\n matmul1_1_o = self.matmul1_1(x, self.matmul1_weight)\n matmul1_2_o = self.matmul1_2(x, self.matmul1_weight)\n matmul1_o = self.add(matmul1_1_o, matmul1_2_o)\n matmul2_o = self.matmul2(matmul1_o, x)\n return matmul2_o\n\n\ndef reshape_net2(backbone):\n batch_size = 16\n device_num = 16\n context.set_auto_parallel_context(device_num=device_num, global_rank=0)\n input = Tensor(np.ones([batch_size * device_num, 512, 7, 7]).astype(np.float32) * 0.01)\n\n net = GradWrap(NetWithLoss(backbone))\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\")\n \n _executor.compile(net, input)\n\n\ndef test_reshape_net1_1():\n reshape_net2(ReshapeNet1(((1, 8), (8, 1))))\n\n\ndef test_reshape_net1_2():\n reshape_net2(ReshapeNet1(((1, 8), (8, 2))))\n\n\ndef test_reshape_net2_1():\n reshape_net2(ReshapeNet2(((1, 8), (8, 1))))\n\n\ndef test_reshape_net2_2():\n reshape_net2(ReshapeNet2(((1, 8), (8, 2))))\n\n\ndef test_reshape_net3_1():\n reshape_net2(ReshapeNet3(((1, 8), (8, 1))))\n\n\ndef test_reshape_net3_2():\n reshape_net2(ReshapeNet3(((1, 8), (8, 2))))\n\n\ndef test_reshape_net4_1():\n try:\n reshape_net2(ReshapeNet4(((1, 8), (8, 1))))\n except:\n pass\n\n\ndef 
test_reshape_net4_2():\n try:\n reshape_net2(ReshapeNet4(((1, 8), (8, 2))))\n except:\n pass\n\n\ndef test_reshape_net5_1():\n reshape_net2(ReshapeNet5(((1, 8), (8, 1))))\n\n\ndef test_reshape_net5_2():\n reshape_net2(ReshapeNet5(((1, 8), (8, 2))))\n\n\ndef test_reshape_net6_1():\n reshape_net2(ReshapeNet6(((1, 8), (8, 1))))\n\n\ndef test_reshape_net6_2():\n reshape_net2(ReshapeNet6(((1, 8), (8, 2))))\n\n\nclass TrainOneStepCell(nn.Cell):\n \"\"\"\n Network training package class.\n\n Append an optimizer to the training network after that the construct function\n can be called to create the backward graph.\n\n Args:\n network (Cell): The training network.\n optimizer (Cell): Optimizer for updating the weights.\n sens (Number): The adjust parameter. Default: 1.0.\n\n Examples:\n >>> net = Net()\n >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()\n >>> optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n >>> loss_net = WithLossCell(net, loss_fn)\n >>> train_net = TrainOneStepCell(loss_net, optim)\n \"\"\"\n def __init__(self, network, optimizer, sens=1.0):\n super(TrainOneStepCell, self).__init__(auto_prefix=False)\n self.network = network\n self.network.add_flags(defer_inline=True)\n self.weights = ParameterTuple(network.trainable_params())\n self.optimizer = optimizer\n self.grad = C.GradOperation('grad',\n get_by_list=True,\n sens_param=True)\n self.sens = sens\n\n def construct(self, data):\n weights = self.weights\n loss = self.network(data)\n sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)\n grads = self.grad(self.network, weights)(data, sens)\n\n return F.depend(loss, self.optimizer(grads))\n\n\ndef reshape_common2(parallel_mode, net):\n batch_size = 16\n learning_rate = 0.1\n momentum = 0.9\n epoch_size = 2\n\n predict = Tensor(np.ones([batch_size, 512, 7, 7]), dtype=ms.float32)\n label = Tensor(np.ones([batch_size]), dtype=ms.int32)\n dataset = Dataset(predict, label, 2, input_num=1)\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=16)\n\n opt = Momentum(net.trainable_params(), learning_rate, momentum)\n train_net = TrainOneStepCell(net, opt).set_train()\n model = Model(train_net)\n model.train(epoch_size, dataset, dataset_sink_mode=False)\n\n\ndef test_reshape_common2_0():\n reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet1(((1, 8), (8, 1))))\n\n\ndef test_reshape_common2_1():\n reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet1(((1, 8), (8, 2))))\n\n\ndef test_reshape_common2_2():\n reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet2(((1, 8), (8, 1))))\n\n\ndef test_reshape_common2_3():\n reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet2(((1, 8), (8, 2))))\n\n\ndef test_reshape_common2_4():\n reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet3(((1, 8), (8, 1))))\n\n\ndef test_reshape_common2_5():\n reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet3(((1, 8), (8, 2))))\n\n\nclass BatchNormReshapeNet(nn.Cell):\n def __init__(self):\n super(BatchNormReshapeNet, self).__init__()\n self.vd = P._VirtualDataset()\n self.batch_norm = nn.BatchNorm1d(512, affine=False)\n self.reshape = P.Reshape()\n self.prelu = nn.PReLU(channel=256)\n\n def construct(self, x):\n x = self.vd(x)\n x = self.batch_norm(x)\n x = self.reshape(x, (512, 256))\n x = self.prelu(x)\n return x\n\n\ndef test_batchnorm_reshape_train():\n batch_size = 16\n device_num = 16\n context.set_auto_parallel_context(device_num=device_num, global_rank=0)\n 
context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\")\n input = Tensor(np.ones([batch_size * device_num, 512]).astype(np.float32) * 0.01)\n\n net = GradWrap(NetWithLoss(BatchNormReshapeNet()))\n \n _executor.compile(net, input)\n\n\ndef bn_with_initialize(out_channels):\n bn = nn.BatchNorm2d(out_channels, momentum=0.3, eps=1e-5).add_flags_recursive(fp32=True)\n return bn\n\n\ndef fc_with_initialize(input_channels, out_channels):\n return nn.Dense(input_channels, out_channels).add_flags_recursive(fp16=True)\n\n\nclass BNReshapeDenseBNNet(nn.Cell):\n def __init__(self):\n super(BNReshapeDenseBNNet, self).__init__()\n self.batch_norm = bn_with_initialize(2)\n self.reshape = P.Reshape()\n self.cast = P.Cast()\n self.batch_norm2 = nn.BatchNorm1d(512, affine=False)\n self.fc = fc_with_initialize(2 * 32 * 32, 512)\n\n def construct(self, x):\n x = self.batch_norm(x)\n x = self.reshape(x, (16, 2*32*32))\n x = self.fc(x)\n x = self.batch_norm2(x)\n return x\n\n\ndef test_bn_reshape_dense_bn_train():\n batch_size = 16\n device_num = 16\n context.set_auto_parallel_context(device_num=device_num, global_rank=0)\n input = Tensor(np.ones([batch_size, 2, 32, 32]).astype(np.float32) * 0.01)\n\n net = GradWrap(NetWithLoss(BNReshapeDenseBNNet()))\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\")\n \n _executor.compile(net, input)\n\n\nclass ParallelReduceMeanNet(nn.Cell):\n def __init__(self, conv_in_channel, conv_out_channel,\n reducemean_keep_dims=False, reducemean_axis=-1, strategy=None):\n super().__init__()\n self.conv = nn.Conv2d(in_channels=conv_in_channel, out_channels=conv_out_channel,\n kernel_size=1, stride=1, pad_mode='valid', has_bias=True,\n weight_init='ones', bias_init='ones')\n self.reduce_mean = P.ReduceMean(keep_dims=reducemean_keep_dims)\n self.flat = nn.Flatten()\n self.reducemean_axis = reducemean_axis\n if strategy is not None:\n self.reduce_mean.set_strategy(strategy)\n\n def construct(self, inputs):\n x = self.conv(inputs)\n x = self.reduce_mean(x, self.reducemean_axis)\n x = self.flat(x)\n return x\n\n\nclass CrossEntropyLoss(nn.Cell):\n def __init__(self, reduction='mean'):\n super(CrossEntropyLoss, self).__init__()\n\n self.reduce_mean = P.ReduceMean()\n self.cross_entropy = SoftmaxCrossEntropyWithLogits()\n self.reduction = reduction\n\n def construct(self, logits, label):\n loss = self.cross_entropy(logits, label)\n if self.reduction == 'mean':\n loss = self.reduce_mean(loss, (-1,))\n return loss\n\n\ndef test_flatten_reshape(parallel_mode=\"auto_parallel\"):\n batch_size = 16\n learning_rate = 0.1\n momentum = 0.9\n epoch_size = 2\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)\n net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_axis=(2, 3), strategy=((4, 2, 1, 1),))\n loss = CrossEntropyLoss()\n predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)\n label = Tensor(np.ones([batch_size, 64]), dtype=ms.float32)\n dataset = Dataset(predict, label, 2, input_num=2)\n\n opt = Momentum(net.trainable_params(), learning_rate, momentum)\n model = Model(net, loss_fn = loss, optimizer=opt)\n model.train(epoch_size, dataset, dataset_sink_mode=False)\n\n\ndef test_flatten_reshape2(parallel_mode=\"auto_parallel\"):\n batch_size = 16\n learning_rate = 0.1\n momentum = 0.9\n epoch_size = 2\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)\n 
set_algo_parameters(fully_use_devices=False)\n net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_axis=(2, 3), strategy=((4, 1, 1, 1),))\n loss = CrossEntropyLoss()\n predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)\n label = Tensor(np.ones([batch_size, 64]), dtype=ms.float32)\n dataset = Dataset(predict, label, 2, input_num=2)\n\n opt = Momentum(net.trainable_params(), learning_rate, momentum)\n model = Model(net, loss_fn = loss, optimizer=opt)\n model.train(epoch_size, dataset, dataset_sink_mode=False)\n\n\nclass ParallelReshapeNet(nn.Cell):\n def __init__(self, dense_in_channel, dense_out_channel, shape, strategy=None):\n super().__init__()\n self.flat = nn.Flatten()\n self.dense = nn.Dense(in_channels=dense_in_channel,\n out_channels=dense_out_channel,\n weight_init='ones',\n bias_init='ones',\n has_bias=True)\n self.reshape = P.Reshape()\n self.shape = shape\n self.reshape.set_strategy(strategy)\n\n def construct(self, inputs):\n x = self.flat(inputs)\n x = self.dense(x)\n x = self.reshape(x, self.shape)\n return x\n\n\n# the shape of input and output of reshape is the same\n# reshape is optimized before step_parallel\ndef test_flatten_reshape3(parallel_mode=\"auto_parallel\"):\n batch_size = 16\n learning_rate = 0.1\n momentum = 0.9\n epoch_size = 2\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)\n set_algo_parameters(fully_use_devices=False)\n net = ParallelReshapeNet(dense_in_channel=2048, dense_out_channel=1000, shape=(128, 1000), strategy=((16, 1),))\n loss = CrossEntropyLoss()\n predict = Tensor(np.ones([batch_size, 1, 2, 1024]), dtype=ms.float32)\n label = Tensor(np.ones([batch_size, 1000]), dtype=ms.float32)\n dataset = Dataset(predict, label, 2, input_num=2)\n\n opt = Momentum(net.trainable_params(), learning_rate, momentum)\n model = Model(net, loss_fn = loss, optimizer=opt)\n model.train(epoch_size, dataset, dataset_sink_mode=False)\n\n\nclass CrossEntropyLoss2(nn.Cell):\n def __init__(self, reduction='mean'):\n super(CrossEntropyLoss2, self).__init__()\n self.cross_entropy = SoftmaxCrossEntropyWithLogits(reduction=reduction)\n\n def construct(self, logits, label):\n loss = self.cross_entropy(logits, label)\n return loss\n\n\ndef test_flatten_reshape4(parallel_mode=\"semi_auto_parallel\"):\n batch_size = 16\n learning_rate = 0.1\n momentum = 0.9\n epoch_size = 2\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)\n set_algo_parameters(fully_use_devices=False)\n net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_keep_dims=True, strategy=((4, 1, 1, 1),))\n loss = CrossEntropyLoss2()\n predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)\n label = Tensor(np.ones([batch_size, 2048]), dtype=ms.float32)\n dataset = Dataset(predict, label, 2, input_num=2)\n\n opt = Momentum(net.trainable_params(), learning_rate, momentum)\n model = Model(net, loss_fn=loss, optimizer=opt)\n model.train(epoch_size, dataset, dataset_sink_mode=False)\n" ]
[ [ "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
angseung/torch_cifar10
[ "3160f749f3bffd941d6c0fb98ddaad63d4e5641d" ]
[ "models/clnet.py" ]
[ "'''\nCrossLink Network\n'''\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef swish(x):\n return x * x.sigmoid()\n\n\ndef mish(x):\n return x * torch.tanh(F.softplus(x))\n\n\nclass CrossLinkBlock(nn.Module):\n '''Cross-Link Block'''\n\n def __init__(self, in_channels, out_channels, kernel_size, pool_enable):\n super(CrossLinkBlock, self).__init__()\n\n self.pool_enable = pool_enable\n self.ReLU = nn.ReLU()\n\n # basic blocks\n self.dconv1_1 = nn.Conv2d(in_channels,\n in_channels,\n kernel_size=kernel_size[0],\n stride=1,\n padding='same',\n groups=1,\n bias=False)\n\n self.dconv1_2 = nn.Conv2d(in_channels,\n in_channels,\n kernel_size=kernel_size[1],\n stride=1,\n padding='same',\n groups=1,\n bias=False)\n\n self.bn1 = nn.BatchNorm2d(in_channels)\n self.bn2 = nn.BatchNorm2d(in_channels)\n\n self.pconv = nn.Conv2d(in_channels,\n out_channels,\n kernel_size=3,\n stride=1,\n padding='same',\n groups=1,\n bias=False)\n\n self.bn3 = nn.BatchNorm2d(out_channels)\n\n self.maxpool = nn.MaxPool2d(2, 2)\n\n def forward(self, x):\n '''add forward here'''\n\n out1 = self.dconv1_1(x)\n out2 = self.dconv1_2(x)\n\n out1 = torch.mul(out1, self.ReLU(out1))\n out2 = torch.mul(out1, self.ReLU(out2))\n\n out = self.bn1(out1) + self.bn2(out2)\n out = self.bn3(self.pconv(out))\n\n if self.pool_enable:\n out = self.maxpool(out)\n\n return out\n\n\nclass CLNET(nn.Module):\n def __init__(self, cfg, num_classes=10):\n super(CLNET, self).__init__()\n self.cfg = cfg\n\n self.conv1 = nn.Conv2d(3,\n 32,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False)\n\n self.bn1 = nn.BatchNorm2d(32)\n self.pool1 = nn.MaxPool2d(2, 2)\n\n self.conv2 = nn.Conv2d(32,\n 32,\n kernel_size=3,\n stride=1,\n padding=1,\n groups=1,\n bias=False)\n\n self.bn2 = nn.BatchNorm2d(32)\n\n self.conv3 = nn.Conv2d(32,\n 16,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=False)\n\n self.layers = self._make_layers(in_channels=16)\n self.linear = nn.Linear(cfg['out_channels'][-1], num_classes)\n\n def _make_layers(self, in_channels):\n layers = []\n cfg = [self.cfg[k] for k in ['out_channels', 'kernel_size', 'pool_enable']]\n\n for out_channels, kernel_size, pool_enable in zip(*cfg):\n layers.append(\n CrossLinkBlock(in_channels,\n out_channels,\n kernel_size,\n pool_enable))\n in_channels = out_channels\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = mish(self.bn1(self.pool1(self.conv1(x)))) # conv block\n out = self.conv3(swish(self.bn2(self.conv2(out)))) # sep block\n out = self.layers(out)\n out = F.adaptive_avg_pool2d(out, 1)\n out = out.view(out.size(0), -1)\n dropout_rate = self.cfg['dropout_rate']\n if self.training and dropout_rate > 0:\n out = F.dropout(out, p=dropout_rate)\n out = self.linear(out)\n return out\n\n\ndef CLNet_V0(num_classes):\n cfg = {\n 'out_channels': [24, 40, 80, 112, 160],\n 'kernel_size': [(5, 3), (3, 5), (3, 3), (5, 5), (3, 3)],\n 'pool_enable': [True, True, True, True, False],\n 'dropout_rate': 0.2\n }\n return CLNET(cfg, num_classes=num_classes)\n\n\nimport torchinfo\n\n\ndef test():\n net = CLNet_V0(10)\n torchinfo.summary(net, (1, 3, 32, 32))\n x = torch.randn(3, 3, 32, 32, device='cuda')\n y = net(x)\n print(y.shape)\n\n\nif __name__ == '__main__':\n test()\n" ]
[ [ "torch.nn.Sequential", "torch.nn.functional.dropout", "torch.randn", "torch.nn.Conv2d", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.functional.softplus" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TheVikJ/SUAVE
[ "eff37d167a4318ba8ba77dff873422c89db489b2" ]
[ "JinaAI/utils/get_data.py" ]
[ "import json\nimport requests\nimport pandas as pd\nimport os\n\nbaseurl = \"http://exploreapiswith.tech/api/\"\n\n\ncategories = json.loads(requests.get(\n baseurl + \"category\").text)\n\n\ndef get_category_api(category_name=None):\n category_apis = json.loads(requests.get(\n baseurl + \"category/\" + category_name).text)\n return category_apis\n\n\napi_list = []\nfor category in categories:\n\n api = get_category_api(category)\n api_list += api\n\nif os.path.exists(\"data/apis.json\"):\n os.remove(\"data/apis.json\")\n\n\nif os.path.exists(\"data/apis.csv\"):\n os.remove(\"data/apis.csv\")\n\nwith open(r\"data/apis.json\", \"x\") as f:\n json.dump(api_list, f)\n\n\njson_file = pd.read_json(r\"data/apis.json\")\njson_file.to_csv(r\"data/apis.csv\", index=False)\n" ]
[ [ "pandas.read_json" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
jdailey/EnergyPATHWAYS
[ "0fb0ead475b6395f6b07fc43fe6c85826ee47d0f" ]
[ "energyPATHWAYS/tests/test_time_series.py" ]
[ "# -*- coding: utf-8 -*-\n__author__ = 'Ben, Ryan, Michael'\n\nimport numpy as np\nfrom collections import defaultdict\nimport pandas as pd\nimport energyPATHWAYS\nfrom energyPATHWAYS.time_series import TimeSeries\nimport unittest\nfrom matplotlib import pyplot as plt\n\n\nclass TestTimeSeries(unittest.TestCase):\n def setUp(self):\n self.methods = ('linear_interpolation',\n 'linear_regression',\n 'logistic',\n 'nearest',\n 'quadratic',\n 'cubic',\n 'exponential',\n 'none',\n 'decay_towards_linear_regression',\n 'average')\n\n def _help_test_clean_empty_data(self):\n newindex = np.arange(2000, 2051)\n\n x = np.array([])\n y = np.array([])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_clean_empty_data(self):\n self.assertRaises(IndexError, self._help_test_clean_empty_data)\n\n def test_clean_one_point(self):\n newindex = np.arange(2000, 2051)\n \n x = np.array([2010])\n y = np.array([.1])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_clean_two_points(self):\n newindex = np.arange(2000, 2051)\n \n x = np.array([2010, 2050])\n y = np.array([.1, .5])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_clean_three_points(self):\n newindex = np.arange(2000, 2051)\n \n x = np.array([2010, 2018, 2025])\n y = np.array([.8, .7, .4])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_clean_scurve_points(self):\n newindex = np.arange(2000, 2051)\n \n x = np.array([2010, 2018, 2025, 2040, 2050])\n y = np.array([.8, .7, .4, .35, .34])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_clean_linear_points(self):\n newindex = np.arange(2000, 2051)\n \n x = np.array([2010, 2020, 2030, 2040, 2050])\n y = np.array([.1, .2, .3, .4, .5])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_clean_quadratic_points(self):\n newindex = np.arange(2000, 2051)\n \n x = np.arange(2010, 2030)\n y = (x-2010)**2\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_three_zeros(self):\n # this has been a problem with logistic curve fitting\n newindex = np.arange(2000, 2051)\n\n x = np.array([2010, 2011, 2013])\n y = np.array([0, 0, 0])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_two_zeros(self):\n newindex = np.arange(2000, 2051)\n\n x = np.array([2010, 2013])\n y = np.array([0, 0])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def run_all_cleaning_methods(self, x, y, newindex):\n for method in self.methods:\n data = pd.DataFrame(y, index=x)\n newdata = TimeSeries.clean(data,\n newindex=newindex,\n interpolation_method=(None if method=='decay_towards_linear_regression' else method), # not supported for linear regression\n extrapolation_method=method)\n\n\n#newindex = np.arange(2015, 2025)\n\nnewindex = np.arange(2012, 2017)\nx = np.array([2015, 2018, 2020])\ny = np.array([.8, .7, .4])\ndata = pd.DataFrame(y, index=x)\nnewdata = TimeSeries.clean(data, newindex=newindex, interpolation_method='linear_interpolation', extrapolation_method='nearest')\n#\n#\n#newindex = np.arange(2020, 2025)\n#multi_data = pd.concat([data]*3, keys=['a', 'b', 'c'], names=['dummy', 'year'])\n#newdata2 = TimeSeries.clean(multi_data, time_index_name='year', newindex=newindex, interpolation_method='linear_interpolation', extrapolation_method='nearest')\n\n\nnewindex = np.arange(2015, 2050)\nmulti_data = pd.concat([data]*3, keys=['a', 'b', 'c'], names=['dummy', 'year'])\nnewdata2 = TimeSeries.clean(multi_data, time_index_name='year', newindex=newindex, interpolation_method='nearest', extrapolation_method='exponential')\n\n\n#raw_values = 
pd.read_csv('raw_values_example_for_clean_timeseries.csv')\n#raw_values.set_index(['us', 'efficiency_type', 'supply_node', 'year'], inplace=True)\n#raw_values.sort_index(inplace=True)\n#\n#newindex = [2015]\n#newdata3 = TimeSeries.clean(raw_values, time_index_name='year', newindex=newindex, interpolation_method='linear_interpolation', extrapolation_method='nearest')\n#\n#print newdata3\n\n\nnewindex = np.arange(2012, 2030)\nx = np.array([2015, 2016, 2018, 2020, 2021, 2025])\ny = np.array([.8, np.inf, .7, .4, np.inf, np.nan])\ndata = pd.DataFrame(y, index=x)\nnewdata = TimeSeries.clean(data, newindex=newindex, interpolation_method='linear_interpolation', extrapolation_method='exponential')\n\n\n" ]
[ [ "numpy.arange", "pandas.concat", "numpy.array", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
ikamensh/scipy
[ "d645404be21b7c0b1e7ba24bf8d525b624aeb848", "d645404be21b7c0b1e7ba24bf8d525b624aeb848", "d645404be21b7c0b1e7ba24bf8d525b624aeb848" ]
[ "scipy/io/matlab/mio5.py", "scipy/fftpack/tests/test_pseudo_diffs.py", "scipy/optimize/_shgo_lib/triangulation.py" ]
[ "''' Classes for read / write of matlab (TM) 5 files\n\nThe matfile specification last found here:\n\nhttps://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf\n\n(as of December 5 2008)\n'''\n'''\n=================================\n Note on functions and mat files\n=================================\n\nThe document above does not give any hints as to the storage of matlab\nfunction handles, or anonymous function handles. I had, therefore, to\nguess the format of matlab arrays of ``mxFUNCTION_CLASS`` and\n``mxOPAQUE_CLASS`` by looking at example mat files.\n\n``mxFUNCTION_CLASS`` stores all types of matlab functions. It seems to\ncontain a struct matrix with a set pattern of fields. For anonymous\nfunctions, a sub-fields of one of these fields seems to contain the\nwell-named ``mxOPAQUE_CLASS``. This seems to contain:\n\n* array flags as for any matlab matrix\n* 3 int8 strings\n* a matrix\n\nIt seems that whenever the mat file contains a ``mxOPAQUE_CLASS``\ninstance, there is also an un-named matrix (name == '') at the end of\nthe mat file. I'll call this the ``__function_workspace__`` matrix.\n\nWhen I saved two anonymous functions in a mat file, or appended another\nanonymous function to the mat file, there was still only one\n``__function_workspace__`` un-named matrix at the end, but larger than\nthat for a mat file with a single anonymous function, suggesting that\nthe workspaces for the two functions had been merged.\n\nThe ``__function_workspace__`` matrix appears to be of double class\n(``mxCLASS_DOUBLE``), but stored as uint8, the memory for which is in\nthe format of a mini .mat file, without the first 124 bytes of the file\nheader (the description and the subsystem_offset), but with the version\nU2 bytes, and the S2 endian test bytes. There follow 4 zero bytes,\npresumably for 8 byte padding, and then a series of ``miMATRIX``\nentries, as in a standard mat file. 
The ``miMATRIX`` entries appear to\nbe series of un-named (name == '') matrices, and may also contain arrays\nof this same mini-mat format.\n\nI guess that:\n\n* saving an anonymous function back to a mat file will need the\n associated ``__function_workspace__`` matrix saved as well for the\n anonymous function to work correctly.\n* appending to a mat file that has a ``__function_workspace__`` would\n involve first pulling off this workspace, appending, checking whether\n there were any more anonymous functions appended, and then somehow\n merging the relevant workspaces, and saving at the end of the mat\n file.\n\nThe mat files I was playing with are in ``tests/data``:\n\n* sqr.mat\n* parabola.mat\n* some_functions.mat\n\nSee ``tests/test_mio.py:test_mio_funcs.py`` for the debugging\nscript I was working with.\n\n'''\n\n# Small fragments of current code adapted from matfile.py by Heiko\n# Henkelmann; parts of the code for simplify_cells=True adapted from\n# http://blog.nephics.com/2019/08/28/better-loadmat-for-scipy/.\n\nimport os\nimport time\nimport sys\nimport zlib\n\nfrom io import BytesIO\n\nimport warnings\n\nimport numpy as np\nfrom numpy.compat import asbytes, asstr\n\nimport scipy.sparse\n\nfrom .byteordercodes import native_code, swapped_code\n\nfrom .miobase import (MatFileReader, docfiller, matdims, read_dtype,\n arr_to_chars, arr_dtype_number, MatWriteError,\n MatReadError, MatReadWarning)\n\n# Reader object for matlab 5 format variables\nfrom .mio5_utils import VarReader5\n\n# Constants and helper objects\nfrom .mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES,\n NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8,\n miUTF8, miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS,\n mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS,\n mxDOUBLE_CLASS, mclass_info, mat_struct)\n\nfrom .streams import ZlibInputStream\n\n\ndef _has_struct(elem):\n \"\"\"Determine if elem is an array and if first array item is a struct.\"\"\"\n return (isinstance(elem, np.ndarray) and (elem.size > 0) and\n isinstance(elem[0], mat_struct))\n\n\ndef _inspect_cell_array(ndarray):\n \"\"\"Construct lists from cell arrays (loaded as numpy ndarrays), recursing\n into items if they contain mat_struct objects.\"\"\"\n elem_list = []\n for sub_elem in ndarray:\n if isinstance(sub_elem, mat_struct):\n elem_list.append(_matstruct_to_dict(sub_elem))\n elif _has_struct(sub_elem):\n elem_list.append(_inspect_cell_array(sub_elem))\n else:\n elem_list.append(sub_elem)\n return elem_list\n\n\ndef _matstruct_to_dict(matobj):\n \"\"\"Construct nested dicts from mat_struct objects.\"\"\"\n d = {}\n for f in matobj._fieldnames:\n elem = matobj.__dict__[f]\n if isinstance(elem, mat_struct):\n d[f] = _matstruct_to_dict(elem)\n elif _has_struct(elem):\n d[f] = _inspect_cell_array(elem)\n else:\n d[f] = elem\n return d\n\n\ndef _simplify_cells(d):\n \"\"\"Convert mat objects in dict to nested dicts.\"\"\"\n for key in d:\n if isinstance(d[key], mat_struct):\n d[key] = _matstruct_to_dict(d[key])\n elif _has_struct(d[key]):\n d[key] = _inspect_cell_array(d[key])\n return d\n\n\nclass MatFile5Reader(MatFileReader):\n ''' Reader for Mat 5 mat files\n Adds the following attribute to base class\n\n uint16_codec - char codec to use for uint16 char arrays\n (defaults to system default codec)\n\n Uses variable reader that has the following stardard interface (see\n abstract class in ``miobase``::\n\n __init__(self, file_reader)\n read_header(self)\n array_from_header(self)\n\n and added interface::\n\n set_stream(self, stream)\n 
read_full_tag(self)\n\n '''\n @docfiller\n def __init__(self,\n mat_stream,\n byte_order=None,\n mat_dtype=False,\n squeeze_me=False,\n chars_as_strings=True,\n matlab_compatible=False,\n struct_as_record=True,\n verify_compressed_data_integrity=True,\n uint16_codec=None,\n simplify_cells=False):\n '''Initializer for matlab 5 file format reader\n\n %(matstream_arg)s\n %(load_args)s\n %(struct_arg)s\n uint16_codec : {None, string}\n Set codec to use for uint16 char arrays (e.g., 'utf-8').\n Use system default codec if None\n '''\n super(MatFile5Reader, self).__init__(\n mat_stream,\n byte_order,\n mat_dtype,\n squeeze_me,\n chars_as_strings,\n matlab_compatible,\n struct_as_record,\n verify_compressed_data_integrity,\n simplify_cells)\n # Set uint16 codec\n if not uint16_codec:\n uint16_codec = sys.getdefaultencoding()\n self.uint16_codec = uint16_codec\n # placeholders for readers - see initialize_read method\n self._file_reader = None\n self._matrix_reader = None\n\n def guess_byte_order(self):\n ''' Guess byte order.\n Sets stream pointer to 0 '''\n self.mat_stream.seek(126)\n mi = self.mat_stream.read(2)\n self.mat_stream.seek(0)\n return mi == b'IM' and '<' or '>'\n\n def read_file_header(self):\n ''' Read in mat 5 file header '''\n hdict = {}\n hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header']\n hdr = read_dtype(self.mat_stream, hdr_dtype)\n hdict['__header__'] = hdr['description'].item().strip(b' \\t\\n\\000')\n v_major = hdr['version'] >> 8\n v_minor = hdr['version'] & 0xFF\n hdict['__version__'] = '%d.%d' % (v_major, v_minor)\n return hdict\n\n def initialize_read(self):\n ''' Run when beginning read of variables\n\n Sets up readers from parameters in `self`\n '''\n # reader for top level stream. We need this extra top-level\n # reader because we use the matrix_reader object to contain\n # compressed matrices (so they have their own stream)\n self._file_reader = VarReader5(self)\n # reader for matrix streams\n self._matrix_reader = VarReader5(self)\n\n def read_var_header(self):\n ''' Read header, return header, next position\n\n Header has to define at least .name and .is_global\n\n Parameters\n ----------\n None\n\n Returns\n -------\n header : object\n object that can be passed to self.read_var_array, and that\n has attributes .name and .is_global\n next_position : int\n position in stream of next variable\n '''\n mdtype, byte_count = self._file_reader.read_full_tag()\n if not byte_count > 0:\n raise ValueError(\"Did not read any bytes\")\n next_pos = self.mat_stream.tell() + byte_count\n if mdtype == miCOMPRESSED:\n # Make new stream from compressed data\n stream = ZlibInputStream(self.mat_stream, byte_count)\n self._matrix_reader.set_stream(stream)\n check_stream_limit = self.verify_compressed_data_integrity\n mdtype, byte_count = self._matrix_reader.read_full_tag()\n else:\n check_stream_limit = False\n self._matrix_reader.set_stream(self.mat_stream)\n if not mdtype == miMATRIX:\n raise TypeError('Expecting miMATRIX type here, got %d' % mdtype)\n header = self._matrix_reader.read_header(check_stream_limit)\n return header, next_pos\n\n def read_var_array(self, header, process=True):\n ''' Read array, given `header`\n\n Parameters\n ----------\n header : header object\n object with fields defining variable header\n process : {True, False} bool, optional\n If True, apply recursive post-processing during loading of\n array.\n\n Returns\n -------\n arr : array\n array with post-processing applied or not according to\n `process`.\n '''\n return 
self._matrix_reader.array_from_header(header, process)\n\n def get_variables(self, variable_names=None):\n ''' get variables from stream as dictionary\n\n variable_names - optional list of variable names to get\n\n If variable_names is None, then get all variables in file\n '''\n if isinstance(variable_names, str):\n variable_names = [variable_names]\n elif variable_names is not None:\n variable_names = list(variable_names)\n\n self.mat_stream.seek(0)\n # Here we pass all the parameters in self to the reading objects\n self.initialize_read()\n mdict = self.read_file_header()\n mdict['__globals__'] = []\n while not self.end_of_stream():\n hdr, next_position = self.read_var_header()\n name = asstr(hdr.name)\n if name in mdict:\n warnings.warn('Duplicate variable name \"%s\" in stream'\n ' - replacing previous with new\\n'\n 'Consider mio5.varmats_from_mat to split '\n 'file into single variable files' % name,\n MatReadWarning, stacklevel=2)\n if name == '':\n # can only be a matlab 7 function workspace\n name = '__function_workspace__'\n # We want to keep this raw because mat_dtype processing\n # will break the format (uint8 as mxDOUBLE_CLASS)\n process = False\n else:\n process = True\n if variable_names is not None and name not in variable_names:\n self.mat_stream.seek(next_position)\n continue\n try:\n res = self.read_var_array(hdr, process)\n except MatReadError as err:\n warnings.warn(\n f'Unreadable variable \"{name}\", because \"{err}\"',\n Warning, stacklevel=2)\n res = f\"Read error: {err}\"\n self.mat_stream.seek(next_position)\n mdict[name] = res\n if hdr.is_global:\n mdict['__globals__'].append(name)\n if variable_names is not None:\n variable_names.remove(name)\n if len(variable_names) == 0:\n break\n if self.simplify_cells:\n return _simplify_cells(mdict)\n else:\n return mdict\n\n def list_variables(self):\n ''' list variables from stream '''\n self.mat_stream.seek(0)\n # Here we pass all the parameters in self to the reading objects\n self.initialize_read()\n self.read_file_header()\n vars = []\n while not self.end_of_stream():\n hdr, next_position = self.read_var_header()\n name = asstr(hdr.name)\n if name == '':\n # can only be a matlab 7 function workspace\n name = '__function_workspace__'\n\n shape = self._matrix_reader.shape_from_header(hdr)\n if hdr.is_logical:\n info = 'logical'\n else:\n info = mclass_info.get(hdr.mclass, 'unknown')\n vars.append((name, shape, info))\n\n self.mat_stream.seek(next_position)\n return vars\n\n\ndef varmats_from_mat(file_obj):\n \"\"\" Pull variables out of mat 5 file as a sequence of mat file objects\n\n This can be useful with a difficult mat file, containing unreadable\n variables. This routine pulls the variables out in raw form and puts them,\n unread, back into a file stream for saving or reading. Another use is the\n pathological case where there is more than one variable of the same name in\n the file; this routine returns the duplicates, whereas the standard reader\n will overwrite duplicates in the returned dictionary.\n\n The file pointer in `file_obj` will be undefined. File pointers for the\n returned file-like objects are set at 0.\n\n Parameters\n ----------\n file_obj : file-like\n file object containing mat file\n\n Returns\n -------\n named_mats : list\n list contains tuples of (name, BytesIO) where BytesIO is a file-like\n object containing mat file contents as for a single variable. The\n BytesIO contains a string with the original header and a single var. 
If\n ``var_file_obj`` is an individual BytesIO instance, then save as a mat\n file with something like ``open('test.mat',\n 'wb').write(var_file_obj.read())``\n\n Examples\n --------\n >>> import scipy.io\n\n BytesIO is from the ``io`` module in Python 3, and is ``cStringIO`` for\n Python < 3.\n\n >>> mat_fileobj = BytesIO()\n >>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'})\n >>> varmats = varmats_from_mat(mat_fileobj)\n >>> sorted([name for name, str_obj in varmats])\n ['a', 'b']\n \"\"\"\n rdr = MatFile5Reader(file_obj)\n file_obj.seek(0)\n # Raw read of top-level file header\n hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize\n raw_hdr = file_obj.read(hdr_len)\n # Initialize variable reading\n file_obj.seek(0)\n rdr.initialize_read()\n rdr.read_file_header()\n next_position = file_obj.tell()\n named_mats = []\n while not rdr.end_of_stream():\n start_position = next_position\n hdr, next_position = rdr.read_var_header()\n name = asstr(hdr.name)\n # Read raw variable string\n file_obj.seek(start_position)\n byte_count = next_position - start_position\n var_str = file_obj.read(byte_count)\n # write to stringio object\n out_obj = BytesIO()\n out_obj.write(raw_hdr)\n out_obj.write(var_str)\n out_obj.seek(0)\n named_mats.append((name, out_obj))\n return named_mats\n\n\nclass EmptyStructMarker(object):\n \"\"\" Class to indicate presence of empty matlab struct on output \"\"\"\n\n\ndef to_writeable(source):\n ''' Convert input object ``source`` to something we can write\n\n Parameters\n ----------\n source : object\n\n Returns\n -------\n arr : None or ndarray or EmptyStructMarker\n If `source` cannot be converted to something we can write to a matfile,\n return None. If `source` is equivalent to an empty dictionary, return\n ``EmptyStructMarker``. 
Otherwise return `source` converted to an\n ndarray with contents for writing to matfile.\n '''\n if isinstance(source, np.ndarray):\n return source\n if source is None:\n return None\n # Objects that implement mappings\n is_mapping = (hasattr(source, 'keys') and hasattr(source, 'values') and\n hasattr(source, 'items'))\n # Objects that don't implement mappings, but do have dicts\n if isinstance(source, np.generic):\n # NumPy scalars are never mappings (PyPy issue workaround)\n pass\n elif not is_mapping and hasattr(source, '__dict__'):\n source = dict((key, value) for key, value in source.__dict__.items()\n if not key.startswith('_'))\n is_mapping = True\n if is_mapping:\n dtype = []\n values = []\n for field, value in source.items():\n if (isinstance(field, str) and\n field[0] not in '_0123456789'):\n dtype.append((str(field), object))\n values.append(value)\n if dtype:\n return np.array([tuple(values)], dtype)\n else:\n return EmptyStructMarker\n # Next try and convert to an array\n narr = np.asanyarray(source)\n if narr.dtype.type in (object, np.object_) and \\\n narr.shape == () and narr == source:\n # No interesting conversion possible\n return None\n return narr\n\n\n# Native byte ordered dtypes for convenience for writers\nNDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header']\nNDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full']\nNDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata']\nNDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags']\n\n\nclass VarWriter5(object):\n ''' Generic matlab matrix writing class '''\n mat_tag = np.zeros((), NDT_TAG_FULL)\n mat_tag['mdtype'] = miMATRIX\n\n def __init__(self, file_writer):\n self.file_stream = file_writer.file_stream\n self.unicode_strings = file_writer.unicode_strings\n self.long_field_names = file_writer.long_field_names\n self.oned_as = file_writer.oned_as\n # These are used for top level writes, and unset after\n self._var_name = None\n self._var_is_global = False\n\n def write_bytes(self, arr):\n self.file_stream.write(arr.tobytes(order='F'))\n\n def write_string(self, s):\n self.file_stream.write(s)\n\n def write_element(self, arr, mdtype=None):\n ''' write tag and data '''\n if mdtype is None:\n mdtype = NP_TO_MTYPES[arr.dtype.str[1:]]\n # Array needs to be in native byte order\n if arr.dtype.byteorder == swapped_code:\n arr = arr.byteswap().newbyteorder()\n byte_count = arr.size*arr.itemsize\n if byte_count <= 4:\n self.write_smalldata_element(arr, mdtype, byte_count)\n else:\n self.write_regular_element(arr, mdtype, byte_count)\n\n def write_smalldata_element(self, arr, mdtype, byte_count):\n # write tag with embedded data\n tag = np.zeros((), NDT_TAG_SMALL)\n tag['byte_count_mdtype'] = (byte_count << 16) + mdtype\n # if arr.tobytes is < 4, the element will be zero-padded as needed.\n tag['data'] = arr.tobytes(order='F')\n self.write_bytes(tag)\n\n def write_regular_element(self, arr, mdtype, byte_count):\n # write tag, data\n tag = np.zeros((), NDT_TAG_FULL)\n tag['mdtype'] = mdtype\n tag['byte_count'] = byte_count\n self.write_bytes(tag)\n self.write_bytes(arr)\n # pad to next 64-bit boundary\n bc_mod_8 = byte_count % 8\n if bc_mod_8:\n self.file_stream.write(b'\\x00' * (8-bc_mod_8))\n\n def write_header(self,\n shape,\n mclass,\n is_complex=False,\n is_logical=False,\n nzmax=0):\n ''' Write header for given data options\n shape : sequence\n array shape\n mclass - mat5 matrix class\n is_complex - True if matrix is complex\n is_logical - True if matrix is logical\n nzmax - max non zero elements 
for sparse arrays\n\n We get the name and the global flag from the object, and reset\n them to defaults after we've used them\n '''\n # get name and is_global from one-shot object store\n name = self._var_name\n is_global = self._var_is_global\n # initialize the top-level matrix tag, store position\n self._mat_tag_pos = self.file_stream.tell()\n self.write_bytes(self.mat_tag)\n # write array flags (complex, global, logical, class, nzmax)\n af = np.zeros((), NDT_ARRAY_FLAGS)\n af['data_type'] = miUINT32\n af['byte_count'] = 8\n flags = is_complex << 3 | is_global << 2 | is_logical << 1\n af['flags_class'] = mclass | flags << 8\n af['nzmax'] = nzmax\n self.write_bytes(af)\n # shape\n self.write_element(np.array(shape, dtype='i4'))\n # write name\n name = np.asarray(name)\n if name == '': # empty string zero-terminated\n self.write_smalldata_element(name, miINT8, 0)\n else:\n self.write_element(name, miINT8)\n # reset the one-shot store to defaults\n self._var_name = ''\n self._var_is_global = False\n\n def update_matrix_tag(self, start_pos):\n curr_pos = self.file_stream.tell()\n self.file_stream.seek(start_pos)\n byte_count = curr_pos - start_pos - 8\n if byte_count >= 2**32:\n raise MatWriteError(\"Matrix too large to save with Matlab \"\n \"5 format\")\n self.mat_tag['byte_count'] = byte_count\n self.write_bytes(self.mat_tag)\n self.file_stream.seek(curr_pos)\n\n def write_top(self, arr, name, is_global):\n \"\"\" Write variable at top level of mat file\n\n Parameters\n ----------\n arr : array_like\n array-like object to create writer for\n name : str, optional\n name as it will appear in matlab workspace\n default is empty string\n is_global : {False, True}, optional\n whether variable will be global on load into matlab\n \"\"\"\n # these are set before the top-level header write, and unset at\n # the end of the same write, because they do not apply for lower levels\n self._var_is_global = is_global\n self._var_name = name\n # write the header and data\n self.write(arr)\n\n def write(self, arr):\n ''' Write `arr` to stream at top and sub levels\n\n Parameters\n ----------\n arr : array_like\n array-like object to create writer for\n '''\n # store position, so we can update the matrix tag\n mat_tag_pos = self.file_stream.tell()\n # First check if these are sparse\n if scipy.sparse.issparse(arr):\n self.write_sparse(arr)\n self.update_matrix_tag(mat_tag_pos)\n return\n # Try to convert things that aren't arrays\n narr = to_writeable(arr)\n if narr is None:\n raise TypeError('Could not convert %s (type %s) to array'\n % (arr, type(arr)))\n if isinstance(narr, MatlabObject):\n self.write_object(narr)\n elif isinstance(narr, MatlabFunction):\n raise MatWriteError('Cannot write matlab functions')\n elif narr is EmptyStructMarker: # empty struct array\n self.write_empty_struct()\n elif narr.dtype.fields: # struct array\n self.write_struct(narr)\n elif narr.dtype.hasobject: # cell array\n self.write_cells(narr)\n elif narr.dtype.kind in ('U', 'S'):\n if self.unicode_strings:\n codec = 'UTF8'\n else:\n codec = 'ascii'\n self.write_char(narr, codec)\n else:\n self.write_numeric(narr)\n self.update_matrix_tag(mat_tag_pos)\n\n def write_numeric(self, arr):\n imagf = arr.dtype.kind == 'c'\n logif = arr.dtype.kind == 'b'\n try:\n mclass = NP_TO_MXTYPES[arr.dtype.str[1:]]\n except KeyError:\n # No matching matlab type, probably complex256 / float128 / float96\n # Cast data to complex128 / float64.\n if imagf:\n arr = arr.astype('c128')\n elif logif:\n arr = arr.astype('i1') # Should only contain 0/1\n 
else:\n arr = arr.astype('f8')\n mclass = mxDOUBLE_CLASS\n self.write_header(matdims(arr, self.oned_as),\n mclass,\n is_complex=imagf,\n is_logical=logif)\n if imagf:\n self.write_element(arr.real)\n self.write_element(arr.imag)\n else:\n self.write_element(arr)\n\n def write_char(self, arr, codec='ascii'):\n ''' Write string array `arr` with given `codec`\n '''\n if arr.size == 0 or np.all(arr == ''):\n # This an empty string array or a string array containing\n # only empty strings. Matlab cannot distinguish between a\n # string array that is empty, and a string array containing\n # only empty strings, because it stores strings as arrays of\n # char. There is no way of having an array of char that is\n # not empty, but contains an empty string. We have to\n # special-case the array-with-empty-strings because even\n # empty strings have zero padding, which would otherwise\n # appear in matlab as a string with a space.\n shape = (0,) * np.max([arr.ndim, 2])\n self.write_header(shape, mxCHAR_CLASS)\n self.write_smalldata_element(arr, miUTF8, 0)\n return\n # non-empty string.\n #\n # Convert to char array\n arr = arr_to_chars(arr)\n # We have to write the shape directly, because we are going\n # recode the characters, and the resulting stream of chars\n # may have a different length\n shape = arr.shape\n self.write_header(shape, mxCHAR_CLASS)\n if arr.dtype.kind == 'U' and arr.size:\n # Make one long string from all the characters. We need to\n # transpose here, because we're flattening the array, before\n # we write the bytes. The bytes have to be written in\n # Fortran order.\n n_chars = np.prod(shape)\n st_arr = np.ndarray(shape=(),\n dtype=arr_dtype_number(arr, n_chars),\n buffer=arr.T.copy()) # Fortran order\n # Recode with codec to give byte string\n st = st_arr.item().encode(codec)\n # Reconstruct as 1-D byte array\n arr = np.ndarray(shape=(len(st),),\n dtype='S1',\n buffer=st)\n self.write_element(arr, mdtype=miUTF8)\n\n def write_sparse(self, arr):\n ''' Sparse matrices are 2D\n '''\n A = arr.tocsc() # convert to sparse CSC format\n A.sort_indices() # MATLAB expects sorted row indices\n is_complex = (A.dtype.kind == 'c')\n is_logical = (A.dtype.kind == 'b')\n nz = A.nnz\n self.write_header(matdims(arr, self.oned_as),\n mxSPARSE_CLASS,\n is_complex=is_complex,\n is_logical=is_logical,\n # matlab won't load file with 0 nzmax\n nzmax=1 if nz == 0 else nz)\n self.write_element(A.indices.astype('i4'))\n self.write_element(A.indptr.astype('i4'))\n self.write_element(A.data.real)\n if is_complex:\n self.write_element(A.data.imag)\n\n def write_cells(self, arr):\n self.write_header(matdims(arr, self.oned_as),\n mxCELL_CLASS)\n # loop over data, column major\n A = np.atleast_2d(arr).flatten('F')\n for el in A:\n self.write(el)\n\n def write_empty_struct(self):\n self.write_header((1, 1), mxSTRUCT_CLASS)\n # max field name length set to 1 in an example matlab struct\n self.write_element(np.array(1, dtype=np.int32))\n # Field names element is empty\n self.write_element(np.array([], dtype=np.int8))\n\n def write_struct(self, arr):\n self.write_header(matdims(arr, self.oned_as),\n mxSTRUCT_CLASS)\n self._write_items(arr)\n\n def _write_items(self, arr):\n # write fieldnames\n fieldnames = [f[0] for f in arr.dtype.descr]\n length = max([len(fieldname) for fieldname in fieldnames])+1\n max_length = (self.long_field_names and 64) or 32\n if length > max_length:\n raise ValueError(\"Field names are restricted to %d characters\" %\n (max_length-1))\n self.write_element(np.array([length], 
dtype='i4'))\n self.write_element(\n np.array(fieldnames, dtype='S%d' % (length)),\n mdtype=miINT8)\n A = np.atleast_2d(arr).flatten('F')\n for el in A:\n for f in fieldnames:\n self.write(el[f])\n\n def write_object(self, arr):\n '''Same as writing structs, except different mx class, and extra\n classname element after header\n '''\n self.write_header(matdims(arr, self.oned_as),\n mxOBJECT_CLASS)\n self.write_element(np.array(arr.classname, dtype='S'),\n mdtype=miINT8)\n self._write_items(arr)\n\n\nclass MatFile5Writer(object):\n ''' Class for writing mat5 files '''\n\n @docfiller\n def __init__(self, file_stream,\n do_compression=False,\n unicode_strings=False,\n global_vars=None,\n long_field_names=False,\n oned_as='row'):\n ''' Initialize writer for matlab 5 format files\n\n Parameters\n ----------\n %(do_compression)s\n %(unicode_strings)s\n global_vars : None or sequence of strings, optional\n Names of variables to be marked as global for matlab\n %(long_fields)s\n %(oned_as)s\n '''\n self.file_stream = file_stream\n self.do_compression = do_compression\n self.unicode_strings = unicode_strings\n if global_vars:\n self.global_vars = global_vars\n else:\n self.global_vars = []\n self.long_field_names = long_field_names\n self.oned_as = oned_as\n self._matrix_writer = None\n\n def write_file_header(self):\n # write header\n hdr = np.zeros((), NDT_FILE_HDR)\n hdr['description'] = 'MATLAB 5.0 MAT-file Platform: %s, Created on: %s' \\\n % (os.name,time.asctime())\n hdr['version'] = 0x0100\n hdr['endian_test'] = np.ndarray(shape=(),\n dtype='S2',\n buffer=np.uint16(0x4d49))\n self.file_stream.write(hdr.tobytes())\n\n def put_variables(self, mdict, write_header=None):\n ''' Write variables in `mdict` to stream\n\n Parameters\n ----------\n mdict : mapping\n mapping with method ``items`` returns name, contents pairs where\n ``name`` which will appear in the matlab workspace in file load, and\n ``contents`` is something writeable to a matlab file, such as a NumPy\n array.\n write_header : {None, True, False}, optional\n If True, then write the matlab file header before writing the\n variables. If None (the default) then write the file header\n if we are at position 0 in the stream. 
By setting False\n here, and setting the stream position to the end of the file,\n you can append variables to a matlab file\n '''\n # write header if requested, or None and start of file\n if write_header is None:\n write_header = self.file_stream.tell() == 0\n if write_header:\n self.write_file_header()\n self._matrix_writer = VarWriter5(self)\n for name, var in mdict.items():\n if name[0] == '_':\n continue\n is_global = name in self.global_vars\n if self.do_compression:\n stream = BytesIO()\n self._matrix_writer.file_stream = stream\n self._matrix_writer.write_top(var, asbytes(name), is_global)\n out_str = zlib.compress(stream.getvalue())\n tag = np.empty((), NDT_TAG_FULL)\n tag['mdtype'] = miCOMPRESSED\n tag['byte_count'] = len(out_str)\n self.file_stream.write(tag.tobytes())\n self.file_stream.write(out_str)\n else: # not compressing\n self._matrix_writer.write_top(var, asbytes(name), is_global)\n", "# Created by Pearu Peterson, September 2002\n\n__usage__ = \"\"\"\nBuild fftpack:\n python setup_fftpack.py build\nRun tests if scipy is installed:\n python -c 'import scipy;scipy.fftpack.test(<level>)'\nRun tests if fftpack is not installed:\n python tests/test_pseudo_diffs.py [<level>]\n\"\"\"\n\nfrom numpy.testing import (assert_equal, assert_almost_equal,\n assert_array_almost_equal)\nfrom scipy.fftpack import (diff, fft, ifft, tilbert, itilbert, hilbert,\n ihilbert, shift, fftfreq, cs_diff, sc_diff,\n ss_diff, cc_diff)\n\nimport numpy as np\nfrom numpy import arange, sin, cos, pi, exp, tanh, sum, sign\nfrom numpy.random import random\n\n\ndef direct_diff(x,k=1,period=None):\n fx = fft(x)\n n = len(fx)\n if period is None:\n period = 2*pi\n w = fftfreq(n)*2j*pi/period*n\n if k < 0:\n w = 1 / w**k\n w[0] = 0.0\n else:\n w = w**k\n if n > 2000:\n w[250:n-250] = 0.0\n return ifft(w*fx).real\n\n\ndef direct_tilbert(x,h=1,period=None):\n fx = fft(x)\n n = len(fx)\n if period is None:\n period = 2*pi\n w = fftfreq(n)*h*2*pi/period*n\n w[0] = 1\n w = 1j/tanh(w)\n w[0] = 0j\n return ifft(w*fx)\n\n\ndef direct_itilbert(x,h=1,period=None):\n fx = fft(x)\n n = len(fx)\n if period is None:\n period = 2*pi\n w = fftfreq(n)*h*2*pi/period*n\n w = -1j*tanh(w)\n return ifft(w*fx)\n\n\ndef direct_hilbert(x):\n fx = fft(x)\n n = len(fx)\n w = fftfreq(n)*n\n w = 1j*sign(w)\n return ifft(w*fx)\n\n\ndef direct_ihilbert(x):\n return -direct_hilbert(x)\n\n\ndef direct_shift(x,a,period=None):\n n = len(x)\n if period is None:\n k = fftfreq(n)*1j*n\n else:\n k = fftfreq(n)*2j*pi/period*n\n return ifft(fft(x)*exp(k*a)).real\n\n\nclass TestDiff(object):\n\n def test_definition(self):\n for n in [16,17,64,127,32]:\n x = arange(n)*2*pi/n\n assert_array_almost_equal(diff(sin(x)),direct_diff(sin(x)))\n assert_array_almost_equal(diff(sin(x),2),direct_diff(sin(x),2))\n assert_array_almost_equal(diff(sin(x),3),direct_diff(sin(x),3))\n assert_array_almost_equal(diff(sin(x),4),direct_diff(sin(x),4))\n assert_array_almost_equal(diff(sin(x),5),direct_diff(sin(x),5))\n assert_array_almost_equal(diff(sin(2*x),3),direct_diff(sin(2*x),3))\n assert_array_almost_equal(diff(sin(2*x),4),direct_diff(sin(2*x),4))\n assert_array_almost_equal(diff(cos(x)),direct_diff(cos(x)))\n assert_array_almost_equal(diff(cos(x),2),direct_diff(cos(x),2))\n assert_array_almost_equal(diff(cos(x),3),direct_diff(cos(x),3))\n assert_array_almost_equal(diff(cos(x),4),direct_diff(cos(x),4))\n assert_array_almost_equal(diff(cos(2*x)),direct_diff(cos(2*x)))\n assert_array_almost_equal(diff(sin(x*n/8)),direct_diff(sin(x*n/8)))\n 
assert_array_almost_equal(diff(cos(x*n/8)),direct_diff(cos(x*n/8)))\n for k in range(5):\n assert_array_almost_equal(diff(sin(4*x),k),direct_diff(sin(4*x),k))\n assert_array_almost_equal(diff(cos(4*x),k),direct_diff(cos(4*x),k))\n\n def test_period(self):\n for n in [17,64]:\n x = arange(n)/float(n)\n assert_array_almost_equal(diff(sin(2*pi*x),period=1),\n 2*pi*cos(2*pi*x))\n assert_array_almost_equal(diff(sin(2*pi*x),3,period=1),\n -(2*pi)**3*cos(2*pi*x))\n\n def test_sin(self):\n for n in [32,64,77]:\n x = arange(n)*2*pi/n\n assert_array_almost_equal(diff(sin(x)),cos(x))\n assert_array_almost_equal(diff(cos(x)),-sin(x))\n assert_array_almost_equal(diff(sin(x),2),-sin(x))\n assert_array_almost_equal(diff(sin(x),4),sin(x))\n assert_array_almost_equal(diff(sin(4*x)),4*cos(4*x))\n assert_array_almost_equal(diff(sin(sin(x))),cos(x)*cos(sin(x)))\n\n def test_expr(self):\n for n in [64,77,100,128,256,512,1024,2048,4096,8192][:5]:\n x = arange(n)*2*pi/n\n f = sin(x)*cos(4*x)+exp(sin(3*x))\n df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))\n ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\\\n - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))\n d1 = diff(f)\n assert_array_almost_equal(d1,df)\n assert_array_almost_equal(diff(df),ddf)\n assert_array_almost_equal(diff(f,2),ddf)\n assert_array_almost_equal(diff(ddf,-1),df)\n\n def test_expr_large(self):\n for n in [2048,4096]:\n x = arange(n)*2*pi/n\n f = sin(x)*cos(4*x)+exp(sin(3*x))\n df = cos(x)*cos(4*x)-4*sin(x)*sin(4*x)+3*cos(3*x)*exp(sin(3*x))\n ddf = -17*sin(x)*cos(4*x)-8*cos(x)*sin(4*x)\\\n - 9*sin(3*x)*exp(sin(3*x))+9*cos(3*x)**2*exp(sin(3*x))\n assert_array_almost_equal(diff(f),df)\n assert_array_almost_equal(diff(df),ddf)\n assert_array_almost_equal(diff(ddf,-1),df)\n assert_array_almost_equal(diff(f,2),ddf)\n\n def test_int(self):\n n = 64\n x = arange(n)*2*pi/n\n assert_array_almost_equal(diff(sin(x),-1),-cos(x))\n assert_array_almost_equal(diff(sin(x),-2),-sin(x))\n assert_array_almost_equal(diff(sin(x),-4),sin(x))\n assert_array_almost_equal(diff(2*cos(2*x),-1),sin(2*x))\n\n def test_random_even(self):\n for k in [0,2,4,6]:\n for n in [60,32,64,56,55]:\n f = random((n,))\n af = sum(f,axis=0)/n\n f = f-af\n # zeroing Nyquist mode:\n f = diff(diff(f,1),-1)\n assert_almost_equal(sum(f,axis=0),0.0)\n assert_array_almost_equal(diff(diff(f,k),-k),f)\n assert_array_almost_equal(diff(diff(f,-k),k),f)\n\n def test_random_odd(self):\n for k in [0,1,2,3,4,5,6]:\n for n in [33,65,55]:\n f = random((n,))\n af = sum(f,axis=0)/n\n f = f-af\n assert_almost_equal(sum(f,axis=0),0.0)\n assert_array_almost_equal(diff(diff(f,k),-k),f)\n assert_array_almost_equal(diff(diff(f,-k),k),f)\n\n def test_zero_nyquist(self):\n for k in [0,1,2,3,4,5,6]:\n for n in [32,33,64,56,55]:\n f = random((n,))\n af = sum(f,axis=0)/n\n f = f-af\n # zeroing Nyquist mode:\n f = diff(diff(f,1),-1)\n assert_almost_equal(sum(f,axis=0),0.0)\n assert_array_almost_equal(diff(diff(f,k),-k),f)\n assert_array_almost_equal(diff(diff(f,-k),k),f)\n\n\nclass TestTilbert(object):\n\n def test_definition(self):\n for h in [0.1,0.5,1,5.5,10]:\n for n in [16,17,64,127]:\n x = arange(n)*2*pi/n\n y = tilbert(sin(x),h)\n y1 = direct_tilbert(sin(x),h)\n assert_array_almost_equal(y,y1)\n assert_array_almost_equal(tilbert(sin(x),h),\n direct_tilbert(sin(x),h))\n assert_array_almost_equal(tilbert(sin(2*x),h),\n direct_tilbert(sin(2*x),h))\n\n def test_random_even(self):\n for h in [0.1,0.5,1,5.5,10]:\n for n in [32,64,56]:\n f = random((n,))\n af = sum(f,axis=0)/n\n f = f-af\n 
assert_almost_equal(sum(f,axis=0),0.0)\n assert_array_almost_equal(direct_tilbert(direct_itilbert(f,h),h),f)\n\n def test_random_odd(self):\n for h in [0.1,0.5,1,5.5,10]:\n for n in [33,65,55]:\n f = random((n,))\n af = sum(f,axis=0)/n\n f = f-af\n assert_almost_equal(sum(f,axis=0),0.0)\n assert_array_almost_equal(itilbert(tilbert(f,h),h),f)\n assert_array_almost_equal(tilbert(itilbert(f,h),h),f)\n\n\nclass TestITilbert(object):\n\n def test_definition(self):\n for h in [0.1,0.5,1,5.5,10]:\n for n in [16,17,64,127]:\n x = arange(n)*2*pi/n\n y = itilbert(sin(x),h)\n y1 = direct_itilbert(sin(x),h)\n assert_array_almost_equal(y,y1)\n assert_array_almost_equal(itilbert(sin(x),h),\n direct_itilbert(sin(x),h))\n assert_array_almost_equal(itilbert(sin(2*x),h),\n direct_itilbert(sin(2*x),h))\n\n\nclass TestHilbert(object):\n\n def test_definition(self):\n for n in [16,17,64,127]:\n x = arange(n)*2*pi/n\n y = hilbert(sin(x))\n y1 = direct_hilbert(sin(x))\n assert_array_almost_equal(y,y1)\n assert_array_almost_equal(hilbert(sin(2*x)),\n direct_hilbert(sin(2*x)))\n\n def test_tilbert_relation(self):\n for n in [16,17,64,127]:\n x = arange(n)*2*pi/n\n f = sin(x)+cos(2*x)*sin(x)\n y = hilbert(f)\n y1 = direct_hilbert(f)\n assert_array_almost_equal(y,y1)\n y2 = tilbert(f,h=10)\n assert_array_almost_equal(y,y2)\n\n def test_random_odd(self):\n for n in [33,65,55]:\n f = random((n,))\n af = sum(f,axis=0)/n\n f = f-af\n assert_almost_equal(sum(f,axis=0),0.0)\n assert_array_almost_equal(ihilbert(hilbert(f)),f)\n assert_array_almost_equal(hilbert(ihilbert(f)),f)\n\n def test_random_even(self):\n for n in [32,64,56]:\n f = random((n,))\n af = sum(f,axis=0)/n\n f = f-af\n # zeroing Nyquist mode:\n f = diff(diff(f,1),-1)\n assert_almost_equal(sum(f,axis=0),0.0)\n assert_array_almost_equal(direct_hilbert(direct_ihilbert(f)),f)\n assert_array_almost_equal(hilbert(ihilbert(f)),f)\n\n\nclass TestIHilbert(object):\n\n def test_definition(self):\n for n in [16,17,64,127]:\n x = arange(n)*2*pi/n\n y = ihilbert(sin(x))\n y1 = direct_ihilbert(sin(x))\n assert_array_almost_equal(y,y1)\n assert_array_almost_equal(ihilbert(sin(2*x)),\n direct_ihilbert(sin(2*x)))\n\n def test_itilbert_relation(self):\n for n in [16,17,64,127]:\n x = arange(n)*2*pi/n\n f = sin(x)+cos(2*x)*sin(x)\n y = ihilbert(f)\n y1 = direct_ihilbert(f)\n assert_array_almost_equal(y,y1)\n y2 = itilbert(f,h=10)\n assert_array_almost_equal(y,y2)\n\n\nclass TestShift(object):\n\n def test_definition(self):\n for n in [18,17,64,127,32,2048,256]:\n x = arange(n)*2*pi/n\n for a in [0.1,3]:\n assert_array_almost_equal(shift(sin(x),a),direct_shift(sin(x),a))\n assert_array_almost_equal(shift(sin(x),a),sin(x+a))\n assert_array_almost_equal(shift(cos(x),a),cos(x+a))\n assert_array_almost_equal(shift(cos(2*x)+sin(x),a),\n cos(2*(x+a))+sin(x+a))\n assert_array_almost_equal(shift(exp(sin(x)),a),exp(sin(x+a)))\n assert_array_almost_equal(shift(sin(x),2*pi),sin(x))\n assert_array_almost_equal(shift(sin(x),pi),-sin(x))\n assert_array_almost_equal(shift(sin(x),pi/2),cos(x))\n\n\nclass TestOverwrite(object):\n \"\"\"Check input overwrite behavior \"\"\"\n\n real_dtypes = (np.float32, np.float64)\n dtypes = real_dtypes + (np.complex64, np.complex128)\n\n def _check(self, x, routine, *args, **kwargs):\n x2 = x.copy()\n routine(x2, *args, **kwargs)\n sig = routine.__name__\n if args:\n sig += repr(args)\n if kwargs:\n sig += repr(kwargs)\n assert_equal(x2, x, err_msg=f\"spurious overwrite in {sig}\")\n\n def _check_1d(self, routine, dtype, shape, *args, **kwargs):\n 
np.random.seed(1234)\n if np.issubdtype(dtype, np.complexfloating):\n data = np.random.randn(*shape) + 1j*np.random.randn(*shape)\n else:\n data = np.random.randn(*shape)\n data = data.astype(dtype)\n self._check(data, routine, *args, **kwargs)\n\n def test_diff(self):\n for dtype in self.dtypes:\n self._check_1d(diff, dtype, (16,))\n\n def test_tilbert(self):\n for dtype in self.dtypes:\n self._check_1d(tilbert, dtype, (16,), 1.6)\n\n def test_itilbert(self):\n for dtype in self.dtypes:\n self._check_1d(itilbert, dtype, (16,), 1.6)\n\n def test_hilbert(self):\n for dtype in self.dtypes:\n self._check_1d(hilbert, dtype, (16,))\n\n def test_cs_diff(self):\n for dtype in self.dtypes:\n self._check_1d(cs_diff, dtype, (16,), 1.0, 4.0)\n\n def test_sc_diff(self):\n for dtype in self.dtypes:\n self._check_1d(sc_diff, dtype, (16,), 1.0, 4.0)\n\n def test_ss_diff(self):\n for dtype in self.dtypes:\n self._check_1d(ss_diff, dtype, (16,), 1.0, 4.0)\n\n def test_cc_diff(self):\n for dtype in self.dtypes:\n self._check_1d(cc_diff, dtype, (16,), 1.0, 4.0)\n\n def test_shift(self):\n for dtype in self.dtypes:\n self._check_1d(shift, dtype, (16,), 1.0)\n", "import numpy as np\nimport copy\n\n\nclass Complex:\n def __init__(self, dim, func, func_args=(), symmetry=False, bounds=None,\n g_cons=None, g_args=()):\n self.dim = dim\n self.bounds = bounds\n self.symmetry = symmetry # TODO: Define the functions to be used\n # here in init to avoid if checks\n self.gen = 0\n self.perm_cycle = 0\n\n # Every cell is stored in a list of its generation,\n # e.g., the initial cell is stored in self.H[0]\n # 1st get new cells are stored in self.H[1] etc.\n # When a cell is subgenerated it is removed from this list\n\n self.H = [] # Storage structure of cells\n # Cache of all vertices\n self.V = VertexCache(func, func_args, bounds, g_cons, g_args)\n\n # Generate n-cube here:\n self.n_cube(dim, symmetry=symmetry)\n\n # TODO: Assign functions to a the complex instead\n if symmetry:\n self.generation_cycle = 1\n # self.centroid = self.C0()[-1].x\n # self.C0.centroid = self.centroid\n else:\n self.add_centroid()\n\n self.H.append([])\n self.H[0].append(self.C0)\n self.hgr = self.C0.homology_group_rank()\n self.hgrd = 0 # Complex group rank differential\n # self.hgr = self.C0.hg_n\n\n # Build initial graph\n self.graph_map()\n\n self.performance = []\n self.performance.append(0)\n self.performance.append(0)\n\n def __call__(self):\n return self.H\n\n def n_cube(self, dim, symmetry=False, printout=False):\n \"\"\"\n Generate the simplicial triangulation of the N-D hypercube\n containing 2**n vertices\n \"\"\"\n origin = list(np.zeros(dim, dtype=int))\n self.origin = origin\n supremum = list(np.ones(dim, dtype=int))\n self.supremum = supremum\n\n # tuple versions for indexing\n origintuple = tuple(origin)\n supremumtuple = tuple(supremum)\n\n x_parents = [origintuple]\n\n if symmetry:\n self.C0 = Simplex(0, 0, 0, self.dim) # Initial cell object\n self.C0.add_vertex(self.V[origintuple])\n\n i_s = 0\n self.perm_symmetry(i_s, x_parents, origin)\n self.C0.add_vertex(self.V[supremumtuple])\n else:\n self.C0 = Cell(0, 0, origin, supremum) # Initial cell object\n self.C0.add_vertex(self.V[origintuple])\n self.C0.add_vertex(self.V[supremumtuple])\n\n i_parents = []\n self.perm(i_parents, x_parents, origin)\n\n if printout:\n print(\"Initial hyper cube:\")\n for v in self.C0():\n v.print_out()\n\n def perm(self, i_parents, x_parents, xi):\n # TODO: Cut out of for if outside linear constraint cutting planes\n xi_t = tuple(xi)\n\n # 
Construct required iterator\n iter_range = [x for x in range(self.dim) if x not in i_parents]\n\n for i in iter_range:\n i2_parents = copy.copy(i_parents)\n i2_parents.append(i)\n xi2 = copy.copy(xi)\n xi2[i] = 1\n # Make new vertex list a hashable tuple\n xi2_t = tuple(xi2)\n # Append to cell\n self.C0.add_vertex(self.V[xi2_t])\n # Connect neighbors and vice versa\n # Parent point\n self.V[xi2_t].connect(self.V[xi_t])\n\n # Connect all family of simplices in parent containers\n for x_ip in x_parents:\n self.V[xi2_t].connect(self.V[x_ip])\n\n x_parents2 = copy.copy(x_parents)\n x_parents2.append(xi_t)\n\n # Permutate\n self.perm(i2_parents, x_parents2, xi2)\n\n def perm_symmetry(self, i_s, x_parents, xi):\n # TODO: Cut out of for if outside linear constraint cutting planes\n xi_t = tuple(xi)\n xi2 = copy.copy(xi)\n xi2[i_s] = 1\n # Make new vertex list a hashable tuple\n xi2_t = tuple(xi2)\n # Append to cell\n self.C0.add_vertex(self.V[xi2_t])\n # Connect neighbors and vice versa\n # Parent point\n self.V[xi2_t].connect(self.V[xi_t])\n\n # Connect all family of simplices in parent containers\n for x_ip in x_parents:\n self.V[xi2_t].connect(self.V[x_ip])\n\n x_parents2 = copy.copy(x_parents)\n x_parents2.append(xi_t)\n\n i_s += 1\n if i_s == self.dim:\n return\n # Permutate\n self.perm_symmetry(i_s, x_parents2, xi2)\n\n def add_centroid(self):\n \"\"\"Split the central edge between the origin and supremum of\n a cell and add the new vertex to the complex\"\"\"\n self.centroid = list(\n (np.array(self.origin) + np.array(self.supremum)) / 2.0)\n self.C0.add_vertex(self.V[tuple(self.centroid)])\n self.C0.centroid = self.centroid\n\n # Disconnect origin and supremum\n self.V[tuple(self.origin)].disconnect(self.V[tuple(self.supremum)])\n\n # Connect centroid to all other vertices\n for v in self.C0():\n self.V[tuple(self.centroid)].connect(self.V[tuple(v.x)])\n\n self.centroid_added = True\n return\n\n # Construct incidence array:\n def incidence(self):\n if self.centroid_added:\n self.structure = np.zeros([2 ** self.dim + 1, 2 ** self.dim + 1],\n dtype=int)\n else:\n self.structure = np.zeros([2 ** self.dim, 2 ** self.dim],\n dtype=int)\n\n for v in self.HC.C0():\n for v2 in v.nn:\n self.structure[v.index, v2.index] = 1\n\n return\n\n # A more sparse incidence generator:\n def graph_map(self):\n \"\"\" Make a list of size 2**n + 1 where an entry is a vertex\n incidence, each list element contains a list of indexes\n corresponding to that entries neighbors\"\"\"\n\n self.graph = [[v2.index for v2 in v.nn] for v in self.C0()]\n\n # Graph structure method:\n # 0. Capture the indices of the initial cell.\n # 1. Generate new origin and supremum scalars based on current generation\n # 2. Generate a new set of vertices corresponding to a new\n # \"origin\" and \"supremum\"\n # 3. Connected based on the indices of the previous graph structure\n # 4. 
Disconnect the edges in the original cell\n\n def sub_generate_cell(self, C_i, gen):\n \"\"\"Subgenerate a cell `C_i` of generation `gen` and\n homology group rank `hgr`.\"\"\"\n origin_new = tuple(C_i.centroid)\n centroid_index = len(C_i()) - 1\n\n # If not gen append\n try:\n self.H[gen]\n except IndexError:\n self.H.append([])\n\n # Generate subcubes using every extreme vertex in C_i as a supremum\n # and the centroid of C_i as the origin\n H_new = [] # list storing all the new cubes split from C_i\n for i, v in enumerate(C_i()[:-1]):\n supremum = tuple(v.x)\n H_new.append(\n self.construct_hypercube(origin_new, supremum, gen, C_i.hg_n))\n\n for i, connections in enumerate(self.graph):\n # Present vertex V_new[i]; connect to all connections:\n if i == centroid_index: # Break out of centroid\n break\n\n for j in connections:\n C_i()[i].disconnect(C_i()[j])\n\n # Destroy the old cell\n if C_i is not self.C0: # Garbage collector does this anyway; not needed\n del C_i\n\n # TODO: Recalculate all the homology group ranks of each cell\n return H_new\n\n def split_generation(self):\n \"\"\"\n Run sub_generate_cell for every cell in the current complex self.gen\n \"\"\"\n no_splits = False # USED IN SHGO\n try:\n for c in self.H[self.gen]:\n if self.symmetry:\n # self.sub_generate_cell_symmetry(c, self.gen + 1)\n self.split_simplex_symmetry(c, self.gen + 1)\n else:\n self.sub_generate_cell(c, self.gen + 1)\n except IndexError:\n no_splits = True # USED IN SHGO\n\n self.gen += 1\n return no_splits # USED IN SHGO\n\n def construct_hypercube(self, origin, supremum, gen, hgr,\n printout=False):\n \"\"\"\n Build a hypercube with triangulations symmetric to C0.\n\n Parameters\n ----------\n origin : vec\n supremum : vec (tuple)\n gen : generation\n hgr : parent homology group rank\n \"\"\"\n # Initiate new cell\n v_o = np.array(origin)\n v_s = np.array(supremum)\n\n C_new = Cell(gen, hgr, origin, supremum)\n C_new.centroid = tuple((v_o + v_s) * .5)\n\n # Build new indexed vertex list\n V_new = []\n\n for i, v in enumerate(self.C0()[:-1]):\n v_x = np.array(v.x)\n sub_cell_t1 = v_o - v_o * v_x\n sub_cell_t2 = v_s * v_x\n\n vec = sub_cell_t1 + sub_cell_t2\n\n vec = tuple(vec)\n C_new.add_vertex(self.V[vec])\n V_new.append(vec)\n\n # Add new centroid\n C_new.add_vertex(self.V[C_new.centroid])\n V_new.append(C_new.centroid)\n\n # Connect new vertices #TODO: Thread into other loop; no need for V_new\n for i, connections in enumerate(self.graph):\n # Present vertex V_new[i]; connect to all connections:\n for j in connections:\n self.V[V_new[i]].connect(self.V[V_new[j]])\n\n if printout:\n print(\"A sub hyper cube with:\")\n print(f\"origin: {origin}\")\n print(f\"supremum: {supremum}\")\n for v in C_new():\n v.print_out()\n\n # Append the new cell to the to complex\n self.H[gen].append(C_new)\n\n return C_new\n\n def split_simplex_symmetry(self, S, gen):\n \"\"\"\n Split a hypersimplex S into two sub simplices by building a hyperplane\n which connects to a new vertex on an edge (the longest edge in\n dim = {2, 3}) and every other vertex in the simplex that is not\n connected to the edge being split.\n\n This function utilizes the knowledge that the problem is specified\n with symmetric constraints\n\n The longest edge is tracked by an ordering of the\n vertices in every simplices, the edge between first and second\n vertex is the longest edge to be split in the next iteration.\n \"\"\"\n # If not gen append\n try:\n self.H[gen]\n except IndexError:\n self.H.append([])\n\n # Find new vertex.\n # V_new_x = 
tuple((np.array(C()[0].x) + np.array(C()[1].x)) / 2.0)\n s = S()\n firstx = s[0].x\n lastx = s[-1].x\n V_new = self.V[tuple((np.array(firstx) + np.array(lastx)) / 2.0)]\n\n # Disconnect old longest edge\n self.V[firstx].disconnect(self.V[lastx])\n\n # Connect new vertices to all other vertices\n for v in s[:]:\n v.connect(self.V[V_new.x])\n\n # New \"lower\" simplex\n S_new_l = Simplex(gen, S.hg_n, self.generation_cycle,\n self.dim)\n S_new_l.add_vertex(s[0])\n S_new_l.add_vertex(V_new) # Add new vertex\n for v in s[1:-1]: # Add all other vertices\n S_new_l.add_vertex(v)\n\n # New \"upper\" simplex\n S_new_u = Simplex(gen, S.hg_n, S.generation_cycle, self.dim)\n\n # First vertex on new long edge\n S_new_u.add_vertex(s[S_new_u.generation_cycle + 1])\n\n for v in s[1:-1]: # Remaining vertices\n S_new_u.add_vertex(v)\n\n for k, v in enumerate(s[1:-1]): # iterate through inner vertices\n if k == S.generation_cycle:\n S_new_u.add_vertex(V_new)\n else:\n S_new_u.add_vertex(v)\n\n S_new_u.add_vertex(s[-1]) # Second vertex on new long edge\n\n self.H[gen].append(S_new_l)\n self.H[gen].append(S_new_u)\n\n return\n\n # Plots\n def plot_complex(self):\n \"\"\"\n Here, C is the LIST of simplexes S in the\n 2- or 3-D complex\n\n To plot a single simplex S in a set C, use e.g., [C[0]]\n \"\"\"\n from matplotlib import pyplot # type: ignore[import]\n if self.dim == 2:\n pyplot.figure()\n for C in self.H:\n for c in C:\n for v in c():\n if self.bounds is None:\n x_a = np.array(v.x, dtype=float)\n else:\n x_a = np.array(v.x, dtype=float)\n for i in range(len(self.bounds)):\n x_a[i] = (x_a[i] * (self.bounds[i][1]\n - self.bounds[i][0])\n + self.bounds[i][0])\n\n # logging.info('v.x_a = {}'.format(x_a))\n\n pyplot.plot([x_a[0]], [x_a[1]], 'o')\n\n xlines = []\n ylines = []\n for vn in v.nn:\n if self.bounds is None:\n xn_a = np.array(vn.x, dtype=float)\n else:\n xn_a = np.array(vn.x, dtype=float)\n for i in range(len(self.bounds)):\n xn_a[i] = (xn_a[i] * (self.bounds[i][1]\n - self.bounds[i][0])\n + self.bounds[i][0])\n\n # logging.info('vn.x = {}'.format(vn.x))\n\n xlines.append(xn_a[0])\n ylines.append(xn_a[1])\n xlines.append(x_a[0])\n ylines.append(x_a[1])\n\n pyplot.plot(xlines, ylines)\n\n if self.bounds is None:\n pyplot.ylim([-1e-2, 1 + 1e-2])\n pyplot.xlim([-1e-2, 1 + 1e-2])\n else:\n pyplot.ylim(\n [self.bounds[1][0] - 1e-2, self.bounds[1][1] + 1e-2])\n pyplot.xlim(\n [self.bounds[0][0] - 1e-2, self.bounds[0][1] + 1e-2])\n\n pyplot.show()\n\n elif self.dim == 3:\n fig = pyplot.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n for C in self.H:\n for c in C:\n for v in c():\n x = []\n y = []\n z = []\n # logging.info('v.x = {}'.format(v.x))\n x.append(v.x[0])\n y.append(v.x[1])\n z.append(v.x[2])\n for vn in v.nn:\n x.append(vn.x[0])\n y.append(vn.x[1])\n z.append(vn.x[2])\n x.append(v.x[0])\n y.append(v.x[1])\n z.append(v.x[2])\n # logging.info('vn.x = {}'.format(vn.x))\n\n ax.plot(x, y, z, label='simplex')\n\n pyplot.show()\n else:\n print(\"dimension higher than 3 or wrong complex format\")\n return\n\n\nclass VertexGroup(object):\n def __init__(self, p_gen, p_hgr):\n self.p_gen = p_gen # parent generation\n self.p_hgr = p_hgr # parent homology group rank\n self.hg_n = None\n self.hg_d = None\n\n # Maybe add parent homology group rank total history\n # This is the sum off all previously split cells\n # cumulatively throughout its entire history\n self.C = []\n\n def __call__(self):\n return self.C\n\n def add_vertex(self, V):\n if V not in self.C:\n self.C.append(V)\n\n def 
homology_group_rank(self):\n \"\"\"\n Returns the homology group order of the current cell\n \"\"\"\n if self.hg_n is None:\n self.hg_n = sum(1 for v in self.C if v.minimiser())\n\n return self.hg_n\n\n def homology_group_differential(self):\n \"\"\"\n Returns the difference between the current homology group of the\n cell and its parent group\n \"\"\"\n if self.hg_d is None:\n self.hgd = self.hg_n - self.p_hgr\n\n return self.hgd\n\n def polytopial_sperner_lemma(self):\n \"\"\"\n Returns the number of stationary points theoretically contained in the\n cell based information currently known about the cell\n \"\"\"\n pass\n\n def print_out(self):\n \"\"\"\n Print the current cell to console\n \"\"\"\n for v in self():\n v.print_out()\n\n\nclass Cell(VertexGroup):\n \"\"\"\n Contains a cell that is symmetric to the initial hypercube triangulation\n \"\"\"\n\n def __init__(self, p_gen, p_hgr, origin, supremum):\n super(Cell, self).__init__(p_gen, p_hgr)\n\n self.origin = origin\n self.supremum = supremum\n self.centroid = None # (Not always used)\n # TODO: self.bounds\n\n\nclass Simplex(VertexGroup):\n \"\"\"\n Contains a simplex that is symmetric to the initial symmetry constrained\n hypersimplex triangulation\n \"\"\"\n\n def __init__(self, p_gen, p_hgr, generation_cycle, dim):\n super(Simplex, self).__init__(p_gen, p_hgr)\n\n self.generation_cycle = (generation_cycle + 1) % (dim - 1)\n\n\nclass Vertex:\n def __init__(self, x, bounds=None, func=None, func_args=(), g_cons=None,\n g_cons_args=(), nn=None, index=None):\n self.x = x\n self.order = sum(x)\n x_a = np.array(x, dtype=float)\n if bounds is not None:\n for i, (lb, ub) in enumerate(bounds):\n x_a[i] = x_a[i] * (ub - lb) + lb\n\n # TODO: Make saving the array structure optional\n self.x_a = x_a\n\n # Note Vertex is only initiated once for all x so only\n # evaluated once\n if func is not None:\n self.feasible = True\n if g_cons is not None:\n for g, args in zip(g_cons, g_cons_args):\n if g(self.x_a, *args) < 0.0:\n self.f = np.inf\n self.feasible = False\n break\n if self.feasible:\n self.f = func(x_a, *func_args)\n\n if nn is not None:\n self.nn = nn\n else:\n self.nn = set()\n\n self.fval = None\n self.check_min = True\n\n # Index:\n if index is not None:\n self.index = index\n\n def __hash__(self):\n return hash(self.x)\n\n def connect(self, v):\n if v is not self and v not in self.nn:\n self.nn.add(v)\n v.nn.add(self)\n\n if self.minimiser():\n v._min = False\n v.check_min = False\n\n # TEMPORARY\n self.check_min = True\n v.check_min = True\n\n def disconnect(self, v):\n if v in self.nn:\n self.nn.remove(v)\n v.nn.remove(self)\n self.check_min = True\n v.check_min = True\n\n def minimiser(self):\n \"\"\"Check whether this vertex is strictly less than all its neighbors\"\"\"\n if self.check_min:\n self._min = all(self.f < v.f for v in self.nn)\n self.check_min = False\n\n return self._min\n\n def print_out(self):\n print(f\"Vertex: {self.x}\")\n constr = 'Connections: '\n for vc in self.nn:\n constr += f'{vc.x} '\n\n print(constr)\n print(f'Order = {self.order}')\n\n\nclass VertexCache:\n def __init__(self, func, func_args=(), bounds=None, g_cons=None,\n g_cons_args=(), indexed=True):\n\n self.cache = {}\n self.func = func\n self.g_cons = g_cons\n self.g_cons_args = g_cons_args\n self.func_args = func_args\n self.bounds = bounds\n self.nfev = 0\n self.size = 0\n\n if indexed:\n self.index = -1\n\n def __getitem__(self, x, indexed=True):\n try:\n return self.cache[x]\n except KeyError:\n if indexed:\n self.index += 1\n xval = 
Vertex(x, bounds=self.bounds,\n func=self.func, func_args=self.func_args,\n g_cons=self.g_cons,\n g_cons_args=self.g_cons_args,\n index=self.index)\n else:\n xval = Vertex(x, bounds=self.bounds,\n func=self.func, func_args=self.func_args,\n g_cons=self.g_cons,\n g_cons_args=self.g_cons_args)\n\n # logging.info(\"New generated vertex at x = {}\".format(x))\n # NOTE: Surprisingly high performance increase if logging is commented out\n self.cache[x] = xval\n\n # TODO: Check\n if self.func is not None:\n if self.g_cons is not None:\n if xval.feasible:\n self.nfev += 1\n self.size += 1\n else:\n self.size += 1\n else:\n self.nfev += 1\n self.size += 1\n\n return self.cache[x]\n" ]
[ [ "numpy.asarray", "numpy.compat.asbytes", "numpy.compat.asstr", "numpy.all", "numpy.max", "numpy.asanyarray", "numpy.atleast_2d", "numpy.uint16", "numpy.prod", "numpy.array", "numpy.zeros", "numpy.empty" ], [ "numpy.issubdtype", "scipy.fftpack.fft", "numpy.random.randn", "scipy.fftpack.fftfreq", "numpy.exp", "scipy.fftpack.ihilbert", "numpy.testing.assert_equal", "scipy.fftpack.diff", "numpy.arange", "numpy.sin", "numpy.testing.assert_array_almost_equal", "scipy.fftpack.ifft", "scipy.fftpack.tilbert", "numpy.tanh", "numpy.sum", "scipy.fftpack.hilbert", "numpy.random.random", "numpy.random.seed", "scipy.fftpack.itilbert", "numpy.cos", "numpy.sign" ], [ "matplotlib.pyplot.ylim", "numpy.ones", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FJFranklin/BeesEtAl
[ "3fd21d044e77b4a1df56ac2f405e2084bebd54e1" ]
[ "BeesEtAl/Gholami.py" ]
[ "# *** References ***\n\n# Gholami & Mohammadi, A Novel Combination of Bees and Firefly Algorithm to Optimize Continuous Problems\n\n# Türker Tuncer, LDW-SCSA: Logistic Dynamic Weight based Sine Cosine Search Algorithm for Numerical Functions Optimization \n# https://arxiv.org/ftp/arxiv/papers/1809/1809.03055.pdf\n\n# Hartmut Pohlheim, Examples of Objective Functions\n# http://www.geatbx.com/download/GEATbx_ObjFunExpl_v38.pdf\n\n# Wikipedia, Test functions for optimization\n# https://en.wikipedia.org/wiki/Test_functions_for_optimization\n\nimport numpy as np\n\nfrom .Base_Coster import Base_Coster\n\nclass F1(Base_Coster):\n \"\"\"\n Function F1 from Gholami & Mohammadi FA-BA Hybrid paper\n De Jong / Sphere (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -5.12 * np.ones(Ndim), 5.12 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = sum(np.power(self.XA, 2))\n\n def meso(self):\n None\n\nclass F2(Base_Coster):\n \"\"\"\n Function F2 from Gholami & Mohammadi FA-BA Hybrid paper\n Schwefel 2.22 (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -10 * np.ones(Ndim), 10 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = sum(np.abs(self.XA)) + np.prod(np.abs(self.XA))\n\n def meso(self):\n None\n\nclass F3(Base_Coster):\n \"\"\"\n Function F3 from Gholami & Mohammadi FA-BA Hybrid paper\n Schwefel 1.2 - Rotated hyper-ellipsoid (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -65.536 * np.ones(Ndim), 65.536 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = 0\n for i in range(0, len(self.XA)):\n self.cost = self.cost + (sum(self.XA[0:(i+1)]))**2\n\n def meso(self):\n None\n\nclass F4(Base_Coster):\n \"\"\"\n Function F4 from Gholami & Mohammadi FA-BA Hybrid paper\n Schwefel 2.21 (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -100 * np.ones(Ndim), 100 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = max(np.abs(self.XA))\n\n def meso(self):\n None\n\nclass F5(Base_Coster):\n \"\"\"\n Function F5 from Gholami & Mohammadi FA-BA Hybrid paper\n Rosenbrock (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -2.048 * np.ones(Ndim), 2.048 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = sum(100 * np.power(self.XA[1:len(self.XA)] - np.power(self.XA[0:(len(self.XA)-1)], 2), 2) + np.power(1 - self.XA[0:(len(self.XA)-1)], 2))\n\n def meso(self):\n None\n\nclass F6(Base_Coster):\n \"\"\"\n Function F6 from Gholami & Mohammadi FA-BA Hybrid paper\n Step (ND) cost function; optimum @ (-0.5,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -100 * np.ones(Ndim), 100 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def 
map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = sum(np.floor(np.power(self.XA + 0.5, 2)))\n\n def meso(self):\n None\n\nclass F7(Base_Coster):\n \"\"\"\n Function F7 from Gholami & Mohammadi FA-BA Hybrid paper\n Noise (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -1.28 * np.ones(Ndim), 1.28 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = sum(np.power(self.XA, 4) * np.asarray(range(1, 1 + len(self.XA)))) + np.random.rand(1)\n\n def meso(self):\n None\n\nclass F8(Base_Coster):\n \"\"\"\n Function F8 from Gholami & Mohammadi FA-BA Hybrid paper\n Schwefel (ND) cost function\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -500 * np.ones(Ndim), 500 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = -sum(self.XA * np.sin(np.sqrt(abs(self.XA))))\n\n def meso(self):\n None\n\nclass F9(Base_Coster):\n \"\"\"\n Function F9 from Gholami & Mohammadi FA-BA Hybrid paper\n Rastrigin (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -5.12 * np.ones(Ndim), 5.12 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = sum(np.power(self.XA, 2) - 10 * np.cos(2 * np.pi * self.XA) + 10)\n\n def meso(self):\n None\n\nclass F10(Base_Coster):\n \"\"\"\n Function F10 from Gholami & Mohammadi FA-BA Hybrid paper\n Ackley (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -32.768 * np.ones(Ndim), 32.768 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n @staticmethod\n def rms(X):\n return np.sqrt(X.dot(X) / len(X))\n\n def evaluate_cost(self):\n self.cost = np.exp(1) + 20 * (1 - np.exp(-F10.rms(self.XA) / 5)) - np.exp(sum(np.cos(2 * np.pi * self.XA)) / len(self.XA))\n\n def meso(self):\n None\n\nclass F11(Base_Coster):\n \"\"\"\n Function F11 from Gholami & Mohammadi FA-BA Hybrid paper\n Griewangk (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -600 * np.ones(Ndim), 600 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = sum(np.power(self.XA, 2)) / 4000 - np.prod(np.cos(np.power(self.XA, 2) / np.power(range(1, 1+len(self.XA)), 0.5))) + 1\n\n def meso(self):\n None\n\nclass F12(Base_Coster):\n \"\"\"\n Function F12 from Gholami & Mohammadi FA-BA Hybrid paper\n Generalised Penalised 1 (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -50 * np.ones(Ndim), 50 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n @staticmethod\n def u(xi, a, k, m):\n if xi > a:\n v = k * (xi - a)**m\n elif xi < -a:\n v = k * (-xi - a)**m\n else:\n v = 0\n return v\n \n def evaluate_cost(self):\n y = 1 + (self.XA + 1) / 4\n\n c = 0\n for i in range(0, len(self.XA)):\n c = c + F12.u(self.XA[i], 10, 100, 4)\n\n 
self.cost = sum(np.power(y[0:(len(self.XA)-1)] - 1, 2) * (1 + 10 * np.power(np.sin(np.pi * y[1:len(self.XA)]), 2)))\n self.cost = (self.cost + 10 * np.sin(np.pi * y[0]) + (y[len(self.XA)-1] - 1)**2) * np.pi / len(self.XA) + c\n\n def meso(self):\n None\n\ndef Gholami_TestFunction_Extents(number, Ndim=30):\n minima = None\n maxima = None\n\n if number == 1:\n minima, maxima = F1.extents(Ndim)\n if number == 2:\n minima, maxima = F2.extents(Ndim)\n if number == 3:\n minima, maxima = F3.extents(Ndim)\n if number == 4:\n minima, maxima = F4.extents(Ndim)\n if number == 5:\n minima, maxima = F5.extents(Ndim)\n if number == 6:\n minima, maxima = F6.extents(Ndim)\n if number == 7:\n minima, maxima = F7.extents(Ndim)\n if number == 8:\n minima, maxima = F8.extents(Ndim)\n if number == 9:\n minima, maxima = F9.extents(Ndim)\n if number == 10:\n minima, maxima = F10.extents(Ndim)\n if number == 11:\n minima, maxima = F11.extents(Ndim)\n if number == 12:\n minima, maxima = F12.extents(Ndim)\n\n return minima, maxima\n\ndef Gholami_TestFunction_Coster(number, base_optimiser):\n coster = None\n\n if number == 1:\n coster = F1(base_optimiser)\n if number == 2:\n coster = F2(base_optimiser)\n if number == 3:\n coster = F3(base_optimiser)\n if number == 4:\n coster = F4(base_optimiser)\n if number == 5:\n coster = F5(base_optimiser)\n if number == 6:\n coster = F6(base_optimiser)\n if number == 7:\n coster = F7(base_optimiser)\n if number == 8:\n coster = F8(base_optimiser)\n if number == 9:\n coster = F9(base_optimiser)\n if number == 10:\n coster = F10(base_optimiser)\n if number == 11:\n coster = F11(base_optimiser)\n if number == 12:\n coster = F12(base_optimiser)\n\n return coster\n" ]
[ [ "numpy.abs", "numpy.power", "numpy.cos", "numpy.ones", "numpy.sin", "numpy.random.rand", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VenkateshBH99/django_local_library
[ "db834cbe6ec475a2d3224b3ea9b56b1fa3519e9f" ]
[ "predict_risk_1/machine_learning_models/KNN.py" ]
[ "# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport random\n\n# Importing the dataset\ndataset = pd.read_csv('kidney_disease2.csv')\n\nX = dataset.iloc[:,:-1].values\ny = dataset.iloc[:,24].values\n\n\n#handling missing data\n\nfrom sklearn.preprocessing import Imputer\nimputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)\nimputer = imputer.fit(X[:,:24])\nX[:,:24] = imputer.transform(X[:,:24])\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.5, random_state =101)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\ntrain=list(X_train)\nX_train = sc.fit_transform(X_train)\nfrom sklearn.externals import joblib\n# Save it\nscaler_file = \"standard_scalar_KNN.pkl\"\njoblib.dump(sc, scaler_file)\nX_test = sc.transform(X_test)\n\n#EXPLORING THE DATASET\nimport seaborn as sn\nsn.countplot(x='classification',data=dataset)\ndataset.classification.value_counts()\nprint(\"------\",dataset.classification.value_counts(),\"----------\")\n# Fitting Decision Tree Classification to the Training set\nfrom sklearn.neighbors import KNeighborsClassifier\nclassifier = KNeighborsClassifier(n_neighbors=3)\nclassifier.fit(X_train, y_train)\n\nfrom sklearn.externals import joblib\nfilename ='KNN_model.pkl'\njoblib.dump(classifier,filename)\n\n\n# Predicting the Test set results\nprint(X_test)\ny_pred = classifier.predict(X_test)\nprint(y_pred)\nprint(y_test)\n\n#ACCURACY SCORE\nfrom sklearn.metrics import accuracy_score\naccuracy_score(y_test,y_pred)\n\n##CONFUSION MATRIX\nfrom sklearn.metrics import classification_report, confusion_matrix\ncm=confusion_matrix(y_test, y_pred)\n\n#Interpretation:\nfrom sklearn.metrics import classification_report\nprint(classification_report(y_test, y_pred))\n\n#ROC\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import roc_curve\nlogit_roc_auc = roc_auc_score(y_test, classifier.predict(X_test))\nfpr, tpr, thresholds = roc_curve(y_test, classifier.predict_proba(X_test)[:,1])\nplt.figure()\nplt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)\nplt.plot([0, 1], [0, 1],'r--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver operating characteristic')\nplt.legend(loc=\"lower right\")\nplt.savefig('Log_ROC')\nplt.show()\n\n\n##PREDICTION FOR NEW DATASET\n\nNewdataset = pd.read_csv('newdata.csv')\nsca=StandardScaler()\ntrain=sca.fit_transform(train)\nNewdataset=sca.transform(Newdataset)\nprint(Newdataset)\n\nynew=classifier.predict(Newdataset)\nprint(\"---------\",ynew,\"------------\")\n" ]
[ [ "sklearn.externals.joblib.dump", "matplotlib.pyplot.legend", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.plot", "sklearn.metrics.classification_report", "pandas.read_csv", "sklearn.preprocessing.Imputer", "sklearn.neighbors.KNeighborsClassifier", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.savefig", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "sklearn.preprocessing.StandardScaler", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
iamgreaser/fireball
[ "2c5afb3dc5756a3b26da9045278f7e4a2bc036d2" ]
[ "entity.py" ]
[ "\"\"\"\nCopyright 2011 Ben Russell & contributors. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are\npermitted provided that the following conditions are met:\n\n 1. Redistributions of source code must retain the above copyright notice, this list of\n conditions and the following disclaimer.\n\n 2. Redistributions in binary form must reproduce the above copyright notice, this list\n of conditions and the following disclaimer in the documentation and/or other materials\n provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESS OR IMPLIED\nWARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY\nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\nADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nThe views and conclusions contained in the software and documentation are those of the\nauthors and should not be interpreted as representing official policies, either expressed\nor implied, of the contributors.\n\"\"\"\n\nfrom math import *\n\nimport numpy as np\n\nimport pyglet\n\nimport helpers\n\nMOUSE_SENS_X = 0.3\nMOUSE_SENS_Y = 0.3\nPLAYER_SPEED = 3.0*2.0\nOBJECT_GRAVITY = 9.8*2.0\nPLAYER_FRICTION = 0.02\nPLAYER_JUMP_HEIGHT = 10.0\nCOLLISION_TOLERANCE = 0.2\n\nKEY_MOVE_FORWARD_BIT = 0x0001\nKEY_MOVE_BACKWARD_BIT = 0x0002\nKEY_MOVE_LEFT_BIT = 0x0004\nKEY_MOVE_RIGHT_BIT = 0x0008\nKEY_JUMP_BIT = 0x0010\nKEY_CROUCH_BIT = 0x0020\nKEY_CREEP_BIT = 0x0040\nKEY_ZOOM_BIT = 0x0080\n\nclass AbstractEntity(helpers.ArgGenerator):\n\tARGS = []\n\t\n\tdef set_game(self, idx, game):\n\t\tself.idx = idx\n\t\tself.game = game\n\nclass PositionedEntity(AbstractEntity):\n\tARGS = AbstractEntity.ARGS + [\"origin\",\"velocity\",\"orient_x\",\"orient_z\"]\n\nclass PhysicsEntity(PositionedEntity):\n\tARGS = PositionedEntity.ARGS + []\n\tgrounded = False\n\twalkable = False\n\t\n\t# i had to use floor,\n\t# otherwise the player would bounce like mad when it was in the water\n\tdef trace_vector(self, ox,oy,oz, nx,ny,nz, walkable = False):\n\t\t#walkable = False\n\t\t\n\t\t# prep values\n\t\tdx, dy, dz = (n-o for (o,n) in zip((ox,oy,oz),(nx,ny,nz))) # delta\n\t\t\n\t\t(x1,y1,z1), (x2,y2,z2) = self.BBOX\n\t\theight = floor(abs(z2-z1)-0.001)+1\n\t\t\n\t\tx3, y3, z3 = (v1 if d < 0.0 else v2 for (v1,v2,d) in zip(self.BBOX[0], self.BBOX[1], (dx, dy, dz)))\n\t\tx4, y4, z4 = (v2-v1 if d < 0.0 else v1-v2 for (v1,v2,d) in zip(self.BBOX[0], self.BBOX[1], (dx, dy, dz)))\n\t\t\n\t\tz5 = (0.0 if dz < 0.0 else z4)\n\t\t\n\t\tox += x3\n\t\toy += y3\n\t\toz += z3\n\t\t\n\t\tnx += x3\n\t\tny += y3\n\t\tnz += z3\n\t\t\n\t\tsx, sy, sz = (v%1.0 if d < 0.0 else 1.0-(v%1.0) for v,d in zip((ox,oy,oz),(dx,dy,dz))) # sub\n\t\tgx, gy, gz = (-1 if d < 0.0 else 1 for d in (dx, dy, dz)) # direction (\"go\")\n\t\twx, wy, wz = (0.001 if d < 0.0 else 0.999 for d in (dx, dy, dz)) # cell offset when hitting box\n\t\tvx, vy, vz = (max(0.00001,abs(d)) for d in (dx, dy, dz)) # abs velocity\n\t\tcx, cy, cz = (int(floor(v)) for v in (ox, oy, oz)) # cell\n\t\tdcx, dcy, dcz = 
(abs(int(floor(v))-c) for c,v in zip((cx,cy,cz),(nx,ny,nz))) # cell delta / count\n\t\t\n\t\twalkable = walkable and dz < 0.0\n\t\t\n\t\tdef sfix(sx,sy,sz):\n\t\t\treturn tuple(v if d < 0.0 else 1.0-v for (v,d) in zip((sx,sy,sz),(dx,dy,dz)))\n\t\t\n\t\t# flags to indicate if we've screwed with a value\n\t\tkeep_x = True\n\t\tkeep_y = True\n\t\tkeep_z = True\n\t\t\n\t\tdc = dcx+dcy+dcz\n\t\t\n\t\tfor i in xrange(dc):\n\t\t\t# get our lovely factoriffic stuff\n\t\t\tcalc_x = sx/vx\n\t\t\tcalc_y = sy/vy\n\t\t\tcalc_z = sz/vz\n\t\t\t\n\t\t\ttake_x = calc_x < calc_y and calc_x < calc_z\n\t\t\ttake_y = (not take_x) and calc_y < calc_z\n\t\t\ttake_z = (not take_x) and (not take_y)\n\t\t\t\n\t\t\tif take_x:\n\t\t\t\t# X trace\n\t\t\t\tt = sx/vx\n\t\t\t\tsy -= t*vy\n\t\t\t\tsz -= t*vz\n\t\t\t\t\n\t\t\t\tif keep_x:\n\t\t\t\t\tcx += gx\n\t\t\t\tsx = 1.0\n\t\t\telif take_y:\n\t\t\t\t# Y trace\n\t\t\t\tt = sy/vy\n\t\t\t\tsx -= t*vx\n\t\t\t\tsz -= t*vz\n\t\t\t\t\n\t\t\t\tif keep_y:\n\t\t\t\t\tcy += gy\n\t\t\t\tsy = 1.0\n\t\t\telse:\n\t\t\t\t# Z trace\n\t\t\t\tt = sz/vz\n\t\t\t\tsx -= t*vx\n\t\t\t\tsy -= t*vy\n\t\t\t\t\n\t\t\t\tif keep_z:\n\t\t\t\t\tcz += gz\n\t\t\t\tsz = 1.0\n\t\t\t\n\t\t\t# cell check!\n\t\t\t\n\t\t\tax,ay,az = sfix(sx,sy,sz) # add this to cx,cy,cz\n\t\t\tncx,ncy,ncz = cx+ax,cy+ay,cz+az\n\t\t\tif not keep_x:\n\t\t\t\tncx = nx\n\t\t\tif not keep_y:\n\t\t\t\tncy = ny\n\t\t\tif not keep_z:\n\t\t\t\tncz = nz\n\t\t\t\n\t\t\tif take_x:\n\t\t\t\tfloor_check = not self.game.world.solid_check_box(\n\t\t\t\t\tcx+0.5-gx,ncy,ncz+1,\n\t\t\t\t\tcx+0.5,ncy+y4,ncz+z4+1\n\t\t\t\t\t\t)\n\t\t\t\tchecked_out_as_solid = self.game.world.solid_check_box(\n\t\t\t\t\tcx+0.5-gx,ncy,ncz,\n\t\t\t\t\tcx+0.5,ncy+y4,ncz+z4\n\t\t\t\t\t\t)\n\t\t\telif take_y:\n\t\t\t\tfloor_check = not self.game.world.solid_check_box(\n\t\t\t\t\tncx,cy+0.5-gy,ncz+1,\n\t\t\t\t\tncx+x4,cy+0.5,ncz+z4+1\n\t\t\t\t\t\t)\n\t\t\t\tchecked_out_as_solid = self.game.world.solid_check_box(\n\t\t\t\t\tncx,cy+0.5-gy,ncz,\n\t\t\t\t\tncx+x4,cy+0.5,ncz+z4\n\t\t\t\t\t\t)\n\t\t\telse:\n\t\t\t\tchecked_out_as_solid = self.game.world.solid_check_box(\n\t\t\t\t\tncx,ncy,cz+0.5-gz,\n\t\t\t\t\tncx+x4,ncy+y4,cz+0.5\n\t\t\t\t\t\t)\n\t\t\t\n\t\t\t#if self.game.world.test_if_solid(cx,cy,cz):\n\t\t\tif checked_out_as_solid:\n\t\t\t\tif take_x:\n\t\t\t\t\tif walkable and keep_x and floor_check:\n\t\t\t\t\t\tcz += 1\n\t\t\t\t\t\tonz = nz\n\t\t\t\t\t\tnz = cz+0.001\n\t\t\t\t\t\tself.antijerk_stairs += onz-nz\n\t\t\t\t\t\tkeep_x = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tcx -= gx\n\t\t\t\t\t\t#sx = 0.1\n\t\t\t\t\t\tif keep_x:\n\t\t\t\t\t\t\tnx = cx+wx\n\t\t\t\t\t\t\tself.velocity[0] *= -0.1\n\t\t\t\t\t\t\tkeep_x = False\n\t\t\t\telif take_y:\n\t\t\t\t\tif walkable and keep_y and floor_check:\n\t\t\t\t\t\tcz += 1\n\t\t\t\t\t\tonz = nz\n\t\t\t\t\t\tnz = cz+0.001\n\t\t\t\t\t\tself.antijerk_stairs += onz-nz\n\t\t\t\t\t\tkeep_z = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tcy -= gy\n\t\t\t\t\t\t#sy = 0.1\n\t\t\t\t\t\tif keep_y:\n\t\t\t\t\t\t\tny = cy+wy\n\t\t\t\t\t\t\tself.velocity[1] *= -0.1\n\t\t\t\t\t\t\tkeep_y = False\n\t\t\t\telif take_z:\n\t\t\t\t\tcz -= gz\n\t\t\t\t\t#sz = 0.1\n\t\t\t\t\tif keep_z:\n\t\t\t\t\t\tnz = cz+wz\n\t\t\t\t\t\t\n\t\t\t\t\t\tif gz < 0:\n\t\t\t\t\t\t\tself.grounded = True\n\t\t\t\t\t\tself.velocity[2] *= -0.1\n\t\t\t\t\t\tkeep_z = False\n\t\t\n\t\treturn nx-x3, ny-y3, nz-z3\n\t\n\tdef update(self, dt):\n\t\t# get new position\n\t\tnvec = tuple(self.origin[i] + self.velocity[i]*dt for i in xrange(3))\n\t\t\n\t\t(x1, y1, z1), (x2, y2, z2) = 
self.BBOX\n\t\t\n\t\tox, oy, oz = self.origin\n\t\tnx, ny, nz = nvec\n\t\t\n\t\t# trace each corner\n\t\t\n\t\t#for vbase in self.BVEC:\n\t\t#\tvx, vy, vz, walkable = vbase\n\t\t#\ttnx, tny, tnz = self.trace_vector(ox+vx, oy+vy, oz+vz, nx+vx, ny+vy, nz+vz, walkable)\n\t\t#\tnx, ny, nz = (v-vo for (v,vo) in zip((tnx,tny,tnz),(vx,vy,vz)))\n\t\t\n\t\tnx, ny, nz = self.trace_vector(ox, oy, oz, nx, ny, nz, self.walkable)\n\t\t\n\t\tfor i,vt in zip(xrange(3), (nx, ny, nz)):\n\t\t\tself.origin[i] = vt\n\nclass PlayerEntity(PhysicsEntity):\n\tARGS = PhysicsEntity.ARGS + [\"name\",\"keys\"]\n\tBBOX_STAND = ((-0.4, -0.4, -2.4),(0.4, 0.4, 0.4))\n\tBBOX_CROUCH = ((-0.4, -0.4, -1.4),(0.4, 0.4, 0.4))\n\t\n\tBBOX = BBOX_STAND\n\t\n\tdef set_game(self, idx, game):\n\t\tself.idx = idx\n\t\tself.game = game\n\t\t\n\t\tself.target_velocity = [0.0, 0.0, 0.0]\n\t\tself.cam_vx = self.cam_vy = 0.0\n\t\tself.antijerk_stairs = 0.0\n\t\tself.crouching = False\n\t\tself.walkable = True\n\t\t\n\t\tif game != None:\n\t\t\t# init\n\t\t\tif self.origin == None:\n\t\t\t\tx = self.game.world.lx//2 + 0.5\n\t\t\t\ty = self.game.world.ly//2 + 0.5\n\t\t\t\tz = self.game.world.lz + 0.5\n\t\t\t\tself.origin = [x,y,z]\n\t\t\t\n\t\t\tif self.orient_x == None:\n\t\t\t\tself.orient_x = 0.0\n\t\t\tif self.orient_z == None:\n\t\t\t\tself.orient_z = 0.0\n\t\t\t\n\t\t\tif self.velocity == None:\n\t\t\t\tself.velocity = [0.0, 0.0, 0.0]\n\t\t\t\n\t\t\tif self.keys == None:\n\t\t\t\tself.keys = 0\n\t\t\t\n\t\t\tif self.name == None:\n\t\t\t\tself.name = \"Griefer\" + repr(self.idx)\n\t\telse:\n\t\t\t# destroy\n\t\t\tpass\n\t\n\tdef set_camera(self):\n\t\tx,y,z = self.origin\n\t\treturn x,y,z+self.antijerk_stairs,self.orient_z,self.orient_x\n\t\n\tdef update(self, dt):\n\t\t#print dt\n\t\tcam_rmatrix = self.get_cam_matrix_noxrot()\n\t\t\n\t\tself.cam_vx = 0.0\n\t\tself.cam_vy = 0.0\n\t\t\n\t\t# fix antijerk\n\t\tself.antijerk_stairs *= exp(-10.0*dt)\n\t\t\n\t\t# deal with key changes\n\t\tif (self.keys & KEY_JUMP_BIT) and self.grounded and not self.crouching:\n\t\t\tself.velocity[2] = PLAYER_JUMP_HEIGHT\n\t\t\tself.grounded = False\n\t\t\n\t\tif (self.keys & KEY_MOVE_LEFT_BIT):\n\t\t\tif not (self.keys & KEY_MOVE_RIGHT_BIT):\n\t\t\t\tself.cam_vx = -1.0\n\t\telif (self.keys & KEY_MOVE_RIGHT_BIT):\n\t\t\tself.cam_vx = 1.0\n\t\t\n\t\tif (self.keys & KEY_MOVE_BACKWARD_BIT):\n\t\t\tif not (self.keys & KEY_MOVE_FORWARD_BIT):\n\t\t\t\tself.cam_vy = -1.0\n\t\telif (self.keys & KEY_MOVE_FORWARD_BIT):\n\t\t\tself.cam_vy = 1.0\n\t\t\n\t\tbvx = self.cam_vx*PLAYER_SPEED\n\t\tbvy = -self.cam_vy*PLAYER_SPEED\n\t\t\n\t\tif bool(self.keys & KEY_CROUCH_BIT) != self.crouching:\n\t\t\tif self.crouching:\n\t\t\t\t# uncrouch check\n\t\t\t\t(x1,y1,z1),(x2,y2,z2) = self.BBOX_STAND\n\t\t\t\tx,y,z = self.origin\n\t\t\t\t\n\t\t\t\tif not self.game.world.solid_check_box(x1+x,y1+y,z1+z+2,x2+x,y2+y,z2+z+0.1+1):\n\t\t\t\t\tself.origin[2] += 1.0\n\t\t\t\t\tself.BBOX = self.BBOX_STAND\n\t\t\t\t\tself.antijerk_stairs -= 1.0\n\t\t\t\t\tself.crouching = False\n\t\t\t\t\tself.walkable = True \n\t\t\telse:\n\t\t\t\t# crouch - no check needed\n\t\t\t\tself.origin[2] -= 1.0\n\t\t\t\tself.BBOX = self.BBOX_CROUCH\n\t\t\t\tself.antijerk_stairs += 1.0\n\t\t\t\tself.crouching = True\n\t\t\t\tself.walkable = False\n\t\t\n\t\tif (self.keys & KEY_CREEP_BIT) or self.crouching:\n\t\t\tbvx *= 0.5\n\t\t\tbvy *= 0.5\n\t\t\n\t\tq = (np.asmatrix([bvx,bvy,0.0])*cam_rmatrix)\n\t\t#for i in xrange(3):\n\t\t#\tself.velocity[i] *= (1.0-PLAYER_FRICTION*dt)\n\t\t\n\t\tself.target_velocity[0] = 
q[0,0]\n\t\tself.target_velocity[1] = q[0,1]\n\t\tself.target_velocity[2] = q[0,2]\n\t\t\n\t\tfor i in [0,1]: # don't do this with Z.\n\t\t#for i in [0,1,2]: # ok, maybe as a temp measure\n\t\t\t# TODO: get the math behind this right\n\t\t\tself.velocity[i] += (self.target_velocity[i] - self.velocity[i])*(1.0 - exp(-dt*5.0))\n\t\t\n\t\tself.velocity[2] -= OBJECT_GRAVITY*dt\n\t\t\n\t\tPhysicsEntity.update(self, dt)\n\t\n\t\n\tdef get_cam_matrix_noxrot(self):\n\t\tsrz,crz = sin(self.orient_z*pi/180.0),cos(self.orient_z*pi/180.0)\n\t\t\n\t\tcam_rmatrix = np.asmatrix(np.identity(3))\n\t\t\n\t\tcam_rmatrix *= np.asmatrix([\n\t\t\t[crz,srz,0.0],\n\t\t\t[-srz,crz,0.0],\n\t\t\t[0.0,0.0,1.0],\n\t\t])\n\t\t\n\t\treturn cam_rmatrix\n\t\n\tdef get_cam_matrix(self):\n\t\tsrx,crx = sin(self.orient_x*pi/180.0),cos(self.orient_x*pi/180.0)\n\t\tsrz,crz = sin(self.orient_z*pi/180.0),cos(self.orient_z*pi/180.0)\n\t\t\n\t\tcam_rmatrix = np.asmatrix(np.identity(3))\n\t\t\n\t\tcam_rmatrix *= np.asmatrix([\n\t\t\t[1.0,0.0,0.0],\n\t\t\t[0.0,crx,srx],\n\t\t\t[0.0,srx,-crx],\n\t\t])\n\t\t\n\t\tcam_rmatrix *= np.asmatrix([\n\t\t\t[crz,srz,0.0],\n\t\t\t[-srz,crz,0.0],\n\t\t\t[0.0,0.0,1.0],\n\t\t])\n\t\t\n\t\treturn cam_rmatrix\n\t\n\tdef on_mouse_motion(self, x, y, dx, dy):\n\t\tself.orient_z += dx*MOUSE_SENS_X\n\t\tself.orient_x -= dy*MOUSE_SENS_Y\n\t\n\tdef on_key_press(self, key, mod):\n\t\tif key == pyglet.window.key.W:\n\t\t\tself.keys |= KEY_MOVE_FORWARD_BIT\n\t\telif key == pyglet.window.key.S:\n\t\t\tself.keys |= KEY_MOVE_BACKWARD_BIT\n\t\telif key == pyglet.window.key.A:\n\t\t\tself.keys |= KEY_MOVE_LEFT_BIT\n\t\telif key == pyglet.window.key.D:\n\t\t\tself.keys |= KEY_MOVE_RIGHT_BIT\n\t\telif key == pyglet.window.key.SPACE:\n\t\t\tself.keys |= KEY_JUMP_BIT\n\t\telif key == pyglet.window.key.LCTRL:\n\t\t\tself.keys |= KEY_CROUCH_BIT\n\t\telif key == pyglet.window.key.LSHIFT:\n\t\t\tself.keys |= KEY_CREEP_BIT\n\t\n\tdef on_key_release(self, key, mod):\n\t\tif key == pyglet.window.key.W:\n\t\t\tself.keys &= ~KEY_MOVE_FORWARD_BIT\n\t\telif key == pyglet.window.key.S:\n\t\t\tself.keys &= ~KEY_MOVE_BACKWARD_BIT\n\t\telif key == pyglet.window.key.A:\n\t\t\tself.keys &= ~KEY_MOVE_LEFT_BIT\n\t\telif key == pyglet.window.key.D:\n\t\t\tself.keys &= ~KEY_MOVE_RIGHT_BIT\n\t\telif key == pyglet.window.key.SPACE:\n\t\t\tself.keys &= ~KEY_JUMP_BIT\n\t\telif key == pyglet.window.key.LCTRL:\n\t\t\tself.keys &= ~KEY_CROUCH_BIT\n\t\telif key == pyglet.window.key.LSHIFT:\n\t\t\tself.keys &= ~KEY_CREEP_BIT\n" ]
[ [ "numpy.asmatrix", "numpy.identity" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
baheytharwat/tinygrad
[ "acf652c3c524ee3214e9ce58d41113738cb833ae" ]
[ "test/test_ops.py" ]
[ "import os\nimport torch\nimport numpy as np\nimport unittest\nimport timeit\nimport functools\nfrom tinygrad.tensor import Tensor, DEFAULT_DEVICE, Device\n\ndef helper_test_op(shps, torch_fxn, tinygrad_fxn, atol=1e-6, rtol=1e-3, grad_atol=1e-6, grad_rtol=1e-3, forward_only=False, vals=None, a=-0.5, b=20):\n torch.manual_seed(0)\n if shps is None:\n ts = [torch.tensor(x, requires_grad=True) for x in vals]\n else:\n ts = [torch.tensor((np.random.random(size=x).astype(np.float32)+a)*b, requires_grad=True) for x in shps]\n\n tst = [Tensor(x.detach().numpy()) for x in ts]\n out = torch_fxn(*ts)\n ret = tinygrad_fxn(*tst)\n\n np.testing.assert_allclose(ret.cpu().data, out.detach().numpy(), atol=atol, rtol=rtol)\n\n if not forward_only:\n out.mean().backward()\n ret.mean().backward()\n\n for t, tt in zip(ts, tst):\n np.testing.assert_allclose(t.grad, tt.cpu().grad.data, atol=grad_atol, rtol=grad_rtol)\n\n # speed\n torch_fp = timeit.Timer(functools.partial(torch_fxn, *ts)).timeit(5) * 1000/5\n tinygrad_fp = timeit.Timer(functools.partial(tinygrad_fxn, *tst)).timeit(5) * 1000/5\n\n if not forward_only:\n torch_fbp = timeit.Timer(functools.partial(lambda f,x: f(*x).mean().backward(), torch_fxn, ts)).timeit(5) * 1000/5\n tinygrad_fbp = timeit.Timer(functools.partial(lambda f,x: f(*x).mean().backward(), tinygrad_fxn, tst)).timeit(5) * 1000/5\n else:\n torch_fbp, tinygrad_fbp = np.nan, np.nan\n\n print(\"testing %30r torch/tinygrad fp: %.2f / %.2f ms bp: %.2f / %.2f ms\" % (shps, torch_fp, tinygrad_fp, torch_fbp-torch_fp, tinygrad_fbp-tinygrad_fp))\n\nclass TestOps(unittest.TestCase):\n\n def test_add(self):\n helper_test_op([(45,65), (45,65)], lambda x,y: x+y, Tensor.add)\n def test_sub(self):\n helper_test_op([(45,65), (45,65)], lambda x,y: x-y, Tensor.sub)\n def test_mul(self):\n helper_test_op([(45,65), (45,65)], lambda x,y: x*y, Tensor.mul)\n def test_div(self):\n helper_test_op([(45,65), (45,65)], lambda x,y: x/y, Tensor.div)\n def test_pow(self):\n helper_test_op([(45,65), (45,65)], lambda x,y: x**y, Tensor.pow, a=0)\n def test_sqrt(self):\n helper_test_op([(45,65)], lambda x: x.sqrt(), Tensor.sqrt, a=0)\n def test_relu(self):\n helper_test_op([(45,65)], lambda x: x.relu(), Tensor.relu)\n def test_leakyrelu(self):\n helper_test_op([(45,65)], lambda x: torch.nn.functional.leaky_relu(x,0.01), Tensor.leakyrelu)\n def test_abs(self):\n helper_test_op([(45,65)], lambda x: torch.abs(x), Tensor.abs)\n def test_log(self):\n helper_test_op([(45,65)], lambda x: torch.log(x), Tensor.log)\n def test_exp(self):\n helper_test_op([(45,65)], lambda x: torch.exp(x), Tensor.exp)\n def test_sign(self):\n helper_test_op([(45,65)], lambda x: torch.sign(x), Tensor.sign)\n def test_sigmoid(self):\n helper_test_op([(45,65)], lambda x: x.sigmoid(), Tensor.sigmoid)\n def test_softplus(self):\n helper_test_op([(45,65)], lambda x: torch.nn.functional.softplus(x), Tensor.softplus, atol=1e-6, grad_atol=1e-6)\n def test_relu6(self):\n helper_test_op([(45,65)], lambda x: torch.nn.functional.relu6(x), Tensor.relu6)\n def test_hardswish(self):\n helper_test_op([(45,65)], lambda x: torch.nn.functional.hardswish(x), Tensor.hardswish, atol=1e-6, grad_atol=1e-6)\n def test_mish(self):\n def _mish_pytorch(x):\n return x*torch.tanh(torch.nn.functional.softplus(x))\n helper_test_op([(45,65)], _mish_pytorch, Tensor.mish, atol=1e-4)\n def test_dot(self):\n helper_test_op([(45,65), (65,100)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4)\n def test_multidot(self):\n helper_test_op([(10,45,65), (10,65,45)], lambda x,y: x @ y, 
Tensor.dot, atol=1e-4)\n helper_test_op([(3,3,45,65), (3,3,65,45)], lambda x,y: x @ y, Tensor.dot, atol=1e-4)\n def test_sum(self):\n helper_test_op([(45,3)], lambda x: x.sum(), Tensor.sum)\n helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=(1,2)), lambda x: Tensor.sum(x, axis=(1,2)))\n helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=1), lambda x: Tensor.sum(x, axis=1))\n def test_max(self):\n helper_test_op([(45,3)], lambda x: x.max(), Tensor.max)\n helper_test_op([(45,3)], lambda x: x.max().mul(0.5), lambda x: Tensor.max(x).mul(0.5))\n helper_test_op(None, lambda x: x.max().mul(0.5), lambda x: Tensor.max(x).mul(0.5),\n vals=[\n [[1.0,1.0,0.0,1.0]],\n ])\n helper_test_op([(3,4,5,6)], lambda x: x.max(axis=1)[0], lambda x: Tensor.max(x, axis=1))\n def test_mean_axis(self):\n helper_test_op([(3,4,5,6)], lambda x: x.mean(axis=(1,2)), lambda x: Tensor.mean(x, axis=(1,2)))\n def test_logsoftmax(self):\n helper_test_op([(45,65)], lambda x: torch.nn.LogSoftmax(dim=1)(x), Tensor.logsoftmax, atol=1e-7, grad_atol=1e-7)\n def test_tanh(self):\n helper_test_op([(45,65)], lambda x: x.tanh(), Tensor.tanh, atol=1e-6, grad_atol=1e-6)\n def test_topo_sort(self):\n helper_test_op([(45,65)], lambda x: (x+x)*x, lambda x: x.add(x).mul(x), atol=1e-6, grad_atol=1e-6)\n\n def test_scalar_mul(self):\n helper_test_op([(45,65)], lambda x: x*2, lambda x: x*2)\n def test_scalar_rmul(self):\n helper_test_op([(45,65)], lambda x: 2*x, lambda x: 2*x)\n\n def test_scalar_sub(self):\n helper_test_op([(45,65)], lambda x: x-2, lambda x: x-2)\n def test_scalar_rsub(self):\n helper_test_op([(45,65)], lambda x: 2-x, lambda x: 2-x)\n\n def test_broadcast_full(self):\n for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul),\n (torch.div, Tensor.div), (torch.pow, Tensor.pow)]:\n for shapes in [((5,13,24,16), (5,1,24,1)), ((1,3,1,7,1), (2,1,5,1,8))]:\n with self.subTest(op=torch_op.__name__, shapes=shapes):\n helper_test_op(shapes, torch_op, tinygrad_op, a=-0.5 if tinygrad_op != Tensor.pow else 0.0)\n\n\n def test_broadcast_partial(self):\n for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul),\n (torch.div, Tensor.div), (torch.pow, Tensor.pow)]:\n for shapes in [((1,32,32,32), (1,32,1,1)), ((5,13,24,16,2), (1,13,24,1,1)),\n ((4,1), (4,5)), ((1,4), (5,4))]:\n with self.subTest(op=torch_op.__name__, shapes=shapes):\n # NOTE: ANE backwards?\n helper_test_op(shapes, torch_op, tinygrad_op, a=-0.5 if tinygrad_op != Tensor.pow else 0.0)\n\n def test_slice(self):\n helper_test_op([(3,3,3,3)], lambda x: x[1:2], lambda x: x[1:2])\n helper_test_op([(3,3,3,3)], lambda x: x[1:2, 1:2], lambda x: x[1:2, 1:2])\n helper_test_op([(3,3,3,3)], lambda x: x[1:2, 1:2, 0:-1], lambda x: x[1:2, 1:2, 0:-1])\n\n def test_pad2d(self):\n helper_test_op([(3,3,3,3)], lambda x: torch.nn.functional.pad(x, (1,2,3,4)), lambda x: x.pad2d(padding=(1,2,3,4)))\n\n def test_transpose(self):\n helper_test_op([(3,3,3)], lambda x: x.transpose(1,2), lambda x: x.transpose(order=(0,2,1)))\n # This is failing on GPU because the dim is too large\n #helper_test_op([(21,22,23,24)], lambda x: x.movedim((3,0,2,1),(0,1,2,3)), lambda x: x.transpose(order=(3,0,2,1)))\n helper_test_op([(3,4,5,6)], lambda x: x.movedim((3,2,1,0),(0,1,2,3)), lambda x: x.transpose(order=(3,2,1,0)))\n\n def test_reshape(self):\n helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,3,6,6)), lambda x: x.reshape(shape=(-1,3,6,6)))\n helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,1,6,6)), lambda 
x: x.reshape(shape=(-1,1,6,6)))\n\n def test_detach(self):\n helper_test_op([(4,3,6,6)], lambda x: x.detach(), lambda x: x.detach(), forward_only=True)\n\n def test_conv2d(self):\n for bs in [1,8]:\n for cin in [1,3]:\n for groups in [1,3] if cin == 3 else [1]:\n for H in [1,2,5]:\n for W in [1,2,3,5]:\n with self.subTest(batch_size=bs, channels=cin, groups=groups, height=H, width=W):\n helper_test_op([(bs,cin,11,28), (6,cin//groups,H,W)],\n lambda x,w: torch.nn.functional.conv2d(x,w,groups=groups).relu(),\n lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), atol=1e-4, grad_rtol=1e-5)\n\n def test_strided_conv2d(self):\n bs = 4\n cin = 3\n H,W = 3,3\n with self.subTest(stride := 2):\n helper_test_op([(bs,cin,11,28), (4,cin,H,W)],\n lambda x,w: torch.nn.functional.conv2d(x,w,stride=2).relu(),\n lambda x,w: Tensor.conv2d(x,w,stride=stride).relu(), atol=1e-4)\n with self.subTest(stride := (2,1)):\n helper_test_op([(bs,cin,11,28), (4,cin,H,W)],\n lambda x,w: torch.nn.functional.conv2d(x,w,stride=stride).relu(),\n lambda x,w: Tensor.conv2d(x,w,stride=(2,1)).relu(), atol=1e-4)\n\n def test_maxpool2d(self):\n for ksz in [(2,2), (3,3), (3,2), (5,5), (5,1)]:\n with self.subTest(kernel_size=ksz):\n helper_test_op([(32,2,110,28)],\n lambda x: torch.nn.functional.max_pool2d(x, kernel_size=ksz),\n # TODO: why is this tolerance so high?\n lambda x: Tensor.max_pool2d(x, kernel_size=ksz), grad_atol=1e-4)\n\n def test_avgpool2d(self):\n shape = (32,2,111,28)\n for ksz in [(2,2), (3,3), (3,2), (5,5), (5,1), shape[2:]]:\n with self.subTest(kernel_size=ksz):\n helper_test_op([shape],\n lambda x: torch.nn.functional.avg_pool2d(x, kernel_size=ksz),\n lambda x: Tensor.avg_pool2d(x, kernel_size=ksz), rtol=1e-5)\n\n\n def test_upsample2d_nearest(self):\n for sf in [1, 2, 3, 4, 5]:\n with self.subTest(scale_factor=sf):\n helper_test_op([(32,2,110,28)],\n lambda x: torch.nn.functional.interpolate(x, scale_factor=sf, mode='nearest'),\n lambda x: Tensor.upsample_nearest2d(x, scale_factor=sf), forward_only=True)\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n" ]
[ [ "torch.abs", "torch.nn.LogSoftmax", "numpy.random.random", "torch.sign", "torch.manual_seed", "torch.nn.functional.relu6", "torch.reshape", "torch.nn.functional.avg_pool2d", "torch.nn.functional.conv2d", "torch.tensor", "torch.exp", "torch.log", "torch.nn.functional.leaky_relu", "torch.nn.functional.hardswish", "torch.nn.functional.interpolate", "torch.nn.functional.max_pool2d", "torch.nn.functional.softplus", "torch.nn.functional.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
onlyrico/contextualized-topic-models
[ "ac338eab6601cd34475d490ae8072fecb73bb0c2" ]
[ "contextualized_topic_models/evaluation/measures.py" ]
[ "from gensim.corpora.dictionary import Dictionary\nfrom gensim.models.coherencemodel import CoherenceModel\nfrom gensim.models import KeyedVectors\nimport gensim.downloader as api\nfrom scipy.spatial.distance import cosine\nimport abc\n\nfrom contextualized_topic_models.evaluation.rbo import rbo\nimport numpy as np\nimport itertools\n\n\nclass Measure:\n def __init__(self):\n pass\n\n def score(self):\n pass\n\n\nclass TopicDiversity(Measure):\n def __init__(self, topics):\n super().__init__()\n self.topics = topics\n\n def score(self, topk=25):\n \"\"\"\n :param topk: topk words on which the topic diversity will be computed\n :return:\n \"\"\"\n if topk > len(self.topics[0]):\n raise Exception('Words in topics are less than topk')\n else:\n unique_words = set()\n for t in self.topics:\n unique_words = unique_words.union(set(t[:topk]))\n td = len(unique_words) / (topk * len(self.topics))\n return td\n\n\nclass Coherence(abc.ABC):\n \"\"\"\n :param topics: a list of lists of the top-k words\n :param texts: (list of lists of strings) represents the corpus on which the empirical frequencies of words are computed\n \"\"\"\n def __init__(self, topics, texts):\n self.topics = topics\n self.texts = texts\n self.dictionary = Dictionary(self.texts)\n\n @abc.abstractmethod\n def score(self):\n pass\n\n\nclass CoherenceNPMI(Coherence):\n def __init__(self, topics, texts):\n super().__init__(topics, texts)\n\n def score(self, topk=10, per_topic=False):\n \"\"\"\n :param topk: how many most likely words to consider in the evaluation\n :param per_topic: if True, returns the coherence value for each topic (default: False)\n :return: NPMI coherence\n \"\"\"\n if topk > len(self.topics[0]):\n raise Exception('Words in topics are less than topk')\n else:\n npmi = CoherenceModel(topics=self.topics, texts=self.texts, dictionary=self.dictionary,\n coherence='c_npmi', topn=topk)\n if per_topic:\n return npmi.get_coherence_per_topic()\n else:\n return npmi.get_coherence()\n\nclass CoherenceUMASS(Coherence):\n def __init__(self, topics, texts):\n super().__init__(topics, texts)\n\n def score(self, topk=10, per_topic=False):\n \"\"\"\n :param topk: how many most likely words to consider in the evaluation\n :param per_topic: if True, returns the coherence value for each topic (default: False)\n :return: UMass coherence\n \"\"\"\n if topk > len(self.topics[0]):\n raise Exception('Words in topics are less than topk')\n else:\n umass = CoherenceModel(topics=self.topics, texts=self.texts, dictionary=self.dictionary,\n coherence='u_mass', topn=topk)\n if per_topic:\n return umass.get_coherence_per_topic()\n else:\n return umass.get_coherence()\n\nclass CoherenceUCI(Coherence):\n def __init__(self, topics, texts):\n super().__init__(topics, texts)\n\n def score(self, topk=10, per_topic=False):\n \"\"\"\n :param topk: how many most likely words to consider in the evaluation\n :param per_topic: if True, returns the coherence value for each topic (default: False)\n :return: UCI coherence\n \"\"\"\n if topk > len(self.topics[0]):\n raise Exception('Words in topics are less than topk')\n else:\n uci = CoherenceModel(topics=self.topics, texts=self.texts, dictionary=self.dictionary,\n coherence='c_uci', topn=topk)\n if per_topic:\n return uci.get_coherence_per_topic()\n else:\n return uci.get_coherence()\n\nclass CoherenceCV(Coherence):\n def __init__(self, topics, texts):\n super().__init__(topics, texts)\n\n def score(self, topk=10, per_topic=False):\n \"\"\"\n :param topk: how many most likely words to consider in the 
evaluation\n :param per_topic: if True, returns the coherence value for each topic (default: False)\n :return: C_V coherence\n \"\"\"\n if topk > len(self.topics[0]):\n raise Exception('Words in topics are less than topk')\n else:\n cv = CoherenceModel(topics=self.topics, texts=self.texts, dictionary=self.dictionary,\n coherence='c_v', topn=topk)\n if per_topic:\n return cv.get_coherence_per_topic()\n else:\n return cv.get_coherence()\n\n\nclass CoherenceWordEmbeddings(Measure):\n def __init__(self, topics, word2vec_path=None, binary=False):\n \"\"\"\n :param topics: a list of lists of the top-n most likely words\n :param word2vec_path: if word2vec_file is specified, it retrieves the word embeddings file (in word2vec format) to\n compute similarities between words, otherwise 'word2vec-google-news-300' is downloaded\n :param binary: if the word2vec file is binary\n \"\"\"\n super().__init__()\n self.topics = topics\n self.binary = binary\n if word2vec_path is None:\n self.wv = api.load('word2vec-google-news-300')\n else:\n self.wv = KeyedVectors.load_word2vec_format(word2vec_path, binary=binary)\n\n def score(self, topk=10, binary= False):\n \"\"\"\n :param topk: how many most likely words to consider in the evaluation\n :return: topic coherence computed on the word embeddings similarities\n \"\"\"\n if topk > len(self.topics[0]):\n raise Exception('Words in topics are less than topk')\n else:\n arrays = []\n for index, topic in enumerate(self.topics):\n if len(topic) > 0:\n local_simi = []\n for word1, word2 in itertools.combinations(topic[0:topk], 2):\n if word1 in self.wv.vocab and word2 in self.wv.vocab:\n local_simi.append(self.wv.similarity(word1, word2))\n arrays.append(np.mean(local_simi))\n return np.mean(arrays)\n\n\nclass InvertedRBO(Measure):\n def __init__(self, topics):\n \"\"\"\n :param topics: a list of lists of words\n \"\"\"\n super().__init__()\n self.topics = topics\n\n def score(self, topk = 10, weight=0.9):\n \"\"\"\n :param weight: p (float), default 1.0: Weight of each agreement at depth d:\n p**(d-1). 
When set to 1.0, there is no weight, the rbo returns to average overlap.\n :return: rank_biased_overlap over the topics\n \"\"\"\n if topk > len(self.topics[0]):\n raise Exception('Words in topics are less than topk')\n else:\n collect = []\n for list1, list2 in itertools.combinations(self.topics, 2):\n rbo_val = rbo.rbo(list1[:topk], list2[:topk], p=weight)[2]\n collect.append(rbo_val)\n return 1 - np.mean(collect)\n\n\nclass Matches(Measure):\n def __init__(self, doc_distribution_original_language, doc_distribution_unseen_language):\n \"\"\"\n :param doc_distribution_original_language: numpy array of the topical distribution of\n the documents in the original language (dim: num docs x num topics)\n :param doc_distribution_unseen_language: numpy array of the topical distribution of the\n documents in an unseen language (dim: num docs x num topics)\n \"\"\"\n super().__init__()\n self.orig_lang_docs = doc_distribution_original_language\n self.unseen_lang_docs = doc_distribution_unseen_language\n if len(self.orig_lang_docs) != len(self.unseen_lang_docs):\n raise Exception('Distributions of the comparable documents must have the same length')\n\n def score(self):\n \"\"\"\n :return: proportion of matches between the predicted topic in the original language and\n the predicted topic in the unseen language of the document distributions\n \"\"\"\n matches = 0\n for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs):\n if np.argmax(d1) == np.argmax(d2):\n matches = matches + 1\n return matches/len(self.unseen_lang_docs)\n\n\nclass KLDivergence(Measure):\n def __init__(self, doc_distribution_original_language, doc_distribution_unseen_language):\n \"\"\"\n :param doc_distribution_original_language: numpy array of the topical distribution of\n the documents in the original language (dim: num docs x num topics)\n :param doc_distribution_unseen_language: numpy array of the topical distribution of the\n documents in an unseen language (dim: num docs x num topics)\n \"\"\"\n super().__init__()\n self.orig_lang_docs = doc_distribution_original_language\n self.unseen_lang_docs = doc_distribution_unseen_language\n if len(self.orig_lang_docs) != len(self.unseen_lang_docs):\n raise Exception('Distributions of the comparable documents must have the same length')\n\n def score(self):\n \"\"\"\n :return: average kullback leibler divergence between the distributions\n \"\"\"\n kl_mean = 0\n for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs):\n kl_mean = kl_mean + kl_div(d1, d2)\n return kl_mean/len(self.unseen_lang_docs)\n\n\ndef kl_div(a, b):\n a = np.asarray(a, dtype=np.float)\n b = np.asarray(b, dtype=np.float)\n return np.sum(np.where(a != 0, a * np.log(a / b), 0))\n\n\nclass CentroidDistance(Measure):\n def __init__(self, doc_distribution_original_language, doc_distribution_unseen_language, topics, word2vec_path=None,\n binary=True, topk=10):\n \"\"\"\n :param doc_distribution_original_language: numpy array of the topical distribution of the\n documents in the original language (dim: num docs x num topics)\n :param doc_distribution_unseen_language: numpy array of the topical distribution of the\n documents in an unseen language (dim: num docs x num topics)\n :param topics: a list of lists of the top-n most likely words\n :param word2vec_path: if word2vec_file is specified, it retrieves the word embeddings\n file (in word2vec format) to compute similarities between words, otherwise\n 'word2vec-google-news-300' is downloaded\n :param binary: if the word2vec file is binary\n :param topk: max 
number of topical words\n \"\"\"\n super().__init__()\n self.topics = [t[:topk] for t in topics]\n self.orig_lang_docs = doc_distribution_original_language\n self.unseen_lang_docs = doc_distribution_unseen_language\n if len(self.orig_lang_docs) != len(self.unseen_lang_docs):\n raise Exception('Distributions of the comparable documents must have the same length')\n\n if word2vec_path is None:\n self.wv = api.load('word2vec-google-news-300')\n else:\n self.wv = KeyedVectors.load_word2vec_format(word2vec_path, binary=binary)\n\n def score(self):\n \"\"\"\n :return: average centroid distance between the words of the most likely topic of the\n document distributions\n \"\"\"\n cd = 0\n for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs):\n top_words_orig = self.topics[np.argmax(d1)]\n top_words_unseen = self.topics[np.argmax(d2)]\n\n centroid_lang = self.get_centroid(top_words_orig)\n centroid_en = self.get_centroid(top_words_unseen)\n\n cd += (1 - cosine(centroid_lang, centroid_en))\n return cd/len(self.unseen_lang_docs)\n\n def get_centroid(self, word_list):\n vector_list = []\n for word in word_list:\n if word in self.wv.vocab:\n vector_list.append(self.wv.get_vector(word))\n vec = sum(vector_list)\n return vec / np.linalg.norm(vec)\n\n" ]
[ [ "numpy.log", "numpy.asarray", "scipy.spatial.distance.cosine", "numpy.linalg.norm", "numpy.argmax", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
drivergroup/beliefs
[ "7e0b2a02d719f5b1c889d72ac1e9421971cc120b" ]
[ "beliefs/factors/discrete_factor.py" ]
[ "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2013-2017 pgmpy\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport copy\nimport numpy as np\n\n\nclass DiscreteFactor:\n\n def __init__(self, variables, cardinality, values=None, state_names=None):\n \"\"\"\n Args\n variables: list,\n variables in the scope of the factor\n cardinality: list,\n cardinalities of each variable, where len(cardinality)=len(variables)\n values: list,\n row vector of values of variables with ordering such that right-most variables\n defined in `variables` cycle through their values the fastest\n state_names: dictionary,\n mapping variables to their states, of format {label_name: ['state1', 'state2']}\n \"\"\"\n self.variables = list(variables)\n self.cardinality = list(cardinality)\n if values is None:\n self._values = None\n else:\n self._values = np.array(values).reshape(self.cardinality)\n self.state_names = state_names\n\n def __mul__(self, other):\n return self.product(other)\n\n def copy(self):\n \"\"\"Return a copy of the factor\"\"\"\n return self.__class__(self.variables,\n self.cardinality,\n self._values,\n copy.deepcopy(self.state_names))\n\n @property\n def values(self):\n return self._values\n\n def update_values(self, new_values):\n \"\"\"We make this available because _values is allowed to be None on init\"\"\"\n self._values = np.array(new_values).reshape(self.cardinality)\n\n def get_value_for_state_vector(self, dict_of_states):\n \"\"\"\n Return the value for a dictionary of variable states.\n\n Args\n dict_of_states: dictionary,\n of format {label_name1: 'state1', label_name2: 'True'}\n Returns\n probability, a float, the factor value for a specific combination of variable states\n \"\"\"\n assert sorted(dict_of_states.keys()) == sorted(self.variables), \\\n \"The keys for the dictionary of states must match the variables in factor scope.\"\n state_coordinates = []\n for var in self.variables:\n var_state = dict_of_states[var]\n idx_in_var_axis = self.state_names[var].index(var_state)\n state_coordinates.append(idx_in_var_axis)\n return self.values[tuple(state_coordinates)]\n\n def add_new_variables_from_other_factor(self, other):\n \"\"\"Add new variables from `other` factor to the factor.\"\"\"\n extra_vars = set(other.variables) - set(self.variables)\n # if all of these variables already exist there is nothing to do\n if len(extra_vars) == 0:\n return\n # otherwise, extend the values array\n slice_ = [slice(None)] * len(self.variables)\n slice_.extend([np.newaxis] * len(extra_vars))\n self._values = 
self._values[slice_]\n self.variables.extend(extra_vars)\n\n new_card_var = other.get_cardinality(extra_vars)\n self.cardinality.extend([new_card_var[var] for var in extra_vars])\n\n def get_cardinality(self, variables):\n return {var: self.cardinality[self.variables.index(var)] for var in variables}\n\n def product(self, other):\n left = self.copy()\n\n if isinstance(other, (int, float)):\n return self.values * other\n else:\n assert isinstance(other, DiscreteFactor), \\\n \"__mul__ is only defined between subclasses of DiscreteFactor\"\n right = other.copy()\n left.add_new_variables_from_other_factor(right)\n right.add_new_variables_from_other_factor(left)\n\n # reorder variables in right factor to match order in left\n source_axes = list(range(right.values.ndim))\n destination_axes = [right.variables.index(var) for var in left.variables]\n right.variables = [right.variables[idx] for idx in destination_axes]\n\n # rearrange values in right factor to correspond to the reordered variables\n right._values = np.moveaxis(right.values, source_axes, destination_axes)\n left._values = left.values * right.values\n return left\n\n def marginalize(self, vars):\n \"\"\"\n Args\n vars: list,\n variables over which to marginalize the factor\n Returns\n DiscreteFactor, whose scope is set(self.variables) - set(vars)\n \"\"\"\n phi = copy.deepcopy(self)\n\n var_indexes = []\n for var in vars:\n if var not in phi.variables:\n raise ValueError('{} not in scope'.format(var))\n else:\n var_indexes.append(self.variables.index(var))\n\n index_to_keep = sorted(set(range(len(self.variables))) - set(var_indexes))\n phi.variables = [self.variables[index] for index in index_to_keep]\n phi.cardinality = [self.cardinality[index] for index in index_to_keep]\n phi._values = np.sum(phi.values, axis=tuple(var_indexes))\n return phi\n" ]
[ [ "numpy.array", "numpy.moveaxis" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LinZichuan/AdMRL
[ "50a22d4d480e99125cc91cc65dfcc0df4a883ac6", "50a22d4d480e99125cc91cc65dfcc0df4a883ac6", "50a22d4d480e99125cc91cc65dfcc0df4a883ac6" ]
[ "main.py", "slbo/envs/mujoco/half_cheetah_2d_env.py", "slbo/utils/multi_layer_perceptron.py" ]
[ "import sys\nsys.path = ['./rllab/'] + sys.path\nprint (sys.path)\nimport pickle\nimport os,time\nfrom collections import deque\nimport tensorflow as tf\nimport numpy as np\nimport lunzi.nn as nn\nfrom lunzi.Logger import logger\nfrom slbo.utils.average_meter import AverageMeter\nfrom slbo.utils.flags import FLAGS\nfrom slbo.utils.dataset import Dataset, gen_dtype\nfrom slbo.utils.OU_noise import OUNoise\nfrom slbo.utils.normalizer import Normalizers\nfrom slbo.utils.tf_utils import get_tf_config\nfrom slbo.utils.runner import Runner\nfrom slbo.policies.gaussian_mlp_policy import GaussianMLPPolicy\nfrom slbo.envs.virtual_env import VirtualEnv\nfrom slbo.dynamics_model import DynamicsModel\nfrom slbo.v_function.mlp_v_function import MLPVFunction\nfrom slbo.partial_envs import make_env, make_task\nfrom slbo.loss.multi_step_loss import MultiStepLoss\nfrom slbo.algos.TRPO import TRPO\nfrom slbo.algos.ADVTASK import ADVTASK\nfrom slbo.utils.tf_utils import initialize_uninitialized\nimport click\nfrom gym.wrappers.monitor import Monitor\nimport gym\nimport scipy.misc\nimport scipy.ndimage\ndef render(env_, policy=None):\n logger.info('start render video...')\n observation = env_.reset()\n imgs = []\n return_ = 0.\n cnt_ = 0\n obs = []\n for t in range(200):\n cnt_ += 1\n observation = observation.reshape(1, -1)\n obs.append(observation)\n if policy is not None:\n action = policy.get_actions(observation)\n observation, reward, done, info = env_.step(action[0])\n if done: break\n return_ += reward\n else:\n action = env_.action_space.sample()\n observation, reward, done, info = env_.step(action)\n if done: break\n return_ += reward\n logger.info (f\"render {cnt_} steps, return = {return_:.6f}\")\n res = {'obs': obs, 'return': return_}\n return res\n\ndef eval_rollout(runner, p, des):\n logger.info(des)\n runner.reset()\n data, ep_infos = runner.run(p, FLAGS.plan.n_trpo_samples)\n logp = p(data.state).log_prob(data.action).reduce_sum(axis=1).reduce_mean()\n logp = tf.get_default_session().run(logp)\n print (\"state_mean:\", np.mean(data.state))\n print (\"action_mean:\", np.mean(data.action))\n print (\"warmup_logpac_mean:\", logp)\n\ndef testeval(policy, runner):\n runner.reset()\n _, ep_infos = runner.run(policy, FLAGS.rollout.n_test_samples)\n returns = [info['return'] for info in ep_infos]\n returns = np.mean(returns)\n return returns\n\ndef evaluate(settings, tag):\n res = {}\n for runner, policy, name in settings:\n runner.reset()\n _, ep_infos = runner.run(policy, FLAGS.rollout.n_test_samples)\n returns = np.array([ep_info['return'] for ep_info in ep_infos])\n res[name] = np.mean(returns)\n logger.info('Tag = %s, Reward on %s (%d episodes): mean = %.6f, std = %.6f', tag, name,\n len(returns), np.mean(returns), np.std(returns))\n return res['Real Env'], res['Virt Env']\n\n\ndef add_multi_step(src: Dataset, dst: Dataset):\n n_envs = 1\n dst.extend(src[:-n_envs])\n\n ending = src[-n_envs:].copy()\n ending.timeout = True\n dst.extend(ending)\n\n\ndef make_real_runner(n_envs, task_config=None):\n from slbo.envs.batched_env import BatchedEnv\n batched_env = BatchedEnv([make_env(FLAGS.env.id, task_config=task_config) for _ in range(n_envs)])\n return Runner(batched_env, rescale_action=True, **FLAGS.runner.as_dict())\n\n\[email protected]()\[email protected]('--setting', default='default')\[email protected]('--adv', default=1)\[email protected]('--gpu', default=0)\[email protected]('--debug', is_flag=True, default=False)\[email protected]('--taskname', default='Ant2D')\[email 
protected]('--verbose', is_flag=True, default=False)\[email protected]('--test', is_flag=True, default=False)\[email protected]('--warmupent', default=0.005)\[email protected]('--alpha', default=1.0)\[email protected]('--beta', default=1.0)\[email protected]('--snapshot', default=1)\[email protected]('--testadv', default=0)\[email protected]('--seed', default=1)\[email protected]('--nsample', default=10000)\[email protected]('--fixedvel', default=None)\[email protected]('--initnslbo', default=20)\[email protected]('--nslbo', default=3)\[email protected]('--warmniter', default=40)\[email protected]('--slboniter', default=20)\[email protected]('--piter', default=20)\[email protected]('--miter', default=100)\[email protected]('--atype', default='gae') # gae, 1step, ret, adv\[email protected]('--video', is_flag=True, default=False)\[email protected]('--maxstep', default=1)\[email protected]('--genadvstrategy', default=None)\[email protected]('--inittask', default='none')\[email protected]('--decay', default='joint')\[email protected]('--testgiven', default=None)\[email protected]('--testnum', default=1)\[email protected]('--testparam', default='')\ndef main(setting, adv, gpu, debug, taskname, verbose, test, warmupent, alpha, beta, snapshot, testadv, seed, nsample, fixedvel, initnslbo, nslbo, warmniter, slboniter, piter, miter, atype, video, maxstep, genadvstrategy, inittask, decay, testgiven, testnum, testparam):\n print ('warmupent:', warmupent)\n print (\"seed:\", seed)\n setting = os.path.join('./data/', setting)\n #FLAGS.run_id = setting\n FLAGS.rollout.n_train_samples = 10000\n FLAGS.rollout.n_dev_samples = 10000\n FLAGS.rollout.n_test_samples = 10000\n FLAGS.plan.n_trpo_samples = 10000\n if taskname == 'HC':\n FLAGS.env.id = 'HalfCheetahTask-v2'\n elif taskname == 'HC2D':\n FLAGS.env.id = 'HalfCheetah2D-v2'\n elif taskname == 'HClinearstate':\n FLAGS.env.id = 'HalfCheetahLinearState-v2'\n elif taskname == 'HCgoalstate':\n FLAGS.env.id = 'HalfCheetahGoalState-v2'\n elif taskname == 'Hopper2D':\n FLAGS.env.id = 'Hopper2D-v2'\n elif taskname == 'Walker2D':\n FLAGS.env.id = 'Walker2D-v2'\n elif taskname == 'Ant3D':\n FLAGS.env.id = 'Ant3DTask-v2'\n elif taskname == 'Ant2D':\n FLAGS.env.id = 'Ant2DTask-v2'\n else:\n raise Exception(f'Unsupported taskname: {taskname}')\n if not os.path.isdir(setting):\n os.makedirs(setting)\n if not test:\n filename = f'res_{taskname}_adv{adv}.txt'\n infofilename = f'res_{taskname}_adv{adv}.npy'\n filename = setting+'/'+filename\n infofilename = setting+'/'+infofilename\n fout = open(filename, 'w')\n else:\n maxstep = 100\n logger.info(f'fixedvel={fixedvel}')\n if testadv:\n logger.info('Test with adversarial generated tasks!')\n logger.info(f'testadv=1, maxstep={maxstep}, using model revert!')\n else:\n logger.info('We still do not consider this senario: test with random tasks')\n print ('adv=', adv)\n FLAGS.seed = seed\n FLAGS.set_seed()\n FLAGS.freeze()\n print (\"FLAGS.log_dir:\", FLAGS.log_dir)\n if test:\n model_load = f'{FLAGS.log_dir}/{taskname}-stage-{snapshot}.npy'\n else:\n model_load = None\n print (\"model_load:\", model_load)\n\n task = make_task(FLAGS.env.id)\n env = make_env(FLAGS.env.id, task_config=task)\n dim_state = int(np.prod(env.observation_space.shape))\n dim_action = int(np.prod(env.action_space.shape))\n\n env.verify()\n\n normalizers = Normalizers(dim_action=dim_action, dim_state=dim_state)\n normalizers_copy = Normalizers(dim_action=dim_action, dim_state=dim_state)\n normalizers_parameters = normalizers.parameters(trainable=False, 
non_trainable=True)\n normalizers_copy_parameters = normalizers_copy.parameters(trainable=False, non_trainable=True)\n copy_normalizers = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(normalizers_copy_parameters, normalizers_parameters)])\n revert_normalizers = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(normalizers_parameters, normalizers_copy_parameters)])\n\n dtype = gen_dtype(env, 'state action next_state reward done timeout')\n train_set = Dataset(dtype, FLAGS.rollout.max_buf_size)\n dev_set = Dataset(dtype, FLAGS.rollout.max_buf_size)\n task_train_sets = [Dataset(dtype, FLAGS.rollout.max_buf_size) for i in range(100)]\n task_dev_sets = [Dataset(dtype, FLAGS.rollout.max_buf_size) for i in range(100)]\n\n print (\"state and action dim:\", dim_state, dim_action)\n policy = GaussianMLPPolicy(dim_state, dim_action, normalizer=normalizers.state, **FLAGS.policy.as_dict())\n warmup_policy = GaussianMLPPolicy(dim_state, dim_action, normalizer=normalizers.state, **FLAGS.policy.as_dict())\n print (policy.parameters())\n print (warmup_policy.parameters())\n sync_warmup_policy = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(warmup_policy.parameters(), policy.parameters())])\n # batched noises\n noise = OUNoise(env.action_space, theta=FLAGS.OUNoise.theta, sigma=FLAGS.OUNoise.sigma, shape=(1, dim_action))\n vfn = MLPVFunction(dim_state, [64, 64], normalizers.state)\n warmup_vfn = MLPVFunction(dim_state, [64, 64], normalizers.state)\n sync_warmup_vfn = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(warmup_vfn.parameters(), vfn.parameters())])\n\n model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)\n lazy_model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)\n warmup_model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)\n sync_warmup_model = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(warmup_model.parameters(), model.parameters())])\n shadow_models = [DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes) for n in range(FLAGS.warmup.n_shadow_models)]\n sync_model_from_lazymodel = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(model.parameters(), lazy_model.parameters())])\n sync_model_to_lazymodel = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(lazy_model.parameters(), model.parameters())])\n\n virt_env = VirtualEnv(model, make_env(FLAGS.env.id, task_config=task), FLAGS.plan.n_envs, opt_model=FLAGS.slbo.opt_model)\n virt_runner = Runner(virt_env, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})\n virt_env_copy = VirtualEnv(model, make_env(FLAGS.env.id, task_config=task), nsample//FLAGS.plan.max_steps, opt_model=FLAGS.slbo.opt_model)\n virt_runner_copy = Runner(virt_env_copy, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})\n extra_runners = {}\n for sam in [1000, 2000, 4000, 8000, 10000, 16000]:\n extra_runners[f'train{sam}']= Runner(VirtualEnv(model, make_env(FLAGS.env.id, task_config=task), sam//FLAGS.plan.max_steps, opt_model=FLAGS.slbo.opt_model), **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})\n extra_runners[f'collect{sam}'] = make_real_runner(sam//FLAGS.plan.max_steps, task_config=task)\n\n warmup_virt_env = VirtualEnv(warmup_model, make_env(FLAGS.env.id, task_config=task), FLAGS.plan.n_envs, opt_model=FLAGS.slbo.opt_model)\n warmup_virt_runner = Runner(warmup_virt_env, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})\n\n logger.info('FLAGS.plan.n_envs=%d' % FLAGS.plan.n_envs)\n 
shadow_envs = [VirtualEnv(shadow_model, make_env(FLAGS.env.id, task_config=task), FLAGS.plan.n_envs, opt_model=FLAGS.slbo.opt_model) for shadow_model in shadow_models]\n shadow_runners = [Runner(shadow_env, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps}) for shadow_env in shadow_envs]\n\n criterion_map = {\n 'L1': nn.L1Loss(),\n 'L2': nn.L2Loss(),\n 'MSE': nn.MSELoss(),\n }\n criterion = criterion_map[FLAGS.model.loss]\n loss_mod = MultiStepLoss(model, normalizers, dim_state, dim_action, criterion, FLAGS.model.multi_step)\n loss_mod.build_backward(FLAGS.model.lr, FLAGS.model.weight_decay)\n shadow_loss_mods = [MultiStepLoss(shadow_model, normalizers, dim_state, dim_action, criterion, FLAGS.model.multi_step) for shadow_model in shadow_models]\n for shadow_loss_mod in shadow_loss_mods:\n shadow_loss_mod.build_backward(FLAGS.model.lr, FLAGS.model.weight_decay)\n algo = TRPO(vfn=vfn, policy=policy, dim_state=dim_state, dim_action=dim_action, **FLAGS.TRPO.as_dict())\n advtask = ADVTASK(dim_state, dim_action, policy, vfn, warmup_policy, warmup_vfn, task, alpha=alpha, beta=beta, nsample=nsample, atype=atype)\n tf.get_default_session().run(tf.global_variables_initializer())\n\n print (\"norm params:\", normalizers_parameters)\n print (\"norm_copy params:\", normalizers_copy_parameters)\n norm_before = tf.get_default_session().run(normalizers_parameters)\n print (\"norm_before:\", norm_before)\n\n assert FLAGS.algorithm != 'MF', \"don't support model free for now\"\n\n print (f\"n_envs for task: {nsample}//{FLAGS.plan.max_steps}={nsample//FLAGS.plan.max_steps}\")\n\n runners = {\n 'test': make_real_runner(FLAGS.plan.n_envs, task_config=task),\n 'collect': make_real_runner(FLAGS.plan.n_envs, task_config=task), #1\n 'collect_copy': make_real_runner(nsample//FLAGS.plan.max_steps, task_config=task), #1\n 'dev': make_real_runner(FLAGS.plan.n_envs, task_config=task),\n 'train': make_real_runner(FLAGS.plan.n_envs, task_config=task) if FLAGS.algorithm == 'MF' else virt_runner,\n 'train_copy': make_real_runner(nsample//FLAGS.plan.max_steps, task_config=task) if FLAGS.algorithm == 'MF' else virt_runner_copy,\n 'warmup_train': make_real_runner(FLAGS.plan.n_envs, task_config=task) if FLAGS.algorithm == 'MF' else warmup_virt_runner,\n }\n for name, runner in extra_runners.items():\n runners[name] = runner\n print (\"runner name is \", name)\n settings = [(runners['test'], policy, 'Real Env'), (runners['train'], policy, 'Virt Env')]\n for (i, runner) in enumerate(shadow_runners):\n settings.append((runner, policy, f'Shadow Env-{i}'))\n\n saver = nn.ModuleDict({'policy': policy, 'model': model, 'vfn': vfn, 'normalizers': normalizers}) #, 'loss_mod': loss_mod})\n print(saver)\n\n max_ent_coef = FLAGS.TRPO.ent_coef\n\n skip_metrics = []\n TASK_NUM = 0\n\n if test:\n verbose = True\n else:\n task.init()\n print (f\"task.params_={task.params_}, task.init_goal_vel={task.goal_velocity}\")\n\n if test:\n ITERS = testnum + 1\n warmup_n_iters = warmniter\n warmup_n_policy_iters = piter\n warmup_n_model_iters = miter\n slbo_n_iters = slboniter\n slbo_n_policy_iters = piter\n slbo_n_model_iters = miter\n else:\n ITERS = FLAGS.task.n_iters\n warmup_n_iters = warmniter\n warmup_n_policy_iters = piter\n warmup_n_model_iters = miter\n slbo_n_iters = slboniter\n slbo_n_policy_iters = piter\n slbo_n_model_iters = miter\n print (f\"Total Iters = {ITERS}\")\n alltaskres = []\n generated_adversarial_task = []\n init_generator = False\n logger.info(f'inittask:{inittask}')\n if not test:\n if inittask == 'none':\n 
pass\n elif not (os.path.exists(f'./{inittask}/{taskname}.trainset.task0.slbo0.pkl') and os.path.exists(f'./{inittask}/{taskname}.task0.saver.npy')):\n init_generator = True\n else:\n logger.info('Load the first task dataset!')\n for i in range(20):\n if not os.path.exists(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl'): continue\n traindata = pickle.load(open(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl', 'rb'))\n add_multi_step(traindata, train_set)\n add_multi_step(traindata, task_train_sets[0])\n logger.info(f'load trainset-{i} {len(traindata)}')\n\n for i in range(20):\n if not os.path.exists(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl'): continue\n devdata = pickle.load(open(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl', 'rb'))\n add_multi_step(devdata, task_dev_sets[0])\n logger.info(f'load devset-{i} {len(devdata)}')\n\n logger.info('Load the first task saver!')\n saver.load_state_dict(np.load(f'./{inittask}/{taskname}.task0.saver.npy', allow_pickle=True)[()])\n\n logger.info('Update all copies! (lazymodel, normalizers_copy)')\n tf.get_default_session().run(sync_model_to_lazymodel)\n tf.get_default_session().run(copy_normalizers)\n logger.info('Loaded normalizers:')\n load_norm = tf.get_default_session().run(normalizers_parameters)\n logger.info(load_norm)\n TASK_NUM = 1\n ########################## debug #########################\n #for task_idx in range(TASK_NUM):\n # total_loss = []\n # for scan in range(100):\n # samples = task_train_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)\n # loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)\n # total_loss.append(loss_i.mean())\n # total_loss = np.mean(total_loss)\n # print ('loaded model train loss:', total_loss)\n\n #for task_idx in range(TASK_NUM):\n # total_loss = []\n # for scan in range(100):\n # samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)\n # loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)\n # total_loss.append(loss_i.mean())\n # total_loss = np.mean(total_loss)\n # print ('loaded model val loss:', total_loss)\n ##exit(0)\n ########################## debug #########################\n else:\n test_summary = {\n 'task':[],\n 'random':[],\n 'warmup':[],\n 'warmupprocess':[],\n 'slbo':[],\n }\n logger.info('Testing mode!')\n train_tasknum = snapshot + 1\n test_tasknum = testnum\n logger.info(f'train_tasknum = {train_tasknum}, test_tasknum = {test_tasknum}')\n assert(testgiven is not None)\n if 'noent' in testparam: warmupent = 0.\n have_data = False\n\n task_generator = 'fixed' # random or fixed\n if testgiven[-4:] == '.pkl':\n f = testgiven\n logger.info(f'Load all tasks from {f}!')\n task.fixed_velocities = pickle.load(open(f, 'rb'))\n logger.info(f\"Test on task\")\n logger.info(task.fixed_velocities)\n logger.info(f\"Task number: {np.array(task.fixed_velocities).shape}\")\n else:\n f = f'{testgiven}/all_task_parameter.pkl'\n gen_adv_task = pickle.load(open(f, 'rb'))\n logger.info(f'Load all adversarial task from {f}!')\n task.fixed_velocities = gen_adv_task[train_tasknum: train_tasknum + test_tasknum]\n logger.info(f\"Test random method on task {train_tasknum}~{train_tasknum+test_tasknum}:\")\n logger.info(task.fixed_velocities)\n logger.info(f\"Task number: {np.array(task.fixed_velocities).shape}\")\n\n def load_data_during_test():\n if inittask != 'none':\n logger.info('Load 
the first task dataset!')\n for i in range(20):\n if not os.path.exists(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl'): continue\n traindata = pickle.load(open(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl', 'rb'))\n add_multi_step(traindata, train_set)\n add_multi_step(traindata, task_train_sets[0])\n logger.info(f'load task0 trainset{i} size={len(traindata)}')\n have_data = True\n\n for i in range(20):\n if not os.path.exists(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl'): continue\n devdata = pickle.load(open(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl', 'rb'))\n add_multi_step(devdata, task_dev_sets[0])\n logger.info(f'load task0 devset{i} size={len(devdata)}')\n have_data = True\n\n logger.info(f'Load all task dataset from {setting}!')\n for t in range(0,train_tasknum):\n for i in range(20):\n if not os.path.exists(f'./{setting}/{taskname}.trainset.task{t}.slbo{i}.pkl'): continue\n traindata = pickle.load(open(f'./{setting}/{taskname}.trainset.task{t}.slbo{i}.pkl', 'rb'))\n add_multi_step(traindata, train_set)\n add_multi_step(traindata, task_train_sets[t])\n logger.info(f'load task{t} trainset{i} size={len(traindata)}')\n if not os.path.exists(f'./{setting}/{taskname}.devset.task{t}.slbo{i}.pkl'): continue\n devdata = pickle.load(open(f'./{setting}/{taskname}.devset.task{t}.slbo{i}.pkl', 'rb'))\n add_multi_step(devdata, task_dev_sets[t])\n logger.info(f'load task{t} devset{i} size={len(devdata)}')\n have_data = True\n load_data_during_test()\n\n logger.info(f'Load the task{snapshot} saver!')\n saver.load_state_dict(np.load(f'./{setting}/{taskname}.task{snapshot}.saver.npy', allow_pickle=True)[()])\n\n logger.info('Update all copies! (lazymodel, normalizers_copy)')\n tf.get_default_session().run(sync_model_to_lazymodel)\n tf.get_default_session().run(copy_normalizers)\n logger.info('Loaded normalizers:')\n load_norm = tf.get_default_session().run(normalizers_parameters)\n logger.info(load_norm)\n\n TASK_NUM = train_tasknum\n TEST_TASK_NUM = 0\n ########################## debug #########################\n #if have_data:\n # for task_idx in range(TASK_NUM):\n # total_loss = []\n # for scan in range(100):\n # samples = task_train_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)\n # loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)\n # total_loss.append(loss_i.mean())\n # total_loss = np.mean(total_loss)\n # print ('loaded model train loss:', total_loss)\n\n # for task_idx in range(TASK_NUM):\n # total_loss = []\n # for scan in range(100):\n # samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)\n # loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)\n # total_loss.append(loss_i.mean())\n # total_loss = np.mean(total_loss)\n # print ('loaded model val loss:', total_loss)\n ##exit(0)\n ######################### debug #########################\n slbo_n_stages = nslbo\n print (f\"each task will do nslbo = {nslbo}\")\n for param in model.parameters():\n param.invalidate()\n\n all_task_parameter = []\n while (not test and TASK_NUM < ITERS) or (test and TEST_TASK_NUM < ITERS):\n # first task or maxstep, update the model. 
Otherwise, revert the model\n logger.info('Sync model from lazymodel')\n tf.get_default_session().run(sync_model_from_lazymodel)\n taskres = {}\n if 'goal_velocity' not in taskres.keys():\n taskres['goal_velocity'] = []\n if not test and inittask == 'none':\n slbo_n_stages = nslbo\n elif not test and TASK_NUM == 0:\n slbo_n_stages = initnslbo\n elif not test and TASK_NUM > 0:\n slbo_n_stages = nslbo\n\n time_start = time.time()\n trpo_warmup = []\n trpo_slbo = []\n surprisal = []\n train_losses_warmup = deque(maxlen=warmup_n_model_iters // FLAGS.model.validation_freq)\n train_losses_slbo = deque(maxlen=slbo_n_model_iters // FLAGS.model.validation_freq)\n val_losses_warmup = deque(maxlen=warmup_n_model_iters // FLAGS.model.validation_freq)\n val_losses_slbo = deque(maxlen=slbo_n_model_iters // FLAGS.model.validation_freq)\n # NOTE: For each test task, we should reset model to the loaded one, and randomly initialize policy and vfn\n #if test:\n # saver.load_state_dict(np.load(model_load, allow_pickle=True)[()])\n # logger.warning('Load model from %s', model_load)\n if test:\n logger.info(\"################################################## TESTING TASK %d ################################################\", TEST_TASK_NUM)\n logger.info(f'TEST_TASK_NUM={TEST_TASK_NUM}, TASK_NUM={TASK_NUM}')\n logger.warning('Revert model and normalizers')\n tf.get_default_session().run(sync_model_from_lazymodel)\n tf.get_default_session().run(revert_normalizers)\n else:\n logger.info(\"################################################## TRAINING TASK %d ################################################\", TASK_NUM)\n if test:\n test_returns = []\n test_summary['warmupprocess'].append([])\n test_summary['slbo'].append([])\n if not test: #and FLAGS.task.method == 'random':\n if inittask != 'none' and TASK_NUM == 1:\n if 'HClinearstate' in taskname:\n task.init([0.2] * task.n_params)\n else:\n task.init([0.] 
* task.n_params)\n else:\n if TASK_NUM > 0: #fix the 1st tasks during training\n if adv == 0:\n task.random_sample('uniform')\n elif adv == 2:\n task.random_sample('normal')\n elif adv == 1:\n if TASK_NUM == 1 and inittask != 'none':\n task.random_sample()\n print (f\"task.params_={task.params_}, task.init_goal_vel={task.goal_velocity}\")\n task.sample(adv=True)\n logger.info('Task Sampled: %s', task.goal_velocity)\n taskres['goal_velocity'].append(task.goal_velocity)\n all_task_parameter.append(task.goal_velocity)\n print (f\"task.params_={task.params_}, task.init_goal_vel={task.goal_velocity}\")\n if test:\n if task_generator == 'fixed':\n task.goal_velocity = task.fixed_velocities[TEST_TASK_NUM] #TODO\n logger.info('Task Fixed: %s', task.goal_velocity)\n if task_generator == 'random':\n task.sample(adv=False) #sample randomly\n logger.info('Task Sampled: %s', task.goal_velocity)\n if task_generator == 'adv':\n task.sample(adv=True) #sample adversarially\n logger.info('Task Sampled: %s', task.goal_velocity)\n generated_adversarial_task.append(task.goal_velocity)\n logger.info('Tasks dump!')\n assert (task_generator == 'fixed')\n test_summary['task'].append(task.goal_velocity)\n\n if FLAGS.task.reset_policy:\n # NOTE: reset policy and valuefunc\n logger.info(\"Resetting Policy\")\n pol_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())])\n tf.get_default_session().run(tf.variables_initializer(policy.parameters()))\n pol_params_after = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())])\n print (\"pol_params:\", np.linalg.norm(pol_params), \"pol_params_after_reset:\", np.linalg.norm(pol_params_after))\n logger.info(\"Resetting Valuefunc\")\n tf.get_default_session().run(tf.variables_initializer(vfn.parameters()))\n\n tf.get_default_session().run(tf.variables_initializer(warmup_policy.parameters()))\n tf.get_default_session().run(tf.variables_initializer(warmup_vfn.parameters()))\n for p in warmup_policy.parameters(): p.invalidate()\n for p in warmup_vfn.parameters(): p.invalidate()\n for p in policy.parameters(): p.invalidate()\n for p in vfn.parameters(): p.invalidate()\n\n last_end = None\n drops = []\n\n evaluate(settings, 'pre-warm-up')\n returns_pre_warmup = testeval(policy, runners['collect'])\n if test:\n test_returns.append(returns_pre_warmup)\n test_summary['random'].append(returns_pre_warmup)\n t1 = time.time()\n trpo_time = 0\n\n logger.info('----------------------------- Warmup for %d iterations ------------------------' % warmup_n_iters)\n if decay == 'joint':\n logger.info('Joint train from a joint dataset')\n elif decay == 'taskid':\n Z = np.sum([float(i+1) for i in range(0, TASK_NUM)])\n prop = [float(taskid+1) / Z for taskid in range(TASK_NUM)]\n logger.info(f'Sampling prop={prop}, Z={Z}')\n elif decay == 'none':\n Z = TASK_NUM\n prop = [1. 
/ TASK_NUM for _ in range(TASK_NUM)]\n logger.info(f'Sampling prop={prop}, Z={Z}')\n for i in range(warmup_n_iters):\n #exit(0)\n if TASK_NUM == 0 and not test and not model_load:\n logger.info('Break because TASK_NUM=0')\n break\n\n losses = deque(maxlen=warmup_n_model_iters)\n grad_norm_meter = AverageMeter()\n n_model_iters = warmup_n_model_iters\n drop_plot = 0\n if test and verbose:\n logger.info(f'warmup iter #{i}/{warmup_n_iters}, Do Not train Model during warmup of test time')\n if 'warmup_task_val_loss' not in taskres.keys():\n taskres['warmup_task_val_loss'] = [[] for _ in range(TASK_NUM)]\n\n if verbose: logger.info('Train Model for %d iterations' % n_model_iters)\n model_time = time.time()\n if not test or (test and have_data):\n for _ in range(n_model_iters):\n if decay == 'joint':\n samples = train_set.sample_multi_step(FLAGS.model.train_batch_size, 1, FLAGS.model.multi_step)\n else:\n all_samples = []\n for taskid in range(TASK_NUM):\n samples_i = task_train_sets[taskid].sample_multi_step(int(FLAGS.model.train_batch_size*prop[taskid])+1, 1, FLAGS.model.multi_step)\n all_samples.append(samples_i)\n samples = np.concatenate(all_samples, axis=1).view(np.recarray)\n _, train_loss, grad_norm = loss_mod.get_loss(\n samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout,\n fetch='train loss grad_norm')\n losses.append(train_loss.mean())\n grad_norm_meter.update(grad_norm)\n # ideally, we should define an Optimizer class, which takes parameters as inputs.\n # The `update` method of `Optimizer` will invalidate all parameters during updates.\n for param in model.parameters():\n param.invalidate()\n model_time = time.time() - model_time\n\n if i % FLAGS.model.validation_freq == 0:\n task_val_loss = []\n val_time = time.time()\n for task_idx in range(TASK_NUM):\n total_loss = []\n for scan in range(FLAGS.rollout.n_dev_samples // FLAGS.model.dev_batch_size + 1):\n samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)\n loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)\n total_loss.append(loss_i.mean())\n total_loss = np.mean(total_loss)\n task_val_loss.append(total_loss)\n taskres['warmup_task_val_loss'][task_idx].append(total_loss)\n val_time = time.time() - val_time\n val_loss = np.mean(task_val_loss)\n val_losses_warmup.append(val_loss)\n train_losses_warmup.append(np.mean(losses))\n if np.isnan(val_loss) or np.isnan(np.mean(losses)):\n logger.info('nan! 
%s %s', np.isnan(val_loss), np.isnan(np.mean(losses)))
                logger.info('# Warmup Iter %3d: Loss = [train = %.3f, dev = %.3f], after %d steps, grad_norm = %.6f, drop = %.2f, model_time=%d, trpo_time=%d, val_time=%d',
                            i, np.mean(losses), val_loss, n_model_iters, grad_norm_meter.get(), drop_plot, model_time, trpo_time, val_time)
                logger.info(f'# task_val_loss: {task_val_loss}')

            if verbose: logger.info('Train policy for %d iterations' % warmup_n_policy_iters)
            trpo_time = time.time()
            for n_updates in range(warmup_n_policy_iters):
                if FLAGS.algorithm != 'MF' and FLAGS.warmup.start == 'buffer':
                    runners['train'].set_state(train_set.sample(FLAGS.plan.n_envs).state)
                else:
                    runners['train'].reset()

                data, ep_infos = runners['train'].run(policy, FLAGS.plan.n_trpo_samples)
                advantages, advantages_params, values, td, coef_mat, coef_mat_returns, reward_ctrl, x_velocity, begin_mark = runners['train'].compute_advantage(vfn, data, task)
                dist_mean, dist_std, vf_loss, plotinfo = algo.train(warmupent, data, advantages, values)
                trpo_warmup.append(plotinfo)
                returns = [info['return'] for info in ep_infos]
                if n_updates == 0:
                    if last_end is not None:
                        drop_plot = last_end - np.mean(returns)
                        drops.append(last_end - np.mean(returns))
                    last_end = np.mean(returns)
                if n_updates == warmup_n_policy_iters-1:
                    logger.info('[TRPO] # %d: n_episodes = %d, returns: {mean = %.0f, std = %.0f}, '
                                'dist std = %.10f, dist mean = %.10f, vf_loss = %.3f',
                                n_updates, len(returns), np.mean(returns), np.std(returns) / np.sqrt(len(returns)),
                                dist_std, dist_mean, vf_loss)
            trpo_time = time.time() - trpo_time

            if i % FLAGS.warmup.n_evaluate_iters == 0 or i == warmup_n_iters-1:  # and i != 0:
                real_eval, virt_eval = evaluate(settings, 'iteration')
                if 'warmup_real_eval' not in taskres.keys(): taskres['warmup_real_eval'] = []
                if 'warmup_virt_eval' not in taskres.keys(): taskres['warmup_virt_eval'] = []
                taskres['warmup_real_eval'].append(real_eval)
                taskres['warmup_virt_eval'].append(virt_eval)
                if test:
                    test_summary['warmupprocess'][TEST_TASK_NUM].append(real_eval)

        if not test:
            res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f"./{setting}/{taskname}-task{TASK_NUM}-warmup/", force=True, video_callable=lambda episode_id: True), policy)
        else:
            res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f"./{setting}/{taskname}-testtask{TEST_TASK_NUM}-warm{warmup_n_iters}-warmup/", force=True, video_callable=lambda episode_id: True), policy)
        taskres['warmup_monitor'] = [res]

        t2 = time.time()
        warmup_time = t2 - t1
        evaluate(settings, 'post-warm-up')
        returns_post_warmup = testeval(policy, runners['collect'])
        if test:
            test_returns.append(returns_post_warmup)
            test_summary['warmup'].append(returns_post_warmup)
            print("warmupprocess:", test_summary['warmupprocess'][TEST_TASK_NUM])

        logger.info('Sync warmup policy and vfn and model')
        tf.get_default_session().run([sync_warmup_policy, sync_warmup_vfn, sync_warmup_model])
        for p in warmup_policy.parameters(): p.invalidate()
        for p in warmup_vfn.parameters(): p.invalidate()
        for p in warmup_model.parameters(): p.invalidate()
        for p in policy.parameters(): p.invalidate()
        task.parameters().invalidate()

        pol_params, warm_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters()), nn.utils.parameters_to_vector(warmup_policy.parameters())])
        print("After WARMUP, pol_params_norm:", np.linalg.norm(pol_params), "warm_params_norm:", np.linalg.norm(warm_params))
        mod, warm_mod = tf.get_default_session().run([nn.utils.parameters_to_vector(model.parameters()), nn.utils.parameters_to_vector(warmup_model.parameters())])
        print("mod_norm:", np.linalg.norm(mod), "warm_mod_norm:", np.linalg.norm(warm_mod))

        eval_rollout(runners['train'], warmup_policy, 'Use warmup policy to collect data from virtual env')
        warmup_collect_virt = []

        eval_rollout(runners['train'], policy, 'Use policy to collect data from virtual env')
        warmup_collect_real = []

        logger.info('--------------------------------------------- SLBO for %d outer stages -----------------------------------------' % slbo_n_stages)
        for T in range(slbo_n_stages):
            logger.info('-------- Starting Stage %d ---------', T)
            evaluate(settings, 'episode')

            # collect data
            if not test:
                logger.info('-------- Collect data from REAL env for %d samples --------' % FLAGS.rollout.n_train_samples)
                recent_train_set, ep_infos = runners['collect'].run(noise.make(policy), FLAGS.rollout.n_train_samples)
                recent_dev_set, _ = runners['dev'].run(noise.make(policy), FLAGS.rollout.n_dev_samples)
            else:
                logger.info('-------- Collect data from REAL env for %d samples --------' % 2000)
                recent_train_set, ep_infos = runners['collect2000'].run(noise.make(policy), 2000)
                recent_dev_set, _ = runners['dev'].run(noise.make(policy), FLAGS.rollout.n_dev_samples)

            logger.info('save setting dataset! trainset and devset!')
            if not test:
                pickle.dump(recent_train_set, open(f'./{setting}/{taskname}.trainset.task{TASK_NUM}.slbo{T}.pkl', 'wb'))
                pickle.dump(recent_dev_set, open(f'./{setting}/{taskname}.devset.task{TASK_NUM}.slbo{T}.pkl', 'wb'))

            # Add real data to task_train_sets and task_dev_sets
            # if not test:
            #     add_multi_step(recent_train_set, train_set)
            add_multi_step(recent_train_set, task_train_sets[TASK_NUM])
            add_multi_step(recent_dev_set, task_dev_sets[TASK_NUM])

            # if not test:
            #     states = recent_train_set.state
            #     mean = np.mean(states, axis=0)
            #     std = np.std(states, axis=0)
            #     min_ = np.min(states, axis=0)
            #     max_ = np.max(states, axis=0)
            #     states_stat = {"mean": mean, "std": std, "min": min_, "max": max_}

            # evaluate the surprisal of collected real data for model
            new_set = Dataset(dtype, FLAGS.rollout.max_buf_size)
            add_multi_step(recent_train_set, new_set)
            losses_new = []
            for i in range(FLAGS.rollout.n_train_samples // FLAGS.model.dev_batch_size + 1):
                samples = new_set.sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
                loss = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
                loss = loss.mean()
                losses_new.append(loss)
            losses_new_mean = np.mean(losses_new)
            surprisal.append(losses_new_mean)
            logger.info(f'(surprisal) model loss on new collected data is {losses_new_mean}')

            add_multi_step(recent_train_set, train_set)
            add_multi_step(
                runners['dev'].run(noise.make(policy), FLAGS.rollout.n_dev_samples)[0],
                dev_set,
            )

            returns = np.array([ep_info['return'] for ep_info in ep_infos])
            if len(returns) > 0:
                logger.info("episode: %s", np.mean(returns))

            if T == 0:  # check
                samples = train_set.sample_multi_step(100, 1, FLAGS.model.multi_step)
                for i in range(FLAGS.model.multi_step - 1):
                    masks = 1 - (samples.done[i] | samples.timeout[i])[..., np.newaxis]
                    assert np.allclose(samples.state[i + 1] * masks, samples.next_state[i] * masks)

            normalizers.state.update(recent_train_set.state)
            normalizers.action.update(recent_train_set.action)
            normalizers.diff.update(recent_train_set.next_state - recent_train_set.state)

            if TASK_NUM == 0:  # In the 1st task, no warmup, but we validate loss of the random model
                samples = dev_set.sample_multi_step(FLAGS.model.train_batch_size, 1, FLAGS.model.multi_step)
                loss = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
                loss = loss.mean()
                val_losses_warmup.append(loss)

            logger.info('SLBO for %d inner stages' % slbo_n_iters)
            model_time, trpo_time = 0, 0
            if 'slbo_task_val_loss' not in taskres.keys():
                taskres['slbo_task_val_loss'] = [[] for _ in range(TASK_NUM+1)]
            if decay == 'joint':
                logger.info('Joint train from a joint dataset')
            elif decay == 'taskid':
                Z = np.sum([float(i+1) for i in range(0, TASK_NUM+1)])
                prop = [float(taskid+1) / Z for taskid in range(TASK_NUM+1)]
                logger.info(f'Sampling prop={prop}, Z={Z}')
            elif decay == 'none':
                Z = TASK_NUM+1
                prop = [1. / float(Z) for _ in range(Z)]
                logger.info(f'Sampling prop={prop}, Z={Z}')
            for i in range(slbo_n_iters):
                if i % FLAGS.slbo.n_evaluate_iters == 0 or i == slbo_n_iters-1:  # and i != 0:
                    # cur_actions = policy.eval('actions_mean actions_std', states=recent_states)
                    # kl_old_new = gaussian_kl(*ref_actions, *cur_actions).sum(axis=1).mean()
                    # logger.info('KL(old || cur) = %.6f', kl_old_new)
                    real_eval, virt_eval = evaluate(settings, 'iteration')
                    if 'slbo_real_eval' not in taskres.keys(): taskres['slbo_real_eval'] = []
                    if 'slbo_virt_eval' not in taskres.keys(): taskres['slbo_virt_eval'] = []
                    taskres['slbo_real_eval'].append(real_eval)
                    taskres['slbo_virt_eval'].append(virt_eval)

                losses = deque(maxlen=slbo_n_model_iters)
                grad_norm_meter = AverageMeter()
                n_model_iters = slbo_n_model_iters
                if verbose: logger.info('Train model %d iterations' % n_model_iters)
                model_time = time.time()
                for _ in range(n_model_iters):
                    if decay == 'joint':
                        samples = train_set.sample_multi_step(FLAGS.model.train_batch_size, 1, FLAGS.model.multi_step)
                    else:
                        all_samples = []
                        sample_size = 0
                        for taskid in range(TASK_NUM+1):
                            samples_i = task_train_sets[taskid].sample_multi_step(int(FLAGS.model.train_batch_size*prop[taskid])+1, 1, FLAGS.model.multi_step)
                            all_samples.append(samples_i)
                            sample_size += int(FLAGS.model.train_batch_size*prop[taskid])+1
                        samples = np.concatenate(all_samples, axis=1).view(np.recarray)

                    _, train_loss, grad_norm = loss_mod.get_loss(
                        samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout,
                        fetch='train loss grad_norm')
                    losses.append(train_loss.mean())
                    grad_norm_meter.update(grad_norm)
                    # ideally, we should define an Optimizer class, which takes parameters as inputs.
                    # The `update` method of `Optimizer` will invalidate all parameters during updates.
                    for param in model.parameters():
                        param.invalidate()
                model_time = time.time() - model_time

                if i % FLAGS.model.validation_freq == 0:
                    task_val_loss = []
                    val_time = time.time()
                    for task_idx in range(TASK_NUM+1):
                        total_loss = []
                        for scan in range(FLAGS.rollout.n_dev_samples // FLAGS.model.dev_batch_size + 1):
                            samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)
                            loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)
                            total_loss.append(loss_i.mean())
                        total_loss = np.mean(total_loss)
                        task_val_loss.append(total_loss)
                        taskres['slbo_task_val_loss'][task_idx].append(total_loss)
                    val_loss = np.mean(task_val_loss)
                    val_time = time.time() - val_time
                    if np.isnan(val_loss) or np.isnan(np.mean(losses)):
                        logger.info('nan! %s %s', np.isnan(val_loss), np.isnan(np.mean(losses)))
                    logger.info('# SLBO Inner Iter %3d: Loss = [train = %.3f, dev = %.3f], after %d steps, grad_norm = %.6f, model_time=%d, trpo_time=%d, val_time=%d',
                                i, np.mean(losses), val_loss, n_model_iters, grad_norm_meter.get(), model_time, trpo_time, val_time)
                    logger.info(f'# task_val_loss: {task_val_loss}')
                    model_time, trpo_time = 0, 0
                    val_losses_slbo.append(val_loss)
                    train_losses_slbo.append(np.mean(losses))

                if verbose: logger.info('Train policy %d iterations' % slbo_n_policy_iters)
                trpo_time = time.time()
                for n_updates in range(slbo_n_policy_iters):
                    if FLAGS.algorithm != 'MF' and FLAGS.slbo.start == 'buffer':
                        runners['train'].set_state(train_set.sample(FLAGS.plan.n_envs).state)
                    else:
                        runners['train'].reset()

                    data, ep_infos = runners['train'].run(policy, FLAGS.plan.n_trpo_samples)
                    advantages, advantages_params, values, td, coef_mat, coef_mat_returns, reward_ctrl, x_velocity, begin_mark = runners['train'].compute_advantage(vfn, data, task)
                    dist_mean, dist_std, vf_loss, plotinfo = algo.train(max_ent_coef, data, advantages, values)
                    trpo_slbo.append(plotinfo)
                    returns = [info['return'] for info in ep_infos]
                    if n_updates == slbo_n_policy_iters-1:
                        logger.info('[TRPO] # %d: n_episodes = %d, returns: {mean = %.0f, std = %.0f}, '
                                    'dist std = %.10f, dist mean = %.10f, vf_loss = %.3f',
                                    n_updates, len(returns), np.mean(returns), np.std(returns) / np.sqrt(len(returns)),
                                    dist_std, dist_mean, vf_loss)
                trpo_time = time.time() - trpo_time

            if not test and (TASK_NUM) % FLAGS.ckpt.n_save_stages == 0:
                np.save(f'{FLAGS.log_dir}/{taskname}-stage-{TASK_NUM}', saver.state_dict())
                np.save(f'{FLAGS.log_dir}/{taskname}-final', saver.state_dict())
                res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f"./{setting}/{taskname}-task{TASK_NUM}-slbo{T}/", force=True, video_callable=lambda episode_id: True), policy)
                if 'slbo_monitor' not in taskres.keys():
                    taskres['slbo_monitor'] = []
                taskres['slbo_monitor'].append(res)
            if not test and FLAGS.ckpt.n_save_stages == 1:
                pickle.dump(recent_train_set, open(f'{FLAGS.log_dir}/stage-{TASK_NUM}.inc-buf.pkl', 'wb'))
            if test:
                returns_post_slbo_update = testeval(policy, runners['collect'])
                test_returns.append(returns_post_slbo_update)
                real_eval, virt_eval = evaluate(settings, 'iteration')
                test_summary['slbo'][TEST_TASK_NUM].append(real_eval)
                test_summary[f'slbo{T+1}'].append(returns_post_slbo_update)
                res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f"./{setting}/{taskname}-testtask{TEST_TASK_NUM}-slbo{T}/", force=True, video_callable=lambda episode_id: True), policy)
                print('test_summary_slbo:', test_summary['slbo'][TEST_TASK_NUM])

        if not test:
            np.save(f'{setting}/{taskname}.task{TASK_NUM}.saver', saver.state_dict())
            np.save(f'{setting}/{taskname}.final.saver', saver.state_dict())

        if init_generator and TASK_NUM == 0:
            print('finished init generator!')
            exit(0)

        pol_params, warm_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters()), nn.utils.parameters_to_vector(warmup_policy.parameters())])
        print("After SLBO, pol_params_norm:", np.linalg.norm(pol_params), "warm_params_norm:", np.linalg.norm(warm_params))

        eval_rollout(runners['train'], policy, 'Use optimal policy to collect data from real env')
        optimal_collect_real = []

        t3 = time.time()
        slbo_time = t3 - t2
        evaluate(settings, 'post-slbo')
        logger.info(f'Warmup time = {warmup_time}, SLBO time = {slbo_time}')

        alltaskres.append(taskres)
        if not test:
            pickle.dump(alltaskres, open(f'{setting}/{taskname}-alltaskres.info.pkl', 'wb'))
            pickle.dump(all_task_parameter, open(f'{setting}/all_task_parameter.pkl', 'wb'))
        else:
            pickle.dump(alltaskres, open(f'{setting}/{taskname}-alltaskres.info.pkl.{testparam}', 'wb'))
            pickle.dump(all_task_parameter, open(f'{setting}/all_task_parameter.pkl.{testparam}', 'wb'))

        eval_rollout(runners['train'], warmup_policy, 'Use warmup policy to collect data from virtual env')
        if not test:
            # if TASK_NUM > 0:
            if TASK_NUM > -1:
                task_params_before, final_grad, advtask_info = advtask.train(runners['train_copy'], runners['collect_copy'], warmup_collect_virt, warmup_collect_real, optimal_collect_real, returns_pre_warmup, val_losses_warmup, val_losses_slbo, train_losses_warmup, train_losses_slbo, surprisal, trpo_warmup, trpo_slbo, fout, infofilename, extra_runners)

        # first task or maxstep, update the model
        if not test and (TASK_NUM == 0 or TASK_NUM % maxstep == 0):
            logger.info(f"task_num={TASK_NUM}, sync_model_to_lazymodel")
            tf.get_default_session().run(sync_model_to_lazymodel)

        if test:
            pickle.dump(test_summary, open(f'{setting}/test_summary.pkl.{testparam}', 'wb'))
            TEST_TASK_NUM += 1
            TASK_NUM = train_tasknum
            # task_train_sets[TASK_NUM].clear()
            # task_dev_sets[TASK_NUM].clear()
            for tt in range(TASK_NUM+1):
                task_train_sets[tt].clear()
                task_dev_sets[tt].clear()
            train_set.clear()
            load_data_during_test()
            continue

        task_params_after = task_params_before + final_grad * alpha
        task.set_parameters(task_params_after)

        if not test:
            advtask_info['alpha'].append(alpha)
            with open(infofilename, 'wb') as handle:
                pickle.dump(advtask_info, handle, protocol=pickle.HIGHEST_PROTOCOL)
            print('>>>>>>dump')

        TASK_NUM += 1
        time_end = time.time()
        print(f"Task Done! Total Time Consumed for 1 task = {time_end - time_start}s")


if __name__ == '__main__':
    with tf.Session(config=get_tf_config()):
        main()
", "
import math
import numpy as np
from rllab.envs.mujoco import half_cheetah_2d_env
from slbo.envs import BaseModelBasedEnv
from slbo.utils.flags import FLAGS

HalfCheetah2DConfig = half_cheetah_2d_env.HalfCheetah2DConfig

class HalfCheetah2DEnv(half_cheetah_2d_env.HalfCheetah2DEnv, BaseModelBasedEnv):
    def get_current_obs(self):
        return self._get_obs()

    def mb_step(self, states, actions, next_states):
        # reward_ctrl
        reward_ctrl = -0.1 * np.sum(np.square(actions), axis=-1)

        # reward_run
        height, vel = next_states[..., 0], next_states[..., self.qpos_len]
        reward_run = -np.abs(vel - self._task_config.goal_velocity[0]) * self._task_config.coef[0]
        reward_run += -np.abs(height - self._task_config.goal_velocity[1]) * self._task_config.coef[1]

        # reward, done, reward_state
        reward = reward_ctrl + reward_run
        dones = np.zeros_like(reward_run, dtype=np.bool)
        reward_state = np.concatenate([vel, height], axis=-1)
        return reward, dones, reward_ctrl, reward_state.reshape((-1, 1))
", "
import tensorflow as tf
import numpy as np
import lunzi.nn as nn


class MultiLayerPerceptron(nn.Module):
    def __init__(self, blocks, activation=nn.ReLU, squeeze=False, weight_initializer=None, build=True):
        super().__init__()

        self._blocks = blocks
        if build:
            self.op_inputs = tf.placeholder(tf.float32, [None, self._blocks[0]])

        with self.scope:
            kwargs = {}
            if weight_initializer is not None:
                kwargs['weight_initializer'] = weight_initializer
            layers = []
            for in_features, out_features in zip(blocks[:-1], blocks[1:]):
                if layers:
                    layers.append(activation())
                layers.append(nn.Linear(in_features, out_features, **kwargs))
            if squeeze:
                layers.append(nn.Squeeze(axis=1))
            self.net = nn.Sequential(*layers)

        self._squeeze = squeeze
        self._activation = activation

        if build:
            self.build()

    def build(self):
        self.op_outputs = self.forward(self.op_inputs)

    def forward(self, *inputs):
        if len(inputs) > 1:
            inputs = tf.concat(inputs, axis=-1)
        else:
            inputs = inputs[0]
        return self.net(inputs)

    def fast(self, *inputs):
        return self.net.fast(np.concatenate(inputs, axis=-1))

    def clone(self):
        return MultiLayerPerceptron(self._blocks, self._activation, self._squeeze)

    def extra_repr(self):
        return f'activation = {self._activation}, blocks = {self._blocks}, squeeze = {self._squeeze}'
" ]
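The mb_step method above computes the batched reward used for virtual (model-based) rollouts: a control penalty plus distance-to-goal terms for forward velocity and body height. Below is a minimal numpy-only sketch of that arithmetic; the goal_velocity, coef, qpos_len values and the toy observation/action sizes are hypothetical placeholders, not values taken from the repo (there they come from the task config object).

# Hedged sketch of the 2D half-cheetah reward arithmetic, standalone and runnable.
import numpy as np

def cheetah2d_reward(next_states, actions, goal_velocity=(1.5, 0.6), coef=(1.0, 0.5), qpos_len=9):
    # control penalty: -0.1 * ||action||^2 per transition
    reward_ctrl = -0.1 * np.sum(np.square(actions), axis=-1)
    # running reward: penalize distance to the target forward velocity and target height
    height, vel = next_states[..., 0], next_states[..., qpos_len]
    reward_run = -np.abs(vel - goal_velocity[0]) * coef[0]
    reward_run += -np.abs(height - goal_velocity[1]) * coef[1]
    return reward_ctrl + reward_run

# toy batch: 4 transitions, 18-dim observation (9 qpos + 9 qvel assumed), 6-dim action
rng = np.random.default_rng(0)
print(cheetah2d_reward(rng.normal(size=(4, 18)), rng.normal(size=(4, 6))))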
[ [ "tensorflow.get_default_session", "numpy.allclose", "numpy.isnan", "tensorflow.assign", "numpy.linalg.norm", "numpy.concatenate", "tensorflow.global_variables_initializer", "numpy.std", "numpy.mean", "numpy.prod", "numpy.load", "numpy.array" ], [ "numpy.concatenate", "numpy.square", "numpy.zeros_like", "numpy.abs" ], [ "numpy.concatenate", "tensorflow.concat", "tensorflow.placeholder" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
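For reference, a hedged usage sketch of the MultiLayerPerceptron module listed in the code field above, following the op_inputs/op_outputs pattern it defines under a TF1-style session. The layer sizes, batch shape, and the assumption that lunzi's nn.Module only needs a default session and tf.global_variables_initializer() are illustrative; the class's real import path in the repo may differ.

# Hypothetical usage sketch (not from the repo): build a small value network
# with the MultiLayerPerceptron class defined above and evaluate it on a dummy batch.
import numpy as np
import tensorflow as tf
# assumes MultiLayerPerceptron (and lunzi.nn) from the code above are in scope

with tf.Session() as sess:
    # 17-dim input -> two hidden layers of 64 -> scalar output, squeezed to shape [batch]
    vfn = MultiLayerPerceptron([17, 64, 64, 1], squeeze=True)
    sess.run(tf.global_variables_initializer())
    batch = np.random.randn(32, 17).astype(np.float32)
    values = sess.run(vfn.op_outputs, feed_dict={vfn.op_inputs: batch})
    print(values.shape)  # expected: (32,)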