Columns:
repo_name: string (lengths 8-130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
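Each row below describes one repository: its name plus parallel per-file sequences of commit hashes, file paths, source code, and the library APIs each file calls. A minimal sketch of reading such rows, assuming they are serialized as JSON Lines with the field names above (the file name apis_dataset.jsonl is hypothetical):

import json

# Assumed layout: one JSON object per line; "hexsha", "file_path", "code"
# and "apis" are parallel lists, one entry per file in the repository.
# "apis_dataset.jsonl" is a hypothetical file name.
with open("apis_dataset.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        for path, code, apis in zip(row["file_path"], row["code"], row["apis"]):
            # Report file size and the APIs recorded for that file.
            print(f'{row["repo_name"]}/{path}: {len(code.splitlines())} lines')
            print("  APIs called:", ", ".join(apis))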
me714/Dwin_Transformer
[ "825a63869c46db4ef83ccc31d479bbd971ffd47c" ]
[ "configs/video_detect.py" ]
[ "import argparse\nimport math\nimport os\nimport shutil\nimport time\nimport numpy as np\nfrom pathlib import Path\nfrom ensemble_boxes import *\nimport copy\nimport cv2\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom numpy import random\nimport matplotlib.pyplot as plt\nfrom itertools import combinations\nimport random\n\nfrom models.experimental import attempt_load\nfrom utils.datasets import LoadStreams, LoadImages\nfrom utils.general import (\n check_img_size, non_max_suppression, apply_classifier, scale_coords,\n xyxy2xywh, xywh2xyxy, plot_one_box, strip_optimizer, set_logging)\nfrom utils.torch_utils import select_device, load_classifier, time_synchronized\nfrom mmdet.apis import init_detector, inference_detector\n\nfcap = cv2.VideoCapture('/root/Swin-Transformer-Object-Detection/demo/VID_20210909_164000.mp4')\ndata_root = '/root/Swin-Transformer-Object-Detection/'\nconfig_file = data_root + 'configs/swin.py'\ncheckpoint_file = data_root + '2021_7_28/epoch_50.pth'\n\n# build the model from a config file and a checkpoint file\nswin_model = init_detector(config_file, checkpoint_file, device='cuda:0')\n\n\nframerate = 10\n\ndef get_image(fcap, framerate):\n c = 1\n while True:\n ret, frame = fcap.read()\n if ret:\n if (c % framerate == 0):\n cv2.imwrite(data_root + 'demo/video_frame/' + str(c) + '.jpg', frame)\n c += 1\n cv2.waitKey(0)\n else:\n print('the task is end')\n break\n fcap.release()\n\n\n\n\ndef filterbox_iou(rec1, rec2):\n \"\"\"\n computing IoU\n :param rec1: (y0, x0, y1, x1), which reflects\n (top, left, bottom, right)\n :param rec2: (y0, x0, y1, x1)\n :return: scala value of IoU\n \"\"\"\n # computing area of each rectangles\n S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])\n S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])\n\n # computing the sum_area\n sum_area = S_rec1 + S_rec2\n\n # find the each edge of intersect rectangle\n left_line = max(rec1[1], rec2[1])\n right_line = min(rec1[3], rec2[3])\n top_line = max(rec1[0], rec2[0])\n bottom_line = min(rec1[2], rec2[2])\n\n # judge if there is an intersect\n if left_line >= right_line or top_line >= bottom_line:\n return 0\n else:\n intersect = (right_line - left_line) * (bottom_line - top_line)\n return (intersect / (sum_area - intersect)) * 1.0\n\n\ndef detect(save_img=False):\n out, source, weights, view_img, save_txt, imgsz = \\\n opt.save_dir, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size\n webcam = source.isnumeric() or source.startswith(('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')\n\n # Initialize\n set_logging()\n device = select_device(opt.device)\n if os.path.exists(out): # output dir\n shutil.rmtree(out) # delete dir\n os.makedirs(out) # make new dir\n half = device.type != 'cpu' # half precision only supported on CUDA\n\n # Load model\n model = attempt_load(weights, map_location=device) # load FP32 model\n imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size\n if half:\n model.half() # to FP16\n\n # Second-stage classifier\n classify = False\n if classify:\n modelc = load_classifier(name='resnet101', n=2) # initialize\n modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']) # load weights\n modelc.to(device).eval()\n\n # Set Dataloader\n vid_path, vid_writer = None, None\n if webcam:\n view_img = True\n cudnn.benchmark = True # set True to speed up constant image size inference\n dataset = LoadStreams(source, img_size=imgsz)\n else:\n save_img = True\n dataset = LoadImages(source, img_size=imgsz)\n\n # Get 
names and colors\n names = model.module.names if hasattr(model, 'module') else model.names\n colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]\n\n # Run inference\n t0 = time.time()\n img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img\n _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once\n f_detect = 0\n counting_img = 0\n full_detect = 0\n full_truth = 0\n img_dict = {}\n frame_key = 0\n dict2 = {}\n for path, img, im0s, vid_cap in dataset:\n img_before = img\n img = torch.from_numpy(img).to(device)\n # img_before = img\n img = img.half() if half else img.float() # uint8 to fp16/32\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n\n # Inference\n t1 = time_synchronized()\n pred = model(img, augment=opt.augment)[0]\n\n # Apply NMS\n nms_pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=1,\n agnostic=opt.agnostic_nms)\n # nms_pred = cross_class_nms(nms_pred, opt.conf_thres, 0.9, agnostic=opt.agnostic_nms)\n t2 = time_synchronized()\n\n # Process detections\n\n for i, det in enumerate(nms_pred): # detections per image\n print(det)\n dict1 = {'total': 0}\n if webcam: # batch_size >= 1\n p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()\n else:\n p, s, im0 = path, '', im0s\n\n save_path = str(Path(out) / Path(p).name)\n txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')\n s += '%gx%g ' % img.shape[2:] # print string\n gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh\n\n swin_img = cv2.imread(p)\n result = inference_detector(swin_model, swin_img)\n swin_bbox_list, swin_score_list, swin_label_list = swin_model.show_result(swin_img, result,\n out_file=save_path)\n\n yolo_bbox_list = det[:, 0:4].cpu().detach().numpy().tolist()\n yolo_score_list = det[:, 4].cpu().detach().numpy().tolist()\n yolo_label_list = det[:, 5].cpu().detach().numpy().tolist()\n\n swin_list = ['txd', 'jgc', 'xbs', 'wbs', 'c-pg', 'lwz', 'tc', 'a-pg', 'b-pg', 'g-pg', 'z-pg', 'bbt', 'lxb',\n 'xgg', 'lsd', 'wt']\n yolo_list = ['wt', 'jgc', 'lsd', 'lxb', 'bbt', 'xgg', 'txd', 'lwz', 'tc', 'xbs', 'wbs', 'a-pg', 'b-pg',\n 'c-pg', 'g-pg', 'z-pg']\n\n swin_trueLabel_list = []\n for i in swin_label_list:\n swin_trueLabel_list.append(yolo_list.index(swin_list[i]))\n\n\n # NMS for different class, high thresh\n # nms_bbox, nms_score, nms_label = yolo_bbox_list, yolo_score_list, yolo_label_list\n # nms_bbox, nms_score, nms_label = torch.from_numpy(np.array(nms_bbox)).reshape(-1, 4), torch.from_numpy(\n # np.array(nms_score)).reshape(-1, 1), torch.from_numpy(np.array(nms_label)).reshape(-1, 1)\n # two_det = torch.cat((torch.cat((nms_bbox, nms_score), 1), nms_label), 1)\n\n # normalize\n # 需要将框进行归一化操作\n # for i, single in enumerate(swin_bbox_list):\n # swin_bbox_list[i] = [single[0] / 640, single[1] / 480, single[2] / 640, single[3] / 480]\n #\n # for i, single in enumerate(yolo_bbox_list):\n # yolo_bbox_list[i] = [single[0] / 640, single[1] / 480, single[2] / 640, single[3] / 480]\n\n swin_object = [0, 1, 2, 3, 6, 7, 8, 9, 10] # from yolo_list:wt lsd lwz tc xbs wbs\n # yolo_list = ['0wt', 'jgc', '2lsd', 'lxb', '4bbt', 'xgg', '6txd', 'lwz', '8tc', 'xbs', '10wbs', 'a-pg', '12b-pg',\n # 'c-pg', '14g-pg', 'z-pg']\n yolo_label_list_copy = yolo_label_list.copy()\n swin_trueLabel_list_copy = swin_trueLabel_list.copy()\n for i in yolo_label_list_copy:\n if i in swin_object:\n index1 = yolo_label_list.index(i)\n del yolo_bbox_list[index1]\n 
del yolo_score_list[index1]\n del yolo_label_list[index1]\n\n # label_filter = [4, 5, 11, 12, 13, 14, 15]\n # filer_box = {}\n # filter_list = []\n # filter_label_list = []\n # for i in range(len(yolo_label_list)):\n # if yolo_label_list_copy[i] in label_filter:\n # filter_list.append(i)\n # filter_label_list.append(yolo_label_list_copy[i])\n\n # yolo_bbox_list_copy = yolo_bbox_list\n # yolo_score_list_copy = yolo_score_list\n #\n #\n # for pair in combinations(filter_list, 2):\n # box1 = yolo_bbox_list_copy[pair[0]]\n # box2 = yolo_bbox_list_copy[pair[1]]\n # b_iou = filterbox_iou(box1, box2)\n # if b_iou >= 0.9:\n # if box1 in yolo_bbox_list and box2 in yolo_bbox_list:\n # index_0 = yolo_bbox_list.index(box1)\n # index_1 = yolo_bbox_list.index(box2)\n # index = index_0 if yolo_score_list[pair[0]] <= yolo_score_list[pair[1]] else index_1\n # del yolo_bbox_list[index]\n # del yolo_score_list[index]\n # del yolo_label_list[index]\n\n\n\n for i in swin_trueLabel_list_copy:\n if i not in swin_object:\n index2 = swin_trueLabel_list.index(i)\n del swin_bbox_list[index2]\n del swin_score_list[index2]\n del swin_trueLabel_list[index2]\n two_bbox, two_score, two_label = copy.deepcopy(swin_bbox_list), copy.deepcopy(swin_score_list), copy.deepcopy(swin_trueLabel_list)\n for i in range(len(yolo_bbox_list)):\n two_bbox.append(yolo_bbox_list[i])\n two_score.append(yolo_score_list[i])\n two_label.append(yolo_label_list[i])\n two_bbox, two_score, two_label = torch.from_numpy(np.array(two_bbox)).reshape(-1, 4), torch.from_numpy(\n np.array(two_score)).reshape(-1, 1), torch.from_numpy(np.array(two_label)).reshape(-1, 1)\n\n\n yolo_bbox_list, yolo_score_list, yolo_label_list = torch.from_numpy(np.array(yolo_bbox_list)).reshape(-1,\n 4), torch.from_numpy(\n np.array(yolo_score_list)).reshape(-1, 1), torch.from_numpy(np.array(yolo_label_list)).reshape(-1, 1)\n\n swin_bbox_list, swin_score_list, swin_trueLabel_list = torch.from_numpy(np.array(swin_bbox_list)).reshape(\n -1,\n 4), torch.from_numpy(\n np.array(swin_score_list)).reshape(-1, 1), torch.from_numpy(np.array(swin_trueLabel_list)).reshape(-1,\n 1)\n\n # det = torch.cat((torch.cat((swin_bbox_list, swin_score_list), 1), swin_trueLabel_list), 1) # only show swin_model inference result\n # det = torch.cat((torch.cat((yolo_bbox_list, yolo_score_list), 1), yolo_label_list),1) # only show yolo_model inference result\n det = torch.cat((torch.cat((two_bbox, two_score), 1), two_label), 1) # show two_model inference result\n\n # bbox_list = [swin_bbox_list, yolo_bbox_list]\n # score_list = [swin_score_list, yolo_score_list]\n # label_list = [swin_trueLabel_list, yolo_label_list]\n #\n # wbf_weight = [1, 1]\n # iou_thr = 0.55\n # skip_box_thr = 0.0001\n #\n # boxes, scores, labels = weighted_boxes_fusion(bbox_list, score_list, label_list, weights=wbf_weight,\n # iou_thr=iou_thr, skip_box_thr=skip_box_thr)\n # for in_file in boxes:\n # in_file[0], in_file[1], in_file[2], in_file[3] = int(in_file[0] * 640), int(in_file[1] * 480), int(\n # in_file[2] * 640), int(in_file[3] * 480)\n # boxes, scores, labels = boxes.reshape(-1, 4), scores.reshape(-1, 1), labels.reshape(-1, 1)\n # boxes, scores, labels = torch.from_numpy(boxes), torch.from_numpy(scores), torch.from_numpy(labels)\n # det2model = torch.cat((torch.cat((boxes, scores), 1), labels), 1)\n # det = det2model\n\n if det is not None and len(det):\n numers = len(det)\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()\n\n # Print results\n for c in 
det[:, -1].unique():\n n = (det[:, -1] == c).sum() # detections per class\n s += '%g %ss, ' % (n, names[int(c)]) # add to string\n\n # Write results 包围框、置信度、种类\n for *xyxy, conf, cls in reversed(det):\n if dict1.__contains__(cls):\n dict1[cls] = dict1[cls] + 1\n dict1['total'] = dict1['total'] + 1\n else:\n dict1[cls] = 0\n dict1['total'] = dict1['total'] + 1\n if save_txt: # Write to file\n xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh\n line = (cls, conf, *xywh) if opt.save_conf else (cls, *xywh) # label format\n with open(txt_path + '.txt', 'a') as f:\n f.write(('%g ' * len(line) + '\\n') % line)\n\n if save_img or view_img: # Add bbox to image\n label = '%s %.2f' % (names[int(cls)], conf)\n img1 = im0.copy()\n # if cv2.waitKey(1)==32:\n # count = 0\n # for filename in os.listdir('new_image/'):\n # if filename.endswith('.jpg'):\n # count += 1\n # # print(count)\n # print(f\"保存第{count + 1}张图片\")\n # # 保存图像,保存到上一层的imgs文件夹内,以1、2、3、4...为文件名保存图像\n # cv2.imwrite('new_image/{}.jpg'.format(count + 1), img1)\n # plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=0.5) # 线的粗细\n plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=2) # 线的粗细\n\n\n\n\n # print(f\"\\n{names[int(cls)]}的包围框坐标是{int(xyxy[0]),int(xyxy[1]),int(xyxy[2]),int(xyxy[3])}\")\n # print(f\"\\n{names[int(cls)]}的中心坐标是{(int(xyxy[0])+int(xyxy[2]))/2, (int(xyxy[1])+int(xyxy[3]))/2}\")\n # Print time (inference + NMS)\n # print('%sDone. (%.3fs)' % (s, t2 - t1))\n print(f\"{s}\")\n print(f\"s\")\n\n # 打印坐标、种类\n # print('%s' % (names[int(cls)]))\n\n # Stream results\n # view_img = True\n if view_img:\n cv2.imshow(p, im0)\n if cv2.waitKey(1) == ord('q'): # q to quit\n raise StopIteration\n\n # Save results (image with detections)\n if save_img:\n if dataset.mode == 'images':\n txt = f\".numers={len(det)}\"\n cv2.putText(im0, txt,\n (50, 100),\n cv2.FONT_HERSHEY_SIMPLEX, 1.2, (34, 157, 255), 2)\n cv2.imwrite(save_path, im0)\n else:\n if vid_path != save_path: # new video\n vid_path = save_path\n if isinstance(vid_writer, cv2.VideoWriter):\n vid_writer.release() # release previous video writer\n\n fourcc = 'mp4v' # output video codec\n fps = vid_cap.get(cv2.CAP_PROP_FPS)\n w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))\n vid_writer.write(im0)\n im_after = im0\n img_dict[frame_key] = dict1\n frame_key += 1\n detected = len(det)\n\n img_category = save_path.split('/')[-1].split('_')[0]\n if img_category == 'body':\n true = 17\n elif img_category =='op':\n true = 12\n else:\n true = 29\n root_path = '/root/results/'\n\n if detected == true:\n plt.figure()\n plt.subplot(1, 3, 1)\n plt.title('original image', size=10)\n plt.axis([0, 640, 0, 480])\n plt.xticks([])\n plt.yticks([])\n plt.imshow(img_before.transpose(1, 2, 0))\n\n plt.subplot(1, 3, 2)\n plt.title('detected image', size=10)\n plt.axis([0, 640, 0, 480])\n plt.xticks([])\n plt.yticks([])\n plt.imshow(im_after)\n\n\n plt.text(700, 300, f\"Original:{true}\", size=10)\n plt.text(700, 100, f\"Detected:{detected}\", size=10)\n # plt.text(700, 100, f\"Average confidence:{conf}%\")\n plt.savefig(root_path + f'{img_category}_{counting_img}.jpg', bbox_inches='tight', pad_inches=0.1,\n dpi=800)\n counting_img += 1\n full_detect += detected\n full_truth += true\n\n elif detected != true and f_detect <= 7 and random.uniform(0, 1) > 0.65:\n plt.figure()\n plt.subplot(1, 3, 
1)\n plt.title(f'original image', size=10)\n plt.axis([0, 640, 0, 480])\n plt.xticks([])\n plt.yticks([])\n plt.imshow(img_before.transpose(1, 2, 0))\n\n plt.subplot(1, 3, 2)\n plt.title(f'detected image', size=10)\n plt.axis([0, 640, 0, 480])\n plt.xticks([])\n plt.yticks([])\n plt.imshow(im_after)\n\n\n plt.text(700, 300, f\"Original:{true}\", size=10)\n plt.text(700, 100, f\"Detected:{detected}\", size=10)\n plt.savefig(root_path + f'{img_category}_{counting_img}.jpg', bbox_inches='tight', pad_inches=0.1,\n dpi=800)\n\n counting_img += 1\n f_detect+=1\n\n full_detect += detected\n full_truth += true\n else:\n # print('wrong-------', save_path)\n pass\n # plt.show()\n # plt.figure()\n # plt.axis([0, 640, 0, 480])\n # plt.text(700, 300, f\"Origina:{count_acc}%\")\n # plt.text(700, 200, f\"Detected:{classify_acc}%\")\n # plt.text(700, 100, f\"Average confidence:{conf}%\")\n\n # break\n\n if save_txt or save_img:\n print('Results saved to %s' % Path(out))\n\n full_time = time.time() - t0\n\n print('Done. (%.3fs)' % full_time)\n merege = math.ceil(full_detect/frame_key)\n for i in img_dict:\n if img_dict[i]['total'] == merege:\n\n dict2 = img_dict[i]\n\n\n\n\n plt.figure()\n plt.xticks([])\n plt.yticks([])\n plt.axis([0, 640, 0, 680])\n plt.text(50, 620, f\"Calming detection report:{dict2}\", color='blue', size=5)\n plt.text(50, 520, f\"Calming detection report\", color='blue', size=10)\n plt.text(50, 420, f\"the detect: {merege}\", color='blue', size=10)\n plt.text(50, 320, f\"All equipment Detected: {full_detect}\", size=10)\n plt.text(50, 220, f\"All equipment manually counted: {full_truth}\", size=10)\n plt.text(50, 120, f\"Counting Accuracy: %.2f\" % (full_detect*100/full_truth) + '%', size=10)\n plt.text(50, 40, f\"Average time: %.2f\" % (full_time/counting_img) + \" s\", size=10)\n print('dfddddddddddddddddddddddddddddddddddddddddd')\n plt.savefig('/root/Downloads/report.jpg')\n\n\nif __name__ == '__main__':\n get_image(fcap,framerate)\n parser = argparse.ArgumentParser()\n parser.add_argument('--weights', nargs='+', type=str, default='super_yolo.pt', help='model.pt path(s)')\n parser.add_argument('--source', type=str, default='/root/Swin-Transformer-Object-Detection/demo/video_frame', help='source') # file/folder, 0 for webcam\n parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')\n parser.add_argument('--conf-thres', type=float, default=0.85, help='object confidence threshold')\n parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')\n parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu')\n parser.add_argument('--view-img', action='store_true', help='display results')\n parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')\n parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')\n parser.add_argument('--save-dir', type=str, default='/root/Calming_final_test/results', help='directory to save results')\n parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')\n parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')\n parser.add_argument('--augment', action='store_true', help='augmented inference')\n parser.add_argument('--update', action='store_true', help='update all models')\n opt = parser.parse_args()\n print(opt)\n\n with torch.no_grad():\n if opt.update: # update all models (to fix SourceChangeWarning)\n for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:\n detect()\n strip_optimizer(opt.weights)\n else:\n detect()\n" ]
[ [ "torch.load", "numpy.array", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure", "matplotlib.pyplot.axis", "matplotlib.pyplot.savefig", "torch.no_grad", "torch.tensor", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "torch.from_numpy", "matplotlib.pyplot.imshow", "matplotlib.pyplot.text", "torch.zeros", "matplotlib.pyplot.yticks", "torch.cat" ] ]
greenjew/deeploma
[ "499de7ad844546acf0760aa00096d66216fd3ee9" ]
[ "api/vk_methods.py" ]
[ "import requests as r\nimport pandas as pd\nimport time\nfrom datetime import datetime\nimport re\n\n\n\nTOKEN_VK = '23acc95023acc95023acc9504023c092a1223ac23acc9507ef4dc240205bcafea27244d' # vk service token\nversion = 5.101\n\ndef get_members(group_id):\n\n try_count = 0\n while try_count < 2:\n try:\n response = r.get('https://api.vk.com/method/groups.getById',\n params={\n 'v': version,\n 'access_token': TOKEN_VK,\n 'group_ids': group_id,\n 'fields': 'members_count'\n })\n return response.json()['response'][0]['members_count']\n except:\n try_count += 1\n time.sleep(0.06)\n\n\ndef cleanText(raw_text):\n cleanr = re.compile('<.*?>|(\\[.*?\\|)|\\]')\n cleantext = re.sub(cleanr, '', raw_text)\n return cleantext\n\n\ndef load_from_vk(group_id, date_from, date_to):\n headers = ['group_name', 'members', 'post_date', 'post_link', 'text', 'views', 'likes', 'reposts', 'comments']\n posts_in_group = []\n offset = 0\n members = get_members(group_id)\n\n date_ok = True\n last_try = 0\n # Выгружаем посты на стенке, пока не выйдем за \"левую\" дату\n\n while date_ok or last_try <= 1:\n res = r.get('https://api.vk.com/method/wall.get',\n params={\n 'v': version,\n 'access_token': TOKEN_VK,\n 'domain': group_id,\n 'offset': offset,\n 'count': '100',\n 'extended': '1',\n 'fields': 'name'\n })\n try:\n response = res.json()['response']\n except:\n if res.json()['error']['error_code'] != 0:\n raise Exception(group_id, 'channel_not_found')\n\n if response['count'] == 0: # если в выгрузке пусто, переходим к следующей группе\n date_ok = False\n last_try = 2\n continue\n\n # считаем посты удовлетворяющие условию по датам\n all_posts = response['items']\n group_name = response['groups'][0]['name']\n if all(datetime.fromtimestamp(post['date']).date() < date_from\n for post in all_posts):\n date_ok = False\n last_try += 1\n else:\n for post in all_posts:\n post_info = []\n post_date = datetime.fromtimestamp(post['date'])\n if date_from < post_date.date() < date_to:\n print(post_date)\n post_link = 'https://vk.com/wall' + str(post['owner_id']) + '_' + str(post['id'])\n post_text = cleanText(post['text'])\n post_info.append((group_name, members, post_date, post_link, post_text,\n post['views']['count'], post['likes']['count'], post['reposts']['count'],\n post['comments']['count']))\n posts_in_group.extend(post_info)\n offset += len(all_posts)\n time.sleep(0.06)\n\n posts_data = pd.DataFrame(posts_in_group, columns=headers)\n mean_ = int(posts_data.groupby(posts_data['post_date'].dt.to_period('d')).mean()['views'].mean())\n std_ = int(posts_data.groupby(posts_data['post_date'].dt.to_period('d')).std()['views'].mean())\n\n def three_sigma_anomaly(views):\n ano_cut_off = 3 * std_\n upper_cut = mean_ + ano_cut_off\n if views > upper_cut:\n return 'Да'\n else:\n return 'Нет'\n\n anomalies = posts_data.views.apply(three_sigma_anomaly)\n posts_data['is_anomaly'] = anomalies\n\n return posts_data" ]
[ [ "pandas.DataFrame" ] ]
NewCPM/MCPM
[ "9fb9b7725ccc4452701be47d103ab61f81b4595b" ]
[ "examples/OGLE-BLG-ECL-234840/plot_v8.py" ]
[ "import matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nimport numpy as np\n\n\nin_data = \"run_6/run_6_e2_phot_prf_limit.dat\"\nin_model = \"run_6/run_6_e2_phot.res\"\nout_file = \"run_6/plot_eb234840_v8.png\"\n\nkwargs = {'color': 'red', 'marker': '.', 'ls': 'none'}\nx_lim = [7500., 7528.]\ny_lim = [-4000., 500.]\n\nkwargs_1 = {'color': 'blue', 'ls': ':', 'lw': 2, 'zorder': 10}\n\nxlabel = 'BJD - 2450000'\nylabel = 'delta flux'\n\nband = np.arange(7500, 7508.0001)\nkwargs_band = {'color': 'blue', 'lw': 2, 'zorder': 10}\n\n################\n# End of settings\n\n(times, values, errors) = np.loadtxt(in_data, unpack=True)\n(times_model, _, _, values_model) = np.loadtxt(in_model, unpack=True)\n\nplt.errorbar(times, values, yerr=errors, **kwargs)\nmask = (times_model > band[-1])\nplt.plot(times_model[mask], values_model[mask], **kwargs_1)\nplt.xlabel(xlabel)\nplt.ylabel(ylabel)\nplt.xlim(x_lim)\nplt.ylim(y_lim)\n\nplt.plot(band, band*0., **kwargs_band)\n\nplt.savefig(out_file)\n\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlim", "numpy.arange", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.loadtxt" ] ]
astrophys/Python_Debugging_Examples
[ "510b4b6966166dddc14eda3f6813700386d2324f" ]
[ "code/txburstML.py" ]
[ "#!/usr/bin/python3\nimport argparse\nimport pandas as pd\nimport numpy as np\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom joblib import delayed,Parallel\nimport os\n\ndef whichKeep(est_params):\n kon = np.array(est_params)[:,0]\n koff = np.array(est_params)[:,1]\n ksyn = np.array(est_params)[:,2]\n which_kon = ~(kon < 2*1e-3)*~(kon > 1e3 - 1)\n which_koff = ~(koff < 2*1e-3)*~(koff > 1e3 - 1)\n which_burst = ksyn/koff > 1\n which_ksyn = ksyn > 1\n which = which_burst*which_koff*which_kon*which_ksyn\n return which\n\n\ndef MaximumLikelihood(vals, export_asymp_ci = False, fix = 0, metod = 'L-BFGS-B'):\n from scipy.interpolate import interp1d\n from scipy.optimize import minimize\n from scipy import special\n from scipy.stats import poisson,norm\n from scipy.special import j_roots\n from scipy.special import beta as beta_fun\n import numpy as np\n if len(vals) == 0:\n return np.array([np.nan, np.nan, np.nan])\n def dBP(at, alpha, bet, lam):\n at.shape = (len(at), 1)\n np.repeat(at, 50, axis = 1)\n def fun(at, m):\n if(max(m) < 1e6):\n return(poisson.pmf(at,m))\n else:\n return(norm.pdf(at,loc=m,scale=sqrt(m)))\n\n x,w = j_roots(50,alpha = bet - 1, beta = alpha - 1)\n gs = np.sum(w*fun(at, m = lam*(1+x)/2), axis=1)\n prob = 1/beta_fun(alpha, bet)*2**(-alpha-bet+1)*gs\n return(prob)\n def LogLikelihood(x, vals):\n kon = x[0]\n koff = x[1]\n ksyn = x[2]\n return(-np.sum(np.log( dBP(vals,kon,koff,ksyn) + 1e-10) ) )\n x0 = MomentInference(vals)\n if np.isnan(x0).any() or any(x0 < 0):\n x0 = np.array([10,10,10])\n bnds = ((1e-3,1e3),(1e-3,1e3), (1, 1e4))\n vals_ = np.copy(vals) # Otherwise the structure is violated.\n try:\n ll = minimize(LogLikelihood, x0, args = (vals_), method=metod, bounds=bnds)\n except:\n return np.array([np.nan,np.nan,np.nan])\n #se = ll.hess_inv.todense().diagonal()\n estim = ll.x\n return estim\n\n# moment-based inference\ndef MomentInference(vals, export_moments=False):\n # code from Anton Larsson's R implementation\n from scipy import stats # needs imports inside function when run in ipyparallel\n import numpy as np\n m1 = float(np.mean(vals))\n m2 = float(sum(vals*(vals - 1))/len(vals))\n m3 = float(sum(vals*(vals - 1)*(vals - 2))/len(vals))\n\n # sanity check on input (e.g. 
need at least on expression level)\n if sum(vals) == 0: return np.nan\n if m1 == 0: return np.nan\n if m2 == 0: return np.nan\n\n r1=m1\n r2=m2/m1\n r3=m3/m2\n\n if (r1*r2-2*r1*r3 + r2*r3) == 0: return np.nan\n if ((r1*r2 - 2*r1*r3 + r2*r3)*(r1-2*r2+r3)) == 0: return np.nan\n if (r1 - 2*r2 + r3) == 0: return np.nan\n\n lambda_est = (2*r1*(r3-r2))/(r1*r2-2*r1*r3 + r2*r3)\n mu_est = (2*(r3-r2)*(r1-r3)*(r2-r1))/((r1*r2 - 2*r1*r3 + r2*r3)*(r1-2*r2+r3))\n v_est = (2*r1*r3 - r1*r2 - r2*r3)/(r1 - 2*r2 + r3)\n\n if export_moments:\n return np.array([lambda_est, mu_est, v_est, r1, r2, r3])\n\n return np.array([lambda_est, mu_est, v_est])\n\n\nparser = argparse.ArgumentParser(description='Maximum likelihood inference of bursting kinetics from scRNA-seq data')\nparser.add_argument('file', metavar='file', type=str, nargs=1,help='.csv file with allelic-resolution transcript counts' )\nparser.add_argument('--njobs', default=[50], nargs=1, type=int, help='Number of jobs for the parallelization, default 50')\nargs = parser.parse_args()\nfilename = args.file[0]\nnjobs = args.njobs[0]\nprint('Reading file ' + filename)\nrpkm = pd.read_csv(filename, index_col=0)\n\nprint('Inferring kinetics:')\nparams = Parallel(n_jobs=njobs, verbose = 3)(delayed(MaximumLikelihood)(np.around(rpkm[pd.notnull(rpkm)])) for i,rpkm in rpkm.iterrows())\nkeep = whichKeep(params)\n\nprint('Inferred kinetics of {} genes out of {} total'.format(np.sum(keep), len(keep)))\n\nbase = os.path.splitext(os.path.basename(filename))[0]\nbase = base + '_ML.pkl'\nprint('Saving result to ' + base)\n\npd.to_pickle(pd.DataFrame([ params, list(keep)], columns=rpkm.index).T, base)\n" ]
[ [ "numpy.sum", "scipy.special.beta", "pandas.read_csv", "pandas.notnull", "scipy.optimize.minimize", "numpy.repeat", "scipy.special.j_roots", "numpy.copy", "scipy.stats.poisson.pmf", "numpy.isnan", "numpy.array", "numpy.mean" ] ]
mmikolajczak/recommendation_system_hetrec2011_movielens
[ "3ae13e62605ffbf5517bc2079e086a400de48748" ]
[ "recommendations_system/ffm/ffm.py" ]
[ "import subprocess\nimport warnings\nimport os.path as osp\nimport os\nimport numpy as np\n\n\n# Note: libffm doesn't handle relative paths very well, hence abspath used.\nclass FFM:\n\n def __init__(self, train_binary_path, predict_binary_path, model_path=None):\n self.train_binary_path = osp.abspath(train_binary_path)\n self.predict_binary_path = osp.abspath(predict_binary_path)\n self.model_path = osp.abspath(model_path) if model_path is not None else None\n\n def fit(self, X, model_path='model', l=0.00002, k=4, t=15, r=0.2, s=1):\n \"\"\"\n -l <lambda>: regularization parameter (default 0.00002)\n -k <factor>: number of latent factors (default 4)\n -t <iteration>: number of iterations (default 15)\n -r <eta>: learning rate (default 0.2)\n -s <nr_threads>: number of threads (default 1)\n \"\"\"\n # validation support?\n warnings.warn('Please note that unix newline format (LF) is required for libffm binaries to work correctly.' +\n ' Windows (CR LF) will cause the issues.')\n\n if type(X) != str:\n raise ValueError(f'Improper input type {type(X)}.X must be a path to ffm file.')\n self.model_path = osp.abspath(model_path)\n train_data_abspath = osp.abspath(X)\n cmd = f'{self.train_binary_path} -l {l} -k {k} -t {t} -r {r} -s {s} {train_data_abspath} {self.model_path}'\n proc = subprocess.Popen(cmd)\n proc.wait()\n os.remove(f'{train_data_abspath}.bin')\n\n def predict(self, X, output_file):\n warnings.warn('Please note that unix newline format (LF) is required for libffm binaries to work correctly.' +\n ' Windows (CR LF) will cause the issues.')\n if self.model_path is None:\n raise RuntimeError('Model must be fitted first!')\n if type(X) != str:\n raise ValueError(f'Improper input type {type(X)}.X must be a path to ffm file.')\n\n predicted_data_abspath = osp.abspath(X)\n output_file_abspath = osp.abspath(output_file)\n\n cmd = f'{self.predict_binary_path} {predicted_data_abspath} {self.model_path} {output_file_abspath}'\n proc = subprocess.Popen(cmd)\n proc.wait()\n\n @classmethod\n def pred_file_to_numpy(cls, preds_file):\n return np.loadtxt(preds_file)\n\n @classmethod\n def ground_truth_from_ffm_file(cls, ffm_file):\n with open(ffm_file, 'r') as f:\n labels = [line.split(' ')[0] for line in f]\n return np.array(labels).astype(float)\n" ]
[ [ "numpy.array", "numpy.loadtxt" ] ]
caspase-like-homolog-identifier/c14_witcher
[ "e2c481607b85fed749daec0e9b3b29b65d6b448f" ]
[ "find_deathdomains.py" ]
[ "#!/usr/bin/env python\n\nfrom run_hmmer import RunHmmer\nfrom Bio import SearchIO\nimport pandas as pd\nimport collections\nimport random\nimport tempfile\nimport argparse\nimport pprint\nimport glob\nimport sys\n\nclass FindDeathDomains(RunHmmer):\n\n def __init__(self, seqfile, dd_hmm_path, *hmmersearch_args):\n\n \"\"\" Subclass the Hmmer commandline wrapper \"\"\" \n \n self.dd_hmm_paths = glob.glob(dd_hmm_path)\n super().__init__(\"hmmsearch\", None, seqfile, None, None, *hmmersearch_args)\n self.deathdomain_hits = {}\n self.dd_dict = None\n\n \n def deathdomains_iter(self):\n\n \"\"\" iterate over the deathdomains \"\"\"\n self.dd_names = []\n for hmm_file in self.dd_hmm_paths:\n self.hmmfile = hmm_file\n tmp1, tmp2 = [ tempfile.NamedTemporaryFile(delete=False) for _ in range(2) ]\n self.align_out = tmp1.name\n self.domtblout = tmp2.name\n std, stderr = self()\n deathdomain = self.has_deathdomain(self.domtblout)\n\n if deathdomain:\n self.deathdomain_hits[deathdomain[0].id] = deathdomain[0].hits \n self.dd_names.append(deathdomain[0].id)\n \n \n def has_deathdomain(self, domtab):\n\n return list(SearchIO.parse(domtab, \"hmmsearch3-domtab\")) \n \n \n def DeathDomains(self, feature):\n \"\"\"Property to view the death domains.Start analysis if not done already\"\"\"\n # _id\n # _id_alt\n # _query_id\n # _description\n # _description_alt\n # _query_description\n # attributes\n # dbxrefs\n # _items\n # accession\n # seq_len\n # evalue\n # bitscore\n # bias\n \n if not self.deathdomain_hits:\n self.deathdomains_iter()\n #create dict using seq.ids as keys and empty lists as values\n dd_dict = collections.defaultdict(list)\n for dd in self.deathdomain_hits:\n #print(dd)\n for hit in self.deathdomain_hits[dd]:\n dd_dict[hit.id].append(vars(hit)[feature])\n\n self.deathdomains = pd.DataFrame(columns = ['Seq_ID']+self.dd_names)\n for seq_id, values in dd_dict.items():\n \n self.deathdomains = self.deathdomains.append(pd.Series([seq_id]+values, index= ['Seq_ID']+self.dd_names, name = seq_id))\n \n\n return self.deathdomains\n \n \n \n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument('seqfile', action='store', type=str)\n parser.add_argument('-g','--hmm_glob', default=\"/opt/DB_REF/Pfam/Ig*hmm\")\n args = parser.parse_args() \n dd = FindDeathDomains(args.seqfile, args.hmm_glob)\n dd.deathdomains_iter()\n print(\"\\n\\n\\n\\n\")\n \n print(dd.DeathDomains('evalue'))\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ] ]
MehdiAbbanaBennani/statistical-optimisation
[ "0de96661ca7ab857639ad14127b97af39321762e" ]
[ "src/logistic_regression.py" ]
[ "import numpy as np\nfrom tqdm import tqdm\nfrom gradient import Gradient\n\n\nclass LogisticRegression:\n\n def __init__(self, type, mu, gradient_param, data, d=100, theta=None):\n if theta is None:\n self.theta = np.random.rand(d) * 2 - 1\n else:\n self.theta = theta\n\n self.type = type\n self.gradient = Gradient(gradient_param)\n self.mat = data\n self.n_samples = data[\"Xtrain\"].shape[0]\n self.mu = mu\n\n @staticmethod\n def sigmoid(z):\n return 1 / (1 + np.exp(- z))\n\n def error(self, X, y_true):\n N = len(y_true)\n return sum([self.single_error(X[i], y_true[i])\n for i in range(N)]) / N\n\n def single_error(self, X, y_true):\n # y_pred = round(self.predict(X))\n y_pred = self.predict_label(X)\n return abs(y_true - y_pred) / 2\n\n def loss(self, X, y_true):\n N = len(y_true)\n return sum([self.single_loss(X[i], y_true[i])\n for i in range(N)]) / N\n\n def single_loss(self, X, y_true):\n y_pred = self.predict(X)\n if self.type == \"square\":\n return (y_pred - y_true) ** 2\n if self.type == \"logistic\":\n return np.log(1 + np.exp(- y_true * y_pred))\n # return - y_true * np.log(y_pred) - (1 - y_true) * np.log(1 - y_pred)\n\n def predict(self, X):\n # return self.sigmoid(np.dot(X, self.theta))\n return np.dot(X, self.theta)\n\n def predict_label(self, X):\n y_pred = self.predict(X)\n if y_pred < 0 :\n return -1\n else :\n return 1\n\n def log(self, log_dict, it, log_freq):\n log_dict[\"train_losses\"].append(self.loss(X=self.mat[\"Xtrain\"],\n y_true=self.mat[\"ytrain\"]))\n log_dict[\"test_losses\"].append(self.loss(X=self.mat[\"Xtest\"],\n y_true=self.mat[\"ytest\"]))\n log_dict[\"train_errors\"].append(self.error(X=self.mat[\"Xtrain\"],\n y_true=self.mat[\"ytrain\"]))\n log_dict[\"test_errors\"].append(self.error(X=self.mat[\"Xtest\"],\n y_true=self.mat[\"ytest\"]))\n if log_freq == \"epoch\" :\n log_dict[\"iterations\"].append(it / self.n_samples)\n else :\n log_dict[\"iterations\"].append(it)\n\n def compute_n_iter(self, n_epoch):\n return n_epoch * (self.n_samples // self.gradient.batch_size)\n\n def log_freq_to_iter(self, log_freq):\n if log_freq == \"epoch\" :\n return self.n_samples\n else :\n return log_freq\n\n def run_optimizer(self, n_epoch, log_freq, optimizer):\n log_dict = {\"train_losses\": [],\n \"test_losses\": [],\n \"iterations\": [],\n \"train_errors\": [],\n \"test_errors\": []}\n n_iter = self.compute_n_iter(n_epoch)\n\n for it in tqdm(range(n_iter)):\n if optimizer == \"sgd\" :\n self.gradient.sgd_step(model=self, it=it)\n if optimizer == \"sag\":\n self.gradient.sag_step(model=self, it=it)\n\n if it % self.log_freq_to_iter(log_freq) == 0:\n self.log(log_dict, it, log_freq)\n\n return log_dict" ]
[ [ "numpy.dot", "numpy.random.rand", "numpy.exp" ] ]
synapticarbors/npy-append-array
[ "bf33483e7c2c50e13c9e55940878ca8217f4d4ad" ]
[ "npy_append_array/npy_append_array.py" ]
[ "import numpy as np\nimport os.path\nfrom struct import pack, unpack\nfrom io import BytesIO\n\ndef header_tuple_dict(tuple_in):\n return {\n 'shape': tuple_in[0],\n 'fortran_order': tuple_in[1],\n 'descr': np.lib.format.dtype_to_descr(tuple_in[2])\n }\n\ndef has_fortran_order(arr):\n return not arr.flags.c_contiguous and arr.flags.f_contiguous\n\ndef peek(fp, length):\n pos = fp.tell()\n tmp = fp.read(length)\n fp.seek(pos)\n return tmp\n\nclass NpyAppendArray:\n def __init__(self, filename):\n self.filename = filename\n self.fp = None\n self.__is_init = False\n if os.path.isfile(filename):\n self.__init()\n\n def __init(self):\n self.fp = open(self.filename, mode=\"rb+\")\n fp = self.fp\n\n magic = np.lib.format.read_magic(fp)\n self.is_version_1 = magic[0] == 1 and magic[1] == 0\n self.is_version_2 = magic[0] == 2 and magic[1] == 0\n\n if not self.is_version_1 and not self.is_version_2:\n raise NotImplementedError(\n \"version (%d, %d) not implemented\"%magic\n )\n\n self.header_length, = unpack(\"<H\", peek(fp, 2)) if self.is_version_1 \\\n else unpack(\"<I\", peek(fp, 4))\n\n self.header = np.lib.format.read_array_header_1_0(fp) if \\\n self.is_version_1 else np.lib.format.read_array_header_2_0(fp)\n\n if self.header[1] != False:\n raise NotImplementedError(\"fortran_order not implemented\")\n\n fp.seek(0)\n\n self.header_bytes = fp.read(self.header_length + (\n 10 if self.is_version_1 else 12\n ))\n\n fp.seek(0, 2)\n\n self.__is_init = True\n\n def __create_header_bytes(self, header_map, spare_space=False):\n io = BytesIO()\n np.lib.format.write_array_header_2_0(io, header_map)\n\n if spare_space:\n io.getbuffer()[8:12] = pack(\"<I\", int(\n io.getbuffer().nbytes-12+64\n ))\n io.getbuffer()[-1] = 32\n io.write(b\" \"*64)\n io.getbuffer()[-1] = 10\n\n return io.getbuffer()\n\n def append(self, arr):\n if not arr.flags.c_contiguous:\n raise NotImplementedError(\"ndarray needs to be c_contiguous\")\n\n if has_fortran_order(arr):\n raise NotImplementedError(\"fortran_order not implemented\")\n\n arr_descr = np.lib.format.dtype_to_descr(arr.dtype)\n\n if not self.__is_init:\n with open(self.filename, \"wb\") as fp0:\n fp0.write(self.__create_header_bytes({\n 'descr': arr_descr,\n 'fortran_order': False,\n 'shape': arr.shape\n }, True))\n arr.tofile(fp0)\n\n # np.save(self.filename, arr)\n self.__init()\n return\n\n descr = self.header[2]\n\n if arr_descr != descr:\n raise TypeError(\"incompatible ndarrays types %s and %s\"%(\n arr_descr, descr\n ))\n\n shape = self.header[0]\n\n if len(arr.shape) != len(shape):\n raise TypeError(\"incompatible ndarrays shape lengths %s and %s\"%(\n len(arr.shape), len(shape)\n ))\n\n for i, e in enumerate(shape):\n if i > 0 and e != arr.shape[i]:\n raise TypeError(\"ndarray shapes can only differ on zero axis\")\n\n new_shape = list(shape)\n new_shape[0] += arr.shape[0]\n new_shape = tuple(new_shape)\n self.header = (new_shape, self.header[1], self.header[2])\n\n self.fp.seek(0)\n\n new_header_map = header_tuple_dict(self.header)\n\n new_header_bytes = self.__create_header_bytes(new_header_map, True)\n header_length = len(self.header_bytes)\n\n if header_length != len(new_header_bytes):\n new_header_bytes = self.__create_header_bytes(new_header_map)\n\n if header_length != len(new_header_bytes):\n raise TypeError(\"header length mismatch, old: %d, new: %d\"%(\n header_length, len(new_header_bytes)\n ))\n\n self.header_bytes = new_header_bytes\n\n self.fp.write(new_header_bytes)\n\n self.fp.seek(0, 2)\n\n arr.tofile(self.fp)\n\n def __del__(self):\n if 
self.fp is not None:\n self.fp.close()" ]
[ [ "numpy.lib.format.dtype_to_descr", "numpy.lib.format.read_array_header_2_0", "numpy.lib.format.read_array_header_1_0", "numpy.lib.format.write_array_header_2_0", "numpy.lib.format.read_magic" ] ]
Erebyel/Gilbert
[ "b7206278cae8c4686de9b87f042fbda42b5fe324" ]
[ "gilbert.py" ]
[ "\n##---------------------- Carga de bibliotecas\nfrom pandas import DataFrame\nimport streamlit as st\nimport numpy as np\n\n##---------------------- Base de datos\nfrase = DataFrame({'artículo': ['El', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'La', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'El', 'La', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'La', 'La', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'La', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'La', 'La', 'El', 'La', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'La', 'El', 'El', 'El', 'La', 'El', 'La', 'La', 'La', 'El', 'La', 'El', 'La', 'La', 'El', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'La', 'El', 'La', 'El', 'La', 'La', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'La', 'La', 'La', 'La', 'La', 'El', 'La', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'La', 'La', 'El', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'La', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'La', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'La', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'El', 'La', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'La', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'La', 'El', 'La', 'La', 'La', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'La', 'La', 'La', 'La', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'La', 'La', 'La', 'El', 'El', 'La', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'La', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'La', 'La', 'La', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'El', 'La', 
'La', 'El', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'La', 'El', 'La', 'El', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'La', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'El', 'La', 'La', 'La', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'La', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'La', 'La', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'La', 'La', 'El', 'La', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'El', 'La', 'El', 'La', 'El', 'El', 'El', 'La', 'El', 'La', 'La', 'El', 'La', 'La', 'El', 'La', 'El', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'La', 'La', 'El', 'La', 'El', 'La', 'La', 'El', 'El', 'El', 'La', 'La', 'La', 'La', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'El', 'La', 'El', 'El', 'La', 'El', 'La', 'La', 'La', 'El', 'El'], 'sujeto': ['acantilado', 'ácaro', 'acertijo', 'adivinanza', 'adorno', 'aeronave', 'afluente', 'aguacate', 'aguja', 'alba', 'alegría', 'alféizar', 'alondra', 'amada', 'amanecer', 'amante', 'amistad', 'amor', 'anciana', 'andén', 'ángel', 'anillo', 'ansiedad', 'aposento', 'árbol', 'arco', 'armadura', 'arpía', 'arquitecto', 'arrebol', 'arroyo', 'artefacto mágico', 'asteroide', 'astronauta', 'atún', 'aurora', 'ausencia', 'avena', 'aventura', 'avión', 'azafrán', 'azúcar', 'baile de disfraces', 'balcón', 'baldosa', 'ballena', 'balrog', 'balsa', 'banco', 'bandido', 'bar', 'barca', 'barco pirata', 'belfo', 'beso', 'besugo', 'biblioteca', 'bicicleta', 'bigote', 'bikini', 'billar', 'bisonte', 'bizcocho borracho', 'boca', 'bocadillo', 'bogavante', 'bohemia', 'bolo', 'bombero', 'bosque', 'bota', 'botella', 'botón', 'braga', 'brisa', 'bronceador', 'bruja', 'brújula', 'buitre', 'burdégano', 'caballero', 'caballito de mar', 'caballo', 'cabaña', 'cadena', 'café', 'caldero', 'camarote', 'camino', 'campo de batalla', 'campo de torneos', 'cancerbero', 'canoa', 'capitán', 'carta', 'casa solariega', 'cascada', 'castillo', 'catacumba', 'catarro', 'cementerio', 'centauro', 'cerradura', 'chimenea', 'chocolate', 'cicatriz', 'cíclope', 'cielo', 'ciénaga', 'cisne', 'ciudad', 'claridad', 'cobertizo', 'cocina', 'cocinera', 'cofre', 'colchón', 'colibrí', 'colina', 'colonia', 'cometa', 'comida', 'compasión', 'concha', 'concierto', 'constelación', 'copo de nieve', 'cordón', 'corona', 'corpúsculo', 'creatividad', 'crepúsculo', 'crucero', 'cuchara', 'cuchillo', 'cuervo', 'cueva', 'dado', 'dardo', 'dátil', 'delfín', 'demonio', 'depresión', 'desagüe', 'desenlace', 'desertor', 'desfiladero', 'desierto', 'destino', 'devaneo', 'día', 'dibujo', 'dinastía', 'diodo', 'dios', 'dique', 'dodo', 'dolor', 'dragón', 'dragón de komodo', 'dríada', 'droga', 'duende', 'duna', 'eclipse', 'edredón', 'ejército', 'elfo', 'elocuencia', 'enano', 'enemigo', 'epifanía', 'época', 'equidna', 'equilibrista', 'equitación', 'erizo de mar', 'escalera', 'escarabajo', 'escarcha', 'escasez', 'escoba', 'escorpión', 'escotilla', 'escritor', 'escudero', 'escudo', 'esfinge', 'esgrima', 'espacio', 'espacio exterior', 'espada', 'espaguetis', 'espejo', 'esperanza', 'esponja', 'esposa', 'establo', 'estación', 'estadio', 'estanque', 'estatua', 'estrella', 'estropajo', 'estuario', 'faisán', 'familia', 'farmacia', 'farol', 'felpudo', 'fénix', 'feudo', 'fiambre', 'fiebre', 'fiera', 'fiesta', 'fino ropaje', 'fiordo', 'flamenco', 'flauta', 'flirteo', 'flota', 'fluctuación', 'foca', 'foso', 'frambuesa', 'francotirador', 'fraternidad', 'fresa', 'fresco', 
'frío', 'frontera', 'fruta', 'fruto seco', 'fuego', 'fuente', 'futuro', 'gabardina', 'galápago', 'galaxia', 'gallo', 'gasolina', 'gato', 'gaviota', 'geografía', 'gigante', 'ginebra', 'giro postal', 'globo', 'glotón', 'golondrina', 'gorgona', 'gorila', 'gorrión', 'granada', 'granizo', 'granja', 'grapadora', 'grasa', 'grosella', 'grulla', 'guardia', 'guardia', 'guateque', 'guepardo', 'guindilla', 'gula', 'gusano', 'haba', 'habitante', 'hacha', 'hada', 'hada madrina', 'halcón', 'hambre', 'hamburguesa', 'hechizo', 'hélice', 'helicóptero', 'heraldo', 'herboristería', 'heredero', 'herida', 'hermana', 'hermanastra', 'hermano', 'herramienta', 'hidra', 'hiena', 'hierro forjado', 'hígado', 'higiene', 'hipocampo', 'hipogrifo', 'hipopótamo', 'hobbit', 'hogar', 'hormiga', 'hormigonera', 'horno microondas', 'hortaliza', 'huelga', 'huérfano', 'hueso', 'humedad', 'huracán', 'hurón', 'idilio', 'iglesia', 'iguana', 'imán', 'impermeable', 'impresionismo', 'incandescencia', 'infraestructura', 'insecto', 'instituto', 'incendio', 'interespacio', 'internado', 'interruptor', 'intimidad', 'invernadero', 'invierno', 'inyección', 'iridiscencia', 'isla', 'jabón', 'jaguar', 'jamón', 'jardín', 'jarra', 'jaula', 'jazz', 'jengibre', 'jerbo', 'jilguero', 'jinete', 'joya', 'judo', 'jungla', 'justa', 'justicia', 'kiwi', 'ladrón', 'lagartija', 'lago', 'lanza', 'látigo', 'laurel', 'lava', 'lechuga', 'lechuza', 'lenteja', 'leñador', 'león', 'leopardo', 'leotardo', 'leprechaun', 'lesión', 'libélula', 'libro', 'licor', 'ligue', 'diferencia', 'limón', 'linaje', 'lince', 'litera', 'literatura', 'llave', 'lluvia', 'lobo', 'locomotora', 'lombriz de tierra', 'loro', 'lotería', 'lubina', 'lugar bajo el agua', 'lugar bajo tierra', 'luminiscencia', 'luna', 'luz', 'madrastra', 'magnetófono', 'mago', 'mamut', 'manantial', 'manifestación', 'manta', 'mantícora', 'manzana', 'mapa', 'mar', 'mar', 'maratón', 'marinero', 'marisco', 'marmota', 'mausoleo', 'mazapán', 'mazmorra', 'mazorca', 'meandro', 'medianoche', 'meiga', 'melancolía', 'mendigo', 'mermelada de tomate', 'mina', 'minotauro', 'mirlo', 'molécula', 'molinillo', 'monasterio', 'monstruo', 'montaña', 'montaña rusa', 'monte', 'mosca', 'muérgano', 'mujeriego', 'muñeca', 'murciégalo', 'muro', 'musa', 'música', 'nabo', 'naranja', 'nariz', 'narval', 'nata', 'natación', 'naufragio', 'nave', 'náyade', 'nécora', 'nectarina', 'nevera', 'nieve', 'ninfa', 'niñera', 'niño', 'níspero', 'noche', 'noria', 'nostalgia', 'novelista', 'noviazgo', 'nube', 'nudillo', 'nutria', 'nutrición', 'nylon', 'ñandú', 'ñu', 'oasis', 'obertura', 'obrero', 'oca', 'océano', 'odio', 'oficial', 'ogro', 'ojo', 'oleaje', 'olla de presión', 'olvido', 'ombligo', 'ondina', 'orate', 'orco', 'ordinario', 'orégano', 'oreja', 'orfanato', 'ornitorrinco', 'oro', 'orquesta', 'ósculo', 'oso hormiguero', 'ostra', 'otoño', 'oveja', 'pabellón', 'pájaro', 'palacio', 'pantano', 'pantera', 'parchís', 'pasión', 'pastel', 'patinaje', 'payaso', 'pegaso', 'peluca', 'perfume', 'perro', 'pescador', 'petirrojo', 'pez', 'pezuña', 'piedra', 'pintura', 'piña', 'pipa', 'pirata', 'pistacho', 'pistola', 'pitonisa', 'pizarra', 'planeta', 'plano', 'plástico', 'plata', 'playa', 'pluma', 'poción', 'político', 'polizón', 'posada', 'pozo', 'pradera', 'precipicio', 'prenda de amor', 'primavera', 'princesa', 'príncipe', 'promesa', 'pueblo', 'puente', 'puerta', 'puerto', 'pulga', 'quebrantahuesos', 'quimera', 'química', 'quiosco', 'radio', 'rana', 'rascacielos', 'rastrillo', 'rata', 'ratón', 'raya', 'realismo', 'receta', 'recogedor', 'rectángulo', 'recuerdo', 
'refresco', 'regadera', 'regalo', 'regreso', 'reina', 'reino', 'relámpago', 'relieve', 'religión', 'reliquia', 'remo', 'remolacha', 'rémora', 'rencor', 'reno', 'reportaje', 'reproducción', 'resiliencia', 'retraso', 'retrato', 'reunión', 'rey', 'rinoceronte', 'río', 'rocío', 'rodilla', 'romanticismo', 'ropa', 'ruina', 'ruiseñor', 'sábana', 'sacaclavos', 'sacerdote', 'sacerdotisa', 'sal', 'salchichón', 'salida', 'salmuera', 'salón de baile', 'salón del trono', 'saltamontes', 'salud', 'sangre', 'sanguijuela', 'santuario', 'sapo', 'sartén', 'satélite', 'semáforo', 'sensualidad', 'sentimiento', 'sequía', 'serendipia', 'sereno', 'serpiente', 'serpiente marina', 'servilletero', 'sexo', 'sílfide', 'sinfonía', 'sirena', 'sistema solar', 'sol', 'soledad', 'sombrero', 'sonámbulo', 'suciedad', 'sueño', 'sujetador', 'taberna', 'tambor', 'tarántula', 'tarta de queso', 'taxi', 'tempestad', 'templo', 'tentación', 'tentempié', 'terciopelo', 'tesoro', 'tierra', 'tierra extranjera', 'tifón', 'timón', 'tiovivo', 'toalla', 'tobillo', 'tobogán', 'torre', 'tortilla', 'tortuga', 'trabajo duro', 'trampa', 'transatlántico', 'transeúnte', 'tranvía', 'trasgo', 'tren', 'trenza', 'trigo', 'tripulación', 'tritón', 'troll', 'trueno', 'tucán', 'tuerca', 'tulipán', 'tumba', 'ultramarinos', 'unicornio', 'uniforme', 'universidad', 'universo', 'uña', 'urraca', 'utensilio', 'uva', 'vaca', 'vagabundo', 'vagina', 'vagón', 'vainilla', 'vajilla', 'valle', 'vampiro', 'varano', 'vaso', 'velero', 'venado', 'vendaje', 'ventana', 'verdad', 'verdulería', 'vestuario', 'vía', 'viajero', 'víbora', 'vida', 'vidrio', 'viejo', 'viento', 'vinagrera', 'virtud', 'visita', 'vitalidad', 'vituperio', 'vodka', 'volcán', 'vuelo', 'whisky', 'wombat', 'wyvern', 'xilófono', 'yate', 'yegua', 'yogur', 'yunque', 'zanahoria', 'zapato', 'zarzamora', 'zarzuela', 'cebra', 'zorro', 'zueco'], 'adjetivo masculino': ['absurdo', 'ácido', 'admirable', 'adolescente', 'afectuoso', 'afortunado', 'alegre', 'altivo', 'amable', 'amargo', 'ambiguo', 'amistoso', 'andrajoso', 'angelical', 'anómalo', 'anónimo', 'ansioso', 'antiguo', 'apasionado', 'apático', 'argénteo', 'árido', 'arrejuntado', 'artesanal', 'áspero', 'astuto', 'atento', 'atómico', 'atractivo', 'atrevido', 'atroz', 'audaz', 'áurico', 'ausente', 'automático', 'bajo', 'bancario', 'barato', 'bárbaro', 'básico', 'basto', 'beato', 'belga', 'bélico', 'beligerante', 'bello', 'bíblico', 'bilingüe', 'biológico', 'blanco', 'blando', 'bonito', 'boreal', 'borracho', 'boscoso', 'breve', 'brillante', 'brusco', 'brutal', 'bueno', 'burgués', 'burlón', 'cálido', 'callejero', 'caprichoso', 'cariñoso', 'cascarrabias', 'casposo', 'cauto', 'célebre', 'celoso', 'cercano', 'cerúleo', 'ciego', 'cínico', 'clasista', 'cobarde', 'coherente', 'colosal', 'cómodo', 'compacto', 'compasivo', 'complejo', 'complicado', 'comprensivo', 'común', 'contradictorio', 'convencional', 'convincente', 'cordial', 'corpulento', 'cortante', 'cortesano', 'cósmico', 'creativo', 'criminal', 'crítico', 'crónico', 'cruel', 'cuántico', 'cuidadoso', 'culpable', 'curativo', 'curioso', 'curvo', 'débil', 'decidido', 'delgado', 'delicado', 'delicioso', 'delincuente', 'dependiente', 'deprimido', 'desagradable', 'desaliñado', 'desapasionado', 'desarmado', 'descomunal', 'desconfiado', 'descuidado', 'deseado', 'desfavorecido', 'deshonrado', 'desierto', 'despierto', 'dichoso', 'diferente', 'difícil', 'diminuto', 'dinámico', 'directo', 'discreto', 'disfrazado', 'disperso', 'distante', 'divertido', 'divino', 'dócil', 'doloroso', 'doméstico', 'dorado', 'dracónico', 'dramático', 
'druídico', 'dulce', 'duro', 'ecológico', 'efímero', 'egoísta', 'electrónico', 'elegante', 'élfico', 'emocional', 'encantador', 'enérgico', 'enfadado', 'enfermo', 'engreído', 'enjuto', 'enterrado', 'entrometido', 'equilibrado', 'erótico', 'erróneo', 'esbelto', 'escandaloso', 'escéptico', 'espacial', 'espeso', 'espiritual', 'espontáneo', 'estéril', 'estimulante', 'estoico', 'estricto', 'eterno', 'ético', 'exagerado', 'excéntrico', 'excesivo', 'exclusivo', 'exigente', 'exitoso', 'exótico', 'explosivo', 'expresivo', 'exquisito', 'extraordinario', 'extrovertido', 'fácil', 'falto', 'familiar', 'famoso', 'fanático', 'fantástico', 'fascinante', 'fatal', 'fatuo', 'favorito', 'feliz', 'femenino', 'feo', 'fértil', 'fiable', 'ficticio', 'fiel', 'fijo', 'final', 'fino', 'firme', 'flaco', 'flexible', 'flojo', 'floral', 'fluvial', 'formal', 'frágil', 'franco', 'frecuente', 'fresco', 'frío', 'fuerte', 'fugaz', 'fúnebre', 'funesto', 'furioso', 'fútil', 'general', 'genérico', 'generoso', 'genético', 'genial', 'geográfico', 'geológico', 'geométrico', 'gigante', 'gitano', 'glacial', 'global', 'glorioso', 'gordo', 'gótico', 'gracioso', 'gráfico', 'grande', 'grandilocuente', 'grandioso', 'grato', 'gratuito', 'grave', 'griego', 'gris', 'grosero', 'grotesco', 'grueso', 'gruñón', 'guapo', 'hábil', 'habitual', 'hablador', 'hambriento', 'harto', 'henchido', 'herbáceo', 'heredado', 'herido', 'hermoso', 'heroico', 'heterogéneo', 'hidráulico', 'hipócrita', 'hipotético', 'histérico', 'histórico', 'holgazán', 'homogéneo', 'homosexual', 'hondo', 'horizontal', 'horrible', 'hostil', 'humanitario', 'humano', 'húmedo', 'humilde', 'huraño', 'imprudente', 'incandescente', 'incognoscible', 'inconmensurable', 'inconsciente', 'joven', 'judío', 'juguetón', 'juramentado', 'jurídico', 'justo', 'juvenil', 'kinestésico', 'laboral', 'lamentable', 'largo', 'latente', 'lateral', 'legal', 'legítimo', 'lejano', 'lento', 'lésbico', 'leve', 'levítico', 'liberal', 'libre', 'lícito', 'ligero', 'limpio', 'lindo', 'lingüístico', 'líquido', 'listo', 'litúrgico', 'llamativo', 'lleno', 'llorón', 'lluvioso', 'local', 'loco', 'lógico', 'lúcido', 'lujoso', 'luminiscente', 'luminoso', 'lunático', 'maduro', 'mágico', 'magnífico', 'maldito', 'maleducado', 'malhumorado', 'malicioso', 'maltratado', 'maravilloso', 'marciano', 'marginal', 'marino', 'masculino', 'material', 'maternal', 'medieval', 'melancólico', 'mensurable', 'menudo', 'meticuloso', 'mezquino', 'miedoso', 'minúsculo', 'miserable', 'misterioso', 'mítico', 'moderado', 'moderno', 'modesto', 'molesto', 'monumental', 'mordaz', 'mortal', 'móvil', 'mudo', 'musical', 'mutuo', 'naciente', 'nacional', 'nacionalista', 'narcisista', 'narrativo', 'natural', 'nazi', 'negativo', 'negro', 'nervioso', 'neutro', 'noble', 'nocivo', 'nocturno', 'nónuplo', 'normal', 'normativo', 'notable', 'notarial', 'notorio', 'novel', 'novelero', 'nuclear', 'nuevo', 'nulo', 'numérico', 'numeroso', 'nutritivo', 'objetivo', 'obligatorio', 'observable', 'obvio', 'occidental', 'oceánico', 'octavo', 'óctuplo', 'ocultación', 'oculto', 'odioso', 'ofensivo', 'oficial', 'ontológico', 'opaco', 'operativo', 'oportuno', 'óptico', 'oral', 'orbitado', 'ordinario', 'orgánico', 'organizativo', 'orgulloso', 'oriental', 'original', 'originario', 'ortográfico', 'oscuro', 'pálido', 'parturiento', 'pasional', 'pasivo', 'pasteloso', 'patético', 'pedregoso', 'peligroso', 'penetrante', 'penoso', 'pequeño', 'perenne', 'perezoso', 'perfecto', 'perpetuo', 'perseverante', 'perverso', 'pícaro', 'pintoresco', 'placentero', 'pobre', 'poderoso', 'poético', 
'polémico', 'positivo', 'precoz', 'preponderante', 'prestigioso', 'pretencioso', 'previsible', 'prodigioso', 'profético', 'profundo', 'progresista', 'provocador', 'prudente', 'puntual', 'quieto', 'químico', 'quinto', 'quirúrgico', 'quisquilloso', 'racional', 'racista', 'radiante', 'radical', 'rápido', 'raro', 'razonable', 'reacio', 'realista', 'rebelde', 'receloso', 'reciente', 'recto', 'referente', 'relativo', 'reluciente', 'renovador', 'repentino', 'reservado', 'resistente', 'respetable', 'responsable', 'revolucionario', 'rico', 'ridículo', 'rígido', 'riguroso', 'rimbombante', 'robado', 'rocoso', 'románico', 'romano', 'romántico', 'roto', 'rotundo', 'rubio', 'ruidoso', 'rutinario', 'sabio', 'sagaz', 'sagrado', 'salado', 'salvaje', 'sangriento', 'sano', 'santificado', 'secreto', 'seguro', 'selenita', 'sencillo', 'sensato', 'sensible', 'sensorial', 'sentimental', 'sereno', 'serio', 'servicial', 'severo', 'sexual', 'silencioso', 'similar', 'simpático', 'simulado', 'sincero', 'siniestro', 'sintético', 'sobrenatural', 'sofista', 'sofisticado', 'soleado', 'solemne', 'solidario', 'solitario', 'sombrío', 'sonriente', 'sospechoso', 'suave', 'sucio', 'suculento', 'sudoroso', 'sueño', 'susceptible', 'sutil', 'tacaño', 'taciturno', 'tajante', 'talentoso', 'tardío', 'temeroso', 'temible', 'temporal', 'tenaz', 'tenso', 'teórico', 'terapéutico', 'térmico', 'terrestre', 'terrible', 'territorial', 'terrorista', 'tibio', 'tierno', 'tieso', 'tímido', 'típico', 'tonto', 'torpe', 'tóxico', 'trabajador', 'tradicional', 'trágico', 'traicionado', 'tranquilo', 'transitorio', 'transparente', 'travieso', 'tripulado', 'triste', 'trivial', 'turbio', 'ulterior', 'último', 'unánime', 'único', 'uniforme', 'unitario', 'universal', 'universitario', 'urbano', 'urgente', 'usual', 'útil', 'utilitario', 'utilizable', 'vacío', 'vagamundo', 'vago', 'valeroso', 'válido', 'valiente', 'valioso', 'vano', 'variable', 'variado', 'vasto', 'vegetal', 'vegetativo', 'veloz', 'envenenado', 'verbal', 'verde', 'verosímil', 'vertical', 'vespertino', 'veterano', 'viable', 'victorioso', 'viejo', 'vigente', 'violento', 'virgen', 'visible', 'vital', 'vitoreado', 'vivaz', 'viviente', 'voluntario', 'vulgar', 'yodado', 'zafio', 'zafíreo', 'zarrapastroso', 'zopenco', 'enquistado', 'conquistado', 'atormentado', 'radiactivo', 'machista', 'fulminante', 'plurilingüe', 'equivalente', 'equidistante', 'paralelo', 'ignorante', 'destrozado', 'acuartelado', 'evolucionado', 'añejo', 'dañado', 'anglicano', 'norteño', 'sureño', 'sustentado', 'español', 'calzado', 'embustero', 'amarillo', 'azul', 'rojo', 'rosa', 'arrinconado', 'oloroso', 'omnipresente', 'omnisciente', 'todopoderoso', 'acomplejado', 'castellanizado', 'debilitado', 'diferenciado', 'sepulcral', 'terraplanista', 'homeostático', 'onomatopéyico', 'gritón', 'sustancioso', 'lácteo', 'cósmico', 'bíblico', 'apestoso', 'despojado', 'rubicundo', 'encuestado', 'tórrido', 'mentiroso', 'estúpido', 'escrupuloso', 'contundente', 'cobrizo', 'escandaloso', 'lozano', 'pechugón', 'níveo', 'blanco', 'esculpido', 'negro', 'racista', 'robótico', 'inteligente', 'artificial', 'artificioso', 'adecuado', 'cómico', 'tramado', 'tramposo', 'lúcido'], 'adjetivo femenino': ['absurda', 'ácida', 'admirable', 'adolescente', 'afectuosa', 'afortunada', 'alegre', 'altiva', 'amable', 'amarga', 'ambigua', 'amistosa', 'andrajosa', 'angelical', 'anómala', 'anónima', 'ansiosa', 'antigua', 'apasionada', 'apática', 'argéntea', 'árida', 'arrejuntada', 'artesanal', 'áspera', 'astuta', 'atenta', 'atómica', 'atractiva', 'atrevida', 'atroz', 
'audaz', 'áurica', 'ausente', 'automática', 'baja', 'bancaria', 'barata', 'bárbara', 'básica', 'basta', 'beata', 'belga', 'bélica', 'beligerante', 'bella', 'bíblica', 'bilingüe', 'biológica', 'blanca', 'blanda', 'bonita', 'boreal', 'borracha', 'boscosa', 'breve', 'brillante', 'brusca', 'brutal', 'buena', 'burguesa', 'burlona', 'cálida', 'callejera', 'caprichosa', 'cariñosa', 'cascarrabias', 'casposa', 'cauta', 'célebre', 'celosa', 'cercana', 'cerúlea', 'ciega', 'cínica', 'clasista', 'cobarde', 'coherente', 'colosal', 'cómoda', 'compacta', 'compasiva', 'compleja', 'complicada', 'comprensiva', 'común', 'contradictoria', 'convencional', 'convincente', 'cordial', 'corpulenta', 'cortante', 'cortesana', 'cósmica', 'creativa', 'criminal', 'crítica', 'crónica', 'cruel', 'cuántica', 'cuidadosa', 'culpable', 'curativa', 'curiosa', 'curva', 'débil', 'decidida', 'delgada', 'delicada', 'deliciosa', 'delincuente', 'dependiente', 'deprimida', 'desagradable', 'desaliñada', 'desapasionada', 'desarmada', 'descomunal', 'desconfiada', 'descuidada', 'deseada', 'desfavorecida', 'deshonrada', 'desierta', 'despierta', 'dichosa', 'diferente', 'difícil', 'diminuta', 'dinámica', 'directa', 'discreta', 'disfrazada', 'dispersa', 'distante', 'divertida', 'divina', 'dócil', 'dolorosa', 'doméstica', 'dorada', 'dracónica', 'dramática', 'druídica', 'dulce', 'dura', 'ecológica', 'efímera', 'egoísta', 'electrónica', 'elegante', 'élfica', 'emocional', 'encantadora', 'enérgica', 'enfadada', 'enferma', 'engreída', 'enjuta', 'enterrada', 'entrometida', 'equilibrada', 'erótica', 'errónea', 'esbelta', 'escandalosa', 'escéptica', 'espacial', 'espesa', 'espiritual', 'espontánea', 'estéril', 'estimulante', 'estoica', 'estricta', 'eterna', 'ética', 'exagerada', 'excéntrica', 'excesiva', 'exclusiva', 'exigente', 'exitosa', 'exótica', 'explosiva', 'expresiva', 'exquisita', 'extraordinaria', 'extrovertida', 'fácil', 'falta', 'familiar', 'famosa', 'fanática', 'fantástica', 'fascinante', 'fatal', 'fatua', 'favorita', 'feliz', 'femenina', 'fea', 'fértil', 'fiable', 'ficticia', 'fiel', 'fija', 'final', 'fina', 'firme', 'flaca', 'flexible', 'floja', 'floral', 'fluvial', 'formal', 'frágil', 'franca', 'frecuente', 'fresca', 'fría', 'fuerte', 'fugaz', 'fúnebre', 'funesta', 'furiosa', 'fútil', 'general', 'genérica', 'generosa', 'genética', 'genial', 'geográfica', 'geológica', 'geométrica', 'gigante', 'gitana', 'glacial', 'global', 'gloriosa', 'gorda', 'gótica', 'graciosa', 'gráfica', 'grande', 'grandilocuente', 'grandiosa', 'grata', 'gratuita', 'grave', 'griega', 'gris', 'grosera', 'grotesca', 'gruesa', 'gruñona', 'guapa', 'hábil', 'habitual', 'habladora', 'hambrienta', 'harta', 'henchida', 'herbácea', 'heredada', 'herida', 'hermosa', 'heroica', 'heterogénea', 'hidráulica', 'hipócrita', 'hipotética', 'histérica', 'histórica', 'holgazana', 'homogénea', 'homosexual', 'honda', 'horizontal', 'horrible', 'hostil', 'humanitaria', 'humana', 'húmeda', 'humilde', 'huraña', 'imprudente', 'incandescente', 'incognoscible', 'inconmensurable', 'inconsciente', 'joven', 'judía', 'juguetona', 'juramentada', 'jurídica', 'justa', 'juvenil', 'kinestésica', 'laboral', 'lamentable', 'larga', 'latente', 'lateral', 'legal', 'legítima', 'lejana', 'lenta', 'lésbica', 'leve', 'levítica', 'liberal', 'libre', 'lícita', 'ligera', 'limpia', 'linda', 'lingüística', 'líquida', 'lista', 'litúrgica', 'llamativa', 'llena', 'llorona', 'lluviosa', 'local', 'loca', 'lógica', 'lúcida', 'lujosa', 'luminiscente', 'luminosa', 'lunática', 'madura', 'mágica', 'magnífica', 'maldita', 
'maleducada', 'malhumorada', 'maliciosa', 'maltratada', 'maravillosa', 'marciana', 'marginal', 'marina', 'masculina', 'material', 'maternal', 'medieval', 'melancólica', 'mensurable', 'menuda', 'meticulosa', 'mezquina', 'miedosa', 'minúscula', 'miserable', 'misteriosa', 'mítica', 'moderada', 'moderna', 'modesta', 'molesta', 'monumental', 'mordaz', 'mortal', 'móvil', 'muda', 'musical', 'mutua', 'naciente', 'nacional', 'nacionalista', 'narcisista', 'narrativa', 'natural', 'nazi', 'negativa', 'negra', 'nerviosa', 'neutra', 'noble', 'nociva', 'nocturna', 'nónupla', 'normal', 'normativa', 'notable', 'notarial', 'notoria', 'novel', 'novelera', 'nuclear', 'nueva', 'nula', 'numérica', 'numerosa', 'nutritiva', 'objetiva', 'obligatoria', 'observable', 'obvia', 'occidental', 'oceánica', 'octava', 'óctupla', 'ocultación', 'oculta', 'odiosa', 'ofensiva', 'oficial', 'ontológica', 'opaca', 'operativa', 'oportuna', 'óptica', 'oral', 'orbitada', 'ordinaria', 'orgánica', 'organizativa', 'orgullosa', 'oriental', 'original', 'originaria', 'ortográfica', 'oscura', 'pálida', 'parturienta', 'pasional', 'pasiva', 'pastelosa', 'patética', 'pedregosa', 'peligrosa', 'penetrante', 'penosa', 'pequeña', 'perenne', 'perezosa', 'perfecta', 'perpetua', 'perseverante', 'perversa', 'pícara', 'pintoresca', 'placentera', 'pobre', 'poderosa', 'poética', 'polémica', 'positiva', 'precoz', 'preponderante', 'prestigiosa', 'pretenciosa', 'previsible', 'prodigiosa', 'profética', 'profunda', 'progresista', 'provocadora', 'prudente', 'puntual', 'quieta', 'química', 'quinta', 'quirúrgica', 'quisquillosa', 'racional', 'racista', 'radiante', 'radical', 'rápida', 'rara', 'razonable', 'reacia', 'realista', 'rebelde', 'recelosa', 'reciente', 'recta', 'referente', 'relativa', 'reluciente', 'renovadora', 'repentina', 'reservada', 'resistente', 'respetable', 'responsable', 'revolucionaria', 'rica', 'ridícula', 'rígida', 'rigurosa', 'rimbombante', 'robada', 'rocosa', 'románica', 'romana', 'romántica', 'rota', 'rotunda', 'rubia', 'ruidosa', 'rutinaria', 'sabia', 'sagaz', 'sagrada', 'salada', 'salvaje', 'sangrienta', 'sana', 'santificada', 'secreta', 'segura', 'selenita', 'sencilla', 'sensata', 'sensible', 'sensorial', 'sentimental', 'serena', 'seria', 'servicial', 'severa', 'sexual', 'silenciosa', 'similar', 'simpática', 'simulada', 'sincera', 'siniestra', 'sintética', 'sobrenatural', 'sofista', 'sofisticada', 'soleada', 'solemne', 'solidaria', 'solitaria', 'sombría', 'sonriente', 'sospechosa', 'suave', 'sucia', 'suculenta', 'sudorosa', 'sueña', 'susceptible', 'sutil', 'tacaña', 'taciturna', 'tajante', 'talentosa', 'tardía', 'temerosa', 'temible', 'temporal', 'tenaz', 'tensa', 'teórica', 'terapéutica', 'térmica', 'terrestre', 'terrible', 'territorial', 'terrorista', 'tibia', 'tierna', 'tiesa', 'tímida', 'típica', 'tonta', 'torpe', 'tóxica', 'trabajador', 'tradicional', 'trágica', 'traicionada', 'tranquila', 'transitoria', 'transparente', 'traviesa', 'tripulada', 'triste', 'trivial', 'turbia', 'ulterior', 'última', 'unánime', 'única', 'uniforme', 'unitaria', 'universal', 'universitaria', 'urbana', 'urgente', 'usual', 'útil', 'utilitaria', 'utilizable', 'vacía', 'vagamunda', 'vaga', 'valerosa', 'válida', 'valiente', 'valiosa', 'vana', 'variable', 'variada', 'vasta', 'vegetal', 'vegetativa', 'veloz', 'envenenada', 'verbal', 'verde', 'verosímil', 'vertical', 'vespertina', 'veterana', 'viable', 'victoriosa', 'vieja', 'vigente', 'violenta', 'virgen', 'visible', 'vital', 'vitoreada', 'vivaz', 'viviente', 'voluntaria', 'vulgar', 'yodada', 'zafia', 
'zafírea', 'zarrapastrosa', 'zopenca', 'enquistada', 'conquistada', 'atormentada', 'radiactiva', 'machista', 'fulminante', 'plurilingüe', 'equivalente', 'equidistante', 'paralela', 'ignorante', 'destrozada', 'acuartelada', 'evolucionada', 'añeja', 'dañada', 'anglicana', 'norteña', 'sureña', 'sustentada', 'española', 'calzada', 'embustera', 'amarilla', 'azul', 'roja', 'rosa', 'arrinconada', 'olorosa', 'omnipresente', 'omnisciente', 'todopoderosa', 'acomplejada', 'castellanizada', 'debilitado', 'diferenciada', 'sepulcral', 'terraplanista', 'homeostática', 'onomatopéyica', 'gritona', 'sustanciosa', 'láctea', 'cósmica', 'bíblica', 'apestosa', 'despojada', 'rubicunda', 'encuestada', 'tórrida', 'mentirosa', 'estúpida', 'escrupulosa', 'contundente', 'cobriza', 'escandalosa', 'lozana', 'pechugona', 'nívea', 'blanca', 'esculpida', 'negra', 'racista', 'robótica', 'inteligente', 'artificial', 'artificiosa', 'adecuada', 'cómica', 'tramada', 'tramposa', 'lúcida'], 'acciones': ['abofetea a alguien', 'aborrece algo', 'aborta', 'abrocha algo', 'acaba inquieto', 'acaricia a algo/alguien', 'acosa a alguien', 'adelgaza', 'adivina', 'adopta', 'afeita', 'agria', 'agujerea una superficie', 'ahoga a alguien', 'ahorra', 'aísla', 'ajusta', 'alinea', 'alumbra', 'ama', 'amarra', 'amenaza a alguien', 'amputa un miembro', 'amuebla un hogar', 'aniquila un enemigo', 'anticipa un evento', 'anuncia un evento', 'apesta', 'araña', 'arde', 'asedia', 'asesina a un amigo', 'asfixia a un enemigo', 'aterriza forzosamente', 'atormenta', 'atraviesa', 'aturde a alguien', 'auxilia a alguien', 'averigua una mentira', 'ayuna', 'babea', 'baila', 'balancea un objeto', 'balbucea con vergüenza', 'barajea', 'barre', 'batalla en una guerra', 'batea', 'bautiza algo', 'bebe', 'besa a alguien', 'blande un arma', 'blanquea algo', 'blanquea dinero', 'bloquea algo', 'boicotea una estrategia', 'bombardea un territorio', 'borda un tapiz', 'borra algo', 'brilla', 'brinca', 'brinda', 'bromea', 'brota', 'bucea', 'bulle', 'burla', 'busca', 'cabalga', 'cae', 'cambia', 'camufla', 'canta', 'captura', 'castra', 'celebra', 'cepilla', 'cercena', 'chilla', 'cobra vida', 'codicia', 'cojea', 'combate', 'come', 'compite', 'complica algo', 'concibe algo', 'condena a alguien', 'confronta', 'conquista', 'consagra', 'conserva', 'consigna', 'conspira', 'construye', 'contagia', 'copula con el enemigo', 'coquetea', 'corona', 'corre', 'corta', 'corteja a alguien', 'cosecha', 'cultiva', 'cumple una promesa', 'curte', 'custodia', 'danza', 'daña', 'deambula', 'debilita', 'decapita', 'declara', 'deforma', 'defrauda', 'deja pasar el tiempo', 'delata', 'demora', 'denuncia', 'derruye', 'desabrocha', 'desafía', 'desaparece', 'desayuna', 'descansa', 'descubre algo', 'desea', 'desembarca', 'desencanta a alguien', 'desentona', 'deshonra', 'desilusiona', 'desnuda a alguien', 'desobedece', 'desviste', 'devasta', 'dibuja', 'discute', 'disfruta', 'dispara', 'distorsiona', 'divorcia', 'duda', 'duerme', 'eclipsa', 'edifica', 'elige un blasón', 'elimina', 'emborracha', 'emigra', 'empalma', 'empeora', 'enamora', 'encadena', 'encanta', 'enciende', 'encuentra', 'endulza', 'enferma', 'engaña', 'engrasa', 'ensambla', 'entierra', 'entrevista', 'envejece', 'envenena', 'erradica', 'eructa', 'es derrotado', 'es tentado', 'es timado', 'es vapuleado', 'escoge', 'escupe', 'esmalta', 'esposa', 'está penando', 'estornuda', 'estrangula', 'estropea', 'excita', 'experimenta', 'extermina', 'extorsiona', 'extraña', 'fabrica', 'facilita', 'falla', 'falsea', 'fantasea', 'favorece a alguien', 'fermenta', 
'festeja', 'fía', 'filma', 'filtra', 'finaliza', 'financia', 'fisgonea', 'flagela', 'flaquea', 'flirtea', 'florece', 'flota', 'fluctúa', 'forcejea', 'forja', 'forma', 'fracasa', 'fracciona', 'fractura', 'fragmenta', 'frecuenta', 'fríe', 'friega', 'fuerza', 'funciona', 'galantea', 'galopa', 'gana', 'garabatea', 'garantiza', 'gasta', 'genera', 'germina', 'gesticula', 'gime', 'gimotea', 'gira', 'glasea', 'glorifica', 'glosa', 'gobierna', 'golpea', 'gorjea', 'gorrea', 'gorronear', 'gotea', 'goza', 'graba', 'grada', 'gradúa', 'granula', 'grapa', 'gravita', 'grita', 'gruñe', 'guarda', 'guía', 'habilita', 'habita', 'habla', 'hace', 'hace amigos', 'hace enemigos', 'hace vibrar algo', 'hacina', 'halla una herramienta', 'halla una pista', 'hereda', 'hermana', 'hiberna', 'hidrata', 'hiela', 'hiere', 'hierra', 'hierve', 'hila', 'hilvana', 'hipa', 'hojear', 'honra', 'hornea', 'hospeda', 'huele', 'huelga', 'humea', 'humedece', 'humilla', 'hunde', 'huye', 'idolatra', 'ignora', 'ilumina', 'imagina', 'imitar', 'impide', 'impone', 'impregna', 'improvisa', 'impulsa una iniciativa', 'incapacita a alguien', 'incinera', 'incomoda', 'infiere algo', 'influye', 'infringe las normas', 'injuria a alguien', 'inocula un veneno', 'inspira', 'instaura algo novedoso', 'instruye al enemigo', 'insulta a alguien', 'intercambia información', 'interpreta', 'interroga a alguien', 'intimida a alguien', 'invade algo', 'investiga', 'invita', 'invoca algo/a alguien', 'jadea', 'jala', 'juega', 'junta algunas piezas', 'jura', 'juzga acertadamente', 'juzga erróneamente', 'lacera', 'lacra', 'ladra', 'lame una superficie', 'lanza algo', 'lastra', 'late', 'le afecta un cambio mágico', 'le gusta algo', 'legitima', 'levanta', 'libera algo', 'lidera un evento', 'lidia con algo inesperado', 'limita', 'limpia', 'lincha', 'lisia a alguien', 'lisonjea inapropiadamente', 'llama a alguien', 'llamea', 'llega', 'llena algo', 'lleva algo a algún sitio', 'llora', 'llueve', 'logra', 'luce algo', 'lucha', 'lustra algo', 'madura', 'malgasta', 'maltrata', 'manda', 'manipula', 'masculla', 'medita', 'medra', 'mendiga', 'merodea', 'mezcla', 'mide', 'miente', 'mima', 'mina', 'mira', 'moderniza', 'modifica', 'moja', 'muele', 'muerde algo/a alguien', 'muere', 'nace', 'nada', 'narra', 'naufraga', 'navega', 'necesita algo/a alguien', 'negocia', 'niega algo', 'nieva', 'nivela', 'nombra', 'nomina', 'nota', 'notifica', 'nubla', 'numera', 'nutre', 'obedece', 'obsequia', 'obtiene', 'obvia', 'ocasiona', 'oculta', 'ocupa', 'odia', 'ofende', 'oficia', 'ofrece', 'olvida', 'omite', 'ondea algo en alto', 'opera lejos', 'opina', 'oprime a alguien', 'opta por una opción', 'ordena', 'organiza', 'orienta', 'origina un conflicto', 'orilla una embarcación', 'ornamenta algo', 'orquesta', 'oscila', 'otorga', 'oxigena', 'oye', 'parodia', 'participa en una justa', 'pasea', 'patea', 'patrulla', 'pega algo/ a alguien', 'peina', 'perdona', 'peregrina', 'perjudica', 'permanece', 'persevera', 'persigue', 'pertenece', 'pierde algo/ a alguien', 'pilota', 'piratea', 'pisotea', 'plancha', 'planifica', 'predestina', 'predice', 'premia', 'priva', 'procrea', 'profana', 'progresa', 'prohíbe', 'promete', 'promueve', 'propulsa', 'protesta', 'provoca', 'puebla', 'quebranta', 'queda', 'queda hospitalizado', 'quiebra', 'quiere a alguien/algo', 'quita a alguien', 'raciona algo', 'rapta a alguien', 'rasura algo', 'razona', 'recauda', 'rechaza', 'recluta a alguien', 'recoge algo', 'recompensa a alguien', 'reconquista a alguien', 'reconstruye algo', 'recuerda algo', 'recupera algo', 'reduce algo', 
'regresa', 'renuncia', 'replica algo', 'reprime a alguien', 'repudia a alguien', 'requisa algo', 'rescata', 'rescata a alguien', 'responde', 'resucita', 'resuelve algo', 'retiene ilegalmente a alguien', 'rige un pueblo', 'rima', 'roba', 'rompe un juramento', 'ruega', 'sabotea algo', 'sacrifica algo', 'salpica', 'salva a alguien', 'saquea algo', 'se aburre', 'se ahoga', 'se baña', 'se confunde de identidad', 'se equivoca', 'se fascina con algo', 'se habitúa a algo extraño', 'se habitúa a una nueva vida', 'se hace valer', 'se harta', 'se hiere', 'se infiltra', 'se irrita', 'se jubila', 'se junta con alguien', 'se justifica', 'se lamenta', 'se lastima', 'se le rompe el corazón', 'se libra', 'se magulla', 'se mancha', 'se maravilla', 'se marcha', 'se marchita', 'se marea', 'se mece', 'se molesta', 'se mosquea', 'se motiva', 'se muda', 'se obsesiona', 'se olvida', 'se opone a algo', 'se pierde', 'se posa', 'se queja', 'se quema', 'se recluye', 'se reconcilia', 'se retira', 'se reúne', 'se ríe a carcajadas', 'se rinde', 'se rompe', 'se separa', 'se tambalea', 'se traga algo', 'se tranquiliza', 'se trastorna', 'se turna con alguien', 'se voltea', 'secuestra a alguien', 'seduce a alguien', 'selecciona algo', 'sella un pacto', 'separa algo', 'sepulta algo', 'simplifica algo', 'sitia un lugar', 'soborna a alguien', 'sobrevive', 'socorre a alguien', 'soluciona', 'somete a alguien', 'sonríe', 'soporta algo', 'sorprende a alguien', 'sospecha de algo', 'subestima a otro', 'subestima al enemigo', 'suelda', 'suelta', 'sueña', 'sufre un flechazo inoportuno', 'sugiere una idea', 'sujeta algo', 'supervisa', 'suplanta', 'sustituye', 'sustrae', 'talla', 'tapia algo', 'tararea', 'tartamudea', 'templa un objeto', 'tiembla', 'tiende algo', 'tiñe algo', 'tira algo', 'tira de alguien', 'tolera', 'tontea con alguien', 'tornea un objeto', 'tortura a alguien', 'traduce', 'trafica', 'traiciona', 'trama', 'traspasa algo', 'traslada algo', 'traza', 'trepa', 'trilla algo', 'trincha algo', 'tripula una nave', 'tritura algo', 'tropieza', 'ubica un lugar', 'ubica un objeto', 'ultima algún detalle', 'ultraja', 'ulula', 'une', 'unifica', 'unta algo', 'usa algo', 'usurpa', 'utiliza a alguien', 'va a prisión', 'vadea un lugar', 'vaga por un lugar', 'valida alguna aptitud', 'valora algo', 'vaticina un evento', 've algo insólito', 'veda algo', 'vegeta', 'veja a alguien', 'vence a alguien', 'vende algo/a alguien', 'venera algo', 'venga a alguien querido', 'venga a un desconocido', 'ventila', 'verifica', 'viaja', 'vigila a alguien', 'vilipendia', 'viola', 'visita', 'vitorea', 'vive', 'vocea', 'vota a alguien equivocadamente', 'vuela', 'vuelca', 'vuelve al origen', 'yace', 'zanganea', 'zanja un asunto importante', 'zarandea', 'zigzaguea por un lugar', 'zumba', 'se constipa', 'se apuesta aglo irremplazable', 'confiesa una mezquindad', 'prospera a costa de otro', 'confía en la persona equivocada', 'se come algo tóxico', 'engorda demasiado', 'se camufla entre los habitantes', 'corre hacia un sueño', 'se mete en una caja', 'se despierta en otra época', 'viaja al centro de la tierra', 'se duerme en clase', 'cae sobre una tarta', 'soba un sujetador', 'espolborea veneno sobre alguien', 'canta una canción de cuna', 'apuesta con el enemigo', 'se enamora de su enemigo', 'busca un final feliz', 'comienza a hacerse preguntas', 'se hace derogar', 'se intoxica', 'irradia algo', 'se vuelve radiactivo', 'consigue un material extraño', 'es un embustero', 'mordisquea la comida ajena', 'contextualiza algo', 'aporta un significado al mundo', 'encuentra 
el significado del universo', 'se encuentra con un ente creador', 'agita unas maracas', 'consigue un don', 'aplana el universo', 'conquista el espacio', 'se enamora de un objeto', 'se desposa con un objeto', 'asesina accidentalmente a alguien', 'secunda una censura', 'se atraganta', 'descuida su aspecto', 'hiere a un amigo', 'hiere a un enemigo', 'cosifica a alguien', 'se siente atraido sexualmente', 'es sexualizado', 'pronuncia un discuros', 'extravía el objeto que lo iba a salvar', 'muere', 'muere de forma estúpida', 'fallece premeditadamente', 'se suicida para evitar a su enemigo', 'estudia', 'convence a un aristócrata', 'se depila', 'depila a alguien', 'escribe un diario', 'roba un objeto', 'se esconde con cobardía', 'se detiene en el camino', 'detiene a alguien', 'es detenido inoportunamente', 'casca nueces', 'rompe un objeto sagrado', 'es excomulgado', 'es cómplice de un asesinato', 'ayuda a su enemigo']})\nretos = {'Retos': ['Yo no tengo muy claro que Ana tenga una ánfora, pero eso da igual, porque lo que sí sé es que tienes que hacer una anáfora', 'Alíviate o no te alivies, altérate o no te alteres, pero haz que tu texto sea aliterado', 'Qué paradójico sería que tu texto no tuviese una paradoja', 'Era como… la descripción que has hecho. Ex-ac-ta-men-te', 'Este reto es un alivio, te permite la elipsis de 1 palabra que te haya salido como obligatoria para tu texto. Elige sabiamente', 'Este reto es un alivio, te permite la elipsis de 2 palabras que te hayan salido como obligatorias para tu texto. Elige sabiamente', 'Este reto es un alivio, te permite la elipsis de 3 palabras que te hayan salido como obligatorias para tu texto. Elige sabiamente', 'Este reto es un alivio, te permite la elipsis de 4 palabras que te hayan salido como obligatorias para tu texto. Elige sabiamente', '¿Quién conoce el futuro? Bueno, pues tendrás que imaginártelo', 'Me da igual que tengas que incluir una lavadora, tu texto debe enmarcarse en la época clásica', 'Me importa poco que tu protagonista sea una impresora 3D, tus protagonistas están en la Edad Media', 'En una época donde existía la magia… tu texto estaría en su contexto correcto', 'Si no te ríes al leerlo, no molas porque no es comedia', 'Seguro que, gracias a tu emotiva oda, el protagonista de tu historia será recordado eternamente', 'Ni Ulises está a la altura de tu epopeya', 'Don Quijote estaría orgulloso de tu aporte al noble arte de las historias de caballería', '¿A quién no le gusta viajar? Nos vamos a visitar otro planeta en este viaje intergaláctico', '¿Has soñado con viajes en el tiempo? Quién no…', '¿Estás preparado? Te vas a embarcar en un camino del héroe', 'Los escritores a veces parece que no saben hacerlo, yo que sé… mira, tú usa frases simples porque no puedes usar yuxtaposiciones ni subordinadas ni coordinadas.', '¡Te has librado! Eres libre de restricciones', 'Perdona, pero no me equivoqué al decir que tenías que escribir una antanaclasis', 'Este aire suena como una sinestesia, ¿no os parece?', 'No es dislexia, es un sinécdoque, ¡que no te enteras!', '¡Te has librado! Eres libre de restricciones', '¡No corras tanto! No puedes escribir más de 50 palabras', '¡No corras tanto! No puedes escribir más de 100 palabras', '¡No corras tanto! 
No puedes escribir más de 150 palabras', 'Tic-Tac Solo tienes 10 minutos para escribir ¡Rápido!', 'Y dije… que tu texto sea un diálogo', '¿No es verdad, ángel de amor, que en verso se escribe mejor?', 'Tiene que parecer un ensayo, no serlo, porque de esto sé que no tienes ni idea', 'A ver, no te alarmes, pero debes hacer una metáfora con lo que tengas', '¿Cuántas líneas tiene ese papel? Bueno, pues como mucho, puedes llenar 20 líneas', '¿Cuántas líneas tiene ese papel? Bueno, pues como mucho, puedes llenar 30 líneas', '¿Cuántas líneas tiene ese papel? Bueno, pues como mucho, puedes llenar 40 líneas', 'La prosa ha muerto, escríbeme un poema', 'Esta es difícil. Tu protagonista es ahora el antagonista… debe ser una tragedia, porque triunfa frente al bien', 'Esto es como cuando tienes que hacer un símil…', 'Tu protagonista se convierte en un lema del diccionario, ahora tienes que definirlo sin nombrarlo en ningún momento', 'Me apetece escuchar esa canción, sí, ya sabes… la que acabas de escribir', 'Los mitos griegos molan mucho, haz que el tuyo pueda colar por uno.', 'Encuentras la hoja de una novela durante un paseo matutino, ¿qué tiene escrito? ¿Podrías trascribirlo para mi?', 'Sepa vuesa merced que vuestras palabras suenan tan cercanas para alguien de mi uso, gracias por escribir unas líneas en castellano antiguo', 'Edgar Allan Poe no existe, ¿quién va a decirnos ahora \"nunca más\"?', 'Ni el señor gray está a la altura de tu perversión, haz que se corra (la tinta, la tinta)', 'Esto es un tema serio, te lo ha pedido un catedrático para la clase que tiene mañana.', 'Con la venia de su señoría, esa ley que usted cita y describe todavía no la he encontrado en el Código Civil.', 'A Spielberg le ha encantado tu idea, pero lo que has escrito solo da para un corto.', 'Más te vale que tu historia tenga una moraleja']}\n\n##---------------------- Funciones\ndef idea():\n '''Genera una frase aleatoria que podrás utilizar como la idea principal del relato.\n El programa no utiliza ninguna lógica ni coherencia para la selección de las columnas,\n por lo que puedes enfrentarte a ideas bastante incoherentes; lo que puede resultar en\n un ejercicio bastante estimulante para la imaginación'''\n aleatorios = np.random.randint(len(frase['artículo']), size=3)\n if frase['artículo'][aleatorios[0]] == 'El':\n return ' '.join([frase['artículo'][aleatorios[0]], frase['sujeto'][aleatorios[0]], frase['adjetivo masculino'][aleatorios[1]], frase['acciones'][aleatorios[2]]])\n else:\n return ' '.join([frase['artículo'][aleatorios[0]], frase['sujeto'][aleatorios[0]], frase['adjetivo femenino'][aleatorios[1]], frase['acciones'][aleatorios[2]]])\n\ndef palabras():\n '''Genera un listado de palabras aleatorio en base a adjetivos que debes utilizar en el\n desarrollo del texto; estas palabras pueden aparecer en todas sus variantes de género y número.'''\n palabras = []\n for n in range(int(np.random.randint(1, high=11, size=1))):\n palabras.append(frase['adjetivo masculino'][int(np.random.randint(len(frase['artículo']), size=1))])\n return set(palabras)\n\ndef reto():\n '''Lanza un reto aleatorio de los que existen dentro de la lista, para hacer más complicado\n (o facilitar a veces) la ejecución del relato.'''\n return retos['Retos'][int(np.random.randint(len(retos['Retos']), size=1))]\n\ndef dice():\n '''¡Devuelve la respuesta que ha generado Gilbert!'''\n return {'idea': idea(), 'palabras': palabras(), 'reto': reto()}\n\ndef juego(nivel = ''):\n '''Elige el nivel de dificultad que tendrá la tarea de Gilbert: fácil, 
normal o difícil.'''\n\n while nivel not in ['fácil', 'normal', 'difícil']:\n nivel = input('Elige el nivel de dificultad: fácil, normal o difícil: ').lower()\n partida = dice()\n if nivel == 'fácil':\n return idea()\n elif nivel == 'normal':\n return idea(), ', '.join(palabras())\n elif nivel == 'difícil':\n return idea(), ', '.join(palabras()), reto()\n else:\n return 'Parece que ha ocurrido algo inesperado.'\n\n##---------------------- Objetos externos\nwith open('reglas.md', \"r\") as texto:\n reglas = texto.read()\nwith open('sobre_proyecto.md', \"r\") as texto:\n sobre_proyecto = texto.read()\nwith open('desarrollo.md', \"r\") as texto:\n desarrollado = texto.read()\n\n##---------------------- Aplicación Streamlit\n##--- Textos\nst.title('Gilbert.dice')\nst.header('Generador de frases aleatorias')\nst.markdown('### Podrás utilizarlas para inspirarte, trabajar la imaginación y perder el miedo a la página en blanco.')\n\n##--- Menú de la izquierda\nst.sidebar.title(\"Acepta el reto y pincha en comenzar\")\nst.sidebar.write('Elige la dificultad y enfréntate a la página en blanco.')\nfichero = st.sidebar.selectbox(\"Selecciona la dificultad:\",('fácil', 'normal', 'difícil'))\n\n#-- Botones\ncomenzar = st.sidebar.button('Generar')\nsaber_mas = st.sidebar.button('Reglas del juego')\nproyecto = st.sidebar.button('Detalles del proyecto')\ndesarrollo = st.sidebar.button('Desarrollo de Gilbert')\n\n##--- Rutina del programa\nif comenzar:\n gilbert = juego(fichero)\n if fichero == 'fácil':\n st.markdown('La idea para tu próximo relato es:')\n st.markdown('**' + gilbert + '**\\n')\n elif fichero == 'normal':\n st.markdown('La idea para tu próximo relato es:')\n st.markdown('**' + gilbert[0] + '**\\n')\n st.markdown('El texto debe incluir estas palabras:')\n st.markdown('**' + gilbert[1] + '**\\n')\n else:\n st.markdown('La idea para tu próximo relato es:')\n st.markdown('**' + gilbert[0] + '**\\n')\n st.markdown('El texto debe incluir estas palabras:')\n st.markdown('**' + gilbert[1] + '**\\n')\n st.markdown('Además, debes tratar de cumplir con el siguiente reto:')\n st.markdown('**' + gilbert[2] + '**\\n')\n\nif saber_mas:\n st.markdown(reglas)\n\nif proyecto:\n st.markdown(sobre_proyecto)\n\nif desarrollo:\n st.markdown(desarrollado)\n\n##--- Pie del menú de la izquierda\nst.sidebar.markdown('Un proyecto personal de [**Erebyel** (María Reyes Rocío Pérez)](http://www.erebyel.es).')\n" ]
[ [ "pandas.DataFrame", "numpy.random.randint" ] ]
GanshengT/mne-python
[ "49253e74308137e14187561a204d784ea28f12a7" ]
[ "mne/viz/misc.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Functions to make simple plots with M/EEG data.\"\"\"\n\n# Authors: Alexandre Gramfort <[email protected]>\n# Denis Engemann <[email protected]>\n# Martin Luessi <[email protected]>\n# Eric Larson <[email protected]>\n# Cathy Nangini <[email protected]>\n# Mainak Jas <[email protected]>\n#\n# License: Simplified BSD\n\nimport base64\nimport copy\nfrom glob import glob\nfrom io import BytesIO\nfrom itertools import cycle\nimport os.path as op\nimport warnings\nfrom distutils.version import LooseVersion\nfrom collections import defaultdict\n\nimport numpy as np\n\nfrom ..defaults import DEFAULTS\nfrom ..fixes import _get_img_fdata\nfrom ..rank import compute_rank\nfrom ..surface import read_surface\nfrom ..io.constants import FIFF\nfrom ..io.proj import make_projector\nfrom ..io.pick import (_DATA_CH_TYPES_SPLIT, pick_types, pick_info,\n pick_channels)\nfrom ..source_space import (read_source_spaces, SourceSpaces,\n _check_mri, _ensure_src)\nfrom ..transforms import invert_transform, apply_trans, _frame_to_str\nfrom ..utils import (logger, verbose, warn, _check_option, get_subjects_dir,\n _mask_to_onsets_offsets, _pl, _on_missing, fill_doc)\nfrom ..io.pick import _picks_by_type\nfrom ..filter import estimate_ringing_samples\nfrom .utils import (tight_layout, _get_color_list, _prepare_trellis, plt_show,\n _figure_agg)\n\n\ndef _index_info_cov(info, cov, exclude):\n if exclude == 'bads':\n exclude = info['bads']\n info = pick_info(info, pick_channels(info['ch_names'], cov['names'],\n exclude))\n del exclude\n picks_list = \\\n _picks_by_type(info, meg_combined=False, ref_meg=False,\n exclude=())\n picks_by_type = dict(picks_list)\n\n ch_names = [n for n in cov.ch_names if n in info['ch_names']]\n ch_idx = [cov.ch_names.index(n) for n in ch_names]\n\n info_ch_names = info['ch_names']\n idx_by_type = defaultdict(list)\n for ch_type, sel in picks_by_type.items():\n idx_by_type[ch_type] = [ch_names.index(info_ch_names[c])\n for c in sel if info_ch_names[c] in ch_names]\n idx_names = [(idx_by_type[key],\n '%s covariance' % DEFAULTS['titles'][key],\n DEFAULTS['units'][key],\n DEFAULTS['scalings'][key],\n key)\n for key in _DATA_CH_TYPES_SPLIT\n if len(idx_by_type[key]) > 0]\n C = cov.data[ch_idx][:, ch_idx]\n return info, C, ch_names, idx_names\n\n\n@verbose\ndef plot_cov(cov, info, exclude=(), colorbar=True, proj=False, show_svd=True,\n show=True, verbose=None):\n \"\"\"Plot Covariance data.\n\n Parameters\n ----------\n cov : instance of Covariance\n The covariance matrix.\n %(info_not_none)s\n exclude : list of str | str\n List of channels to exclude. If empty do not exclude any channel.\n If 'bads', exclude info['bads'].\n colorbar : bool\n Show colorbar or not.\n proj : bool\n Apply projections or not.\n show_svd : bool\n Plot also singular values of the noise covariance for each sensor\n type. We show square roots ie. standard deviations.\n show : bool\n Show figure if True.\n %(verbose)s\n\n Returns\n -------\n fig_cov : instance of matplotlib.figure.Figure\n The covariance plot.\n fig_svd : instance of matplotlib.figure.Figure | None\n The SVD spectra plot of the covariance.\n\n See Also\n --------\n mne.compute_rank\n\n Notes\n -----\n For each channel type, the rank is estimated using\n :func:`mne.compute_rank`.\n\n .. 
versionchanged:: 0.19\n Approximate ranks for each channel type are shown with red dashed lines.\n \"\"\"\n import matplotlib.pyplot as plt\n from matplotlib.colors import Normalize\n from scipy import linalg\n from ..cov import Covariance\n\n info, C, ch_names, idx_names = _index_info_cov(info, cov, exclude)\n del cov, exclude\n\n projs = []\n if proj:\n projs = copy.deepcopy(info['projs'])\n\n # Activate the projection items\n for p in projs:\n p['active'] = True\n\n P, ncomp, _ = make_projector(projs, ch_names)\n if ncomp > 0:\n logger.info(' Created an SSP operator (subspace dimension'\n ' = %d)' % ncomp)\n C = np.dot(P, np.dot(C, P.T))\n else:\n logger.info(' The projection vectors do not apply to these '\n 'channels.')\n\n fig_cov, axes = plt.subplots(1, len(idx_names), squeeze=False,\n figsize=(3.8 * len(idx_names), 3.7))\n for k, (idx, name, _, _, _) in enumerate(idx_names):\n vlim = np.max(np.abs(C[idx][:, idx]))\n im = axes[0, k].imshow(C[idx][:, idx], interpolation=\"nearest\",\n norm=Normalize(vmin=-vlim, vmax=vlim),\n cmap='RdBu_r')\n axes[0, k].set(title=name)\n\n if colorbar:\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n divider = make_axes_locatable(axes[0, k])\n cax = divider.append_axes(\"right\", size=\"5.5%\", pad=0.05)\n plt.colorbar(im, cax=cax, format='%.0e')\n\n fig_cov.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)\n tight_layout(fig=fig_cov)\n\n fig_svd = None\n if show_svd:\n fig_svd, axes = plt.subplots(1, len(idx_names), squeeze=False,\n figsize=(3.8 * len(idx_names), 3.7))\n for k, (idx, name, unit, scaling, key) in enumerate(idx_names):\n this_C = C[idx][:, idx]\n s = linalg.svd(this_C, compute_uv=False)\n this_C = Covariance(this_C, [info['ch_names'][ii] for ii in idx],\n [], [], 0)\n this_info = pick_info(info, idx)\n this_info['projs'] = []\n this_rank = compute_rank(this_C, info=this_info)\n # Protect against true zero singular values\n s[s <= 0] = 1e-10 * s[s > 0].min()\n s = np.sqrt(s) * scaling\n axes[0, k].plot(s, color='k', zorder=3)\n this_rank = this_rank[key]\n axes[0, k].axvline(this_rank - 1, ls='--', color='r',\n alpha=0.5, zorder=4, clip_on=False)\n axes[0, k].text(this_rank - 1, axes[0, k].get_ylim()[1],\n 'rank ≈ %d' % (this_rank,), ha='right', va='top',\n color='r', alpha=0.5, zorder=4)\n axes[0, k].set(ylabel=u'Noise σ (%s)' % unit, yscale='log',\n xlabel='Eigenvalue index', title=name,\n xlim=[0, len(s) - 1])\n tight_layout(fig=fig_svd)\n\n plt_show(show)\n\n return fig_cov, fig_svd\n\n\ndef plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,\n source_index=None, colorbar=False, show=True):\n \"\"\"Plot source power in time-freqency grid.\n\n Parameters\n ----------\n stcs : list of SourceEstimate\n Source power for consecutive time windows, one SourceEstimate object\n should be provided for each frequency bin.\n freq_bins : list of tuples of float\n Start and end points of frequency bins of interest.\n tmin : float\n Minimum time instant to show.\n tmax : float\n Maximum time instant to show.\n source_index : int | None\n Index of source for which the spectrogram will be plotted. 
If None,\n the source with the largest activation will be selected.\n colorbar : bool\n If true, a colorbar will be added to the plot.\n show : bool\n Show figure if True.\n\n Returns\n -------\n fig : instance of Figure\n The figure.\n \"\"\"\n import matplotlib.pyplot as plt\n\n # Input checks\n if len(stcs) == 0:\n raise ValueError('cannot plot spectrogram if len(stcs) == 0')\n\n stc = stcs[0]\n if tmin is not None and tmin < stc.times[0]:\n raise ValueError('tmin cannot be smaller than the first time point '\n 'provided in stcs')\n if tmax is not None and tmax > stc.times[-1] + stc.tstep:\n raise ValueError('tmax cannot be larger than the sum of the last time '\n 'point and the time step, which are provided in stcs')\n\n # Preparing time-frequency cell boundaries for plotting\n if tmin is None:\n tmin = stc.times[0]\n if tmax is None:\n tmax = stc.times[-1] + stc.tstep\n time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)\n freq_bounds = sorted(set(np.ravel(freq_bins)))\n freq_ticks = copy.deepcopy(freq_bounds)\n\n # Reject time points that will not be plotted and gather results\n source_power = []\n for stc in stcs:\n stc = stc.copy() # copy since crop modifies inplace\n stc.crop(tmin, tmax - stc.tstep)\n source_power.append(stc.data)\n source_power = np.array(source_power)\n\n # Finding the source with maximum source power\n if source_index is None:\n source_index = np.unravel_index(source_power.argmax(),\n source_power.shape)[1]\n\n # If there is a gap in the frequency bins record its locations so that it\n # can be covered with a gray horizontal bar\n gap_bounds = []\n for i in range(len(freq_bins) - 1):\n lower_bound = freq_bins[i][1]\n upper_bound = freq_bins[i + 1][0]\n if lower_bound != upper_bound:\n freq_bounds.remove(lower_bound)\n gap_bounds.append((lower_bound, upper_bound))\n\n # Preparing time-frequency grid for plotting\n time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)\n\n # Plotting the results\n fig = plt.figure(figsize=(9, 6))\n plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],\n cmap='Reds')\n ax = plt.gca()\n\n ax.set(title='Source power', xlabel='Time (s)', ylabel='Frequency (Hz)')\n\n time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]\n n_skip = 1 + len(time_bounds) // 10\n for i in range(len(time_bounds)):\n if i % n_skip != 0:\n time_tick_labels[i] = ''\n\n ax.set_xticks(time_bounds)\n ax.set_xticklabels(time_tick_labels)\n plt.xlim(time_bounds[0], time_bounds[-1])\n plt.yscale('log')\n ax.set_yticks(freq_ticks)\n ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])\n plt.ylim(freq_bounds[0], freq_bounds[-1])\n\n plt.grid(True, ls='-')\n if colorbar:\n plt.colorbar()\n tight_layout(fig=fig)\n\n # Covering frequency gaps with horizontal bars\n for lower_bound, upper_bound in gap_bounds:\n plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -\n lower_bound, time_bounds[0], color='#666666')\n\n plt_show(show)\n return fig\n\n\ndef _plot_mri_contours(mri_fname, surfaces, src, orientation='coronal',\n slices=None, show=True, show_indices=False,\n show_orientation=False, img_output=False, width=512):\n \"\"\"Plot BEM contours on anatomical slices.\"\"\"\n import matplotlib.pyplot as plt\n from matplotlib import patheffects\n from .._freesurfer import _mri_orientation, _read_mri_info\n # For ease of plotting, we will do everything in voxel coordinates.\n _check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))\n\n # Load the T1 data\n _, vox_mri_t, _, _, _, nim = 
_read_mri_info(\n mri_fname, units='mm', return_img=True)\n mri_vox_t = invert_transform(vox_mri_t)['trans']\n del vox_mri_t\n\n # plot axes (x, y, z) as data axes\n (x, y, z), (flip_x, flip_y, flip_z), order = _mri_orientation(\n nim, orientation)\n transpose = x < y\n\n data = _get_img_fdata(nim)\n shift_x = data.shape[x] if flip_x < 0 else 0\n shift_y = data.shape[y] if flip_y < 0 else 0\n n_slices = data.shape[z]\n if slices is None:\n slices = np.round(np.linspace(0, n_slices - 1, 14)).astype(int)[1:-1]\n slices = np.atleast_1d(slices).copy()\n slices[slices < 0] += n_slices # allow negative indexing\n if not np.array_equal(np.sort(slices), slices) or slices.ndim != 1 or \\\n slices.size < 1 or slices[0] < 0 or slices[-1] >= n_slices or \\\n slices.dtype.kind not in 'iu':\n raise ValueError('slices must be a sorted 1D array of int with unique '\n 'elements, at least one element, and no elements '\n 'greater than %d, got %s' % (n_slices - 1, slices))\n if flip_z < 0:\n # Proceed in the opposite order to maintain left-to-right / orientation\n slices = slices[::-1]\n\n # create of list of surfaces\n surfs = list()\n for file_name, color in surfaces:\n surf = dict()\n surf['rr'], surf['tris'] = read_surface(file_name)\n # move surface to voxel coordinate system\n surf['rr'] = apply_trans(mri_vox_t, surf['rr'])\n surfs.append((surf, color))\n\n sources = list()\n if src is not None:\n _ensure_src(src, extra=' or None')\n # Eventually we can relax this by allowing ``trans`` if need be\n if src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI:\n raise ValueError(\n 'Source space must be in MRI coordinates, got '\n f'{_frame_to_str[src[0][\"coord_frame\"]]}')\n for src_ in src:\n points = src_['rr'][src_['inuse'].astype(bool)]\n sources.append(apply_trans(mri_vox_t, points * 1e3))\n sources = np.concatenate(sources, axis=0)\n\n if img_output:\n n_col = n_axes = 1\n dpi = 96\n # 2x standard MRI resolution is probably good enough for the\n # traces\n w = width / dpi\n figsize = (w, w / data.shape[x] * data.shape[y])\n fig = _figure_agg(figsize=figsize, dpi=dpi, facecolor='k')\n ax = fig.add_axes([0, 0, 1, 1], frame_on=False, facecolor='k')\n axs = [ax] * len(slices)\n plt.close(fig)\n else:\n n_col = 4\n fig, axs, _, _ = _prepare_trellis(len(slices), n_col)\n fig.set_facecolor('k')\n dpi = fig.get_dpi()\n n_axes = len(axs)\n bounds = np.concatenate(\n [[-np.inf], slices[:-1] + np.diff(slices) / 2., [np.inf]]) # float\n slicer = [slice(None)] * 3\n ori_labels = dict(R='LR', A='PA', S='IS')\n xlabels, ylabels = ori_labels[order[0]], ori_labels[order[1]]\n path_effects = [patheffects.withStroke(linewidth=4, foreground=\"k\",\n alpha=0.75)]\n out = list() if img_output else fig\n for ai, (ax, sl, lower, upper) in enumerate(zip(\n axs, slices, bounds[:-1], bounds[1:])):\n # adjust the orientations for good view\n slicer[z] = sl\n dat = data[tuple(slicer)]\n dat = dat.T if transpose else dat\n dat = dat[::flip_y, ::flip_x]\n\n # First plot the anatomical data\n if img_output:\n ax.clear()\n ax.imshow(dat, cmap=plt.cm.gray, origin='lower')\n ax.set_autoscale_on(False)\n ax.axis('off')\n ax.set_aspect('equal') # XXX eventually could deal with zooms\n\n # and then plot the contours on top\n for surf, color in surfs:\n with warnings.catch_warnings(record=True): # ignore contour warn\n warnings.simplefilter('ignore')\n ax.tricontour(flip_x * surf['rr'][:, x] + shift_x,\n flip_y * surf['rr'][:, y] + shift_y,\n surf['tris'], surf['rr'][:, z],\n levels=[sl], colors=color, linewidths=1.0,\n zorder=1)\n\n if 
len(sources):\n in_slice = (sources[:, z] >= lower) & (sources[:, z] < upper)\n ax.scatter(flip_x * sources[in_slice, x] + shift_x,\n flip_y * sources[in_slice, y] + shift_y,\n marker='.', color='#FF00FF', s=1, zorder=2)\n if show_indices:\n ax.text(dat.shape[1] // 8 + 0.5, 0.5, str(sl),\n color='w', fontsize='x-small', va='bottom', ha='left')\n # label the axes\n kwargs = dict(\n color='#66CCEE', fontsize='medium', path_effects=path_effects,\n family='monospace', clip_on=False, zorder=5, weight='bold')\n if show_orientation:\n if ai % n_col == 0: # left\n ax.text(0, dat.shape[0] / 2., xlabels[0],\n va='center', ha='left', **kwargs)\n if ai % n_col == n_col - 1 or ai == n_axes - 1: # right\n ax.text(dat.shape[1] - 1, dat.shape[0] / 2., xlabels[1],\n va='center', ha='right', **kwargs)\n if ai >= n_axes - n_col: # bottom\n ax.text(dat.shape[1] / 2., 0, ylabels[0],\n ha='center', va='bottom', **kwargs)\n if ai < n_col or n_col == 1: # top\n ax.text(dat.shape[1] / 2., dat.shape[0] - 1, ylabels[1],\n ha='center', va='top', **kwargs)\n if img_output:\n output = BytesIO()\n fig.savefig(output, bbox_inches='tight',\n pad_inches=0, format='png', dpi=dpi)\n out.append(base64.b64encode(output.getvalue()).decode('ascii'))\n\n fig.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,\n hspace=0.)\n plt_show(show, fig=fig)\n return out, flip_z\n\n\ndef plot_bem(subject=None, subjects_dir=None, orientation='coronal',\n slices=None, brain_surfaces=None, src=None, show=True,\n show_indices=True, mri='T1.mgz', show_orientation=True):\n \"\"\"Plot BEM contours on anatomical slices.\n\n Parameters\n ----------\n subject : str\n Subject name.\n subjects_dir : str | None\n Path to the SUBJECTS_DIR. If None, the path is obtained by using\n the environment variable SUBJECTS_DIR.\n orientation : str\n 'coronal' or 'axial' or 'sagittal'.\n slices : list of int\n Slice indices.\n brain_surfaces : None | str | list of str\n One or more brain surface to plot (optional). Entries should correspond\n to files in the subject's ``surf`` directory (e.g. ``\"white\"``).\n src : None | SourceSpaces | str\n SourceSpaces instance or path to a source space to plot individual\n sources as scatter-plot. Sources will be shown on exactly one slice\n (whichever slice is closest to each source in the given orientation\n plane). Path can be absolute or relative to the subject's ``bem``\n folder.\n\n .. versionchanged:: 0.20\n All sources are shown on the nearest slice rather than some\n being omitted.\n show : bool\n Show figure if True.\n show_indices : bool\n Show slice indices if True.\n\n .. versionadded:: 0.20\n mri : str\n The name of the MRI to use. Can be a standard FreeSurfer MRI such as\n ``'T1.mgz'``, or a full path to a custom MRI file.\n\n .. versionadded:: 0.21\n show_orientation : str\n Show the orientation (L/R, P/A, I/S) of the data slices.\n\n .. versionadded:: 0.21\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n The figure.\n\n See Also\n --------\n mne.viz.plot_alignment\n\n Notes\n -----\n Images are plotted in MRI voxel coordinates.\n\n If ``src`` is not None, for a given slice index, all source points are\n shown that are halfway between the previous slice and the given slice,\n and halfway between the given slice and the next slice.\n For large slice decimations, this can\n make some source points appear outside the BEM contour, which is shown\n for the given slice index. 
For example, in the case where the single\n midpoint slice is used ``slices=[128]``, all source points will be shown\n on top of the midpoint MRI slice with the BEM boundary drawn for that\n slice.\n \"\"\"\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n mri_fname = _check_mri(mri, subject, subjects_dir)\n\n # Get the BEM surface filenames\n bem_path = op.join(subjects_dir, subject, 'bem')\n\n if not op.isdir(bem_path):\n raise IOError('Subject bem directory \"%s\" does not exist' % bem_path)\n\n surfaces = _get_bem_plotting_surfaces(bem_path)\n if brain_surfaces is not None:\n if isinstance(brain_surfaces, str):\n brain_surfaces = (brain_surfaces,)\n for surf_name in brain_surfaces:\n for hemi in ('lh', 'rh'):\n surf_fname = op.join(subjects_dir, subject, 'surf',\n hemi + '.' + surf_name)\n if op.exists(surf_fname):\n surfaces.append((surf_fname, '#00DD00'))\n else:\n raise IOError(\"Surface %s does not exist.\" % surf_fname)\n\n if isinstance(src, str):\n if not op.exists(src):\n src_ = op.join(subjects_dir, subject, 'bem', src)\n if op.exists(src_):\n src = src_\n else:\n raise IOError(\"%s does not exist\" % src)\n src = read_source_spaces(src)\n elif src is not None and not isinstance(src, SourceSpaces):\n raise TypeError(\"src needs to be None, str or SourceSpaces instance, \"\n \"not %s\" % repr(src))\n\n if len(surfaces) == 0:\n raise IOError('No surface files found. Surface files must end with '\n 'inner_skull.surf, outer_skull.surf or outer_skin.surf')\n\n # Plot the contours\n return _plot_mri_contours(mri_fname, surfaces, src, orientation, slices,\n show, show_indices, show_orientation)[0]\n\n\ndef _get_bem_plotting_surfaces(bem_path):\n surfaces = []\n for surf_name, color in (('*inner_skull', '#FF0000'),\n ('*outer_skull', '#FFFF00'),\n ('*outer_skin', '#FFAA80')):\n surf_fname = glob(op.join(bem_path, surf_name + '.surf'))\n if len(surf_fname) > 0:\n surf_fname = surf_fname[0]\n logger.info(\"Using surface: %s\" % surf_fname)\n surfaces.append((surf_fname, color))\n return surfaces\n\n\n@verbose\ndef plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,\n axes=None, equal_spacing=True, show=True, on_missing='raise',\n verbose=None):\n \"\"\"Plot events to get a visual display of the paradigm.\n\n Parameters\n ----------\n events : array, shape (n_events, 3)\n The events.\n sfreq : float | None\n The sample frequency. If None, data will be displayed in samples (not\n seconds).\n first_samp : int\n The index of the first sample. Recordings made on Neuromag systems\n number samples relative to the system start (not relative to the\n beginning of the recording). In such cases the ``raw.first_samp``\n attribute can be passed here. Default is 0.\n color : dict | None\n Dictionary of event_id integers as keys and colors as values. If None,\n colors are automatically drawn from a default list (cycled through if\n number of events longer than list of default colors). Color can be any\n valid :doc:`matplotlib color <tutorials/colors/colors>`.\n event_id : dict | None\n Dictionary of event labels (e.g. 'aud_l') as keys and their associated\n event_id values. Labels are used to plot a legend. If None, no legend\n is drawn.\n axes : instance of Axes\n The subplot handle.\n equal_spacing : bool\n Use equal spacing between events in y-axis.\n show : bool\n Show figure if True.\n %(on_missing_events)s\n %(verbose)s\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n The figure object containing the plot.\n\n Notes\n -----\n .. 
versionadded:: 0.9.0\n \"\"\"\n if sfreq is None:\n sfreq = 1.0\n xlabel = 'Samples'\n else:\n xlabel = 'Time (s)'\n\n events = np.asarray(events)\n if len(events) == 0:\n raise ValueError('No events in events array, cannot plot.')\n unique_events = np.unique(events[:, 2])\n\n if event_id is not None:\n # get labels and unique event ids from event_id dict,\n # sorted by value\n event_id_rev = {v: k for k, v in event_id.items()}\n conditions, unique_events_id = zip(*sorted(event_id.items(),\n key=lambda x: x[1]))\n\n keep = np.ones(len(unique_events_id), bool)\n for ii, this_event in enumerate(unique_events_id):\n if this_event not in unique_events:\n msg = f'{this_event} from event_id is not present in events.'\n _on_missing(on_missing, msg)\n keep[ii] = False\n conditions = [cond for cond, k in zip(conditions, keep) if k]\n unique_events_id = [id_ for id_, k in zip(unique_events_id, keep) if k]\n if len(unique_events_id) == 0:\n raise RuntimeError('No usable event IDs found')\n\n for this_event in unique_events:\n if this_event not in unique_events_id:\n warn('event %s missing from event_id will be ignored'\n % this_event)\n\n else:\n unique_events_id = unique_events\n\n color = _handle_event_colors(color, unique_events, event_id)\n import matplotlib.pyplot as plt\n\n fig = None\n if axes is None:\n fig = plt.figure()\n ax = axes if axes else plt.gca()\n\n unique_events_id = np.array(unique_events_id)\n min_event = np.min(unique_events_id)\n max_event = np.max(unique_events_id)\n max_x = (events[np.in1d(events[:, 2], unique_events_id), 0].max() -\n first_samp) / sfreq\n\n handles, labels = list(), list()\n for idx, ev in enumerate(unique_events_id):\n ev_mask = events[:, 2] == ev\n count = ev_mask.sum()\n if count == 0:\n continue\n y = np.full(count, idx + 1 if equal_spacing else events[ev_mask, 2][0])\n if event_id is not None:\n event_label = '%s (%s)' % (event_id_rev[ev], count)\n else:\n event_label = 'N=%d' % (count,)\n labels.append(event_label)\n kwargs = {}\n if ev in color:\n kwargs['color'] = color[ev]\n handles.append(\n ax.plot((events[ev_mask, 0] - first_samp) / sfreq,\n y, '.', clip_on=False, **kwargs)[0])\n\n if equal_spacing:\n ax.set_ylim(0, unique_events_id.size + 1)\n ax.set_yticks(1 + np.arange(unique_events_id.size))\n ax.set_yticklabels(unique_events_id)\n else:\n ax.set_ylim([min_event - 1, max_event + 1])\n\n ax.set(xlabel=xlabel, ylabel='Event id', xlim=[0, max_x])\n\n ax.grid(True)\n\n fig = fig if fig is not None else plt.gcf()\n # reverse order so that the highest numbers are at the top\n # (match plot order)\n handles, labels = handles[::-1], labels[::-1]\n box = ax.get_position()\n factor = 0.8 if event_id is not None else 0.9\n ax.set_position([box.x0, box.y0, box.width * factor, box.height])\n ax.legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5),\n fontsize='small')\n fig.canvas.draw()\n plt_show(show)\n return fig\n\n\ndef _get_presser(fig):\n \"\"\"Get our press callback.\"\"\"\n import matplotlib\n callbacks = fig.canvas.callbacks.callbacks['button_press_event']\n func = None\n for key, val in callbacks.items():\n if LooseVersion(matplotlib.__version__) >= '3':\n func = val()\n else:\n func = val.func\n if func.__class__.__name__ == 'partial':\n break\n else:\n func = None\n assert func is not None\n return func\n\n\ndef plot_dipole_amplitudes(dipoles, colors=None, show=True):\n \"\"\"Plot the amplitude traces of a set of dipoles.\n\n Parameters\n ----------\n dipoles : list of instance of Dipole\n The dipoles whose amplitudes should be 
shown.\n colors : list of color | None\n Color to plot with each dipole. If None default colors are used.\n show : bool\n Show figure if True.\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n The figure object containing the plot.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n import matplotlib.pyplot as plt\n if colors is None:\n colors = cycle(_get_color_list())\n fig, ax = plt.subplots(1, 1)\n xlim = [np.inf, -np.inf]\n for dip, color in zip(dipoles, colors):\n ax.plot(dip.times, dip.amplitude * 1e9, color=color, linewidth=1.5)\n xlim[0] = min(xlim[0], dip.times[0])\n xlim[1] = max(xlim[1], dip.times[-1])\n ax.set(xlim=xlim, xlabel='Time (s)', ylabel='Amplitude (nAm)')\n if show:\n fig.show(warn=False)\n return fig\n\n\ndef adjust_axes(axes, remove_spines=('top', 'right'), grid=True):\n \"\"\"Adjust some properties of axes.\n\n Parameters\n ----------\n axes : list\n List of axes to process.\n remove_spines : list of str\n Which axis spines to remove.\n grid : bool\n Turn grid on (True) or off (False).\n \"\"\"\n axes = [axes] if not isinstance(axes, (list, tuple, np.ndarray)) else axes\n for ax in axes:\n if grid:\n ax.grid(zorder=0)\n for key in remove_spines:\n ax.spines[key].set_visible(False)\n\n\ndef _filter_ticks(lims, fscale):\n \"\"\"Create approximately spaced ticks between lims.\"\"\"\n if fscale == 'linear':\n return None, None # let matplotlib handle it\n lims = np.array(lims)\n ticks = list()\n if lims[1] > 20 * lims[0]:\n base = np.array([1, 2, 4])\n else:\n base = np.arange(1, 11)\n for exp in range(int(np.floor(np.log10(lims[0]))),\n int(np.floor(np.log10(lims[1]))) + 1):\n ticks += (base * (10 ** exp)).tolist()\n ticks = np.array(ticks)\n ticks = ticks[(ticks >= lims[0]) & (ticks <= lims[1])]\n ticklabels = [('%g' if t < 1 else '%d') % t for t in ticks]\n return ticks, ticklabels\n\n\ndef _get_flim(flim, fscale, freq, sfreq=None):\n \"\"\"Get reasonable frequency limits.\"\"\"\n if flim is None:\n if freq is None:\n flim = [0.1 if fscale == 'log' else 0., sfreq / 2.]\n else:\n if fscale == 'linear':\n flim = [freq[0]]\n else:\n flim = [freq[0] if freq[0] > 0 else 0.1 * freq[1]]\n flim += [freq[-1]]\n if fscale == 'log':\n if flim[0] <= 0:\n raise ValueError('flim[0] must be positive, got %s' % flim[0])\n elif flim[0] < 0:\n raise ValueError('flim[0] must be non-negative, got %s' % flim[0])\n return flim\n\n\ndef _check_fscale(fscale):\n \"\"\"Check for valid fscale.\"\"\"\n if not isinstance(fscale, str) or fscale not in ('log', 'linear'):\n raise ValueError('fscale must be \"log\" or \"linear\", got %s'\n % (fscale,))\n\n\n_DEFAULT_ALIM = (-80, 10)\n\n\ndef plot_filter(h, sfreq, freq=None, gain=None, title=None, color='#1f77b4',\n flim=None, fscale='log', alim=_DEFAULT_ALIM, show=True,\n compensate=False, plot=('time', 'magnitude', 'delay'),\n axes=None):\n \"\"\"Plot properties of a filter.\n\n Parameters\n ----------\n h : dict or ndarray\n An IIR dict or 1D ndarray of coefficients (for FIR filter).\n sfreq : float\n Sample rate of the data (Hz).\n freq : array-like or None\n The ideal response frequencies to plot (must be in ascending order).\n If None (default), do not plot the ideal response.\n gain : array-like or None\n The ideal response gains to plot.\n If None (default), do not plot the ideal response.\n title : str | None\n The title to use. 
If None (default), determine the title based\n on the type of the system.\n color : color object\n The color to use (default '#1f77b4').\n flim : tuple or None\n If not None, the x-axis frequency limits (Hz) to use.\n If None, freq will be used. If None (default) and freq is None,\n ``(0.1, sfreq / 2.)`` will be used.\n fscale : str\n Frequency scaling to use, can be \"log\" (default) or \"linear\".\n alim : tuple\n The y-axis amplitude limits (dB) to use (default: (-60, 10)).\n show : bool\n Show figure if True (default).\n compensate : bool\n If True, compensate for the filter delay (phase will not be shown).\n\n - For linear-phase FIR filters, this visualizes the filter coefficients\n assuming that the output will be shifted by ``N // 2``.\n - For IIR filters, this changes the filter coefficient display\n by filtering backward and forward, and the frequency response\n by squaring it.\n\n .. versionadded:: 0.18\n plot : list | tuple | str\n A list of the requested plots from ``time``, ``magnitude`` and\n ``delay``. Default is to plot all three filter properties\n ('time', 'magnitude', 'delay').\n\n .. versionadded:: 0.21.0\n axes : instance of Axes | list | None\n The axes to plot to. If list, the list must be a list of Axes of\n the same length as the number of requested plot types. If instance of\n Axes, there must be only one filter property plotted.\n Defaults to ``None``.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n The figure containing the plots.\n\n See Also\n --------\n mne.filter.create_filter\n plot_ideal_filter\n\n Notes\n -----\n .. versionadded:: 0.14\n \"\"\"\n from scipy.signal import (\n freqz, group_delay, lfilter, filtfilt, sosfilt, sosfiltfilt)\n import matplotlib.pyplot as plt\n\n sfreq = float(sfreq)\n _check_option('fscale', fscale, ['log', 'linear'])\n if isinstance(plot, str):\n plot = [plot]\n for xi, x in enumerate(plot):\n _check_option('plot[%d]' % xi, x, ('magnitude', 'delay', 'time'))\n\n flim = _get_flim(flim, fscale, freq, sfreq)\n if fscale == 'log':\n omega = np.logspace(np.log10(flim[0]), np.log10(flim[1]), 1000)\n else:\n omega = np.linspace(flim[0], flim[1], 1000)\n xticks, xticklabels = _filter_ticks(flim, fscale)\n omega /= sfreq / (2 * np.pi)\n if isinstance(h, dict): # IIR h.ndim == 2: # second-order sections\n if 'sos' in h:\n H = np.ones(len(omega), np.complex128)\n gd = np.zeros(len(omega))\n for section in h['sos']:\n this_H = freqz(section[:3], section[3:], omega)[1]\n H *= this_H\n if compensate:\n H *= this_H.conj() # time reversal is freq conj\n else:\n # Assume the forward-backward delay zeros out, which it\n # mostly should\n with warnings.catch_warnings(record=True): # singular GD\n warnings.simplefilter('ignore')\n gd += group_delay((section[:3], section[3:]), omega)[1]\n n = estimate_ringing_samples(h['sos'])\n delta = np.zeros(n)\n delta[0] = 1\n if compensate:\n delta = np.pad(delta, [(n - 1, 0)], 'constant')\n func = sosfiltfilt\n gd += (len(delta) - 1) // 2\n else:\n func = sosfilt\n h = func(h['sos'], delta)\n else:\n H = freqz(h['b'], h['a'], omega)[1]\n if compensate:\n H *= H.conj()\n with warnings.catch_warnings(record=True): # singular GD\n warnings.simplefilter('ignore')\n gd = group_delay((h['b'], h['a']), omega)[1]\n if compensate:\n gd += group_delay(h['b'].conj(), h['a'].conj(), omega)[1]\n n = estimate_ringing_samples((h['b'], h['a']))\n delta = np.zeros(n)\n delta[0] = 1\n if compensate:\n delta = np.pad(delta, [(n - 1, 0)], 'constant')\n func = filtfilt\n else:\n func = 
lfilter\n h = func(h['b'], h['a'], delta)\n if title is None:\n title = 'SOS (IIR) filter'\n if compensate:\n title += ' (forward-backward)'\n else:\n H = freqz(h, worN=omega)[1]\n with warnings.catch_warnings(record=True): # singular GD\n warnings.simplefilter('ignore')\n gd = group_delay((h, [1.]), omega)[1]\n title = 'FIR filter' if title is None else title\n if compensate:\n title += ' (delay-compensated)'\n\n fig = None\n if axes is None:\n fig, axes = plt.subplots(len(plot), 1)\n if isinstance(axes, plt.Axes):\n axes = [axes]\n elif isinstance(axes, np.ndarray):\n axes = list(axes)\n if fig is None:\n fig = axes[0].get_figure()\n if len(axes) != len(plot):\n raise ValueError('Length of axes (%d) must be the same as number of '\n 'requested filter properties (%d)'\n % (len(axes), len(plot)))\n\n t = np.arange(len(h))\n dlim = np.abs(t).max() / 2.\n dlim = [-dlim, dlim]\n if compensate:\n n_shift = (len(h) - 1) // 2\n t -= n_shift\n assert t[0] == -t[-1]\n gd -= n_shift\n t = t / sfreq\n gd = gd / sfreq\n f = omega * sfreq / (2 * np.pi)\n sl = slice(0 if fscale == 'linear' else 1, None, None)\n mag = 10 * np.log10(np.maximum((H * H.conj()).real, 1e-20))\n\n if 'time' in plot:\n ax_time_idx = np.where([p == 'time' for p in plot])[0][0]\n axes[ax_time_idx].plot(t, h, color=color)\n axes[ax_time_idx].set(xlim=t[[0, -1]], xlabel='Time (s)',\n ylabel='Amplitude', title=title)\n # Magnitude\n if 'magnitude' in plot:\n ax_mag_idx = np.where([p == 'magnitude' for p in plot])[0][0]\n axes[ax_mag_idx].plot(f[sl], mag[sl], color=color,\n linewidth=2, zorder=4)\n if freq is not None and gain is not None:\n plot_ideal_filter(freq, gain, axes[ax_mag_idx],\n fscale=fscale, show=False)\n axes[ax_mag_idx].set(ylabel='Magnitude (dB)', xlabel='', xscale=fscale)\n if xticks is not None:\n axes[ax_mag_idx].set(xticks=xticks)\n axes[ax_mag_idx].set(xticklabels=xticklabels)\n axes[ax_mag_idx].set(xlim=flim, ylim=alim, xlabel='Frequency (Hz)',\n ylabel='Amplitude (dB)')\n # Delay\n if 'delay' in plot:\n ax_delay_idx = np.where([p == 'delay' for p in plot])[0][0]\n axes[ax_delay_idx].plot(f[sl], gd[sl], color=color,\n linewidth=2, zorder=4)\n # shade nulled regions\n for start, stop in zip(*_mask_to_onsets_offsets(mag <= -39.9)):\n axes[ax_delay_idx].axvspan(f[start], f[stop - 1],\n facecolor='k', alpha=0.05,\n zorder=5)\n axes[ax_delay_idx].set(xlim=flim, ylabel='Group delay (s)',\n xlabel='Frequency (Hz)',\n xscale=fscale)\n if xticks is not None:\n axes[ax_delay_idx].set(xticks=xticks)\n axes[ax_delay_idx].set(xticklabels=xticklabels)\n axes[ax_delay_idx].set(xlim=flim, ylim=dlim, xlabel='Frequency (Hz)',\n ylabel='Delay (s)')\n\n adjust_axes(axes)\n tight_layout()\n plt_show(show)\n return fig\n\n\ndef plot_ideal_filter(freq, gain, axes=None, title='', flim=None, fscale='log',\n alim=_DEFAULT_ALIM, color='r', alpha=0.5, linestyle='--',\n show=True):\n \"\"\"Plot an ideal filter response.\n\n Parameters\n ----------\n freq : array-like\n The ideal response frequencies to plot (must be in ascending order).\n gain : array-like or None\n The ideal response gains to plot.\n axes : instance of Axes | None\n The subplot handle. 
With None (default), axes are created.\n title : str\n The title to use, (default: '').\n flim : tuple or None\n If not None, the x-axis frequency limits (Hz) to use.\n If None (default), freq used.\n fscale : str\n Frequency scaling to use, can be \"log\" (default) or \"linear\".\n alim : tuple\n If not None (default), the y-axis limits (dB) to use.\n color : color object\n The color to use (default: 'r').\n alpha : float\n The alpha to use (default: 0.5).\n linestyle : str\n The line style to use (default: '--').\n show : bool\n Show figure if True (default).\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n The figure.\n\n See Also\n --------\n plot_filter\n\n Notes\n -----\n .. versionadded:: 0.14\n\n Examples\n --------\n Plot a simple ideal band-pass filter::\n\n >>> from mne.viz import plot_ideal_filter\n >>> freq = [0, 1, 40, 50]\n >>> gain = [0, 1, 1, 0]\n >>> plot_ideal_filter(freq, gain, flim=(0.1, 100)) #doctest: +ELLIPSIS\n <...Figure...>\n \"\"\"\n import matplotlib.pyplot as plt\n my_freq, my_gain = list(), list()\n if freq[0] != 0:\n raise ValueError('freq should start with DC (zero) and end with '\n 'Nyquist, but got %s for DC' % (freq[0],))\n freq = np.array(freq)\n # deal with semilogx problems @ x=0\n _check_option('fscale', fscale, ['log', 'linear'])\n if fscale == 'log':\n freq[0] = 0.1 * freq[1] if flim is None else min(flim[0], freq[1])\n flim = _get_flim(flim, fscale, freq)\n transitions = list()\n for ii in range(len(freq)):\n if ii < len(freq) - 1 and gain[ii] != gain[ii + 1]:\n transitions += [[freq[ii], freq[ii + 1]]]\n my_freq += np.linspace(freq[ii], freq[ii + 1], 20,\n endpoint=False).tolist()\n my_gain += np.linspace(gain[ii], gain[ii + 1], 20,\n endpoint=False).tolist()\n else:\n my_freq.append(freq[ii])\n my_gain.append(gain[ii])\n my_gain = 10 * np.log10(np.maximum(my_gain, 10 ** (alim[0] / 10.)))\n if axes is None:\n axes = plt.subplots(1)[1]\n for transition in transitions:\n axes.axvspan(*transition, color=color, alpha=0.1)\n axes.plot(my_freq, my_gain, color=color, linestyle=linestyle, alpha=0.5,\n linewidth=4, zorder=3)\n xticks, xticklabels = _filter_ticks(flim, fscale)\n axes.set(ylim=alim, xlabel='Frequency (Hz)', ylabel='Amplitude (dB)',\n xscale=fscale)\n if xticks is not None:\n axes.set(xticks=xticks)\n axes.set(xticklabels=xticklabels)\n axes.set(xlim=flim)\n if title:\n axes.set(title=title)\n adjust_axes(axes)\n tight_layout()\n plt_show(show)\n return axes.figure\n\n\ndef _handle_event_colors(color_dict, unique_events, event_id):\n \"\"\"Create event-integer-to-color mapping, assigning defaults as needed.\"\"\"\n default_colors = dict(zip(sorted(unique_events), cycle(_get_color_list())))\n # warn if not enough colors\n if color_dict is None:\n if len(unique_events) > len(_get_color_list()):\n warn('More events than default colors available. You should pass '\n 'a list of unique colors.')\n else:\n custom_colors = dict()\n for key, color in color_dict.items():\n if key in unique_events: # key was a valid event integer\n custom_colors[key] = color\n elif key in event_id: # key was an event label\n custom_colors[event_id[key]] = color\n else: # key not a valid event, warn and ignore\n warn('Event ID %s is in the color dict but is not '\n 'present in events or event_id.' % str(key))\n # warn if color_dict is missing any entries\n unassigned = sorted(set(unique_events) - set(custom_colors))\n if len(unassigned):\n unassigned_str = ', '.join(str(e) for e in unassigned)\n warn('Color was not assigned for event%s %s. 
Default colors will '\n 'be used.' % (_pl(unassigned), unassigned_str))\n default_colors.update(custom_colors)\n return default_colors\n\n\n@fill_doc\ndef plot_csd(csd, info=None, mode='csd', colorbar=True, cmap=None,\n n_cols=None, show=True):\n \"\"\"Plot CSD matrices.\n\n A sub-plot is created for each frequency. If an info object is passed to\n the function, different channel types are plotted in different figures.\n\n Parameters\n ----------\n csd : instance of CrossSpectralDensity\n The CSD matrix to plot.\n %(info)s\n Used to split the figure by channel-type, if provided.\n By default, the CSD matrix is plotted as a whole.\n mode : 'csd' | 'coh'\n Whether to plot the cross-spectral density ('csd', the default), or\n the coherence ('coh') between the channels.\n colorbar : bool\n Whether to show a colorbar. Defaults to ``True``.\n cmap : str | None\n The matplotlib colormap to use. Defaults to None, which means the\n colormap will default to matplotlib's default.\n n_cols : int | None\n CSD matrices are plotted in a grid. This parameter controls how\n many matrix to plot side by side before starting a new row. By\n default, a number will be chosen to make the grid as square as\n possible.\n show : bool\n Whether to show the figure. Defaults to ``True``.\n\n Returns\n -------\n fig : list of Figure\n The figures created by this function.\n \"\"\"\n import matplotlib.pyplot as plt\n\n if mode not in ['csd', 'coh']:\n raise ValueError('\"mode\" should be either \"csd\" or \"coh\".')\n\n if info is not None:\n info_ch_names = info['ch_names']\n sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,\n exclude=[])\n sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,\n exclude=[])\n sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,\n exclude=[])\n idx_eeg = [csd.ch_names.index(info_ch_names[c])\n for c in sel_eeg if info_ch_names[c] in csd.ch_names]\n idx_mag = [csd.ch_names.index(info_ch_names[c])\n for c in sel_mag if info_ch_names[c] in csd.ch_names]\n idx_grad = [csd.ch_names.index(info_ch_names[c])\n for c in sel_grad if info_ch_names[c] in csd.ch_names]\n indices = [idx_eeg, idx_mag, idx_grad]\n titles = ['EEG', 'Magnetometers', 'Gradiometers']\n\n if mode == 'csd':\n # The units in which to plot the CSD\n units = dict(eeg='µV²', grad='fT²/cm²', mag='fT²')\n scalings = dict(eeg=1e12, grad=1e26, mag=1e30)\n else:\n indices = [np.arange(len(csd.ch_names))]\n if mode == 'csd':\n titles = ['Cross-spectral density']\n # Units and scaling unknown\n units = dict()\n scalings = dict()\n elif mode == 'coh':\n titles = ['Coherence']\n\n n_freqs = len(csd.frequencies)\n\n if n_cols is None:\n n_cols = int(np.ceil(np.sqrt(n_freqs)))\n n_rows = int(np.ceil(n_freqs / float(n_cols)))\n\n figs = []\n for ind, title, ch_type in zip(indices, titles, ['eeg', 'mag', 'grad']):\n if len(ind) == 0:\n continue\n\n fig, axes = plt.subplots(n_rows, n_cols, squeeze=False,\n figsize=(2 * n_cols + 1, 2.2 * n_rows))\n\n csd_mats = []\n for i in range(len(csd.frequencies)):\n cm = csd.get_data(index=i)[ind][:, ind]\n if mode == 'csd':\n cm = np.abs(cm) * scalings.get(ch_type, 1)\n elif mode == 'coh':\n # Compute coherence from the CSD matrix\n psd = np.diag(cm).real\n cm = np.abs(cm) ** 2 / psd[np.newaxis, :] / psd[:, np.newaxis]\n csd_mats.append(cm)\n\n vmax = np.max(csd_mats)\n\n for i, (freq, mat) in enumerate(zip(csd.frequencies, csd_mats)):\n ax = axes[i // n_cols][i % n_cols]\n im = ax.imshow(mat, interpolation='nearest', cmap=cmap, vmin=0,\n vmax=vmax)\n 
ax.set_xticks([])\n ax.set_yticks([])\n if csd._is_sum:\n ax.set_title('%.1f-%.1f Hz.' % (np.min(freq),\n np.max(freq)))\n else:\n ax.set_title('%.1f Hz.' % freq)\n\n plt.suptitle(title)\n plt.subplots_adjust(top=0.8)\n\n if colorbar:\n cb = plt.colorbar(im, ax=[a for ax_ in axes for a in ax_])\n if mode == 'csd':\n label = u'CSD'\n if ch_type in units:\n label += u' (%s)' % units[ch_type]\n cb.set_label(label)\n elif mode == 'coh':\n cb.set_label('Coherence')\n\n figs.append(fig)\n\n plt_show(show)\n return figs\n\n\ndef plot_chpi_snr(snr_dict, axes=None):\n \"\"\"Plot time-varying SNR estimates of the HPI coils.\n\n Parameters\n ----------\n snr_dict : dict\n The dictionary returned by `~mne.chpi.compute_chpi_snr`. Must have keys\n ``times``, ``freqs``, ``TYPE_snr``, ``TYPE_power``, and ``TYPE_resid``\n (where ``TYPE`` can be ``mag`` or ``grad`` or both).\n axes : None | list of matplotlib.axes.Axes\n Figure axes in which to draw the SNR, power, and residual plots. The\n number of axes should be 3× the number of MEG sensor types present in\n ``snr_dict``. If ``None`` (the default), a new\n `~matplotlib.figure.Figure` is created with the required number of\n axes.\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n A figure with subplots for SNR, power, and residual variance,\n separately for magnetometers and/or gradiometers (depending on what is\n present in ``snr_dict``).\n\n Notes\n -----\n If you supply a list of existing `~matplotlib.axes.Axes`, then the figure\n legend will not be drawn automatically. If you still want it, running\n ``fig.legend(loc='right', title='cHPI frequencies')`` will recreate it,\n though you may also need to manually adjust the margin to make room for it\n (e.g., using ``fig.subplots_adjust(right=0.8)``).\n\n .. versionadded:: 0.24\n \"\"\"\n import matplotlib.pyplot as plt\n\n valid_keys = list(snr_dict)[2:]\n titles = dict(snr='SNR', power='cHPI power', resid='Residual variance')\n full_names = dict(mag='magnetometers', grad='gradiometers')\n axes_was_none = axes is None\n if axes_was_none:\n fig, axes = plt.subplots(len(valid_keys), 1, sharex=True)\n else:\n fig = axes[0].get_figure()\n if len(axes) != len(valid_keys):\n raise ValueError(f'axes must be a list of {len(valid_keys)} axes, got '\n f'length {len(axes)} ({axes}).')\n fig.set_size_inches(10, 10)\n legend_labels_exist = False\n for key, ax in zip(valid_keys, axes):\n ch_type, kind = key.split('_')\n scaling = 1 if kind == 'snr' else DEFAULTS['scalings'][ch_type]\n plot_kwargs = dict(color='k') if kind == 'resid' else dict()\n lines = ax.plot(snr_dict['times'], snr_dict[key] * scaling ** 2,\n **plot_kwargs)\n # the freqs should be the same for all sensor types (and for SNR and\n # power subplots), so we only need to label the lines on one axes\n # (otherwise we get duplicate legend entries).\n if not legend_labels_exist:\n for line, freq in zip(lines, snr_dict['freqs']):\n line.set_label(f'{freq} Hz')\n legend_labels_exist = True\n unit = DEFAULTS['units'][ch_type]\n unit = f'({unit})' if '/' in unit else unit\n set_kwargs = dict(title=f'{titles[kind]}, {full_names[ch_type]}',\n ylabel='dB' if kind == 'snr' else f'{unit}²')\n if not axes_was_none:\n set_kwargs.update(xlabel='Time (s)')\n ax.set(**set_kwargs)\n if axes_was_none:\n ax.set(xlabel='Time (s)')\n fig.align_ylabels()\n fig.subplots_adjust(left=0.1, right=0.825, bottom=0.075, top=0.95,\n hspace=0.7)\n fig.legend(loc='right', title='cHPI frequencies')\n return fig\n" ]
[ [ "numpy.diff", "numpy.diag", "matplotlib.pyplot.yscale", "numpy.asarray", "matplotlib.pyplot.subplots_adjust", "numpy.meshgrid", "scipy.signal.group_delay", "matplotlib.colors.Normalize", "matplotlib.pyplot.figure", "matplotlib.pyplot.gca", "matplotlib.pyplot.gcf", "numpy.abs", "numpy.in1d", "matplotlib.pyplot.xlim", "matplotlib.pyplot.barh", "numpy.log10", "matplotlib.pyplot.suptitle", "numpy.where", "numpy.linspace", "numpy.unique", "numpy.round", "numpy.sqrt", "numpy.zeros", "numpy.dot", "matplotlib.pyplot.subplots", "numpy.arange", "scipy.signal.freqz", "numpy.max", "numpy.min", "matplotlib.pyplot.ylim", "matplotlib.pyplot.close", "numpy.maximum", "matplotlib.pyplot.colorbar", "numpy.sort", "numpy.pad", "matplotlib.pyplot.grid", "matplotlib.patheffects.withStroke", "matplotlib.pyplot.pcolor", "numpy.ravel", "numpy.atleast_1d", "numpy.array", "numpy.concatenate", "numpy.full", "scipy.linalg.svd" ] ]
bopopescu/smart_contracts7
[ "40a487cb3843e86ab5e4cb50b1aafa2095f648cd" ]
[ "env/lib/python3.6/site-packages/torch/optim/asgd.py" ]
[ "import math\nimport torch\nfrom .optimizer import Optimizer\n\n\nclass ASGD(Optimizer):\n \"\"\"Implements Averaged Stochastic Gradient Descent.\n\n It has been proposed in `Acceleration of stochastic approximation by\n averaging`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-2)\n lambd (float, optional): decay term (default: 1e-4)\n alpha (float, optional): power for eta update (default: 0.75)\n t0 (float, optional): point at which to start averaging (default: 1e6)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n\n .. _Acceleration of stochastic approximation by averaging:\n http://dl.acm.org/citation.cfm?id=131098\n \"\"\"\n\n def __init__(self, params, lr=1e-2, lambd=1e-4, alpha=0.75, t0=1e6, weight_decay=0):\n defaults = dict(lr=lr, lambd=lambd, alpha=alpha, t0=t0,\n weight_decay=weight_decay)\n super(ASGD, self).__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('ASGD does not support sparse gradients')\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n state['eta'] = group['lr']\n state['mu'] = 1\n state['ax'] = torch.zeros_like(p.data)\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n grad = grad.add(group['weight_decay'], p.data)\n\n # decay term\n p.data.mul_(1 - group['lambd'] * state['eta'])\n\n # update parameter\n p.data.add_(-state['eta'], grad)\n\n # averaging\n if state['mu'] != 1:\n state['ax'].add_(p.data.sub(state['ax']).mul(state['mu']))\n else:\n state['ax'].copy_(p.data)\n\n # update eta and mu\n state['eta'] = (group['lr'] /\n math.pow((1 + group['lambd'] * group['lr'] * state['step']), group['alpha']))\n state['mu'] = 1 / max(1, state['step'] - group['t0'])\n\n return loss\n" ]
[ [ "torch.zeros_like" ] ]
luftwurzel/pandas
[ "8980af7ce9d98713b0f8792e38f0fe43088e8780" ]
[ "pandas/tests/io/parser/test_python_parser_only.py" ]
[ "\"\"\"\nTests that apply specifically to the Python parser. Unless specifically\nstated as a Python-specific issue, the goal is to eventually move as many of\nthese tests out of this module as soon as the C parser can accept further\narguments when parsing.\n\"\"\"\nfrom __future__ import annotations\n\nimport csv\nfrom io import (\n BytesIO,\n StringIO,\n)\n\nimport pytest\n\nfrom pandas.errors import (\n ParserError,\n ParserWarning,\n)\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n)\nimport pandas._testing as tm\n\n\ndef test_default_separator(python_parser_only):\n # see gh-17333\n #\n # csv.Sniffer in Python treats \"o\" as separator.\n data = \"aob\\n1o2\\n3o4\"\n parser = python_parser_only\n expected = DataFrame({\"a\": [1, 3], \"b\": [2, 4]})\n\n result = parser.read_csv(StringIO(data), sep=None)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"skipfooter\", [\"foo\", 1.5, True])\ndef test_invalid_skipfooter_non_int(python_parser_only, skipfooter):\n # see gh-15925 (comment)\n data = \"a\\n1\\n2\"\n parser = python_parser_only\n msg = \"skipfooter must be an integer\"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=skipfooter)\n\n\ndef test_invalid_skipfooter_negative(python_parser_only):\n # see gh-15925 (comment)\n data = \"a\\n1\\n2\"\n parser = python_parser_only\n msg = \"skipfooter cannot be negative\"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=-1)\n\n\[email protected](\"kwargs\", [{\"sep\": None}, {\"delimiter\": \"|\"}])\ndef test_sniff_delimiter(python_parser_only, kwargs):\n data = \"\"\"index|A|B|C\nfoo|1|2|3\nbar|4|5|6\nbaz|7|8|9\n\"\"\"\n parser = python_parser_only\n result = parser.read_csv(StringIO(data), index_col=0, **kwargs)\n expected = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n columns=[\"A\", \"B\", \"C\"],\n index=Index([\"foo\", \"bar\", \"baz\"], name=\"index\"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_sniff_delimiter_comment(python_parser_only):\n data = \"\"\"# comment line\nindex|A|B|C\n# comment line\nfoo|1|2|3 # ignore | this\nbar|4|5|6\nbaz|7|8|9\n\"\"\"\n parser = python_parser_only\n result = parser.read_csv(StringIO(data), index_col=0, sep=None, comment=\"#\")\n expected = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n columns=[\"A\", \"B\", \"C\"],\n index=Index([\"foo\", \"bar\", \"baz\"], name=\"index\"),\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"encoding\", [None, \"utf-8\"])\ndef test_sniff_delimiter_encoding(python_parser_only, encoding):\n parser = python_parser_only\n data = \"\"\"ignore this\nignore this too\nindex|A|B|C\nfoo|1|2|3\nbar|4|5|6\nbaz|7|8|9\n\"\"\"\n\n if encoding is not None:\n from io import TextIOWrapper\n\n data = data.encode(encoding)\n data = BytesIO(data)\n data = TextIOWrapper(data, encoding=encoding)\n else:\n data = StringIO(data)\n\n result = parser.read_csv(data, index_col=0, sep=None, skiprows=2, encoding=encoding)\n expected = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n columns=[\"A\", \"B\", \"C\"],\n index=Index([\"foo\", \"bar\", \"baz\"], name=\"index\"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_single_line(python_parser_only):\n # see gh-6607: sniff separator\n parser = python_parser_only\n result = parser.read_csv(StringIO(\"1,2\"), names=[\"a\", \"b\"], header=None, sep=None)\n\n expected = DataFrame({\"a\": [1], \"b\": [2]})\n tm.assert_frame_equal(result, expected)\n\n\[email 
protected](\"kwargs\", [{\"skipfooter\": 2}, {\"nrows\": 3}])\ndef test_skipfooter(python_parser_only, kwargs):\n # see gh-6607\n data = \"\"\"A,B,C\n1,2,3\n4,5,6\n7,8,9\nwant to skip this\nalso also skip this\n\"\"\"\n parser = python_parser_only\n result = parser.read_csv(StringIO(data), **kwargs)\n\n expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=[\"A\", \"B\", \"C\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"compression,klass\", [(\"gzip\", \"GzipFile\"), (\"bz2\", \"BZ2File\")]\n)\ndef test_decompression_regex_sep(python_parser_only, csv1, compression, klass):\n # see gh-6607\n parser = python_parser_only\n\n with open(csv1, \"rb\") as f:\n data = f.read()\n\n data = data.replace(b\",\", b\"::\")\n expected = parser.read_csv(csv1)\n\n module = pytest.importorskip(compression)\n klass = getattr(module, klass)\n\n with tm.ensure_clean() as path:\n with klass(path, mode=\"wb\") as tmp:\n tmp.write(data)\n\n result = parser.read_csv(path, sep=\"::\", compression=compression)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_buglet_4x_multi_index(python_parser_only):\n # see gh-6607\n data = \"\"\" A B C D E\none two three four\na b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640\na q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744\nx q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838\"\"\"\n parser = python_parser_only\n\n expected = DataFrame(\n [\n [-0.5109, -2.3358, -0.4645, 0.05076, 0.3640],\n [0.4473, 1.4152, 0.2834, 1.00661, 0.1744],\n [-0.6662, -0.5243, -0.3580, 0.89145, 2.5838],\n ],\n columns=[\"A\", \"B\", \"C\", \"D\", \"E\"],\n index=MultiIndex.from_tuples(\n [(\"a\", \"b\", 10.0032, 5), (\"a\", \"q\", 20, 4), (\"x\", \"q\", 30, 3)],\n names=[\"one\", \"two\", \"three\", \"four\"],\n ),\n )\n result = parser.read_csv(StringIO(data), sep=r\"\\s+\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_buglet_4x_multi_index2(python_parser_only):\n # see gh-6893\n data = \" A B C\\na b c\\n1 3 7 0 3 6\\n3 1 4 1 5 9\"\n parser = python_parser_only\n\n expected = DataFrame.from_records(\n [(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],\n columns=list(\"abcABC\"),\n index=list(\"abc\"),\n )\n result = parser.read_csv(StringIO(data), sep=r\"\\s+\")\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"add_footer\", [True, False])\ndef test_skipfooter_with_decimal(python_parser_only, add_footer):\n # see gh-6971\n data = \"1#2\\n3#4\"\n parser = python_parser_only\n expected = DataFrame({\"a\": [1.2, 3.4]})\n\n if add_footer:\n # The stray footer line should not mess with the\n # casting of the first two lines if we skip it.\n kwargs = {\"skipfooter\": 1}\n data += \"\\nFooter\"\n else:\n kwargs = {}\n\n result = parser.read_csv(StringIO(data), names=[\"a\"], decimal=\"#\", **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"sep\", [\"::\", \"#####\", \"!!!\", \"123\", \"#1!c5\", \"%!c!d\", \"@@#4:2\", \"_!pd#_\"]\n)\[email protected](\n \"encoding\", [\"utf-16\", \"utf-16-be\", \"utf-16-le\", \"utf-32\", \"cp037\"]\n)\ndef test_encoding_non_utf8_multichar_sep(python_parser_only, sep, encoding):\n # see gh-3404\n expected = DataFrame({\"a\": [1], \"b\": [2]})\n parser = python_parser_only\n\n data = \"1\" + sep + \"2\"\n encoded_data = data.encode(encoding)\n\n result = parser.read_csv(\n BytesIO(encoded_data), sep=sep, names=[\"a\", \"b\"], encoding=encoding\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"quoting\", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE])\ndef 
test_multi_char_sep_quotes(python_parser_only, quoting):\n # see gh-13374\n kwargs = {\"sep\": \",,\"}\n parser = python_parser_only\n\n data = 'a,,b\\n1,,a\\n2,,\"2,,b\"'\n\n if quoting == csv.QUOTE_NONE:\n msg = \"Expected 2 fields in line 3, saw 3\"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), quoting=quoting, **kwargs)\n else:\n msg = \"ignored when a multi-char delimiter is used\"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), quoting=quoting, **kwargs)\n\n\ndef test_none_delimiter(python_parser_only, capsys):\n # see gh-13374 and gh-17465\n parser = python_parser_only\n data = \"a,b,c\\n0,1,2\\n3,4,5,6\\n7,8,9\"\n expected = DataFrame({\"a\": [0, 7], \"b\": [1, 8], \"c\": [2, 9]})\n\n # We expect the third line in the data to be\n # skipped because it is malformed, but we do\n # not expect any errors to occur.\n result = parser.read_csv(StringIO(data), header=0, sep=None, on_bad_lines=\"warn\")\n tm.assert_frame_equal(result, expected)\n\n captured = capsys.readouterr()\n assert \"Skipping line 3\" in captured.err\n\n\[email protected](\"data\", ['a\\n1\\n\"b\"a', 'a,b,c\\ncat,foo,bar\\ndog,foo,\"baz'])\[email protected](\"skipfooter\", [0, 1])\ndef test_skipfooter_bad_row(python_parser_only, data, skipfooter):\n # see gh-13879 and gh-15910\n parser = python_parser_only\n if skipfooter:\n msg = \"parsing errors in the skipped footer rows\"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=skipfooter)\n else:\n msg = \"unexpected end of data|expected after\"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=skipfooter)\n\n\ndef test_malformed_skipfooter(python_parser_only):\n parser = python_parser_only\n data = \"\"\"ignore\nA,B,C\n1,2,3 # comment\n1,2,3,4,5\n2,3,4\nfooter\n\"\"\"\n msg = \"Expected 3 fields in line 4, saw 5\"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), header=1, comment=\"#\", skipfooter=1)\n\n\ndef test_python_engine_file_no_next(python_parser_only):\n parser = python_parser_only\n\n class NoNextBuffer:\n def __init__(self, csv_data) -> None:\n self.data = csv_data\n\n def __iter__(self):\n return self.data.__iter__()\n\n def read(self):\n return self.data\n\n def readline(self):\n return self.data\n\n parser.read_csv(NoNextBuffer(\"a\\n1\"))\n\n\[email protected](\"bad_line_func\", [lambda x: [\"2\", \"3\"], lambda x: x[:2]])\ndef test_on_bad_lines_callable(python_parser_only, bad_line_func):\n # GH 5686\n parser = python_parser_only\n data = \"\"\"a,b\n1,2\n2,3,4,5,6\n3,4\n\"\"\"\n bad_sio = StringIO(data)\n result = parser.read_csv(bad_sio, on_bad_lines=bad_line_func)\n expected = DataFrame({\"a\": [1, 2, 3], \"b\": [2, 3, 4]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_on_bad_lines_callable_write_to_external_list(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = \"\"\"a,b\n1,2\n2,3,4,5,6\n3,4\n\"\"\"\n bad_sio = StringIO(data)\n lst = []\n\n def bad_line_func(bad_line: list[str]) -> list[str]:\n lst.append(bad_line)\n return [\"2\", \"3\"]\n\n result = parser.read_csv(bad_sio, on_bad_lines=bad_line_func)\n expected = DataFrame({\"a\": [1, 2, 3], \"b\": [2, 3, 4]})\n tm.assert_frame_equal(result, expected)\n assert lst == [[\"2\", \"3\", \"4\", \"5\", \"6\"]]\n\n\[email protected](\"bad_line_func\", [lambda x: [\"foo\", \"bar\"], lambda x: x[:2]])\[email protected](\"sep\", [\",\", \"111\"])\ndef 
test_on_bad_lines_callable_iterator_true(python_parser_only, bad_line_func, sep):\n # GH 5686\n # iterator=True has a separate code path than iterator=False\n parser = python_parser_only\n data = f\"\"\"\n0{sep}1\nhi{sep}there\nfoo{sep}bar{sep}baz\ngood{sep}bye\n\"\"\"\n bad_sio = StringIO(data)\n result_iter = parser.read_csv(\n bad_sio, on_bad_lines=bad_line_func, chunksize=1, iterator=True, sep=sep\n )\n expecteds = [\n {\"0\": \"hi\", \"1\": \"there\"},\n {\"0\": \"foo\", \"1\": \"bar\"},\n {\"0\": \"good\", \"1\": \"bye\"},\n ]\n for i, (result, expected) in enumerate(zip(result_iter, expecteds)):\n expected = DataFrame(expected, index=range(i, i + 1))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_on_bad_lines_callable_dont_swallow_errors(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = \"\"\"a,b\n1,2\n2,3,4,5,6\n3,4\n\"\"\"\n bad_sio = StringIO(data)\n msg = \"This function is buggy.\"\n\n def bad_line_func(bad_line):\n raise ValueError(msg)\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(bad_sio, on_bad_lines=bad_line_func)\n\n\ndef test_on_bad_lines_callable_not_expected_length(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = \"\"\"a,b\n1,2\n2,3,4,5,6\n3,4\n\"\"\"\n bad_sio = StringIO(data)\n\n with tm.assert_produces_warning(ParserWarning, match=\"Length of header or names\"):\n result = parser.read_csv(bad_sio, on_bad_lines=lambda x: x)\n expected = DataFrame({\"a\": [1, 2, 3], \"b\": [2, 3, 4]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_on_bad_lines_callable_returns_none(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = \"\"\"a,b\n1,2\n2,3,4,5,6\n3,4\n\"\"\"\n bad_sio = StringIO(data)\n\n result = parser.read_csv(bad_sio, on_bad_lines=lambda x: None)\n expected = DataFrame({\"a\": [1, 3], \"b\": [2, 4]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_on_bad_lines_index_col_inferred(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = \"\"\"a,b\n1,2,3\n4,5,6\n\"\"\"\n bad_sio = StringIO(data)\n\n result = parser.read_csv(bad_sio, on_bad_lines=lambda x: [\"99\", \"99\"])\n expected = DataFrame({\"a\": [2, 5], \"b\": [3, 6]}, index=[1, 4])\n tm.assert_frame_equal(result, expected)\n" ]
[ [ "pandas._testing.ensure_clean", "pandas._testing.assert_produces_warning", "pandas.DataFrame", "pandas._testing.assert_frame_equal", "pandas.MultiIndex.from_tuples", "pandas.Index" ] ]
jkterry1/parameter-sharing-paper
[ "cb26ad195b580006f66fd8a60973408d5657b209" ]
[ "indicator_opt.py" ]
[ "import sys\nimport json\nimport numpy as np\nimport os\nimport pickle as pkl\nimport time\nfrom pprint import pprint\n\nfrom stable_baselines3 import PPO, DQN\nfrom stable_baselines3.common.utils import set_random_seed\n\nfrom pettingzoo.butterfly import cooperative_pong_v3, prospector_v4, knights_archers_zombies_v7\nfrom pettingzoo.atari import entombed_cooperative_v2, pong_v2\nfrom pettingzoo.atari.base_atari_env import BaseAtariEnv, base_env_wrapper_fn, parallel_wrapper_fn\nimport gym\n\nimport supersuit as ss\nfrom stable_baselines3.common.vec_env import VecMonitor, VecTransposeImage, VecNormalize\nfrom stable_baselines3.common.evaluation import evaluate_policy\nfrom stable_baselines3.common.callbacks import EvalCallback\nfrom stable_baselines3.common.preprocessing import is_image_space, is_image_space_channels_first\n\nimport optuna\nfrom optuna.integration.skopt import SkoptSampler\nfrom optuna.pruners import BasePruner, MedianPruner, SuccessiveHalvingPruner\nfrom optuna.samplers import BaseSampler, RandomSampler, TPESampler\nfrom optuna.visualization import plot_optimization_history, plot_param_importances\n\nfrom utils.hyperparams_opt import sample_ppo_params, sample_dqn_params\nfrom utils.callbacks import SaveVecNormalizeCallback, TrialEvalCallback\n\nfrom indicator_util import AgentIndicatorWrapper, InvertColorIndicator, BinaryIndicator, GeometricPatternIndicator\n\nimport argparse\n\nfrom stable_baselines3.common.utils import set_random_seed\n\nif __name__ == \"__main__\": # noqa: C901\n parser = argparse.ArgumentParser()\n \n '''\n Env List\n - Entombed Cooperative (Atari): DQN, PPO\n - Cooperative Pong (Butterfly): DQN, PPO\n - Prospector (Butterfly): PPO\n - KAZ (Butterfly): DQN, PPO\n - Pong (Atari): DQN, PPO\n '''\n butterfly_envs = [\"prospector-v4\", \"knights-archers-zombies-v7\", \"cooperative-pong-v3\"]\n atari_envs = [\"entombed-cooperative-v2\", \"pong-v2\"]\n\n parser.add_argument(\"--algo\", help=\"RL Algorithm\", default=\"ppo\", type=str, required=False, choices=[\"ppo\", \"dqn\"])\n parser.add_argument(\"--env\", type=str, default=\"pong-v2\", help=\"environment ID\", choices=[\n \"prospector-v4\",\n \"knights-archers-zombies-v7\",\n \"cooperative-pong-v3\",\n \"entombed-cooperative-v2\",\n \"pong-v2\"\n ])\n parser.add_argument(\"-n\", \"--n-timesteps\", help=\"Overwrite the number of timesteps\", default=1e6, type=int)\n parser.add_argument(\"--n-trials\", help=\"Number of trials for optimizing hyperparameters\", type=int, default=10)\n parser.add_argument(\n \"--optimization-log-path\",\n help=\"Path to save the evaluation log and optimal policy for each hyperparameter tried during optimization. 
\"\n \"Disabled if no argument is passed.\",\n type=str,\n )\n parser.add_argument(\"--eval-episodes\", help=\"Number of episodes to use for evaluation\", default=5, type=int)\n parser.add_argument(\n \"--sampler\",\n help=\"Sampler to use when optimizing hyperparameters\",\n type=str,\n default=\"tpe\",\n choices=[\"random\", \"tpe\", \"skopt\"],\n )\n parser.add_argument(\n \"--pruner\",\n help=\"Pruner to use when optimizing hyperparameters\",\n type=str,\n default=\"median\",\n choices=[\"halving\", \"median\", \"none\"],\n )\n parser.add_argument(\"--n-startup-trials\", help=\"Number of trials before using optuna sampler\", type=int, default=10)\n parser.add_argument(\n \"--n-evaluations\",\n help=\"Training policies are evaluated every n-timesteps // n-evaluations steps when doing hyperparameter optimization\",\n type=int,\n default=100,\n )\n parser.add_argument(\"-f\", \"--log-folder\", help=\"Log folder\", type=str, default=\"logs\")\n parser.add_argument(\n \"--storage\", help=\"Database storage path if distributed optimization should be used\", type=str, default=None\n )\n parser.add_argument(\"--study-name\", help=\"Study name for distributed optimization\", type=str, default=None)\n parser.add_argument(\"--verbose\", help=\"Verbose mode (0: no output, 1: INFO)\", default=1, type=int)\n args = parser.parse_args()\n\n seed = np.random.randint(2 ** 32 - 1, dtype=\"int64\").item()\n set_random_seed(seed)\n\n print(\"=\" * 10, args.env, \"=\" * 10)\n print(f\"Seed: {seed}\")\n \n # Hyperparameter optimization\n\n # Determine sampler and pruner\n if args.sampler == \"random\":\n sampler = RandomSampler(seed=seed)\n elif args.sampler == \"tpe\":\n sampler = TPESampler(n_startup_trials=args.n_startup_trials, seed=seed)\n elif args.sampler == \"skopt\":\n sampler = SkoptSampler(skopt_kwargs={\"base_estimator\": \"GP\", \"acq_func\": \"gp_hedge\"})\n else:\n raise ValueError(f\"Unknown sampler: {args.sampler}\")\n \n if args.pruner == \"halving\":\n pruner = SuccessiveHalvingPruner(min_resource=1, reduction_factor=4, min_early_stopping_rate=0)\n elif args.pruner == \"median\":\n pruner = MedianPruner(n_startup_trials=args.n_startup_trials, n_warmup_steps=args.n_evaluations // 3)\n elif args.pruner == \"none\":\n # Do not prune\n pruner = MedianPruner(n_startup_trials=args.n_trials, n_warmup_steps=args.n_evaluations)\n else:\n raise ValueError(f\"Unknown pruner: {args.pruner}\")\n\n print(f\"Sampler: {args.sampler} - Pruner: {args.pruner}\")\n\n # Create study\n study = optuna.create_study(\n sampler=sampler,\n pruner=pruner,\n storage=args.storage,\n study_name=args.study_name,\n load_if_exists=True,\n direction=\"maximize\",\n )\n\n hyperparams_sampler = {'ppo': sample_ppo_params, 'dqn': sample_dqn_params}\n hyperparams_algo = {'ppo': PPO, 'dqn': DQN}\n \n muesli_obs_size = 96 \n muesli_frame_size = 4\n\n # Objective function for hyperparameter search\n def objective(trial: optuna.Trial) -> float:\n #kwargs = self._hyperparams.copy()\n kwargs = {\n #'n_envs': 1,\n 'policy': 'CnnPolicy',\n #'n_timesteps': 1e6,\n }\n\n # Sample candidate hyperparameters\n sampled_hyperparams = hyperparams_sampler[args.algo](trial)\n kwargs.update(sampled_hyperparams)\n\n # Create training env\n if args.env == \"prospector-v4\":\n env = prospector_v4.parallel_env()\n agent_type = \"prospector\"\n elif args.env == \"knights-archers-zombies-v7\":\n env = knights_archers_zombies_v7.parallel_env()\n agent_type = \"archer\"\n elif args.env == \"cooperative-pong-v3\":\n env = 
cooperative_pong_v3.parallel_env()\n agent_type = \"paddle_0\"\n elif args.env == \"entombed-cooperative-v2\":\n env = entombed_cooperative_v2.parallel_env()\n agent_type = \"first\"\n elif args.env == \"pong-v2\":\n env = pong_v2.parallel_env()\n agent_type = \"first\"\n env = ss.color_reduction_v0(env)\n env = ss.pad_action_space_v0(env)\n env = ss.pad_observations_v0(env)\n env = ss.resize_v0(env, x_size=muesli_obs_size, y_size=muesli_obs_size, linear_interp=True)\n env = ss.frame_stack_v1(env, stack_size=muesli_frame_size)\n\n # Enable black death\n if args.env == 'knights-archers-zombies-v7':\n env = ss.black_death_v2(env)\n\n # Agent indicator wrapper\n agent_indicator_name = trial.suggest_categorical(\"agent_indicator\", choices=[\"identity\", \"invert\", \"invert-replace\", \"binary\", \"geometric\"])\n if agent_indicator_name == \"invert\":\n agent_indicator = InvertColorIndicator(env, agent_type)\n agent_indicator_wrapper = AgentIndicatorWrapper(agent_indicator)\n elif agent_indicator_name == \"invert-replace\":\n agent_indicator = InvertColorIndicator(env, agent_type)\n agent_indicator_wrapper = AgentIndicatorWrapper(agent_indicator, False)\n elif agent_indicator_name == \"binary\":\n agent_indicator = BinaryIndicator(env, agent_type)\n agent_indicator_wrapper = AgentIndicatorWrapper(agent_indicator)\n elif agent_indicator_name == \"geometric\":\n agent_indicator = GeometricPatternIndicator(env, agent_type)\n agent_indicator_wrapper = AgentIndicatorWrapper(agent_indicator)\n if agent_indicator_name != \"identity\":\n env = ss.observation_lambda_v0(env, agent_indicator_wrapper.apply, agent_indicator_wrapper.apply_space)\n\n env = ss.pettingzoo_env_to_vec_env_v0(env)\n #env = ss.concat_vec_envs_v0(env, num_vec_envs=1, num_cpus=1, base_class='stable_baselines3')\n env = VecMonitor(env)\n\n def image_transpose(env):\n if is_image_space(env.observation_space) and not is_image_space_channels_first(env.observation_space):\n env = VecTransposeImage(env)\n return env\n env = image_transpose(env)\n\n model = hyperparams_algo[args.algo](\n env=env,\n tensorboard_log=None,\n # We do not seed the trial\n seed=None,\n verbose=0,\n **kwargs,\n )\n\n model.trial = trial\n\n # Create eval env\n if args.env == \"prospector-v4\":\n eval_env = prospector_v4.parallel_env()\n agent_type = \"prospector\"\n elif args.env == \"knights-archers-zombies-v7\":\n eval_env = knights_archers_zombies_v7.parallel_env()\n agent_type = \"archer\"\n elif args.env == \"cooperative-pong-v3\":\n eval_env = cooperative_pong_v3.parallel_env()\n agent_type = \"paddle_0\"\n elif args.env == \"entombed-cooperative-v2\":\n eval_env = entombed_cooperative_v2.parallel_env()\n agent_type = \"first\"\n elif args.env == \"pong-v2\":\n def pong_single_raw_env(**kwargs):\n return BaseAtariEnv(game=\"pong\", num_players=1, env_name=os.path.basename(__file__)[:-3], **kwargs)\n pong_single_env = base_env_wrapper_fn(pong_single_raw_env)\n pong_parallel_env = parallel_wrapper_fn(pong_single_env)\n eval_env = pong_parallel_env()\n #eval_env = pong_v2.parallel_env()\n #eval_env = gym.make(\"Pong-v0\", obs_type='image')\n agent_type = \"first\"\n eval_env = ss.color_reduction_v0(eval_env)\n eval_env = ss.pad_action_space_v0(eval_env)\n eval_env = ss.pad_observations_v0(eval_env)\n eval_env = ss.resize_v0(eval_env, x_size=muesli_obs_size, y_size=muesli_obs_size, linear_interp=True)\n eval_env = ss.frame_stack_v1(eval_env, stack_size=muesli_frame_size)\n # Enable black death\n if args.env == 'knights-archers-zombies-v7':\n eval_env = 
ss.black_death_v2(eval_env)\n\n # Agent indicator wrapper\n if agent_indicator_name == \"invert\":\n eval_agent_indicator = InvertColorIndicator(eval_env, agent_type)\n eval_agent_indicator_wrapper = AgentIndicatorWrapper(eval_agent_indicator)\n elif agent_indicator_name == \"invert-replace\":\n eval_agent_indicator = InvertColorIndicator(eval_env, agent_type)\n eval_agent_indicator_wrapper = AgentIndicatorWrapper(eval_agent_indicator, False)\n elif agent_indicator_name == \"binary\":\n eval_agent_indicator = BinaryIndicator(eval_env, agent_type)\n eval_agent_indicator_wrapper = AgentIndicatorWrapper(eval_agent_indicator)\n elif agent_indicator_name == \"geometric\":\n eval_agent_indicator = GeometricPatternIndicator(eval_env, agent_type)\n eval_agent_indicator_wrapper = AgentIndicatorWrapper(eval_agent_indicator)\n if agent_indicator_name != \"identity\":\n eval_env = ss.observation_lambda_v0(eval_env, eval_agent_indicator_wrapper.apply, eval_agent_indicator_wrapper.apply_space)\n\n eval_env = ss.pettingzoo_env_to_vec_env_v0(eval_env)\n #eval_env = ss.concat_vec_envs_v0(eval_env, num_vec_envs=1, num_cpus=1, base_class='stable_baselines3')\n eval_env = VecMonitor(eval_env)\n eval_env = image_transpose(eval_env)\n\n optuna_eval_freq = int(args.n_timesteps / args.n_evaluations)\n # Account for parallel envs\n optuna_eval_freq = max(optuna_eval_freq // model.get_env().num_envs, 1)\n # Use non-deterministic eval for Atari\n path = None\n if args.optimization_log_path is not None:\n path = os.path.join(args.optimization_log_path, f\"trial_{str(trial.number)}\")\n #callbacks = get_callback_list({\"callback\": self.specified_callbacks})\n callbacks = []\n deterministic_eval = args.env not in atari_envs\n eval_callback = TrialEvalCallback(\n eval_env,\n trial,\n best_model_save_path=path,\n log_path=path,\n n_eval_episodes=args.eval_episodes,\n eval_freq=optuna_eval_freq,\n deterministic=deterministic_eval,\n )\n callbacks.append(eval_callback)\n\n try:\n model.learn(args.n_timesteps, callback=callbacks)\n # Free memory\n model.env.close()\n eval_env.close()\n except (AssertionError, ValueError) as e:\n # Sometimes, random hyperparams can generate NaN\n # Free memory\n model.env.close()\n eval_env.close()\n # Prune hyperparams that generate NaNs\n print(e)\n print(\"============\")\n print(\"Sampled hyperparams:\")\n pprint(sampled_hyperparams)\n raise optuna.exceptions.TrialPruned()\n is_pruned = eval_callback.is_pruned\n reward = eval_callback.last_mean_reward\n\n del model.env, eval_env\n del model\n\n if is_pruned:\n raise optuna.exceptions.TrialPruned()\n\n return reward\n\n pass\n\n try:\n study.optimize(objective, n_trials=args.n_trials, n_jobs=1)\n except KeyboardInterrupt:\n pass\n\n print(\"Number of finished trials: \", len(study.trials))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\"Value: \", trial.value)\n\n print(\"Params: \")\n for key, value in trial.params.items():\n print(f\" {key}: {value}\")\n\n report_name = (\n f\"report_{args.env}_{args.n_trials}-trials-{args.n_timesteps}\"\n f\"-{args.sampler}-{args.pruner}_{int(time.time())}\"\n )\n\n log_path = os.path.join(args.log_folder, args.algo, report_name)\n\n if args.verbose:\n print(f\"Writing report to {log_path}\")\n\n # Write report\n os.makedirs(os.path.dirname(log_path), exist_ok=True)\n study.trials_dataframe().to_csv(f\"{log_path}.csv\")\n\n # Save python object to inspect/re-use it later\n with open(f\"{log_path}.pkl\", \"wb+\") as f:\n pkl.dump(study, f)\n\n # Plot optimization result\n try:\n fig1 
= plot_optimization_history(study)\n fig2 = plot_param_importances(study)\n\n fig1.show()\n fig2.show()\n except (ValueError, ImportError, RuntimeError):\n pass" ]
[ [ "numpy.random.randint" ] ]
sanket-kamthe/probability
[ "c22b6201155c2e58d08a4ad30641d1aff59fbe7c" ]
[ "tensorflow_probability/python/distributions/beta.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The Beta distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import kullback_leibler\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.util.seed_stream import SeedStream\nfrom tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import\n\n__all__ = [\n \"Beta\",\n]\n\n\n_beta_sample_note = \"\"\"Note: `x` must have dtype `self.dtype` and be in\n`[0, 1].` It must have a shape compatible with `self.batch_shape()`.\"\"\"\n\n\nclass Beta(distribution.Distribution):\n \"\"\"Beta distribution.\n\n The Beta distribution is defined over the `(0, 1)` interval using parameters\n `concentration1` (aka \"alpha\") and `concentration0` (aka \"beta\").\n\n #### Mathematical Details\n\n The probability density function (pdf) is,\n\n ```none\n pdf(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z\n Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)\n ```\n\n where:\n\n * `concentration1 = alpha`,\n * `concentration0 = beta`,\n * `Z` is the normalization constant, and,\n * `Gamma` is the [gamma function](\n https://en.wikipedia.org/wiki/Gamma_function).\n\n The concentration parameters represent mean total counts of a `1` or a `0`,\n i.e.,\n\n ```none\n concentration1 = alpha = mean * total_concentration\n concentration0 = beta = (1. 
- mean) * total_concentration\n ```\n\n where `mean` in `(0, 1)` and `total_concentration` is a positive real number\n representing a mean `total_count = concentration1 + concentration0`.\n\n Distribution parameters are automatically broadcast in all functions; see\n examples for details.\n\n Warning: The samples can be zero due to finite precision.\n This happens more often when some of the concentrations are very small.\n Make sure to round the samples to `np.finfo(dtype).tiny` before computing the\n density.\n\n Samples of this distribution are reparameterized (pathwise differentiable).\n The derivatives are computed using the approach described in the paper\n\n [Michael Figurnov, Shakir Mohamed, Andriy Mnih.\n Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)\n\n #### Examples\n\n ```python\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n # Create a batch of three Beta distributions.\n alpha = [1, 2, 3]\n beta = [1, 2, 3]\n dist = tfd.Beta(alpha, beta)\n\n dist.sample([4, 5]) # Shape [4, 5, 3]\n\n # `x` has three batch entries, each with two samples.\n x = [[.1, .4, .5],\n [.2, .3, .5]]\n # Calculate the probability of each pair of samples under the corresponding\n # distribution in `dist`.\n dist.prob(x) # Shape [2, 3]\n ```\n\n ```python\n # Create batch_shape=[2, 3] via parameter broadcast:\n alpha = [[1.], [2]] # Shape [2, 1]\n beta = [3., 4, 5] # Shape [3]\n dist = tfd.Beta(alpha, beta)\n\n # alpha broadcast as: [[1., 1, 1,],\n # [2, 2, 2]]\n # beta broadcast as: [[3., 4, 5],\n # [3, 4, 5]]\n # batch_Shape [2, 3]\n dist.sample([4, 5]) # Shape [4, 5, 2, 3]\n\n x = [.2, .3, .5]\n # x will be broadcast as [[.2, .3, .5],\n # [.2, .3, .5]],\n # thus matching batch_shape [2, 3].\n dist.prob(x) # Shape [2, 3]\n ```\n\n Compute the gradients of samples w.r.t. the parameters:\n\n ```python\n alpha = tf.constant(1.0)\n beta = tf.constant(2.0)\n dist = tfd.Beta(alpha, beta)\n samples = dist.sample(5) # Shape [5]\n loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function\n # Unbiased stochastic gradients of the loss function\n grads = tf.gradients(loss, [alpha, beta])\n ```\n\n \"\"\"\n\n def __init__(self,\n concentration1,\n concentration0,\n validate_args=False,\n allow_nan_stats=True,\n name=\"Beta\"):\n \"\"\"Initialize a batch of Beta distributions.\n\n Args:\n concentration1: Positive floating-point `Tensor` indicating mean\n number of successes; aka \"alpha\". Implies `self.dtype` and\n `self.batch_shape`, i.e.,\n `concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.\n concentration0: Positive floating-point `Tensor` indicating mean\n number of failures; aka \"beta\". Otherwise has same semantics as\n `concentration1`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. 
When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype([concentration1, concentration0],\n dtype_hint=tf.float32)\n self._concentration1 = tensor_util.convert_nonref_to_tensor(\n concentration1, dtype=dtype, name=\"concentration1\")\n self._concentration0 = tensor_util.convert_nonref_to_tensor(\n concentration0, dtype=dtype, name=\"concentration0\")\n super(Beta, self).__init__(\n dtype=dtype,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,\n parameters=parameters,\n name=name)\n\n @staticmethod\n def _param_shapes(sample_shape):\n s = tf.convert_to_tensor(sample_shape, dtype=tf.int32)\n return dict(concentration1=s, concentration0=s)\n\n @classmethod\n def _params_event_ndims(cls):\n return dict(concentration1=0, concentration0=0)\n\n @property\n def concentration1(self):\n \"\"\"Concentration parameter associated with a `1` outcome.\"\"\"\n return self._concentration1\n\n @property\n def concentration0(self):\n \"\"\"Concentration parameter associated with a `0` outcome.\"\"\"\n return self._concentration0\n\n @property\n @deprecation.deprecated(\n \"2019-10-01\",\n (\"The `total_concentration` property is deprecated; instead use \"\n \"`dist.concentration1 + dist.concentration0`.\"),\n warn_once=True)\n def total_concentration(self):\n \"\"\"Sum of concentration parameters.\"\"\"\n with self._name_and_control_scope(\"total_concentration\"):\n return self.concentration1 + self.concentration0\n\n def _batch_shape_tensor(self, concentration1=None, concentration0=None):\n return prefer_static.broadcast_shape(\n prefer_static.shape(\n self.concentration1 if concentration1 is None else concentration1),\n prefer_static.shape(\n self.concentration0 if concentration0 is None else concentration0))\n\n def _batch_shape(self):\n return tf.broadcast_static_shape(\n self.concentration1.shape, self.concentration0.shape)\n\n def _event_shape_tensor(self):\n return tf.constant([], dtype=tf.int32)\n\n def _event_shape(self):\n return tf.TensorShape([])\n\n def _sample_n(self, n, seed=None):\n seed = SeedStream(seed, \"beta\")\n concentration1 = tf.convert_to_tensor(self.concentration1)\n concentration0 = tf.convert_to_tensor(self.concentration0)\n shape = self._batch_shape_tensor(concentration1, concentration0)\n expanded_concentration1 = tf.broadcast_to(concentration1, shape)\n expanded_concentration0 = tf.broadcast_to(concentration0, shape)\n gamma1_sample = tf.random.gamma(\n shape=[n], alpha=expanded_concentration1, dtype=self.dtype, seed=seed())\n gamma2_sample = tf.random.gamma(\n shape=[n], alpha=expanded_concentration0, dtype=self.dtype, seed=seed())\n beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)\n return beta_sample\n\n @distribution_util.AppendDocstring(_beta_sample_note)\n def _log_prob(self, x):\n concentration0 = tf.convert_to_tensor(self.concentration0)\n concentration1 = tf.convert_to_tensor(self.concentration1)\n return (self._log_unnormalized_prob(x, concentration1, concentration0) -\n self._log_normalization(concentration1, concentration0))\n\n @distribution_util.AppendDocstring(_beta_sample_note)\n def _prob(self, x):\n return tf.exp(self._log_prob(x))\n\n @distribution_util.AppendDocstring(_beta_sample_note)\n def _log_cdf(self, x):\n return 
tf.math.log(self._cdf(x))\n\n @distribution_util.AppendDocstring(_beta_sample_note)\n def _cdf(self, x):\n with tf.control_dependencies(self._maybe_assert_valid_sample(x)):\n concentration1 = tf.convert_to_tensor(self.concentration1)\n concentration0 = tf.convert_to_tensor(self.concentration0)\n shape = self._batch_shape_tensor(concentration1, concentration0)\n concentration1 = tf.broadcast_to(concentration1, shape)\n concentration0 = tf.broadcast_to(concentration0, shape)\n return tf.math.betainc(concentration1, concentration0, x)\n\n def _log_unnormalized_prob(self, x, concentration1, concentration0):\n with tf.control_dependencies(self._maybe_assert_valid_sample(x)):\n return (tf.math.xlogy(concentration1 - 1., x) +\n (concentration0 - 1.) * tf.math.log1p(-x))\n\n def _log_normalization(self, concentration1, concentration0):\n return (tf.math.lgamma(concentration1) + tf.math.lgamma(concentration0) -\n tf.math.lgamma(concentration1 + concentration0))\n\n def _entropy(self):\n concentration1 = tf.convert_to_tensor(self.concentration1)\n concentration0 = tf.convert_to_tensor(self.concentration0)\n total_concentration = concentration1 + concentration0\n return (self._log_normalization(concentration1, concentration0) -\n (concentration1 - 1.) * tf.math.digamma(concentration1) -\n (concentration0 - 1.) * tf.math.digamma(concentration0) +\n (total_concentration - 2.) * tf.math.digamma(total_concentration))\n\n def _mean(self):\n concentration1 = tf.convert_to_tensor(self.concentration1)\n return concentration1 / (concentration1 + self.concentration0)\n\n def _variance(self):\n concentration1 = tf.convert_to_tensor(self.concentration1)\n concentration0 = tf.convert_to_tensor(self.concentration0)\n total_concentration = concentration1 + concentration0\n return (concentration1 * concentration0 /\n ((total_concentration)**2 * (total_concentration + 1.)))\n\n @distribution_util.AppendDocstring(\n \"\"\"Note: The mode is undefined when `concentration1 <= 1` or\n `concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN`\n is used for undefined modes. If `self.allow_nan_stats` is `False` an\n exception is raised when one or more modes are undefined.\"\"\")\n def _mode(self):\n concentration1 = tf.convert_to_tensor(self.concentration1)\n concentration0 = tf.convert_to_tensor(self.concentration0)\n mode = (concentration1 - 1.) / (concentration1 + concentration0 - 2.)\n with tf.control_dependencies([] if self.allow_nan_stats else [ # pylint: disable=g-long-ternary\n assert_util.assert_less(\n tf.ones([], dtype=self.dtype),\n concentration1,\n message=\"Mode undefined for concentration1 <= 1.\"),\n assert_util.assert_less(\n tf.ones([], dtype=self.dtype),\n concentration0,\n message=\"Mode undefined for concentration0 <= 1.\")\n ]):\n return tf.where(\n (concentration1 > 1.) 
& (concentration0 > 1.),\n mode,\n dtype_util.as_numpy_dtype(self.dtype)(np.nan))\n\n def _maybe_assert_valid_sample(self, x):\n \"\"\"Checks the validity of a sample.\"\"\"\n if not self.validate_args:\n return []\n return [\n assert_util.assert_positive(x, message=\"Sample must be positive.\"),\n assert_util.assert_less(\n x, tf.ones([], x.dtype), message=\"Sample must be less than `1`.\")\n ]\n\n def _parameter_control_dependencies(self, is_init):\n if not self.validate_args:\n return []\n assertions = []\n for concentration in [self.concentration0, self.concentration1]:\n if is_init != tensor_util.is_ref(concentration):\n assertions.append(assert_util.assert_positive(\n concentration,\n message=\"Concentration parameter must be positive.\"))\n return assertions\n\n\n@kullback_leibler.RegisterKL(Beta, Beta)\ndef _kl_beta_beta(d1, d2, name=None):\n \"\"\"Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.\n\n Args:\n d1: instance of a Beta distribution object.\n d2: instance of a Beta distribution object.\n name: (optional) Name to use for created operations.\n default is \"kl_beta_beta\".\n\n Returns:\n Batchwise KL(d1 || d2)\n \"\"\"\n with tf.name_scope(name or \"kl_beta_beta\"):\n d1_concentration1 = tf.convert_to_tensor(d1.concentration1)\n d1_concentration0 = tf.convert_to_tensor(d1.concentration0)\n d2_concentration1 = tf.convert_to_tensor(d2.concentration1)\n d2_concentration0 = tf.convert_to_tensor(d2.concentration0)\n d1_total_concentration = d1_concentration1 + d1_concentration0\n d2_total_concentration = d2_concentration1 + d2_concentration0\n\n d1_log_normalization = d1._log_normalization( # pylint: disable=protected-access\n d1_concentration1, d1_concentration0)\n d2_log_normalization = d2._log_normalization( # pylint: disable=protected-access\n d2_concentration1, d2_concentration0)\n return ((d2_log_normalization - d1_log_normalization) -\n (tf.math.digamma(d1_concentration1) *\n (d2_concentration1 - d1_concentration1)) -\n (tf.math.digamma(d1_concentration0) *\n (d2_concentration0 - d1_concentration0)) +\n (tf.math.digamma(d1_total_concentration) *\n (d2_total_concentration - d1_total_concentration)))\n" ]
[ [ "tensorflow.python.util.deprecation.deprecated", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.math.betainc", "tensorflow.compat.v2.math.log1p", "tensorflow.compat.v2.broadcast_to", "tensorflow.compat.v2.math.xlogy", "tensorflow.compat.v2.TensorShape", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.broadcast_static_shape", "tensorflow.compat.v2.math.digamma", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.math.lgamma" ] ]
whiskie14142/spktype21
[ "7ed22365fe92cdb74c416d27634df96a45712953" ]
[ "source/spktype21.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"A supporting module for jplephem to handle data type 21 (Version 0.1.0)\n\nThis module computes position and velocity of a celestial small body, from a \nNASA SPICE SPK ephemeris kernel file of data type 21 (Extended Modified \nDifference Arrays).\nhttp://naif.jpl.nasa.gov/pub/naif/toolkit_docs/FORTRAN/req/spk.html\n\nYou can get SPK files for many solar system small bodies from HORIZONS \nsystem of NASA/JPL. See https://ssd.jpl.nasa.gov/?horizons\n\nThis module reads SPK files of data type 21, one of the types of binary SPK \nfile. \n\nAt the point of Oct. 2018, HORIZONS system provides files of type 21 for \nbinary SPK files by default. You can get type 21 binary SPK file for celestial \nsmall bodies through TELNET interface by answering back 'Binary' for \n'SPK file format'. Also you can get type 21 binary SPK file from:\nhttps://ssd.jpl.nasa.gov/x/spk.html\n\nModules required:\n jplephem (version 2.6 or later)\n numpy\n\nUsage:\n from spktype21 import SPKType21\n kernel = SPKType21.open('path')\n position, velocity = kernel.compute_type21(center, target, jd)\n \n where:\n path - path to the SPK file\n center - SPKID of central body (0 for SSB, 10 for Sun, etc.)\n target - SPKID of target body\n jd - time for computation (Julian date)\n\nExceptions:\n RuntimeError will be raised when:\n invalid data_type of SPK file, or\n SPK file contains too large table in EMDA record(s)\n ValueError will be raised when:\n invalid parameter(s) of compute_type21 function\n\nAuthor: Shushi Uetsuki (whiskie14142)\nThis module has been developed based on jplephem and FORTRAN source \nof the SPICE Toolkit of NASA/JPL/NAIF.\njplephem : https://pypi.org/project/jplephem/\nSPICE Toolkit : http://naif.jpl.nasa.gov/naif/toolkit.html\n\"\"\"\n\nfrom numpy import array, zeros, reshape\nfrom jplephem.daf import DAF\nfrom jplephem.names import target_names\n\nT0 = 2451545.0\nS_PER_DAY = 86400.0\n\n# Included from 'spk21.inc' on the FORTRAN source 'spke21.f'\nMAXTRM = 25\n\ndef jd(seconds):\n \"\"\"Convert a number of seconds since J2000 to a Julian Date.\n \"\"\"\n return T0 + seconds / S_PER_DAY\n\nclass SPKType21(object):\n \"\"\"Class for SPK kernel to handle data type 21 (Extended Modified Difference Arrays)\n \"\"\"\n def __init__(self, daf):\n self.daf = daf\n self.segments = [Segment(self.daf, *t) for t in self.daf.summaries()]\n ssec = lambda s : s.start_second\n self.segments.sort(key=ssec)\n \n # initialize arrays for spke21\n self.G = zeros(MAXTRM)\n \n self.REFPOS = zeros(3)\n self.REFVEL = zeros(3)\n \n self.KQ = array([0, 0, 0])\n self.FC = zeros(MAXTRM)\n self.FC[0] = 1.0\n self.WC = zeros(MAXTRM - 1)\n self.W = zeros(MAXTRM + 2)\n \n # initialize for compute_type21\n self.mda_record_exist = False\n self.current_segment_exist = False\n \n @classmethod\n def open(cls, path):\n \"\"\"Open the file at `path` and return an SPK instance.\n \"\"\"\n return cls(DAF(open(path, 'rb')))\n\n def close(self):\n \"\"\"Close this SPK file.\"\"\"\n self.daf.file.close()\n\n def __str__(self):\n daf = self.daf\n d = lambda b: b.decode('latin-1')\n lines = (str(segment) for segment in self.segments)\n return 'File type {0} and format {1} with {2} segments:\\n{3}'.format(\n d(daf.locidw), d(daf.locfmt), len(self.segments), '\\n'.join(lines))\n \n def comments(self):\n return self.daf.comments()\n\n def compute_type21(self, center, target, jd1, jd2=0.0):\n \"\"\"Compute position and velocity of target from SPK data (data type 21).\n Inputs:\n center - SPKID of the coordinate center (0 
for Solar System Barycenter, \n 10 for Sun, etc)\n target - SPKID of the target\n jd1, jd2 - Julian date of epoch for computation. (jd1 + jd2) will \n be used for computation. If you want precise definition of \n epoch, jd1 should be an integer or a half integer, and jd2 should\n be a relatively small floating point number.\n Returns:\n Position (X, Y, Z) and velocity (XD, YD, ZD) of the target at \n epoch. Position and velocity are provided as Numpy arrays \n respectively.\n \"\"\"\n eval_sec = (jd1 - T0)\n eval_sec = (eval_sec + jd2) * S_PER_DAY\n \n if self.mda_record_exist:\n if eval_sec >= self.mda_lb and eval_sec < self.mda_ub:\n result = self.spke21(eval_sec, self.mda_record)\n return result[0:3], result[3:]\n \n self.mda_record, self.mda_lb, self.mda_ub = self.get_MDA_record(eval_sec, target, center)\n self.mda_record_exist = True\n \n result = self.spke21(eval_sec, self.mda_record)\n return result[0:3], result[3:]\n \n def get_MDA_record(self, eval_sec, target, center):\n \"\"\"Return an EMDA record for defined epoch.\n Inputs:\n eval_sec - epoch for computation, seconds from J2000\n target - body ID of the target\n center - body ID of coordinate center\n Returns:\n EMDA record - a Numpy array of DLSIZE floating point numbers\n Exception:\n ValueError will be raised when:\n eval_sec is outside of SPK data\n target and center are not in SPK data\n RuntimeError will be raised when:\n invalid data type of SPK data\n \"\"\"\n \n # check whether the last segment can be used\n if self.current_segment_exist:\n if eval_sec >= self.current_segment.start_second \\\n and eval_sec < self.current_segment.end_second \\\n and target == self.current_segment.target \\\n and center == self.current_segment.center:\n \n return self.current_segment.get_MDA_record(eval_sec)\n\n # select segments with matched 'target' and 'center'\n matched = []\n for segment in self.segments:\n if segment.target == target and segment.center == center:\n matched.append(segment)\n if len(matched) == 0:\n raise ValueError('Invalid Target and/or Center')\n if eval_sec < matched[0].start_second or eval_sec >= matched[-1].end_second:\n raise ValueError('Invalid Time to evaluate')\n \n # select a segment based on eval_sec\n found = False\n for segment in matched:\n if eval_sec < segment.end_second:\n found = True\n self.current_segment = segment\n break\n if not found:\n self.current_segment = matched[-1]\n self.current_segment_exist = True\n \n # get the MDA record from selected segment\n if self.current_segment.data_type != 21:\n raise RuntimeError('Invalid data. Data Type must be 21')\n \n return self.current_segment.get_MDA_record(eval_sec)\n \n\n\n# left this module only 2018/10/12\n\n def spke21(self, ET, RECORD):\n \"\"\"Compute position and velocity from a Modified Difference Array record\n \n Inputs:\n ET: Epoch time to evaluate position and velocity (seconds since J2000)\n RECORD: A record of Extended Modified Difference Array\n Returns: STATE\n STATE: A numpy array which contains position and velocity\n \"\"\"\n \n# This method was translated from FORTRAN source code ‘spke21.f’ of SPICE \n# Toolkit and modified by Shushi Uetsuki.\n# \n# SPICE Toolkit for FORTRAN : http://naif.jpl.nasa.gov/naif/toolkit_FORTRAN.html\n# SPK Required Reading : http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/req/spk.html\n# \n# Unfortunately, I found some discrepancies between FORTRAN source code \n# and actual data contained in SPK files. 
So, I tried to compose a \n# method that compute positions and velocities correctly by referencing \n# code of spktype01.\n\n# Following comments start with #C were copied from original FORTRAN code.\n\n#C$ Abstract\n#C\n#C Evaluate a single SPK data record from a segment of type 21\n#C (Extended Difference Lines).\n#C\n#C$ Disclaimer\n#C\n#C THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE\n#C CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S.\n#C GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE\n#C PUBLICLY AVAILABLE UNDER U.S. EXPORT LAWS AND IS PROVIDED \"AS-IS\"\n#C TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY\n#C WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A\n#C PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC\n#C SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE\n#C SOFTWARE AND RELATED MATERIALS, HOWEVER USED.\n#C\n#C IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA\n#C BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT\n#C LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND,\n#C INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS,\n#C REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE\n#C REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY.\n#C\n#C RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF\n#C THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY\n#C CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE\n#C ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE.\n#C\n#C$ Required_Reading\n#C\n#C SPK\n#C TIME\n#C\n#C$ Keywords\n#C\n#C EPHEMERIS\n#C\n#C$ Declarations\n\n STATE = zeros(6)\n\n#C$ Brief_I/O\n#C\n#C Variable I/O Description\n#C -------- --- --------------------------------------------------\n#C ET I Evaluation epoch.\n#C RECORD I Data record.\n#C STATE O State (position and velocity).\n#C MAXTRM P Maximum number of terms per difference table\n#C component.\n#C\n#C$ Detailed_Input\n#C\n#C ET is an epoch at which a state vector is to be\n#C computed. The epoch is represented as seconds past\n#C J2000 TDB.\n#C\n#C RECORD is a data record which, when evaluated at epoch ET,\n#C will give the state (position and velocity) of an\n#C ephemeris object, relative to its center of motion,\n#C in an inertial reference frame.\n#C\n#C The contents of RECORD are as follows:\n#C\n#C RECORD(1): The difference table size per\n#C Cartesian component. Call this\n#C size MAXDIM; then the difference\n#C line (MDA) size DLSIZE is\n#C\n#C ( 4 * MAXDIM ) + 11\n#C \n#C RECORD(2)\n#C ...\n#C RECORD(1+DLSIZE): An extended difference line.\n#C The contents are:\n#C\n#C Dimension Description\n#C --------- ----------------------------------\n#C 1 Reference epoch of difference line\n#C MAXDIM Stepsize function vector\n#C 1 Reference position vector, x\n#C 1 Reference velocity vector, x\n#C 1 Reference position vector, y\n#C 1 Reference velocity vector, y\n#C 1 Reference position vector, z\n#C 1 Reference velocity vector, z\n#C MAXDIM,3 Modified divided difference\n#C arrays (MDAs)\n#C 1 Maximum integration order plus 1\n#C 3 Integration order array\n#C\n#C$ Detailed_Output\n#C\n#C STATE is the state resulting from evaluation of the input\n#C record at ET. 
Units are km and km/sec.\n#C\n#C$ Parameters\n#C\n#C MAXTRM is the maximum number of terms allowed in\n#C each component of the difference table \n#C contained in the input argument RECORD.\n#C See the INCLUDE file spk21.inc for the value\n#C of MAXTRM.\n#C \n#C$ Exceptions\n#C\n#C 1) If the maximum table size of the input record exceeds \n#C MAXTRM, the error SPICE(DIFFLINETOOLARGE) is signaled.\n#C\n#C$ Files\n#C\n#C None.\n#C\n#C$ Particulars\n#C\n#C The exact format and structure of type 21 (difference lines)\n#C segments are described in the SPK Required Reading file.\n#C\n#C SPKE21 is a modified version of SPKE01. The routine has been\n#C generalized to support variable size difference lines.\n#C\n#C$ Examples\n#C\n#C None.\n#C\n#C$ Restrictions\n#C\n#C Unknown.\n#C\n#C$ Literature_References\n#C\n#C NAIF Document 168.0, \"S- and P- Kernel (SPK) Specification and\n#C User's Guide\"\n#C\n#C$ Author_and_Institution\n#C\n#C N.J. Bachman (JPL)\n#C F.T. Krogh (JPL)\n#C W.L. Taber (JPL)\n#C I.M. Underwood (JPL)\n#C\n#C$ Version\n#C\n#C- SPICELIB Version 1.0.0, 03-FEB-2014 (NJB) (FTK) (WLT) (IMU)\n#C\n#C-&\n# \n#C$ Index_Entries\n#C\n#C evaluate type_21 spk segment\n#C\n#C-&\n\n#C\n#C The first element of the input record is the dimension\n#C of the difference table MAXDIM. \n#C\n\n# The FORTRAN source code indicates that RECORD[0] contains MAXDIM, but actual \n# data record does not contain it. MAXDIM is contained in each segment.\n\n MAXDIM = self.current_segment.MAXDIM\n\n \n if MAXDIM > MAXTRM:\n mes = ('SPKE21 \\nThe input record has a maximum table dimension ' +\n 'of {0}, while the maximum supported by this routine is {1}. ' +\n 'It is possible that this problem is due to your software ' +\n 'beeing out of date.').format(MAXDIM, MAXTRM)\n raise RuntimeError(mes)\n return STATE\n \n#C\n#C Unpack the contents of the MDA array.\n#C\n#C Name Dimension Description\n#C ------ --------- -------------------------------\n#C TL 1 Reference epoch of record\n#C G MAXDIM Stepsize function vector\n#C REFPOS 3 Reference position vector\n#C REFVEL 3 Reference velocity vector\n#C DT MAXDIM,NTE Modified divided difference arrays\n#C KQMAX1 1 Maximum integration order plus 1\n#C KQ NTE Integration order array\n#C\n#C For our purposes, NTE is always 3.\n#C\n\n# The FORTRAN source code indicates that RECORD[1] contains TL, but on the \n# actual data RECORD[0] contains it, and all addresses for following data are \n# shifted forward by one.\n\n self.TL = RECORD[0]\n self.G = RECORD[1:MAXDIM + 1]\n\n#C \n#C Collect the reference position and velocity.\n#C \n self.REFPOS[0] = RECORD[MAXDIM + 1]\n self.REFVEL[0] = RECORD[MAXDIM + 2]\n \n self.REFPOS[1] = RECORD[MAXDIM + 3]\n self.REFVEL[1] = RECORD[MAXDIM + 4]\n \n self.REFPOS[2] = RECORD[MAXDIM + 5]\n self.REFVEL[2] = RECORD[MAXDIM + 6]\n \n#C\n#C Initializing the difference table is one aspect of this routine\n#C that's a bit different from SPKE01. Here the first dimension of\n#C the table in the input record can be smaller than MAXTRM. 
So, we\n#C must transfer separately the portions of the table corresponding\n#C to each component.\n#C\n self.DT = reshape(RECORD[MAXDIM + 7:MAXDIM * 4 + 7], (MAXDIM, 3), \n order='F')\n \n self.KQMAX1 = int(RECORD[4 * MAXDIM + 7])\n self.KQ[0] = int(RECORD[4 * MAXDIM + 8])\n self.KQ[1] = int(RECORD[4 * MAXDIM + 9])\n self.KQ[2] = int(RECORD[4 * MAXDIM + 10])\n#C \n#C Next we set up for the computation of the various differences\n#C \n self.DELTA = ET - self.TL\n self.TP = self.DELTA\n self.MQ2 = self.KQMAX1 - 2\n self.KS = self.KQMAX1 - 1\n\n#C\n#C This is clearly collecting some kind of coefficients. \n#C The problem is that we have no idea what they are...\n#C \n#C The G coefficients are supposed to be some kind of step size \n#C vector. \n#C \n#C TP starts out as the delta t between the request time and the\n#C difference line's reference epoch. We then change it from DELTA\n#C by the components of the stepsize vector G.\n#C\n for J in range(1, self.MQ2 + 1):\n#C\n#C Make sure we're not about to attempt division by zero.\n#C\n if self.G[J-1] == 0.0:\n mes = ('SPKE21\\nA value of zero was found at index {0} ' + \n 'of the step size vector.').format(J)\n raise RuntimeError(mes)\n return STATE\n \n self.FC[J] = self.TP / self.G[J-1]\n self.WC[J-1] = self.DELTA / self.G[J-1]\n self.TP = self.DELTA + self.G[J-1]\n\n#C\n#C Collect KQMAX1 reciprocals. \n#C \n for J in range(1, self.KQMAX1 + 1):\n self.W[J-1] = 1.0 / float(J)\n\n#C\n#C Compute the W(K) terms needed for the position interpolation\n#C (Note, it is assumed throughout this routine that KS, which \n#C starts out as KQMAX1-1 (the ``maximum integration'') \n#C is at least 2.\n#C\n self.JX = 0\n self.KS1 = self.KS - 1\n \n while self.KS >= 2:\n \n self.JX = self.JX + 1\n \n for J in range(1, self.JX + 1):\n self.W[J+self.KS-1] = self.FC[J] * self.W[J+self.KS1-1] - self.WC[J-1] * self.W[J+self.KS-1]\n \n self.KS = self.KS1\n self.KS1 = self.KS1 - 1\n\n#C\n#C Perform position interpolation: (Note that KS = 1 right now.\n#C We don't know much more than that.)\n#C\n for I in range(1, 3 + 1):\n \n self.KQQ = self.KQ[I-1]\n self.SUM = 0.0\n \n for J in range(self.KQQ, 0, -1):\n self.SUM = self.SUM + self.DT[J-1, I-1] * self.W[J+self.KS-1]\n \n STATE[I-1] = self.REFPOS[I-1] + self.DELTA * (self.REFVEL[I-1] + self.DELTA * self.SUM)\n\n#C\n#C Again we need to compute the W(K) coefficients that are \n#C going to be used in the velocity interpolation. 
\n#C (Note, at this point, KS = 1, KS1 = 0.)\n#C \n for J in range(1, self.JX + 1):\n self.W[J+self.KS-1] = self.FC[J] * self.W[J+self.KS1-1] - self.WC[J-1] * self.W[J+self.KS-1]\n \n self.KS = self.KS - 1\n \n#C\n#C Perform velocity interpolation:\n#C\n for I in range(1, 3 + 1):\n self.KQQ = self.KQ[I-1]\n self.SUM = 0.0\n \n for J in range(self.KQQ, 0, -1):\n self.SUM = self.SUM + self.DT[J-1, I-1] * self.W[J+self.KS-1]\n \n STATE[I+3-1] = self.REFVEL[I-1] + self.DELTA * self.SUM\n \n return STATE\n \n \n\nclass Segment(object):\n \"\"\"A single segment of a SPK file.\n\n There are several items of information about each segment that are\n loaded from the underlying SPK file, and made available as object\n attributes:\n\n segment.source - official ephemeris name, like 'DE-0430LE-0430'\n segment.start_second - initial epoch, as seconds from J2000\n segment.end_second - final epoch, as seconds from J2000\n segment.start_jd - start_second, converted to a Julian Date\n segment.end_jd - end_second, converted to a Julian Date\n segment.center - integer center identifier\n segment.target - integer target identifier\n segment.frame - integer frame identifier\n segment.data_type - integer data type identifier\n segment.start_i - index where segment starts\n segment.end_i - index where segment ends\n \"\"\"\n def __init__(self, daf, source, descriptor):\n self.daf = daf\n self.source = source\n (self.start_second, self.end_second, self.target, self.center,\n self.frame, self.data_type, self.start_i, self.end_i) = descriptor\n self.start_jd = jd(self.start_second)\n self.end_jd = jd(self.end_second)\n \n# 'SPK Required Reading' indicates that the penultimate element of the segment \n# is the difference line size (DLSIZE), but actual data contains there a MAXDIM.\n \n self.MAXDIM = int(self.daf.map_array(self.end_i - 1, self.end_i - 1))\n self.DLSIZE = 4 * self.MAXDIM + 11\n\n def __str__(self):\n return self.describe(verbose=False)\n\n def describe(self, verbose=True):\n \"\"\"Return a textual description of the segment.\n \"\"\"\n center = titlecase(target_names.get(self.center, 'Unknown center'))\n target = titlecase(target_names.get(self.target, 'Unknown target'))\n text = ('{0.start_jd:.2f}..{0.end_jd:.2f} {1} ({0.center})'\n ' -> {2} ({0.target})'\n ' data_type={0.data_type}'.format(self, center, target))\n if verbose:\n text += ('\\n frame={0.frame} data_type={0.data_type} source={1}'\n .format(self, self.source.decode('ascii')))\n return text\n \n def get_MDA_record(self, time_sec):\n \"\"\"Return a Modified Difference Array(MDA) record for the time to \n evaluate with its effective time boundaries (lower and upper).\n Inputs:\n time_sec - epoch for computation, seconds from J2000\n Returns: mda_record, lower_boundary, upper_boundary\n mda_record: A Modified Difference Array record\n lower_boundary: lower boundary of the record, seconds since J2000\n upper_boundary: upper boundary of the record, seconds since J2000\n \"\"\"\n\n # Number of records in this segment\n entry_count = int(self.daf.map_array(self.end_i, self.end_i))\n \n # Number of entries in epoch directory \n epoch_dir_count = entry_count // 100\n \n # serch target epoch in epoch directory to narrow serching aria\n if epoch_dir_count >= 1:\n epoch_dir = self.daf.map_array(self.end_i - epoch_dir_count - 1,\n self.end_i - 2)\n found = False\n for i in range(1, epoch_dir_count + 1):\n if epoch_dir[i-1] > time_sec:\n found = True\n break\n if found:\n serch_last_index = i * 100\n serch_start_index = (i - 1) * 100 + 1\n else:\n 
serch_last_index = entry_count\n serch_start_index = epoch_dir_count * 100 + 1\n else:\n serch_last_index = entry_count\n serch_start_index = 1\n\n # epoch_table contains epochs for all records in this segment \n epoch_table = self.daf.map_array(self.start_i + (entry_count * self.DLSIZE),\n self.start_i + (entry_count * self.DLSIZE) + entry_count - 1)\n\n # serch target epoch in epoch_table\n found = False\n for i in range(serch_start_index, serch_last_index + 1):\n if epoch_table[i-1] > time_sec:\n found = True\n break\n if not found:\n i = serch_last_index\n record_index = i\n upper_boundary = epoch_table[i-1]\n if i != 1:\n lower_boundary = epoch_table[i-2]\n else:\n lower_boundary = self.start_second\n \n mda_record = self.daf.map_array(self.start_i + ((record_index - 1) * self.DLSIZE),\n self.start_i + (record_index * self.DLSIZE) - 1)\n\n # mda_record : one record of MDA\n # lower_boundary : lower boundary of epoch in this MDA record\n # upper_boundary : upper boundary of epoch in this MDA record\n return mda_record, lower_boundary, upper_boundary\n\ndef titlecase(name):\n \"\"\"Title-case target `name` if it looks safe to do so.\n \"\"\"\n return name if name.startswith(('1', 'C/', 'DSS-')) else name.title()\n\n\n\n\n\n\n\n" ]
[ [ "numpy.array", "numpy.reshape", "numpy.zeros" ] ]
nvogtvincent/parcels
[ "6f6dbadacaae54949ade9acd4e4a57dd8b5af398" ]
[ "parcels/kernel/basekernel.py" ]
[ "import re\nimport _ctypes\nimport inspect\nimport numpy.ctypeslib as npct\nfrom time import time as ostime\nfrom os import path\nfrom os import remove\nfrom sys import platform\nfrom sys import version_info\nfrom ast import FunctionDef\nfrom hashlib import md5\nfrom parcels.tools.loggers import logger\nimport numpy as np\nfrom numpy import ndarray\n\ntry:\n from mpi4py import MPI\nexcept:\n MPI = None\n\nfrom parcels.tools.global_statics import get_cache_dir\n\n# === import just necessary field classes to perform setup checks === #\nfrom parcels.field import Field\nfrom parcels.field import VectorField\nfrom parcels.field import NestedField\nfrom parcels.field import SummedField\nfrom parcels.grid import GridCode\nfrom parcels.field import FieldOutOfBoundError\nfrom parcels.field import FieldOutOfBoundSurfaceError\nfrom parcels.field import TimeExtrapolationError\nfrom parcels.tools.statuscodes import StateCode, OperationCode, ErrorCode\nfrom parcels.application_kernels.advection import AdvectionRK4_3D\nfrom parcels.application_kernels.advection import AdvectionAnalytical\n\n__all__ = ['BaseKernel']\n\n\nre_indent = re.compile(r\"^(\\s+)\")\n\n\nclass BaseKernel(object):\n \"\"\"Base super class for base Kernel objects that encapsulates auto-generated code.\n\n :arg fieldset: FieldSet object providing the field information (possibly None)\n :arg ptype: PType object for the kernel particle\n :arg pyfunc: (aggregated) Kernel function\n :arg funcname: function name\n :param delete_cfiles: Boolean whether to delete the C-files after compilation in JIT mode (default is True)\n\n Note: A Kernel is either created from a compiled <function ...> object\n or the necessary information (funcname, funccode, funcvars) is provided.\n The py_ast argument may be derived from the code string, but for\n concatenation, the merged AST plus the new header definition is required.\n \"\"\"\n _pyfunc = None\n _fieldset = None\n _ptype = None\n funcname = None\n\n def __init__(self, fieldset, ptype, pyfunc=None, funcname=None, funccode=None, py_ast=None, funcvars=None,\n c_include=\"\", delete_cfiles=True):\n self._fieldset = fieldset\n self.field_args = None\n self.const_args = None\n self._ptype = ptype\n self._lib = None\n self.delete_cfiles = delete_cfiles\n self._cleanup_files = None\n self._cleanup_lib = None\n self._c_include = c_include\n\n # Derive meta information from pyfunc, if not given\n self._pyfunc = None\n self.funcname = funcname or pyfunc.__name__\n self.name = \"%s%s\" % (ptype.name, self.funcname)\n self.ccode = \"\"\n self.funcvars = funcvars\n self.funccode = funccode\n self.py_ast = py_ast\n self.dyn_srcs = []\n self.static_srcs = []\n self.src_file = None\n self.lib_file = None\n self.log_file = None\n\n # Generate the kernel function and add the outer loop\n if self._ptype.uses_jit:\n src_file_or_files, self.lib_file, self.log_file = self.get_kernel_compile_files()\n if type(src_file_or_files) in (list, dict, tuple, ndarray):\n self.dyn_srcs = src_file_or_files\n else:\n self.src_file = src_file_or_files\n\n def __del__(self):\n # Clean-up the in-memory dynamic linked libraries.\n # This is not really necessary, as these programs are not that large, but with the new random\n # naming scheme which is required on Windows OS'es to deal with updates to a Parcels' kernel.\n try:\n self.remove_lib()\n except:\n pass\n self._fieldset = None\n self.field_args = None\n self.const_args = None\n self.funcvars = None\n self.funccode = None\n\n @property\n def ptype(self):\n return self._ptype\n\n 
@property\n def pyfunc(self):\n return self._pyfunc\n\n @property\n def fieldset(self):\n return self._fieldset\n\n @property\n def c_include(self):\n return self._c_include\n\n @property\n def _cache_key(self):\n field_keys = \"\"\n if self.field_args is not None:\n field_keys = \"-\".join(\n [\"%s:%s\" % (name, field.units.__class__.__name__) for name, field in self.field_args.items()])\n key = self.name + self.ptype._cache_key + field_keys + ('TIME:%f' % ostime())\n return md5(key.encode('utf-8')).hexdigest()\n\n @staticmethod\n def fix_indentation(string):\n \"\"\"Fix indentation to allow in-lined kernel definitions\"\"\"\n lines = string.split('\\n')\n indent = re_indent.match(lines[0])\n if indent:\n lines = [line.replace(indent.groups()[0], '', 1) for line in lines]\n return \"\\n\".join(lines)\n\n def check_fieldsets_in_kernels(self, pyfunc):\n \"\"\"\n function checks the integrity of the fieldset with the kernels.\n This function is to be called from the derived class when setting up the 'pyfunc'.\n \"\"\"\n if self.fieldset is not None:\n if pyfunc is AdvectionRK4_3D:\n warning = False\n if isinstance(self._fieldset.W, Field) and self._fieldset.W.creation_log != 'from_nemo' and \\\n self._fieldset.W._scaling_factor is not None and self._fieldset.W._scaling_factor > 0:\n warning = True\n if type(self._fieldset.W) in [SummedField, NestedField]:\n for f in self._fieldset.W:\n if f.creation_log != 'from_nemo' and f._scaling_factor is not None and f._scaling_factor > 0:\n warning = True\n if warning:\n logger.warning_once('Note that in AdvectionRK4_3D, vertical velocity is assumed positive towards increasing z.\\n'\n ' If z increases downward and w is positive upward you can re-orient it downwards by setting fieldset.W.set_scaling_factor(-1.)')\n elif pyfunc is AdvectionAnalytical:\n if self._ptype.uses_jit:\n raise NotImplementedError('Analytical Advection only works in Scipy mode')\n if self._fieldset.U.interp_method != 'cgrid_velocity':\n raise NotImplementedError('Analytical Advection only works with C-grids')\n if self._fieldset.U.grid.gtype not in [GridCode.CurvilinearZGrid, GridCode.RectilinearZGrid]:\n raise NotImplementedError('Analytical Advection only works with Z-grids in the vertical')\n\n def check_kernel_signature_on_version(self):\n \"\"\"\n returns numkernelargs\n \"\"\"\n numkernelargs = 0\n if self._pyfunc is not None:\n if version_info[0] < 3:\n numkernelargs = len(inspect.getargspec(self._pyfunc).args)\n else:\n numkernelargs = len(inspect.getfullargspec(self._pyfunc).args)\n return numkernelargs\n\n def remove_lib(self):\n if self._lib is not None:\n BaseKernel.cleanup_unload_lib(self._lib)\n del self._lib\n self._lib = None\n\n all_files_array = []\n if self.src_file is None:\n if self.dyn_srcs is not None:\n [all_files_array.append(fpath) for fpath in self.dyn_srcs]\n else:\n if self.src_file is not None:\n all_files_array.append(self.src_file)\n if self.log_file is not None:\n all_files_array.append(self.log_file)\n if self.lib_file is not None and all_files_array is not None and self.delete_cfiles is not None:\n BaseKernel.cleanup_remove_files(self.lib_file, all_files_array, self.delete_cfiles)\n\n # If file already exists, pull new names. 
This is necessary on a Windows machine, because\n # Python's ctype does not deal in any sort of manner well with dynamic linked libraries on this OS.\n if self._ptype.uses_jit:\n src_file_or_files, self.lib_file, self.log_file = self.get_kernel_compile_files()\n if type(src_file_or_files) in (list, dict, tuple, ndarray):\n self.dyn_srcs = src_file_or_files\n else:\n self.src_file = src_file_or_files\n\n def get_kernel_compile_files(self):\n \"\"\"\n Returns the correct src_file, lib_file, log_file for this kernel\n \"\"\"\n if MPI:\n mpi_comm = MPI.COMM_WORLD\n mpi_rank = mpi_comm.Get_rank()\n cache_name = self._cache_key # only required here because loading is done by Kernel class instead of Compiler class\n dyn_dir = get_cache_dir() if mpi_rank == 0 else None\n dyn_dir = mpi_comm.bcast(dyn_dir, root=0)\n basename = cache_name if mpi_rank == 0 else None\n basename = mpi_comm.bcast(basename, root=0)\n basename = basename + \"_%d\" % mpi_rank\n else:\n cache_name = self._cache_key # only required here because loading is done by Kernel class instead of Compiler class\n dyn_dir = get_cache_dir()\n basename = \"%s_0\" % cache_name\n lib_path = \"lib\" + basename\n src_file_or_files = None\n if type(basename) in (list, dict, tuple, ndarray):\n src_file_or_files = [\"\", ] * len(basename)\n for i, src_file in enumerate(basename):\n src_file_or_files[i] = \"%s.c\" % path.join(dyn_dir, src_file)\n else:\n src_file_or_files = \"%s.c\" % path.join(dyn_dir, basename)\n lib_file = \"%s.%s\" % (path.join(dyn_dir, lib_path), 'dll' if platform == 'win32' else 'so')\n log_file = \"%s.log\" % path.join(dyn_dir, basename)\n return src_file_or_files, lib_file, log_file\n\n def compile(self, compiler):\n \"\"\" Writes kernel code to file and compiles it.\"\"\"\n all_files_array = []\n if self.src_file is None:\n if self.dyn_srcs is not None:\n for dyn_src in self.dyn_srcs:\n with open(dyn_src, 'w') as f:\n f.write(self.ccode)\n all_files_array.append(dyn_src)\n compiler.compile(self.dyn_srcs, self.lib_file, self.log_file)\n else:\n if self.src_file is not None:\n with open(self.src_file, 'w') as f:\n f.write(self.ccode)\n if self.src_file is not None:\n all_files_array.append(self.src_file)\n compiler.compile(self.src_file, self.lib_file, self.log_file)\n if len(all_files_array) > 0:\n logger.info(\"Compiled %s ==> %s\" % (self.name, self.lib_file))\n if self.log_file is not None:\n all_files_array.append(self.log_file)\n\n def load_lib(self):\n self._lib = npct.load_library(self.lib_file, '.')\n self._function = self._lib.particle_loop\n\n def merge(self, kernel, kclass):\n funcname = self.funcname + kernel.funcname\n func_ast = None\n if self.py_ast is not None:\n func_ast = FunctionDef(name=funcname, args=self.py_ast.args, body=self.py_ast.body + kernel.py_ast.body,\n decorator_list=[], lineno=1, col_offset=0)\n delete_cfiles = self.delete_cfiles and kernel.delete_cfiles\n return kclass(self.fieldset, self.ptype, pyfunc=None,\n funcname=funcname, funccode=self.funccode + kernel.funccode,\n py_ast=func_ast, funcvars=self.funcvars + kernel.funcvars,\n c_include=self._c_include + kernel.c_include,\n delete_cfiles=delete_cfiles)\n\n def __add__(self, kernel):\n if not isinstance(kernel, BaseKernel):\n kernel = BaseKernel(self.fieldset, self.ptype, pyfunc=kernel)\n return self.merge(kernel, BaseKernel)\n\n def __radd__(self, kernel):\n if not isinstance(kernel, BaseKernel):\n kernel = BaseKernel(self.fieldset, self.ptype, pyfunc=kernel)\n return kernel.merge(self, BaseKernel)\n\n @staticmethod\n def 
cleanup_remove_files(lib_file, all_files_array, delete_cfiles):\n if lib_file is not None:\n if path.isfile(lib_file): # and delete_cfiles\n [remove(s) for s in [lib_file, ] if path is not None and path.exists(s)]\n if delete_cfiles and len(all_files_array) > 0:\n [remove(s) for s in all_files_array if path is not None and path.exists(s)]\n\n @staticmethod\n def cleanup_unload_lib(lib):\n # Clean-up the in-memory dynamic linked libraries.\n # This is not really necessary, as these programs are not that large, but with the new random\n # naming scheme which is required on Windows OS'es to deal with updates to a Parcels' kernel.\n if lib is not None:\n try:\n _ctypes.FreeLibrary(lib._handle) if platform == 'win32' else _ctypes.dlclose(lib._handle)\n except:\n pass\n\n def remove_deleted(self, pset, output_file, endtime):\n \"\"\"\n Utility to remove all particles that signalled deletion.\n\n This version is generally applicable to all structures and collections\n \"\"\"\n indices = [i for i, p in enumerate(pset) if p.state == OperationCode.Delete]\n if len(indices) > 0 and output_file is not None:\n output_file.write(pset, endtime, deleted_only=indices)\n pset.remove_indices(indices)\n\n def load_fieldset_jit(self, pset):\n \"\"\"\n Updates the loaded fields of pset's fieldset according to the chunk information within their grids\n \"\"\"\n if pset.fieldset is not None:\n for g in pset.fieldset.gridset.grids:\n g.cstruct = None # This force to point newly the grids from Python to C\n # Make a copy of the transposed array to enforce\n # C-contiguous memory layout for JIT mode.\n for f in pset.fieldset.get_fields():\n if type(f) in [VectorField, NestedField, SummedField]:\n continue\n if f.data.dtype != np.float32:\n raise RuntimeError('Field %s data needs to be float32 in JIT mode' % f.name)\n if f in self.field_args.values():\n f.chunk_data()\n else:\n for block_id in range(len(f.data_chunks)):\n f.data_chunks[block_id] = None\n f.c_data_chunks[block_id] = None\n\n for g in pset.fieldset.gridset.grids:\n g.load_chunk = np.where(g.load_chunk == g.chunk_loading_requested,\n g.chunk_loaded_touched, g.load_chunk)\n if len(g.load_chunk) > g.chunk_not_loaded: # not the case if a field in not called in the kernel\n if not g.load_chunk.flags.c_contiguous:\n g.load_chunk = g.load_chunk.copy()\n if not g.depth.flags.c_contiguous:\n g.depth = g.depth.copy()\n if not g.lon.flags.c_contiguous:\n g.lon = g.lon.copy()\n if not g.lat.flags.c_contiguous:\n g.lat = g.lat.copy()\n\n def evaluate_particle(self, p, endtime, sign_dt, dt, analytical=False):\n \"\"\"\n Execute the kernel evaluation of for an individual particle.\n :arg p: object of (sub-)type (ScipyParticle, JITParticle) or (sub-)type of BaseParticleAccessor\n :arg fieldset: fieldset of the containing ParticleSet (e.g. pset.fieldset)\n :arg analytical: flag indicating the analytical advector or an iterative advection\n :arg endtime: endtime of this overall kernel evaluation step\n :arg dt: computational integration timestep\n \"\"\"\n variables = self._ptype.variables\n # back up variables in case of OperationCode.Repeat\n p_var_back = {}\n pdt_prekernels = .0\n # Don't execute particles that aren't started yet\n sign_end_part = np.sign(endtime - p.time)\n # Compute min/max dt for first timestep. 
Only use endtime-p.time for one timestep\n reset_dt = False\n if abs(endtime - p.time) < abs(p.dt):\n dt_pos = abs(endtime - p.time)\n reset_dt = True\n else:\n dt_pos = abs(p.dt)\n reset_dt = False\n\n # ==== numerically stable; also making sure that continuously-recovered particles do end successfully,\n # as they fulfil the condition here on entering at the final calculation here. ==== #\n if ((sign_end_part != sign_dt) or np.isclose(dt_pos, 0)) and not np.isclose(dt, 0):\n if abs(p.time) >= abs(endtime):\n p.set_state(StateCode.Success)\n return p\n\n while p.state in [StateCode.Evaluate, OperationCode.Repeat] or np.isclose(dt, 0):\n for var in variables:\n p_var_back[var.name] = getattr(p, var.name)\n try:\n pdt_prekernels = sign_dt * dt_pos\n p.dt = pdt_prekernels\n state_prev = p.state\n res = self._pyfunc(p, self._fieldset, p.time)\n if res is None:\n res = StateCode.Success\n\n if res is StateCode.Success and p.state != state_prev:\n res = p.state\n\n if not analytical and res == StateCode.Success and not np.isclose(p.dt, pdt_prekernels):\n res = OperationCode.Repeat\n\n except FieldOutOfBoundError as fse_xy:\n res = ErrorCode.ErrorOutOfBounds\n p.exception = fse_xy\n except FieldOutOfBoundSurfaceError as fse_z:\n res = ErrorCode.ErrorThroughSurface\n p.exception = fse_z\n except TimeExtrapolationError as fse_t:\n res = ErrorCode.ErrorTimeExtrapolation\n p.exception = fse_t\n\n except Exception as e:\n res = ErrorCode.Error\n p.exception = e\n\n # Handle particle time and time loop\n if res in [StateCode.Success, OperationCode.Delete]:\n # Update time and repeat\n p.time += p.dt\n if reset_dt and p.dt == pdt_prekernels:\n p.dt = dt\n p.update_next_dt()\n if analytical:\n p.dt = np.inf\n if abs(endtime - p.time) < abs(p.dt):\n dt_pos = abs(endtime - p.time)\n reset_dt = True\n else:\n dt_pos = abs(p.dt)\n reset_dt = False\n\n sign_end_part = np.sign(endtime - p.time)\n if res != OperationCode.Delete and not np.isclose(dt_pos, 0) and (sign_end_part == sign_dt):\n res = StateCode.Evaluate\n if sign_end_part != sign_dt:\n dt_pos = 0\n\n p.set_state(res)\n if np.isclose(dt, 0):\n break\n else:\n p.set_state(res)\n # Try again without time update\n for var in variables:\n if var.name not in ['dt', 'state']:\n setattr(p, var.name, p_var_back[var.name])\n if abs(endtime - p.time) < abs(p.dt):\n dt_pos = abs(endtime - p.time)\n reset_dt = True\n else:\n dt_pos = abs(p.dt)\n reset_dt = False\n\n sign_end_part = np.sign(endtime - p.time)\n if sign_end_part != sign_dt:\n dt_pos = 0\n break\n return p\n\n def execute_jit(self, pset, endtime, dt):\n pass\n\n def execute_python(self, pset, endtime, dt):\n pass\n\n def execute(self, pset, endtime, dt, recovery=None, output_file=None, execute_once=False):\n pass\n" ]
[ [ "numpy.where", "numpy.ctypeslib.load_library", "numpy.isclose", "numpy.sign" ] ]
joaopdss/aXeleRate
[ "791c8b29056ed11bd0ed306e620664577ec9724c" ]
[ "axelerate/networks/common_utils/callbacks.py" ]
[ "import numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras import backend as K\n\ndef cosine_decay_with_warmup(global_step,\n learning_rate_base,\n total_steps,\n warmup_learning_rate=0.0,\n warmup_steps=0,\n hold_base_rate_steps=0):\n \"\"\"Cosine decay schedule with warm up period.\n Cosine annealing learning rate as described in:\n Loshchilov and Hutter, SGDR: Stochastic Gradient Descent with Warm Restarts.\n ICLR 2017. https://arxiv.org/abs/1608.03983\n In this schedule, the learning rate grows linearly from warmup_learning_rate\n to learning_rate_base for warmup_steps, then transitions to a cosine decay\n schedule.\n Arguments:\n global_step {int} -- global step.\n learning_rate_base {float} -- base learning rate.\n total_steps {int} -- total number of training steps.\n Keyword Arguments:\n warmup_learning_rate {float} -- initial learning rate for warm up. (default: {0.0})\n warmup_steps {int} -- number of warmup steps. (default: {0})\n hold_base_rate_steps {int} -- Optional number of steps to hold base learning rate\n before decaying. (default: {0})\n Returns:\n a float representing learning rate.\n Raises:\n ValueError: if warmup_learning_rate is larger than learning_rate_base,\n or if warmup_steps is larger than total_steps.\n \"\"\"\n\n if total_steps < warmup_steps:\n raise ValueError('total_steps must be larger or equal to '\n 'warmup_steps.')\n learning_rate = 0.5 * learning_rate_base * (1 + np.cos(\n np.pi *\n (global_step - warmup_steps - hold_base_rate_steps\n ) / float(total_steps - warmup_steps - hold_base_rate_steps)))\n if hold_base_rate_steps > 0:\n learning_rate = np.where(global_step > warmup_steps + hold_base_rate_steps,\n learning_rate, learning_rate_base)\n if warmup_steps > 0:\n if learning_rate_base < warmup_learning_rate:\n raise ValueError('learning_rate_base must be larger or equal to '\n 'warmup_learning_rate.')\n slope = (learning_rate_base - warmup_learning_rate) / warmup_steps\n warmup_rate = slope * global_step + warmup_learning_rate\n learning_rate = np.where(global_step < warmup_steps, warmup_rate,\n learning_rate)\n return np.where(global_step > total_steps, 0.0, learning_rate)\n\n\nclass WarmUpCosineDecayScheduler(keras.callbacks.Callback):\n \"\"\"Cosine decay with warmup learning rate scheduler\n \"\"\"\n\n def __init__(self,\n learning_rate_base,\n total_steps,\n global_step_init=0,\n warmup_learning_rate=0.0,\n warmup_steps=0,\n hold_base_rate_steps=0,\n verbose=0):\n \"\"\"Constructor for cosine decay with warmup learning rate scheduler.\n Arguments:\n learning_rate_base {float} -- base learning rate.\n total_steps {int} -- total number of training steps.\n Keyword Arguments:\n global_step_init {int} -- initial global step, e.g. from previous checkpoint.\n warmup_learning_rate {float} -- initial learning rate for warm up. (default: {0.0})\n warmup_steps {int} -- number of warmup steps. (default: {0})\n hold_base_rate_steps {int} -- Optional number of steps to hold base learning rate\n before decaying. (default: {0})\n verbose {int} -- 0: quiet, 1: update messages. 
(default: {0})\n \"\"\"\n\n super(WarmUpCosineDecayScheduler, self).__init__()\n self.learning_rate_base = learning_rate_base\n self.total_steps = total_steps\n self.global_step = global_step_init\n self.warmup_learning_rate = warmup_learning_rate\n self.warmup_steps = warmup_steps\n self.hold_base_rate_steps = hold_base_rate_steps\n self.verbose = verbose\n self.learning_rates = []\n self.current_lr = 0.0\n \n def on_epoch_end(self, epoch, logs={}):\n if self.verbose == 1:\n print('Epoch %05d: Learning rate is %s.\\n' % (epoch, self.current_lr)) \n\n def on_batch_end(self, batch, logs=None):\n self.global_step = self.global_step + 1\n lr = K.get_value(self.model.optimizer.lr)\n self.learning_rates.append(lr)\n\n def on_batch_begin(self, batch, logs=None):\n self.current_lr = cosine_decay_with_warmup(global_step=self.global_step,\n learning_rate_base=self.learning_rate_base,\n total_steps=self.total_steps,\n warmup_learning_rate=self.warmup_learning_rate,\n warmup_steps=self.warmup_steps,\n hold_base_rate_steps=self.hold_base_rate_steps)\n K.set_value(self.model.optimizer.lr, self.current_lr)\n if self.verbose ==2:\n print('\\nBatch %05d: setting learning rate to %s.' % (self.global_step + 1, self.current_lr))\n\n" ]
[ [ "numpy.where", "tensorflow.keras.backend.get_value", "tensorflow.keras.backend.set_value" ] ]
monroid/openvino
[ "031e998a15ec738c64cc2379d7f30fb73087c272", "8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6" ]
[ "model-optimizer/unit_tests/extensions/front/div_test.py", "runtime/bindings/python/tests/test_frontend/test_frontend_onnx.py" ]
[ "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport unittest\n\nimport numpy as np\n\nfrom extensions.front.div import Div\nfrom mo.utils.ir_engine.compare_graphs import compare_graphs\nfrom unit_tests.utils.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, \\\n connect_data\n\nnodes = {\n **regular_op_with_shaped_data('placeholder_1', [1, 227, 227, 3], {'type': 'Parameter'}),\n **regular_op_with_shaped_data('placeholder_2', [1, 227, 227, 3], {'type': 'Parameter'}),\n **regular_op_with_shaped_data('div', None, {'op': 'Div', 'type': 'Divide', 'name': 'my_div'}),\n\n **regular_op_with_shaped_data('reciprocal', [1, 227, 227, 3], {'type': 'Power'}),\n **valued_const_with_data('minus_one', np.array(-1.)),\n **regular_op_with_shaped_data('mul', None, {'type': 'Multiply'}),\n\n **result(),\n}\n\n\nclass TestDiv(unittest.TestCase):\n def test_div_test_1(self):\n # Test with two different inputs from two placeholders\n graph = build_graph(nodes, [\n *connect('placeholder_1', '0:div'),\n *connect('placeholder_2', '1:div'),\n *connect('div', 'output'),\n ], nodes_with_edges_only=True)\n Div().find_and_replace_pattern(graph)\n\n graph_ref = build_graph(nodes, [\n *connect('placeholder_1', '0:mul'),\n *connect('placeholder_2', '0:reciprocal'),\n *connect('minus_one', '1:reciprocal'),\n *connect('reciprocal', '1:mul'),\n *connect('mul', 'output'),\n ], nodes_with_edges_only=True)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)\n self.assertTrue(flag, resp)\n self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='Multiply')[0]]['name'] == 'my_div')\n\n def test_div_test_2(self):\n # Test with two same inputs from one placeholder\n graph = build_graph(nodes, [\n *connect('placeholder_1:0', '0:div'),\n *connect_data('placeholder_1:0', '1:div'),\n *connect('div', 'output'),\n ], nodes_with_edges_only=True)\n Div().find_and_replace_pattern(graph)\n\n graph_ref = build_graph(nodes, [\n *connect('placeholder_1:0', '0:mul'),\n *connect_data('placeholder_1:0', '0:reciprocal'),\n *connect('minus_one', '1:reciprocal'),\n *connect('reciprocal', '1:mul'),\n *connect('mul', 'output'),\n ], nodes_with_edges_only=True)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)\n self.assertTrue(flag, resp)\n self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='Multiply')[0]]['name'] == 'my_div')\n\n def test_div_with_integer(self):\n # Test where transformation should not be applied because the divisor is integer\n graph = build_graph({\n **regular_op_with_shaped_data('parameter', [1, 227, 227, 3], {'type': 'Parameter', 'data_type': np.int32}),\n **valued_const_with_data('const', np.array([-1.], dtype=np.int32)),\n **regular_op_with_shaped_data('div', None, {'op': 'Div', 'type': 'Divide', 'name': 'my_div'}),\n **result()},\n [\n *connect('parameter:0', '0:div'),\n *connect_data('const:0', '1:div'),\n *connect('div', 'output'),\n ])\n graph_ref = graph.copy()\n Div().find_and_replace_pattern(graph)\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)\n self.assertTrue(flag, resp)\n", "# Copyright (C) 2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport onnx\nimport numpy as np\nfrom onnx.helper import make_graph, make_model, make_tensor_value_info\nimport pytest\n\nfrom ngraph.frontend import FrontEndManager\nfrom tests.runtime import get_runtime\n\n\ndef create_onnx_model():\n add = 
onnx.helper.make_node(\"Add\", inputs=[\"x\", \"y\"], outputs=[\"z\"])\n const_tensor = onnx.helper.make_tensor(\"const_tensor\", onnx.TensorProto.FLOAT, (2, 2), [0.5, 1, 1.5, 2.0])\n const_node = onnx.helper.make_node(\"Constant\", [], outputs=[\"const_node\"],\n value=const_tensor, name=\"const_node\")\n mul = onnx.helper.make_node(\"Mul\", inputs=[\"z\", \"const_node\"], outputs=[\"out\"])\n input_tensors = [\n make_tensor_value_info(\"x\", onnx.TensorProto.FLOAT, (2, 2)),\n make_tensor_value_info(\"y\", onnx.TensorProto.FLOAT, (2, 2)),\n ]\n output_tensors = [make_tensor_value_info(\"out\", onnx.TensorProto.FLOAT, (2, 2))]\n graph = make_graph([add, const_node, mul], \"graph\", input_tensors, output_tensors)\n return make_model(graph, producer_name=\"ngraph ONNX Importer\")\n\n\ndef run_function(function, *inputs, expected):\n runtime = get_runtime()\n computation = runtime.computation(function)\n actual = computation(*inputs)\n assert len(actual) == len(expected)\n for i in range(len(actual)):\n np.testing.assert_allclose(expected[i], actual[i], rtol=1e-3, atol=1e-6)\n\n\nfem = FrontEndManager()\nonnx_model_filename = \"model.onnx\"\nONNX_FRONTEND_NAME = \"onnx\"\n\n\ndef setup_module():\n onnx.save_model(create_onnx_model(), onnx_model_filename)\n\n\ndef teardown_module():\n os.remove(onnx_model_filename)\n\n\ndef skip_if_onnx_frontend_is_disabled():\n front_ends = fem.get_available_front_ends()\n if ONNX_FRONTEND_NAME not in front_ends:\n pytest.skip()\n\n\ndef test_convert():\n skip_if_onnx_frontend_is_disabled()\n\n fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME)\n assert fe\n\n model = fe.load(onnx_model_filename)\n assert model\n\n function = fe.convert(model)\n assert function\n\n a = np.array([[1, 2], [3, 4]], dtype=np.float32)\n b = np.array([[2, 3], [4, 5]], dtype=np.float32)\n expected = np.array([[1.5, 5], [10.5, 18]], dtype=np.float32)\n run_function(function, a, b, expected=[expected])\n\n\ndef test_decode_and_convert():\n skip_if_onnx_frontend_is_disabled()\n\n fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME)\n assert fe\n\n model = fe.load(onnx_model_filename)\n assert model\n\n decoded_function = fe.decode(model)\n assert decoded_function\n for op in decoded_function.get_ordered_ops():\n assert op.get_type_name() in [\"Parameter\", \"Constant\", \"ONNXFrameworkNode\",\n \"ONNXSubgraphFrameworkNode\", \"Result\"]\n\n fe.convert(decoded_function)\n assert decoded_function\n for op in decoded_function.get_ordered_ops():\n assert op.get_type_name() not in [\"ONNXFrameworkNode\", \"ONNXSubgraphFrameworkNode\"]\n\n a = np.array([[1, 2], [3, 4]], dtype=np.float32)\n b = np.array([[2, 3], [4, 5]], dtype=np.float32)\n expected = np.array([[1.5, 5], [10.5, 18]], dtype=np.float32)\n run_function(decoded_function, a, b, expected=[expected])\n\n\ndef test_load_by_model():\n skip_if_onnx_frontend_is_disabled()\n\n fe = fem.load_by_model(onnx_model_filename)\n assert fe\n assert fe.get_name() == \"onnx\"\n model = fe.load(onnx_model_filename)\n assert model\n decoded_function = fe.decode(model)\n assert decoded_function\n\n assert not fem.load_by_model(\"test.xx\")\n assert not fem.load_by_model(\"onnx.yy\")\n" ]
[ [ "numpy.array" ], [ "numpy.array", "numpy.testing.assert_allclose" ] ]
SyneRBI/SIRF-Contribs
[ "130223d9bc11991eadcd11f9b715aea34c4842fd" ]
[ "src/Python/sirf/contrib/kcl/Prior.py" ]
[ "\r\n\r\nimport numpy as np\r\n\r\n\r\nclass Prior(object):\r\n \r\n def __init__(self,imageSize, sWindowSize=3, imageCropFactor=[0]):\r\n \r\n self.imageSize = imageSize if len(imageSize)==3 else imageSize.append(1)\r\n self.imageCropFactor = imageCropFactor\r\n if np.mod(sWindowSize,2):\r\n self.sWindowSize = sWindowSize\r\n else:\r\n raise ValueError(\"search window size must be odd\")\r\n self.is3D = 1 if imageSize[2]>1 else 0 \r\n self.nS = sWindowSize**3 if self.is3D else sWindowSize**2\r\n _,self.imageSizeCrop= self.imCrop() \r\n self.SearchWindow, self.Wd = self.__neighborhood(self.sWindowSize)\r\n\r\n def __neighborhood(self,w):\r\n \r\n n = self.imageSizeCrop[0]\r\n m = self.imageSizeCrop[1]\r\n h = self.imageSizeCrop[2]\r\n wlen = 2*np.floor(w/2)\r\n widx = xidx = yidx = np.arange(-wlen/2,wlen/2+1)\r\n\r\n if h==1:\r\n zidx = [0]\r\n nN = w*w\r\n else:\r\n zidx = widx\r\n nN = w*w*w\r\n \r\n Y,X,Z = np.meshgrid(np.arange(0,m), np.arange(0,n), np.arange(0,h)) \r\n N = np.zeros([n*m*h, nN],dtype='int32')\r\n D = np.zeros([n*m*h, nN],dtype='float')\r\n l = 0\r\n for x in xidx:\r\n Xnew = self.__setBoundary(X + x, n)\r\n for y in yidx:\r\n Ynew = self.__setBoundary(Y + y, m)\r\n for z in zidx:\r\n Znew = self.__setBoundary(Z + z, h)\r\n N[:,l] = (Xnew + (Ynew)*n + (Znew)*n*m).reshape(-1,1).flatten('F')\r\n D[:,l] = np.sqrt(x**2+y**2+z**2)\r\n l += 1\r\n D = 1/D\r\n D[np.isinf(D)]= 0\r\n D = D/np.sum(D,axis=1).reshape(-1,1)\r\n return N, D\r\n \r\n def __setBoundary(self,X,n):\r\n idx = X<0\r\n X[idx] = X[idx]+n\r\n idx = X>n-1\r\n X[idx] = X[idx]-n\r\n return X.flatten('F')\r\n\r\n def imCrop(self,img=None):\r\n if np.any(self.imageCropFactor):\r\n if len(self.imageCropFactor)==1:\r\n self.imageCropFactor = self.imageCropFactor*3\r\n I = 0\r\n if self.imageCropFactor[0]:\r\n self.imageCropFactor[0] = np.max([2.5, self.imageCropFactor[0]])\r\n I = np.floor(self.imageSize[0]/self.imageCropFactor[0]).astype('int')\r\n J = 0\r\n if self.imageCropFactor[1]:\r\n self.imageCropFactor[1] = np.max([2.5, self.imageCropFactor[1]])\r\n J = np.floor(self.imageSize[1]/self.imageCropFactor[1]).astype('int')\r\n K = 0\r\n if self.imageCropFactor[2] and self.is3D:\r\n self.imageCropFactor[2] = np.max([2.5, self.imageCropFactor[2]])\r\n K = np.floor(self.imageSize[2]/self.imageCropFactor[2]).astype('int') \r\n imageSizeCrop = [np.arange(I,self.imageSize[0]-I).shape[0],\r\n np.arange(J,self.imageSize[1]-J).shape[0],\r\n np.arange(K,self.imageSize[2]-K).shape[0]]\r\n if img is not None:\r\n if self.is3D:\r\n img = img[I:self.imageSize[0]-I, J:self.imageSize[1]-J, K:self.imageSize[2]-K] \r\n else:\r\n img = img[I:self.imageSize[0]-I, J:self.imageSize[1]-J] \r\n else:\r\n imageSizeCrop = self.imageSize\r\n return img,imageSizeCrop \r\n\r\n def imCropUndo(self,img):\r\n if np.any(self.imageCropFactor):\r\n tmp = img\r\n img = np.zeros(self.imageSize,tmp.dtype)\r\n I = (self.imageSize[0] - self.imageSizeCrop[0])//2\r\n J = (self.imageSize[1] - self.imageSizeCrop[1])//2\r\n K = (self.imageSize[2] - self.imageSizeCrop[2])//2\r\n if self.is3D:\r\n img[I:self.imageSize[0]-I, J:self.imageSize[1]-J, K:self.imageSize[2]-K] = tmp \r\n else:\r\n img[I:self.imageSize[0]-I, J:self.imageSize[1]-J] = tmp \r\n return img\r\n \r\n def Grad(self,img):\r\n img,_ = self.imCrop(img)\r\n img = img.flatten('F')\r\n imgGrad = img[self.SearchWindow] - img.reshape(-1,1)\r\n imgGrad[np.isnan(imgGrad)] = 0\r\n return imgGrad\r\n \r\n def GradT(self,imgGrad):\r\n dP = -2*np.sum(self.Wd*imgGrad,axis=1)\r\n dP = 
dP.reshape(self.imageSizeCrop,order='F')\r\n dP = self.imCropUndo(dP)\r\n dP[np.isnan(dP)] = 0\r\n return dP\r\n \r\n def Div(self,img):\r\n img,_ = self.imCrop(img)\r\n img = img.flatten('F')\r\n imgDiv = img[self.SearchWindow] + img.reshape(-1,1)\r\n imgDiv[np.isnan(imgDiv)] = 0\r\n return imgDiv\r\n \r\n def gaussianWeights(self,img,sigma):\r\n return 1/np.sqrt(2*np.pi*sigma**2)*np.exp(-0.5*self.Grad(img)**2/sigma**2)\r\n \r\n def BowshserWeights(self,img,b):\r\n if b>self.nS:\r\n raise ValueError(\"Number of most similar voxels must be smaller than number of voxels per neighbourhood\")\r\n imgGradAbs = np.abs(self.Grad(img))\r\n Wb = 0*imgGradAbs\r\n for i in range(imgGradAbs.shape[0]):\r\n idx = np.argsort(imgGradAbs[i,:])\r\n Wb[i,idx[0:b]]=1\r\n return Wb\r\n \r\n \r\n \r\n \r\n \r\n \r\n " ]
[ [ "numpy.sum", "numpy.zeros", "numpy.isinf", "numpy.any", "numpy.floor", "numpy.argsort", "numpy.mod", "numpy.arange", "numpy.max", "numpy.isnan", "numpy.sqrt" ] ]
jimmy-academia/Deeper-Learnings
[ "ac363efe5450dd2751c0c1bea0ee7af457f7ac24" ]
[ "codestosort/NaturalLanguage/module/bestmodel.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nclass BestNet(torch.nn.Module):\n def __init__(self, embedding_dim):\n super(BestNet, self).__init__()\n\n self.embedding_dim = embedding_dim\n self.hidden_dim = 256\n self.embedding_dropout=0.6\n self.desc_rnn_size = 100\n\n self.rnn = nn.GRU(\n input_size=self.embedding_dim, hidden_size=self.hidden_dim,\n num_layers=1, batch_first=True, bidirectional=True\n )\n\n self.rnn_desc = nn.GRU(\n input_size=self.embedding_dim, hidden_size=self.desc_rnn_size,\n num_layers=1, batch_first=True, bidirectional=True\n )\n\n self.emb_drop = nn.Dropout(self.embedding_dropout)\n self.M = nn.Parameter(torch.FloatTensor(2*self.hidden_dim, 2*self.hidden_dim))\n self.b = nn.Parameter(torch.FloatTensor([0]))\n self.Wc = nn.Parameter(torch.FloatTensor(2*self.hidden_dim, self.embedding_dim))\n self.We = nn.Parameter(torch.FloatTensor(self.embedding_dim, self.embedding_dim))\n self.attn = nn.Linear(2*self.hidden_dim, 2*self.hidden_dim)\n self.init_params_()\n self.tech_w = 0.0\n\n def init_params_(self):\n #Initializing parameters\n nn.init.xavier_normal_(self.M)\n\n # Set forget gate bias to 2\n size = self.rnn.bias_hh_l0.size(0)\n self.rnn.bias_hh_l0.data[size//4:size//2] = 2\n\n size = self.rnn.bias_ih_l0.size(0)\n self.rnn.bias_ih_l0.data[size//4:size//2] = 2\n\n size = self.rnn_desc.bias_hh_l0.size(0)\n self.rnn_desc.bias_hh_l0.data[size//4:size//2] = 2\n\n size = self.rnn_desc.bias_ih_l0.size(0)\n self.rnn_desc.bias_ih_l0.data[size//4:size//2] = 2\n\n # def forward(self, context, options):\n # logits = []\n # for i, option in enumerate(options.transpose(1, 0)):\n # gits = []\n # for context in context.transpose(1,0):\n # git = self.forward_one_option(context, option)\n # gits.append(logit)\n # logit = torch.stack(gits).mean(0)\n # logits = torch.stack(logits, 1)\n\n # return logits.squeeze()\n\n # def forward(self, context, options):\n # logits = []\n # for i, option in enumerate(options.transpose(1, 0)):\n # logit = self.forward_one_option(context, option)\n # logits.append(logit)\n # logits = torch.stack(logits, 1)\n\n # return logits.squeeze()\n def forward(self, context, options):\n logits = []\n for i, option in enumerate(options.transpose(1, 0)):\n logit_ = []\n for utter in context.transpose(1,0):\n logit = self.forward_one_option(utter, option) # 10,1,1\n logit_.append(logit)\n logits.append(torch.stack(logit_,1).mean(1))\n logits = torch.stack(logits, 1)\n\n return logits.squeeze()\n\n def forward_one_option(self, context, option):\n context, c_h, option, o_h = self.forward_crosspath(context, option)\n context_attn = self.forward_attn(context, o_h)\n option_attn = self.forward_attn(option, c_h)\n final = self.forward_fc(context_attn, option_attn)\n return final\n\n def forward_crosspath(self, context, option):\n context, c_h = self.rnn(self.emb_drop(context))\n c_h = torch.cat([i for i in c_h], dim=-1)\n option, o_h = self.rnn(self.emb_drop(option))\n o_h = torch.cat([i for i in o_h], dim=-1)\n return context, c_h.squeeze(), option, o_h.squeeze()\n\n def forward_attn(self, output, hidden):\n max_len = output.size(1)\n b_size = output.size(0)\n\n hidden = hidden.squeeze(0).unsqueeze(2)\n attn = self.attn(output.contiguous().view(b_size*max_len, -1))\n attn = attn.view(b_size, max_len, -1)\n attn_energies = (attn.bmm(hidden).transpose(1,2))\n alpha = F.softmax(attn_energies.squeeze(1), dim=-1)\n alpha = alpha.unsqueeze(1)\n weighted_attn = alpha.bmm(output)\n\n return 
weighted_attn.squeeze()\n\n def forward_fc(self, context, option):\n out = torch.mm(context, self.M).unsqueeze(1)\n out = torch.bmm(out, option.unsqueeze(2))\n out = out + self.b\n return out\n\n def save(self, filepath):\n torch.save(self.state_dict(), filepath)\n\n" ]
[ [ "torch.FloatTensor", "torch.stack", "torch.nn.Linear", "torch.nn.init.xavier_normal_", "torch.mm", "torch.nn.GRU", "torch.cat", "torch.nn.Dropout" ] ]
ameisner/legacypipe
[ "5ffe6fb2458618b68653580badc4a94e1ecb4f04" ]
[ "py/legacypipe/unwise.py" ]
[ "import os\nimport numpy as np\nimport fitsio\nfrom astrometry.util.fits import fits_table\nfrom astrometry.util.ttime import Time\n\nfrom wise.unwise import get_unwise_tractor_image\n\nimport logging\nlogger = logging.getLogger('legacypipe.unwise')\ndef info(*args):\n from legacypipe.utils import log_info\n log_info(logger, args)\ndef debug(*args):\n from legacypipe.utils import log_debug\n log_debug(logger, args)\n\n'''\nThis function was imported whole from the tractor repo:\nwise/forcedphot.py because I figured we were doing enough\nLegacySurvey-specific stuff in it that it was time to just import it\nand edit it rather than build elaborate options.\n'''\ndef unwise_forcedphot(cat, tiles, band=1, roiradecbox=None,\n use_ceres=True, ceres_block=8,\n save_fits=False, get_models=False, ps=None,\n psf_broadening=None,\n pixelized_psf=False,\n get_masks=None,\n move_crpix=False,\n modelsky_dir=None):\n '''\n Given a list of tractor sources *cat*\n and a list of unWISE tiles *tiles* (a fits_table with RA,Dec,coadd_id)\n runs forced photometry, returning a FITS table the same length as *cat*.\n\n *get_masks*: the WCS to resample mask bits into.\n '''\n from tractor import PointSource, Tractor, ExpGalaxy, DevGalaxy\n from tractor.sersic import SersicGalaxy\n\n if not pixelized_psf and psf_broadening is None:\n # PSF broadening in post-reactivation data, by band.\n # Newer version from Aaron's email to decam-chatter, 2018-06-14.\n broadening = { 1: 1.0405, 2: 1.0346, 3: None, 4: None }\n psf_broadening = broadening[band]\n\n if False:\n from astrometry.util.plotutils import PlotSequence\n ps = PlotSequence('wise-forced-w%i' % band)\n plots = (ps is not None)\n if plots:\n import pylab as plt\n\n wantims = (plots or save_fits or get_models)\n wanyband = 'w'\n if get_models:\n models = []\n\n wband = 'w%i' % band\n\n Nsrcs = len(cat)\n phot = fits_table()\n # Filled in based on unique tile overlap\n phot.wise_coadd_id = np.array([' '] * Nsrcs, dtype='U8')\n phot.wise_x = np.zeros(Nsrcs, np.float32)\n phot.wise_y = np.zeros(Nsrcs, np.float32)\n phot.set('psfdepth_%s' % wband, np.zeros(Nsrcs, np.float32))\n nexp = np.zeros(Nsrcs, np.int16)\n mjd = np.zeros(Nsrcs, np.float64)\n central_flux = np.zeros(Nsrcs, np.float32)\n\n ra = np.array([src.getPosition().ra for src in cat])\n dec = np.array([src.getPosition().dec for src in cat])\n\n fskeys = ['prochi2', 'profracflux']\n fitstats = {}\n\n if get_masks:\n mh,mw = get_masks.shape\n maskmap = np.zeros((mh,mw), np.uint32)\n\n tims = []\n for tile in tiles:\n info('Reading WISE tile', tile.coadd_id, 'band', band)\n tim = get_unwise_tractor_image(tile.unwise_dir, tile.coadd_id, band,\n bandname=wanyband, roiradecbox=roiradecbox)\n if tim is None:\n debug('Actually, no overlap with WISE coadd tile', tile.coadd_id)\n continue\n\n if plots:\n sig1 = tim.sig1\n plt.clf()\n plt.imshow(tim.getImage(), interpolation='nearest', origin='lower',\n cmap='gray', vmin=-3 * sig1, vmax=10 * sig1)\n plt.colorbar()\n tag = '%s W%i' % (tile.coadd_id, band)\n plt.title('%s: tim data' % tag)\n ps.savefig()\n plt.clf()\n plt.hist((tim.getImage() * tim.inverr)[tim.inverr > 0].ravel(),\n range=(-5,10), bins=100)\n plt.xlabel('Per-pixel intensity (Sigma)')\n plt.title(tag)\n ps.savefig()\n\n if move_crpix and band in [1, 2]:\n realwcs = tim.wcs.wcs\n x,y = realwcs.crpix\n tile_crpix = tile.get('crpix_w%i' % band)\n dx = tile_crpix[0] - 1024.5\n dy = tile_crpix[1] - 1024.5\n realwcs.set_crpix(x+dx, y+dy)\n debug('unWISE', tile.coadd_id, 'band', band, 'CRPIX', x,y,\n 'shift 
by', dx,dy, 'to', realwcs.crpix)\n\n if modelsky_dir and band in [1, 2]:\n fn = os.path.join(modelsky_dir, '%s.%i.mod.fits' % (tile.coadd_id, band))\n if not os.path.exists(fn):\n raise RuntimeError('WARNING: does not exist:', fn)\n x0,x1,y0,y1 = tim.roi\n bg = fitsio.FITS(fn)[2][y0:y1, x0:x1]\n assert(bg.shape == tim.shape)\n\n if plots:\n plt.clf()\n plt.subplot(1,2,1)\n plt.imshow(tim.getImage(), interpolation='nearest', origin='lower',\n cmap='gray', vmin=-3 * sig1, vmax=5 * sig1)\n plt.subplot(1,2,2)\n plt.imshow(bg, interpolation='nearest', origin='lower',\n cmap='gray', vmin=-3 * sig1, vmax=5 * sig1)\n tag = '%s W%i' % (tile.coadd_id, band)\n plt.suptitle(tag)\n ps.savefig()\n plt.clf()\n ha = dict(range=(-5,10), bins=100, histtype='step')\n plt.hist((tim.getImage() * tim.inverr)[tim.inverr > 0].ravel(),\n color='b', label='Original', **ha)\n plt.hist(((tim.getImage()-bg) * tim.inverr)[tim.inverr > 0].ravel(),\n color='g', label='Minus Background', **ha)\n plt.axvline(0, color='k', alpha=0.5)\n plt.xlabel('Per-pixel intensity (Sigma)')\n plt.legend()\n plt.title(tag + ': background')\n ps.savefig()\n\n # Actually subtract the background!\n tim.data -= bg\n\n # Floor the per-pixel variances,\n # and add Poisson contribution from sources\n if band in [1,2]:\n # in Vega nanomaggies per pixel\n floor_sigma = {1: 0.5, 2: 2.0}\n poissons = {1: 0.15, 2: 0.3}\n with np.errstate(divide='ignore'):\n new_ie = 1. / np.sqrt(\n (1./tim.inverr)**2 +\n floor_sigma[band]**2 +\n poissons[band]**2 * np.maximum(0., tim.data))\n new_ie[tim.inverr == 0] = 0.\n\n if plots:\n plt.clf()\n plt.plot((1. / tim.inverr[tim.inverr>0]).ravel(),\n (1./new_ie[tim.inverr>0]).ravel(), 'b.')\n plt.title('unWISE per-pixel error: %s band %i' %\n (tile.coadd_id, band))\n plt.xlabel('original')\n plt.ylabel('floored')\n ps.savefig()\n\n assert(np.all(np.isfinite(new_ie)))\n assert(np.all(new_ie >= 0.))\n tim.inverr = new_ie\n\n # Expand a 3-pixel radius around weight=0 (saturated) pixels\n # from Eddie via crowdsource\n # https://github.com/schlafly/crowdsource/blob/7069da3e7d9d3124be1cbbe1d21ffeb63fc36dcc/python/wise_proc.py#L74\n ## FIXME -- W3/W4 ??\n satlimit = 85000\n msat = ((tim.data > satlimit) | ((tim.nims == 0) & (tim.nuims > 1)))\n from scipy.ndimage.morphology import binary_dilation\n xx, yy = np.mgrid[-3:3+1, -3:3+1]\n dilate = xx**2+yy**2 <= 3**2\n msat = binary_dilation(msat, dilate)\n nbefore = np.sum(tim.inverr == 0)\n tim.inverr[msat] = 0\n nafter = np.sum(tim.inverr == 0)\n debug('Masking an additional', (nafter-nbefore), 'near-saturated pixels in unWISE',\n tile.coadd_id, 'band', band)\n\n # Read mask file?\n if get_masks:\n from astrometry.util.resample import resample_with_wcs, OverlapError\n # unwise_dir can be a colon-separated list of paths\n tilemask = None\n for d in tile.unwise_dir.split(':'):\n fn = os.path.join(d, tile.coadd_id[:3], tile.coadd_id,\n 'unwise-%s-msk.fits.gz' % tile.coadd_id)\n if os.path.exists(fn):\n debug('Reading unWISE mask file', fn)\n x0,x1,y0,y1 = tim.roi\n tilemask = fitsio.FITS(fn)[0][y0:y1,x0:x1]\n break\n if tilemask is None:\n info('unWISE mask file for tile', tile.coadd_id, 'does not exist')\n else:\n try:\n tanwcs = tim.wcs.wcs\n assert(tanwcs.shape == tilemask.shape)\n Yo,Xo,Yi,Xi,_ = resample_with_wcs(get_masks, tanwcs,\n intType=np.int16)\n # Only deal with mask pixels that are set.\n I, = np.nonzero(tilemask[Yi,Xi] > 0)\n # Trim to unique area for this tile\n rr,dd = get_masks.pixelxy2radec(Xo[I]+1, Yo[I]+1)\n good = radec_in_unique_area(rr, dd, tile.ra1, 
tile.ra2,\n tile.dec1, tile.dec2)\n I = I[good]\n maskmap[Yo[I],Xo[I]] = tilemask[Yi[I], Xi[I]]\n except OverlapError:\n # Shouldn't happen by this point\n print('Warning: no overlap between WISE tile', tile.coadd_id, 'and brick')\n\n if plots:\n plt.clf()\n plt.imshow(tilemask, interpolation='nearest', origin='lower')\n plt.title('Tile %s: mask' % tile.coadd_id)\n ps.savefig()\n plt.clf()\n plt.imshow(maskmap, interpolation='nearest', origin='lower')\n plt.title('Tile %s: accumulated maskmap' % tile.coadd_id)\n ps.savefig()\n\n # The tiles have some overlap, so zero out pixels outside the\n # tile's unique area.\n th,tw = tim.shape\n xx,yy = np.meshgrid(np.arange(tw), np.arange(th))\n rr,dd = tim.wcs.wcs.pixelxy2radec(xx+1, yy+1)\n unique = radec_in_unique_area(rr, dd, tile.ra1, tile.ra2,\n tile.dec1, tile.dec2)\n debug('Tile', tile.coadd_id, '- total of', np.sum(unique),\n 'unique pixels out of', len(unique.flat), 'total pixels')\n if get_models:\n # Save the inverr before blanking out non-unique pixels, for making coadds with no gaps!\n # (actually, slightly more subtly, expand unique area by 1 pixel)\n from scipy.ndimage.morphology import binary_dilation\n du = binary_dilation(unique)\n tim.coadd_inverr = tim.inverr * du\n tim.inverr[unique == False] = 0.\n del xx,yy,rr,dd,unique\n\n if plots:\n sig1 = tim.sig1\n plt.clf()\n plt.imshow(tim.getImage() * (tim.inverr > 0),\n interpolation='nearest', origin='lower',\n cmap='gray', vmin=-3 * sig1, vmax=10 * sig1)\n plt.colorbar()\n tag = '%s W%i' % (tile.coadd_id, band)\n plt.title('%s: tim data (unique)' % tag)\n ps.savefig()\n\n if pixelized_psf:\n from unwise_psf import unwise_psf\n if (band == 1) or (band == 2):\n # we only have updated PSFs for W1 and W2\n psfimg = unwise_psf.get_unwise_psf(band, tile.coadd_id,\n modelname='neo6_unwisecat')\n else:\n psfimg = unwise_psf.get_unwise_psf(band, tile.coadd_id)\n\n if band == 4:\n # oversample (the unwise_psf models are at native W4 5.5\"/pix,\n # while the unWISE coadds are made at 2.75\"/pix.\n ph,pw = psfimg.shape\n subpsf = np.zeros((ph*2-1, pw*2-1), np.float32)\n from astrometry.util.util import lanczos3_interpolate\n xx,yy = np.meshgrid(np.arange(0., pw-0.51, 0.5, dtype=np.float32),\n np.arange(0., ph-0.51, 0.5, dtype=np.float32))\n xx = xx.ravel()\n yy = yy.ravel()\n ix = xx.astype(np.int32)\n iy = yy.astype(np.int32)\n dx = (xx - ix).astype(np.float32)\n dy = (yy - iy).astype(np.float32)\n psfimg = psfimg.astype(np.float32)\n rtn = lanczos3_interpolate(ix, iy, dx, dy, [subpsf.flat], [psfimg])\n\n if plots:\n plt.clf()\n plt.imshow(psfimg, interpolation='nearest', origin='lower')\n plt.title('Original PSF model')\n ps.savefig()\n plt.clf()\n plt.imshow(subpsf, interpolation='nearest', origin='lower')\n plt.title('Subsampled PSF model')\n ps.savefig()\n\n psfimg = subpsf\n del xx, yy, ix, iy, dx, dy\n\n from tractor.psf import PixelizedPSF\n psfimg /= psfimg.sum()\n fluxrescales = {1: 1.04, 2: 1.005, 3: 1.0, 4: 1.0}\n psfimg *= fluxrescales[band]\n tim.psf = PixelizedPSF(psfimg)\n\n if psf_broadening is not None and not pixelized_psf:\n # psf_broadening is a factor by which the PSF FWHMs\n # should be scaled; the PSF is a little wider\n # post-reactivation.\n psf = tim.getPsf()\n from tractor import GaussianMixturePSF\n if isinstance(psf, GaussianMixturePSF):\n debug('Broadening PSF: from', psf)\n p0 = psf.getParams()\n pnames = psf.getParamNames()\n p1 = [p * psf_broadening**2 if 'var' in name else p\n for (p, name) in zip(p0, pnames)]\n psf.setParams(p1)\n debug('Broadened PSF:', 
psf)\n else:\n print('WARNING: cannot apply psf_broadening to WISE PSF of type', type(psf))\n\n wcs = tim.wcs.wcs\n _,fx,fy = wcs.radec2pixelxy(ra, dec)\n x = np.round(fx - 1.).astype(int)\n y = np.round(fy - 1.).astype(int)\n good = (x >= 0) * (x < tw) * (y >= 0) * (y < th)\n # Which sources are in this brick's unique area?\n usrc = radec_in_unique_area(ra, dec, tile.ra1, tile.ra2, tile.dec1, tile.dec2)\n I, = np.nonzero(good * usrc)\n\n nexp[I] = tim.nuims[y[I], x[I]]\n if hasattr(tim, 'mjdmin') and hasattr(tim, 'mjdmax'):\n mjd[I] = (tim.mjdmin + tim.mjdmax) / 2.\n phot.wise_coadd_id[I] = tile.coadd_id\n phot.wise_x[I] = fx[I] - 1.\n phot.wise_y[I] = fy[I] - 1.\n\n central_flux[I] = tim.getImage()[y[I], x[I]]\n del x,y,good,usrc\n\n # PSF norm for depth\n psf = tim.getPsf()\n h,w = tim.shape\n patch = psf.getPointSourcePatch(h//2, w//2).patch\n psfnorm = np.sqrt(np.sum(patch**2))\n # To handle zero-depth, we return 1/nanomaggies^2 units rather than mags.\n # In the small empty patches of the sky (eg W4 in 0922p702), we get sig1 = NaN\n if np.isfinite(tim.sig1):\n phot.get('psfdepth_%s' % wband)[I] = 1. / (tim.sig1 / psfnorm)**2\n\n tim.tile = tile\n tims.append(tim)\n\n if plots:\n plt.clf()\n mn,mx = 0.1, 20000\n plt.hist(np.log10(np.clip(central_flux, mn, mx)), bins=100,\n range=(np.log10(mn), np.log10(mx)))\n logt = np.arange(0, 5)\n plt.xticks(logt, ['%i' % i for i in 10.**logt])\n plt.title('Central fluxes (W%i)' % band)\n plt.axvline(np.log10(20000), color='k')\n plt.axvline(np.log10(1000), color='k')\n ps.savefig()\n\n # Eddie's non-secret recipe:\n #- central pixel <= 1000: 19x19 pix box size\n #- central pixel in 1000 - 20000: 59x59 box size\n #- central pixel > 20000 or saturated: 149x149 box size\n #- object near \"bright star\": 299x299 box size\n nbig = nmedium = nsmall = 0\n for src,cflux in zip(cat, central_flux):\n if cflux > 20000:\n R = 100\n nbig += 1\n elif cflux > 1000:\n R = 30\n nmedium += 1\n else:\n R = 15\n nsmall += 1\n if isinstance(src, PointSource):\n src.fixedRadius = R\n else:\n ### FIXME -- sizes for galaxies..... can we set PSF size separately?\n galrad = 0\n # RexGalaxy is a subclass of ExpGalaxy\n if isinstance(src, (ExpGalaxy, DevGalaxy, SersicGalaxy)):\n galrad = src.shape.re\n pixscale = 2.75\n src.halfsize = int(np.hypot(R, galrad * 5 / pixscale))\n debug('Set WISE source sizes:', nbig, 'big', nmedium, 'medium', nsmall, 'small')\n\n tractor = Tractor(tims, cat)\n if use_ceres:\n from tractor.ceres_optimizer import CeresOptimizer\n tractor.optimizer = CeresOptimizer(BW=ceres_block, BH=ceres_block)\n tractor.freezeParamsRecursive('*')\n tractor.thawPathsTo(wanyband)\n\n t0 = Time()\n R = tractor.optimize_forced_photometry(\n fitstats=True, variance=True, shared_params=False, wantims=wantims)\n info('unWISE forced photometry took', Time() - t0)\n\n if use_ceres:\n term = R.ceres_status['termination']\n # Running out of memory can cause failure to converge and term\n # status = 2. 
Fail completely in this case.\n if term != 0:\n info('Ceres termination status:', term)\n raise RuntimeError('Ceres terminated with status %i' % term)\n\n if wantims:\n ims1 = R.ims1\n # can happen if empty source list (we still want to generate coadds)\n if ims1 is None:\n ims1 = R.ims0\n\n flux_invvars = R.IV\n if R.fitstats is not None:\n for k in fskeys:\n x = getattr(R.fitstats, k)\n fitstats[k] = np.array(x).astype(np.float32)\n\n if save_fits:\n for i,tim in enumerate(tims):\n tile = tim.tile\n (dat, mod, _, chi, _) = ims1[i]\n wcshdr = fitsio.FITSHDR()\n tim.wcs.wcs.add_to_header(wcshdr)\n tag = 'fit-%s-w%i' % (tile.coadd_id, band)\n fitsio.write('%s-data.fits' %\n tag, dat, clobber=True, header=wcshdr)\n fitsio.write('%s-mod.fits' % tag, mod,\n clobber=True, header=wcshdr)\n fitsio.write('%s-chi.fits' % tag, chi,\n clobber=True, header=wcshdr)\n\n if plots:\n # Create models for just the brightest sources\n bright_cat = [src for src in cat\n if src.getBrightness().getBand(wanyband) > 1000]\n debug('Bright soures:', len(bright_cat))\n btr = Tractor(tims, bright_cat)\n for tim in tims:\n mod = btr.getModelImage(tim)\n tile = tim.tile\n tag = '%s W%i' % (tile.coadd_id, band)\n sig1 = tim.sig1\n plt.clf()\n plt.imshow(mod, interpolation='nearest', origin='lower',\n cmap='gray', vmin=-3 * sig1, vmax=25 * sig1)\n plt.colorbar()\n plt.title('%s: bright-star models' % tag)\n ps.savefig()\n\n if get_models:\n for i,tim in enumerate(tims):\n tile = tim.tile\n (dat, mod, _, _, _) = ims1[i]\n models.append((tile.coadd_id, band, tim.wcs.wcs, dat, mod,\n tim.coadd_inverr))\n\n if plots:\n for i,tim in enumerate(tims):\n tile = tim.tile\n tag = '%s W%i' % (tile.coadd_id, band)\n (dat, mod, _, chi, _) = ims1[i]\n sig1 = tim.sig1\n plt.clf()\n plt.imshow(dat, interpolation='nearest', origin='lower',\n cmap='gray', vmin=-3 * sig1, vmax=25 * sig1)\n plt.colorbar()\n plt.title('%s: data' % tag)\n ps.savefig()\n plt.clf()\n plt.imshow(mod, interpolation='nearest', origin='lower',\n cmap='gray', vmin=-3 * sig1, vmax=25 * sig1)\n plt.colorbar()\n plt.title('%s: model' % tag)\n ps.savefig()\n\n plt.clf()\n plt.imshow(chi, interpolation='nearest', origin='lower',\n cmap='gray', vmin=-5, vmax=+5)\n plt.colorbar()\n plt.title('%s: chi' % tag)\n ps.savefig()\n\n nm = np.array([src.getBrightness().getBand(wanyband) for src in cat])\n nm_ivar = flux_invvars\n # Sources out of bounds, eg, never change from their initial\n # fluxes. 
Zero them out instead.\n nm[nm_ivar == 0] = 0.\n\n phot.set('flux_%s' % wband, nm.astype(np.float32))\n phot.set('flux_ivar_%s' % wband, nm_ivar.astype(np.float32))\n for k in fskeys:\n phot.set(k + '_' + wband, fitstats.get(k, np.zeros(len(phot), np.float32)))\n phot.set('nobs_%s' % wband, nexp)\n phot.set('mjd_%s' % wband, mjd)\n\n rtn = wphotduck()\n rtn.phot = phot\n rtn.models = None\n rtn.maskmap = None\n if get_models:\n rtn.models = models\n if get_masks:\n rtn.maskmap = maskmap\n return rtn\n\nclass wphotduck(object):\n pass\n\ndef radec_in_unique_area(rr, dd, ra1, ra2, dec1, dec2):\n '''Are the given points within the given RA,Dec rectangle?\n\n Returns a boolean array.'''\n unique = (dd >= dec1) * (dd < dec2)\n if ra1 < ra2:\n # normal RA\n unique *= (rr >= ra1) * (rr < ra2)\n else:\n # RA wrap-around\n unique[rr > 180] *= (rr[rr > 180] >= ra1)\n unique[rr < 180] *= (rr[rr < 180] < ra2)\n return unique\n\ndef unwise_phot(X):\n '''\n This is the entry-point from runbrick.py, called via mp.map()\n '''\n (key, (wcat, tiles, band, roiradec, wise_ceres, pixelized_psf, get_mods, get_masks, ps,\n move_crpix, modelsky_dir)) = X\n kwargs = dict(roiradecbox=roiradec, band=band, pixelized_psf=pixelized_psf,\n get_masks=get_masks, ps=ps, move_crpix=move_crpix,\n modelsky_dir=modelsky_dir)\n if get_mods:\n kwargs.update(get_models=get_mods)\n\n if wise_ceres and len(wcat) == 0:\n wise_ceres = False\n\n # DEBUG\n #kwargs.update(save_fits=True)\n W = None\n try:\n W = unwise_forcedphot(wcat, tiles, use_ceres=wise_ceres, **kwargs)\n except:\n import traceback\n print('unwise_forcedphot failed:')\n traceback.print_exc()\n if wise_ceres:\n print('Trying without Ceres...')\n try:\n W = unwise_forcedphot(wcat, tiles, use_ceres=False, **kwargs)\n except:\n print('unwise_forcedphot failed (2):')\n traceback.print_exc()\n return key,W\n\ndef collapse_unwise_bitmask(bitmask, band):\n '''\n Converts WISE mask bits (in the unWISE data products) into the\n more compact codes reported in the tractor files as\n WISEMASK_W[12], and the \"maskbits\" WISE extensions.\n\n output bits :\n # 2^0 = bright star core and wings\n # 2^1 = PSF-based diffraction spike\n # 2^2 = optical ghost\n # 2^3 = first latent\n # 2^4 = second latent\n # 2^5 = AllWISE-like circular halo\n # 2^6 = bright star saturation\n # 2^7 = geometric diffraction spike\n '''\n assert((band == 1) or (band == 2))\n from collections import OrderedDict\n\n bits_w1 = OrderedDict([('core_wings', 2**0 + 2**1),\n ('psf_spike', 2**27),\n ('ghost', 2**25 + 2**26),\n ('first_latent', 2**13 + 2**14),\n ('second_latent', 2**17 + 2**18),\n ('circular_halo', 2**23),\n ('saturation', 2**4),\n ('geom_spike', 2**29)])\n\n bits_w2 = OrderedDict([('core_wings', 2**2 + 2**3),\n ('psf_spike', 2**28),\n ('ghost', 2**11 + 2**12),\n ('first_latent', 2**15 + 2**16),\n ('second_latent', 2**19 + 2**20),\n ('circular_halo', 2**24),\n ('saturation', 2**5),\n ('geom_spike', 2**30)])\n\n bits = (bits_w1 if (band == 1) else bits_w2)\n\n # hack to handle both scalar and array inputs\n result = 0*bitmask\n for i, feat in enumerate(bits.keys()):\n result += ((2**i)*(np.bitwise_and(bitmask, bits[feat]) != 0)).astype(np.uint8)\n return result.astype('uint8')\n\n###\n# This is taken directly from tractor/wise.py, replacing only the filename.\n###\ndef unwise_tiles_touching_wcs(wcs, polygons=True):\n '''\n Returns a FITS table (with RA,Dec,coadd_id) of unWISE tiles\n '''\n from astrometry.util.miscutils import polygons_intersect\n from astrometry.util.starutil_numpy import 
degrees_between\n\n from pkg_resources import resource_filename\n atlasfn = resource_filename('legacypipe', 'data/wise-tiles.fits')\n\n T = fits_table(atlasfn)\n trad = wcs.radius()\n wrad = np.sqrt(2.) / 2. * 2048 * 2.75 / 3600.\n rad = trad + wrad\n r, d = wcs.radec_center()\n I, = np.nonzero(np.abs(T.dec - d) < rad)\n I = I[degrees_between(T.ra[I], T.dec[I], r, d) < rad]\n\n if not polygons:\n return T[I]\n # now check actual polygon intersection\n tw, th = wcs.imagew, wcs.imageh\n targetpoly = [(0.5, 0.5), (tw + 0.5, 0.5),\n (tw + 0.5, th + 0.5), (0.5, th + 0.5)]\n cd = wcs.get_cd()\n tdet = cd[0] * cd[3] - cd[1] * cd[2]\n if tdet > 0:\n targetpoly = list(reversed(targetpoly))\n targetpoly = np.array(targetpoly)\n keep = []\n for i in I:\n wwcs = unwise_tile_wcs(T.ra[i], T.dec[i])\n cd = wwcs.get_cd()\n wdet = cd[0] * cd[3] - cd[1] * cd[2]\n H, W = wwcs.shape\n poly = []\n for x, y in [(0.5, 0.5), (W + 0.5, 0.5), (W + 0.5, H + 0.5), (0.5, H + 0.5)]:\n rr,dd = wwcs.pixelxy2radec(x, y)\n _,xx,yy = wcs.radec2pixelxy(rr, dd)\n poly.append((xx, yy))\n if wdet > 0:\n poly = list(reversed(poly))\n poly = np.array(poly)\n if polygons_intersect(targetpoly, poly):\n keep.append(i)\n I = np.array(keep)\n return T[I]\n\n### Also direct from tractor/wise.py\ndef unwise_tile_wcs(ra, dec, W=2048, H=2048, pixscale=2.75):\n from astrometry.util.util import Tan\n '''\n Returns a Tan WCS object at the given RA,Dec center, axis aligned, with the\n given pixel W,H and pixel scale in arcsec/pixel.\n '''\n cowcs = Tan(ra, dec, (W + 1) / 2., (H + 1) / 2.,\n -pixscale / 3600., 0., 0., pixscale / 3600., W, H)\n return cowcs\n" ]
[ [ "numpy.sqrt", "numpy.sum", "scipy.ndimage.morphology.binary_dilation", "numpy.zeros", "numpy.hypot", "numpy.round", "numpy.maximum", "numpy.abs", "numpy.errstate", "numpy.arange", "numpy.bitwise_and", "numpy.all", "numpy.log10", "numpy.clip", "numpy.array", "numpy.nonzero", "numpy.isfinite" ] ]
GT-SALT/Disfluency-Generation-and-Detection
[ "72126172b466aa74277f3cf0f73b915e5dbeefbb" ]
[ "disf_gen_coarse2fine/table/Loss.py" ]
[ "\"\"\"\nThis file handles the details of the loss function during training.\n\nThis includes: LossComputeBase and the standard NMTLossCompute, and\n sharded loss compute stuff.\n\"\"\"\nfrom __future__ import division\nfrom itertools import count\nimport torch\nimport torch.nn as nn\nimport random as rnd\n\nimport table\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass CopyGeneratorLoss(nn.Module):\n \"\"\"Copy generator criterion.\"\"\"\n def __init__(self, vocab_size, force_copy, only_disf_loss, unk_index=0,\n ignore_index=-100, eps=1e-20):\n super(CopyGeneratorLoss, self).__init__()\n self.force_copy = force_copy\n self.eps = eps\n self.vocab_size = vocab_size\n self.ignore_index = ignore_index\n self.unk_index = unk_index\n self.only_disf_loss=only_disf_loss\n\n def forward(self, scores, tgt):\n \"\"\"\n Args:\n scores (FloatTensor): ``(batch_size*tgt_len)`` x dynamic vocab size\n whose sum along dim 1 is less than or equal to 1, i.e. cols\n softmaxed.\n tgt tuple (target, align)\n align (LongTensor): ``(tgt_len, batch_size)``\n target (LongTensor): ``(tgt_len, batch_size)``\n tgt_loss_mask (LongTensor): ``(tgt_len, batch_size)``\n \"\"\"\n # probabilities assigned by the model to the gold targets\n align=tgt[1]\n target=tgt[0]\n tgt_loss_mask=tgt[2]\n #print(scores, target)\n #print(scores.size(), target.size())\n target = target.view(-1)\n align = align.view(-1)\n tgt_loss_mask = tgt_loss_mask.view(-1)\n\n\n\n vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze(1)\n\n # probability of tokens copied from source\n copy_ix = align.unsqueeze(1) + self.vocab_size\n copy_tok_probs = scores.gather(1, copy_ix).squeeze(1) # Set scores for unk to 0 and add eps\n copy_tok_probs[align == self.unk_index] = 0\n copy_tok_probs += self.eps # to avoid -inf logs\n\n # find the indices in which you do not use the copy mechanism\n non_copy = align == self.unk_index\n if not self.force_copy:\n non_copy = non_copy | (target != self.unk_index)\n\n probs = torch.where(\n non_copy, copy_tok_probs + vocab_probs, copy_tok_probs\n )\n\n loss = - probs.log() # just NLLLoss; can the module be incorporated?\n\n # Drop padding.\n if self.only_disf_loss:\n loss[tgt_loss_mask == 1] = 0\n else:\n loss[tgt == self.ignore_index] = 0\n\n '''if self.normalize_by_length:\n # Compute Loss as NLL divided by seq length\n tgt_lens = batch.tgt[:, :, 0].ne(self.padding_idx).sum(0).float()\n # Compute Total Loss per sequence in batch\n loss = loss.view(-1, batch.batch_size).sum(0)\n # Divide by length of each sequence and sum\n loss = torch.div(loss, tgt_lens).sum()\n else:'''\n loss = loss.sum()\n return loss\n\nclass LossCompute(nn.Module):\n def __init__(self, vocab, opt, fields,unk_index=0,\n ignore_index=-100,smooth_eps=0):\n super(LossCompute, self).__init__()\n self.criterion = {}\n self.label_weights=torch.ones(len(fields['src_label'].vocab),dtype=torch.float,requires_grad=False,device=device)\n self.label_weights[fields['src_label'].vocab.stoi[table.IO.BOD_LABEL]]=opt.disf_label_weight\n self.label_weights[fields['src_label'].vocab.stoi[table.IO.UNK_WORD]] = 0\n self.label_weights[fields['src_label'].vocab.stoi[table.IO.PAD_WORD]] = 0\n self.criterion['lay'] = nn.NLLLoss( weight=self.label_weights,\n reduction='sum', ignore_index=ignore_index)\n if opt.no_attention:\n self.criterion['tgt'] = nn.NLLLoss(\n reduction='sum', ignore_index=ignore_index)\n else:\n if opt.no_copy:\n self.criterion['tgt'] = nn.NLLLoss(\n reduction='sum', ignore_index=ignore_index)\n else:\n 
self.criterion['tgt'] = CopyGeneratorLoss(len(vocab),\n opt.copy_attn_force, opt.only_disf_loss, unk_index=unk_index,\n ignore_index=ignore_index)\n\n def compute_loss(self, pred, gold):\n loss_list = []\n for loss_name in ('lay', 'tgt'):\n if loss_name not in gold:\n continue\n '''print(loss_name)\n print(pred[loss_name].size())\n print(gold[loss_name].size())'''\n loss = self.criterion[loss_name](pred[loss_name], gold[loss_name])\n loss_list.append(loss)\n # sum up the loss functions\n return loss_list, self.label_weights[gold['lay']].sum()#sum(loss_list)\n\nclass SegLossCompute(nn.Module):\n def __init__(self, vocab, opt, fields,unk_index=0,\n ignore_index=-100,smooth_eps=0):\n super(SegLossCompute, self).__init__()\n self.criterion= nn.NLLLoss(\n reduction='sum', ignore_index=ignore_index)\n\n def compute_loss(self, pred, gold):\n loss = self.criterion(pred, gold)\n\n return loss\n" ]
[ [ "torch.nn.NLLLoss", "torch.where", "torch.cuda.is_available" ] ]
vrsub/openconcept
[ "459aa24269cf54122ee4cfb3edf173c79c880be9" ]
[ "openconcept/components/splitter.py" ]
[ "from __future__ import division\nimport numpy as np\nfrom openmdao.api import ExplicitComponent\nfrom openmdao.api import Group\n\n\nclass PowerSplit(ExplicitComponent):\n \"\"\"\n A power split mechanism for mechanical or electrical power.\n\n Inputs\n ------\n power_in : float\n Power fed to the splitter. (vector, W)\n power_rating : float\n Maximum rated power of the split mechanism. (scalar, W)\n power_split_fraction:\n If ``'rule'`` is set to ``'fraction'``, sets percentage of input power directed\n to Output A (minus losses). (vector, dimensionless)\n power_split_amount:\n If ``'rule'`` is set to ``'fixed'``, sets amount of input power to Output A (minus\n losses). (vector, W)\n\n Outputs\n -------\n power_out_A : float\n Power sent to first output (vector, W)\n power_out_B : float\n Power sent to second output (vector, W)\n heat_out : float\n Waste heat produced (vector, W)\n component_cost : float\n Nonrecurring cost of the component (scalar, USD)\n component_weight : float\n Weight of the component (scalar, kg)\n component_sizing_margin : float\n Equal to 1 when fed full rated power (vector, dimensionless)\n\n Options\n -------\n num_nodes : int\n Number of analysis points to run (sets vec length; default 1)\n rule : str\n Power split control rule to use; either ``'fixed'`` where a set\n amount of power is sent to Output A or ``'fraction'`` where a\n fraction of the total power is sent to Output A\n efficiency : float\n Component efficiency (default 1)\n weight_inc : float\n Weight per unit rated power\n (default 0, kg/W)\n weight_base : float\n Base weight\n (default 0, kg)\n cost_inc : float\n Nonrecurring cost per unit power\n (default 0, USD/W)\n cost_base : float\n Base cost\n (default 0 USD)\n \"\"\"\n def initialize(self):\n # define control rules\n self.options.declare('num_nodes', default=1, desc='Number of flight/control conditions')\n self.options.declare('rule', default='fraction',\n desc='Control strategy - fraction or fixed power')\n\n self.options.declare('efficiency', default=1., desc='Efficiency (dimensionless)')\n self.options.declare('weight_inc', default=0., desc='kg per input watt')\n self.options.declare('weight_base', default=0., desc='kg base weight')\n self.options.declare('cost_inc', default=0., desc='$ cost per input watt')\n self.options.declare('cost_base', default=0., desc='$ cost base')\n\n def setup(self):\n nn = self.options['num_nodes']\n self.add_input('power_in', units='W',\n desc='Input shaft power or incoming electrical load', shape=(nn,))\n self.add_input('power_rating', val=99999999, units='W', desc='Split mechanism power rating')\n\n rule = self.options['rule']\n if rule == 'fraction':\n self.add_input('power_split_fraction', val=0.5,\n desc='Fraction of power to output A', shape=(nn,))\n elif rule == 'fixed':\n self.add_input('power_split_amount', units='W',\n desc='Raw amount of power to output A', shape=(nn,))\n else:\n msg = 'Specify either \"fraction\" or \"fixed\" as power split control rule'\n raise ValueError(msg)\n\n eta = self.options['efficiency']\n weight_inc = self.options['weight_inc']\n weight_base = self.options['weight_base']\n cost_inc = self.options['cost_inc']\n cost_base = self.options['cost_base']\n\n self.add_output('power_out_A', units='W', desc='Output power or load to A', shape=(nn,))\n self.add_output('power_out_B', units='W', desc='Output power or load to B', shape=(nn,))\n self.add_output('heat_out', units='W', desc='Waste heat out', shape=(nn,))\n self.add_output('component_cost', units='USD', desc='Splitter 
component cost')\n self.add_output('component_weight', units='kg', desc='Splitter component weight')\n self.add_output('component_sizing_margin', desc='Fraction of rated power', shape=(nn,))\n\n if rule == 'fraction':\n self.declare_partials(['power_out_A', 'power_out_B'],\n ['power_in', 'power_split_fraction'],\n rows=range(nn), cols=range(nn))\n elif rule == 'fixed':\n self.declare_partials(['power_out_A', 'power_out_B'],\n ['power_in', 'power_split_amount'],\n rows=range(nn), cols=range(nn))\n self.declare_partials('heat_out', 'power_in', val=(1 - eta) * np.ones(nn),\n rows=range(nn), cols=range(nn))\n self.declare_partials('component_cost', 'power_rating', val=cost_inc)\n self.declare_partials('component_weight', 'power_rating', val=weight_inc)\n self.declare_partials('component_sizing_margin', 'power_in',\n rows=range(nn), cols=range(nn))\n self.declare_partials('component_sizing_margin', 'power_rating')\n\n def compute(self, inputs, outputs):\n nn = self.options['num_nodes']\n rule = self.options['rule']\n eta = self.options['efficiency']\n weight_inc = self.options['weight_inc']\n weight_base = self.options['weight_base']\n cost_inc = self.options['cost_inc']\n cost_base = self.options['cost_base']\n\n if rule == 'fraction':\n outputs['power_out_A'] = inputs['power_in'] * inputs['power_split_fraction'] * eta\n outputs['power_out_B'] = inputs['power_in'] * (1 - inputs['power_split_fraction']) * eta\n elif rule == 'fixed':\n # check to make sure enough power is available\n # if inputs['power_in'] < inputs['power_split_amount']:\n not_enough_idx = np.where(inputs['power_in'] < inputs['power_split_amount'])\n po_A = np.zeros(nn)\n po_B = np.zeros(nn)\n po_A[not_enough_idx] = inputs['power_in'][not_enough_idx] * eta\n po_B[not_enough_idx] = np.zeros(nn)[not_enough_idx]\n # else:\n enough_idx = np.where(inputs['power_in'] >= inputs['power_split_amount'])\n po_A[enough_idx] = inputs['power_split_amount'][enough_idx] * eta\n po_B[enough_idx] = (inputs['power_in'][enough_idx] -\n inputs['power_split_amount'][enough_idx]) * eta\n outputs['power_out_A'] = po_A\n outputs['power_out_B'] = po_B\n outputs['heat_out'] = inputs['power_in'] * (1 - eta)\n outputs['component_cost'] = inputs['power_rating'] * cost_inc + cost_base\n outputs['component_weight'] = inputs['power_rating'] * weight_inc + weight_base\n outputs['component_sizing_margin'] = inputs['power_in'] / inputs['power_rating']\n\n def compute_partials(self, inputs, J):\n nn = self.options['num_nodes']\n rule = self.options['rule']\n eta = self.options['efficiency']\n if rule == 'fraction':\n J['power_out_A', 'power_in'] = inputs['power_split_fraction'] * eta\n J['power_out_A', 'power_split_fraction'] = inputs['power_in'] * eta\n J['power_out_B', 'power_in'] = (1 - inputs['power_split_fraction']) * eta\n J['power_out_B', 'power_split_fraction'] = -inputs['power_in'] * eta\n elif rule == 'fixed':\n not_enough_idx = np.where(inputs['power_in'] < inputs['power_split_amount'])\n enough_idx = np.where(inputs['power_in'] >= inputs['power_split_amount'])\n # if inputs['power_in'] < inputs['power_split_amount']:\n Jpo_A_pi = np.zeros(nn)\n Jpo_A_ps = np.zeros(nn)\n Jpo_B_pi = np.zeros(nn)\n Jpo_B_ps = np.zeros(nn)\n Jpo_A_pi[not_enough_idx] = eta * np.ones(nn)[not_enough_idx]\n Jpo_A_ps[not_enough_idx] = np.zeros(nn)[not_enough_idx]\n Jpo_B_pi[not_enough_idx] = np.zeros(nn)[not_enough_idx]\n Jpo_B_ps[not_enough_idx] = np.zeros(nn)[not_enough_idx]\n # else:\n Jpo_A_ps[enough_idx] = eta * np.ones(nn)[enough_idx]\n Jpo_A_pi[enough_idx] = 
np.zeros(nn)[enough_idx]\n Jpo_B_ps[enough_idx] = -eta * np.ones(nn)[enough_idx]\n Jpo_B_pi[enough_idx] = eta * np.ones(nn)[enough_idx]\n J['power_out_A', 'power_in'] = Jpo_A_pi\n J['power_out_A', 'power_split_amount'] = Jpo_A_ps\n J['power_out_B', 'power_in'] = Jpo_B_pi\n J['power_out_B', 'power_split_amount'] = Jpo_B_ps\n J['component_sizing_margin', 'power_in'] = 1 / inputs['power_rating']\n J['component_sizing_margin', 'power_rating'] = - (inputs['power_in'] /\n inputs['power_rating'] ** 2)\n\n\nclass FlowSplit(ExplicitComponent):\n \"\"\"\n Split incoming flow from one inlet into two outlets at a fractional ratio.\n\n Inputs\n ------\n mdot_in : float\n Mass flow rate of incoming fluid (vector, kg/s)\n mdot_split_fraction : float\n Fraction of incoming mass flow directed to output A, must be in\n range 0-1 inclusive (vector, dimensionless)\n \n Outputs\n -------\n mdot_out_A : float\n Mass flow rate directed to first output (vector, kg/s)\n mdot_out_B : float\n Mass flow rate directed to second output (vector, kg/s)\n \n Options\n -------\n num_nodes : int\n Number of analysis points to run (sets vec length; default 1)\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes', default=1, desc='Number of analysis points')\n \n def setup(self):\n nn = self.options['num_nodes']\n rng = np.arange(0, nn)\n\n self.add_input('mdot_in', units='kg/s', shape=(nn,))\n self.add_input('mdot_split_fraction', units=None, shape=(nn,), val=0.5)\n\n self.add_output('mdot_out_A', units='kg/s', shape=(nn,))\n self.add_output('mdot_out_B', units='kg/s', shape=(nn,))\n\n self.declare_partials(['mdot_out_A'], ['mdot_in', 'mdot_split_fraction'], rows=rng, cols=rng)\n self.declare_partials(['mdot_out_B'], ['mdot_in', 'mdot_split_fraction'], rows=rng, cols=rng)\n \n def compute(self, inputs, outputs):\n if np.any(inputs['mdot_split_fraction'] < 0) or np.any(inputs['mdot_split_fraction'] > 1):\n raise RuntimeWarning(f\"mdot_split_fraction of {inputs['mdot_split_fraction']} has at least one element out of range [0, 1]\")\n outputs['mdot_out_A'] = inputs['mdot_in'] * inputs['mdot_split_fraction']\n outputs['mdot_out_B'] = inputs['mdot_in'] * (1 - inputs['mdot_split_fraction'])\n\n def compute_partials(self, inputs, J):\n J['mdot_out_A', 'mdot_in'] = inputs['mdot_split_fraction']\n J['mdot_out_A', 'mdot_split_fraction'] = inputs['mdot_in']\n\n J['mdot_out_B', 'mdot_in'] = 1 - inputs['mdot_split_fraction']\n J['mdot_out_B', 'mdot_split_fraction'] = - inputs['mdot_in']\n\n\nclass FlowCombine(ExplicitComponent):\n \"\"\"\n Combines two incoming flows into a single outgoing flow and does a weighted average\n of their temperatures based on the mass flow rate of each to compute the outlet temp.\n\n Inputs\n ------\n mdot_in_A : float\n Mass flow rate of fluid from first inlet, should be nonegative (vector, kg/s)\n mdot_in_B : float\n Mass flow rate of fluid from second inlet, should be nonnegative (vector, kg/s)\n T_in_A : float\n Temperature of fluid from first inlet (vector, K)\n T_in_B : float\n Temperature of fluid from second inlet (vector, K)\n\n Outputs\n -------\n mdot_out : float\n Outgoing fluid mass flow rate (vector, kg/s)\n T_out : float\n Outgoing fluid temperature (vector, K)\n\n Options\n -------\n num_nodes : int\n Number of analysis points (scalar, default 1)\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes', default=1, desc='Number of analysis points')\n \n def setup(self):\n nn = self.options['num_nodes']\n rng = np.arange(0, nn)\n\n self.add_input('mdot_in_A', 
units='kg/s', shape=(nn,))\n self.add_input('mdot_in_B', units='kg/s', shape=(nn,))\n self.add_input('T_in_A', units='K', shape=(nn,))\n self.add_input('T_in_B', units='K', shape=(nn,))\n\n self.add_output('mdot_out', units='kg/s', shape=(nn,))\n self.add_output('T_out', units='K', shape=(nn,))\n\n self.declare_partials(['mdot_out'], ['mdot_in_A', 'mdot_in_B'], rows=rng, cols=rng)\n self.declare_partials(['T_out'], ['mdot_in_A', 'mdot_in_B', 'T_in_A', 'T_in_B'], rows=rng, cols=rng)\n \n def compute(self, inputs, outputs):\n mdot_A = inputs['mdot_in_A']\n mdot_B = inputs['mdot_in_B']\n outputs['mdot_out'] = mdot_A + mdot_B\n # Weighted average of temperatures for output temperature\n outputs['T_out'] = (mdot_A * inputs['T_in_A'] + mdot_B * inputs['T_in_B']) / (mdot_A + mdot_B)\n\n def compute_partials(self, inputs, J):\n nn = self.options['num_nodes']\n J['mdot_out', 'mdot_in_A'] = np.ones((nn,))\n J['mdot_out', 'mdot_in_B'] = np.ones((nn,))\n\n mdot_A = inputs['mdot_in_A']\n mdot_B = inputs['mdot_in_B']\n mdot = mdot_A + mdot_B\n T_A = inputs['T_in_A']\n T_B = inputs['T_in_B']\n J['T_out', 'mdot_in_A'] = (mdot * T_A - mdot_A * T_A - mdot_B * T_B) / (mdot**2)\n J['T_out', 'mdot_in_B'] = (mdot * T_B - mdot_A * T_A - mdot_B * T_B) / (mdot**2)\n J['T_out', 'T_in_A'] = mdot_A / mdot\n J['T_out', 'T_in_B'] = mdot_B / mdot" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.any", "numpy.arange", "numpy.where" ] ]
amit-bohra/Interactive-Image-Segmentation-with-OpenCV-Watershed-Algorithm-in-Python3
[ "9fd6e2551fe19af76f1c91c714ba029d2d8599ca" ]
[ "56_interactive_watershed.py" ]
[ "import cv2\r\nimport numpy as np\r\nfrom copy import deepcopy as dp\r\n\r\naqua=(255,255,0)\r\nmarine=(116,139,69)\r\nbanana=(87,207,277)\r\nblue=(255,0,0)\r\nalmond=(205,235,255)\r\nbrown=(64,64,255)\r\nblue1=(255,245,152)\r\ngreen=(0,100,0)\r\norange=(0,140,255)\r\norchid=(139,34,104)\r\npink=(147,20,255)\r\ngold=(0,215,255)\r\ngray=(127,127,127)\r\nindigo=(130,0,75)\r\n\r\ncolors=[aqua,marine,banana,blue,almond,brown,blue1,green,orange,orchid,\r\n pink,gold,gray,indigo]\r\n\r\n\r\n\r\nsize=0\r\ncolor=0\r\n\r\ndef draw(event,x,y,flags,param):\r\n global color,colors,img,marker,segment,tmg,size\r\n mark=color+1\r\n if event==cv2.EVENT_LBUTTONDOWN:\r\n cv2.circle(marker,(x,y),size,mark,-1)\r\n cv2.circle(tmg,(x,y),size,colors[color],-1)\r\n marker_copy=dp(marker)\r\n cv2.watershed(img,marker_copy)\r\n segment=np.zeros(img.shape,np.uint8)\r\n for i in range(1,len(colors)+1):\r\n segment[marker_copy==i]=colors[i-1]\r\n\r\ndef func(x):\r\n pass\r\na=0\r\na=int(input('Enter 1 for VideoCam else 0 '))\r\nif a==1:\r\n cap=cv2.VideoCapture(0)\r\n if cap.isOpened():\r\n ret,img=cap.read()\r\n else:\r\n ret=False\r\nelse:\r\n img=cv2.imread('a.jpg')\r\nimg=cv2.GaussianBlur(img,(1,1),0)\r\ncv2.namedWindow('image',cv2.WINDOW_NORMAL)\r\ncv2.createTrackbar('color','image',0,len(colors)-1,func)\r\ncv2.createTrackbar('size','image',10,200,func)\r\ncv2.setMouseCallback('image',draw)\r\nmarker=np.zeros(img.shape[:2],np.int32)\r\nsegment=np.zeros(img.shape,np.uint8)\r\ntmg=dp(img)\r\nif a==1:\r\n cap.release()\r\nwhile True:\r\n color=cv2.getTrackbarPos('color','image')\r\n size=cv2.getTrackbarPos('size','image')\r\n cv2.imshow('image',tmg)\r\n cv2.imshow('segment',segment)\r\n if cv2.waitKey(1)==27:\r\n break\r\n if cv2.waitKey(1)==ord('p'):\r\n print()\r\n if cv2.waitKey(1)==ord('c'):\r\n tmg=dp(img)\r\n marker=np.zeros(img.shape[:2],np.int32)\r\n segment=np.zeros(img.shape,np.uint8)\r\n color=0\r\ncv2.destroyAllWindows()\r\n\r\n" ]
[ [ "numpy.zeros" ] ]
emailandxu/neurst
[ "235bddfc93b7784df01eddccec6791e1281651cf", "235bddfc93b7784df01eddccec6791e1281651cf" ]
[ "neurst/data/datasets/parallel_text_dataset.py", "neurst/data/datasets/audio/librispeech.py" ]
[ "# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom abc import ABCMeta, abstractmethod\n\nimport six\nimport tensorflow as tf\nfrom absl import logging\n\nfrom neurst.data.datasets import register_dataset\nfrom neurst.data.datasets.dataset import TFRecordDataset\nfrom neurst.data.datasets.text_gen_dataset import TextGenDataset\nfrom neurst.utils.compat import DataStatus\nfrom neurst.utils.flags_core import Flag\n\n\[email protected]_metaclass(ABCMeta)\nclass AbstractParallelDataset(TextGenDataset):\n \"\"\" The abstract dataset for parallel text.\n The element spec must be\n {\n 'feature': tf.TensorSpec(shape=(None,), dtype=tf.int64),\n 'label': tf.TensorSpec(shape=(None,), dtype=tf.int64)\n }\n \"\"\"\n\n def __init__(self):\n self._sources = None\n super(AbstractParallelDataset, self).__init__()\n\n @property\n @abstractmethod\n def status(self) -> str:\n raise NotImplementedError\n\n @abstractmethod\n def build_iterator(self, map_func=None, shard_id=0, total_shards=1):\n \"\"\" Returns the iterator of the dataset.\n\n Args:\n map_func: A function mapping a dataset element to another dataset element.\n shard_id: Generator yields on the `shard_id`-th shard of the whole dataset.\n total_shards: The number of total shards.\n \"\"\"\n raise NotImplementedError\n\n @property\n def sources(self):\n \"\"\" Returns a list of source texts. \"\"\"\n return self._sources\n\n\n@register_dataset(\"parallel_text\")\nclass ParallelTextDataset(AbstractParallelDataset):\n\n def __init__(self, args):\n \"\"\" Initializes the dataset. 
\"\"\"\n super(ParallelTextDataset, self).__init__()\n self._src_file = args[\"src_file\"]\n assert self._src_file, \"`src_file` must be provided for ParallelTextDataset.\"\n self._trg_file = args[\"trg_file\"]\n self._data_is_processed = args[\"data_is_processed\"]\n\n @staticmethod\n def class_or_method_args():\n return [\n Flag(\"src_file\", dtype=Flag.TYPE.STRING, help=\"The source text file\"),\n Flag(\"trg_file\", dtype=Flag.TYPE.STRING, help=\"The target text file\"),\n Flag(\"data_is_processed\", dtype=Flag.TYPE.BOOLEAN,\n help=\"Whether the text data is already processed.\"),\n ]\n\n @property\n def status(self):\n if self._data_is_processed:\n return DataStatus.PROCESSED\n return DataStatus.RAW\n\n def build_iterator(self, map_func=None, shard_id=0, total_shards=1):\n \"\"\" Reads data from files and returns the iterator.\n\n Args:\n map_func: A function mapping a dataset element to another dataset element.\n shard_id: Generator yields on the `shard_id`-th shard of the whole dataset.\n total_shards: The number of total shards.\n \"\"\"\n if total_shards > 1:\n total_samples = self.num_samples\n samples_per_part = total_samples // total_shards\n range_begin = samples_per_part * shard_id\n if shard_id == total_shards - 1:\n range_end = total_samples + 1\n logging.info(f\"Iterate on dataset from {range_begin} \"\n f\"to the end (total {total_samples}).\")\n else:\n range_end = range_begin + samples_per_part\n logging.info(f\"Iterate on dataset from {range_begin} \"\n f\"to {range_end} (total {total_samples}).\")\n\n def gen():\n fsrc = tf.io.gfile.GFile(self._src_file)\n ftrg = None if self._trg_file is None else tf.io.gfile.GFile(self._trg_file)\n n = 0\n for src in fsrc:\n n += 1\n data = {\"feature\": src.strip()}\n if ftrg is not None:\n data[\"label\"] = ftrg.readline().strip()\n if total_shards > 1:\n if n < range_begin:\n continue\n if n >= range_end:\n break\n if map_func is not None:\n data = map_func(data)\n yield data\n fsrc.close()\n if ftrg is not None:\n ftrg.close()\n\n return gen\n\n @property\n def sources(self):\n \"\"\" Returns a list of sources. \"\"\"\n if self._sources is None and self._src_file:\n with tf.io.gfile.GFile(self._src_file) as fp:\n self._sources = [line.strip() for line in fp]\n return self._sources\n\n @property\n def targets(self):\n \"\"\" Returns a list of targets. 
\"\"\"\n if self._targets is None and self._trg_file:\n with tf.io.gfile.GFile(self._trg_file) as fp:\n self._targets = [line.strip() for line in fp]\n return self._targets\n\n\n@register_dataset(\"parallel_tfrecord\")\nclass ParallelTFRecordDataset(TFRecordDataset, AbstractParallelDataset):\n\n @property\n def status(self):\n return DataStatus.PROJECTED\n\n @property\n def fields(self):\n return {\"feature\": tf.io.VarLenFeature(tf.int64),\n \"label\": tf.io.VarLenFeature(tf.int64)}\n", "# Copyright 2020 ByteDance Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport re\n\nimport tensorflow as tf\nfrom absl import logging\n\nfrom neurst.data.datasets import register_dataset\nfrom neurst.data.datasets.audio.audio_dataset import RawAudioDataset\nfrom neurst.utils.compat import DataStatus\nfrom neurst.utils.flags_core import Flag\n\n\n@register_dataset(\"Librispeech\")\nclass LibriSpeech(RawAudioDataset):\n \"\"\"\n LibriSpeech is a corpus of approximately 1000 hours of read English speech.\n Homepage: http://www.openslr.org/12\n The raw dataset contains 7 files:\n - train-clean-100.tar.gz\n - train-clean-360.tar.gz\n - train-other-500.tar.gz\n - dev-clean.tar.gz\n - dev-other.tar.gz\n - test-clean.tar.gz\n - test-other.tar.gz\n \"\"\"\n\n def __init__(self, args):\n super(LibriSpeech, self).__init__(args)\n self._excluded_file = args[\"excluded_file\"]\n self._excluded_list = None\n if self._excluded_file is not None:\n if not tf.io.gfile.exists(self._excluded_file):\n raise ValueError(f\"`excluded_file` not found: {self._excluded_file}\")\n with tf.io.gfile.GFile(self._excluded_file) as fp:\n self._excluded_list = [x.strip().lower() for x in fp]\n\n self._transcripts_dict = None\n\n @staticmethod\n def class_or_method_args():\n this_args = super(LibriSpeech, LibriSpeech).class_or_method_args()\n this_args.append(\n Flag(\"excluded_file\", dtype=Flag.TYPE.STRING, default=None,\n help=\"A file containing transcriptions \"\n \"that would be removed in the LibriSpeech corpus.\"))\n return this_args\n\n @property\n def status(self):\n return {\n \"audio\": DataStatus.RAW,\n \"transcript\": DataStatus.RAW\n }\n\n def load_transcripts(self):\n \"\"\" Loads transcripts (and translations if exists). 
\"\"\"\n if self._transcripts_dict is not None:\n return\n logging.info(f\"Loading transcriptions from tarball: {self._input_tarball}\")\n n = 0\n trans = {}\n level0 = set()\n level1_cnt = 0\n level2_cnt = 0\n excluded_count = 0\n excluded_str = \"\"\n if self._excluded_list is not None:\n excluded_str = \" \".join(self._excluded_list)\n self._transcripts = []\n with self.open_tarball(\"tar\") as tar:\n for tarinfo in tar:\n if not tarinfo.isreg():\n continue\n n += 1\n if n % 10000 == 0:\n logging.info(\"Scanned %d entries...\", n)\n if not tarinfo.name.endswith(\".trans.txt\"):\n continue\n level1_cnt += 1\n # The file LibriSpeech/dev-clean/3170/137482/3170-137482.trans.txt\n # will contain lines such as:\n # 3170-137482-0000 WITH AN EDUCATION WHICH OUGHT TO ...\n # 3170-137482-0001 I WAS COMPELLED BY POVERTY ...\n key = tarinfo.name.strip(\".trans.txt\")\n path0, path1 = key.split(\"/\")[-1].split(\"-\")\n level0.add(path0)\n f = tar.extractfile(tarinfo)\n this_dict = {}\n for line in f.readlines():\n tid, txt = line.decode(\"utf-8\").strip(\"\\n\").split(\" \", 1)\n txt_tokens = txt.split()\n if txt in excluded_str:\n excluded_count += 1\n this_dict[tid] = \"\"\n elif len(txt_tokens) > 10 and (\n \" \".join(txt_tokens[:len(txt_tokens) // 2]) in excluded_str\n or \" \".join(txt_tokens[len(txt_tokens) // 2:]) in excluded_str):\n excluded_count += 1\n this_dict[tid] = \"\"\n else:\n txt = txt.lower()\n this_dict[tid] = txt\n self._transcripts.append(txt)\n logging.info(\"[%s] = %d utterances.\", key, len(this_dict))\n level2_cnt += len(this_dict)\n if path0 not in trans:\n trans[path0] = dict()\n trans[path0][path1] = this_dict\n f.close()\n logging.info(\"Total %d directories, %d sub-directories, %d utterances, %d matched excluded file\",\n len(level0), level1_cnt, level2_cnt, excluded_count)\n # {'2277': {'149896': {'2277-149896-0000': \"HE WAS IN A FEVERED STATE OF MIND OWING TO THE', ...}, ...}\n self._transcripts_dict = trans\n\n def build_iterator(self, map_func=None, shard_id=0, total_shards=1):\n \"\"\" Returns the iterator of the dataset.\n\n Args:\n map_func: A function mapping a dataset element to another dataset element.\n \"\"\"\n if total_shards > 1:\n total_samples = self.num_samples\n samples_per_part = total_samples // total_shards\n range_begin = samples_per_part * shard_id\n if shard_id == total_shards - 1:\n range_end = total_samples + 1\n logging.info(f\"Iterate on dataset from {range_begin} \"\n f\"to the end (total {total_samples}).\")\n else:\n range_end = range_begin + samples_per_part\n logging.info(f\"Iterate on dataset from {range_begin} \"\n f\"to {range_end} (total {total_samples}).\")\n\n def gen():\n if self._transcripts_dict is None:\n self.load_transcripts()\n with self.open_tarball(\"tar\") as tar:\n n = 0\n for tarinfo in tar:\n if not tarinfo.isreg():\n continue\n if not tarinfo.name.endswith(\".flac\"):\n continue\n\n uttid = re.sub(\".*/(.+)\\\\.flac\", \"\\\\1\", tarinfo.name)\n path0, path1, _ = uttid.strip().split(\"-\")\n this_trans = self._transcripts_dict[path0][path1][uttid]\n if this_trans.strip() == \"\":\n continue\n n += 1\n if total_shards > 1:\n if n < range_begin:\n continue\n if n >= range_end:\n break\n f = tar.extractfile(tarinfo)\n audio = self.extract_audio_feature(fileobj=f, mode=\"flac\")\n f.close()\n data_sample = {\n \"audio\": audio,\n \"transcript\": this_trans\n }\n if map_func is None:\n yield data_sample\n else:\n yield map_func(data_sample)\n\n return gen\n" ]
[ [ "tensorflow.io.VarLenFeature", "tensorflow.io.gfile.GFile" ], [ "tensorflow.io.gfile.exists", "tensorflow.io.gfile.GFile" ] ]
semio/ddf_utils
[ "e10c4cb6dc7722415a5863579a552cc7b7e3668d" ]
[ "ddf_utils/model/package.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"datapackage model\"\"\"\n\nimport os.path as osp\nfrom typing import List, Tuple, Dict, Union, Callable\nimport attr\nimport json\nfrom itertools import product\nfrom collections import OrderedDict\nfrom tqdm import tqdm\n\nimport pandas as pd\n\nfrom .ddf import DDF, Concept, EntityDomain, Entity, DaskDataPoint, Synonym\nfrom .utils import absolute_path\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](auto_attribs=True, repr=False)\nclass TableSchema:\n \"\"\"Table Schema Object Class\"\"\"\n fields: List[dict]\n primaryKey: Union[List[str], str]\n\n @classmethod\n def from_dict(cls, d: dict):\n fields = d['fields']\n primaryKey = d['primaryKey']\n return cls(fields, primaryKey)\n\n @property\n def field_names(self):\n return [f['name'] for f in self.fields]\n\n @property\n def common_fields(self):\n field_names = self.field_names\n pkey = self.primaryKey\n if isinstance(pkey, str):\n common_fields = list(filter(lambda x: x != pkey, field_names))\n else:\n common_fields = list(filter(lambda x: x not in pkey, field_names))\n return common_fields\n\n def __repr__(self):\n return \"TableSchema(primaryKey: {}, fields: {})\".format(self.primaryKey, self.common_fields)\n\n\[email protected](auto_attribs=True)\nclass Resource:\n name: str\n path: str\n schema: TableSchema\n\n @classmethod\n def from_dict(cls, d: dict):\n path = d['path']\n name = d['name']\n schema = TableSchema.from_dict(d['schema'])\n return cls(name, path, schema)\n\n def to_dict(self):\n res = vars(self).copy()\n if 'schema' in res:\n res['schema'] = vars(res['schema']).copy()\n return res\n\n\[email protected](auto_attribs=True)\nclass DDFSchema:\n primaryKey: List[str]\n value: str\n resources: List[str] # a list of resource names\n\n @classmethod\n def from_dict(cls, d: dict):\n primaryKey = d['primaryKey']\n value = d['value']\n resources = d['resources']\n return cls(primaryKey=primaryKey, value=value, resources=resources)\n\n\[email protected](auto_attribs=True, repr=False)\nclass DataPackage:\n base_path: str\n resources: List[Resource]\n props: dict = attr.ib(factory=dict)\n\n def __attrs_post_init__(self):\n self.base_path = absolute_path(self.base_path)\n\n def __repr__(self):\n return f\"DataPackage({self.base_path})\"\n\n @classmethod\n def from_dict(cls, d_: dict, base_path='./'):\n d = d_.copy()\n resources = list(map(Resource.from_dict, d.pop('resources')))\n return cls(base_path=base_path, resources=resources, props=d)\n\n @classmethod\n def from_json(cls, json_path):\n json_path = absolute_path(json_path)\n base_path = osp.dirname(json_path)\n d = json.load(open(json_path))\n return cls.from_dict(d, base_path)\n\n @classmethod\n def from_path(cls, path):\n path = absolute_path(path)\n json_path = osp.join(path, 'datapackage.json')\n return cls.from_json(json_path)\n\n def to_dict(self):\n \"\"\"dump the datapackage to disk\"\"\"\n raise NotImplementedError\n\n\[email protected](repr=False)\nclass DDFcsv(DataPackage):\n \"\"\"DDFCSV datapackage.\"\"\"\n ddfSchema: Dict[str, List[DDFSchema]] = attr.ib(factory=dict)\n ddf: DDF = attr.ib(init=False)\n concepts_resources: List[Resource] = attr.ib(init=False)\n entities_resources: List[Resource] = attr.ib(init=False)\n datapoints_resources: List[Resource] = attr.ib(init=False)\n synonyms_resources: List[Resource] = attr.ib(init=False)\n\n # config for read_csv\n _default_reader_options = {'keep_default_na': False, 'na_values': ['']}\n _default_dask_reader_options = {'keep_default_na': False,\n 
'na_values': [''],\n 'sample_rows': 1000000}\n\n def __attrs_post_init__(self):\n super(DDFcsv, self).__attrs_post_init__()\n conc = list()\n ent = list()\n dp = list()\n syn = list()\n for r in self.resources:\n pkey = r.schema.primaryKey\n if isinstance(pkey, str):\n if pkey == 'concept':\n conc.append(r)\n else:\n ent.append(r)\n else: # TODO: datapoints key might be one column, not list of columns?\n if 'synonym' in pkey:\n syn.append(r)\n else:\n dp.append(r)\n self.concepts_resources = conc\n self.entities_resources = ent\n self.datapoints_resources = dp\n self.synonyms_resources = syn\n self.ddf = self.load_ddf()\n\n @classmethod\n def from_dict(cls, d_: dict, base_path='./'):\n d = d_.copy()\n resources = list(map(Resource.from_dict, d.pop('resources')))\n if 'ddfSchema' in d.keys():\n ddf_schema_ = d.pop('ddfSchema')\n ddf_schema = dict()\n for k, v in ddf_schema_.items():\n ddf_schema[k] = [DDFSchema.from_dict(d) for d in v]\n else:\n ddf_schema = {}\n return cls(base_path=base_path, resources=resources, ddfSchema=ddf_schema, props=d)\n\n def to_dict(self):\n res = OrderedDict(self.props.copy())\n res['resources'] = [r.to_dict() for r in self.resources]\n if self.ddfSchema:\n res['ddfSchema'] = dict()\n for k, v in self.ddfSchema.items():\n res['ddfSchema'][k] = [vars(sch).copy() for sch in v]\n return res\n\n def _gen_concepts(self):\n concepts_paths = [osp.join(self.base_path, r.path) for r in self.concepts_resources]\n for p in concepts_paths:\n df = pd.read_csv(p, index_col='concept', dtype=str, **self._default_reader_options)\n for concept, row in df.iterrows():\n concept_type = row['concept_type']\n props = row.drop('concept_type').to_dict()\n yield (concept, Concept(id=concept, concept_type=concept_type, props=props))\n\n def _gen_entities(self, concepts: Dict[str, Concept]):\n for r in self.entities_resources:\n pkey = r.schema.primaryKey\n if concepts[pkey].concept_type == 'entity_domain':\n domain = concepts[pkey].id\n else:\n domain = concepts[pkey].props['domain']\n\n df = pd.read_csv(osp.join(self.base_path, r.path), dtype=str, # TODO: is it okay to use str for all?\n **self._default_reader_options)\n df = df.set_index(pkey)\n is_cols = list(filter(lambda x: x.startswith('is--'), df.columns.values))\n for ent, row in df.iterrows():\n sets = list()\n for c in is_cols:\n if row[c] == 'TRUE' and c[4:] != domain:\n sets.append(c[4:]) # strip the 'is--' part, only keep set name\n yield (domain, Entity(id=ent, domain=domain, sets=sets, props=row.drop(is_cols).to_dict()))\n\n def _gen_datapoints(self):\n for r in self.datapoints_resources:\n fields = r.schema.common_fields\n pkey = r.schema.primaryKey\n for f in fields:\n yield (f, pkey, osp.join(self.base_path, r.path))\n\n def _gen_synonyms(self):\n for r in self.synonyms_resources:\n # there should be only two columns\n pkey = r.schema.primaryKey\n if pkey[0] == 'synonym':\n concept = pkey[1]\n else:\n concept = pkey[0]\n df = pd.read_csv(osp.join(self.base_path, r.path), **self._default_reader_options)\n sym = Synonym(concept_id=concept, synonyms=df.set_index('synonym')[concept].to_dict())\n yield (concept, sym)\n\n @staticmethod\n def entity_domain_to_categorical(domain: EntityDomain):\n entities = [e.id for e in domain.entities]\n return pd.api.types.CategoricalDtype(entities)\n\n @staticmethod\n def entity_set_to_categorical(domain: EntityDomain, s: str):\n entity_set = domain.get_entity_set(s)\n entities = [e.id for e in entity_set]\n return pd.api.types.CategoricalDtype(entities)\n\n def load_ddf(self):\n \"\"\"-> 
DDF\"\"\"\n # load concepts\n concepts = dict(self._gen_concepts())\n\n # load entities\n entities = list(self._gen_entities(concepts))\n domains = dict()\n domains_tmp = dict()\n for domain, entity in entities:\n if domain not in domains_tmp.keys():\n domains_tmp[domain] = list()\n domains_tmp[domain].append(entity)\n\n for domain, entities_ in domains_tmp.items():\n # TODO: maybe get properties from concepts table\n # Allow duplicated entity because they may be defined in multiple resources\n # i.e. multiple entity sets in separated files.\n domains[domain] = EntityDomain.from_entity_list(domain_id=domain, entities=entities_, allow_duplicated=True)\n\n # load datapoints. Here we will use Dask for all\n # 1. create categories for entity domains\n dtypes = dict()\n # parse_dates = list()\n concept_types = dict()\n for domain_name, domain in domains.items():\n dtypes[domain_name] = self.entity_domain_to_categorical(domain)\n for eset in domain.entity_sets:\n dtypes[eset] = self.entity_set_to_categorical(domain, eset)\n # 2. get all concept types, update dtypes for time concepts\n for c_id, c in concepts.items():\n concept_types[c_id] = c.concept_type\n if c.concept_type == 'time':\n dtypes[c_id] = 'str'\n # 3. group files for same indicator together\n indicators = dict()\n for field, pkey, path in self._gen_datapoints():\n # import ipdb; ipdb.set_trace()\n indicator = field\n pkey = tuple(sorted(pkey))\n if indicator not in indicators:\n indicators.setdefault(indicator, dict([(pkey, [path])]))\n else:\n if pkey not in indicators[indicator]:\n indicators[indicator][pkey] = [path]\n else:\n indicators[indicator][pkey].append(path)\n datapoints = dict()\n for i, v in indicators.items():\n datapoints[i] = dict()\n # dtypes_ = dtypes.copy()\n # dtypes_[i] = 'float' # TODO: supporting string/float datatypes, not just float\n read_csv_options = self._default_dask_reader_options.copy()\n read_csv_options.update(dict(dtype=dtypes))\n for k, paths in v.items():\n dp = DaskDataPoint(id=i, dimensions=k, path=paths, concept_types=concept_types,\n read_csv_options=read_csv_options)\n datapoints[i][k] = dp\n\n # load synonyms\n synonyms = dict(self._gen_synonyms())\n\n # return complete DDF object\n return DDF(concepts=concepts, entities=domains, datapoints=datapoints, synonyms=synonyms, props=self.props)\n\n def generate_ddf_schema(self, progress_bar=False):\n \"\"\"generate ddf schema from all resources.\n\n Parameters\n ----------\n\n progress_bar : bool\n whether progress bar should be shown when generating ddfSchema.\n\n \"\"\"\n hash_table = {}\n ddf_schema = {'concepts': [], 'entities': [], 'datapoints': [], 'synonyms': []}\n entity_value_cache = dict()\n dtypes = dict()\n\n # check if we need progress bar\n if progress_bar:\n if logger.getEffectiveLevel() == 10: # debug: force not showing progress bar\n logger.warning(\"progress bar will be disabled in debugging mode.\")\n progress_bar = False\n\n # generate set-membership details for every single entity in dataset\n # also create dtypes for later use\n for domain_id, domain in self.ddf.entities.items():\n dtypes[domain_id] = self.entity_domain_to_categorical(domain)\n for s in self.ddf.entities[domain_id].entity_sets:\n dtypes[s] = self.entity_set_to_categorical(domain, s)\n entity_value_cache[domain_id] = dict()\n for ent in domain.entities:\n sets = set()\n sets.add(domain_id)\n for s in ent.sets:\n sets.add(s)\n entity_value_cache[domain_id][ent.id] = tuple(sets)\n\n def _which_sets(entity_, domain_):\n try:\n return 
entity_value_cache[domain_][entity_]\n except KeyError:\n logger.debug('entity {} is not in {} domain!'.format(entity_, domain_))\n raise\n\n def _gen_key_value_object(resource: Resource):\n logger.debug('working on: {}'.format(resource.path))\n if isinstance(resource.schema.primaryKey, str):\n pkeys = [resource.schema.primaryKey]\n else:\n pkeys = resource.schema.primaryKey\n\n entity_cols = [x for x in pkeys\n if x in self.ddf.concepts\n and self.ddf.concepts[x].concept_type in ['entity_domain', 'entity_set']]\n value_cols = resource.schema.common_fields\n data = pd.read_csv(osp.join(self.base_path, resource.path), dtype=dtypes,\n **self._default_reader_options)\n # check if entity columns data match entity defined in entity files\n for c in entity_cols:\n if data[c].hasnans:\n data_ = pd.read_csv(osp.join(self.base_path, resource.path), dtype={c: str}, **self._default_reader_options)\n ents = dtypes[c].categories.values\n ents_ = data_[c].unique()\n diff = set(ents_) - set(ents)\n logger.critical(\"in file {}:\".format(resource.path))\n logger.critical(\"{} column contains entity which does not belong to {} domain/set: {}\".format(c, c, list(diff)))\n raise ValueError(\"entity mismatch\")\n\n # for resources that have entity_columns: only consider all permutations on entity columns\n if len(entity_cols) > 0:\n data = data[entity_cols].drop_duplicates()\n\n pkeys_prop = dict()\n for c in pkeys:\n if c == 'cocnept':\n pkeys_prop[c] = {'type': 'concept'}\n elif c not in self.ddf.concepts:\n pkeys_prop[c] = {'type': 'non_concept'}\n else:\n concept = self.ddf.concepts[c]\n if concept.concept_type == 'entity_set':\n pkeys_prop[c] = {'type': 'entity_set',\n 'domain': concept.props['domain']}\n elif concept.concept_type == 'entity_domain':\n pkeys_prop[c] = {'type': 'entity_domain'}\n else:\n pkeys_prop[c] = {'type': 'others'}\n\n all_permutations = set()\n for _, r in data.iterrows():\n perm = list()\n for c in pkeys:\n if pkeys_prop[c]['type'] == 'entity_set':\n domain = pkeys_prop[c]['domain']\n perm.append(_which_sets(r[c], domain))\n elif pkeys_prop[c]['type'] == 'entity_domain':\n perm.append(_which_sets(r[c], c))\n else:\n perm.append(tuple([c]))\n\n all_permutations.add(tuple(perm))\n\n # if data is empty. 
Just emit an object with primarykey and null value\n if len(all_permutations) == 0:\n obj = {'primaryKey': pkeys, 'value': None, 'resource': resource.name}\n logger.debug('yielding: {}'.format(str(obj)))\n yield obj\n\n for row in all_permutations:\n for perm in product(*row):\n if len(value_cols) > 0:\n for c in value_cols:\n obj = {'primaryKey': list(perm), 'value': c, 'resource': resource.name}\n logger.debug('yielding: {}'.format(str(obj)))\n yield obj\n else:\n obj = {'primaryKey': list(perm), 'value': None, 'resource': resource.name}\n logger.debug('yielding: {}'.format(str(obj)))\n yield obj\n\n def _add_to_schema(resource_schema):\n \"\"\"handle objects generated by ``_gen_key_value_object``\"\"\"\n key = '-'.join(sorted(resource_schema['primaryKey']))\n if not pd.isnull(resource_schema['value']):\n hash_val = key + '--' + resource_schema['value']\n else:\n hash_val = key + '--' + 'nan'\n if hash_val not in hash_table.keys():\n hash_table[hash_val] = {\n 'primaryKey': sorted(resource_schema['primaryKey']),\n 'value': resource_schema['value'],\n 'resources': {resource_schema['resource']}\n }\n else:\n hash_table[hash_val]['resources'].add(resource_schema['resource'])\n\n # make progressbar and run the process to generate schema\n if progress_bar:\n pbar = tqdm(total=len(self.resources))\n\n for g in map(_gen_key_value_object, self.resources):\n if progress_bar:\n pbar.update(1)\n for kvo in g:\n logging.debug(\"adding kvo {}\".format(str(kvo)))\n _add_to_schema(kvo)\n\n if progress_bar:\n pbar.close()\n\n for sch in hash_table.values():\n sch['resources'] = list(sch['resources']) # convert set to list\n sch_object = DDFSchema.from_dict(sch)\n if len(sch['primaryKey']) == 1:\n if sch['primaryKey'][0] == 'concept':\n ddf_schema['concepts'].append(sch_object)\n else:\n ddf_schema['entities'].append(sch_object)\n else:\n if 'synonym' in sch['primaryKey']:\n ddf_schema['synonyms'].append(sch_object)\n else:\n ddf_schema['datapoints'].append(sch_object)\n\n return ddf_schema\n\n def get_ddf_schema(self, update=False):\n if not update and self.ddfSchema is not None:\n return self.ddfSchema\n elif not update and self.ddfSchema is None:\n raise ValueError('No ddfSchema, please use update=True to generate one')\n else:\n self.ddfSchema = self.generate_ddf_schema()\n return self.ddfSchema\n" ]
[ [ "pandas.read_csv", "pandas.isnull", "pandas.api.types.CategoricalDtype" ] ]
837278709/Deep-Learning-Coursera-1
[ "2498a90d3f61ec0876752205066ec95323f83161" ]
[ "Neural Networks and Deep Learning/Week 3/Planar data classification with one hidden layer/planar_utils.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn\nimport sklearn.datasets\nimport sklearn.linear_model\n\ndef plot_decision_boundary(model, X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1\n y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole grid\n Z = model(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.ylabel('x2')\n plt.xlabel('x1')\n plt.scatter(X[0, :], X[1, :], c=y[0], cmap=plt.cm.Spectral)\n \n\ndef sigmoid(x):\n \"\"\"\n Compute the sigmoid of x\n\n Arguments:\n x -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(x)\n \"\"\"\n s = 1/(1+np.exp(-x))\n return s\n\ndef load_planar_dataset():\n np.random.seed(1)\n m = 400 # number of examples\n N = int(m/2) # number of points per class\n D = 2 # dimensionality\n X = np.zeros((m,D)) # data matrix where each row is a single example\n Y = np.zeros((m,1), dtype='uint8') # labels vector (0 for red, 1 for blue)\n a = 4 # maximum ray of the flower\n\n for j in range(2):\n ix = range(N*j,N*(j+1))\n t = np.linspace(j*3.12,(j+1)*3.12,N) + np.random.randn(N)*0.2 # theta\n r = a*np.sin(4*t) + np.random.randn(N)*0.2 # radius\n X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]\n Y[ix] = j\n \n X = X.T\n Y = Y.T\n\n return X, Y\n\ndef load_extra_datasets(): \n N = 200\n noisy_circles = sklearn.datasets.make_circles(n_samples=N, factor=.5, noise=.3)\n noisy_moons = sklearn.datasets.make_moons(n_samples=N, noise=.2)\n blobs = sklearn.datasets.make_blobs(n_samples=N, random_state=5, n_features=2, centers=6)\n gaussian_quantiles = sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.5, n_samples=N, n_features=2, n_classes=2, shuffle=True, random_state=None)\n no_structure = np.random.rand(N, 2), np.random.rand(N, 2)\n \n return noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure" ]
[ [ "sklearn.datasets.make_gaussian_quantiles", "numpy.zeros", "sklearn.datasets.make_moons", "numpy.random.seed", "sklearn.datasets.make_circles", "numpy.exp", "numpy.random.randn", "numpy.arange", "numpy.cos", "numpy.linspace", "matplotlib.pyplot.ylabel", "numpy.random.rand", "numpy.sin", "matplotlib.pyplot.contourf", "sklearn.datasets.make_blobs", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.scatter" ] ]
NREL/PV-DEMICE
[ "6e2938950ff10c37f176f46aeb76c78de609f535" ]
[ "PV_ICE/main.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nMain.py contains the functions to calculate the different quantities of materials\nin each step of the process. Reffer to the diagram on Package-Overview for the \nsteps considered. \n\nSupport functions include Weibull functions for reliability and failure; also, \nfunctions to modify baseline values and evaluate sensitivity to the parameters.\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport os\nimport matplotlib.pyplot as plt\n\ndef read_baseline_material(scenario, material='None', file=None):\n \n if file is None:\n try:\n file = _interactive_load('Select baseline file')\n except:\n raise Exception('Interactive load failed. Tkinter not supported'+\n 'on this system. Try installing X-Quartz and reloading')\n \n\ndef _interactive_load(title=None):\n # Tkinter file picker\n import tkinter\n from tkinter import filedialog\n root = tkinter.Tk()\n root.withdraw() #Start interactive file input\n root.attributes(\"-topmost\", True) #Bring window into foreground\n return filedialog.askopenfilename(parent=root, title=title) #initialdir = data_dir\n\ndef _unitReferences(keyword):\n '''\n Specify units for variable in scenario or materials\n \n Parameters\n ----------\n keyword : str\n String of scenario or material column label\n \n Returns\n -------\n yunits : str\n Unit specific to the keyword provided\n '''\n\n moduleDictionary = {'year': {'unit': 'Years', 'source': 'input'},\n 'new_Installed_Capacity_[MW]': {'unit': 'Power [MW]', 'source':'input'},\n 'mod_eff': {'unit': 'Efficiency $\\eta$ [%]', 'source':'input'},\n 'mod_reliability_t50': {'unit': 'Years' , 'source':'input'},\n 'mod_reliability_t90': {'unit': 'Years', 'source':'input'},\n 'mod_degradation': {'unit': 'Percentage [%]', 'source':'input'},\n 'mod_lifetime': {'unit': 'Years', 'source':'input'},\n 'mod_MFG_eff': {'unit': 'Efficiency $\\eta$ [%]', 'source':'input'},\n 'mod_EOL_collection_eff': {'unit': 'Efficiency $\\eta$ [%]', 'source':'input'},\n 'mod_EOL_collected_recycled': {'unit': 'Percentage [%]', 'source':'input'},\n 'mod_Repair': {'unit': 'Percentage [%]', 'source':'input'},\n 'mod_MerchantTail': {'unit': 'Percentage [%]', 'source':'input'},\n 'mod_Reuse': {'unit': 'Percentage [%]', 'source':'input'},\n 'Area': {'unit': 'm$^2$', 'source': 'generated'},\n 'Cumulative_Area_disposedby_Failure': {'unit': 'm$^2$', 'source': 'generated'},\n 'Cumulative_Area_disposedby_ProjectLifetime': {'unit': 'm$^2$', 'source': 'generated'},\n 'Cumulative_Area_disposed': {'unit': 'm$^2$', 'source': 'generated'},\n 'Cumulative_Active_Area': {'unit': 'm$^2$', 'source': 'generated'},\n 'Installed_Capacity_[W]': {'unit': 'Power [W]', 'source': 'generated'},\n 'EOL_on_Year_0': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_1': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_2': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_3': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_4': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_5': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_6': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_7': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_8': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_9': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_10': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_11': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_12': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_13': {'unit': 'm$^2$', 'source': 
'generated'},\n 'EOL_on_Year_14': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_15': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_16': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_17': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_18': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_19': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_20': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_21': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_22': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_23': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_24': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_25': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_26': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_27': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_28': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_29': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_30': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_31': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_32': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_33': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_34': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_35': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_36': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_37': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_38': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_39': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_40': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_41': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_42': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_43': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_44': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_45': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_46': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_47': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_48': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_49': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_50': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_51': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_52': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_53': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_54': {'unit': 'm$^2$', 'source': 'generated'},\n 'EOL_on_Year_55': {'unit': 'm$^2$', 'source': 'generated'},\n 'EoL_Collected': {'unit': 'm$^2$', 'source': 'generated'},\n 'EoL_NotCollected': {'unit': 'm$^2$', 'source': 'generated'},\n 'EoL_Recycled': {'unit': 'm$^2$', 'source': 'generated'},\n 'EoL_NotRecycled_Landfilled': {'unit': 'm$^2$', 'source': 'generated'}\n }\n\n materialDictionary={'year': {'unit': 'Years', 'source': 'input'},\n 'mat_virgin_eff': {'unit': 'Efficiency $\\eta$ [%]', 'source': 'input'},\n 'mat_massperm2': {'unit': 'Mass [g]', 'source': 'input'},\n 'mat_MFG_eff': {'unit': 'Efficiency $\\eta$ [%]', 'source': 'input'},\n 'mat_MFG_scrap_recycled': {'unit': 'Percentage [%]', 'source': 'input'},\n 'mat_MFG_scrap_Recycled': {'unit': 'Efficiency $\\eta$ [%]', 'source': 'input'},\n 'mat_MFG_scrap_Recycled_into_HQ': {'unit': 'Percentage [%]', 'source': 'input'},\n 'mat_MFG_scrap_Recycled_into_HQ_Reused4MFG': {'unit': 'Percentage [%]', 'source': 'input'},\n 'mat_EOL_collected_Recycled': {'unit': 'Percentage [%]', 
'source': 'input'},\n 'mat_EOL_Recycling_eff': {'unit': 'Efficiency $\\eta$ [%]', 'source': 'input'},\n 'mat_EOL_Recycled_into_HQ': {'unit': 'Percentage [%]', 'source': 'input'},\n 'mat_EOL_RecycledHQ_Reused4MFG': {'unit': 'Percentage [%]', 'source': 'input'},\n 'mat_modules_NotRecycled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_modules_NotCollected': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EOL_sento_Recycling': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EOL_NotRecycled_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EOL_Recycled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EOL_Recycled_Losses_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EOL_Recycled_2_HQ': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EOL_Recycled_2_OQ': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EoL_Recycled_HQ_into_MFG': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_EOL_Recycled_HQ_into_OU': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_UsedinManufacturing': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_Manufacturing_Input': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Scrap': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Scrap_Sentto_Recycling': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Scrap_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Scrap_Recycled_Successfully': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Scrap_Recycled_Losses_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Recycled_into_HQ': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Recycled_into_OQ': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Recycled_HQ_into_MFG': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_MFG_Recycled_HQ_into_OU': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_Virgin_Stock': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_Total_EOL_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_Total_MFG_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_Total_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},\n 'mat_Total_Recycled_OU': {'unit': 'Mass [g]', 'source': 'generated'}\n }\n \n\n if keyword in moduleDictionary.keys():\n yunits = moduleDictionary[keyword]['unit']\n elif keyword in materialDictionary.keys():\n yunits = materialDictionary[keyword]['unit']\n else:\n print(\"Warning: Keyword / Units not Found\")\n yunits = 'UNITS'\n \n return yunits\n \n\ndef distance(s_lat, s_lng, e_lat, e_lng):\n \"\"\"\n # Haversine formula for numpy arrays\n # Author: MalyutinS\n # imported from comment on: https://gist.github.com/rochacbruno/2883505\n # Example: \n # s_lat = 45; s_lng = -110; e_lat=[33, 44]; e_lng = [-115, -140]\n # Returns distance from the source point to the two ending points:\n # r = distance(s_lat, s_lng, e_lat, e_lng)\n # r = array([1402.24996689, 2369.0150434 ])\n #\n \"\"\"\n \n \n # approximate radius of earth in km\n R = 6373.0 \n \n# s_lat = s_lat*np.pi/180.0 \n s_lat = np.deg2rad(s_lat) \n s_lng = np.deg2rad(s_lng) \n e_lat = np.deg2rad(e_lat) \n e_lng = np.deg2rad(e_lng) \n \n d = np.sin((e_lat - s_lat)/2)**2 + np.cos(s_lat)*np.cos(e_lat) * np.sin((e_lng - s_lng)/2)**2\n distance = 2 * R * np.arcsin(np.sqrt(d)) \n \n return distance\n\ndef drivingdistance(origin, destination, APIkey):\n \"\"\"\n Creates call for google-maps api to get driving directions betwen two points.\n \n Input\n -----\n origin: array\n [lat, lon] expected\n destination: array\n [lat, 
lon] expected\n APYkey: str\n String\n \"\"\"\n \n lat1, lon1 = origin\n lat2, lon2 = destination\n \n gm_url = ('https://maps.googleapis.com/maps/api/directions/xml?'+\n 'origin='+str(lat1)+','+str(lon1)+\n '&destination='+str(lat2)+','+str(lon2)+\n '&key='+APIkey)\n\n return gm_url\n \n \n \nclass Simulation:\n \"\"\"\n The ScenarioObj top level class is used to work on Circular Economy scenario objects, \n keep track of filenames, data for module and materials, operations modifying\n the baselines, etc.\n\n Parameters\n ----------\n name : text to append to output files\n nowstr : current date/time string\n path : working directory with circular economy results\n\n Methods\n -------\n __init__ : initialize the object\n _setPath : change the working directory\n\n \"\"\"\n \n def __init__(self, name=None, path=None):\n '''\n initialize ScenarioObj with path of Scenario's baseline of module and materials\n as well as a basename to append to\n\n Parameters\n ----------\n name: string, append temporary and output files with this value\n path: location of Radiance materials and objects\n\n Returns\n -------\n none\n '''\n\n self.path = \"\" # path of working directory\n self.name = \"\" # basename to append\n \n now = datetime.datetime.now()\n self.nowstr = str(now.date())+'_'+str(now.hour)+str(now.minute)+str(now.second)\n\n if path is None:\n self._setPath(os.getcwd())\n else:\n self._setPath(path)\n\n if name is None:\n self.name = self.nowstr # set default filename for output files\n else:\n self.name = name\n\n self.scenario={}\n\n \n def _setPath(self, path):\n \"\"\"\n setPath - move path and working directory\n\n \"\"\"\n self.path = os.path.abspath(path)\n\n print('path = '+ path)\n try:\n os.chdir(self.path)\n except OSError as exc:\n LOGGER.error('Path doesn''t exist: %s' % (path))\n LOGGER.exception(exc)\n raise(exc)\n\n # check for path in the new Radiance directory:\n def _checkPath(path): # create the file structure if it doesn't exist\n if not os.path.exists(path):\n os.makedirs(path)\n print('Making path: '+path)\n \n def createScenario(self, name, file=None):\n \n self.scenario[name] = Scenario(name, file)\n \n\n\n def modifyScenario(self, scenarios, stage, value, start_year=None):\n \n if start_year is None:\n start_year = int(datetime.datetime.now().year)\n \n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n \n selectyears = self.scenario[scenarios[0]].data['year']>start_year\n \n for scen in scenarios:\n self.scenario[scen].data.loc[selectyears, stage] = value\n \n def calculateMassFlow(self, scenarios = None, materials=None, weibullInputParams = None, \n bifacialityfactors = None, reducecapacity = True, debugflag=False):\n '''\n Function takes as input a baseline dataframe already imported, \n with the right number of columns and content.\n It returns the dataframe with all the added calculation columns.\n \n Parameters\n ------------\n weibullInputParams : None\n Dictionary with 'alpha' and 'beta' value for shaping the weibull\n curve. beta is sometimes exchanged with lifetime, for example on\n Irena 2016 values beta = 30. If weibullInputParams = None,\n alfa and beta are calcualted from the t50 and t90 columns on the\n module baseline.\n scenarios : None\n string with the scenario name or list of strings with\n scenarios names to loop over. Must exist on the PV ICE object.\n materials : None\n string with the material name or list of strings with the\n materials names to loop over. 
Must exists on the PV ICE object \n scenario(s) modeled.\n bifacialityfactors : str\n File with bifacialtiy factors for each year under consideration\n \n Returns\n --------\n df: dataframe \n input dataframe with addeds columns for the calculations of recycled,\n collected, waste, installed area, etc. \n \n '''\n \n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n \n for scen in scenarios:\n \n print(\"Working on Scenario: \", scen)\n print(\"********************\")\n df = self.scenario[scen].data\n\n # Constant\n if bifacialityfactors is not None: \n bf = pd.read_csv(bifacialityfactors)\n df['irradiance_stc'] = 1000.0 + bf['bifi']*100.0 # W/m^2 (min. Bifacial STC Increase)\n else:\n df['irradiance_stc'] = 1000.0 # W/m^2\n\n # Renaming and re-scaling\n df['t50'] = df['mod_reliability_t50']\n df['t90'] = df['mod_reliability_t90']\n \n # Calculating Area and Mass\n \n if 'Mass_[MetricTonnes]' in df:\n df['new_Installed_Capacity_[W]'] = 0\n df['new_Installed_Capacity_[MW]'] = 0\n df['Area'] = df['Mass_[MetricTonnes]']\n print(\"Warning, this is for special debuging of Wambach Procedure.\"+\n \"Make sure to use Wambach Module\")\n else:\n df['new_Installed_Capacity_[W]'] = df['new_Installed_Capacity_[MW]']*1e6\n\n if reducecapacity:\n df['Area'] = df['new_Installed_Capacity_[W]']/(df['mod_eff']*0.01)/df['irradiance_stc'] # m^2 \n else:\n df['Area'] = df['new_Installed_Capacity_[W]']/(df['mod_eff']*0.01)/1000.0 # m^2\n \n \n df['Area'] = df['Area'].fillna(0) # Chagne na's to 0s.\n\n # Calculating Wast by Generation by Year, and Cumulative Waste by Year.\n Generation_Disposed_byYear = []\n Generation_Active_byYear= []\n Generation_Power_byYear = []\n weibullParamList = []\n\n df['Cumulative_Area_disposedby_Failure'] = 0\n df['Cumulative_Area_disposedby_ProjectLifetime'] = 0\n df['Cumulative_Area_disposed'] = 0\n df['Repaired_[W]'] = 0\n df['Repaired_Area'] = 0\n df['Cumulative_Active_Area'] = 0\n df['Installed_Capacity_[W]'] = 0\n for generation, row in df.iterrows(): \n #generation is an int 0,1,2,.... 
etc.\n #generation=4\n #row=df.iloc[generation]\n \n if weibullInputParams:\n weibullIParams = weibullInputParams\n elif 'weibull_alpha' in row:\n # \"Weibull Input Params passed internally as a column\"\n weibullIParams = {'alpha': row['weibull_alpha'], 'beta': row['weibull_beta']}\n else:\n # \"Calculating Weibull Params from Modules t50 and T90\"\n t50, t90 = row['t50'], row['t90']\n weibullIParams = weibull_params({t50: 0.50, t90: 0.90}) \n \n f = weibull_cdf(weibullIParams['alpha'], weibullIParams['beta'])\n \n weibullParamList.append(weibullIParams)\n\n x = np.clip(df.index - generation, 0, np.inf)\n cdf = list(map(f, x))\n pdf = [0] + [j - i for i, j in zip(cdf[: -1], cdf[1 :])]\n\n activearea = row['Area']\n if np.isnan(activearea):\n activearea=0\n \n activeareacount = []\n areadisposed_failure = []\n areadisposed_projectlifetime = []\n arearepaired = []\n arearepaired_powergen = []\n areapowergen = []\n active=0\n disposed_projectlifetime=0\n for age in range(len(cdf)):\n disposed_projectlifetime=0\n if x[age] == 0.0:\n activeareacount.append(0)\n areadisposed_failure.append(0)\n areadisposed_projectlifetime.append(0)\n areapowergen.append(0)\n arearepaired.append(0)\n arearepaired_powergen.append(0)\n else:\n active += 1\n activeareaprev = activearea \n activearea = activearea-row['Area']*pdf[age]+row['Area']*pdf[age]*df.iloc[age]['mod_Repair']*0.01 \n# arearepaired_failure = activearea*cdf[age]*df.iloc[age]['mod_Repair']*0.01\n arearepaired_failure = row['Area']*pdf[age]*df.iloc[age]['mod_Repair']*0.01\n\n arearepaired.append(arearepaired_failure)\n arearepaired_powergen.append(arearepaired_failure*row['mod_eff']*0.01*row['irradiance_stc']*(1-row['mod_degradation']*0.01)**active) \n \n areadisposed_failure.append(activeareaprev-activearea)\n if age == int(row['mod_lifetime']+generation):\n activearea_temp = activearea\n activearea = 0+activearea*(df.iloc[age]['mod_MerchantTail']*0.01)\n disposed_projectlifetime = activearea_temp-activearea\n\n activearea2 = 0+disposed_projectlifetime*(df.iloc[age]['mod_Reuse']*0.01) # 12 \n activearea = activearea + activearea2 # 92\n disposed_projectlifetime = disposed_projectlifetime - activearea2 # 8\n\n# activearea = 0+disposed_projectlifetime*(df.iloc[age]['mod_Reuse']*0.01)\n# disposed_projectlifetime = activearea_temp-activearea\n areadisposed_projectlifetime.append(disposed_projectlifetime)\n activeareacount.append(activearea)\n areapowergen.append(activearea*row['mod_eff']*0.01*row['irradiance_stc']*(1-row['mod_degradation']*0.01)**active) \n \n try:\n # becuase the clip starts with 0 for the installation year, identifying installation year\n # and adding initial area\n fixinitialareacount = next((i for i, e in enumerate(x) if e), None) - 1\n activeareacount[fixinitialareacount] = activeareacount[fixinitialareacount]+row['Area'] \n areapowergen[fixinitialareacount] = (areapowergen[fixinitialareacount] + \n row['Area'] * row['mod_eff'] *0.01 * row['irradiance_stc']) \n except:\n # Last value does not have a xclip value of nonzero so it goes\n # to except. 
But it also means the loop finished for the calculations\n # of Lifetime.\n fixinitialareacount = len(cdf)-1\n activeareacount[fixinitialareacount] = activeareacount[fixinitialareacount]+row['Area'] \n areapowergen[fixinitialareacount] = (areapowergen[fixinitialareacount] + \n row['Area'] * row['mod_eff'] *0.01 * row['irradiance_stc']) \n print(\"Finished Area+Power Generation Calculations\")\n \n \n # area_disposed_of_generation_by_year = [element*row['Area'] for element in pdf]\n df['Cumulative_Area_disposedby_Failure'] += areadisposed_failure\n df['Cumulative_Area_disposedby_ProjectLifetime'] += areadisposed_projectlifetime\n df['Cumulative_Area_disposed'] += areadisposed_failure\n df['Cumulative_Area_disposed'] += areadisposed_projectlifetime\n \n \n df['Repaired_[W]'] += arearepaired_powergen\n df['Repaired_Area'] += arearepaired\n df['Cumulative_Active_Area'] += activeareacount\n df['Installed_Capacity_[W]'] += areapowergen\n Generation_Disposed_byYear.append([x + y for x, y in zip(areadisposed_failure, areadisposed_projectlifetime)])\n Generation_Active_byYear.append(activeareacount)\n Generation_Power_byYear.append(areapowergen)\n \n \n df['WeibullParams'] = weibullParamList\n MatrixDisposalbyYear = pd.DataFrame(Generation_Disposed_byYear, columns = df.index, index = df.index)\n MatrixDisposalbyYear = MatrixDisposalbyYear.add_prefix(\"EOL_on_Year_\")\n \n try:\n df = df[df.columns.drop(list(df.filter(regex='EOL_on_Year_')))]\n except:\n print(\"Warning: Issue dropping EOL columns generated by \" \\\n \"calculateMFC routine to overwrite\")\n \n df = df.join(MatrixDisposalbyYear)\n\n \n ## Start to do EOL Processes\n ############################\n \n filter_col = [col for col in df if col.startswith('EOL_on_Year_')]\n EOL = df[filter_col]\n \n # This Multiplication pattern goes through Module and then material.\n # It is for processes that depend on each year as they improve, i.e. \n # Collection Efficiency,\n #\n # [ G1_1 G1_2 G1_3 G2_4 ...] [N1\n # [ 0 G2_1 G2_2 G2_3 ...] X N2\n # [ 0 0 G3_1 G3_2 ...] N3\n # N4]\n #\n # EQUAL\n # EOL_Collected =\n # [ G1_1*N1 G1_2 *N2 G1_3 *N3 G2_4 *N4 ...]\n # [ 0 G2_1 *N2 G2_2 *N3 G2_3 *N4 ...]\n # [ 0 0 G3_1 *N3 G3_2 *N4 ...] 
\n #\n \n EOL_Collected = EOL.mul(df['mod_EOL_collection_eff'].values*0.01)\n df['EoL_Collected'] = list(EOL_Collected.sum())\n landfill_Collection = EOL.mul(1-(df['mod_EOL_collection_eff'].values*0.01)) \n df['EoL_NotCollected'] = list(landfill_Collection.sum())\n \n EOL_Recycled = EOL_Collected.mul(df['mod_EOL_collected_recycled'].values*0.01)\n df['EoL_Recycled'] = list(EOL_Recycled.sum())\n EOL_NotRecycled_Landfilled = EOL_Collected.mul((1-df['mod_EOL_collected_recycled'].values*0.01))\n df['EoL_NotRecycled_Landfilled'] = list(EOL_NotRecycled_Landfilled.sum())\n \n # Cleanup of internal renaming and internal use columns\n df.drop(['new_Installed_Capacity_[W]', 't50', 't90'], axis = 1, inplace=True) \n \n df['ModuleTotal_MFG']=df['Area']*100/df['mod_MFG_eff']\n \n self.scenario[scen].data = df\n \n # collection losses here\n \n # Recyle % here\n \n \n ################\n # Material Loop#\n ################\n\n if materials is None:\n materials = list(self.scenario[scenarios[0]].material.keys())\n else:\n if isinstance(materials, str):\n materials = [materials]\n \n for mat in materials:\n\n print(\"==> Working on Material : \", mat)\n\n dm = self.scenario[scen].material[mat].materialdata\n \n # SWITCH TO MASS UNITS FOR THE MATERILA NOW:\n # THIS IS DIFFERENT MULTIPLICATION THAN THE REST\n # BECAUSE IT DEPENDS TO THE ORIGINAL MASS OF EACH MODULE WHEN INSTALLED\n # [M1 * [ G1_1 G1_2 G1_3 G2_4 ...]\n # M2 [ 0 G2_1 G2_2 G2_3 ...]\n # M3] [ 0 0 G3_1 G3_2 ...]\n # \n # EQUAL\n # mat_EOL_sentoRecycling = \n # [ G1_1*M1 G1_2*M1 G1_3*M1 G2_4*M1 ...]\n # [ 0 G2_1*M2 G2_2*M2 G2_3*M2 ...]\n # [ 0 0 G3_1*M3 G3_2*M3 ...]\n #\n \n mat_modules_EOL_sentoRecycling = EOL_Recycled.multiply(dm['mat_massperm2'], axis=0)\n dm['mat_modules_Collected'] = list(EOL_Collected.multiply(dm['mat_massperm2'], axis=0).sum())\n dm['mat_modules_NotCollected'] = list(landfill_Collection.multiply(dm['mat_massperm2'], axis=0).sum())\n dm['mat_modules_Recycled'] = list(EOL_Recycled.multiply(dm['mat_massperm2'], axis=0).sum())\n dm['mat_modules_NotRecycled'] = list(EOL_NotRecycled_Landfilled.multiply(dm['mat_massperm2'], axis=0).sum())\n \n \n # mat_EOL_collected_Recycled CHANGE NAME\n # chnge also landfill_material_EOL_NotRecycled_Landfilled \n mat_EOL_sento_Recycling = mat_modules_EOL_sentoRecycling.mul(dm['mat_EOL_collected_Recycled'].values*0.01)\n dm['mat_EOL_sento_Recycling'] = list(mat_EOL_sento_Recycling.sum())\n landfill_material_EOL_NotRecycled_Landfilled = mat_modules_EOL_sentoRecycling.mul(1-(dm['mat_EOL_collected_Recycled'].values*0.01))\n dm['mat_EOL_NotRecycled_Landfilled'] = list(landfill_material_EOL_NotRecycled_Landfilled.sum())\n \n mat_EOL_Recycled_Succesfully = mat_EOL_sento_Recycling.mul(dm['mat_EOL_Recycling_eff'].values*0.01)\n dm['mat_EOL_Recycled'] = list(mat_EOL_Recycled_Succesfully.sum())\n landfill_material_EOL_Recyled_Losses_Landfilled = mat_EOL_sento_Recycling.mul(1-(dm['mat_EOL_Recycling_eff'].values*0.01))\n dm['mat_EOL_Recycled_Losses_Landfilled'] = list(landfill_material_EOL_Recyled_Losses_Landfilled.sum())\n \n \n mat_EOL_Recycled_HQ = mat_EOL_Recycled_Succesfully.mul(dm['mat_EOL_Recycled_into_HQ'].values*0.01)\n dm['mat_EOL_Recycled_2_HQ'] = list(mat_EOL_Recycled_HQ.sum())\n mat_EOL_Recycled_OQ = mat_EOL_Recycled_Succesfully.mul(1-(dm['mat_EOL_Recycled_into_HQ'].values*0.01))\n dm['mat_EOL_Recycled_2_OQ'] = list(mat_EOL_Recycled_OQ.sum())\n \n mat_EOL_Recycled_HQ_into_MFG = mat_EOL_Recycled_HQ.mul(dm['mat_EOL_RecycledHQ_Reused4MFG'].values*0.01)\n dm['mat_EoL_Recycled_HQ_into_MFG'] = 
list(mat_EOL_Recycled_HQ_into_MFG.sum())\n mat_EOL_Recycled_HQ_into_OU = mat_EOL_Recycled_HQ.mul(1-(dm['mat_EOL_RecycledHQ_Reused4MFG'].values*0.01))\n dm['mat_EOL_Recycled_HQ_into_OU'] = list(mat_EOL_Recycled_HQ_into_OU.sum())\n \n # BULK Calculations Now\n dm['mat_UsedSuccessfullyinModuleManufacturing'] = (df['Area'] * dm['mat_massperm2'])\n dm['mat_EnteringModuleManufacturing'] = (df['Area'] * dm['mat_massperm2']*100/df['mod_MFG_eff'])\n dm['mat_LostinModuleManufacturing'] = dm['mat_EnteringModuleManufacturing'] - dm['mat_UsedSuccessfullyinModuleManufacturing']\n \n dm['mat_Manufacturing_Input'] = dm['mat_EnteringModuleManufacturing'] / (dm['mat_MFG_eff'] * 0.01)\n \n # Scrap = Lost to Material manufacturing losses + Module manufacturing losses\n dm['mat_MFG_Scrap'] = (dm['mat_Manufacturing_Input'] - dm['mat_EnteringModuleManufacturing'] + \n dm['mat_LostinModuleManufacturing'])\n dm['mat_MFG_Scrap_Sentto_Recycling'] = dm['mat_MFG_Scrap'] * dm['mat_MFG_scrap_Recycled'] * 0.01\n \n \n \n dm['mat_MFG_Scrap_Landfilled'] = dm['mat_MFG_Scrap'] - dm['mat_MFG_Scrap_Sentto_Recycling'] \n dm['mat_MFG_Scrap_Recycled_Successfully'] = (dm['mat_MFG_Scrap_Sentto_Recycling'] *\n dm['mat_MFG_scrap_Recycling_eff'] * 0.01)\n dm['mat_MFG_Scrap_Recycled_Losses_Landfilled'] = (dm['mat_MFG_Scrap_Sentto_Recycling'] - \n dm['mat_MFG_Scrap_Recycled_Successfully'])\n dm['mat_MFG_Recycled_into_HQ'] = (dm['mat_MFG_Scrap_Recycled_Successfully'] * \n dm['mat_MFG_scrap_Recycled_into_HQ'] * 0.01)\n dm['mat_MFG_Recycled_into_OQ'] = dm['mat_MFG_Scrap_Recycled_Successfully'] - dm['mat_MFG_Recycled_into_HQ']\n dm['mat_MFG_Recycled_HQ_into_MFG'] = (dm['mat_MFG_Recycled_into_HQ'] * \n dm['mat_MFG_scrap_Recycled_into_HQ_Reused4MFG'] * 0.01)\n dm['mat_MFG_Recycled_HQ_into_OU'] = dm['mat_MFG_Recycled_into_HQ'] - dm['mat_MFG_Recycled_HQ_into_MFG']\n dm['mat_Virgin_Stock'] = dm['mat_Manufacturing_Input'] - dm['mat_EoL_Recycled_HQ_into_MFG'] - dm['mat_MFG_Recycled_HQ_into_MFG']\n \n # Calculate raw virgin needs before mining and refining efficiency losses\n dm['mat_Virgin_Stock_Raw'] = (dm['mat_Virgin_Stock'] * 100 / dm['mat_virgin_eff'])\n\n # Add Wastes\n dm['mat_Total_EOL_Landfilled'] = (dm['mat_modules_NotCollected'] + \n dm['mat_modules_NotRecycled'] +\n dm['mat_EOL_NotRecycled_Landfilled'] +\n dm['mat_EOL_Recycled_Losses_Landfilled']) \n \n dm['mat_Total_MFG_Landfilled'] = (dm['mat_MFG_Scrap_Landfilled'] + \n dm['mat_MFG_Scrap_Recycled_Losses_Landfilled'])\n \n dm['mat_Total_Landfilled'] = (dm['mat_Total_EOL_Landfilled'] + \n dm['mat_Total_MFG_Landfilled'])\n \n dm['mat_Total_Recycled_OU'] = (dm['mat_EOL_Recycled_2_OQ'] + \n dm['mat_EOL_Recycled_HQ_into_OU'] + \n dm['mat_MFG_Recycled_into_OQ'] + \n dm['mat_MFG_Recycled_HQ_into_OU'])\n \n \n self.scenario[scen].material[mat].materialdata = dm\n\n \n def scenMod_IRENIFY(self, scenarios=None, ELorRL='RL'):\n \n if ELorRL == 'RL':\n weibullInputParams = {'alpha': 5.3759, 'beta': 30} # Regular-loss scenario IRENA\n print(\"Using Irena Regular Loss Assumptions\")\n if ELorRL == 'EL':\n weibullInputParams = {'alpha': 2.4928, 'beta': 30} # Regular-loss scenario IRENA\n print(\"Using Irena Early Loss Assumptions\")\n \n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n\n for scen in scenarios:\n self.scenario[scen].data['weibull_alpha'] = weibullInputParams['alpha']\n self.scenario[scen].data['weibull_beta'] = weibullInputParams['beta']\n self.scenario[scen].data['mod_lifetime'] = 40.0\n 
self.scenario[scen].data['mod_MFG_eff'] = 100.0\n \n for mat in self.scenario[scen].material:\n self.scenario[scen].material[mat].materialdata['mat_MFG_eff'] = 100.0 \n self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled'] = 0.0 \n \n return\n\n\n def check_Years_dataandMaterials(self, scenarios=None, materials=None):\n '''\n '''\n print (\"Not Done\")\n\n def trim_Years( self, startYear=None, endYear=None, aggregateInstalls=False, \n averageEfficiency=False, averageMaterialData = False, methodAddedYears='repeat', \n scenarios=None, materials=None):\n '''\n \n methodStart : str\n 'trim' or 'aggregate'. Trim cuts the values before the year specified.\n Aggregate sums the values (if any) up to the year specified and sets it\n in that year. No backfilling of data enabled at the moment.\n methodEnd : str\n 'repeat' or 'zeroes' only options at the moment. \n 'repeat' Increases to the endYear by repeating the last value. \n zeroes places zeroes.\n \n '''\n\n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n\n scen0 = scenarios[0]\n dataStartYear = int(self.scenario[scen0].data.iloc[0]['year'])\n dataEndYear = int(self.scenario[scen0].data.iloc[-1]['year'])\n\n if startYear is None:\n startYear = dataStartYear\n print(\"startYear not provided. Setting to start year of Module data\", startYear)\n\n if endYear is None:\n endYear = dataEndYear\n print(\"endYear not provided. Setting to end year of Module data\", endYear)\n\n startYear = startYear\n endYear = endYear\n\n\n for scen in scenarios:\n baseline = self.scenario[scen].data\n \n if int(startYear) < int(dataStartYear):\n print(\"ADD YEARS HERE. not done yet\")\n\n if int(endYear) > int(dataEndYear):\n print(\"ADD YEARS HERE. not done yet\")\n\n # Add check if data does not need to be reduced to not do these.\n reduced = baseline.loc[(baseline['year']>=startYear) & (baseline['year']<=endYear)].copy()\n\n if aggregateInstalls:\n prev = baseline.loc[(baseline['year']<startYear)].sum()\n reduced.loc[reduced['year'] == startYear, 'new_Installed_Capacity_[MW]'] = prev['new_Installed_Capacity_[MW]']\n \n if averageEfficiency:\n prev = baseline.loc[(baseline['year']<startYear)].mean()\n reduced.loc[reduced['year'] == startYear, 'mod_eff\t'] = prev['mod_eff\t']\n \n reduced.reset_index(drop=True, inplace=True)\n self.scenario[scen].data = reduced #reassign the material data to the simulation\n\n for mat in self.scenario[scen].material:\n if int(startYear) < int(dataStartYear):\n print(\"ADD YEARS HERE. not done yet\")\n \n if int(endYear) > int(dataEndYear):\n print(\"ADD YEARS HERE. not done yet\")\n \n matdf = self.scenario[scen].material[mat].materialdata #pull out the df\n reduced = matdf.loc[(matdf['year']>=startYear) & (matdf['year']<=endYear)].copy()\n \n if averageMaterialData == 'average':\n prev = matdf.loc[(baseline['year']<startYear)].mean()\n matkeys = list(reduced.keys())[1:12]\n for matkey in matkeys: # skipping year (0). 
Skipping added columsn from mass flow\n reduced.loc[reduced['year'] == startYear, matkey] = prev[matkey]\n \n reduced.reset_index(drop=True, inplace=True)\n self.scenario[scen].material[mat].materialdata = reduced #reassign the material data to the simulation\n \n\n def scenMod_IRENIFY(self, scenarios=None, ELorRL='RL'):\n \n if ELorRL == 'RL':\n weibullInputParams = {'alpha': 5.3759, 'beta': 30} # Regular-loss scenario IRENA\n print(\"Using Irena Regular Loss Assumptions\")\n if ELorRL == 'EL':\n weibullInputParams = {'alpha': 2.4928, 'beta': 30} # Regular-loss scenario IRENA\n print(\"Using Irena Early Loss Assumptions\")\n \n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n\n for scen in scenarios:\n self.scenario[scen].data['weibull_alpha'] = weibullInputParams['alpha']\n self.scenario[scen].data['weibull_beta'] = weibullInputParams['beta']\n self.scenario[scen].data['mod_lifetime'] = 40.0\n self.scenario[scen].data['mod_MFG_eff'] = 100.0\n \n for mat in self.scenario[scen].material:\n self.scenario[scen].material[mat].materialdata['mat_MFG_eff'] = 100.0 \n self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled'] = 0.0 \n \n return\n\n\n\n def scenMod_PerfectManufacturing(self, scenarios=None):\n \n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n\n for scen in scenarios:\n self.scenario[scen].data['mod_MFG_eff'] = 100.0\n \n for mat in self.scenario[scen].material:\n self.scenario[scen].material[mat].materialdata['mat_virgin_eff'] = 100.0 \n self.scenario[scen].material[mat].materialdata['mat_MFG_eff'] = 100.0 \n return\n\n def scenMod_noCircularity(self, scenarios=None):\n \n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n\n for scen in scenarios:\n self.scenario[scen].data['mod_EOL_collection_eff '] = 0.0\n self.scenario[scen].data['mod_EOL_collected_recycled'] = 0.0\n self.scenario[scen].data['mod_Repair'] = 0.0\n self.scenario[scen].data['mod_MerchantTail'] = 0.0\n self.scenario[scen].data['mod_Reuse'] = 0.0\n\n for mat in self.scenario[scen].material:\n self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled'] = 0.0 \n self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycling_eff'] = 0.0 \n self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled_into_HQ'] = 0.0 \n self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled_into_HQ_Reused4MFG'] = 0.0 \n\n self.scenario[scen].material[mat].materialdata['mat_EOL_collected_Recycled'] = 0.0 \n self.scenario[scen].material[mat].materialdata['mat_EOL_Recycling_eff'] = 0.0 \n self.scenario[scen].material[mat].materialdata['mat_EOL_Recycled_into_HQ'] = 0.0 \n self.scenario[scen].material[mat].materialdata['mat_EOL_RecycledHQ_Reused4MFG'] = 0.0 \n\n\n return \n\n def aggregateResults(self, scenarios=None, materials=None):\n\n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n\n if materials is None:\n materials = list(self.scenario[scenarios[0]].material.keys())\n else:\n if isinstance(materials, str):\n materials = [materials]\n\n keywds = ['mat_Virgin_Stock', 'mat_Total_Landfilled', 'mat_Total_EOL_Landfilled', 'mat_Total_MFG_Landfilled']\n nice_keywds = ['VirginStock', 'WasteAll', 'WasteEOL', 'WasteMFG']\n\n USyearly=pd.DataFrame()\n\n for 
scen in scenarios:\n for ii in range(len(keywds)):\n keywd = keywds[ii]\n nicekey = nice_keywds[ii]\n\n for mat in materials:\n USyearly[nicekey+'_'+mat+'_'+self.name+'_'+scen] = self.scenario[scen].material[mat].materialdata[keywd]\n filter_col = [col for col in USyearly if (col.startswith(nicekey) and col.endswith(self.name+'_'+scen)) ]\n USyearly[nicekey+'_Module_'+self.name+'_'+scen] = USyearly[filter_col].sum(axis=1)\n # 2DO: Add multiple objects option\n\n \n USyearly = USyearly/1000000 # This is the ratio for grams to Metric tonnes\n USyearly = USyearly.add_suffix('_[Tonnes]')\n \n # Different units, so no need to do the ratio to Metric tonnes :p\n keywd1='new_Installed_Capacity_[MW]'\n \n for scen in scenarios:\n USyearly['newInstalledCapacity_'+self.name+'_'+scen+'_[MW]'] = self.scenario[scen].data[keywd1]\n \n # Creating c umulative results\n UScum = USyearly.copy()\n UScum = UScum.cumsum()\n \n # Adding Installed Capacity to US (This is already 'Cumulative') so not including it in UScum\n # We are also renaming it to 'ActiveCapacity' and calculating Decommisioned Capacity. \n # TODO: Rename Installed_CApacity to ActiveCapacity throughout.\n keywd='Installed_Capacity_[W]' \n for scen in scenarios:\n USyearly['ActiveCapacity_'+self.name+'_'+scen+'_[MW]'] = self.scenario[scen].data[keywd]/1e6\n USyearly['DecommisionedCapacity_'+self.name+'_'+scen+'_[MW]'] = (\n UScum['newInstalledCapacity_'+self.name+'_'+scen+'_[MW]']-\n USyearly['ActiveCapacity_'+self.name+'_'+scen+'_[MW]'])\n\n # Adding Decommissioned Capacity\n\n # Reindexing and Merging\n USyearly.index = self.scenario[scen].data['year']\n UScum.index = self.scenario[scen].data['year']\n \n self.USyearly = USyearly\n self.UScum = UScum\n \n return USyearly, UScum\n \n def plotScenariosComparison(self, keyword=None, scenarios=None):\n\n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n \n if keyword is None:\n scens = list(self.scenario.keys())[0]\n print(\"Choose one of the keywords: \", list(self.scenario[scens].data.keys())) \n return\n \n yunits = _unitReferences(keyword)\n \n plt.figure()\n \n for scen in scenarios:\n plt.plot(self.scenario[scen].data['year'],self.scenario[scen].data[keyword], label=scen)\n plt.legend()\n plt.xlabel('Year')\n plt.title(keyword.replace('_', \" \"))\n plt.ylabel(yunits) \n\n\n def plotMetricResults(self):\n from plotly.subplots import make_subplots\n # import plotly.graph_objects as go\n\n \n y1 = self.plotMaterialResults(keyword='VirginStock', yearlyorcumulative='yearly') \n y2 = self.plotMaterialResults(keyword='WasteAll', yearlyorcumulative='yearly')\n y3 = self.plotMaterialResults(keyword='WasteEOL', yearlyorcumulative='yearly')\n y4 = self.plotMaterialResults(keyword='WasteMFG', yearlyorcumulative='yearly')\n c1 = self.plotMaterialResults(keyword='VirginStock', yearlyorcumulative='cumulative')\n c2 = self.plotMaterialResults(keyword='WasteAll', yearlyorcumulative='cumulative')\n c3 = self.plotMaterialResults(keyword='WasteEOL', yearlyorcumulative='cumulative')\n c4 = self.plotMaterialResults(keyword='WasteMFG', yearlyorcumulative='cumulative')\n ic = self.plotInstalledCapacityResults()\n \n def plotMaterialResults(self, keyword, yearlyorcumulative='yearly', cumplot=False):\n import plotly.express as px\n import re\n \n if yearlyorcumulative == 'yearly':\n data = self.USyearly\n else:\n data = self.UScum\n\n if keyword is None:\n print(\"keyword options are :\" 'VirginStock', 'WasteALL', 'WasteEOL', 
'WasteMFG')\n return\n #TODO: add a split to first bracket and print unique values option and return.\n \n filter_col = [col for col in data if col.startswith(keyword)]\n \n # Getting Title, Y-Axis Labels, and Legend Readable\n titlekeyword = str.capitalize(yearlyorcumulative) + re.sub( r\"([A-Z])\", r\" \\1\", keyword)\n units = filter_col[0].split('_')[-1]\n \n mylegend = [col.split('_')[1:] for col in filter_col]\n mylegend = [col[:-1] for col in mylegend]\n mylegend = [' '.join(col) for col in mylegend]\n mylegend = [str.capitalize(col) for col in mylegend]\n\n fig = px.line(data[filter_col], template=\"plotly_white\")\n \n fig.update_layout(\n title=titlekeyword,\n xaxis_title=\"Year\", \n yaxis_title=units\n )\n \n for idx, name in enumerate(mylegend):\n fig.data[idx].name = name\n fig.data[idx].hovertemplate = name\n \n if cumplot:\n return fig\n else:\n fig.show() \n return\n \n def plotInstalledCapacityResults(self, cumplot=False):\n # TODO: Add scenarios input to subselect which ones to plot.\n\n import plotly.express as px\n \n datay = self.USyearly\n datac = self.UScum\n \n filter_colc = [col for col in datac if col.startswith('newInstalledCapacity')]\n filter_coly = [col for col in datay if col.startswith('Capacity')]\n\n datay = datay[filter_coly].copy()\n mylegend = [col.split('_')[1:] for col in datay]\n mylegend = [col[:-1] for col in mylegend]\n mylegend = [str(col)[2:-2] for col in mylegend]\n mylegendy = ['Cumulative New Installs, '+col for col in mylegend]\n\n print(mylegend)\n \n datac = datac[filter_colc].copy()\n mylegend = [col.split('_')[1:] for col in datac]\n mylegend = [col[:-1] for col in mylegend]\n mylegend = [str(col)[2:-2] for col in mylegend]\n mylegendc = ['Capacity, '+col for col in mylegend]\n\n data = datay.join(datac)\n mylegend = mylegendy + mylegendc\n \n titlekeyword = 'Installed Capacity and Cumulative new Installs'\n\n \n # Getting Title, Y-Axis Labels, and Legend Readable\n units = filter_colc[0].split('_')[-1]\n \n\n \n fig = px.line(data, template=\"plotly_white\")\n \n fig.update_layout(\n title=titlekeyword,\n xaxis_title=\"Year\", \n yaxis_title=units\n )\n \n for idx, name in enumerate(mylegend):\n fig.data[idx].name = name\n fig.data[idx].hovertemplate = name\n \n if cumplot:\n return fig\n else:\n fig.show() \n return\n \n\n def plotMaterialComparisonAcrossScenarios(self, keyword=None, scenarios=None, material = None):\n\n if scenarios is None:\n scenarios = list(self.scenario.keys())\n else:\n if isinstance(scenarios, str):\n scenarios = [scenarios]\n \n if keyword is None:\n scens = list(self.scenario.keys())[0]\n mats = list(self.scenario[scens].material.keys())[0]\n print(\"Choose one of the keywords: \", list(self.scenario[scens].material[mats].materialdata.keys())) \n return\n\n\n if material is None:\n scens = list(self.scenario.keys())[0]\n mats = list(self.scenario[scens].material.keys())\n print(\"Choose one of the Materials: \", mats) \n return\n else:\n if isinstance(material, str) is False: \n mats = list(self.scenario[scens].material.keys())\n print(\"Can only pass one material name (str). 
Choose one of the Materials: \", mats) \n return\n\n yunits = _unitReferences(keyword)\n\n plt.figure()\n \n for scen in scenarios:\n plt.plot(self.scenario[scen].data['year'], self.scenario[scen].material[material].materialdata[keyword], label=scen)\n plt.legend()\n \n plt.xlabel('Year')\n plt.title((material + ' ' + keyword.replace('_', \" \")))\n plt.ylabel(yunits) \n \n \nclass Scenario(Simulation):\n \n def __init__(self, name, file=None):\n self.name = name\n self.material = {}\n \n if file is None:\n try:\n file = _interactive_load('Select module baseline file')\n except:\n raise Exception('Interactive load failed. Tkinter not supported'+\n 'on this system. Try installing X-Quartz and reloading')\n \n csvdata = open(str(file), 'r', encoding=\"UTF-8\")\n csvdata = open(str(file), 'r', encoding=\"UTF-8-sig\")\n firstline = csvdata.readline()\n secondline = csvdata.readline()\n\n head = firstline.rstrip('\\n').split(\",\")\n meta = dict(zip(head, secondline.rstrip('\\n').split(\",\")))\n\n data = pd.read_csv(csvdata, names=head)\n data.loc[:, data.columns != 'year'] = data.loc[:, data.columns != 'year'].astype(float)\n self.baselinefile = file\n self.metdata = meta,\n self.data = data\n \n def addMaterial(self, materialname, file=None):\n self.material[materialname] = Material(materialname, file)\n\n def addMaterials(self, materials, baselinefolder=None, nameformat=None):\n \n if baselinefolder is None:\n baselinefolder = r'..\\..\\baselines' \n\n if nameformat is None:\n nameformat = r'\\baseline_material_{}.csv'\n for mat in materials:\n filemat = baselinefolder + nameformat.format(mat)\n self.material[mat] = Material(mat, filemat)\n \n \n def modifyMaterials(self, materials, stage, value, start_year=None):\n \n if start_year is None:\n start_year = int(datetime.datetime.now().year)\n \n if materials is None:\n materials = list(self.material.keys())\n else:\n if isinstance(materials, str):\n materials = [materials]\n\n selectyears = self.data['year']>start_year\n \n for mat in materials:\n self.material[mat].materialdata.loc[selectyears, stage] = value\n\n\n def __getitem__(self, key):\n return getattr(self, key)\n\n def __setitem__(self, key):\n return setattr(self, key)\n\nclass Material:\n def __init__(self, materialname, file):\n self.materialname = materialname\n \n if file is None:\n try:\n file = _interactive_load('Select material baseline file')\n except:\n raise Exception('Interactive load failed. Tkinter not supported'+\n 'on this system. Try installing X-Quartz and reloading')\n \n csvdata = open(str(file), 'r', encoding=\"UTF-8\")\n csvdata = open(str(file), 'r', encoding=\"UTF-8-sig\")\n firstline = csvdata.readline()\n secondline = csvdata.readline()\n\n head = firstline.rstrip('\\n').split(\",\")\n meta = dict(zip(head, secondline.rstrip('\\n').split(\",\")))\n\n data = pd.read_csv(csvdata, names=head)\n data.loc[:, data.columns != 'year'] = data.loc[:, data.columns != 'year'].astype(float)\n self.materialfile = file\n self.materialmetdata = meta\n self.materialdata = data\n\n\ndef weibull_params(keypoints):\n r'''Returns shape parameter `alpha` and scale parameter `beta`\n for a Weibull distribution whose CDF passes through the\n two time: value pairs in `keypoints`\n\n Parameters\n ----------\n keypoints : list\n Two lists of t50 and 590 values, where t50 is the year since deployment\n that the cohort has lost 50% of originally installed modules, and t90 \n is the year since deployment that the cohort has lost 90% of the originally\n installed modules. 
These values are used to calcualte the shape and scale \n parameters for the weibull distribution.\n \n Returns\n -------\n alpha : float\n Shape parameter `alpha` for weibull distribution.\n beta : float\n Scale parameter `beta` for weibull distribution. Often exchanged with ``lifetime``\n like in Irena 2016, beta = 30.\n \n '''\n \n t1, t2 = tuple(keypoints.keys())\n cdf1, cdf2 = tuple(keypoints.values())\n alpha = np.ndarray.item(np.real_if_close(\n (np.log(np.log(1 - cdf1)+0j) - np.log(np.log(1 - cdf2)+0j))/(np.log(t1) - np.log(t2))\n ))\n beta = np.abs(np.exp(\n (\n np.log(t2)*((0+1j)*np.pi + np.log(np.log(1 - cdf1)+0j))\n + np.log(t1)*(((0-1j))*np.pi - np.log(np.log(1 - cdf2)+0j))\n )/(\n np.log(np.log(1 - cdf1)+0j) - np.log(np.log(1 - cdf2)+0j)\n )\n ))\n return {'alpha': alpha, 'beta': beta}\n\ndef weibull_cdf(alpha, beta):\n '''Return the CDF for a Weibull distribution having:\n shape parameter `alpha`\n scale parameter `beta`\n \n Parameters\n ----------\n alpha : float\n Shape parameter `alpha` for weibull distribution.\n beta : float\n Scale parameter `beta` for weibull distribution. Often exchanged with ``lifetime``\n like in Irena 2016, beta = 30.\n \n '''\n \n def cdf(x):\n return 1 - np.exp(-(np.array(x)/beta)**alpha)\n return cdf\n\ndef weibull_pdf(alpha, beta):\n r'''Return the PDF for a Weibull distribution having:\n shape parameter `alpha`\n scale parameter `beta`\n \n Parameters\n ----------\n alpha : float\n Shape parameter `alpha` for weibull distribution.\n beta : float\n Scale parameter `beta` for weibull distribution. Often exchanged with ``lifetime``\n like in Irena 2016, beta = 30.\n \n '''\n \n def pdf(x):\n return (alpha/np.array(x)) * ((np.array(x)/beta)**alpha) * (np.exp(-(np.array(x)/beta)**alpha))\n \n return pdf\n\ndef weibull_pdf_vis(alpha, beta, xlim=56):\n r''' Returns the CDF for a weibull distribution of 1 generation\n so it can be plotted.\n \n Parameters\n ----------\n alpha : float\n Shape parameter `alpha` for weibull distribution.\n beta : float\n Scale parameter `beta` for weibull distribution. Often exchanged with ``lifetime``\n like in Irena 2016, beta = 30.\n xlim : int\n Number of years to calculate the distribution for. i.e. x-axis limit. \n\n Returns\n -------\n idf : list\n List of weibull cumulative distribution values for year 0 until xlim.\n\n '''\n\n dfindex = pd.RangeIndex(0,xlim,1)\n x = np.clip(dfindex - 0, 0, np.inf)\n\n if alpha and beta:\n i = weibull_pdf(alpha, beta)\n \n idf = list(map(i, x))\n \n return idf\n\n\ndef weibull_cdf_vis(alpha, beta, xlim=56):\n r''' Returns the CDF for a weibull distribution of 1 generation\n so it can be plotted.\n \n Parameters\n ----------\n alpha : float\n Shape parameter `alpha` for weibull distribution.\n beta : float\n Scale parameter `beta` for weibull distribution. Often exchanged with ``lifetime``\n like in Irena 2016, beta = 30.\n xlim : int\n Number of years to calculate the distribution for. i.e. x-axis limit. \n\n Returns\n -------\n idf : list\n List of weibull cumulative distribution values for year 0 until xlim.\n\n '''\n\n dfindex = pd.RangeIndex(0,xlim,1)\n x = np.clip(dfindex - 0, 0, np.inf)\n\n if alpha and beta:\n i = weibull_cdf(alpha, beta)\n \n idf = list(map(i, x))\n \n return idf\n\n \ndef sens_StageImprovement(df, stage, improvement=1.3, start_year=None):\n '''\n Modifies baseline scenario for evaluating sensitivity of lifetime parameter.\n t50 and t90 reliability years get incresed by `improvement` parameter\n starting the `year_increase` year specified. 
\n \n Parameters\n ----------\n df : dataframe\n dataframe to be modified\n stage : str\n Stage that wants to be modified. This can be any of the module or \n material specified values, for example:'MFG_Material_eff', \n 'mat_MFG_scrap_recycled', 'mat_MFG_scrap_Recycled', \n 'mat_MFG_scrap_Recycled_into_HQ', 'mat_MFG_scrap_Recycled_into_HQ_Reused4MFG'\n 'mod_EOL_collection_losses', 'mod_EOL_collected_recycled',\n 'mat_EOL_Recycling_eff', 'mat_EOL_Recycled_into_HQ', \n 'mat_EOL_RecycledHQ_Reused4MFG', 'mod_Repair',\n 'mod_MerchantTail', 'mod_Reuse', 'mod_eff', etc.\n improvement : decimal\n Percent increase in decimal (i.e. \"1.3\" for 30% increase in value) \n or percent decrease (i.e. \"0.3\") relative to values in df.\n start_year : \n the year at which the improvement occurs\n \n Returns\n --------\n df : dataframe\n dataframe of expected module lifetime increased or decreased at specified year\n '''\n\n\n if start_year is None:\n start_year = int(datetime.datetime.now().year)\n\n #df[df.index > 2000]['mod_reliability_t50'].apply(lambda x: x*1.3)\n df[stage] = df[stage].astype(float)\n df.loc[df.index > start_year, stage] = df[df.index > start_year][stage].apply(lambda x: x*improvement)\n \n return df\n\n\ndef sens_StageEfficiency(df, stage, target_eff = 95.0, start_year = None, \n goal_year = 2030, plotflag = False):\n '''\n Modifies baseline scenario for evaluating sensitivity to increasing a stage in the \n lifetime of the module's efficiency. It either increases or decreases from the \n start year until the goal year the value to the target efficiency by interpolation.\n \n Parameters\n ----------\n df : dataframe \n dataframe to be modified\n stage : str\n Stage that wants to be modified. This can be any of the module or \n material specified efficiencies, for example:'MFG_Material_eff', \n 'mat_MFG_scrap_recycled', 'mat_MFG_scrap_Recycled', \n 'mat_MFG_scrap_Recycled_into_HQ', 'mat_MFG_scrap_Recycled_into_HQ_Reused4MFG'\n 'mod_EOL_collection_losses', 'mod_EOL_collected_recycled',\n 'mat_EOL_Recycling_eff', 'mat_EOL_Recycled_into_HQ', \n 'mat_EOL_RecycledHQ_Reused4MFG', 'mod_Repair',\n 'mod_MerchantTail', 'mod_Reuse', 'mod_eff', etc.\n start_year: int\n Year to start modifying the value. This specifies the initial efficiency \n value that is going to be modified. If None is passed, current year is used.\n target_eff: flat\n target eff value in percentage to be reached. i.e. 95.0 %.\n goal_year : int\n year by which target efficiency will be reached. i.e. 2030. Must be higher than current year.\n \n Returns\n -------\n df : dataframe\n modified dataframe\n '''\n \n if start_year is None:\n start_year = int(datetime.datetime.now().year)\n \n if start_year > goal_year:\n print(\"Error. Goal Year is before start year\")\n return\n \n if 0 < abs(target_eff) < 1: # checking it is not 0.95 but 95% i.e.\n print(\"Warning: target_eff value is between 0 and 1; it has been\"\n \"multiplied by 100% assuming it was a percentage in decimal form.\")\n target_eff = target_eff*100\n \n if target_eff > 100 or target_eff < 0:\n print(\"Warning: target_eff is out of range. 
Input value between\"\n \"0 and 100\")\n return\n \n if stage in df.columns:\n df2 = df.copy()\n df2[stage]=df2[stage].astype(float)\n df2.loc[(df2.index < goal_year) & (df2.index > start_year), stage] = np.nan\n df2.loc[df2.index >= goal_year , stage] = target_eff\n df2[stage] = df2[stage].interpolate()\n \n if plotflag:\n plt.plot(df[stage], label='Original')\n plt.plot(df2[stage], label='Modified')\n plt.title('Updated values for '+stage)\n plt.legend()\n return df2\n else:\n print(\"Stage name incorrect.\")\n\n\n\n\n\n\ndef _modDict(originaldict, moddict):\n '''\n Compares keys in originaldict with moddict and updates values of \n originaldict to moddict if existing.\n \n Parameters\n ----------\n originaldict : dictionary\n Original dictionary calculated, for example frontscan or backscan dictionaries.\n moddict : dictionary\n Modified dictinoary, for example modscan['x'] = 0 to change position of x.\n \n Returns\n -------\n originaldict : dictionary\n Updated original dictionary with values from moddict.\n '''\n for key in moddict:\n try:\n originaldict[key] = moddict[key]\n except:\n print(\"Wrong key in modified dictionary\")\n \n return originaldict\n\n\ndef calculateLCA(PVarea, modified_impacts=None, printflag = False):\n '''\n\n\n '''\n \n if printflag:\n print(\"Doing calculations of LCA analysis for Silicon Photovoltaic Panels\")\n \n \n\n impacts = {'Acidification':{'UUID': '75d0c8a2-e466-3bd7-813b-5beef2209330',\n 'Result': 1.29374135667815,\n 'Unit': 'kg SO2' },\n 'Carcinogenics':{'UUID': 'a6e5e5d8-a1e5-3c77-8170-586c4fe37514',\n 'Result': 0.0000231966690476102,\n 'Unit': 'CTUh' },\n 'Ecotoxicity':{'UUID': '338e9370-ceb0-3d18-9d87-5f91feb7829c',\n 'Result': 5933.77859696668,\n 'Unit': 'CTUe' },\n 'Eutrophication':{'UUID': '45b8cd56-498a-3c6f-9488-134e951d8c02',\n 'Result': 1.34026194777363,\n 'Unit': 'kg N eq' },\n \n 'Fossil fuel depletion':{'UUID': '0e45786f-67fa-3b8a-b8a3-73a7c316434c',\n 'Result': 249.642261689385,\n 'Unit': 'MJ surplus' },\n \n 'Global warming':{'UUID': '31967441-d687-313d-9910-13da3a584ab7',\n 'Result': 268.548841324818,\n 'Unit': 'kg CO2 eq' },\n \n 'Non carcinogenics':{'UUID': 'd4827ae3-c873-3ea4-85fb-860b7f3f2dee',\n 'Result': 0.000135331806321799,\n 'Unit': 'CTUh' },\n \n 'Ozone depletion':{'UUID': '6c05dad1-6661-35f2-82aa-6e8e6a498aec',\n 'Result': 0.0000310937628622019,\n 'Unit': 'kg CFC-11 eq' },\n \n 'Respiratory effects':{'UUID': 'e0916d62-7fbd-3d0a-a4a5-52659b0ac9c1',\n 'Result': 0.373415542664206,\n 'Unit': 'kg PM2.5 eq' },\n 'Smog':{'UUID': '7a149078-e2fd-3e07-a5a3-79035c60e7c3',\n 'Result': 15.35483065, \n 'Unit': 'kg O3 eq' },\n }\n \n if modified_impacts is not None:\n impacts = _modDict(impacts, modified_impacts)\n if printflag:\n print(\"Following Modified impacts provided instead of TRACI 2.1 default\")\n print(impacts)\n print(\"\")\n else:\n if printflag:\n print(\"Following TRACI 2.1\")\n\n acidification = impacts['Acidification']['Result']*PVarea\n carcinogenics = impacts['Carcinogenics']['Result']*PVarea\n ecotoxicity = impacts['Ecotoxicity']['Result']*PVarea\n eutrophication = impacts['Eutrophication']['Result']*PVarea\n fossil_fuel_depletion = impacts['Fossil fuel depletion']['Result']*PVarea\n global_warming = impacts['Global warming']['Result']*PVarea\n non_carcinogenics = impacts['Non carcinogenics']['Result']*PVarea\n ozone_depletion = impacts['Ozone depletion']['Result']*PVarea\n respiratory_effects = impacts['Respiratory effects']['Result']*PVarea\n smog = impacts['Smog']['Result']*PVarea\n \n\n \n if printflag:\n 
print(\"RESULTS FOR PV AREA \", PVarea, \" m2 \")\n print(\"****************************************\")\n print('Acidification: ', round(impacts['Acidification']['Result']*PVarea, 2), ' ', impacts['Acidification']['Unit'])\n print('Carcinogenics: ', round(impacts['Carcinogenics']['Result']*PVarea, 2), ' ', impacts['Carcinogenics']['Unit'])\n print('Ecotoxicity: ', round(impacts['Ecotoxicity']['Result']*PVarea, 2), ' ', impacts['Ecotoxicity']['Unit'])\n print('Eutrophication: ', round(impacts['Eutrophication']['Result']*PVarea, 2), ' ', impacts['Eutrophication']['Unit'])\n print('Fossil fuel depletion: ', round(impacts['Fossil fuel depletion']['Result']*PVarea, 2), ' ', impacts['Fossil fuel depletion']['Unit'])\n print('Global warming: ', round(impacts['Global warming']['Result']*PVarea, 2), ' ', impacts['Global warming']['Unit'])\n print('Non carcinogenics: ', round(impacts['Non carcinogenics']['Result']*PVarea, 2), ' ', impacts['Non carcinogenics']['Unit'])\n print('Ozone depletion: ', round(impacts['Ozone depletion']['Result']*PVarea, 2), ' ', impacts['Ozone depletion']['Unit'])\n print('Respiratory effects: ', round(impacts['Respiratory effects']['Result']*PVarea, 2), ' ', impacts['Respiratory effects']['Unit'])\n print('Smog: ', round(impacts['Smog']['Result']*PVarea, 2), ' ', impacts['Smog']['Unit'])\n \n return (acidification, carcinogenics, ecotoxicity, eutrophication, \n fossil_fuel_depletion, global_warming,\n non_carcinogenics, ozone_depletion, respiratory_effects, smog)" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.figure", "pandas.DataFrame", "numpy.cos", "matplotlib.pyplot.title", "numpy.clip", "pandas.RangeIndex", "matplotlib.pyplot.ylabel", "numpy.log", "numpy.isnan", "numpy.sqrt", "numpy.sin", "numpy.array", "matplotlib.pyplot.xlabel", "numpy.deg2rad" ] ]
keshaviyengar/rl-baselines-zoo
[ "6e39f5c7c6c2d30873297308ed064551bffaa52d" ]
[ "trajectory_generator.py" ]
[ "import rospy\nfrom geometry_msgs.msg import Pose, Point\nfrom std_msgs.msg import Bool\n\nimport numpy as np\nimport os\n# This script creates a square trajectory for a robot to follow.\n# Will output errors as well.\n\n\nclass CircleTrajectory(object):\n def __init__(self, x_offset, y_offset, z_height, radius, theta_step):\n self.trajectory_pub = rospy.Publisher(\"desired_goal\", Pose, queue_size=10)\n self.trajectory_finish_pub = rospy.Publisher(\"trajectory_finish\", Bool, queue_size=10)\n self._current_pose = Pose()\n\n # Create a timer to update the desired trajectory\n self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)\n\n self.traj_finish = False\n # For now set initial current pose as 0\n self._desired_pose = Pose()\n self.x_offset = x_offset\n self.y_offset = y_offset\n self.radius = radius\n self.thetas = np.arange(0, 2 * np.pi, np.deg2rad(theta_step))\n self.thetas_counter = 0\n self._desired_pose.position.x = self.x_offset + self.radius * np.cos(self.thetas[self.thetas_counter])\n self._desired_pose.position.y = self.y_offset + self.radius * np.sin(self.thetas[self.thetas_counter])\n self._desired_pose.position.z = z_height\n self._desired_pose.orientation.x = 0\n self._desired_pose.orientation.y = 0\n self._desired_pose.orientation.z = 0\n self._desired_pose.orientation.w = 1\n\n self.speed = 1\n\n def _trajectory_callback(self, event):\n self.thetas_counter += 1\n if self.thetas_counter == self.thetas.size - 1:\n self.traj_finish = True\n print(\"Trajectory is complete.\")\n self.trajectory_finish_pub.publish(True)\n self.trajectory_timer.shutdown()\n\n if not self.traj_finish:\n self._desired_pose.position.x = self.x_offset + self.radius * np.cos(self.thetas[self.thetas_counter])\n self._desired_pose.position.y = self.y_offset + self.radius * np.sin(self.thetas[self.thetas_counter])\n # Publish new pose\n self.trajectory_pub.publish(self._desired_pose)\n\n\nclass TriangleTrajectory(object):\n def __init__(self, point_a, point_b, point_c, z_height):\n self.trajectory_pub = rospy.Publisher(\"desired_goal\", Pose, queue_size=10)\n self.trajectory_finish_pub = rospy.Publisher(\"trajectory_finish\", Bool, queue_size=10)\n self._current_pose = Pose()\n\n\n # Second timer for how long to move in axis before moving to next\n # self.change_direction_timer = rospy.Timer(rospy.Duration(5.0), self._change_direction)\n\n # Specify three points to reach to create the triangle\n self.points = np.array([point_a, point_b, point_c])\n\n self._turn_count = 0\n self.del_vector = [(self.points[1][0] - self.points[0][0]), (self.points[1][1] - self.points[0][1])]\n\n self._done_trajectory = False\n\n self._desired_pose = Pose()\n self._desired_pose.position.x = point_a[0]\n self._desired_pose.position.y = point_a[1]\n self._desired_pose.position.z = z_height\n self._desired_pose.orientation.x = 0\n self._desired_pose.orientation.y = 0\n self._desired_pose.orientation.z = 0\n self._desired_pose.orientation.w = 1\n\n # Publish initial point and sleep to initialize\n for _ in range(10):\n self.trajectory_pub.publish(self._desired_pose)\n rospy.sleep(0.1)\n\n self.prev_time = rospy.get_time()\n self.traj_finish = False\n\n # Create a timer to update the desired trajectory\n self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)\n\n # This callback changes the direction by 90 degrees, to make the square.\n def _change_direction(self):\n if self._turn_count == 0:\n if np.linalg.norm(self.points[self._turn_count + 1] - np.array(\n 
[self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:\n self._turn_count += 1\n self.del_vector = [(self.points[1][0] - self.points[0][0]),\n (self.points[1][1] - self.points[0][1])]\n\n if self._turn_count == 1:\n if np.linalg.norm(self.points[self._turn_count + 1] - np.array(\n [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:\n self._turn_count += 1\n self.del_vector = [(self.points[2][0] - self.points[1][0]),\n (self.points[2][1] - self.points[1][1])]\n if self._turn_count == 2:\n if np.linalg.norm(self.points[0] - np.array(\n [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:\n self._turn_count += 1\n self.del_vector = [(self.points[0][0] - self.points[2][0]),\n (self.points[0][1] - self.points[2][1])]\n if self._turn_count == 3:\n print(\"Trajectory is complete.\")\n self.traj_finish = True\n self.trajectory_finish_pub.publish(True)\n self.trajectory_timer.shutdown()\n # self.change_direction_timer.shutdown()\n\n def _trajectory_callback(self, event):\n # Compute current difference in time from last callback\n if not self.traj_finish:\n current_time = rospy.get_time()\n delta_t = current_time - self.prev_time\n self.prev_time = current_time\n\n self._change_direction()\n\n self._desired_pose.position.x += self.del_vector[0] * delta_t\n self._desired_pose.position.y += self.del_vector[1] * delta_t\n self.trajectory_pub.publish(self._desired_pose)\n\n\nclass SquareTrajectory2(object):\n def __init__(self, point_a, point_b, point_c, point_d, z_height):\n self.trajectory_pub = rospy.Publisher(\"desired_goal\", Pose, queue_size=10)\n self.trajectory_finish_pub = rospy.Publisher(\"trajectory_finish\", Bool, queue_size=10)\n self._current_pose = Pose()\n\n self.points = [point_a, point_b, point_c, point_d]\n\n self._turn_count = 0\n self.del_vector = [(self.points[1][0] - self.points[0][0]), (self.points[1][1] - self.points[0][1])]\n\n # For now set initial current pose as 0\n self._desired_pose = Pose()\n self._desired_pose.position.x = point_a[0]\n self._desired_pose.position.y = point_a[1]\n self._desired_pose.position.z = z_height\n self._desired_pose.orientation.x = 0\n self._desired_pose.orientation.y = 0\n self._desired_pose.orientation.z = 0\n self._desired_pose.orientation.w = 1\n\n # Publish initial point and sleep to initialize\n for _ in range(10):\n self.trajectory_pub.publish(self._desired_pose)\n rospy.sleep(0.1)\n\n self.prev_time = rospy.get_time()\n self.traj_finish = False\n\n # Create a timer to update the desired trajectory\n self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)\n\n # This callback changes the direction by 90 degrees, to make the square.\n def _change_direction(self):\n if self._turn_count == 0:\n if np.linalg.norm(self.points[self._turn_count + 1] - np.array(\n [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:\n self._turn_count += 1\n self.del_vector = [(self.points[1][0] - self.points[0][0]),\n (self.points[1][1] - self.points[0][1])]\n\n if self._turn_count == 1:\n if np.linalg.norm(self.points[self._turn_count + 1] - np.array(\n [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:\n self._turn_count += 1\n self.del_vector = [(self.points[2][0] - self.points[1][0]),\n (self.points[2][1] - self.points[1][1])]\n if self._turn_count == 2:\n if np.linalg.norm(self.points[self._turn_count + 1] - np.array(\n [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:\n self._turn_count += 1\n self.del_vector = 
[(self.points[3][0] - self.points[2][0]),\n (self.points[3][1] - self.points[2][1])]\n if self._turn_count == 3:\n if np.linalg.norm(self.points[0] - np.array(\n [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:\n self._turn_count += 1\n self.del_vector = [(self.points[0][0] - self.points[3][0]),\n (self.points[0][1] - self.points[3][1])]\n if self._turn_count == 4:\n print(\"Trajectory is complete.\")\n self.traj_finish = True\n self.trajectory_finish_pub.publish(True)\n self.trajectory_timer.shutdown()\n\n def _trajectory_callback(self, event):\n # Compute current difference in time from last callback\n if not self.traj_finish:\n current_time = rospy.get_time()\n delta_t = current_time - self.prev_time\n self.prev_time = current_time\n\n self._change_direction()\n\n self._desired_pose.position.x += self.del_vector[0] * delta_t\n self._desired_pose.position.y += self.del_vector[1] * delta_t\n self.trajectory_pub.publish(self._desired_pose)\n\n\nif __name__ == '__main__':\n rospy.init_node(\"trajectory_generator\")\n experiments = [7]\n\n for exp in experiments:\n x_offset = 5\n y_offset = 5\n if exp in [1, 2, 3, 4, 5]:\n z_height = 100\n elif exp in [6, 7, 8, 9, 10]:\n z_height = 100\n else:\n z_height = 125\n\n radius = 2.0\n theta_step = 0.5\n print(\"Circle trajectory\")\n circle_trajectory = CircleTrajectory(x_offset, y_offset, z_height, radius, theta_step)\n while not circle_trajectory.traj_finish:\n if circle_trajectory.traj_finish:\n break\n\n # point_a = [20, 20]\n # point_b = [20, 30]\n # point_c = [30, 20]\n # point_a = [-5, 0]\n # point_b = [-10, -5]\n # point_c = [5, 0]\n # if exp in [1, 2, 3, 4, 5]:\n # z_height = 100\n # elif exp in [6, 7, 8, 9, 10]:\n # z_height = 125\n # else:\n # z_height = 125\n # print(\"Triangle trajectory\")\n # triangle_trajectory = TriangleTrajectory(point_a, point_b, point_c, z_height)\n # while not triangle_trajectory.traj_finish:\n # pass\n\n # point_a = [5, 0]\n # point_b = [-5, 0]\n # point_c = [-5, -5]\n # point_d = [5, -5]\n # if exp in [1, 2, 3, 4, 5]:\n # z_height = 100\n # elif exp in [6, 7, 8, 9, 10]:\n # z_height = 125\n # else:\n # z_height = 125\n # print(\"Square trajectory\")\n # square_trajectory = SquareTrajectory2(point_a, point_b, point_c, point_d, z_height)\n # while not square_trajectory.traj_finish:\n # pass\n\n" ]
[ [ "numpy.array", "numpy.sin", "numpy.cos", "numpy.deg2rad" ] ]
truatpasteurdotfr/napari
[ "48cdf4d1c4bcf6f76603e90b1c0c7498e2aba6c0" ]
[ "napari/layers/surface/surface.py" ]
[ "import warnings\n\nimport numpy as np\n\nfrom ...utils.colormaps import AVAILABLE_COLORMAPS\nfrom ...utils.events import Event\nfrom ...utils.translations import trans\nfrom ..base import Layer\nfrom ..intensity_mixin import IntensityVisualizationMixin\nfrom ..utils.layer_utils import calc_data_range\nfrom ._surface_constants import Shading\nfrom .normals import SurfaceNormals\nfrom .wireframe import SurfaceWireframe\n\n\n# Mixin must come before Layer\nclass Surface(IntensityVisualizationMixin, Layer):\n \"\"\"\n Surface layer renders meshes onto the canvas.\n\n Parameters\n ----------\n data : 2-tuple or 3-tuple of array\n The first element of the tuple is an (N, D) array of vertices of\n mesh triangles. The second is an (M, 3) array of int of indices\n of the mesh triangles. The optional third element is the\n (K0, ..., KL, N) array of values used to color vertices where the\n additional L dimensions are used to color the same mesh with\n different values. If not provided, it defaults to ones.\n colormap : str, napari.utils.Colormap, tuple, dict\n Colormap to use for luminance images. If a string must be the name\n of a supported colormap from vispy or matplotlib. If a tuple the\n first value must be a string to assign as a name to a colormap and\n the second item must be a Colormap. If a dict the key must be a\n string to assign as a name to a colormap and the value must be a\n Colormap.\n contrast_limits : list (2,)\n Color limits to be used for determining the colormap bounds for\n luminance images. If not passed is calculated as the min and max of\n the image.\n gamma : float\n Gamma correction for determining colormap linearity. Defaults to 1.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n rotate : float, 3-tuple of float, or n-D array.\n If a float convert into a 2D rotation matrix using that value as an\n angle. If 3-tuple convert into a 3D rotation matrix, using a yaw,\n pitch, roll convention. Otherwise assume an nD rotation. Angles are\n assumed to be in degrees. They can be converted from radians with\n np.degrees if needed.\n shear : 1-D array or n-D array\n Either a vector of upper triangular values, or an nD shear matrix with\n ones along the main diagonal.\n affine : n-D array or napari.utils.transforms.Affine\n (N+1, N+1) affine transformation matrix in homogeneous coordinates.\n The first (N, N) entries correspond to a linear transform and\n the final column is a length N translation vector and a 1 or a napari\n `Affine` transform object. Applied as an extra transform on top of the\n provided scale, rotate, and shear values.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. 
Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n shading : str, Shading\n One of a list of preset shading modes that determine the lighting model\n using when rendering the surface in 3D.\n\n * ``Shading.NONE``\n Corresponds to ``shading='none'``.\n * ``Shading.FLAT``\n Corresponds to ``shading='flat'``.\n * ``Shading.SMOOTH``\n Corresponds to ``shading='smooth'``.\n visible : bool\n Whether the layer visual is currently being displayed.\n cache : bool\n Whether slices of out-of-core datasets should be cached upon retrieval.\n Currently, this only applies to dask arrays.\n wireframe : dict or SurfaceWireframe\n Whether and how to display the edges of the surface mesh with a wireframe.\n normals : dict or SurfaceNormals\n Whether and how to display the face and vertex normals of the surface mesh.\n\n Attributes\n ----------\n data : 3-tuple of array\n The first element of the tuple is an (N, D) array of vertices of\n mesh triangles. The second is an (M, 3) array of int of indices\n of the mesh triangles. The third element is the (K0, ..., KL, N)\n array of values used to color vertices where the additional L\n dimensions are used to color the same mesh with different values.\n vertices : (N, D) array\n Vertices of mesh triangles.\n faces : (M, 3) array of int\n Indices of mesh triangles.\n vertex_values : (K0, ..., KL, N) array\n Values used to color vertices.\n colormap : str, napari.utils.Colormap, tuple, dict\n Colormap to use for luminance images. If a string must be the name\n of a supported colormap from vispy or matplotlib. If a tuple the\n first value must be a string to assign as a name to a colormap and\n the second item must be a Colormap. If a dict the key must be a\n string to assign as a name to a colormap and the value must be a\n Colormap.\n contrast_limits : list (2,)\n Color limits to be used for determining the colormap bounds for\n luminance images. 
If not passed is calculated as the min and max of\n the image.\n shading: str\n One of a list of preset shading modes that determine the lighting model\n using when rendering the surface.\n\n * ``'none'``\n * ``'flat'``\n * ``'smooth'``\n gamma : float\n Gamma correction for determining colormap linearity.\n wireframe : SurfaceWireframe\n Whether and how to display the edges of the surface mesh with a wireframe.\n normals : SurfaceNormals\n Whether and how to display the face and vertex normals of the surface mesh.\n\n\n Notes\n -----\n _data_view : (M, 2) or (M, 3) array\n The coordinates of the vertices given the viewed dimensions.\n _view_faces : (P, 3) array\n The integer indices of the vertices that form the triangles\n in the currently viewed slice.\n _colorbar : array\n Colorbar for current colormap.\n \"\"\"\n\n _colormaps = AVAILABLE_COLORMAPS\n\n def __init__(\n self,\n data,\n *,\n colormap='gray',\n contrast_limits=None,\n gamma=1,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n rotate=None,\n shear=None,\n affine=None,\n opacity=1,\n blending='translucent',\n shading='flat',\n visible=True,\n cache=True,\n experimental_clipping_planes=None,\n wireframe=None,\n normals=None,\n ):\n\n ndim = data[0].shape[1]\n\n super().__init__(\n data,\n ndim,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n rotate=rotate,\n shear=shear,\n affine=affine,\n opacity=opacity,\n blending=blending,\n visible=visible,\n cache=cache,\n experimental_clipping_planes=experimental_clipping_planes,\n )\n\n self.events.add(\n interpolation=Event,\n rendering=Event,\n shading=Event,\n )\n\n # assign mesh data and establish default behavior\n if len(data) not in (2, 3):\n raise ValueError(\n trans._(\n 'Surface data tuple must be 2 or 3, specifying verictes, faces, and optionally vertex values, instead got length {length}.',\n deferred=True,\n length=len(data),\n )\n )\n self._vertices = data[0]\n self._faces = data[1]\n if len(data) == 3:\n self._vertex_values = data[2]\n else:\n self._vertex_values = np.ones(len(self._vertices))\n\n # Set contrast_limits and colormaps\n self._gamma = gamma\n if contrast_limits is None:\n self._contrast_limits_range = calc_data_range(self._vertex_values)\n else:\n self._contrast_limits_range = contrast_limits\n self._contrast_limits = tuple(self._contrast_limits_range)\n self.colormap = colormap\n self.contrast_limits = self._contrast_limits\n\n # Data containing vectors in the currently viewed slice\n self._data_view = np.zeros((0, self._ndisplay))\n self._view_faces = np.zeros((0, 3))\n self._view_vertex_values = []\n\n # Trigger generation of view slice and thumbnail\n self._update_dims()\n\n # Shading mode\n self._shading = shading\n\n self.wireframe = wireframe or SurfaceWireframe()\n self.normals = normals or SurfaceNormals()\n\n def _calc_data_range(self, mode='data'):\n return calc_data_range(self.vertex_values)\n\n @property\n def dtype(self):\n return self.vertex_values.dtype\n\n @property\n def data(self):\n return (self.vertices, self.faces, self.vertex_values)\n\n @data.setter\n def data(self, data):\n if len(data) not in (2, 3):\n raise ValueError(\n trans._(\n 'Surface data tuple must be 2 or 3, specifying vertices, faces, and optionally vertex values, instead got length {data_length}.',\n deferred=True,\n data_length=len(data),\n )\n )\n self._vertices = data[0]\n self._faces = data[1]\n if len(data) == 3:\n self._vertex_values = data[2]\n else:\n self._vertex_values = np.ones(len(self._vertices))\n\n 
self._update_dims()\n self.events.data(value=self.data)\n if self._keep_auto_contrast:\n self.reset_contrast_limits()\n\n @property\n def vertices(self):\n return self._vertices\n\n @vertices.setter\n def vertices(self, vertices):\n \"\"\"Array of vertices of mesh triangles.\"\"\"\n\n self._vertices = vertices\n\n self._update_dims()\n self.refresh()\n self.events.data(value=self.data)\n self._set_editable()\n\n @property\n def vertex_values(self) -> np.ndarray:\n return self._vertex_values\n\n @vertex_values.setter\n def vertex_values(self, vertex_values: np.ndarray):\n \"\"\"Array of values used to color vertices..\"\"\"\n\n self._vertex_values = vertex_values\n\n self.refresh()\n self.events.data(value=self.data)\n self._set_editable()\n\n @property\n def faces(self) -> np.ndarray:\n return self._faces\n\n @faces.setter\n def faces(self, faces: np.ndarray):\n \"\"\"Array of indices of mesh triangles..\"\"\"\n\n self.faces = faces\n\n self.refresh()\n self.events.data(value=self.data)\n self._set_editable()\n\n def _get_ndim(self):\n \"\"\"Determine number of dimensions of the layer.\"\"\"\n return self.vertices.shape[1] + (self.vertex_values.ndim - 1)\n\n @property\n def _extent_data(self) -> np.ndarray:\n \"\"\"Extent of layer in data coordinates.\n\n Returns\n -------\n extent_data : array, shape (2, D)\n \"\"\"\n if len(self.vertices) == 0:\n extrema = np.full((2, self.ndim), np.nan)\n else:\n maxs = np.max(self.vertices, axis=0)\n mins = np.min(self.vertices, axis=0)\n\n # The full dimensionality and shape of the layer is determined by\n # the number of additional vertex value dimensions and the\n # dimensionality of the vertices themselves\n if self.vertex_values.ndim > 1:\n mins = [0] * (self.vertex_values.ndim - 1) + list(mins)\n maxs = list(self.vertex_values.shape[:-1]) + list(maxs)\n extrema = np.vstack([mins, maxs])\n return extrema\n\n @property\n def shading(self):\n return str(self._shading)\n\n @shading.setter\n def shading(self, shading):\n if isinstance(shading, Shading):\n self._shading = shading\n else:\n self._shading = Shading(shading)\n self.events.shading(value=self._shading)\n\n def _get_state(self):\n \"\"\"Get dictionary of layer state.\n\n Returns\n -------\n state : dict\n Dictionary of layer state.\n \"\"\"\n state = self._get_base_state()\n state.update(\n {\n 'colormap': self.colormap.name,\n 'contrast_limits': self.contrast_limits,\n 'gamma': self.gamma,\n 'shading': self.shading,\n 'data': self.data,\n 'wireframe': self.wireframe.dict(),\n 'normals': self.normals.dict(),\n }\n )\n return state\n\n def _set_view_slice(self):\n \"\"\"Sets the view given the indices to slice with.\"\"\"\n N, vertex_ndim = self.vertices.shape\n values_ndim = self.vertex_values.ndim - 1\n\n # Take vertex_values dimensionality into account if more than one value\n # is provided per vertex.\n if values_ndim > 0:\n # Get indices for axes corresponding to values dimensions\n values_indices = self._slice_indices[:-vertex_ndim]\n values = self.vertex_values[values_indices]\n if values.ndim > 1:\n warnings.warn(\n trans._(\n \"Assigning multiple values per vertex after slicing is not allowed. All dimensions corresponding to vertex_values must be non-displayed dimensions. 
Data will not be visible.\",\n deferred=True,\n )\n )\n self._data_view = np.zeros((0, self._ndisplay))\n self._view_faces = np.zeros((0, 3))\n self._view_vertex_values = []\n return\n\n self._view_vertex_values = values\n # Determine which axes of the vertices data are being displayed\n # and not displayed, ignoring the additional dimensions\n # corresponding to the vertex_values.\n indices = np.array(self._slice_indices[-vertex_ndim:])\n disp = [\n d\n for d in np.subtract(self._dims_displayed, values_ndim)\n if d >= 0\n ]\n not_disp = [\n d\n for d in np.subtract(self._dims_not_displayed, values_ndim)\n if d >= 0\n ]\n else:\n self._view_vertex_values = self.vertex_values\n indices = np.array(self._slice_indices)\n not_disp = list(self._dims_not_displayed)\n disp = list(self._dims_displayed)\n\n self._data_view = self.vertices[:, disp]\n if len(self.vertices) == 0:\n self._view_faces = np.zeros((0, 3))\n elif vertex_ndim > self._ndisplay:\n vertices = self.vertices[:, not_disp].astype('int')\n triangles = vertices[self.faces]\n matches = np.all(triangles == indices[not_disp], axis=(1, 2))\n matches = np.where(matches)[0]\n if len(matches) == 0:\n self._view_faces = np.zeros((0, 3))\n else:\n self._view_faces = self.faces[matches]\n else:\n self._view_faces = self.faces\n\n if self._keep_auto_contrast:\n self.reset_contrast_limits()\n\n def _update_thumbnail(self):\n \"\"\"Update thumbnail with current surface.\"\"\"\n pass\n\n def _get_value(self, position):\n \"\"\"Value of the data at a position in data coordinates.\n\n Parameters\n ----------\n position : tuple\n Position in data coordinates.\n\n Returns\n -------\n value : None\n Value of the data at the coord.\n \"\"\"\n return None\n" ]
[ [ "numpy.vstack", "numpy.zeros", "numpy.subtract", "numpy.max", "numpy.all", "numpy.min", "numpy.array", "numpy.where", "numpy.full" ] ]
tomstark99/epic-kitchens-100-fyrp
[ "cbc9e59569fb6110b900a51def1947b8a3c93699" ]
[ "src/models/esvs.py" ]
[ "import torch as t\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass MTRN(nn.Module):\r\n \r\n def __init__(self, frame_count: int):\r\n super().__init__()\r\n self.frame_count = frame_count\r\n self.fc1 = nn.Linear(256 * frame_count, 1024)\r\n self.fc2 = nn.Linear(1024, 512)\r\n self.fc3 = nn.Linear(512, 397)\r\n \r\n def forward(self, x):\r\n x = x.view(-1, 256 * self.frame_count)\r\n x = F.relu(self.fc1(x))\r\n x = F.relu(self.fc2(x))\r\n x = self.fc3_verb(x)\r\n \r\n return x\r\n\r\nclass V_MTRN(nn.Module):\r\n \r\n def __init__(self, frame_count: int, hidden_layer_size: int, dropout_count: int, dropout_probability: int = 0.5):\r\n super().__init__()\r\n if dropout_probability < 0 or dropout_probability > 1:\r\n raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')\r\n self.frame_count = frame_count\r\n self.dropout_count = dropout_count\r\n self.fc1 = nn.Linear(256 * frame_count, hidden_layer_size)\r\n self.dropout = nn.Dropout(p=dropout_probability)\r\n self.fc2 = nn.Linear(hidden_layer_size, 512)\r\n self.fc3_verb = nn.Linear(512, 97)\r\n \r\n def forward(self, x):\r\n x = x.view(-1, 256 * self.frame_count)\r\n x = F.relu(self.fc1(x))\r\n if self.dropout_count >= 1:\r\n x = self.dropout(x)\r\n x = F.relu(self.fc2(x))\r\n if self.dropout_count == 2:\r\n x = self.dropout(x)\r\n x = self.fc3_verb(x)\r\n \r\n return x\r\n\r\nclass N_MTRN(nn.Module):\r\n \r\n def __init__(self, frame_count: int, hidden_layer_size: int, dropout_count: int, dropout_probability: int = 0.5):\r\n super().__init__()\r\n if dropout_probability < 0 or dropout_probability > 1:\r\n raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')\r\n self.frame_count = frame_count\r\n self.dropout_count = dropout_count\r\n self.fc1 = nn.Linear(256 * frame_count, hidden_layer_size)\r\n self.dropout = nn.Dropout(p=dropout_probability)\r\n self.fc2 = nn.Linear(hidden_layer_size, 512)\r\n self.fc3_noun = nn.Linear(512, 300)\r\n \r\n def forward(self, x):\r\n x = x.view(-1, 256 * self.frame_count)\r\n x = F.relu(self.fc1(x))\r\n if self.dropout_count >= 1:\r\n x = self.dropout(x)\r\n x = F.relu(self.fc2(x))\r\n if self.dropout_count == 2:\r\n x = self.dropout(x)\r\n x = self.fc3_noun(x)\r\n \r\n return x\r\n\r\nclass V_MF(nn.Module):\r\n \r\n def __init__(self, frame_count: int, hidden_layer_size: int, dropout_probability: int = 0.5):\r\n super().__init__()\r\n if dropout_probability < 0 or dropout_probability > 1:\r\n raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')\r\n self.frame_count = frame_count\r\n self.fc1 = nn.Linear(768 * frame_count, hidden_layer_size)\r\n self.dropout = nn.Dropout(p=dropout_probability)\r\n self.fc2_verb = nn.Linear(hidden_layer_size, 97)\r\n \r\n def forward(self, x):\r\n x = x.view(-1, 768 * self.frame_count)\r\n x = F.relu(self.fc1(x))\r\n x = self.dropout(x)\r\n x = self.fc2_verb(x)\r\n \r\n return x\r\n\r\nclass N_MF(nn.Module):\r\n \r\n def __init__(self, frame_count: int, hidden_layer_size: int, dropout_probability: int = 0.5):\r\n super().__init__()\r\n if dropout_probability < 0 or dropout_probability > 1:\r\n raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')\r\n self.frame_count = frame_count\r\n self.fc1 = nn.Linear(768 * frame_count, hidden_layer_size)\r\n self.dropout = nn.Dropout(p=dropout_probability)\r\n self.fc2_noun = nn.Linear(hidden_layer_size, 300)\r\n \r\n def forward(self, x):\r\n x = x.view(-1, 768 * 
self.frame_count)\r\n x = F.relu(self.fc1(x))\r\n x = self.dropout(x)\r\n x = self.fc2_noun(x)\r\n \r\n return x\r\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout" ] ]
JaimeCernuda/dlio_benchmark
[ "d9cfbf76b4c7fb0d48a0dd43b8d2f2ea6ba75949" ]
[ "src/data_generator/npz_generator.py" ]
[ "\"\"\"\n Copyright (C) 2020 Argonne, Hariharan Devarajan <[email protected]>\n This file is part of DLProfile\n DLIO is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as\n published by the Free Software Foundation, either version 3 of the published by the Free Software Foundation, either\n version 3 of the License, or (at your option) any later version.\n This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied\n warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more\n details.\n You should have received a copy of the GNU General Public License along with this program.\n If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n\nfrom src.common.enumerations import Compression\nfrom src.data_generator.data_generator import DataGenerator\n\nimport numpy as np\nfrom numpy import random\n\nfrom src.utils.utility import progress\nfrom shutil import copyfile\n\n\"\"\"\nGenerator for creating data in NPZ format.\n\"\"\"\nclass NPZGenerator(DataGenerator):\n def __init__(self):\n super().__init__()\n\n def generate(self):\n \"\"\"\n Generator for creating data in NPZ format of 3d dataset.\n \"\"\"\n super().generate()\n records = random.random((self._dimension, self._dimension, self.num_samples))\n record_labels = [0] * self.num_samples\n prev_out_spec =\"\"\n count = 0\n for i in range(0, int(self.num_files)):\n if i % self.comm_size == self.my_rank:\n progress(i+1, self.num_files, \"Generating NPZ Data\")\n out_path_spec = \"{}_{}_of_{}.npz\".format(self._file_prefix, i, self.num_files)\n if count == 0:\n prev_out_spec = out_path_spec\n if self.compression != Compression.ZIP:\n np.savez(out_path_spec, x=records, y=record_labels)\n else:\n np.savez_compressed(out_path_spec, x=records, y=record_labels)\n count += 1\n else:\n copyfile(prev_out_spec, out_path_spec)" ]
[ [ "numpy.random.random", "numpy.savez", "numpy.savez_compressed" ] ]
CogStack/CAT
[ "5ac04d2676aede13f8e8d0ab408472c3c6d46a86" ]
[ "medcat/cat.py" ]
[ "import os\nimport shutil\nimport pickle\nimport traceback\nimport json\nimport logging\nimport math\nimport time\nimport psutil\nfrom time import sleep\nfrom copy import deepcopy\nfrom multiprocess import Process, Manager, cpu_count\nfrom multiprocess.queues import Queue\nfrom multiprocess.synchronize import Lock\nfrom typing import Union, List, Tuple, Optional, Dict, Iterable, Set\nfrom itertools import islice, chain, repeat\nfrom datetime import date\nfrom tqdm.autonotebook import tqdm, trange\nfrom spacy.tokens import Span, Doc, Token\nfrom spacy.language import Language\n\nfrom medcat import __version__\nfrom medcat.preprocessing.tokenizers import spacy_split_all\nfrom medcat.pipe import Pipe\nfrom medcat.preprocessing.taggers import tag_skip_and_punct\nfrom medcat.cdb import CDB\nfrom medcat.utils.matutils import intersect_nonempty_set\nfrom medcat.utils.data_utils import make_mc_train_test, get_false_positives\nfrom medcat.utils.normalizers import BasicSpellChecker\nfrom medcat.utils.checkpoint import Checkpoint, CheckpointConfig, CheckpointManager\nfrom medcat.utils.helpers import tkns_from_doc, get_important_config_parameters\nfrom medcat.utils.hasher import Hasher\nfrom medcat.ner.vocab_based_ner import NER\nfrom medcat.linking.context_based_linker import Linker\nfrom medcat.utils.filters import get_project_filters, check_filters\nfrom medcat.preprocessing.cleaners import prepare_name\nfrom medcat.meta_cat import MetaCAT\nfrom medcat.utils.meta_cat.data_utils import json_to_fake_spacy\nfrom medcat.config import Config\nfrom medcat.vocab import Vocab\nfrom medcat.utils.decorators import deprecated\nfrom medcat.ner.transformers_ner import TransformersNER\n\n\nclass CAT(object):\n r\"\"\"\n The main MedCAT class used to annotate documents, it is built on top of spaCy\n and works as a spaCy pipline. Creates an instance of a spaCy pipline that can\n be used as a spacy nlp model.\n\n Args:\n cdb (medcat.cdb.CDB):\n The concept database that will be used for NER+L\n config (medcat.config.Config):\n Global configuration for medcat\n vocab (medcat.vocab.Vocab, optional):\n Vocabulary used for vector embeddings and spelling. Default: None\n meta_cats (list of medcat.meta_cat.MetaCAT, optional):\n A list of models that will be applied sequentially on each\n detected annotation.\n\n Attributes (limited):\n cdb (medcat.cdb.CDB):\n Concept database used with this CAT instance, please do not assign\n this value directly.\n config (medcat.config.Config):\n The global configuration for medcat. Usually cdb.config will be used for this\n field. 
WILL BE REMOVED - TEMPORARY PLACEHOLDER\n vocab (medcat.utils.vocab.Vocab):\n The vocabulary object used with this instance, please do not assign\n this value directly.\n\n Examples:\n\n >>> cat = CAT(cdb, vocab)\n >>> spacy_doc = cat(\"Put some text here\")\n >>> print(spacy_doc.ents) # Detected entites\n \"\"\"\n # Add file and console handlers\n log = logging.getLogger(__package__)\n DEFAULT_MODEL_PACK_NAME = \"medcat_model_pack\"\n\n def __init__(self,\n cdb: CDB,\n vocab: Union[Vocab, None] = None,\n config: Optional[Config] = None,\n meta_cats: List[MetaCAT] = [],\n addl_ner: Union[TransformersNER, List[TransformersNER]] = []) -> None:\n self.cdb = cdb\n self.vocab = vocab\n if config is None:\n # Take config from the cdb\n self.config = cdb.config\n else:\n # Take the new config and assign it to the CDB also\n self.config = config\n self.cdb.config = config\n self._meta_cats = meta_cats\n self._addl_ner = addl_ner if isinstance(addl_ner, list) else [addl_ner]\n self._create_pipeline(self.config)\n\n def _create_pipeline(self, config):\n # Set log level\n self.log.setLevel(config.general['log_level'])\n\n # Build the pipeline\n self.pipe = Pipe(tokenizer=spacy_split_all, config=config)\n self.pipe.add_tagger(tagger=tag_skip_and_punct,\n name='skip_and_punct',\n additional_fields=['is_punct'])\n\n if self.vocab is not None:\n spell_checker = BasicSpellChecker(cdb_vocab=self.cdb.vocab, config=config, data_vocab=self.vocab)\n self.pipe.add_token_normalizer(spell_checker=spell_checker, config=config)\n\n # Add NER\n self.ner = NER(self.cdb, config)\n self.pipe.add_ner(self.ner)\n\n # Add LINKER\n self.linker = Linker(self.cdb, self.vocab, config)\n self.pipe.add_linker(self.linker)\n\n # Add addl_ner if they exist\n for ner in self._addl_ner:\n self.pipe.add_addl_ner(ner, ner.config.general['name'])\n\n # Add meta_annotaiton classes if they exist\n for meta_cat in self._meta_cats:\n self.pipe.add_meta_cat(meta_cat, meta_cat.config.general['category_name'])\n\n # Set max document length\n self.pipe.spacy_nlp.max_length = config.preprocessing.get('max_document_length', 1000000)\n\n @deprecated(message=\"Replaced with cat.pipe.spacy_nlp.\")\n def get_spacy_nlp(self) -> Language:\n \"\"\" Returns the spacy pipeline with MedCAT\n \"\"\"\n return self.pipe.spacy_nlp\n\n def get_hash(self):\n r\"\"\" Will not be a deep hash but will try to cactch all the changing parts during training.\n \"\"\"\n hasher = Hasher()\n hasher.update(self.cdb.get_hash())\n\n hasher.update(self.config.get_hash())\n\n for mc in self._meta_cats:\n hasher.update(mc.get_hash())\n\n for trf in self._addl_ner:\n hasher.update(trf.get_hash())\n\n return hasher.hexdigest()\n\n def get_model_card(self, as_dict=False):\n \"\"\"\n A minimal model card for MedCAT model packs.\n\n Args:\n as_dict: return the model card as a dictionary instead of a str.\n\n Returns:\n By default a str - indented JSON object.\n \"\"\"\n card = {\n 'Model ID': self.config.version['id'],\n 'Last Modified On': self.config.version['last_modified'],\n 'History (from least to most recent)': self.config.version['history'],\n 'Description': self.config.version['description'],\n 'Source Ontology': self.config.version['ontology'],\n 'Location': self.config.version['location'],\n 'MetaCAT models': self.config.version['meta_cats'],\n 'Basic CDB Stats': self.config.version['cdb_info'],\n 'Performance': self.config.version['performance'],\n 'Important Parameters (Partial view, all available in cat.config)': get_important_config_parameters(self.config),\n 
'MedCAT Version': self.config.version['medcat_version']\n }\n\n if as_dict:\n return card\n else:\n return json.dumps(card, indent=2, sort_keys=False)\n\n def _versioning(self):\n # Check version info and do not allow without it\n if self.config.version['description'] == 'No description':\n self.log.warning(\"Please consider populating the version information [description, performance, location, ontology] in cat.config.version\")\n\n # Fill the stuff automatically that is needed for versioning\n m = self.get_hash()\n version = self.config.version\n if version['id'] is None or m != version['id']:\n if version['id'] is not None:\n version['history'].append(version['id'])\n version['id'] = m\n version['last_modified'] = date.today().strftime(\"%d %B %Y\")\n version['cdb_info'] = self.cdb._make_stats()\n version['meta_cats'] = [meta_cat.get_model_card(as_dict=True) for meta_cat in self._meta_cats]\n version['medcat_version'] = __version__\n self.log.warning(\"Please consider updating [description, performance, location, ontology] in cat.config.version\")\n\n def create_model_pack(self, save_dir_path: str, model_pack_name: str = DEFAULT_MODEL_PACK_NAME) -> str:\n r\"\"\" Will crete a .zip file containing all the models in the current running instance\n of MedCAT. This is not the most efficient way, for sure, but good enough for now.\n\n model_pack_name - an id will be appended to this name\n\n returns:\n Model pack name\n \"\"\"\n # Spacy model always should be just the name, but during loading it can be reset to path\n self.config.general['spacy_model'] = os.path.basename(self.config.general['spacy_model'])\n # Versioning\n self._versioning()\n model_pack_name += \"_{}\".format(self.config.version['id'])\n\n self.log.warning(\"This will save all models into a zip file, can take some time and require quite a bit of disk space.\")\n _save_dir_path = save_dir_path\n save_dir_path = os.path.join(save_dir_path, model_pack_name)\n\n # expand user path to make this work with '~'\n os.makedirs(os.path.expanduser(save_dir_path), exist_ok=True)\n\n # Save the used spacy model\n spacy_path = os.path.join(save_dir_path, self.config.general['spacy_model'])\n if str(self.pipe.spacy_nlp._path) != spacy_path:\n # First remove if something is there\n shutil.rmtree(spacy_path, ignore_errors=True)\n shutil.copytree(str(self.pipe.spacy_nlp._path), spacy_path)\n\n # Save the CDB\n cdb_path = os.path.join(save_dir_path, \"cdb.dat\")\n self.cdb.save(cdb_path)\n\n # Save the Vocab\n vocab_path = os.path.join(save_dir_path, \"vocab.dat\")\n if self.vocab is not None:\n # We will allow creation of modelpacks without vocabs\n self.vocab.save(vocab_path)\n\n # Save addl_ner\n for comp in self.pipe.spacy_nlp.components:\n if isinstance(comp[1], TransformersNER):\n trf_path = os.path.join(save_dir_path, \"trf_\" + comp[1].config.general['name'])\n comp[1].save(trf_path)\n\n # Save all meta_cats\n for comp in self.pipe.spacy_nlp.components:\n if isinstance(comp[1], MetaCAT):\n name = comp[0]\n meta_path = os.path.join(save_dir_path, \"meta_\" + name)\n comp[1].save(meta_path)\n\n # Add a model card also, why not\n model_card_path = os.path.join(save_dir_path, \"model_card.json\")\n json.dump(self.get_model_card(as_dict=True), open(model_card_path, 'w'), indent=2)\n\n # Zip everything\n shutil.make_archive(os.path.join(_save_dir_path, model_pack_name), 'zip', root_dir=save_dir_path)\n\n # Log model card and return new name\n self.log.info(self.get_model_card()) # Print the model card\n return model_pack_name\n\n 
@classmethod\n def load_model_pack(cls, zip_path: str, meta_cat_config_dict: Optional[Dict] = None) -> \"CAT\":\n r\"\"\"Load everything within the 'model pack', i.e. the CDB, config, vocab and any MetaCAT models\n (if present)\n\n Args:\n zip_path:\n path to model pack zip.\n meta_cat_config_dict:\n A config dict that will overwrite existing configs in meta_cat.\n e.g. meta_cat_config_dict = {'general': {'device': 'cpu'}}\n \"\"\"\n from medcat.cdb import CDB\n from medcat.vocab import Vocab\n from medcat.meta_cat import MetaCAT\n\n base_dir = os.path.dirname(zip_path)\n filename = os.path.basename(zip_path)\n foldername = filename.replace(\".zip\", '')\n\n model_pack_path = os.path.join(base_dir, foldername)\n if os.path.exists(model_pack_path):\n cls.log.info(\"Found an existing unziped model pack at: {}, the provided zip will not be touched.\".format(model_pack_path))\n else:\n cls.log.info(\"Unziping the model pack and loading models.\")\n shutil.unpack_archive(zip_path, extract_dir=model_pack_path)\n\n # Load the CDB\n cdb_path = os.path.join(model_pack_path, \"cdb.dat\")\n cdb = CDB.load(cdb_path)\n\n # TODO load addl_ner\n\n # Modify the config to contain full path to spacy model\n cdb.config.general['spacy_model'] = os.path.join(model_pack_path, os.path.basename(cdb.config.general['spacy_model']))\n\n # Load Vocab\n vocab_path = os.path.join(model_pack_path, \"vocab.dat\")\n if os.path.exists(vocab_path):\n vocab = Vocab.load(vocab_path)\n else:\n vocab = None\n\n # Find meta models in the model_pack\n trf_paths = [os.path.join(model_pack_path, path) for path in os.listdir(model_pack_path) if path.startswith('trf_')]\n addl_ner = []\n for trf_path in trf_paths:\n trf = TransformersNER.load(save_dir_path=trf_path)\n trf.cdb = cdb # Set the cat.cdb to be the CDB of the TRF model\n addl_ner.append(trf)\n\n # Find meta models in the model_pack\n meta_paths = [os.path.join(model_pack_path, path) for path in os.listdir(model_pack_path) if path.startswith('meta_')]\n meta_cats = []\n for meta_path in meta_paths:\n meta_cats.append(MetaCAT.load(save_dir_path=meta_path,\n config_dict=meta_cat_config_dict))\n\n cat = cls(cdb=cdb, config=cdb.config, vocab=vocab, meta_cats=meta_cats, addl_ner=addl_ner)\n cls.log.info(cat.get_model_card()) # Print the model card\n return cat\n\n def __call__(self, text: Optional[str], do_train: bool = False) -> Optional[Doc]:\n r\"\"\"\n Push the text through the pipeline.\n\n Args:\n text (string):\n The text to be annotated, if the text length is longer than\n self.config.preprocessing['max_document_length'] it will be trimmed to that length.\n do_train (bool, defaults to `False`):\n This causes so many screwups when not there, so I'll force training\n to False. To run training it is much better to use the self.train() function\n but for some special cases I'm leaving it here also.\n Returns:\n A single spacy document or multiple spacy documents with the extracted entities\n \"\"\"\n # Should we train - do not use this for training, unless you know what you are doing. Use the\n #self.train() function\n self.config.linking['train'] = do_train\n\n if text is None:\n self.log.error(\"The input text should be either a string or a sequence of strings but got %s\", type(text))\n return None\n else:\n text = self._get_trimmed_text(str(text))\n return self.pipe(text)\n\n def __repr__(self):\n \"\"\"\n Prints the model_card for this CAT instance.\n Returns:\n the 'Model Card' for this CAT instance. 
This includes NER+L config and any MetaCATs\n \"\"\"\n return self.get_model_card(as_dict=False)\n\n def _print_stats(self,\n data: Dict,\n epoch: int = 0,\n use_project_filters: bool = False,\n use_overlaps: bool = False,\n use_cui_doc_limit: bool = False,\n use_groups: bool = False,\n extra_cui_filter: Optional[Set] = None) -> Tuple:\n r\"\"\" TODO: Refactor and make nice\n Print metrics on a dataset (F1, P, R), it will also print the concepts that have the most FP,FN,TP.\n\n Args:\n data (list of dict):\n The json object that we get from MedCATtrainer on export.\n epoch (int):\n Used during training, so we know what epoch is it.\n use_project_filters (boolean):\n Each project in medcattrainer can have filters, do we want to respect those filters\n when calculating metrics.\n use_overlaps (boolean):\n Allow overlapping entites, nearly always False as it is very difficult to annotate overlapping entites.\n use_cui_doc_limit (boolean):\n If True the metrics for a CUI will be only calculated if that CUI appears in a document, in other words\n if the document was annotated for that CUI. Useful in very specific situations when during the annotation\n process the set of CUIs changed.\n use_groups (boolean):\n If True concepts that have groups will be combined and stats will be reported on groups.\n extra_cui_filter(Optional[Set]):\n This filter will be intersected with all other filters, or if all others are not set then only this one will be used.\n\n Returns:\n fps (dict):\n False positives for each CUI\n fns (dict):\n False negatives for each CUI\n tps (dict):\n True positives for each CUI\n cui_prec (dict):\n Precision for each CUI\n cui_rec (dict):\n Recall for each CUI\n cui_f1 (dict):\n F1 for each CUI\n cui_counts (dict):\n Number of occurrence for each CUI\n examples (dict):\n Examples for each of the fp, fn, tp. 
Format will be examples['fp']['cui'][<list_of_examples>]\n \"\"\"\n tp = 0\n fp = 0\n fn = 0\n fps: Dict = {}\n fns: Dict = {}\n tps: Dict = {}\n cui_prec: Dict = {}\n cui_rec: Dict = {}\n cui_f1: Dict = {}\n cui_counts: Dict = {}\n examples: Dict = {'fp': {}, 'fn': {}, 'tp': {}}\n\n fp_docs: Set = set()\n fn_docs: Set = set()\n # reset and back up filters\n _filters = deepcopy(self.config.linking['filters'])\n filters = self.config.linking['filters']\n for pind, project in tqdm(enumerate(data['projects']), desc=\"Stats project\", total=len(data['projects']), leave=False):\n filters['cuis'] = set()\n\n # Add extrafilter if set\n if isinstance(extra_cui_filter, set):\n filters['cuis'] = extra_cui_filter\n\n if use_project_filters:\n project_filter = get_project_filters(cuis=project.get('cuis', None),\n type_ids=project.get('tuis', None),\n cdb=self.cdb,\n project=project)\n # Intersect project filter with existing if it has something\n if project_filter:\n filters['cuis'] = intersect_nonempty_set(project_filter, filters['cuis'])\n\n for dind, doc in tqdm(\n enumerate(project[\"documents\"]),\n desc=\"Stats document\",\n total=len(project[\"documents\"]),\n leave=False,\n ):\n anns = self._get_doc_annotations(doc)\n\n # Apply document level filtering, in this case project_filter is ignored while the extra_cui_filter is respected still\n if use_cui_doc_limit:\n _cuis = set([ann['cui'] for ann in anns])\n if _cuis:\n filters['cuis'] = intersect_nonempty_set(_cuis, extra_cui_filter)\n else:\n filters['cuis'] = {'empty'}\n\n spacy_doc: Doc = self(doc['text'])\n\n if use_overlaps:\n p_anns = spacy_doc._.ents\n else:\n p_anns = spacy_doc.ents\n\n anns_norm = []\n anns_norm_neg = []\n anns_examples = []\n anns_norm_cui = []\n for ann in anns:\n cui = ann['cui']\n if check_filters(cui, filters):\n if use_groups:\n cui = self.cdb.addl_info['cui2group'].get(cui, cui)\n\n if ann.get('validated', True) and (not ann.get('killed', False) and not ann.get('deleted', False)):\n anns_norm.append((ann['start'], cui))\n anns_examples.append({\"text\": doc['text'][max(0, ann['start']-60):ann['end']+60],\n \"cui\": cui,\n \"source value\": ann['value'],\n \"acc\": 1,\n \"project index\": pind,\n \"document inedex\": dind})\n elif ann.get('validated', True) and (ann.get('killed', False) or ann.get('deleted', False)):\n anns_norm_neg.append((ann['start'], cui))\n\n\n if ann.get(\"validated\", True):\n # This is used to test was someone annotating for this CUI in this document\n anns_norm_cui.append(cui)\n cui_counts[cui] = cui_counts.get(cui, 0) + 1\n\n p_anns_norm = []\n p_anns_examples = []\n for ann in p_anns:\n cui = ann._.cui\n if use_groups:\n cui = self.cdb.addl_info['cui2group'].get(cui, cui)\n\n p_anns_norm.append((ann.start_char, cui))\n p_anns_examples.append({\"text\": doc['text'][max(0, ann.start_char-60):ann.end_char+60],\n \"cui\": cui,\n \"source value\": ann.text,\n \"acc\": float(ann._.context_similarity),\n \"project index\": pind,\n \"document inedex\": dind})\n\n\n for iann, ann in enumerate(p_anns_norm):\n cui = ann[1]\n if ann in anns_norm:\n tp += 1\n tps[cui] = tps.get(cui, 0) + 1\n\n example = p_anns_examples[iann]\n examples['tp'][cui] = examples['tp'].get(cui, []) + [example]\n else:\n fp += 1\n fps[cui] = fps.get(cui, 0) + 1\n fp_docs.add(doc.get('name', 'unk'))\n\n # Add example for this FP prediction\n example = p_anns_examples[iann]\n if ann in anns_norm_neg:\n # Means that it really was annotated as negative\n example['real_fp'] = True\n\n examples['fp'][cui] = 
examples['fp'].get(cui, []) + [example]\n\n for iann, ann in enumerate(anns_norm):\n if ann not in p_anns_norm:\n cui = ann[1]\n fn += 1\n fn_docs.add(doc.get('name', 'unk'))\n\n fns[cui] = fns.get(cui, 0) + 1\n examples['fn'][cui] = examples['fn'].get(cui, []) + [anns_examples[iann]]\n\n try:\n prec = tp / (tp + fp)\n rec = tp / (tp + fn)\n f1 = 2*(prec*rec) / (prec + rec)\n print(\"Epoch: {}, Prec: {}, Rec: {}, F1: {}\\n\".format(epoch, prec, rec, f1))\n print(\"Docs with false positives: {}\\n\".format(\"; \".join([str(x) for x in list(fp_docs)[0:10]])))\n print(\"Docs with false negatives: {}\\n\".format(\"; \".join([str(x) for x in list(fn_docs)[0:10]])))\n\n # Sort fns & prec\n fps = {k: v for k, v in sorted(fps.items(), key=lambda item: item[1], reverse=True)}\n fns = {k: v for k, v in sorted(fns.items(), key=lambda item: item[1], reverse=True)}\n tps = {k: v for k, v in sorted(tps.items(), key=lambda item: item[1], reverse=True)}\n\n\n # F1 per concept\n for cui in tps.keys():\n prec = tps[cui] / (tps.get(cui, 0) + fps.get(cui, 0))\n rec = tps[cui] / (tps.get(cui, 0) + fns.get(cui, 0))\n f1 = 2*(prec*rec) / (prec + rec)\n cui_prec[cui] = prec\n cui_rec[cui] = rec\n cui_f1[cui] = f1\n\n\n # Get top 10\n pr_fps = [(self.cdb.cui2preferred_name.get(cui,\n list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, fps[cui]) for cui in list(fps.keys())[0:10]]\n pr_fns = [(self.cdb.cui2preferred_name.get(cui,\n list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, fns[cui]) for cui in list(fns.keys())[0:10]]\n pr_tps = [(self.cdb.cui2preferred_name.get(cui,\n list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, tps[cui]) for cui in list(tps.keys())[0:10]]\n\n\n print(\"\\n\\nFalse Positives\\n\")\n for one in pr_fps:\n print(\"{:70} - {:20} - {:10}\".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))\n print(\"\\n\\nFalse Negatives\\n\")\n for one in pr_fns:\n print(\"{:70} - {:20} - {:10}\".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))\n print(\"\\n\\nTrue Positives\\n\")\n for one in pr_tps:\n print(\"{:70} - {:20} - {:10}\".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))\n print(\"*\"*110 + \"\\n\")\n\n except Exception:\n traceback.print_exc()\n\n # restore filters to original state\n self.config.linking['filters'] = _filters\n\n return fps, fns, tps, cui_prec, cui_rec, cui_f1, cui_counts, examples\n\n def _init_ckpts(self, is_resumed, checkpoint):\n if self.config.general['checkpoint']['steps'] is not None or checkpoint is not None:\n checkpoint_config = CheckpointConfig(**self.config.general.get('checkpoint', {}))\n checkpoint_manager = CheckpointManager('cat_train', checkpoint_config)\n if is_resumed:\n # TODO: probably remove is_resumed mark and always resume if a checkpoint is provided,\n #but I'll leave it for now\n checkpoint = checkpoint or checkpoint_manager.get_latest_checkpoint()\n self.log.info(f\"Resume training on the most recent checkpoint at {checkpoint.dir_path}...\")\n self.cdb = checkpoint.restore_latest_cdb()\n self.cdb.config.merge_config(self.config.__dict__)\n self.config = self.cdb.config\n self._create_pipeline(self.config)\n else:\n checkpoint = checkpoint or checkpoint_manager.create_checkpoint()\n self.log.info(f\"Start new training and checkpoints will be saved at {checkpoint.dir_path}...\")\n\n return checkpoint\n\n def train(self,\n data_iterator: Iterable,\n nepochs: int = 1,\n fine_tune: bool = True,\n progress_print: int = 1000,\n checkpoint: Optional[Checkpoint] = None,\n is_resumed: bool = False) -> None:\n \"\"\" Runs training on the data, note 
that the maximum length of a line\n or document is 1M characters. Anything longer will be trimmed.\n\n Args:\n data_iterator (Iterable):\n Simple iterator over sentences/documents, e.g. a open file\n or an array or anything that we can use in a for loop.\n nepochs (int):\n Number of epochs for which to run the training.\n fine_tune (bool):\n If False old training will be removed.\n progress_print (int):\n Print progress after N lines.\n checkpoint (Optional[medcat.utils.checkpoint.CheckpointUT]):\n The MedCAT checkpoint object\n is_resumed (bool):\n If True resume the previous training; If False, start a fresh new training.\n \"\"\"\n if not fine_tune:\n self.log.info(\"Removing old training data!\")\n self.cdb.reset_training()\n checkpoint = self._init_ckpts(is_resumed, checkpoint)\n\n latest_trained_step = checkpoint.count if checkpoint is not None else 0\n epochal_data_iterator = chain.from_iterable(repeat(data_iterator, nepochs))\n for line in islice(epochal_data_iterator, latest_trained_step, None):\n if line is not None and line:\n # Convert to string\n line = str(line).strip()\n\n try:\n _ = self(line, do_train=True)\n except Exception as e:\n self.log.warning(\"LINE: '%s...' \\t WAS SKIPPED\", line[0:100])\n self.log.warning(\"BECAUSE OF: %s\", str(e))\n else:\n self.log.warning(\"EMPTY LINE WAS DETECTED AND SKIPPED\")\n\n latest_trained_step += 1\n if latest_trained_step % progress_print == 0:\n self.log.info(\"DONE: %s\", str(latest_trained_step))\n if checkpoint is not None and checkpoint.steps is not None and latest_trained_step % checkpoint.steps == 0:\n checkpoint.save(cdb=self.cdb, count=latest_trained_step)\n\n self.config.linking['train'] = False\n\n def add_cui_to_group(self, cui: str, group_name: str) -> None:\n r\"\"\"\n Ads a CUI to a group, will appear in cdb.addl_info['cui2group']\n\n Args:\n cui (str):\n The concept to be added\n group_name (str):\n The group to whcih the concept will be added\n\n Examples:\n\n >>> cat.add_cui_to_group(\"S-17\", 'pain')\n \"\"\"\n\n # Add group_name\n self.cdb.addl_info['cui2group'][cui] = group_name\n\n def unlink_concept_name(self, cui: str, name: str, preprocessed_name: bool = False) -> None:\n r\"\"\"\n Unlink a concept name from the CUI (or all CUIs if full_unlink), removes the link from\n the Concept Database (CDB). 
As a consequence medcat will never again link the `name`\n to this CUI - meaning the name will not be detected as a concept in the future.\n\n Args:\n cui (str):\n The CUI from which the `name` will be removed\n name (str):\n The span of text to be removed from the linking dictionary\n Examples:\n\n >>> # To never again link C0020538 to HTN\n >>> cat.unlink_concept_name('C0020538', 'htn', False)\n \"\"\"\n\n cuis = [cui]\n if preprocessed_name:\n names = {name: 'nothing'}\n else:\n names = prepare_name(name, self.pipe.spacy_nlp, {}, self.config)\n\n # If full unlink find all CUIs\n if self.config.general.get('full_unlink', False):\n for n in names:\n cuis.extend(self.cdb.name2cuis.get(n, []))\n\n # Remove name from all CUIs\n for c in cuis:\n self.cdb.remove_names(cui=c, names=names)\n\n def add_and_train_concept(self,\n cui: str,\n name: str,\n spacy_doc: Optional[Doc] = None,\n spacy_entity: Optional[Union[List[Token], Span]] = None,\n ontologies: Set = set(),\n name_status: str = 'A',\n type_ids: Set = set(),\n description: str = '',\n full_build: bool = True,\n negative: bool = False,\n devalue_others: bool = False,\n do_add_concept: bool = True) -> None:\n r\"\"\" Add a name to an existing concept, or add a new concept, or do not do anything if the name or concept already exists. Perform\n training if spacy_entity and spacy_doc are set.\n\n Args:\n cui (str):\n CUI of the concept\n name (str):\n Name to be linked to the concept (in the case of MedCATtrainer this is simply the\n selected value in text, no preprocessing or anything needed).\n spacy_doc (spacy.tokens.Doc):\n Spacy represenation of the document that was manually annotated.\n spacy_entity (Optional[Union[List[Token], Span]]):\n Given the spacy document, this is the annotated span of text - list of annotated tokens that are marked with this CUI.\n negative (bool):\n Is this a negative or positive example.\n devalue_others:\n If set, cuis to which this name is assigned and are not `cui` will receive negative training given\n that negative=False.\n\n \\*\\*other:\n Refer to medcat.cat.cdb.CDB.add_concept\n \"\"\"\n names = prepare_name(name, self.pipe.spacy_nlp, {}, self.config)\n # Only if not negative, otherwise do not add the new name if in fact it should not be detected\n if do_add_concept and not negative:\n self.cdb.add_concept(cui=cui, names=names, ontologies=ontologies, name_status=name_status, type_ids=type_ids, description=description,\n full_build=full_build)\n\n if spacy_entity is not None and spacy_doc is not None:\n # Train Linking\n self.linker.context_model.train(cui=cui, entity=spacy_entity, doc=spacy_doc, negative=negative, names=names)\n\n if not negative and devalue_others:\n # Find all cuis\n cuis = set()\n for n in names:\n cuis.update(self.cdb.name2cuis.get(n, []))\n # Remove the cui for which we just added positive training\n if cui in cuis:\n cuis.remove(cui)\n # Add negative training for all other CUIs that link to these names\n for _cui in cuis:\n self.linker.context_model.train(cui=_cui, entity=spacy_entity, doc=spacy_doc, negative=True)\n\n def train_supervised(self,\n data_path: str,\n reset_cui_count: bool = False,\n nepochs: int = 1,\n print_stats: int = 0,\n use_filters: bool = False,\n terminate_last: bool = False,\n use_overlaps: bool = False,\n use_cui_doc_limit: bool = False,\n test_size: int = 0,\n devalue_others: bool = False,\n use_groups: bool = False,\n never_terminate: bool = False,\n train_from_false_positives: bool = False,\n extra_cui_filter: Optional[Set] = None,\n checkpoint: 
Optional[Checkpoint] = None,\n is_resumed: bool = False) -> Tuple:\n r\"\"\" TODO: Refactor, left from old\n Run supervised training on a dataset from MedCATtrainer. Please take care that this is more a simulated\n online training then supervised.\n\n Args:\n data_path (str):\n The path to the json file that we get from MedCATtrainer on export.\n reset_cui_count (boolean):\n Used for training with weight_decay (annealing). Each concept has a count that is there\n from the beginning of the CDB, that count is used for annealing. Resetting the count will\n significantly increase the training impact. This will reset the count only for concepts\n that exist in the the training data.\n nepochs (int):\n Number of epochs for which to run the training.\n print_stats (int):\n If > 0 it will print stats every print_stats epochs.\n use_filters (boolean):\n Each project in medcattrainer can have filters, do we want to respect those filters\n when calculating metrics.\n terminate_last (boolean):\n If true, concept termination will be done after all training.\n use_overlaps (boolean):\n Allow overlapping entities, nearly always False as it is very difficult to annotate overlapping entities.\n use_cui_doc_limit (boolean):\n If True the metrics for a CUI will be only calculated if that CUI appears in a document, in other words\n if the document was annotated for that CUI. Useful in very specific situations when during the annotation\n process the set of CUIs changed.\n test_size (float):\n If > 0 the data set will be split into train test based on this ration. Should be between 0 and 1.\n Usually 0.1 is fine.\n devalue_others(bool):\n Check add_name for more details.\n use_groups (boolean):\n If True concepts that have groups will be combined and stats will be reported on groups.\n never_terminate (boolean):\n If True no termination will be applied\n train_from_false_positives (boolean):\n If True it will use false positive examples detected by medcat and train from them as negative examples.\n extra_cui_filter(Optional[Set]):\n This filter will be intersected with all other filters, or if all others are not set then only this one will be used.\n checkpoint (Optional[Optional[medcat.utils.checkpoint.CheckpointST]):\n The MedCAT CheckpointST object\n is_resumed (bool):\n If True resume the previous training; If False, start a fresh new training.\n Returns:\n fp (dict):\n False positives for each CUI\n fn (dict):\n False negatives for each CUI\n tp (dict):\n True positives for each CUI\n p (dict):\n Precision for each CUI\n r (dict):\n Recall for each CUI\n f1 (dict):\n F1 for each CUI\n cui_counts (dict):\n Number of occurrence for each CUI\n examples (dict):\n FP/FN examples of sentences for each CUI\n \"\"\"\n checkpoint = self._init_ckpts(is_resumed, checkpoint)\n\n # Backup filters\n _filters = deepcopy(self.config.linking['filters'])\n filters = self.config.linking['filters']\n\n fp = fn = tp = p = r = f1 = examples = {}\n with open(data_path) as f:\n data = json.load(f)\n cui_counts = {}\n\n if test_size == 0:\n self.log.info(\"Running without a test set, or train==test\")\n test_set = data\n train_set = data\n else:\n train_set, test_set, _, _ = make_mc_train_test(data, self.cdb, test_size=test_size)\n\n if print_stats > 0:\n fp, fn, tp, p, r, f1, cui_counts, examples = self._print_stats(test_set,\n use_project_filters=use_filters,\n use_cui_doc_limit=use_cui_doc_limit,\n use_overlaps=use_overlaps,\n use_groups=use_groups,\n extra_cui_filter=extra_cui_filter)\n if reset_cui_count:\n # Get all CUIs\n 
cuis = []\n for project in train_set['projects']:\n for doc in project['documents']:\n doc_annotations = self._get_doc_annotations(doc)\n for ann in doc_annotations:\n cuis.append(ann['cui'])\n for cui in set(cuis):\n if cui in self.cdb.cui2count_train:\n self.cdb.cui2count_train[cui] = 100\n\n # Remove entities that were terminated\n if not never_terminate:\n for project in train_set['projects']:\n for doc in project['documents']:\n doc_annotations = self._get_doc_annotations(doc)\n for ann in doc_annotations:\n if ann.get('killed', False):\n self.unlink_concept_name(ann['cui'], ann['value'])\n\n latest_trained_step = checkpoint.count if checkpoint is not None else 0\n current_epoch, current_project, current_document = self._get_training_start(train_set, latest_trained_step)\n\n for epoch in trange(current_epoch, nepochs, initial=current_epoch, total=nepochs, desc='Epoch', leave=False):\n # Print acc before training\n for idx_project in trange(current_project, len(train_set['projects']), initial=current_project, total=len(train_set['projects']), desc='Project', leave=False):\n project = train_set['projects'][idx_project]\n\n # Set filters in case we are using the train_from_fp\n filters['cuis'] = set()\n if isinstance(extra_cui_filter, set):\n filters['cuis'] = extra_cui_filter\n\n if use_filters:\n project_filter = get_project_filters(cuis=project.get('cuis', None),\n type_ids=project.get('tuis', None),\n cdb=self.cdb,\n project=project)\n\n if project_filter:\n filters['cuis'] = intersect_nonempty_set(project_filter, filters['cuis'])\n\n for idx_doc in trange(current_document, len(project['documents']), initial=current_document, total=len(project['documents']), desc='Document', leave=False):\n doc = project['documents'][idx_doc]\n spacy_doc: Doc = self(doc['text'])\n\n # Compatibility with old output where annotations are a list\n doc_annotations = self._get_doc_annotations(doc)\n for ann in doc_annotations:\n if not ann.get('killed', False):\n cui = ann['cui']\n start = ann['start']\n end = ann['end']\n spacy_entity = tkns_from_doc(spacy_doc=spacy_doc, start=start, end=end)\n deleted = ann.get('deleted', False)\n self.add_and_train_concept(cui=cui,\n name=ann['value'],\n spacy_doc=spacy_doc,\n spacy_entity=spacy_entity,\n negative=deleted,\n devalue_others=devalue_others)\n if train_from_false_positives:\n fps: List[Span] = get_false_positives(doc, spacy_doc)\n\n for fp in fps:\n fp_: Span = fp\n self.add_and_train_concept(cui=fp_._.cui,\n name=fp_.text,\n spacy_doc=spacy_doc,\n spacy_entity=fp_,\n negative=True,\n do_add_concept=False)\n\n latest_trained_step += 1\n if checkpoint is not None and checkpoint.steps is not None and latest_trained_step % checkpoint.steps == 0:\n checkpoint.save(self.cdb, latest_trained_step)\n\n if terminate_last and not never_terminate:\n # Remove entities that were terminated, but after all training is done\n for project in train_set['projects']:\n for doc in project['documents']:\n doc_annotations = self._get_doc_annotations(doc)\n for ann in doc_annotations:\n if ann.get('killed', False):\n self.unlink_concept_name(ann['cui'], ann['value'])\n\n if print_stats > 0 and (epoch + 1) % print_stats == 0:\n fp, fn, tp, p, r, f1, cui_counts, examples = self._print_stats(test_set,\n epoch=epoch + 1,\n use_project_filters=use_filters,\n use_cui_doc_limit=use_cui_doc_limit,\n use_overlaps=use_overlaps,\n use_groups=use_groups,\n extra_cui_filter=extra_cui_filter)\n\n # Set the filters again\n self.config.linking['filters'] = _filters\n\n return fp, fn, tp, p, r, 
f1, cui_counts, examples\n\n def get_entities(self,\n text: str,\n only_cui: bool = False,\n addl_info: List[str] = ['cui2icd10', 'cui2ontologies', 'cui2snomed']) -> Dict:\n doc = self(text)\n out = self._doc_to_out(doc, only_cui, addl_info)\n return out\n\n def get_entities_multi_texts(self,\n texts: Union[Iterable[str], Iterable[Tuple]],\n only_cui: bool = False,\n addl_info: List[str] = ['cui2icd10', 'cui2ontologies', 'cui2snomed'],\n n_process: Optional[int] = None,\n batch_size: Optional[int] = None) -> List[Dict]:\n r\"\"\" Get entities\n text: text to be annotated\n return: entities\n \"\"\"\n out: List[Dict] = []\n\n if n_process is None:\n texts_ = self._generate_trimmed_texts(texts)\n for text in texts_:\n out.append(self._doc_to_out(self(text), only_cui, addl_info))\n else:\n self.pipe.set_error_handler(self._pipe_error_handler)\n try:\n texts_ = self._get_trimmed_texts(texts)\n docs = self.pipe.batch_multi_process(texts_, n_process, batch_size)\n\n for doc in tqdm(docs, total=len(texts_)):\n doc = None if doc.text.strip() == '' else doc\n out.append(self._doc_to_out(doc, only_cui, addl_info, out_with_text=True))\n\n # Currently spaCy cannot mark which pieces of texts failed within the pipe so be this workaround,\n # which also assumes texts are different from each others.\n if len(out) < len(texts_):\n self.log.warning(\"Found at least one failed batch and set output for enclosed texts to empty\")\n for i, text in enumerate(texts_):\n if i == len(out):\n out.append(self._doc_to_out(None, only_cui, addl_info))\n elif out[i].get('text', '') != text:\n out.insert(i, self._doc_to_out(None, only_cui, addl_info))\n\n cnf_annotation_output = getattr(self.config, 'annotation_output', {})\n if not(cnf_annotation_output.get('include_text_in_output', False)):\n for o in out:\n if o is not None:\n o.pop('text', None)\n finally:\n self.pipe.reset_error_handler()\n\n return out\n\n def get_json(self, text: str, only_cui: bool = False, addl_info=['cui2icd10', 'cui2ontologies']) -> str:\n \"\"\" Get output in json format\n\n text: text to be annotated\n return: json with fields {'entities': <>, 'text': text}\n \"\"\"\n ents = self.get_entities(text, only_cui, addl_info=addl_info)['entities']\n out = {'annotations': ents, 'text': text}\n\n return json.dumps(out)\n\n @staticmethod\n def _get_training_start(train_set, latest_trained_step):\n total_steps_per_epoch = sum([1 for project in train_set['projects'] for _ in project['documents']])\n if total_steps_per_epoch == 0:\n raise ValueError(\"MedCATtrainer export contains no documents\")\n current_epoch, last_step_in_epoch = divmod(latest_trained_step, total_steps_per_epoch)\n document_count = 0\n current_project = 0\n current_document = 0\n for idx_project, project in enumerate(train_set['projects']):\n for idx_doc, _ in enumerate(project['documents']):\n document_count += 1\n if document_count == last_step_in_epoch:\n current_project = idx_project\n current_document = idx_doc\n break\n if current_project > 0:\n break\n current_document = 0\n return current_epoch, current_project, current_document\n\n def _separate_nn_components(self):\n # Loop though the models and check are there GPU devices\n nn_components = []\n for component in self.pipe.spacy_nlp.components:\n if isinstance(component[1], MetaCAT) or isinstance(component[1], TransformersNER):\n self.pipe.spacy_nlp.disable_pipe(component[0])\n nn_components.append(component)\n\n return nn_components\n\n def _run_nn_components(self, docs: Dict, nn_components: List, id2text: Dict) -> None:\n 
r\"\"\" This will add meta_anns in-place to the docs dict.\n \"\"\"\n self.log.debug(\"Running GPU components separately\")\n\n # First convert the docs into the fake spacy doc format\n spacy_docs = json_to_fake_spacy(docs, id2text=id2text)\n # Disable component locks also\n for name, component in nn_components:\n component.config.general['disable_component_lock'] = True\n\n # For meta_cat compoments \n for name, component in [c for c in nn_components if isinstance(c[1], MetaCAT)]:\n spacy_docs = component.pipe(spacy_docs)\n for spacy_doc in spacy_docs:\n for ent in spacy_doc.ents:\n docs[spacy_doc.id]['entities'][ent._.id]['meta_anns'].update(ent._.meta_anns)\n\n def _batch_generator(self, data: Iterable, batch_size_chars: int, skip_ids: Set = set()):\n docs = []\n char_count = 0\n for doc in data:\n if doc[0] not in skip_ids:\n char_count += len(str(doc[1]))\n docs.append(doc)\n if char_count < batch_size_chars:\n continue\n yield docs\n docs = []\n char_count = 0\n\n if len(docs) > 0:\n yield docs\n\n def _save_docs_to_file(self, docs: Iterable, annotated_ids: List[str], save_dir_path: str, annotated_ids_path: Optional[str], part_counter: int = 0) -> int:\n path = os.path.join(save_dir_path, 'part_{}.pickle'.format(part_counter))\n pickle.dump(docs, open(path, \"wb\"))\n self.log.info(\"Saved part: %s, to: %s\", part_counter, path)\n part_counter = part_counter + 1 # Increase for save, as it should be what is the next part\n if annotated_ids_path is not None:\n pickle.dump((annotated_ids, part_counter), open(annotated_ids_path, 'wb'))\n return part_counter\n\n def multiprocessing(self,\n data: Union[List[Tuple], Iterable[Tuple]],\n nproc: int = 2,\n batch_size_chars: int = 5000 * 1000,\n only_cui: bool = False,\n addl_info: List[str] = [],\n separate_nn_components: bool = True,\n out_split_size_chars: Optional[int] = None,\n save_dir_path: str = os.path.abspath(os.getcwd()),\n min_free_memory=0.1) -> Dict:\n r\"\"\" Run multiprocessing for inference, if out_save_path and out_split_size_chars is used this will also continue annotating\n documents if something is saved in that directory.\n\n Args:\n data:\n Iterator or array with format: [(id, text), (id, text), ...]\n nproc (`int`, defaults to 8):\n Number of processors\n batch_size_chars (`int`, defaults to 1000000):\n Size of a batch in number of characters, this should be around: NPROC * average_document_length * 200\n separate_nn_components (`bool`, defaults to True):\n If set the medcat pipe will be broken up into NN and not-NN components and\n they will be run sequentially. This is useful as the NN components\n have batching and like to process many docs at once, while the rest of the pipeline\n runs the documents one by one.\n out_split_size_chars (`int`, None):\n If set once more than out_split_size_chars are annotated\n they will be saved to a file (save_dir_path) and the memory cleared. Recommended\n value is 20*batch_size_chars.\n save_dir_path(`str`, defaults to the current working directory):\n Where to save the annotated documents if splitting.\n min_free_memory(`float`, defaults to 0.1):\n If set a process will not start unless there is at least this much RAM memory left,\n should be a range between [0, 1] meaning how much of the memory has to be free. 
Helps when annotating\n very large datasets because spacy is not the best with memory management and multiprocessing.\n\n Returns:\n A dictionary: {id: doc_json, id2: doc_json2, ...}, in case out_split_size_chars is used\n the last batch will be returned while that and all previous batches will be\n written to disk (out_save_dir).\n \"\"\"\n for comp in self.pipe.spacy_nlp.components:\n if isinstance(comp[1], TransformersNER):\n raise Exception(\"Please do not use multiprocessing when running a transformer model for NER, run sequentially.\")\n\n # Set max document length\n self.pipe.spacy_nlp.max_length = self.config.preprocessing.get('max_document_length', 1000000)\n\n if self._meta_cats and not separate_nn_components:\n # Hack for torch using multithreading, which is not good if not \n #separate_nn_components, need for CPU runs only\n import torch\n torch.set_num_threads(1)\n\n nn_components = []\n if separate_nn_components:\n nn_components = self._separate_nn_components()\n\n if save_dir_path is not None:\n os.makedirs(save_dir_path, exist_ok=True)\n\n # \"5\" looks like a magic number here so better with comment about why the choice was made.\n internal_batch_size_chars = batch_size_chars // (5 * nproc)\n\n annotated_ids_path = os.path.join(save_dir_path, 'annotated_ids.pickle') if save_dir_path is not None else None\n if annotated_ids_path is not None and os.path.exists(annotated_ids_path):\n annotated_ids, part_counter = pickle.load(open(annotated_ids_path, 'rb'))\n else:\n annotated_ids = []\n part_counter = 0\n\n docs = {}\n _start_time = time.time()\n _batch_counter = 0 # Used for splitting the output, counts batches inbetween saves\n for batch in self._batch_generator(data, batch_size_chars, skip_ids=set(annotated_ids)):\n self.log.info(\"Annotated until now: %s docs; Current BS: %s docs; Elapsed time: %.2f minutes\",\n len(annotated_ids),\n len(batch),\n (time.time() - _start_time)/60)\n try:\n _docs = self._multiprocessing_batch(data=batch,\n nproc=nproc,\n only_cui=only_cui,\n batch_size_chars=internal_batch_size_chars,\n addl_info=addl_info,\n nn_components=nn_components,\n min_free_memory=min_free_memory)\n docs.update(_docs)\n annotated_ids.extend(_docs.keys())\n _batch_counter += 1\n del _docs\n if out_split_size_chars is not None and (_batch_counter * batch_size_chars) > out_split_size_chars:\n # Save to file and reset the docs \n part_counter = self._save_docs_to_file(docs=docs,\n annotated_ids=annotated_ids,\n save_dir_path=save_dir_path,\n annotated_ids_path=annotated_ids_path,\n part_counter=part_counter)\n del docs\n docs = {}\n _batch_counter = 0\n except Exception as e:\n self.log.warning(\"Failed an outer batch in the multiprocessing script\")\n self.log.warning(e, exc_info=True, stack_info=True)\n\n # Save the last batch\n if out_split_size_chars is not None and len(docs) > 0:\n # Save to file and reset the docs \n self._save_docs_to_file(docs=docs,\n annotated_ids=annotated_ids,\n save_dir_path=save_dir_path,\n annotated_ids_path=annotated_ids_path,\n part_counter=part_counter)\n\n # Enable the GPU Components again\n if separate_nn_components:\n for name, _ in nn_components:\n # No need to do anything else as it was already in the pipe\n self.pipe.spacy_nlp.enable_pipe(name)\n\n return docs\n\n def _multiprocessing_batch(self,\n data: Union[List[Tuple], Iterable[Tuple]],\n nproc: int = 8,\n batch_size_chars: int = 1000000,\n only_cui: bool = False,\n addl_info: List[str] = [],\n nn_components: List = [],\n min_free_memory: int = 0) -> Dict:\n r\"\"\" Run 
multiprocessing on one batch\n\n Args:\n data:\n Iterator or array with format: [(id, text), (id, text), ...]\n nproc (`int`, defaults to 8):\n Number of processors\n batch_size_chars (`int`, defaults to 1000000):\n Size of a batch in number of characters\n\n Returns:\n A dictionary: {id: doc_json, id2: doc_json2, ...}\n \"\"\"\n # Create the input output for MP\n with Manager() as manager:\n out_list = manager.list()\n lock = manager.Lock()\n in_q = manager.Queue(maxsize=10*nproc)\n\n id2text = {}\n for batch in self._batch_generator(data, batch_size_chars):\n if nn_components:\n # We need this for the json_to_fake_spacy\n id2text.update({k:v for k,v in batch})\n in_q.put(batch)\n\n # Final data point for workers\n for _ in range(nproc):\n in_q.put(None)\n sleep(2)\n\n # Create processes\n procs = []\n for i in range(nproc):\n p = Process(target=self._mp_cons,\n kwargs={'in_q': in_q,\n 'out_list': out_list,\n 'pid': i,\n 'only_cui': only_cui,\n 'addl_info': addl_info,\n 'min_free_memory': min_free_memory,\n 'lock': lock})\n p.start()\n procs.append(p)\n\n # Join processes\n for p in procs:\n p.join()\n\n docs = {}\n # Covnerts a touple into a dict\n docs.update({k:v for k,v in out_list})\n\n # If we have separate GPU components now we pipe that\n if nn_components:\n try:\n self._run_nn_components(docs, nn_components, id2text=id2text)\n except Exception as e:\n self.log.warning(e, exc_info=True, stack_info=True)\n\n return docs\n\n def multiprocessing_pipe(self,\n in_data: Union[List[Tuple], Iterable[Tuple]],\n nproc: Optional[int] = None,\n batch_size: Optional[int] = None,\n only_cui: bool = False,\n addl_info: List[str] = [],\n return_dict: bool = True,\n batch_factor: int = 2) -> Union[List[Tuple], Dict]:\n r\"\"\" Run multiprocessing NOT FOR TRAINING\n\n in_data: a list with format: [(id, text), (id, text), ...]\n nproc: the number of processors\n batch_size: the number of texts to buffer\n return_dict: a flag for returning either a dict or a list of tuples\n\n return: a dict: {id: doc_json, id: doc_json, ...} or if return_dict is False, a list of tuples: [(id, doc_json), (id, doc_json), ...]\n \"\"\"\n out: Union[Dict, List[Tuple]]\n\n if nproc == 0:\n raise ValueError(\"nproc cannot be set to zero\")\n\n in_data = list(in_data) if isinstance(in_data, Iterable) else in_data\n n_process = nproc if nproc is not None else min(max(cpu_count() - 1, 1), math.ceil(len(in_data) / batch_factor))\n batch_size = batch_size if batch_size is not None else math.ceil(len(in_data) / (batch_factor * abs(n_process)))\n\n start_method = None\n try:\n if self._meta_cats:\n import torch\n if torch.multiprocessing.get_start_method() != \"spawn\":\n start_method = torch.multiprocessing.get_start_method()\n torch.multiprocessing.set_start_method(\"spawn\", force=True)\n\n entities = self.get_entities_multi_texts(texts=in_data, only_cui=only_cui, addl_info=addl_info,\n n_process=n_process, batch_size=batch_size)\n finally:\n if start_method is not None:\n import torch\n torch.multiprocessing.set_start_method(start_method, force=True)\n\n if return_dict:\n out = {}\n for idx, data in enumerate(in_data):\n out[data[0]] = entities[idx]\n else:\n out = []\n for idx, data in enumerate(in_data):\n out.append((data[0], entities[idx]))\n\n return out\n\n def _mp_cons(self, in_q: Queue, out_list: List, min_free_memory: int, lock: Lock, pid: int = 0, only_cui: bool = False, addl_info: List = []) -> None:\n out: List = []\n\n while True:\n if not in_q.empty():\n if psutil.virtual_memory().available / 
psutil.virtual_memory().total < min_free_memory:\n with lock:\n out_list.extend(out)\n # Stop a process if there is not enough memory left\n break\n\n data = in_q.get()\n if data is None:\n with lock:\n out_list.extend(out)\n break\n\n for i_text, text in data:\n try:\n # Annotate document\n doc = self.get_entities(text=text, only_cui=only_cui, addl_info=addl_info)\n out.append((i_text, doc))\n except Exception as e:\n self.log.warning(\"PID: %s failed one document in _mp_cons, running will continue normally. \\n\" +\n \"Document length in chars: %s, and ID: %s\", pid, len(str(text)), i_text)\n self.log.warning(str(e))\n sleep(2)\n\n def _doc_to_out(self,\n doc: Doc,\n only_cui: bool,\n addl_info: List[str],\n out_with_text: bool = False) -> Dict:\n out: Dict = {'entities': {}, 'tokens': []}\n cnf_annotation_output = getattr(self.config, 'annotation_output', {})\n if doc is not None:\n out_ent: Dict = {}\n if self.config.general.get('show_nested_entities', False):\n _ents = []\n for _ent in doc._.ents:\n entity = Span(doc, _ent['start'], _ent['end'], label=_ent['label'])\n entity._.cui = _ent['cui']\n entity._.detected_name = _ent['detected_name']\n entity._.context_similarity = _ent['context_similarity']\n entity._.id = _ent['id']\n if 'meta_anns' in _ent:\n entity._.meta_anns = _ent['meta_anns']\n _ents.append(entity)\n else:\n _ents = doc.ents\n\n if cnf_annotation_output.get(\"lowercase_context\", True):\n doc_tokens = [tkn.text_with_ws.lower() for tkn in list(doc)]\n else:\n doc_tokens = [tkn.text_with_ws for tkn in list(doc)]\n\n if cnf_annotation_output.get('doc_extended_info', False):\n # Add tokens if extended info\n out['tokens'] = doc_tokens\n\n context_left = cnf_annotation_output.get('context_left', -1)\n context_right = cnf_annotation_output.get('context_right', -1)\n doc_extended_info = cnf_annotation_output.get('doc_extended_info', False)\n\n for _, ent in enumerate(_ents):\n cui = str(ent._.cui)\n if not only_cui:\n out_ent['pretty_name'] = self.cdb.get_name(cui)\n out_ent['cui'] = cui\n out_ent['type_ids'] = list(self.cdb.cui2type_ids.get(cui, ''))\n out_ent['types'] = [self.cdb.addl_info['type_id2name'].get(tui, '') for tui in out_ent['type_ids']]\n out_ent['source_value'] = ent.text\n out_ent['detected_name'] = str(ent._.detected_name)\n out_ent['acc'] = float(ent._.context_similarity)\n out_ent['context_similarity'] = float(ent._.context_similarity)\n out_ent['start'] = ent.start_char\n out_ent['end'] = ent.end_char\n for addl in addl_info:\n tmp = self.cdb.addl_info.get(addl, {}).get(cui, [])\n out_ent[addl.split(\"2\")[-1]] = list(tmp) if type(tmp) == set else tmp\n out_ent['id'] = ent._.id\n out_ent['meta_anns'] = {}\n\n if doc_extended_info:\n out_ent['start_tkn'] = ent.start\n out_ent['end_tkn'] = ent.end\n\n if context_left > 0 and context_right > 0:\n out_ent['context_left'] = doc_tokens[max(ent.start - context_left, 0):ent.start]\n out_ent['context_right'] = doc_tokens[ent.end:min(ent.end + context_right, len(doc_tokens))]\n out_ent['context_center'] = doc_tokens[ent.start:ent.end]\n\n if hasattr(ent._, 'meta_anns') and ent._.meta_anns:\n out_ent['meta_anns'] = ent._.meta_anns\n\n out['entities'][out_ent['id']] = dict(out_ent)\n else:\n out['entities'][ent._.id] = cui\n\n if cnf_annotation_output.get('include_text_in_output', False) or out_with_text:\n out['text'] = doc.text\n return out\n\n def _get_trimmed_text(self, text: Optional[str]) -> str:\n return text[0:self.config.preprocessing.get('max_document_length')] if text is not None and len(text) > 0 else 
\"\"\n\n def _generate_trimmed_texts(self, texts: Union[Iterable[str], Iterable[Tuple]]) -> Iterable[str]:\n text_: str\n for text in texts:\n text_ = text[1] if isinstance(text, tuple) else text\n yield self._get_trimmed_text(text_)\n\n def _get_trimmed_texts(self, texts: Union[Iterable[str], Iterable[Tuple]]) -> List[str]:\n trimmed: List = []\n text_: str\n for text in texts:\n text_ = text[1] if isinstance(text, tuple) else text\n trimmed.append(self._get_trimmed_text(text_))\n return trimmed\n\n @staticmethod\n def _pipe_error_handler(proc_name: str, proc: \"Pipe\", docs: List[Doc], e: Exception) -> None:\n CAT.log.warning(\"Exception raised when applying component %s to a batch of docs.\", proc_name)\n CAT.log.warning(e, exc_info=True, stack_info=True)\n if docs is not None:\n CAT.log.warning(\"Docs contained in the batch:\")\n for doc in docs:\n if hasattr(doc, \"text\"):\n CAT.log.warning(\"%s...\", doc.text[:50])\n\n @staticmethod\n def _get_doc_annotations(doc: Doc):\n if type(doc['annotations']) == list:\n return doc['annotations']\n if type(doc['annotations']) == dict:\n return doc['annotations'].values()\n return None\n\n def destroy_pipe(self):\n self.pipe.destroy()\n" ]
[ [ "torch.set_num_threads", "torch.multiprocessing.get_start_method", "torch.multiprocessing.set_start_method" ] ]
akrouriad/rlberry
[ "dde4e2cbafca05fdef1df07646bb6368059eeadf" ]
[ "rlberry/utils/torch.py" ]
[ "import os\nimport re\nimport shutil\nfrom subprocess import check_output, run, PIPE\nimport numpy as np\nimport torch\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_gpu_memory_map():\n result = check_output(\n [\"nvidia-smi\", \"--query-gpu=memory.used\", \"--format=csv,nounits,noheader\"]\n )\n return [int(x) for x in result.split()]\n\n\ndef least_used_device():\n \"\"\"Get the GPU device with most available memory.\"\"\"\n if not torch.cuda.is_available():\n raise RuntimeError(\"cuda unavailable\")\n\n if shutil.which(\"nvidia-smi\") is None:\n raise RuntimeError(\n \"nvidia-smi unavailable: \\\ncannot select device with most least memory used.\"\n )\n\n memory_map = get_gpu_memory_map()\n device_id = np.argmin(memory_map)\n logger.info(\n f\"Choosing GPU device: {device_id}, \" f\"memory used: {memory_map[device_id]}\"\n )\n return torch.device(\"cuda:{}\".format(device_id))\n\n\ndef choose_device(preferred_device, default_device=\"cpu\"):\n if preferred_device == \"cuda:best\":\n try:\n preferred_device = least_used_device()\n except RuntimeError:\n logger.info(\n f\"Could not find least used device (nvidia-smi might be missing), use cuda:0 instead\"\n )\n if torch.cuda.is_available():\n return choose_device(\"cuda:0\")\n else:\n return choose_device(\"cpu\")\n try:\n torch.zeros((1,), device=preferred_device) # Test availability\n except (RuntimeError, AssertionError) as e:\n logger.info(\n f\"Preferred device {preferred_device} unavailable ({e}).\"\n f\"Switching to default {default_device}\"\n )\n return default_device\n return preferred_device\n\n\ndef get_memory(pid=None):\n if not pid:\n pid = os.getpid()\n command = \"nvidia-smi\"\n result = run(\n command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True\n ).stdout\n m = re.findall(\n \"\\| *[0-9] *\" + str(pid) + \" *C *.*python.*? +([0-9]+).*\\|\",\n result,\n re.MULTILINE,\n )\n return [int(mem) for mem in m]\n" ]
[ [ "torch.zeros", "numpy.argmin", "torch.cuda.is_available" ] ]
jtiscione/doodlecritic
[ "3af8245330523109b7452d3afc7d8d25d43d182c" ]
[ "train.py" ]
[ "import sys\nimport os\nfrom os.path import expanduser\nimport pickle\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data\nimport torch.onnx\nimport re\nimport json\nfrom PIL import Image, ImageDraw\nimport torch\nimport numpy as np\n\n# Training script- trains a Pytorch model against the Google Quickdraw dataset:\n# https://github.com/googlecreativelab/quickdraw-dataset\n#\n# Specifically, it uses the \"simplified Drawing files\":\n#\n# https://console.cloud.google.com/storage/browser/quickdraw_dataset/full/simplified\n#\n# Also see https://www.kaggle.com/google/tinyquickdraw for a single downloadable tar file\n# with about 50 million samples separated into 343 classes, which is where I got mine.\n#\n# It expects those files to be in ~/data/quickdraw. Specify any alternate path on the command line.\n# \n# As output it generates two files: doodles.pth (internal format) and doodles.onnx (ONNX export format).\n#\n# The model used here is a convolutional neural network accepting 1x64x64 inputs\n# (i.e. black-and-white 64x64 images). Output is 344 neurons (i.e. one per label) with an extra neuron\n# corresponding to label \"nothing\".\n# \n# NOTES:\n# \n# If doodles.pth is found (typically saved from a previous run), it will be loaded into the\n# current model; otherwise it will start with a set of random weights. File size is approx. 300 MB.\n# \n# If it finds at any point during training that the output files doodles.pth or doodles.onnx\n# are not on the drive, it will write new copies immediately with its current state (even though\n# this means the first versions will only contain random weights). Deleting the files\n# generates fresh copies, and so does finishing a training epoch (overwriting the prior versions).\n# Because the data set is so immense, each epoch takes several hours to complete.\n# In practice, with this model, performance levels off after about 3-4 epochs, with the network\n# agreeing with Google's classification about 73% of the time.\n# \n# This way, if you need to edit a hyperparameter or go to work, you can pause execution by\n# deleting the current doodles.pth and doodles.onnx files, letting it write new ones,\n# and then hitting Ctrl-C. Typically you will want to adjust the learning rate downward\n# or experiment with a different optimizer after the script has run for a few hours and\n# its performance has reached a plateau. After you make your edits the script will pick up\n# where it left off.\n#\n# If SAVE_BACKUP_FILES is set to True, the script will save backups as training progresses.\n# Each time performance reaches a new record, a file will be saved with a filename indicating the\n# new record number of correct responses. This is to avoid losing progress if the script crashes.\n# (Raising the batch size too high can cause spurious out-of-memory errors at random times.)\n\n\n# Specify data folder as command line argument; default is ~/data/quickdraw\nDATA_DIRECTORY = '~/data/quickdraw'\nif len(sys.argv) > 1:\n DATA_DIRECTORY = sys.argv[1]\nif DATA_DIRECTORY[0] == '~':\n DATA_DIRECTORY = expanduser(DATA_DIRECTORY)\n\n# Standard industry practice: Jack this number up as high as you can, then carefully lower it\n# until the script stops crashing. 
Final value is dependent on GPU memory.\n# This is a safe batch size to use on an RTX 2060 with 6 GB.\nBATCH_SIZE = 1000\n\n# Hyperparameters; both SGD and Adam work well, at least in the beginning; use SGD by default\nOPTIMIZER_NAME = 'SGD'\n\nSGD_LEARNING_RATE = 0.01\nSGD_MOMENTUM = 0\n\nADAM_LEARNING_RATE = 0.001\nADAM_BETAS = (0.9, 0.99)\nADAM_EPSILON = 0.0001\n\nINDEX_CACHE_FILE = './index_cache.pkl'\nLABELS_FILE = './labels.txt'\n\nSTATE_DICT_FILE = './doodles.pth'\nONNX_FILE = './doodles.onnx'\n\nSAVE_BACKUP_FILES = True\nNUMBERED_STATE_DICT_FILE_TEMPLATE = './doodles_{}_of_{}.pth'\nNUMBERED_ONNX_FILE_TEMPLATE = './doodles_{}_of_{}.onnx'\n\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# If it's installed, turn this on to enable NVidia's Apex AMP Pytorch extension.\n# This will let us do calculations in FP16 on the GPU which will save memory on the card\n# and let us raise the batch size. It will also leverage RTX tensor cores on RTX cards.\n# Default is set to False, because compiling and installing AMP is an involved process-\n# NVidia's CUDA Toolkit to be installed on your system before you can compile it using pip.\n\nMIXED_PRECISION = False\n\nif MIXED_PRECISION and torch.cuda.is_available():\n # See if the AMP Pytorch extension has been installed; otherwise stick to standard FP32.\n # If we are using mixed precision we can raise the batch size but keep it a multiple of 8.\n # All tensor dimensions must be multiples of 8 to trigger NVidia's tensor core optimizations.\n try:\n from apex import amp, optimizers\n MIXED_PRECISION = True\n BATCH_SIZE = int(BATCH_SIZE * 1.6) # Raising it by 60%\n print('Using mixed precision.')\n except ImportError:\n MIXED_PRECISION = False\n\n# This is a torch DataSet implementation that makes the following assumptions:\n#\n# 1. Data consists of a set of text files with \".ndjson\" extensions in the specified directory.\n# 2. Each line in the .ndjson file is a JSON string with all data for a single sample.\n# 3. Each line of JSON has the following format (omitting extraneous fields):\n# {\"word\":\"elephant\",\"drawing\":[[[0, 1, 10],[25, 103, 163]],[[4,15,134,234,250],[27,22,6,4,0]]]}\n# Array \"drawing\" has the brush strokes, each stroke a pair of arrays with x and y coordinates on a 256x256 grid.\n# 4. We can build our label list by only looking at the first line of each file. 
(All lines have same value for \"word\".)\nclass QuickDrawDataset(torch.utils.data.Dataset):\n\n # Take the batch size, so we know how much to pad with all-zero samples mapping to the \"blank\" channel.\n # This way we ensure we deliver full-sized batches interspersed with a few blank samples mapping to label \"nothing\".\n def __init__(self, dataDir, batch_size):\n super(QuickDrawDataset, self).__init__()\n print('Data folder: ' + dataDir)\n self.dataDir = dataDir\n self.filenames = list(filter(lambda x: x.endswith(\".ndjson\"), sorted(os.listdir(dataDir)))) #[1:20]\n self.filenameByIndex = []\n self.fileByteOffsetByIndex = []\n self.labelListIndices = {}\n self.labelList = []\n\n for filename in self.filenames:\n print('Indexing ' + filename)\n file = open(dataDir + \"/\" + filename, \"r\")\n byte_offset = 0\n word = None\n for line in file:\n if (word == None):\n words = re.findall('\\\"word\\\":\\\"([\\w\\s-]+)\\\"', line)\n word = words[0]\n self.labelListIndices[word] = len(self.labelList)\n self.labelList.append(word)\n # Only use the ones Google recognizes\n if (len(re.findall('\\\"recognized\\\":true', line)) > 0):\n self.filenameByIndex.append(filename)\n self.fileByteOffsetByIndex.append(byte_offset)\n byte_offset += len(line)\n file.close()\n\n self.labelListIndices['nothing'] = len(self.labelList)\n self.labelList.append('nothing')\n if MIXED_PRECISION:\n # NVidia really wants tensor dimensions to be multiples of 8, make sure here\n extra_nothings = 0\n while len(self.labelList) % 8 > 0:\n extra_nothings += 1\n self.labelListIndices['nothing_{}'.format(extra_nothings)] = len(self.labelList)\n self.labelList.append('nothing_{}'.format(extra_nothings))\n\n self.paddingLength = batch_size - (len(self.filenameByIndex) % batch_size)\n print('padding length {}'.format(self.paddingLength))\n\n def __len__(self):\n return len(self.filenameByIndex) + self.paddingLength\n\n def __getitem__(self, idx):\n if idx >= len(self.filenameByIndex):\n # NULL sample\n return torch.zeros(1, 64, 64, dtype=torch.float), self.labelListIndices['nothing']\n filename = self.filenameByIndex[idx]\n byte_offset = self.fileByteOffsetByIndex[idx]\n file = open(self.dataDir + '/' + filename, 'r')\n file.seek(byte_offset)\n line = file.readline()\n file.close()\n # Convert line containing brush stroke coordinate list to a 256x256 image tensor using PIL\n entry = json.loads(line)\n drawing = entry.get('drawing')\n im = Image.new(\"L\", (256, 256))\n draw = ImageDraw.Draw(im)\n for stroke in drawing:\n x_coords = stroke[0]\n y_coords = stroke[1]\n for i in range(len(x_coords) - 1):\n draw.line((x_coords[i], y_coords[i], x_coords[i + 1], y_coords[i + 1]), fill=255, width=5)\n im = im.resize((64, 64), Image.ANTIALIAS)\n word = entry.get('word')\n imageTensor = torch.tensor(np.array(im) / 256, dtype=torch.float)\n\n # Alter image slightly to look like the inputs we're eventually going to get from the client.\n # This is a limitation imposed by JavaScript which implements \"antialiasing\" on downsized canvases by\n # nearest-neighbor downsampling, smoothed onscreen by a WebGL filter that looks nice but doesn't alter the image data,\n # so we only get two-color jagged images.\n #\n # Tedious workarounds are possible: https://stackoverflow.com/questions/2303690/resizing-an-image-in-an-html5-canvas\n THRESHOLD = 0.1\n imageTensor[imageTensor >= THRESHOLD] = 1.0\n imageTensor[imageTensor < THRESHOLD] = 0.0\n\n imageTensor = imageTensor.unsqueeze(0)\n\n return imageTensor, self.labelListIndices.get(word)\n\n# Takes input 
of size Nx1x64x64, a batch of N black and white 64x64 images.\n# Applies two convolutional layers and three fully connected layers.\n\nclass CNNModel(nn.Module):\n\n # input_size is 64 (input samples are 64x64 images); num_classes is 344\n def __init__(self, input_size, num_classes):\n super(CNNModel, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=2, bias=False),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2))\n dimension = int(64 * pow(input_size / 4, 2))\n self.fc1 = nn.Sequential(nn.Linear(dimension, int(dimension / 4)), nn.Dropout(0.25))\n self.fc2 = nn.Sequential(nn.Linear(int(dimension / 4), int(dimension / 8)), nn.Dropout(0.25))\n self.fc3 = nn.Sequential(nn.Linear(int(dimension / 8), num_classes))\n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.view(out.size(0), -1)\n out = self.fc1(out)\n out = self.fc2(out)\n out = self.fc3(out)\n return out\n\n# Main part\nif __name__ == '__main__':\n\n if os.path.isfile(INDEX_CACHE_FILE):\n print(\"Loading {}\".format(INDEX_CACHE_FILE))\n infile = open(INDEX_CACHE_FILE, 'rb')\n dataSet = pickle.load(infile)\n infile.close()\n else:\n dataSet = QuickDrawDataset(DATA_DIRECTORY, BATCH_SIZE)\n outfile = open(INDEX_CACHE_FILE, 'wb')\n pickle.dump(dataSet, outfile)\n outfile.close()\n print(\"Saved {}\".format(INDEX_CACHE_FILE))\n\n if (os.path.isfile(LABELS_FILE) == False):\n with open(LABELS_FILE, 'w') as f:\n for label in dataSet.labelList:\n f.write(\"%s\\n\" % label)\n f.close()\n print(\"Saved {}\".format(LABELS_FILE))\n\n print('Total number of labels: {}'.format(len(dataSet.labelList)))\n print('Total number of samples: {}'.format(len(dataSet)))\n\n randomSampler = torch.utils.data.RandomSampler(dataSet)\n dataLoader = torch.utils.data.DataLoader(dataSet, batch_size = BATCH_SIZE, sampler = randomSampler, num_workers=4, pin_memory=True)\n\n model = CNNModel(input_size=64, num_classes=len(dataSet.labelList)).to(DEVICE)\n\n if (os.path.isfile(STATE_DICT_FILE)):\n # We found an existing doodles.pth file! 
Instead of starting from scratch we'll load this one.\n # and continue training it.\n print(\"Loading {}\".format(STATE_DICT_FILE))\n state_dict = torch.load(STATE_DICT_FILE)\n model.load_state_dict(state_dict)\n\n optimizer = None\n if (OPTIMIZER_NAME == 'SGD'):\n optimizer = optim.SGD(model.parameters(), lr = SGD_LEARNING_RATE, momentum=SGD_MOMENTUM)\n print('Using SGD with learning rate {} and momentum {}'.format(SGD_LEARNING_RATE, SGD_MOMENTUM))\n elif (OPTIMIZER_NAME == 'Adam'):\n if MIXED_PRECISION:\n optimizer = optim.Adam(model.parameters(), lr = ADAM_LEARNING_RATE, betas = ADAM_BETAS, eps = ADAM_EPSILON)\n else:\n optimizer = optim.Adam(model.parameters(), lr = ADAM_LEARNING_RATE)\n print('Using Adam with learning rate {}'.format(ADAM_LEARNING_RATE))\n else:\n print('No optimizer specified!')\n\n if MIXED_PRECISION:\n # Using NVidia's AMP Pytorch extension\n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O1\")\n\n criterion = nn.CrossEntropyLoss()\n\n ROLLING_AVERAGE_RUN_LENGTH = 100\n rolling = np.zeros(0)\n record_rolling_average = 0\n count = 0\n\n # On my computer each epoch takes about 4 hours; the script consumes ~250 watts or about 1 kWh per epoch.\n # Performance reaches a plateau after 3-4 epochs.\n for epoch in range(4):\n print('Epoch: {}'.format(epoch))\n batch_number = 0\n for i, (images, labels) in enumerate(dataLoader):\n count = count + 1\n images = images.to(DEVICE)\n labels = labels.to(DEVICE)\n optimizer.zero_grad()\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n correct = (predicted == labels).sum().item()\n if (count < ROLLING_AVERAGE_RUN_LENGTH):\n rolling = np.insert(rolling, 0, correct)\n else:\n rolling = np.roll(rolling, 1)\n rolling[0] = correct\n rolling_average = int(np.mean(rolling))\n loss = criterion(outputs, labels)\n if MIXED_PRECISION:\n # Use of FP16 requires loss scaling, due to underflow error.\n # See https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n optimizer.step()\n print('EPOCH: {} BATCH: {} SIZE: {} CORRECT: {} (ROLLING AVG: {})'.format(epoch, batch_number, BATCH_SIZE, correct, rolling_average))\n batch_number += 1\n # print(loss.item())\n\n # To be safe, save model whenever performance reaches a new high\n if (count < 2 * ROLLING_AVERAGE_RUN_LENGTH): # (once rolling average has had time to stabilize)\n record_rolling_average = max(rolling_average, record_rolling_average)\n else:\n if (rolling_average > record_rolling_average):\n # Save model with a munged filename; e.g. 
doodles_706.pth\n if (SAVE_BACKUP_FILES):\n backupPth = NUMBERED_STATE_DICT_FILE_TEMPLATE.format(rolling_average, BATCH_SIZE)\n torch.save(model.state_dict(), backupPth)\n print('Saved model file {}'.format(backupPth))\n # Delete the last backup .pth file we wrote to avoid filling up the drive\n if (record_rolling_average > 0):\n old_file = NUMBERED_STATE_DICT_FILE_TEMPLATE.format(record_rolling_average, BATCH_SIZE)\n if os.path.exists(old_file):\n os.remove(old_file)\n # Same for ONNX\n backupOnnx = NUMBERED_ONNX_FILE_TEMPLATE.format(rolling_average, BATCH_SIZE)\n if MIXED_PRECISION:\n with amp.disable_casts():\n dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)\n torch.onnx.export(model, dummy_input, backupOnnx, verbose=False)\n else:\n dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)\n torch.onnx.export(model, dummy_input, backupOnnx, verbose=False)\n print('Saved ONNX file {}'.format(backupOnnx))\n # Delete the last backup ONNX file we wrote to avoid filling up the drive\n if (record_rolling_average > 0):\n old_file = NUMBERED_ONNX_FILE_TEMPLATE.format(record_rolling_average, BATCH_SIZE)\n if os.path.exists(old_file):\n os.remove(old_file)\n record_rolling_average = rolling_average\n\n # Deleting the model file during training triggers a fresh rewrite:\n if (os.path.isfile(STATE_DICT_FILE) == False):\n torch.save(model.state_dict(), STATE_DICT_FILE)\n print('Saved model file {}'.format(STATE_DICT_FILE))\n # ONNX: same policy\n if (os.path.isfile(ONNX_FILE) == False):\n if MIXED_PRECISION:\n with amp.disable_casts():\n dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)\n torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=False)\n else:\n dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)\n torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=False)\n print('Exported ONNX file {}'.format(ONNX_FILE))\n # Epoch finished\n # Save the current model at the end of an epoch\n torch.save(model.state_dict(), STATE_DICT_FILE)\n # Export ONNX with loudmouth flag set\n if (MIXED_PRECISION):\n with amp.disable_casts():\n dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)\n torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=True)\n else:\n dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)\n torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=True)\n print('EPOCH {} FINISHED, SAVED {} AND {}'.format(epoch, STATE_DICT_FILE, ONNX_FILE))\n" ]
[ [ "torch.utils.data.DataLoader", "numpy.insert", "torch.cuda.is_available", "torch.nn.Conv2d", "torch.max", "torch.nn.Dropout", "torch.nn.BatchNorm2d", "torch.onnx.export", "torch.randn", "torch.utils.data.RandomSampler", "numpy.mean", "torch.nn.MaxPool2d", "torch.load", "numpy.zeros", "torch.zeros", "numpy.roll", "torch.nn.CrossEntropyLoss", "numpy.array", "torch.nn.ReLU" ] ]
r-graves/demo_lab
[ "729cdf61774bf32d2c07ca68bf70e65470700cc2" ]
[ "venv/lib/python3.10/site-packages/pandas/tests/series/methods/test_drop_duplicates.py" ]
[ "import numpy as np\nimport pytest\n\nfrom pandas import (\n NA,\n Categorical,\n Series,\n)\nimport pandas._testing as tm\n\n\[email protected](\n \"keep, expected\",\n [\n (\"first\", Series([False, False, False, False, True, True, False])),\n (\"last\", Series([False, True, True, False, False, False, False])),\n (False, Series([False, True, True, False, True, True, False])),\n ],\n)\ndef test_drop_duplicates(any_numpy_dtype, keep, expected):\n tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype))\n\n if tc.dtype == \"bool\":\n pytest.skip(\"tested separately in test_drop_duplicates_bool\")\n\n tm.assert_series_equal(tc.duplicated(keep=keep), expected)\n tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(keep=keep, inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc[~expected])\n\n\[email protected](\n \"keep, expected\",\n [\n (\"first\", Series([False, False, True, True])),\n (\"last\", Series([True, True, False, False])),\n (False, Series([True, True, True, True])),\n ],\n)\ndef test_drop_duplicates_bool(keep, expected):\n tc = Series([True, False, True, False])\n\n tm.assert_series_equal(tc.duplicated(keep=keep), expected)\n tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(keep=keep, inplace=True)\n tm.assert_series_equal(sc, tc[~expected])\n assert return_value is None\n\n\[email protected](\"values\", [[], list(range(5))])\ndef test_drop_duplicates_no_duplicates(any_numpy_dtype, keep, values):\n tc = Series(values, dtype=np.dtype(any_numpy_dtype))\n expected = Series([False] * len(tc), dtype=\"bool\")\n\n if tc.dtype == \"bool\":\n # 0 -> False and 1-> True\n # any other value would be duplicated\n tc = tc[:2]\n expected = expected[:2]\n\n tm.assert_series_equal(tc.duplicated(keep=keep), expected)\n\n result_dropped = tc.drop_duplicates(keep=keep)\n tm.assert_series_equal(result_dropped, tc)\n\n # validate shallow copy\n assert result_dropped is not tc\n\n\nclass TestSeriesDropDuplicates:\n @pytest.fixture(\n params=[\"int_\", \"uint\", \"float_\", \"unicode_\", \"timedelta64[h]\", \"datetime64[D]\"]\n )\n def dtype(self, request):\n return request.param\n\n @pytest.fixture\n def cat_series1(self, dtype, ordered):\n # Test case 1\n cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))\n\n input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))\n cat = Categorical(input1, categories=cat_array, ordered=ordered)\n tc1 = Series(cat)\n return tc1\n\n def test_drop_duplicates_categorical_non_bool(self, cat_series1):\n tc1 = cat_series1\n\n expected = Series([False, False, False, True])\n\n result = tc1.duplicated()\n tm.assert_series_equal(result, expected)\n\n result = tc1.drop_duplicates()\n tm.assert_series_equal(result, tc1[~expected])\n\n sc = tc1.copy()\n return_value = sc.drop_duplicates(inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc1[~expected])\n\n def test_drop_duplicates_categorical_non_bool_keeplast(self, cat_series1):\n tc1 = cat_series1\n\n expected = Series([False, False, True, False])\n\n result = tc1.duplicated(keep=\"last\")\n tm.assert_series_equal(result, expected)\n\n result = tc1.drop_duplicates(keep=\"last\")\n tm.assert_series_equal(result, tc1[~expected])\n\n sc = tc1.copy()\n return_value = sc.drop_duplicates(keep=\"last\", inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc1[~expected])\n\n def 
test_drop_duplicates_categorical_non_bool_keepfalse(self, cat_series1):\n tc1 = cat_series1\n\n expected = Series([False, False, True, True])\n\n result = tc1.duplicated(keep=False)\n tm.assert_series_equal(result, expected)\n\n result = tc1.drop_duplicates(keep=False)\n tm.assert_series_equal(result, tc1[~expected])\n\n sc = tc1.copy()\n return_value = sc.drop_duplicates(keep=False, inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc1[~expected])\n\n @pytest.fixture\n def cat_series2(self, dtype, ordered):\n # Test case 2; TODO: better name\n cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))\n\n input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))\n cat = Categorical(input2, categories=cat_array, ordered=ordered)\n tc2 = Series(cat)\n return tc2\n\n def test_drop_duplicates_categorical_non_bool2(self, cat_series2):\n # Test case 2; TODO: better name\n tc2 = cat_series2\n\n expected = Series([False, False, False, False, True, True, False])\n\n result = tc2.duplicated()\n tm.assert_series_equal(result, expected)\n\n result = tc2.drop_duplicates()\n tm.assert_series_equal(result, tc2[~expected])\n\n sc = tc2.copy()\n return_value = sc.drop_duplicates(inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc2[~expected])\n\n def test_drop_duplicates_categorical_non_bool2_keeplast(self, cat_series2):\n tc2 = cat_series2\n\n expected = Series([False, True, True, False, False, False, False])\n\n result = tc2.duplicated(keep=\"last\")\n tm.assert_series_equal(result, expected)\n\n result = tc2.drop_duplicates(keep=\"last\")\n tm.assert_series_equal(result, tc2[~expected])\n\n sc = tc2.copy()\n return_value = sc.drop_duplicates(keep=\"last\", inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc2[~expected])\n\n def test_drop_duplicates_categorical_non_bool2_keepfalse(self, cat_series2):\n tc2 = cat_series2\n\n expected = Series([False, True, True, False, True, True, False])\n\n result = tc2.duplicated(keep=False)\n tm.assert_series_equal(result, expected)\n\n result = tc2.drop_duplicates(keep=False)\n tm.assert_series_equal(result, tc2[~expected])\n\n sc = tc2.copy()\n return_value = sc.drop_duplicates(keep=False, inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc2[~expected])\n\n def test_drop_duplicates_categorical_bool(self, ordered):\n tc = Series(\n Categorical(\n [True, False, True, False], categories=[True, False], ordered=ordered\n )\n )\n\n expected = Series([False, False, True, True])\n tm.assert_series_equal(tc.duplicated(), expected)\n tm.assert_series_equal(tc.drop_duplicates(), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc[~expected])\n\n expected = Series([True, True, False, False])\n tm.assert_series_equal(tc.duplicated(keep=\"last\"), expected)\n tm.assert_series_equal(tc.drop_duplicates(keep=\"last\"), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(keep=\"last\", inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc[~expected])\n\n expected = Series([True, True, True, True])\n tm.assert_series_equal(tc.duplicated(keep=False), expected)\n tm.assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected])\n sc = tc.copy()\n return_value = sc.drop_duplicates(keep=False, inplace=True)\n assert return_value is None\n tm.assert_series_equal(sc, tc[~expected])\n\n def test_drop_duplicates_categorical_bool_na(self):\n # GH#44351\n ser = 
Series(\n Categorical(\n [True, False, True, False, NA], categories=[True, False], ordered=True\n )\n )\n result = ser.drop_duplicates()\n expected = Series(\n Categorical([True, False, np.nan], categories=[True, False], ordered=True),\n index=[0, 1, 4],\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_drop_duplicates_pos_args_deprecation():\n # GH#41485\n s = Series([\"a\", \"b\", \"c\", \"b\"])\n msg = (\n \"In a future version of pandas all arguments of \"\n \"Series.drop_duplicates will be keyword-only\"\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = s.drop_duplicates(\"last\")\n expected = Series([\"a\", \"c\", \"b\"], index=[0, 2, 3])\n tm.assert_series_equal(expected, result)\n" ]
[ [ "pandas.Series", "pandas._testing.assert_produces_warning", "numpy.dtype", "pandas.Categorical", "pandas._testing.assert_series_equal" ] ]
Ivanfdezr/CentralSoftware
[ "8681fedd4814dc60deb527a370411350b40c994c" ]
[ "OneSpanAnalysis_Mdl.py" ]
[ "import numpy as np\r\nimport numpy.linalg as la\r\nfrom MdlUtilities import Field, FieldList\r\nimport MdlUtilities as mdl\r\n\r\n\r\ndef get_osaCasing_fields():\r\n\r\n\t\r\n\tOD = Field(2030)\r\n\tID = Field(2031)\r\n\tWeight = Field(2032)\r\n\tDensity = Field(2039)\r\n\tE = Field(2040)\r\n\tosaCasing_fields = FieldList()\r\n\tosaCasing_fields.append( OD )\r\n\tosaCasing_fields.append( ID )\r\n\tosaCasing_fields.append( Weight )\r\n\tosaCasing_fields.append( Density )\r\n\tosaCasing_fields.append( E )\t\r\n\t\r\n\treturn osaCasing_fields\r\n\r\n\r\ndef get_osaCent_fields():\r\n\r\n\tType = Field(2049)\r\n\tIPOD = Field(2009)\r\n\tCentOD = Field(2011)\r\n\t#CentID = Field(2012)\r\n\tResF_SO67 = Field(2018)\r\n\tminResF = Field(2017)\r\n\tSO_minResF = Field(2019)\r\n\tResF_SO67.set_representation('Res. Force @ SO=67%')\r\n\tminResF.set_representation('minimum Res. Force')\r\n\tSO_minResF.set_representation('StandOff @ min. Res. F.')\r\n\tosaCent_fields = FieldList()\r\n\tosaCent_fields.append( Type )\r\n\tosaCent_fields.append( IPOD )\r\n\tosaCent_fields.append( CentOD )\r\n\t#osaCent_fields.append( CentID )\r\n\tosaCent_fields.append( ResF_SO67 )\r\n\tosaCent_fields.append( minResF )\r\n\tosaCent_fields.append( SO_minResF )\r\n\t\r\n\treturn osaCent_fields\r\n\r\n\r\ndef get_osaWellbore_fields():\r\n\r\n\tHoleID = Field(2010)\r\n\tMaxSpan = Field(2061)\r\n\tMudIPDensity = Field(2077)\r\n\tMudOPDensity = Field(2077)\r\n\tHoleID.set_representation('Hole ID')\r\n\tHoleID.set_abbreviation('HoleID')\r\n\tMaxSpan.set_representation('Max span')\r\n\tMaxSpan.set_abbreviation('MaxSpan')\r\n\tMudIPDensity.set_representation('Mud inside pipe')\r\n\tMudIPDensity.set_abbreviation('MudIPDensity')\r\n\tMudOPDensity.set_representation('Mud in annulus')\r\n\tMudOPDensity.set_abbreviation('MudOPDensity')\r\n\tosaWellbore_fields = FieldList()\r\n\tosaWellbore_fields.append( HoleID )\r\n\tosaWellbore_fields.append( MaxSpan )\r\n\tosaWellbore_fields.append( MudIPDensity )\r\n\tosaWellbore_fields.append( MudOPDensity )\r\n\t\r\n\treturn osaWellbore_fields\r\n\r\n\r\ndef get_osaOutputdata1_fields():\r\n\r\n\tclearanceA = Field(2073, altBg=True, altFg=True)\r\n\tclearanceB = Field(2073, altBg=True, altFg=True)\r\n\tclearanceM = Field(2073, altBg=True, altFg=True)\r\n\tsideForceA = Field(2074, altBg=True, altFg=True)\r\n\tsideForceB = Field(2074, altBg=True, altFg=True)\r\n\tsideForceM = Field(2074, altBg=True, altFg=True)\r\n\tstandoffA = Field(2078, altBg=True, altFg=True)\r\n\tstandoffB = Field(2078, altBg=True, altFg=True)\r\n\tstandoffM = Field(2078, altBg=True, altFg=True)\r\n\tclearanceA.set_representation('Annular clearance @ cent. A')\r\n\tclearanceA.set_abbreviation('ClearanceA')\r\n\tclearanceB.set_representation('Annular clearance @ cent. B')\r\n\tclearanceB.set_abbreviation('ClearanceB')\r\n\tclearanceM.set_representation('Annular clearance @ mid span')\r\n\tclearanceM.set_abbreviation('ClearanceM')\r\n\tsideForceA.set_representation('Side force @ cent. A')\r\n\tsideForceA.set_abbreviation('SideForceA')\r\n\tsideForceB.set_representation('Side force @ cent. B')\r\n\tsideForceB.set_abbreviation('SideForceB')\r\n\tsideForceM.set_representation('Side force @ mid span')\r\n\tsideForceM.set_abbreviation('SideForceM')\r\n\tstandoffA.set_representation('Standoff @ cent. A')\r\n\tstandoffA.set_abbreviation('StandoffA')\r\n\tstandoffB.set_representation('Standoff @ cent. 
B')\r\n\tstandoffB.set_abbreviation('StandoffB')\r\n\tstandoffM.set_representation('Standoff @ mid span')\r\n\tstandoffM.set_abbreviation('StandoffM')\r\n\tosaOutputdata1_fields = FieldList()\r\n\tosaOutputdata1_fields.append( clearanceA )\r\n\tosaOutputdata1_fields.append( clearanceB )\r\n\tosaOutputdata1_fields.append( clearanceM )\r\n\tosaOutputdata1_fields.append( sideForceA )\r\n\tosaOutputdata1_fields.append( sideForceB )\r\n\tosaOutputdata1_fields.append( sideForceM )\r\n\tosaOutputdata1_fields.append( standoffA )\r\n\tosaOutputdata1_fields.append( standoffB )\r\n\tosaOutputdata1_fields.append( standoffM )\r\n\t\r\n\treturn osaOutputdata1_fields\r\n\r\n\r\ndef get_osaOutputdata2_fields():\r\n\r\n\taxialForce = Field(2075, altBg=True, altFg=True)\r\n\tdeflection = Field(2076, altBg=True, altFg=True)\r\n\twClearance = Field(2073, altBg=True, altFg=True)\r\n\twStandoff = Field(2078, altBg=True, altFg=True)\r\n\taxialForce.set_representation('Axial extra force @ top')\r\n\taxialForce.set_abbreviation('AxialForce')\r\n\tdeflection.set_representation('Max. pipe deflection')\r\n\tdeflection.set_abbreviation('MaxDeflection')\r\n\twClearance.set_representation('Mean wellbore clearance')\r\n\twClearance.set_abbreviation('WellboreClearance')\r\n\twStandoff.set_representation('Mean wellbore standoff')\r\n\twStandoff.set_abbreviation('WellboreStandoff')\r\n\tosaOutputdata2_fields = FieldList()\r\n\tosaOutputdata2_fields.append( axialForce )\r\n\tosaOutputdata2_fields.append( deflection )\r\n\tosaOutputdata2_fields.append( wClearance )\r\n\tosaOutputdata2_fields.append( wStandoff )\r\n\t\r\n\treturn osaOutputdata2_fields\r\n\r\n\r\ndef get_casingDeflectionCurve(self):\r\n\r\n\t# Equation(s) Reference 1:\r\n\t# \tHans C. Juvkam-Wold, Jiang Wu. Casing Deflection and Centralizer Spacing Calculations.\r\n\t# \tSPE Drilling Engineering (December 1992).\r\n\r\n\t# Equation(s) Reference 2:\r\n\t# \tHans C. Juvkam-Wold, Richard L. Baxter. Discussion of Optimal Spacing for Casing Centralizers.\r\n\t# \tSPE Drilling Engineering (December 1988).\r\n\r\n\t# Equation(s) Reference 3:\r\n\t# \tCarlos F. H. Fonseca, Jacques Braile. 
Optimizing of Centralizer Distribution.\r\n\t# \tSPE Latin American Petroleum Engineering Conference (October 1990).\r\n\r\n\tself.osaCasing_fields.referenceUnitConvert_fields()\r\n\tself.osaCentA_fields.referenceUnitConvert_fields()\r\n\tself.osaCentB_fields.referenceUnitConvert_fields()\r\n\tself.osaWellbore_fields.referenceUnitConvert_fields()\r\n\r\n\tRot = lambda φ: np.array( [[np.cos(φ),-np.sin(φ)],[np.sin(φ),np.cos(φ)]] )\r\n\r\n\tdH = self.osaWellbore_fields.HoleID[0]\r\n\tL = self.osaWellbore_fields.MaxSpan[0]*self.osaSpacing_slider.sliderPosition()/100\r\n\tρe = self.osaWellbore_fields.MudOPDensity[0]\r\n\tρi = self.osaWellbore_fields.MudIPDensity[0]\r\n\tρs = self.osaCasing_fields.Density[0]\r\n\tE = self.osaCasing_fields.E[0]\r\n\tw = self.osaCasing_fields.PW[0]\r\n\tD = self.osaCasing_fields.OD[0]\r\n\td = self.osaCasing_fields.ID[0]\r\n\tType_A = self.osaCentA_fields.Type[0]\r\n\tF_So67_A = self.osaCentA_fields.ResF_SO67[0]\r\n\tminF_A = self.osaCentA_fields.minResF[0]\r\n\tSo_minF_A = self.osaCentA_fields.SO_minResF[0]\r\n\tDA = self.osaCentA_fields.COD[0]\r\n\tdA = self.osaCentA_fields.IPOD[0]\r\n\tType_B = self.osaCentB_fields.Type[0]\r\n\tF_So67_B = self.osaCentB_fields.ResF_SO67[0]\r\n\tminF_B = self.osaCentB_fields.minResF[0]\r\n\tSo_minF_B = self.osaCentB_fields.SO_minResF[0]\r\n\tDB = self.osaCentB_fields.COD[0]\r\n\tdB = self.osaCentB_fields.IPOD[0]\r\n\t#kA = ResFA/(DA/2-0.335*(DA-D)) # Con esto se calculan los coeficientes de los resortes ( 0.335=0.67/2 )\r\n\t#kB = ResFB/(DB/2-0.335*(DB-D))\r\n\r\n\tfor field in self.osaWellbore_fields:\r\n\t\tif field[0]<0:\r\n\t\t\traise mdl.LogicalError('Every parameter should be greater than zero.')\r\n\tfor field in self.osaCasing_fields:\r\n\t\tif field[0]<0:\r\n\t\t\traise mdl.LogicalError('Every parameter should be greater than zero.')\r\n\tfor field in self.osaCentA_fields[1:]:\r\n\t\tif field[0]<0:\r\n\t\t\traise mdl.LogicalError('Every parameter should be greater than zero.')\r\n\tfor field in self.osaCentB_fields[1:]:\r\n\t\tif field[0]<0:\r\n\t\t\traise mdl.LogicalError('Every parameter should be greater than zero.')\r\n\r\n\tif dA!=D or dB!=D or dH<=D:\r\n\t\traise mdl.LogicalError('The selected devices are not size-consistent.')\r\n\r\n\tθ = np.pi*self.osaInclination_slider.sliderPosition()/180\r\n\tI = np.pi/64*(D**4-d**4) # [Ref.3] Momento de inercia diferente a momento de inercia polar.\r\n\tF = 30000 # [Ref.1]\r\n\tRadio = L*1e6\r\n\taspr = L*0.02\r\n\r\n\tbuoyancyFactor = mdl.calculate_buoyancyFactor( OD=D, ID=d, ρs=ρs, ρe=ρe, ρi=ρi ) # [Ref.2]\r\n\tw *= buoyancyFactor\r\n\tfC = w*L*np.sin(θ)/2\r\n\r\n\tif Type_A=='Resin': #mdl.isNoneEntry(ResFA):\r\n\t\tyA = 0\r\n\t\tdA = d\r\n\telse:\r\n\t\tkA = 2*(F_So67_A-minF_A)/(So_minF_A-0.67)/(DA-dA)\r\n\t\tyA = fC/kA if (DA<dH) else fC/kA/2\r\n\t\t\r\n\r\n\tif Type_B=='Resin': #mdl.isNoneEntry(ResFB):\r\n\t\tyB = 0\r\n\t\tdB = d\r\n\telse:\r\n\t\tkB = 2*(F_So67_B-minF_B)/(So_minF_B-0.67)/(DB-dB)\r\n\t\tyB = fC/kB if (DB<dH) else fC/kB/2\r\n\r\n\tR = D/2\r\n\trH = dH/2\r\n\trA_min = R+(DA/2-R)*0.1\r\n\trB_min = R+(DB/2-R)*0.1\r\n\trA = (DA/2-yA) if (DA<dH) else (rH-yA)\r\n\trB = (DB/2-yB) if (DB<dH) else (rH-yB)\r\n\r\n\trA = rA_min if (rA<=rA_min) else rA\r\n\trB = rB_min if (rB<=rB_min) else rB\r\n\r\n\tα = np.arctan( (rB-rA)/L )\r\n\tLα = L/np.cos(α)\r\n\tx = np.linspace( 0, Lα, 101 )\r\n\r\n\tK = np.sqrt(F/E/I)\r\n\ty = (Lα/2/Radio/K + w*Lα*np.sin(θ)/2/K/F)*( (np.cosh(K*x)-1)/np.tanh(K*Lα/2) + K*x - np.sinh(K*x) ) - w*np.sin(θ)/2/F*x**2 # [Ref.1]\r\n\tRα = 
Rot(α)\r\n\txy = np.array([x,y])\r\n\tx,y = np.dot(Rα,xy)\r\n\tΔy = rH-rB\r\n\ty += Δy\r\n\r\n\tcH = rH-R\r\n\tcA = rA-R\r\n\tcB = rB-R\r\n\t\r\n\tindexes = y>cH\r\n\ty[indexes] = cH\r\n\tindexes = y<-cH\r\n\ty[indexes] =-cH\r\n\tcy = cH-y\r\n\r\n\trM = rH-y[50]\r\n\tif y[50]==cH:\r\n\t\tfM = fC\r\n\t\tfC = 0\r\n\telse:\r\n\t\tfM = 0\r\n\tcM = rM-R\r\n\r\n\tx -= L/2\r\n\tyoh = y*0\r\n\tohc = np.array([x, yoh])\r\n\tohp = np.array([x, (yoh+rH)*aspr])\r\n\tohm = np.array([x, (yoh-rH)*aspr])\r\n\r\n\txyc = np.array([x, y*aspr])\r\n\txyp = np.array([x, (y+R)*aspr])\r\n\txym = np.array([x, (y-R)*aspr])\r\n\r\n\tφ = θ + np.pi/2\r\n\tRφ = Rot(φ)\r\n\r\n\tOHc = np.dot(Rφ,ohc)\r\n\tOHp = np.dot(Rφ,ohp)\r\n\tOHm = np.dot(Rφ,ohm)\r\n\r\n\tXYc = np.dot(Rφ,xyc)\r\n\tXYp = np.dot(Rφ,xyp)\r\n\tXYm = np.dot(Rφ,xym)\r\n\r\n\tSA = cA/cH\r\n\tSB = cB/cH\r\n\tSM = cM/cH\r\n\tSy = cy/cH\r\n\tδ = (cA+cB)/2-cM\r\n\r\n\tself.osaOutputdata1_fields.clear_content()\r\n\tself.osaOutputdata2_fields.clear_content()\r\n\r\n\tself.osaOutputdata1_fields.ClearanceA.append( mdl.physicalValue( cA, self.osaOutputdata1_fields.ClearanceA.referenceUnit ) )\r\n\tself.osaOutputdata1_fields.ClearanceB.append( mdl.physicalValue( cB, self.osaOutputdata1_fields.ClearanceB.referenceUnit ) )\r\n\tself.osaOutputdata1_fields.ClearanceM.append( mdl.physicalValue( cM, self.osaOutputdata1_fields.ClearanceM.referenceUnit ) )\r\n\r\n\tself.osaOutputdata1_fields.SideForceA.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceA.referenceUnit ) )\r\n\tself.osaOutputdata1_fields.SideForceB.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceB.referenceUnit ) )\r\n\tself.osaOutputdata1_fields.SideForceM.append( mdl.physicalValue( fM, self.osaOutputdata1_fields.SideForceM.referenceUnit ) )\r\n\r\n\tself.osaOutputdata1_fields.StandoffA.append( mdl.physicalValue( SA, self.osaOutputdata1_fields.StandoffA.referenceUnit ) )\r\n\tself.osaOutputdata1_fields.StandoffB.append( mdl.physicalValue( SB, self.osaOutputdata1_fields.StandoffB.referenceUnit ) )\r\n\tself.osaOutputdata1_fields.StandoffM.append( mdl.physicalValue( SM, self.osaOutputdata1_fields.StandoffM.referenceUnit ) )\r\n\r\n\tself.osaOutputdata2_fields.AxialForce.append( mdl.physicalValue( w*L*np.cos(θ), self.osaOutputdata2_fields.AxialForce.referenceUnit ) )\r\n\tself.osaOutputdata2_fields.MaxDeflection.append( mdl.physicalValue( δ, self.osaOutputdata2_fields.MaxDeflection.referenceUnit ) )\r\n\tself.osaOutputdata2_fields.WellboreClearance.append( mdl.physicalValue( np.mean(cy), self.osaOutputdata2_fields.WellboreClearance.referenceUnit ) )\r\n\tself.osaOutputdata2_fields.WellboreStandoff.append( mdl.physicalValue( np.mean(Sy), self.osaOutputdata2_fields.WellboreStandoff.referenceUnit ) )\r\n\r\n\tself.osaCasing_fields.inverseReferenceUnitConvert_fields()\r\n\tself.osaCentA_fields.inverseReferenceUnitConvert_fields()\r\n\tself.osaCentB_fields.inverseReferenceUnitConvert_fields()\r\n\tself.osaWellbore_fields.inverseReferenceUnitConvert_fields()\r\n\tself.osaOutputdata1_fields.inverseReferenceUnitConvert_fields()\r\n\tself.osaOutputdata2_fields.inverseReferenceUnitConvert_fields()\r\n\r\n\tlim = L/2*1.05\r\n\r\n\r\n\r\n\treturn OHc, OHp, OHm, XYc, XYp, XYm, lim, rA, rB, rM\r\n\r\n\r\n\r\n" ]
[ [ "numpy.cosh", "numpy.array", "numpy.arctan", "numpy.cos", "numpy.sinh", "numpy.sqrt", "numpy.sin", "numpy.dot", "numpy.linspace", "numpy.tanh", "numpy.mean" ] ]
HitkoDev/triplet-reid
[ "d80edf7bdcee2ebcab160f1a06224837ac624329" ]
[ "loss.py" ]
[ "import numbers\nimport tensorflow as tf\n\n\ndef all_diffs(a, b):\n \"\"\" Returns a tensor of all combinations of a - b.\n\n Args:\n a (2D tensor): A batch of vectors shaped (B1, F).\n b (2D tensor): A batch of vectors shaped (B2, F).\n\n Returns:\n The matrix of all pairwise differences between all vectors in `a` and in\n `b`, will be of shape (B1, B2).\n\n Note:\n For convenience, if either `a` or `b` is a `Distribution` object, its\n mean is used.\n \"\"\"\n return tf.expand_dims(a, axis=1) - tf.expand_dims(b, axis=0)\n\n\ndef cdist(a, b, metric='euclidean'):\n \"\"\"Similar to scipy.spatial's cdist, but symbolic.\n\n The currently supported metrics can be listed as `cdist.supported_metrics` and are:\n - 'euclidean', although with a fudge-factor epsilon.\n - 'sqeuclidean', the squared euclidean.\n - 'cityblock', the manhattan or L1 distance.\n\n Args:\n a (2D tensor): The left-hand side, shaped (B1, F).\n b (2D tensor): The right-hand side, shaped (B2, F).\n metric (string): Which distance metric to use, see notes.\n\n Returns:\n The matrix of all pairwise distances between all vectors in `a` and in\n `b`, will be of shape (B1, B2).\n\n Note:\n When a square root is taken (such as in the Euclidean case), a small\n epsilon is added because the gradient of the square-root at zero is\n undefined. Thus, it will never return exact zero in these cases.\n \"\"\"\n with tf.compat.v1.name_scope(\"cdist\"):\n diffs = all_diffs(a, b)\n if metric == 'sqeuclidean':\n return tf.reduce_sum(input_tensor=tf.square(diffs), axis=-1)\n elif metric == 'euclidean':\n return tf.sqrt(tf.reduce_sum(input_tensor=tf.square(diffs), axis=-1) + 1e-12)\n elif metric == 'cityblock':\n return tf.reduce_sum(input_tensor=tf.abs(diffs), axis=-1)\n else:\n raise NotImplementedError(\n 'The following metric is not implemented by `cdist` yet: {}'.format(metric))\ncdist.supported_metrics = [\n 'euclidean',\n 'sqeuclidean',\n 'cityblock',\n]\n\n\ndef get_at_indices(tensor, indices):\n \"\"\" Like `tensor[np.arange(len(tensor)), indices]` in numpy. 
\"\"\"\n counter = tf.range(tf.shape(input=indices, out_type=indices.dtype)[0])\n return tf.gather_nd(tensor, tf.stack((counter, indices), -1))\n\n\ndef batch_hard(dists, pids, margin, batch_precision_at_k=None):\n \"\"\"Computes the batch-hard loss from arxiv.org/abs/1703.07737.\n\n Args:\n dists (2D tensor): A square all-to-all distance matrix as given by cdist.\n pids (1D tensor): The identities of the entries in `batch`, shape (B,).\n This can be of any type that can be compared, thus also a string.\n margin: The value of the margin if a number, alternatively the string\n 'soft' for using the soft-margin formulation, or `None` for not\n using a margin at all.\n\n Returns:\n A 1D tensor of shape (B,) containing the loss value for each sample.\n \"\"\"\n with tf.compat.v1.name_scope(\"batch_hard\"):\n same_identity_mask = tf.equal(tf.expand_dims(pids, axis=1),\n tf.expand_dims(pids, axis=0))\n negative_mask = tf.logical_not(same_identity_mask)\n positive_mask = tf.math.logical_xor(same_identity_mask,\n tf.eye(tf.shape(input=pids)[0], dtype=tf.bool))\n\n furthest_positive = tf.reduce_max(input_tensor=dists*tf.cast(positive_mask, tf.float32), axis=1)\n closest_negative = tf.map_fn(lambda x: tf.reduce_min(input_tensor=tf.boolean_mask(tensor=x[0], mask=x[1])),\n (dists, negative_mask), tf.float32)\n # Another way of achieving the same, though more hacky:\n # closest_negative = tf.reduce_min(dists + 1e5*tf.cast(same_identity_mask, tf.float32), axis=1)\n\n diff = furthest_positive - closest_negative\n if isinstance(margin, numbers.Real):\n diff = tf.maximum(diff + margin, 0.0)\n elif margin == 'soft':\n diff = tf.nn.softplus(diff)\n elif margin.lower() == 'none':\n pass\n else:\n raise NotImplementedError(\n 'The margin {} is not implemented in batch_hard'.format(margin))\n\n if batch_precision_at_k is None:\n return diff\n\n # For monitoring, compute the within-batch top-1 accuracy and the\n # within-batch precision-at-k, which is somewhat more expressive.\n with tf.compat.v1.name_scope(\"monitoring\"):\n # This is like argsort along the last axis. Add one to K as we'll\n # drop the diagonal.\n _, indices = tf.nn.top_k(-dists, k=batch_precision_at_k+1)\n\n # Drop the diagonal (distance to self is always least).\n indices = indices[:,1:]\n\n # Generate the index indexing into the batch dimension.\n # This is simething like [[0,0,0],[1,1,1],...,[B,B,B]]\n batch_index = tf.tile(\n tf.expand_dims(tf.range(tf.shape(input=indices)[0]), 1),\n (1, tf.shape(input=indices)[1]))\n\n # Stitch the above together with the argsort indices to get the\n # indices of the top-k of each row.\n topk_indices = tf.stack((batch_index, indices), -1)\n\n # See if the topk belong to the same person as they should, or not.\n topk_is_same = tf.gather_nd(same_identity_mask, topk_indices)\n\n # All of the above could be reduced to the simpler following if k==1\n #top1_is_same = get_at_indices(same_identity_mask, top_idxs[:,1])\n\n topk_is_same_f32 = tf.cast(topk_is_same, tf.float32)\n top1 = tf.reduce_mean(input_tensor=topk_is_same_f32[:,0])\n prec_at_k = tf.reduce_mean(input_tensor=topk_is_same_f32)\n\n # Finally, let's get some more info that can help in debugging while\n # we're at it!\n negative_dists = tf.boolean_mask(tensor=dists, mask=negative_mask)\n positive_dists = tf.boolean_mask(tensor=dists, mask=positive_mask)\n\n return diff, top1, prec_at_k, topk_is_same, negative_dists, positive_dists\n\n\nLOSS_CHOICES = {\n 'batch_hard': batch_hard,\n}\n" ]
[ [ "tensorflow.stack", "tensorflow.shape", "tensorflow.gather_nd", "tensorflow.logical_not", "tensorflow.nn.top_k", "tensorflow.expand_dims", "tensorflow.reduce_mean", "tensorflow.cast", "tensorflow.abs", "tensorflow.square", "tensorflow.boolean_mask", "tensorflow.maximum", "tensorflow.nn.softplus", "tensorflow.compat.v1.name_scope" ] ]
fg6/MachineLearning
[ "7c3f6e8f2f90b729dbcc345c5a8a5da712cfbb27" ]
[ "kaggle/mnist/bayes/naivebayes.py" ]
[ "\nimport numpy as np\nfrom sortedcontainers import SortedList\nfrom scipy.stats import multivariate_normal\n\nclass NaiveBayes:\n #def __init__(self):\n # pass\n\n def fit(self, X, Y):\n self.X = X\n self.Y = set(Y)\n\n self.Classes = set(Y) \n self.Prior = {}\n self.G = {}\n # smoothing\n epsilon=0.001*np.identity(28)\n \n for c in self.Classes:\n Xc = X[Y==c]\n Mean = np.mean(Xc, axis=0,dtype=np.float64)\n Sigma = np.var(Xc,axis=0,dtype=np.float64)+0.001 \n \n self.G[c] = (Mean, Sigma)\n self.Prior[c] = float(len(Xc))/len(Y) \n\n def predict(self, X):\n \n results=[]\n max_posterior = -1\n max_class = None\n c_posterior = np.zeros((X.shape[0], len(self.G)))\n for c in self.Classes:\n mean, sigma = self.G[c]\n c_posterior[:,c] = multivariate_normal.logpdf(X, mean, sigma) + np.log(self.Prior[c]) # add cov !\n\n #print(len(c_posterior), np.argmax(c_posterior, axis=1))\n \n\n return np.argmax(c_posterior, axis=1)\n\n \n\n def score(self, X, Y):\n results = self.predict(X)\n #for i,v in enumerate(Y):\n # print(i,v,results[i])\n score = np.mean(results == Y)\n return score\n\nclass Bayes:\n \n\n def fit(self, X, Y, e=0.001):\n\n self.X = X\n self.Y = set(Y)\n N,D = X.shape\n\n self.Classes = set(Y) \n self.Prior = {}\n self.G = {}\n # smoothing\n epsilon=e*np.identity(28)\n \n for c in self.Classes:\n Xc = X [ Y==c ]\n Mean = np.mean(Xc, axis=0, dtype=np.float64)\n #Sigma = np.var(Xc, axis=0, dtype=np.float64) + e\n Cov = np.cov(Xc.T)+ np.eye(D)*e\n \n self.G[c] = (Mean, Cov)\n self.Prior[c] = float(len(Xc))/len(Y) \n\n def predict(self, X):\n results=[]\n max_posterior = -1\n max_class = None\n c_posterior = np.zeros((X.shape[0], len(self.G)))\n for c in self.Classes:\n mean, cov = self.G[c]\n c_posterior[:,c] = multivariate_normal.logpdf(X, mean, cov) + np.log(self.Prior[c])\n\n return np.argmax(c_posterior, axis=1)\n\n \n \n def score(self, X, Y):\n results = self.predict(X)\n score = np.mean(results == Y)\n return score\n" ]
[ [ "numpy.eye", "numpy.var", "numpy.cov", "scipy.stats.multivariate_normal.logpdf", "numpy.argmax", "numpy.log", "numpy.identity", "numpy.mean" ] ]
gitlost-murali/awesome-align
[ "39fb45ca85a98e005447bddb52c48e65ce7d399b" ]
[ "run_align.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n# Modifications copyright (C) 2020 Zi-Yi Dou\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport argparse\nimport random\nimport itertools\nimport os\n\nimport numpy as np\nimport torch\nfrom tqdm import trange\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import DataLoader, Dataset, SequentialSampler\n\nimport modeling\nfrom configuration_bert import BertConfig\nfrom modeling import BertForMaskedLM\nfrom tokenization_bert import BertTokenizer\nfrom tokenization_utils import PreTrainedTokenizer\nfrom modeling_utils import PreTrainedModel\n\n\n\ndef set_seed(args):\n if args.seed >= 0:\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n\nclass LineByLineTextDataset(Dataset):\n def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path):\n assert os.path.isfile(file_path)\n print('Loading the dataset...')\n self.examples = []\n with open(file_path, encoding=\"utf-8\") as f:\n for idx, line in enumerate(f.readlines()):\n if len(line) == 0 or line.isspace() or not len(line.split(' ||| ')) == 2:\n raise ValueError(f'Line {idx+1} is not in the correct format!')\n \n src, tgt = line.split(' ||| ')\n if src.rstrip() == '' or tgt.rstrip() == '':\n raise ValueError(f'Line {idx+1} is not in the correct format!')\n \n sent_src, sent_tgt = src.strip().split(), tgt.strip().split()\n token_src, token_tgt = [tokenizer.tokenize(word) for word in sent_src], [tokenizer.tokenize(word) for word in sent_tgt]\n wid_src, wid_tgt = [tokenizer.convert_tokens_to_ids(x) for x in token_src], [tokenizer.convert_tokens_to_ids(x) for x in token_tgt]\n\n ids_src, ids_tgt = tokenizer.prepare_for_model(list(itertools.chain(*wid_src)), return_tensors='pt', max_length=tokenizer.max_len)['input_ids'], tokenizer.prepare_for_model(list(itertools.chain(*wid_tgt)), return_tensors='pt', max_length=tokenizer.max_len)['input_ids']\n\n bpe2word_map_src = []\n for i, word_list in enumerate(token_src):\n bpe2word_map_src += [i for x in word_list]\n bpe2word_map_tgt = []\n for i, word_list in enumerate(token_tgt):\n bpe2word_map_tgt += [i for x in word_list]\n\n self.examples.append( (ids_src[0], ids_tgt[0], bpe2word_map_src, bpe2word_map_tgt) )\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, i):\n return self.examples[i]\n\ndef word_align(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, output_word_alignments = False):\n def collate(examples):\n ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt = zip(*examples)\n ids_src = pad_sequence(ids_src, batch_first=True, padding_value=tokenizer.pad_token_id)\n ids_tgt = pad_sequence(ids_tgt, batch_first=True, padding_value=tokenizer.pad_token_id)\n return ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt\n\n dataset = LineByLineTextDataset(tokenizer, args, 
file_path=args.data_file)\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(\n dataset, sampler=sampler, batch_size=args.batch_size, collate_fn=collate\n )\n\n model.to(args.device)\n model.eval()\n tqdm_iterator = trange(dataset.__len__(), desc=\"Extracting\")\n with open(args.output_file, 'w') as writer:\n for batch in dataloader:\n with torch.no_grad():\n ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt = batch\n word_aligns_list = model.get_aligned_word(ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt, args.device, 0, 0, align_layer=args.align_layer, extraction=args.extraction, softmax_threshold=args.softmax_threshold, test=True)\n for word_aligns in word_aligns_list:\n output_str = []\n for word_align in word_aligns:\n output_str.append(f'{word_align[0]}-{word_align[1]}')\n writer.write(' '.join(output_str)+'\\n')\n tqdm_iterator.update(len(ids_src))\n\n if output_word_alignments:\n with open(args.output_file, 'r') as fh:\n outputf = (fh.read()).split(\"\\n\")\n with open(args.data_file, 'r') as fh:\n datalines = (fh.read()).split(\"\\n\")\n\n with open(args.output_file+\".outtxt\", 'w') as fwriter:\n for indices, line in zip(outputf, datalines):\n srcline, tgtline = line.split(' ||| ')\n indices = indices.split()\n srcwrds = srcline.split()\n tgtwrds = tgtline.split()\n output_wrds = []\n for wrd in indices:\n srcix,tgtix = wrd.split(\"-\")\n srcix, tgtix = int(srcix), int(tgtix)\n output_wrds.append(f\"{srcwrds[srcix]}-{tgtwrds[tgtix]}\")\n fwriter.write(' '.join(output_wrds)+'\\n')\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--data_file\", default=None, type=str, required=True, help=\"The input data file (a text file).\"\n )\n parser.add_argument(\n \"--output_file\",\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n parser.add_argument(\"--align_layer\", type=int, default=8, help=\"layer for alignment extraction\")\n parser.add_argument(\n \"--extraction\", default='softmax', type=str, help='softmax or entmax15'\n )\n parser.add_argument(\n \"--softmax_threshold\", type=float, default=0.001\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n help=\"The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.\",\n )\n parser.add_argument(\n \"--config_name\",\n default=None,\n type=str,\n help=\"Optional pretrained config name or path if not the same as model_name_or_path. If both are None, initialize a new config.\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=None,\n type=str,\n help=\"Optional pretrained tokenizer name or path if not the same as model_name_or_path. 
If both are None, initialize a new tokenizer.\",\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n parser.add_argument(\"--batch_size\", default=32, type=int)\n parser.add_argument(\n \"--cache_dir\",\n default='cache_dir',\n type=str,\n help=\"Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n args = parser.parse_args()\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.device = device\n\n # Set seed\n set_seed(args)\n config_class, model_class, tokenizer_class = BertConfig, BertForMaskedLM, BertTokenizer\n if args.config_name:\n config = config_class.from_pretrained(args.config_name, cache_dir=args.cache_dir)\n elif args.model_name_or_path:\n config = config_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)\n else:\n config = config_class()\n\n if args.tokenizer_name:\n tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)\n elif args.model_name_or_path:\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)\n else:\n raise ValueError(\n \"You are instantiating a new {} tokenizer. This is not supported, but you can do it from another script, save it,\"\n \"and load it from here, using --tokenizer_name\".format(tokenizer_class.__name__)\n )\n\n modeling.PAD_ID = tokenizer.pad_token_id\n modeling.CLS_ID = tokenizer.cls_token_id\n modeling.SEP_ID = tokenizer.sep_token_id\n\n if args.model_name_or_path:\n model = model_class.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir,\n )\n else:\n model = model_class(config=config)\n\n word_align(args, model, tokenizer)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.utils.data.DataLoader", "torch.nn.utils.rnn.pad_sequence", "torch.cuda.manual_seed_all", "torch.manual_seed", "torch.utils.data.SequentialSampler", "numpy.random.seed", "torch.no_grad", "torch.cuda.is_available" ] ]
duoan/light-text-classification
[ "6c96c9fb6b52abd42e4b4358cb85c44473731668" ]
[ "src/lightextclassification/imdb.py" ]
[ "# _*_ coding: utf-8 _*_\nfrom argparse import ArgumentParser\n\nimport torch\nfrom torchtext import data, datasets\n\nfrom vocab import LocalVectors\n\nfrom models import *\n\nfrom torch.optim import SGD\nfrom torch.utils.data import DataLoader\nfrom ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\nfrom ignite.metrics import Accuracy, Loss\n\nfrom tqdm import tqdm\n\n\ndef get_data_loaders(batch_size=32):\n tokenize = lambda x: x.split()\n TEXT = data.Field(\n sequential=True,\n tokenize=tokenize,\n lower=True,\n include_lengths=True,\n batch_first=True,\n fix_length=200)\n LABEL = data.LabelField(dtype=torch.float)\n print('Load IMDB dataset')\n train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)\n print('TEXT build vocab')\n TEXT.build_vocab(\n train_data,\n vectors=LocalVectors(\n '/Users/duoan/nbs/quora-insincere-questions-classification/input/embeddings/glove.840B.300d/glove.840B.300d.txt'\n ))\n print('LABEL build vocab')\n LABEL.build_vocab(train_data)\n\n word_embeddings = TEXT.vocab.vectors\n print('Length of TEXT Vocabulary: {}'.format(len(TEXT.vocab)))\n print('Vector size of TEXT Vocabulary: {}'.format(TEXT.vocab.vectors.size()))\n print('LABEL Length: {}'.format(len(LABEL.vocab)))\n\n train_data, valid_data = train_data.split()\n train_iter, valid_iter, test_iter = data.BucketIterator.splits(\n (train_data, valid_data, test_data),\n batch_size=batch_size,\n sort_key=lambda x: len(x.text),\n repeat=False,\n shuffle=True)\n vocab_size = len(TEXT.vocab)\n print('finished get data loaders')\n return vocab_size, word_embeddings, train_iter, valid_iter, test_iter\n\n\ndef run(batch_size, epochs, lr, momentum, log_interval):\n vocab_size, word_embeddings, train_iter, valid_iter, test_iter = get_data_loaders(\n batch_size)\n model = LSTMClassifier(32, 2, 256, vocab_size, 300, word_embeddings)\n device = 'cpu'\n\n if torch.cuda.is_available():\n device = 'cuda'\n\n optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)\n trainer = create_supervised_trainer(\n model, optimizer, F.nll_loss, device=device)\n evaluator = create_supervised_evaluator(\n model,\n metrics={\n 'accuracy': Accuracy(),\n 'nll': Loss(F.nll_loss)\n },\n device=device)\n\n desc = \"ITERATION - loss: {:.2f}\"\n pbar = tqdm(\n initial=0, leave=False, total=len(train_iter), desc=desc.format(0))\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training_loss(engine):\n iter = (engine.state.iteration - 1) % len(train_iter) + 1\n if iter % log_interval == 0:\n pbar.desc = desc.format(engine.state.output)\n pbar.update(log_interval)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n pbar.refresh()\n evaluator.run(train_iter)\n metrics = evaluator.state.metrics\n avg_accuracy = metrics['accuracy']\n avg_nll = metrics['nll']\n tqdm.write(\n \"Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}\"\n .format(engine.state.epoch, avg_accuracy, avg_nll))\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n evaluator.run(valid_iter)\n metrics = evaluator.state.metrics\n avg_accuracy = metrics['accuracy']\n avg_nll = metrics['nll']\n tqdm.write(\n \"Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}\"\n .format(engine.state.epoch, avg_accuracy, avg_nll))\n\n pbar.n = pbar.last_print_n = 0\n\n trainer.run(train_iter, max_epochs=epochs)\n pbar.close()\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\n '--batch_size',\n type=int,\n default=64,\n help='input 
batch size for training (default: 64)')\n parser.add_argument(\n '--val_batch_size',\n type=int,\n default=1000,\n help='input batch size for validation (default: 1000)')\n parser.add_argument(\n '--epochs',\n type=int,\n default=10,\n help='number of epochs to train (default: 10)')\n parser.add_argument(\n '--lr', type=float, default=0.01, help='learning rate (default: 0.01)')\n parser.add_argument(\n '--momentum', type=float, default=0.5, help='SGD momentum (default: 0.5)')\n parser.add_argument(\n '--log_interval',\n type=int,\n default=10,\n help='how many batches to wait before logging training status')\n\n args = parser.parse_args()\n\n run(args.batch_size, args.epochs, args.lr, args.momentum, args.log_interval)\n" ]
[ [ "torch.cuda.is_available" ] ]
krishpop/CHER
[ "0633a45151b13f23acf20faabc65028c599a3551" ]
[ "baselines/cher/experiment/config.py" ]
[ "from copy import deepcopy\nimport numpy as np\nimport json\nimport os\nimport gym\n\nfrom baselines import logger\nfrom baselines.her.ddpg import DDPG\n\nfrom baselines.cher.her import make_sample_her_transitions\n\n\nDEFAULT_ENV_PARAMS = {\n 'FetchReach-v0': {\n 'n_cycles': 10,\n },\n}\n\n\nDEFAULT_PARAMS = {\n # env\n 'max_u': 1., # max absolute value of actions on different coordinates\n # ddpg\n 'layers': 3, # number of layers in the critic/actor networks\n 'hidden': 256, # number of neurons in each hidden layers\n 'network_class': 'baselines.her.actor_critic:ActorCritic',\n 'Q_lr': 0.001, # critic learning rate\n 'pi_lr': 0.001, # actor learning rate\n 'buffer_size': int(1E6), # for experience replay\n 'polyak': 0.95, # polyak averaging coefficient\n 'action_l2': 1.0, # quadratic penalty on actions (before rescaling by max_u)\n 'clip_obs': 200.,\n 'scope': 'ddpg', # can be tweaked for testing\n 'relative_goals': False,\n # training\n 'n_cycles': 50, # per epoch\n 'rollout_batch_size': 2, # per mpi thread\n 'n_batches': 40, # training batches per cycle\n 'batch_size': 64, # per mpi thread, measured in transitions and reduced to even multiple of chunk_length.\n 'n_test_rollouts': 10, # number of test rollouts per epoch, each consists of rollout_batch_size rollouts\n 'test_with_polyak': False, # run test episodes with the target network\n # exploration\n 'random_eps': 0.3, # percentage of time a random action is taken\n 'noise_eps': 0.2, # std of gaussian noise added to not-completely-random actions as a percentage of max_u\n # HER\n 'replay_strategy': 'future', # supported modes: future, none\n 'replay_k': 4, # number of additional goals used for replay, only used if off_policy_data=future\n # normalization\n 'norm_eps': 0.01, # epsilon used for observation normalization\n 'norm_clip': 5, # normalized observations are cropped to this values\n}\n\n\nCACHED_ENVS = {}\ndef cached_make_env(make_env):\n \"\"\"\n Only creates a new environment from the provided function if one has not yet already been\n created. This is useful here because we need to infer certain properties of the env, e.g.\n its observation and action spaces, without any intend of actually using it.\n \"\"\"\n if make_env not in CACHED_ENVS:\n env = make_env()\n CACHED_ENVS[make_env] = env\n return CACHED_ENVS[make_env]\n\n\ndef prepare_params(kwargs):\n # DDPG params\n ddpg_params = dict()\n\n env_name = kwargs['env_name']\n def make_env():\n return gym.make(env_name)\n kwargs['make_env'] = make_env\n tmp_env = cached_make_env(kwargs['make_env'])\n assert hasattr(tmp_env, '_max_episode_steps')\n kwargs['T'] = tmp_env._max_episode_steps\n tmp_env.reset()\n kwargs['max_u'] = np.array(kwargs['max_u']) if type(kwargs['max_u']) == list else kwargs['max_u']\n kwargs['gamma'] = 1. - 1. 
/ kwargs['T']\n if 'lr' in kwargs:\n kwargs['pi_lr'] = kwargs['lr']\n kwargs['Q_lr'] = kwargs['lr']\n del kwargs['lr']\n for name in ['buffer_size', 'hidden', 'layers',\n 'network_class',\n 'polyak', \n 'batch_size', 'Q_lr', 'pi_lr',\n 'norm_eps', 'norm_clip', 'max_u',\n 'action_l2', 'clip_obs', 'scope', 'relative_goals']:\n ddpg_params[name] = kwargs[name]\n kwargs['_' + name] = kwargs[name]\n del kwargs[name]\n kwargs['ddpg_params'] = ddpg_params\n\n return kwargs\n\n\ndef log_params(params, logger=logger):\n for key in sorted(params.keys()):\n logger.info('{}: {}'.format(key, params[key]))\n\n\ndef configure_her(params):\n env = cached_make_env(params['make_env'])\n env.reset()\n def reward_fun(ag_2, g, info): # vectorized\n return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)\n\n # Prepare configuration for HER.\n her_params = {\n 'reward_fun': reward_fun,\n }\n for name in ['replay_strategy', 'replay_k']:\n her_params[name] = params[name]\n params['_' + name] = her_params[name]\n del params[name]\n sample_her_transitions = make_sample_her_transitions(**her_params)\n\n return sample_her_transitions\n\n\ndef simple_goal_subtract(a, b):\n assert a.shape == b.shape\n return a - b\n\n\ndef configure_ddpg(dims, params, reuse=False, use_mpi=True, clip_return=True):\n sample_her_transitions = configure_her(params)\n # Extract relevant parameters.\n gamma = params['gamma']\n rollout_batch_size = params['rollout_batch_size']\n ddpg_params = params['ddpg_params']\n\n input_dims = dims.copy()\n\n # DDPG agent\n env = cached_make_env(params['make_env'])\n env.reset()\n ddpg_params.update({'input_dims': input_dims, # agent takes an input observations\n 'T': params['T'],\n 'clip_pos_returns': True, # clip positive returns\n 'clip_return': (1. / (1. - gamma)) if clip_return else np.inf, # max abs of return\n 'rollout_batch_size': rollout_batch_size,\n 'subtract_goals': simple_goal_subtract,\n 'sample_transitions': sample_her_transitions,\n 'gamma': gamma,\n })\n ddpg_params['info'] = {\n 'env_name': params['env_name'],\n }\n policy = DDPG(reuse=reuse, **ddpg_params, use_mpi=use_mpi)\n return policy\n\n\ndef configure_dims(params):\n env = cached_make_env(params['make_env'])\n env.reset()\n obs, _, _, info = env.step(env.action_space.sample())\n\n dims = {\n 'o': obs['observation'].shape[0],\n 'u': env.action_space.shape[0],\n 'g': obs['desired_goal'].shape[0],\n }\n for key, value in info.items():\n value = np.array(value)\n if value.ndim == 0:\n value = value.reshape(1)\n dims['info_{}'.format(key)] = value.shape[0]\n return dims\n" ]
[ [ "numpy.array" ] ]
tlambert03/image-demos
[ "a2974bcc7f040fd4d14e659c4cbfeabcf726c707" ]
[ "test-examples/million_points.py" ]
[ "\"\"\"Test converting an image to a pyramid.\n\"\"\"\n\nimport numpy as np\nimport napari\n\npoints = np.random.randint(100, size=(50_000, 2))\n\nwith napari.gui_qt():\n viewer = napari.view_points(points, face_color='red')\n" ]
[ [ "numpy.random.randint" ] ]
BME-SmartLab/GraphConvWat
[ "6cdcb3cb1bd22eb274c19ad4a45a78e334462e44" ]
[ "evaluation/plot_WDS_topo_with_sensitivity.py" ]
[ "# -*- coding: utf-8 -*-\nimport argparse\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib import collections as mc\nimport matplotlib.pyplot as plt\n\nfrom epynet import Network\n\nsys.path.insert(0, os.path.join('..'))\nfrom utils.graph_utils import get_nx_graph, get_sensitivity_matrix\nfrom utils.SensorInstaller import SensorInstaller\n\n# ----- ----- ----- ----- ----- -----\n# Command line arguments\n# ----- ----- ----- ----- ----- -----\nparser = argparse.ArgumentParser()\nparser.add_argument(\n '--wds',\n default = 'anytown',\n type = str\n )\nparser.add_argument(\n '--nodesize',\n default = 7,\n type = int,\n help = \"Size of nodes on the plot.\"\n )\nparser.add_argument(\n '--perturb',\n action = \"store_true\",\n )\nargs = parser.parse_args()\n\npathToRoot = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\npathToModels = os.path.join(pathToRoot, 'experiments', 'models')\n\nwds = Network(os.path.join('..', 'water_networks', args.wds+'.inp'))\nwds.solve()\n\nprint('Calculating nodal sensitivity to demand change...\\n')\nptb = np.max(wds.junctions.basedemand) / 100\nif args.perturb:\n for pump in wds.pumps:\n pump.speed *= 1.1\n\n for junc in wds.junctions:\n tempo = np.random.rand()\n if tempo < .3:\n junc.basedemand *= 1.1\n elif tempo > .6:\n junc.basedemand *= .9\nS = get_sensitivity_matrix(wds, ptb)\n\ndef get_node_df(elements, get_head=False):\n data = []\n for junc in elements:\n ser = pd.Series({\n 'uid': junc.uid,\n 'x': junc.coordinates[0],\n 'y': junc.coordinates[1],\n })\n if get_head:\n ser['head'] = junc.head\n data.append(ser)\n data = pd.DataFrame(data)\n if get_head:\n data['head'] = (data['head'] - data['head'].min()) / (data['head'].max()-data['head'].min())\n return data\n\ndef get_elem_df(elements, nodes):\n data= []\n df = pd.DataFrame(data)\n if elements:\n for elem in elements:\n ser = pd.Series({\n 'uid': elem.uid,\n 'x1': nodes.loc[nodes['uid'] == elem.from_node.uid, 'x'].values,\n 'y1': nodes.loc[nodes['uid'] == elem.from_node.uid, 'y'].values,\n 'x2': nodes.loc[nodes['uid'] == elem.to_node.uid, 'x'].values,\n 'y2': nodes.loc[nodes['uid'] == elem.to_node.uid, 'y'].values,\n })\n data.append(ser)\n df = pd.DataFrame(data)\n df['x1'] = df['x1'].str[0]\n df['y1'] = df['y1'].str[0]\n df['x2'] = df['x2'].str[0]\n df['y2'] = df['y2'].str[0]\n df['center_x'] = (df['x1']+df['x2']) / 2\n df['center_y'] = (df['y1']+df['y2']) / 2\n df['orient'] = np.degrees(np.arctan((df['y2']-df['y1'])/(df['x2']-df['x1']))) + 90\n return df\n\ndef build_lc_from(df):\n line_collection = []\n for elem_id in df['uid']:\n line_collection.append([\n (df.loc[df['uid'] == elem_id, 'x1'].values[0],\n df.loc[df['uid'] == elem_id, 'y1'].values[0]),\n (df.loc[df['uid'] == elem_id, 'x2'].values[0],\n df.loc[df['uid'] == elem_id, 'y2'].values[0])\n ])\n return line_collection\n\nnodes = get_node_df(wds.nodes, get_head=True)\njuncs = get_node_df(wds.junctions, get_head=True)\ntanks = get_node_df(wds.tanks)\nreservoirs = get_node_df(wds.reservoirs)\npipes = get_elem_df(wds.pipes, nodes)\npumps = get_elem_df(wds.pumps, nodes)\nvalves= get_elem_df(wds.valves, nodes)\npipe_collection = build_lc_from(pipes)\npump_collection = build_lc_from(pumps)\nif not valves.empty:\n valve_collection = build_lc_from(valves)\n\nmew = .5\nfig, ax = plt.subplots()\nlc = mc.LineCollection(pipe_collection, linewidths=mew, color='k')\nax.add_collection(lc)\nlc = mc.LineCollection(pump_collection, linewidths=mew, color='k')\nax.add_collection(lc)\nif 
not valves.empty:\n lc = mc.LineCollection(valve_collection, linewidths=mew, color='k')\n ax.add_collection(lc)\n\nnodal_s = np.sum(np.abs(S), axis=0)\nnodal_s = (nodal_s-nodal_s.min()) / nodal_s.max()\ncolors = []\ncmap = plt.get_cmap('plasma')\nfor idx, junc in juncs.iterrows():\n color = cmap(nodal_s[idx])\n colors.append(color)\n ax.plot(junc['x'], junc['y'], 'ko', mfc=color, mec='k', ms=args.nodesize, mew=mew)\n\nfor _, tank in tanks.iterrows():\n ax.plot(tank['x'], tank['y'], marker=7, mfc='k', mec='k', ms=7, mew=mew)\nfor _, reservoir in reservoirs.iterrows():\n ax.plot(reservoir['x'], reservoir['y'], marker='o', mfc='k', mec='k', ms=3, mew=mew)\nax.plot(pumps['center_x'], pumps['center_y'], 'ko', ms=7, mfc='white', mew=mew)\nfor _, pump in pumps.iterrows():\n ax.plot(pump['center_x'], pump['center_y'],\n marker=(3, 0, pump['orient']),\n color='k',\n ms=5\n )\nax.autoscale()\nax.axis('off')\nplt.tight_layout()\nplt.show()\n" ]
[ [ "pandas.Series", "matplotlib.pyplot.tight_layout", "pandas.DataFrame", "numpy.abs", "numpy.arctan", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "matplotlib.pyplot.get_cmap", "numpy.max", "numpy.random.rand", "matplotlib.collections.LineCollection" ] ]
OphirGenomica/proteinFolding
[ "b4b6ea19307e176e58aa9d39ae161003c340416d" ]
[ "srcOld/loss.py" ]
[ "import time\n\nimport matplotlib\nimport numpy as np\n\nmatplotlib.use('Agg')\n\nimport torch\nimport torch.nn as nn\n\nclass LossMultiTargets(nn.Module):\n def __init__(self,loss_fnc=torch.nn.CrossEntropyLoss()):\n super(LossMultiTargets, self).__init__()\n self.loss = loss_fnc\n\n def forward(self, inputs,targets):\n # loss = []\n # for (input,target) in zip(inputs,targets):\n # loss.append(self.loss(input,target))\n loss = 0\n nb = len(targets)\n for (input,target) in zip(inputs,targets):\n loss += self.loss(input,target)\n loss /= nb\n return loss\n\nclass MSELoss(torch.nn.Module):\n def __init__(self):\n super(MSELoss,self).__init__()\n\n def forward(self, input, target):\n #We only want places where the target is larger than zero (remember this is for distances)\n # mask = target > 0\n # result = torch.mean((input[mask] - target[mask])**2)\n # result = torch.norm((input[mask] - target[mask])) ** 2 / torch.norm(target[mask]) ** 2\n nb = target.shape[0]\n result = 0\n for i in range(nb):\n inputi = input[i,:,:]\n targeti = target[i,:,:]\n maski = targeti > 0\n if torch.sum(maski) == 0: #nothing to learn from this one\n continue\n assert torch.norm(targeti[maski]) > 0\n result += torch.norm((inputi[maski] - targeti[maski])) ** 2 / torch.norm(targeti[maski]) ** 2\n\n return result/nb\n\n\ndef pc_translation_rotation_matching(r1,r2):\n '''\n Given two sets of 3D points of equal size. It computes the distance between these two sets of points, when allowing translation and rotation of the point clouds.\n We compute both chirality, and take whichever one has the lowest loss.\n r1 -> Tensor of shape (3,n)\n r2 -> Tensor of shape (3,n)\n '''\n\n #First we translate the two sets, by setting both their centroids to origin\n r1c = r1 - torch.mean(r1, dim=1, keepdim=True)\n r2c = r2 - torch.mean(r2, dim=1, keepdim=True)\n\n H = r1c @ r2c.transpose(0,1)\n t1 = time.time()\n\n U, S, V = torch.svd(H)\n\n t2 = time.time()\n\n d = torch.sign(torch.det(V @ U.transpose(0,1)))\n t3 = time.time()\n tmp = torch.diag_embed(torch.tensor([1, 1, d])).to(device=V.device)\n t4 = time.time()\n R = V @ tmp @ U.transpose(0,1)\n t5 = time.time()\n\n # tmp2 = torch.diag_embed(torch.tensor([1, 1, -d])).to(device=V.device)\n # R2 = V @ tmp2 @ U.transpose(0,1)\n\n r1cr = R @ r1c\n # r1cr2 = R2 @ r1c\n\n assert torch.norm(r2c) > 0\n loss_tr1 = torch.norm(r1cr - r2c) ** 2 / torch.norm(r2c) ** 2\n # loss_tr2 = torch.norm(r1cr2 - r2c) ** 2 / torch.norm(r2c) ** 2\n\n # if loss_tr1 < loss_tr2:\n loss_tr = loss_tr1\n # pred = r1cr.squeeze().cpu().detach().numpy()\n # else:\n # pred = r1cr2.squeeze().cpu().detach().numpy()\n # loss_tr = loss_tr2\n # target = r2c.squeeze().cpu().detach().numpy()\n print(\"{:2.4f},{:2.4f},{:2.4f},{:2.4f}\".format(t2-t1,t3-t2,t4-t3,t5-t4))\n return loss_tr#, pred, target\n\n\ndef loss_tr_wrapper(r1,r2):\n '''\n\n Note that any point with r2 coordinates set to zero is considered masked and will not be included in the calculation. (so use r1 for prediction and r2 for target, and just make sure no target point are accidently zero. 
Remember the point cloud is translation invariant, so you can just translate all points if needed)\n '''\n\n nb = r1.shape[0]\n loss_tr = 0\n for i in range(nb):\n r1i = r1[i, :, :]\n r2i = r2[i,:,:]\n mask = (r2i != 0).reshape(3, -1)\n mask = torch.sum(mask,dim=0) > 0\n r1i = r1i[:,mask]\n r2i = r2i[:,mask]\n # loss_tri, predi, targeti = pc_translation_rotation_matching(r1i, r2i)\n loss_tri = pc_translation_rotation_matching(r1i, r2i)\n loss_tr += loss_tri\n loss_tr /= nb\n return loss_tr#, predi, targeti\n\ndef loss_tr(r1,r2, return_coords=False):\n t1 = time.time()\n loss_tr = 0\n mask = (r2 != 0).reshape(r2.shape)\n mask = (torch.sum(mask,dim=1) > 0).unsqueeze(1)\n mask = mask.repeat(1,3,1)\n batch_mask = torch.sum(mask,dim=(1,2)) > 0\n\n r1 = r1[batch_mask,:,:]\n r2 = r2[batch_mask,:,:]\n mask = mask[batch_mask,:,:]\n\n nb = r1.shape[0]\n\n\n t2 = time.time()\n #First we translate the two sets, by setting both their centroids to origin\n r1c = torch.empty_like(r1)\n r2c = torch.empty_like(r2)\n for i in range(nb):\n r1c[i, :, :] = r1[i, :, :] - torch.mean(r1[i, mask[i, :, :]].reshape(3, -1), dim=1, keepdim=True)\n r2c[i, :, :] = r2[i, :, :] - torch.mean(r2[i, mask[i, :, :]].reshape(3, -1), dim=1, keepdim=True)\n t3 = time.time()\n r1c = r1c * mask\n r2c = r2c * mask\n\n H = torch.bmm(r1c,r2c.transpose(1,2))\n # try:\n # U, S, V = torch.svd(H)\n # except: # torch.svd may have convergence issues for GPU and CPU.\n # U, S, V = torch.svd(H + 1e-4 * H.mean() * torch.rand(H.shape,device=H.device))\n U, S, V = torch.svd(H)\n t4 = time.time()\n\n d = torch.sign(torch.det(torch.bmm(V, U.transpose(1,2))))\n t5 = time.time()\n\n tt=torch.tensor([[1]*nb, [1]*nb, d]).transpose(0,1)\n tmp = torch.diag_embed(tt).to(device=V.device)\n t6 = time.time()\n\n R = torch.bmm(V, torch.bmm(tmp, U.transpose(1,2)))\n\n r1cr = torch.bmm(R, r1c)\n\n loss_tr = torch.mean(torch.norm(r1cr - r2c, dim=(1, 2)) ** 2 / torch.norm(r2c, dim=(1, 2)) ** 2)\n t7 = time.time()\n # print(\"{:2.4f},{:2.4f},{:2.4f},{:2.4f},{:2.4f},{:2.4f}\".format(t2-t1,t3-t2,t4-t3,t5-t4,t6-t5,t7-t6))\n if return_coords:\n pred = r1cr[-1,:,:].squeeze().cpu().detach().numpy()\n target = r2c[-1,:,:].squeeze().cpu().detach().numpy()\n return loss_tr, pred, target\n else:\n return loss_tr" ]
[ [ "torch.sum", "torch.empty_like", "torch.svd", "torch.tensor", "torch.nn.CrossEntropyLoss", "torch.norm", "torch.diag_embed", "matplotlib.use", "torch.bmm", "torch.mean" ] ]
johnwu0604/pytorch-tutorial
[ "bdbc283a0b79620d9b582f1c4d2c2220a853b856" ]
[ "tutorials/02-intermediate/recurrent_neural_network/main.py" ]
[ "import torch \nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\n\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# Hyper-parameters\nsequence_length = 28\ninput_size = 28\nhidden_size = 128\nnum_layers = 2\nnum_classes = 10\nbatch_size = 100\nnum_epochs = 2\nlearning_rate = 0.01\n\n# MNIST dataset\ntrain_dataset = torchvision.datasets.MNIST(root='../../data/',\n train=True, \n transform=transforms.ToTensor(),\n download=True)\n\ntest_dataset = torchvision.datasets.MNIST(root='../../data/',\n train=False, \n transform=transforms.ToTensor())\n\n# Data loader\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size, \n shuffle=True)\n\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=batch_size, \n shuffle=False)\n\n# Recurrent neural network (many-to-one)\nclass RNN(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, num_classes):\n super(RNN, self).__init__()\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)\n self.fc = nn.Linear(hidden_size, num_classes)\n \n def forward(self, x):\n # Set initial hidden and cell states \n h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) \n c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)\n \n # Forward propagate LSTM\n out, _ = self.lstm(x, (h0, c0)) # out: tensor of shape (batch_size, seq_length, hidden_size)\n \n # Decode the hidden state of the last time step\n out = self.fc(out[:, -1, :])\n return out\n\nmodel = RNN(input_size, hidden_size, num_layers, num_classes).to(device)\n\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n# Train the model\ntotal_step = len(train_loader)\nfor epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_loader):\n images = images.reshape(-1, sequence_length, input_size).to(device)\n labels = labels.to(device)\n \n # Forward pass\n outputs = model(images)\n loss = criterion(outputs, labels)\n \n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n if (i+1) % 100 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' \n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n\n# Test the model\nmodel.eval()\nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.reshape(-1, sequence_length, input_size).to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total)) \n\n# Save the model checkpoint\ntorch.save(model.state_dict(), './outputs/model.ckpt')\n" ]
[ [ "torch.utils.data.DataLoader", "torch.nn.LSTM", "torch.nn.Linear", "torch.no_grad", "torch.nn.CrossEntropyLoss", "torch.cuda.is_available", "torch.max" ] ]
JoseHernandez9094/CohortLexicase
[ "5179a3c0db6dcf0c2cae79fcfd08b4b919c9269d" ]
[ "Summarize/solution_timeseries_EVAL.py" ]
[ "#python3\r\n#This script will make csv so that graph_timeseries.py can create plots with them!\r\n\r\nimport pandas as p\r\n\r\nMAX_EVAL = 512*512*1000\r\ndf = p.read_csv('../Data/Raw/min_programs__eval_262144000.csv')\r\ntreat = {}\r\nTREATMENT = 'treatment'\r\nFOUND = 'solution_found'\r\nUPDATE = 'update_found'\r\nEVAL = 'evaluation_found'\r\nPOS_UPDATE = 0\r\nPOS_EVAL = 1\r\n\r\nfor i,row in df.iterrows():\r\n #If we do not have the treatment in our data dict\r\n if row[TREATMENT] in treat:\r\n if row[FOUND] == True:\r\n #If the row has found a solution store gen and eval\r\n tup = tuple([float(row[UPDATE]), float(row[EVAL])])\r\n treat[row[TREATMENT]].append(tup)\r\n else:\r\n if row[FOUND] == True: \r\n temp = [tuple([float(row[UPDATE]), float(row[EVAL])])]\r\n treat[row[TREATMENT]] = temp\r\n\r\n#Will gather data by problem into CN,CS lists for generations.\r\n#TODO\r\nK_PROB = 0\r\nK_SEL = 1\r\nK_CN = 2\r\nK_CS = 3\r\ndata_gen = {}\r\nfor k,val in treat.items():\r\n k = k[8:].split('__')\r\n gens = [x[POS_EVAL] for x in val]\r\n gens.sort()\r\n dimen = k[K_CN] + '-' + k[K_CS]\r\n prob = k[K_PROB]\r\n sele = k[K_SEL]\r\n\r\n #check if problem exists within the first layer of dict\r\n if prob not in data_gen:\r\n #If not in the dict, create an empty one for it\r\n data_gen[prob] = {}\r\n\r\n #Check if selection not within the second layer\r\n if sele not in data_gen[prob]:\r\n #Second level is the selection scheme\r\n data_gen[prob][sele] = {}\r\n #Third level is the dimensionality\r\n data_gen[prob][sele][dimen] = gens\r\n\r\n #Selection is within the second layer\r\n else:\r\n #Third level is the dimensionality\r\n data_gen[prob][sele][dimen] = gens\r\n\r\n else:\r\n #Check if selection not within the second layer\r\n if sele not in data_gen[prob]:\r\n #Second level is the selection scheme\r\n data_gen[prob][sele] = {}\r\n #Third level is the dimensionality\r\n data_gen[prob][sele][dimen] = gens\r\n\r\n #Selection is within the second layer\r\n else:\r\n #Third level is the dimensionality\r\n data_gen[prob][sele][dimen] = gens\r\n\r\n#Go through each problem\r\nfor prob in data_gen:\r\n #Go through each selection scheme\r\n for sele in data_gen[prob]:\r\n #Go through each dimensionality\r\n for dimen in data_gen[prob][sele]:\r\n raw = []\r\n raw.append(tuple([0,0]))\r\n d = data_gen[prob][sele][dimen]\r\n #Create the coordinates\r\n for i in range(0, len(d)):\r\n # raw.append(tuple([d[i], raw[len(raw)-1][1]]))\r\n raw.append(tuple([d[i], raw[len(raw)-1][1]+1]))\r\n raw.append([MAX_EVAL, raw[len(raw)-1][1]])\r\n\r\n gen = [x[0] for x in raw]\r\n cnt = [x[1] for x in raw]\r\n raw_data = {'Evaluation': gen, 'Solution_Count': cnt}\r\n df = p.DataFrame(raw_data, columns = ['Evaluation', 'Solution_Count'])\r\n fname = prob + '__' + sele[4:] + '__' + dimen + '.csv'\r\n df.to_csv('../Data/Polished/Evaluations/'+fname)" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
ybubnov/dnsthreat
[ "75a3298379c8b48aeea6bae6c5c31a7d5e9fe315" ]
[ "deeplookup/env.py" ]
[ "from enum import Enum\n\nimport gym\nimport numpy as np\nfrom gym import spaces\nfrom gym.utils import seeding\n\n\nclass Action(Enum):\n decrease_attention = 0\n increase_attention = 1\n access_detector = 2\n isolate_node = 3\n forget_node = 4\n\n\nclass State(Enum):\n healthy = 0\n infected = 1\n\n\nclass MalwareEnv(gym.Env):\n \"\"\"\n Observations:\n Type: Box(2)\n Num Observation Min Max\n 0 Attention Level 0.05 1.0\n 1 Malware Rate 0.0 1.0\n\n Actions:\n Type: Discrete(5)\n Num Action\n 0 Decrease attention\n 1 Increase attention\n 2 Access detector\n 3 Isolate node\n 4 Forget node\n\n Reward:\n Reward of -0.1 is awarded for accessing detector.\n Reward of -0.2 is awarded for decreasing attention.\n Reward of -0.8 is awarded for increasing attention.\n Reward of 1 is awarded for isolation of infected node.\n Reward of 1 is awarded for forgeting healthy node.\n Reward of -1 is awarded for isolation of healthy node.\n Reward of -1 if awarded for forgetting infected node.\n\n Starting State:\n Attention level is set between [0.1, 0.2]\n Actual state is set either to 'healthy' or 'infected'.\n\n Episode Termination:\n Node is either isolated of forgotten.\n Episode length is greater than 100.\n \"\"\"\n\n def __init__(self, malware_prob: float = 0.9, seed: int = 100, log: bool = False):\n self.min_attention = 0.05\n self.max_attention = 1.0\n\n self.min_rate = 0.0\n self.max_rate = 1.0\n\n self.attention_inc = 0.05\n\n self.low = np.array([self.min_attention, self.min_rate], dtype=np.float32)\n self.high = np.array([self.max_attention, self.max_rate], dtype=np.float32)\n\n self.action_space = spaces.Discrete(5)\n self.observation_space = spaces.Box(self.low, self.high, dtype=np.float32)\n\n self.malware_prob = malware_prob\n self.log = log\n\n # (attention, health)\n self.state = (None, None, None)\n self.latest_action = None\n self.actions = []\n self.seed(seed)\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def start_step_policy(self, observation):\n attention, malware_rate = observation\n if attention > self.min_attention:\n return Action.access_detector.value\n return Action.increase_attention.value\n\n def step(self, action):\n if isinstance(action, np.ndarray):\n action = np.argmax(action)\n\n assert self.action_space.contains(action), f\"{action} ({type(action)}) invalid\"\n action = Action(action)\n\n if self.log:\n self.actions.append(action)\n\n attention, malware_rate, health = self.state\n st = State(health)\n\n if action == Action.decrease_attention:\n attention = max(self.min_attention, attention - self.attention_inc)\n if action == Action.increase_attention:\n attention = min(self.max_attention, attention + self.attention_inc)\n if action == Action.access_detector:\n # Accessing a detector changes malware rate.\n #\n # When the node is healthy, there is a `1 - malware_prob` probability\n # to observe malware. 
And malware rate depends on the attention level.\n #\n # Throw a \"dice\" in order to calculate the malware rate.\n prob = self.np_random.uniform()\n T = (1 - self.malware_prob) if st == State.healthy else self.malware_prob\n\n mu = np.average([0, attention])\n # sigma = 0.2\n malware_rate = 0 if prob > T else self.np_random.normal(mu, 0.01)\n malware_rate = max(self.min_rate, malware_rate)\n malware_rate = min(self.max_rate, malware_rate)\n\n # Agent does not observe the node health directly, only through\n # malware rate.\n self.state = np.array([attention, malware_rate, health])\n self.latest_action = action\n\n observation = np.array([attention, malware_rate])\n reward = self.compute_reward(health, action)\n done = action in {Action.isolate_node, Action.forget_node}\n\n return observation, reward, done, {} # {\"state\": self.state}\n\n def compute_reward(self, health, action):\n if action == Action.decrease_attention:\n return -0.2\n if action == Action.increase_attention:\n return -0.8\n if action == Action.access_detector:\n return -0.1\n if action == Action.isolate_node:\n return 1 * (health * 2 - 1)\n if action == Action.forget_node:\n return -1 * (health * 2 - 1)\n return 0\n\n def reset(self):\n # Node if either healthy (0), or infected (1), when node is infected,\n # agent observes malware requests depending on the attention level.\n health = self.np_random.choice([0, 1])\n attention = self.min_attention\n malware_rate = 0\n\n self.state = np.array([attention, malware_rate, health])\n return np.array([attention, malware_rate])\n\n def render(self, mode=\"human\"):\n attention, malware_rate, infected = self.state\n print(f\"\\tattention: {attention} - malware rate: {malware_rate}\", end=\" - \")\n print(f\"health: {'infected' if infected else 'healthy'}\", end=\" - \")\n print(f\"action: {self.latest_action}\")\n\n def close(self):\n pass\n" ]
[ [ "numpy.array", "numpy.average", "numpy.argmax" ] ]
RAJAGOPALAN-GANGADHARAN/PlasmaPy
[ "6df9583cc47375687a07300c0aa11ba31634d770" ]
[ "plasmapy/formulary/tests/test_parameters.py" ]
[ "\"\"\"Tests for functions that calculate plasma parameters.\"\"\"\n\nimport numpy as np\nimport pytest\n\nfrom astropy import units as u\nfrom astropy.constants import m_e, m_p\nfrom astropy.tests.helper import assert_quantity_allclose\n\nfrom plasmapy.formulary.parameters import (\n Alfven_speed,\n betaH_,\n Bohm_diffusion,\n cs_,\n cwp_,\n DB_,\n Debye_length,\n Debye_number,\n gyrofrequency,\n gyroradius,\n Hall_parameter,\n inertial_length,\n ion_sound_speed,\n lambdaD_,\n lower_hybrid_frequency,\n magnetic_energy_density,\n magnetic_pressure,\n mass_density,\n nD_,\n oc_,\n plasma_frequency,\n pmag_,\n pth_,\n rc_,\n rho_,\n rhoc_,\n thermal_pressure,\n thermal_speed,\n ub_,\n upper_hybrid_frequency,\n va_,\n wc_,\n wlh_,\n wp_,\n wuh_,\n)\nfrom plasmapy.particles import Particle\nfrom plasmapy.particles.exceptions import InvalidParticleError\nfrom plasmapy.utils.exceptions import (\n PhysicsError,\n PhysicsWarning,\n PlasmaPyFutureWarning,\n RelativityError,\n RelativityWarning,\n)\nfrom plasmapy.utils.pytest_helpers import assert_can_handle_nparray\n\nB = 1.0 * u.T\nZ = 1\nion = \"p\"\nm_i = m_p\nn_i = 5e19 * u.m ** -3\nn_e = Z * 5e19 * u.m ** -3\nrho = n_i * m_i + n_e * m_e\nT_e = 1e6 * u.K\nT_i = 1e6 * u.K\nk_1 = 3e1 * u.m ** -1\nk_2 = 3e7 * u.m ** -1\n\nB_arr = np.array([0.001, 0.002]) * u.T\nB_nanarr = np.array([0.001, np.nan]) * u.T\nB_allnanarr = np.array([np.nan, np.nan]) * u.T\n\nrho_arr = np.array([5e-10, 2e-10]) * u.kg / u.m ** 3\nrho_infarr = np.array([np.inf, 5e19]) * u.m ** -3\nrho_negarr = np.array([-5e19, 6e19]) * u.m ** -3\n\nT_arr = np.array([1e6, 2e6]) * u.K\nT_nanarr = np.array([1e6, np.nan]) * u.K\nT_nanarr2 = np.array([np.nan, 2e6]) * u.K\nT_allnanarr = np.array([np.nan, np.nan]) * u.K\nT_negarr = np.array([1e6, -5151.0]) * u.K\n\nV = 25.2 * u.m / u.s\nV_arr = np.array([25, 50]) * u.m / u.s\nV_nanarr = np.array([25, np.nan]) * u.m / u.s\nV_allnanarr = np.array([np.nan, np.nan]) * u.m / u.s\n\nmu = m_p.to(u.u).value\n\n\nclass Test_mass_density:\n r\"\"\"Test the mass_density function in parameters.py.\"\"\"\n\n @pytest.mark.parametrize(\n \"args, kwargs, conditional\",\n [\n ((-1 * u.kg * u.m ** -3, \"He\"), {}, pytest.raises(ValueError)),\n ((-1 * u.m ** -3, \"He\"), {}, pytest.raises(ValueError)),\n ((\"not a Quantity\", \"He\"), {}, pytest.raises(TypeError)),\n ((1 * u.m ** -3,), {}, pytest.raises(TypeError)),\n ((1 * u.J, \"He\"), {}, pytest.raises(u.UnitTypeError)),\n ((1 * u.m ** -3, None), {}, pytest.raises(TypeError)),\n (\n (1 * u.m ** -3, \"He\"),\n {\"z_ratio\": \"not a ratio\"},\n pytest.raises(TypeError),\n ),\n ],\n )\n def test_raises(self, args, kwargs, conditional):\n with conditional:\n mass_density(*args, **kwargs)\n\n @pytest.mark.parametrize(\n \"args, kwargs, expected\",\n [\n ((1.0 * u.g * u.m ** -3, \"\"), {}, 1.0e-3 * u.kg * u.m ** -3),\n ((5.0e12 * u.cm ** -3, \"He\"), {}, 3.32323849e-8 * u.kg * u.m ** -3),\n (\n (5.0e12 * u.cm ** -3, Particle(\"He\")),\n {},\n 3.32323849e-8 * u.kg * u.m ** -3,\n ),\n (\n (5.0e12 * u.cm ** -3, \"He\"),\n {\"z_ratio\": 0.5},\n 1.66161925e-08 * u.kg * u.m ** -3,\n ),\n (\n (5.0e12 * u.cm ** -3, \"He\"),\n {\"z_ratio\": -0.5},\n 1.66161925e-08 * u.kg * u.m ** -3,\n ),\n ],\n )\n def test_values(self, args, kwargs, expected):\n assert np.isclose(mass_density(*args, **kwargs), expected)\n\n def test_handle_nparrays(self):\n \"\"\"Test for ability to handle numpy array quantities\"\"\"\n assert_can_handle_nparray(mass_density)\n\n\n# Assertions below that are in CGS units with 2-3 significant digits\n# are 
generally from the NRL Plasma Formulary.\n\n\nclass TestAlfvenSpeed:\n \"\"\"Test `~plasmapy.formulary.parameters.Alfven_speed`.\"\"\"\n\n @pytest.mark.parametrize(\"alias\", [va_])\n def test_aliases(self, alias):\n assert alias is Alfven_speed\n\n @pytest.mark.parametrize(\n \"args, kwargs, _error\",\n [\n # scenarios that raise RelativityError\n ((10 * u.T, 1.0e-10 * u.kg * u.m ** -3), {}, RelativityError),\n ((np.inf * u.T, 1 * u.m ** -3), {\"ion\": \"p\"}, RelativityError),\n ((-np.inf * u.T, 1 * u.m ** -3), {\"ion\": \"p\"}, RelativityError),\n #\n # scenarios that raise InvalidParticleError\n ((1 * u.T, 5e19 * u.m ** -3), {\"ion\": \"spacecats\"}, InvalidParticleError),\n #\n # scenarios that raise TypeError\n ((\"not a Bfield\", 1.0e-10 * u.kg * u.m ** -3), {}, TypeError),\n ((10 * u.T, \"not a density\"), {}, TypeError),\n ((10 * u.T, 5), {\"ion\": \"p\"}, TypeError),\n ((1 * u.T, 1.0e18 * u.m ** -3), {\"ion\": [\"He\"]}, TypeError),\n ((1 * u.T, 1.0e18 * u.m ** -3), {\"ion\": \"He\", \"z_mean\": \"nope\"}, TypeError),\n #\n # scenarios that raise UnitTypeError\n ((1 * u.T, 1.0e18 * u.cm), {\"ion\": \"He\"}, u.UnitTypeError),\n ((1 * u.T, 5 * u.m ** -2), {\"ion\": \"p\"}, u.UnitTypeError),\n ((1 * u.cm, 1.0e18 * u.m ** -3), {\"ion\": \"He\"}, u.UnitTypeError),\n ((5 * u.A, 5e19 * u.m ** -3), {\"ion\": \"p\"}, u.UnitTypeError),\n #\n # scenarios that raise ValueError\n ((1 * u.T, -1.0e18 * u.m ** -3), {\"ion\": \"He\"}, ValueError),\n (\n (np.array([5, 6, 7]) * u.T, np.array([5, 6]) * u.m ** -3),\n {\"ion\": \"p\"},\n ValueError,\n ),\n (\n (np.array([0.001, 0.002]) * u.T, np.array([-5e19, 6e19]) * u.m ** -3),\n {\"ion\": \"p\"},\n ValueError,\n ),\n ],\n )\n def test_raises(self, args, kwargs, _error):\n \"\"\"Test scenarios that raise exceptions or warnings.\"\"\"\n with pytest.raises(_error):\n Alfven_speed(*args, **kwargs)\n\n @pytest.mark.parametrize(\n \"args, kwargs, expected, isclose_kw, _warning\",\n [\n # scenarios that issue RelativityWarning\n (\n (5 * u.T, 5e19 * u.m ** -3),\n {\"ion\": \"H\"},\n 15413707.39,\n {},\n RelativityWarning,\n ),\n (\n (5 * u.T, 5e19 * u.m ** -3),\n {\"ion\": \"H+\"},\n 15413707.39,\n {\"rtol\": 3.0e-4},\n RelativityWarning,\n ),\n (\n (5 * u.T, 5e19 * u.m ** -3),\n {\"ion\": \"p\"},\n 15413707.39,\n {\"rtol\": 4.0e-4},\n RelativityWarning,\n ),\n #\n # scenarios that issue UnitsWarning\n ((0.5, 1.0e18 * u.m ** -3), {\"ion\": \"He\"}, 5470657.93, {}, u.UnitsWarning),\n ],\n )\n def test_warns(self, args, kwargs, expected, isclose_kw, _warning):\n \"\"\"Test scenarios that issue warnings\"\"\"\n with pytest.warns(_warning):\n val = Alfven_speed(*args, **kwargs)\n assert isinstance(val, u.Quantity)\n assert val.unit == u.m / u.s\n assert np.isclose(val.value, expected, **isclose_kw)\n\n @pytest.mark.parametrize(\n \"args, kwargs, expected, isclose_kw\",\n [\n (\n (1 * u.T, 1e-8 * u.kg * u.m ** -3),\n {\"ion\": \"p\"},\n 8920620.58 * u.m / u.s,\n {\"rtol\": 1e-6},\n ),\n (\n (1 * u.T, 1e-8 * u.kg * u.m ** -3),\n {},\n 8920620.58 * u.m / u.s,\n {\"rtol\": 1e-6},\n ),\n (\n (0.05 * u.T, 1e18 * u.m ** -3),\n {\"ion\": \"He\"},\n Alfven_speed(0.05 * u.T, 6.64738793e-09 * u.kg * u.m ** -3),\n {},\n ),\n (\n (0.05 * u.T, 1e18 * u.m ** -3),\n {\"ion\": \"He+\"},\n Alfven_speed(0.05 * u.T, 1e18 * u.m ** -3, ion=\"He\"),\n {\"rtol\": 7e-5},\n ),\n (\n (0.05 * u.T, 1e18 * u.m ** -3),\n {\"ion\": \"He\", \"z_mean\": 2},\n Alfven_speed(0.05 * u.T, 1e18 * u.m ** -3, ion=\"He +2\"),\n {\"rtol\": 1.4e-4},\n ),\n (\n (0.05 * u.T, 1e18 * u.m ** -3),\n {\"ion\": 
Particle(\"He+\")},\n Alfven_speed(0.05 * u.T, 1e18 * u.m ** -3, ion=\"He+\"),\n {},\n ),\n (\n ([0.001, 0.002] * u.T, 5e-10 * u.kg * u.m ** -3),\n {},\n [\n va_(0.001 * u.T, 5e-10 * u.kg * u.m ** -3).value,\n va_(0.002 * u.T, 5e-10 * u.kg * u.m ** -3).value,\n ]\n * (u.m / u.s),\n {},\n ),\n (\n ([0.001, 0.002] * u.T, [5e-10, 2e-10] * u.kg * u.m ** -3),\n {},\n [\n va_(0.001 * u.T, 5e-10 * u.kg * u.m ** -3).value,\n va_(0.002 * u.T, 2e-10 * u.kg * u.m ** -3).value,\n ]\n * (u.m / u.s),\n {},\n ),\n (\n (0.001 * u.T, [1.0e18, 2e18] * u.m ** -3),\n {\"ion\": \"p\"},\n [\n va_(0.001 * u.T, 1e18 * u.m ** -3, ion=\"p\").value,\n va_(0.001 * u.T, 2e18 * u.m ** -3, ion=\"p\").value,\n ]\n * (u.m / u.s),\n {},\n ),\n ],\n )\n def test_values(self, args, kwargs, expected, isclose_kw):\n \"\"\"Test expected values.\"\"\"\n assert np.allclose(Alfven_speed(*args, **kwargs), expected, **isclose_kw)\n\n @pytest.mark.parametrize(\n \"args, kwargs, nan_mask\",\n [\n ((np.nan * u.T, 1 * u.kg * u.m ** -3), {}, []),\n ((0.001 * u.T, np.nan * u.kg * u.m ** -3), {}, []),\n (([np.nan, 0.001] * u.T, 1 * u.kg * u.m ** -3), {}, [True, False]),\n (\n (0.001 * u.T, [np.nan, 1.0, np.nan] * u.kg * u.m ** -3),\n {},\n [True, False, True],\n ),\n (([np.nan, 0.001] * u.T, [1, np.nan] * u.kg * u.m ** -3), {}, [True, True]),\n (\n (0.001 * u.T, [np.nan, 1e18, np.nan] * u.m ** -3),\n {\"ion\": \"Ar+\"},\n [True, False, True],\n ),\n ],\n )\n def test_nan_values(self, args, kwargs, nan_mask):\n \"\"\"Input scenarios that leat to `numpy.nan` values being returned.\"\"\"\n val = Alfven_speed(*args, **kwargs)\n if np.isscalar(val.value):\n assert np.isnan(val)\n else:\n nan_arr = np.isnan(val)\n assert np.all(nan_arr[nan_mask])\n assert np.all(np.logical_not(nan_arr[np.logical_not(nan_mask)]))\n\n def test_handle_nparrays(self):\n \"\"\"Test for ability to handle numpy array quantities\"\"\"\n assert_can_handle_nparray(Alfven_speed)\n\n\nclass Test_Ion_Sound_Speed:\n r\"\"\"Test the ion_sound_speed function in parameters.py.\"\"\"\n\n @pytest.mark.parametrize(\n \"args, kwargs, expected, isclose_kw\",\n [\n (\n (),\n {\n \"T_i\": 1.3232 * u.MK,\n \"T_e\": 1.831 * u.MK,\n \"ion\": \"p\",\n \"gamma_e\": 1,\n \"gamma_i\": 3,\n },\n 218816.06086407552 * (u.m / u.s),\n {},\n ),\n (\n (1.831 * u.MK, 1.3232 * u.MK, \"p\"),\n {},\n 218816.06086407552 * (u.m / u.s),\n {},\n ), # Test that function call without keyword argument works correctly\n (\n (),\n {\n \"T_i\": 1.3232 * u.MK,\n \"T_e\": 1.831 * u.MK,\n \"n_e\": n_e,\n \"k\": k_1,\n \"ion\": \"p\",\n \"gamma_e\": 1,\n \"gamma_i\": 3,\n },\n 218816.06086407552 * (u.m / u.s),\n {},\n ),\n (\n (),\n {\n \"T_i\": 1.3232 * u.MK,\n \"T_e\": 1.831 * u.MK,\n \"n_e\": n_e,\n \"k\": k_2,\n \"ion\": \"p\",\n \"gamma_e\": 1,\n \"gamma_i\": 3,\n },\n 552.3212936293337 * (u.m / u.s),\n {},\n ),\n (\n (),\n {\n \"T_i\": 0.88 * u.MK,\n \"T_e\": 1.28 * u.MK,\n \"n_e\": n_e,\n \"k\": 0 * u.m ** -1,\n \"ion\": \"p\",\n \"gamma_e\": 1.2,\n \"gamma_i\": 3.4,\n },\n 193328.52857788358 * (u.m / u.s),\n {},\n ),\n (\n (),\n {\"T_i\": T_i, \"T_e\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p+\"},\n ion_sound_speed(T_i=T_i, T_e=0 * u.K, n_e=n_e, k=k_1, ion=\"p+\").value\n * (u.m / u.s),\n {},\n ),\n (\n (),\n {\n \"T_e\": 1.2e6 * u.K,\n \"T_i\": 0 * u.K,\n \"n_e\": n_e,\n \"k\": 0 * u.m ** -1,\n \"z_mean\": 0.8,\n \"ion\": \"p\",\n },\n 89018.09 * (u.m / u.s),\n {\"atol\": 0.0, \"rtol\": 1e-6},\n ), # testing for user input z_mean\n ],\n )\n def test_values(self, args, kwargs, expected, isclose_kw):\n 
assert np.isclose(ion_sound_speed(*args, **kwargs), expected, **isclose_kw)\n\n # case when Z=1 is assumed\n # assert ion_sound_speed(T_i=T_i, T_e=T_e, ion='p+') == ion_sound_speed(T_i=T_i, T_e=T_e,\n # ion='H-1')\n\n @pytest.mark.parametrize(\n \"kwargs1, kwargs2, _warning\",\n [\n ({\"T_i\": T_i, \"T_e\": T_e, \"n_e\": n_e, \"ion\": \"p\"}, {}, PhysicsWarning),\n ({\"T_i\": T_i, \"T_e\": T_e, \"k\": k_1, \"ion\": \"p\"}, {}, PhysicsWarning),\n ({\"T_i\": 5e11 * u.K, \"T_e\": 0 * u.K, \"ion\": \"p\"}, {}, RelativityWarning),\n (\n {\"T_e\": 1.2e6, \"T_i\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"},\n {\"T_e\": 1.2e6 * u.K, \"T_i\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"},\n u.UnitsWarning,\n ),\n (\n {\"T_i\": 1.3e6, \"T_e\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"},\n {\"T_i\": 1.3e6 * u.K, \"T_e\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"},\n u.UnitsWarning,\n ),\n ],\n )\n def test_warns(self, kwargs1, kwargs2, _warning):\n with pytest.warns(_warning):\n val = ion_sound_speed(**kwargs1)\n if kwargs2 != {}:\n val == ion_sound_speed(**kwargs2)\n\n @pytest.mark.parametrize(\n \"args, kwargs, _error\",\n [\n (\n (),\n {\n \"T_i\": T_i,\n \"T_e\": T_e,\n \"n_e\": n_e,\n \"k\": k_1,\n \"ion\": \"p\",\n \"gamma_i\": np.inf,\n },\n RelativityError,\n ),\n (\n (),\n {\n \"T_i\": np.array([5, 6, 5]) * u.K,\n \"T_e\": np.array([3, 4]) * u.K,\n \"n_e\": np.array([5, 6, 5]) * u.m ** -3,\n \"k\": np.array([3, 4]) * u.m ** -3,\n \"ion\": \"p\",\n },\n u.UnitTypeError,\n ),\n ((5 * u.T), {\"ion\": \"p\"}, TypeError), # Is this test right??????\n ((), {\"ion\": \"p\"}, TypeError),\n (\n (),\n {\"T_i\": T_i, \"T_e\": 0 * u.K, \"gamma_i\": 0.9999, \"ion\": \"p\"},\n PhysicsError,\n ),\n (\n (),\n {\"T_i\": T_i, \"T_e\": 0 * u.K, \"gamma_e\": 0.9999, \"ion\": \"p\"},\n PhysicsError,\n ),\n (\n (),\n {\"T_i\": T_i, \"T_e\": 0 * u.K, \"gamma_e\": \"sdjklsf\", \"ion\": \"p\"},\n TypeError,\n ),\n (\n (),\n {\"T_i\": T_i, \"T_e\": 0 * u.K, \"gamma_i\": \"fsdfas\", \"ion\": \"p\"},\n TypeError,\n ),\n ((), {\"T_i\": T_i, \"T_e\": 0 * u.K, \"ion\": \"cupcakes\"}, InvalidParticleError),\n ((), {\"T_i\": -np.abs(T_i), \"T_e\": 0 * u.K, \"ion\": \"p\"}, ValueError),\n (\n (),\n {\"T_i\": T_i, \"T_e\": 0 * u.K, \"n_e\": -np.abs(n_e), \"k\": k_1, \"ion\": \"p\"},\n ValueError,\n ),\n (\n (),\n {\"T_i\": T_i, \"T_e\": 0 * u.K, \"n_e\": n_e, \"k\": -np.abs(k_1), \"ion\": \"p\"},\n ValueError,\n ),\n ((), {\"T_i\": 5e19 * u.K, \"T_e\": 0 * u.K, \"ion\": \"p\"}, RelativityError),\n (\n (),\n {\"T_i\": 5 * u.A, \"T_e\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"},\n u.UnitTypeError,\n ),\n (\n (),\n {\"T_i\": T_negarr, \"T_e\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"},\n ValueError,\n ),\n (\n (),\n {\"T_e\": T_negarr, \"T_i\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"},\n ValueError,\n ),\n ],\n )\n def test_raises(self, args, kwargs, _error):\n with pytest.raises(_error):\n ion_sound_speed(*args, **kwargs)\n\n @pytest.mark.parametrize(\n \"kwargs\",\n [\n ({\"T_i\": T_nanarr, \"T_e\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"}),\n ({\"T_e\": T_nanarr, \"T_i\": 0 * u.K, \"n_e\": n_e, \"k\": k_1, \"ion\": \"p\"}),\n ],\n )\n def test_nan_values(self, kwargs):\n np.isnan(ion_sound_speed(**kwargs)[1])\n\n def test_handle_nparrays(self):\n assert_can_handle_nparray(ion_sound_speed)\n\n\ndef test_thermal_pressure():\n assert thermal_pressure(T_e, n_i).unit.is_equivalent(u.Pa)\n\n # TODO: may be array issues with arg \"mass\"\n 
assert_can_handle_nparray(thermal_pressure)\n\n\ndef test_gyrofrequency():\n r\"\"\"Test the gyrofrequency function in parameters.py.\"\"\"\n\n assert gyrofrequency(B, \"e-\").unit.is_equivalent(u.rad / u.s)\n\n assert gyrofrequency(B, \"e-\", to_hz=True).unit.is_equivalent(u.Hz)\n\n assert np.isclose(gyrofrequency(1 * u.T, \"e-\").value, 175882008784.72018)\n\n assert np.isclose(gyrofrequency(2.4 * u.T, \"e-\").value, 422116821083.3284)\n\n assert np.isclose(\n gyrofrequency(1 * u.T, \"e-\", to_hz=True).value, 27992490076.528206\n )\n\n assert np.isclose(\n gyrofrequency(2.4 * u.T, \"e-\", signed=True).value, -422116821083.3284\n )\n\n assert np.isclose(gyrofrequency(1 * u.G, \"e-\").cgs.value, 1.76e7, rtol=1e-3)\n\n with pytest.raises(TypeError):\n with pytest.warns(u.UnitsWarning):\n gyrofrequency(u.m, \"e-\")\n\n with pytest.raises(u.UnitTypeError):\n gyrofrequency(u.m * 1, \"e-\")\n\n assert np.isnan(gyrofrequency(B_nanarr, \"e-\")[-1])\n\n # The following is a test to check that equivalencies from astropy\n # are working.\n omega_ce = gyrofrequency(2.2 * u.T, \"e-\")\n f_ce = (omega_ce / (2 * np.pi)) / u.rad\n f_ce_use_equiv = omega_ce.to(u.Hz, equivalencies=[(u.cy / u.s, u.Hz)])\n assert np.isclose(f_ce.value, f_ce_use_equiv.value)\n\n with pytest.warns(u.UnitsWarning):\n assert gyrofrequency(5.0, \"e-\") == gyrofrequency(5.0 * u.T, \"e-\")\n\n assert gyrofrequency(B, particle=ion).unit.is_equivalent(u.rad / u.s)\n\n assert np.isclose(gyrofrequency(1 * u.T, particle=\"p\").value, 95788335.834874)\n\n assert np.isclose(gyrofrequency(2.4 * u.T, particle=\"p\").value, 229892006.00369796)\n\n assert np.isclose(gyrofrequency(1 * u.G, particle=\"p\").cgs.value, 9.58e3, rtol=2e-3)\n\n assert gyrofrequency(-5 * u.T, \"p\") == gyrofrequency(5 * u.T, \"p\")\n\n # Case when Z=1 is assumed\n # assert gyrofrequency(B, particle='p+') == gyrofrequency(B, particle='H-1')\n\n assert gyrofrequency(B, particle=\"e+\") == gyrofrequency(B, \"e-\")\n\n with pytest.warns(u.UnitsWarning):\n gyrofrequency(8, \"p\")\n\n with pytest.raises(u.UnitTypeError):\n gyrofrequency(5 * u.m, \"p\")\n\n with pytest.raises(InvalidParticleError):\n gyrofrequency(8 * u.T, particle=\"asdfasd\")\n\n with pytest.warns(u.UnitsWarning):\n # TODO this should be WARNS, not RAISES. 
and it's probably still raised\n assert gyrofrequency(5.0, \"p\") == gyrofrequency(5.0 * u.T, \"p\")\n\n gyrofrequency(1 * u.T, particle=\"p\")\n # testing for user input Z\n testMeth1 = gyrofrequency(1 * u.T, particle=\"p\", Z=0.8).si.value\n testTrue1 = 76630665.79318453\n errStr = f\"gyrofrequency() gave {testMeth1}, should be {testTrue1}.\"\n assert np.isclose(testMeth1, testTrue1, atol=0.0, rtol=1e-5), errStr\n\n assert_can_handle_nparray(gyrofrequency, kwargs={\"signed\": True})\n\n assert_can_handle_nparray(gyrofrequency, kwargs={\"signed\": False})\n\n\ndef test_gyroradius():\n r\"\"\"Test the gyroradius function in parameters.py.\"\"\"\n\n assert gyroradius(B, \"e-\", T=T_e).unit.is_equivalent(u.m)\n\n assert gyroradius(B, \"e-\", Vperp=25 * u.m / u.s).unit.is_equivalent(u.m)\n\n # test for possiblity to allow nan for input values\n assert np.isnan(gyroradius(np.nan * u.T, particle=\"e-\", T=1 * u.K))\n assert np.isnan(gyroradius(1 * u.T, particle=\"e-\", T=np.nan * u.K))\n assert np.isnan(gyroradius(1 * u.T, particle=\"e-\", Vperp=np.nan * u.m / u.s))\n\n Vperp = 1e6 * u.m / u.s\n Bmag = 1 * u.T\n omega_ce = gyrofrequency(Bmag, \"e-\")\n analytical_result = (Vperp / omega_ce).to(\n u.m, equivalencies=u.dimensionless_angles()\n )\n assert gyroradius(Bmag, \"e-\", Vperp=Vperp) == analytical_result\n\n with pytest.raises(TypeError):\n with pytest.warns(u.UnitsWarning):\n gyroradius(u.T, \"e-\")\n\n with pytest.raises(u.UnitTypeError):\n gyroradius(5 * u.A, \"e-\", Vperp=8 * u.m / u.s)\n\n with pytest.raises(u.UnitTypeError):\n gyroradius(5 * u.T, \"e-\", Vperp=8 * u.m)\n\n with pytest.raises(ValueError):\n gyroradius(np.array([5, 6]) * u.T, \"e-\", Vperp=np.array([5, 6, 7]) * u.m / u.s)\n\n assert np.isnan(gyroradius(np.nan * u.T, \"e-\", Vperp=1 * u.m / u.s))\n\n with pytest.raises(ValueError):\n gyroradius(3.14159 * u.T, \"e-\", T=-1 * u.K)\n\n with pytest.warns(u.UnitsWarning):\n assert gyroradius(1.0, \"e-\", Vperp=1.0) == gyroradius(\n 1.0 * u.T, \"e-\", Vperp=1.0 * u.m / u.s\n )\n\n with pytest.warns(u.UnitsWarning):\n assert gyroradius(1.1, \"e-\", T=1.2) == gyroradius(1.1 * u.T, \"e-\", T=1.2 * u.K)\n\n with pytest.raises(ValueError):\n gyroradius(1.1 * u.T, \"e-\", Vperp=1 * u.m / u.s, T=1.2 * u.K)\n\n with pytest.raises(u.UnitTypeError):\n gyroradius(1.1 * u.T, \"e-\", Vperp=1.1 * u.m, T=1.2 * u.K)\n\n # Check for Deprecation warning when using T_i instead of T\n with pytest.warns(PlasmaPyFutureWarning):\n gyroradius(1.1 * u.T, \"e-\", T_i=1.2 * u.K)\n\n assert gyroradius(B, particle=\"p\", T=T_i).unit.is_equivalent(u.m)\n\n assert gyroradius(B, particle=\"p\", Vperp=25 * u.m / u.s).unit.is_equivalent(u.m)\n\n # Case when Z=1 is assumed\n assert np.isclose(\n gyroradius(B, particle=\"p\", T=T_i),\n gyroradius(B, particle=\"H+\", T=T_i),\n atol=1e-6 * u.m,\n )\n\n gyroPos = gyroradius(B, particle=\"p\", Vperp=V)\n gyroNeg = gyroradius(B, particle=\"p\", Vperp=-V)\n assert gyroPos == gyroNeg\n\n Vperp = 1e6 * u.m / u.s\n Bmag = 1 * u.T\n omega_ci = gyrofrequency(Bmag, particle=\"p\")\n analytical_result = (Vperp / omega_ci).to(\n u.m, equivalencies=u.dimensionless_angles()\n )\n assert gyroradius(Bmag, particle=\"p\", Vperp=Vperp) == analytical_result\n\n T2 = 1.2 * u.MK\n B2 = 123 * u.G\n particle2 = \"alpha\"\n Vperp2 = thermal_speed(T2, particle=particle2)\n gyro_by_vperp = gyroradius(B2, particle=\"alpha\", Vperp=Vperp2)\n assert gyro_by_vperp == gyroradius(B2, particle=\"alpha\", T=T2)\n\n explicit_positron_gyro = gyroradius(1 * u.T, particle=\"positron\", T=1 * u.MK)\n 
assert explicit_positron_gyro == gyroradius(1 * u.T, \"e-\", T=1 * u.MK)\n\n with pytest.raises(TypeError):\n with pytest.warns(u.UnitsWarning):\n gyroradius(u.T, particle=\"p\", Vperp=8 * u.m / u.s)\n\n with pytest.raises(ValueError):\n gyroradius(B, particle=\"p\", T=-1 * u.K)\n\n with pytest.warns(u.UnitsWarning):\n gyro_without_units = gyroradius(1.0, particle=\"p\", Vperp=1.0)\n gyro_with_units = gyroradius(1.0 * u.T, particle=\"p\", Vperp=1.0 * u.m / u.s)\n assert gyro_without_units == gyro_with_units\n\n with pytest.warns(u.UnitsWarning):\n gyro_t_without_units = gyroradius(1.1, particle=\"p\", T=1.2)\n gyro_t_with_units = gyroradius(1.1 * u.T, particle=\"p\", T=1.2 * u.K)\n assert gyro_t_with_units == gyro_t_without_units\n\n with pytest.raises(ValueError):\n gyroradius(1.1 * u.T, particle=\"p\", Vperp=1 * u.m / u.s, T=1.2 * u.K)\n\n with pytest.raises(u.UnitTypeError):\n gyroradius(1.1 * u.T, particle=\"p\", Vperp=1.1 * u.m, T=1.2 * u.K)\n\n with pytest.raises(u.UnitTypeError):\n gyroradius(1.1 * u.T, particle=\"p\", Vperp=1.2 * u.m, T=1.1 * u.K)\n\n\nclass Test_gyroradius:\n\n # some custom numpy array tests here, because of the T / Vperp situation\n def test_handle_numpy_array(self):\n # Tests to verify that can handle Quantities with numpy array as the value:\n assert gyroradius(B_arr, \"e-\", Vperp=V_arr)[0] == gyroradius(\n B_arr[0], \"e-\", Vperp=V_arr[0]\n )\n assert gyroradius(B_arr, \"e-\", T=T_arr)[0] == gyroradius(\n B_arr[0], \"e-\", T=T_arr[0]\n )\n\n def test_handle_mixed_Qarrays(self):\n # If both Vperp or T are input as Qarrays, but only one of the two is valid\n # at each element, then that's fine, the function should work:\n assert gyroradius(B_arr, \"e-\", Vperp=V_nanarr, T=T_nanarr2)[0] == gyroradius(\n B_arr[0], \"e-\", Vperp=V_nanarr[0], T=T_nanarr2[0]\n )\n\n def test_raise_two_valid_inputs(self):\n # If both Vperp or T are nan-less, Qarrays or not, should raise ValueError:\n with pytest.raises(ValueError):\n gyroradius(B_arr, \"e-\", Vperp=V, T=T_arr)\n with pytest.raises(ValueError):\n gyroradius(B_arr, \"e-\", Vperp=V_arr, T=T_i)\n\n def test_all_valid_and_one_valid(self):\n # If one of (Vperp, T) is a valid and one is Qarray with at least one valid, ValueError:\n with pytest.raises(ValueError):\n gyroradius(B_arr, \"e-\", Vperp=V, T=T_nanarr)\n with pytest.raises(ValueError):\n gyroradius(B_arr, \"e-\", Vperp=V_nanarr, T=T_i)\n\n def test_scalar_and_nan_qarray(self):\n # If either Vperp or T is a valid scalar and the other is a Qarray of all nans,\n # should do something valid and not raise a ValueError\n assert np.all(np.isfinite(gyroradius(B_arr, \"e-\", Vperp=V, T=T_allnanarr)))\n assert np.all(np.isfinite(gyroradius(B_arr, \"e-\", Vperp=V_allnanarr, T=T_i)))\n\n def test_keeps_arguments_unchanged(self):\n Vperp1 = u.Quantity([np.nan, 1], unit=u.m / u.s)\n Vperp2 = u.Quantity([np.nan, 1], unit=u.m / u.s) # an exact copy\n T_i = u.Quantity([1, np.nan], unit=u.K)\n\n gyroradius(B_arr, \"e-\", Vperp=Vperp1, T=T_i)\n assert_quantity_allclose(Vperp1, Vperp2)\n\n\ndef test_plasma_frequency():\n r\"\"\"Test the plasma_frequency function in parameters.py.\"\"\"\n\n assert plasma_frequency(n_e, \"e-\").unit.is_equivalent(u.rad / u.s)\n\n assert plasma_frequency(n_e, \"e-\", to_hz=True).unit.is_equivalent(u.Hz)\n\n assert np.isclose(plasma_frequency(1 * u.cm ** -3, \"e-\").value, 5.64e4, rtol=1e-2)\n\n assert np.isclose(\n plasma_frequency(1 * u.cm ** -3, particle=\"N\").value, 3.53e2, rtol=1e-1\n )\n\n assert np.isclose(\n plasma_frequency(1 * u.cm ** -3, 
particle=\"N\", to_hz=True).value,\n 56.19000195094519,\n )\n\n with pytest.raises(TypeError):\n with pytest.warns(u.UnitsWarning):\n plasma_frequency(u.m ** -3, \"e-\")\n\n with pytest.raises(u.UnitTypeError):\n plasma_frequency(5 * u.m ** -2, \"e-\")\n\n assert np.isnan(plasma_frequency(np.nan * u.m ** -3, \"e-\"))\n\n with pytest.warns(u.UnitsWarning):\n assert plasma_frequency(1e19, \"e-\") == plasma_frequency(1e19 * u.m ** -3, \"e-\")\n\n assert plasma_frequency(n_i, particle=\"p\").unit.is_equivalent(u.rad / u.s)\n\n # Case where Z=1 is assumed\n assert plasma_frequency(n_i, particle=\"H-1+\") == plasma_frequency(n_i, particle=\"p\")\n\n assert np.isclose(\n plasma_frequency(mu * u.cm ** -3, particle=\"p\").value, 1.32e3, rtol=1e-2\n )\n\n with pytest.raises(ValueError):\n plasma_frequency(n=5 * u.m ** -3, particle=\"sdfas\")\n\n with pytest.warns(u.UnitsWarning):\n plasma_freq_no_units = plasma_frequency(1e19, particle=\"p\")\n assert plasma_freq_no_units == plasma_frequency(1e19 * u.m ** -3, particle=\"p\")\n\n plasma_frequency(1e17 * u.cm ** -3, particle=\"p\")\n # testing for user input z_mean\n testMeth1 = plasma_frequency(1e17 * u.cm ** -3, particle=\"p\", z_mean=0.8).si.value\n testTrue1 = 333063562455.4028\n errStr = f\"plasma_frequency() gave {testMeth1}, should be {testTrue1}.\"\n assert np.isclose(testMeth1, testTrue1, atol=0.0, rtol=1e-6), errStr\n\n assert_can_handle_nparray(plasma_frequency)\n\n\ndef test_Debye_length():\n r\"\"\"Test the Debye_length function in parameters.py.\"\"\"\n\n assert Debye_length(T_e, n_e).unit.is_equivalent(u.m)\n\n assert np.isclose(Debye_length(1 * u.eV, 1 * u.cm ** -3).value, 7.43, atol=0.005)\n\n with pytest.warns(u.UnitsWarning):\n Debye_length(5, 5 * u.m ** -3)\n\n with pytest.raises(u.UnitTypeError):\n Debye_length(56 * u.kg, 5 * u.m ** -3)\n\n with pytest.raises(ValueError):\n Debye_length(5 * u.eV, -5 * u.m ** -3)\n\n with pytest.raises(ValueError):\n Debye_length(-45 * u.K, 5 * u.m ** -3)\n\n Tarr2 = np.array([1, 2]) * u.K\n narr3 = np.array([1, 2, 3]) * u.m ** -3\n with pytest.raises(ValueError):\n Debye_length(Tarr2, narr3)\n\n with pytest.warns(u.UnitsWarning):\n assert Debye_length(2.0, 2.0) == Debye_length(2.0 * u.K, 2.0 * u.m ** -3)\n\n with pytest.warns(u.UnitsWarning):\n assert Debye_length(2.0 * u.K, 2.0) == Debye_length(2.0, 2.0 * u.m ** -3)\n\n assert_can_handle_nparray(Debye_length)\n\n\ndef test_Debye_number():\n r\"\"\"Test the Debye_number function in parameters.py.\"\"\"\n\n assert Debye_number(T_e, n_e).unit.is_equivalent(u.dimensionless_unscaled)\n\n T_e_eV = T_e.to(u.eV, equivalencies=u.temperature_energy())\n assert np.isclose(Debye_number(T_e, n_e).value, Debye_number(T_e_eV, n_e).value)\n\n assert np.isclose(Debye_number(1 * u.eV, 1 * u.cm ** -3).value, 1720862385.43342)\n\n with pytest.warns(u.UnitsWarning):\n Debye_number(T_e, 4)\n\n with pytest.raises(ValueError):\n Debye_number(None, n_e)\n\n with pytest.raises(u.UnitTypeError):\n Debye_number(5 * u.m, 5 * u.m ** -3)\n\n with pytest.raises(u.UnitTypeError):\n Debye_number(5 * u.K, 5 * u.m ** 3)\n\n with pytest.raises(ValueError):\n Debye_number(5j * u.K, 5 * u.cm ** -3)\n\n Tarr2 = np.array([1, 2]) * u.K\n narr3 = np.array([1, 2, 3]) * u.m ** -3\n with pytest.raises(ValueError):\n Debye_number(Tarr2, narr3)\n\n with pytest.warns(u.UnitsWarning):\n assert Debye_number(1.1, 1.1) == Debye_number(1.1 * u.K, 1.1 * u.m ** -3)\n\n with pytest.warns(u.UnitsWarning):\n assert Debye_number(1.1 * u.K, 1.1) == Debye_number(1.1, 1.1 * u.m ** -3)\n\n 
assert_can_handle_nparray(Debye_number)\n\n\ndef test_inertial_length():\n r\"\"\"Test the inertial_length function in parameters.py.\"\"\"\n\n assert inertial_length(n_i, particle=\"p\").unit.is_equivalent(u.m)\n\n assert np.isclose(\n inertial_length(mu * u.cm ** -3, particle=\"p\").cgs.value, 2.28e7, rtol=0.01\n )\n\n inertial_length_electron_plus = inertial_length(5.351 * u.m ** -3, particle=\"e+\")\n assert inertial_length_electron_plus == inertial_length(\n 5.351 * u.m ** -3, particle=\"e\"\n )\n\n assert inertial_length(n_i, particle=\"p\") == inertial_length(n_i, particle=\"p\")\n\n with pytest.warns(u.UnitsWarning):\n inertial_length(4, particle=\"p\")\n\n with pytest.raises(u.UnitTypeError):\n inertial_length(4 * u.m ** -2, particle=\"p\")\n\n with pytest.raises(ValueError):\n inertial_length(-5 * u.m ** -3, particle=\"p\")\n\n with pytest.raises(InvalidParticleError):\n inertial_length(n_i, particle=-135)\n\n with pytest.warns(u.UnitsWarning):\n inertial_length_no_units = inertial_length(1e19, particle=\"p\")\n assert inertial_length_no_units == inertial_length(\n 1e19 * u.m ** -3, particle=\"p\"\n )\n\n assert inertial_length(n_e, \"e-\").unit.is_equivalent(u.m)\n\n assert np.isclose(\n inertial_length(1 * u.cm ** -3, \"e-\").cgs.value, 5.31e5, rtol=1e-3\n )\n\n with pytest.warns(u.UnitsWarning):\n inertial_length(5, \"e-\")\n\n with pytest.raises(u.UnitTypeError):\n inertial_length(5 * u.m, \"e-\")\n\n with pytest.raises(ValueError):\n inertial_length(-5 * u.m ** -3, \"e-\")\n\n with pytest.warns(u.UnitsWarning):\n assert inertial_length(1e19, \"e-\") == inertial_length(1e19 * u.m ** -3, \"e-\")\n\n assert_can_handle_nparray(inertial_length)\n\n\ndef test_magnetic_pressure():\n r\"\"\"Test the magnetic_pressure function in parameters.py.\"\"\"\n\n assert magnetic_pressure(B_arr).unit.is_equivalent(u.Pa)\n\n assert magnetic_pressure(B).unit.is_equivalent(u.Pa)\n\n assert magnetic_pressure(B).unit.name == \"Pa\"\n\n assert magnetic_pressure(B).value == magnetic_energy_density(B).value\n\n assert magnetic_pressure(B) == magnetic_energy_density(B.to(u.G))\n\n assert np.isclose(magnetic_pressure(B).value, 397887.35772973835)\n\n with pytest.warns(u.UnitsWarning):\n magnetic_pressure(5)\n\n with pytest.raises(u.UnitTypeError):\n magnetic_pressure(5 * u.m)\n\n assert np.isnan(magnetic_pressure(np.nan * u.T))\n\n with pytest.raises(ValueError):\n magnetic_pressure(5j * u.T)\n\n assert np.isnan(magnetic_pressure(B_nanarr)[-1])\n\n with pytest.warns(u.UnitsWarning):\n assert magnetic_pressure(22.2) == magnetic_pressure(22.2 * u.T)\n\n assert_can_handle_nparray(magnetic_pressure)\n\n\ndef test_magnetic_energy_density():\n r\"\"\"Test the magnetic_energy_density function in parameters.py.\"\"\"\n\n assert magnetic_energy_density(B_arr).unit.is_equivalent(u.J / u.m ** 3)\n\n assert magnetic_energy_density(B).unit.is_equivalent(\"J / m3\")\n\n assert magnetic_energy_density(B).value == magnetic_pressure(B).value\n\n assert_quantity_allclose(\n magnetic_energy_density(2 * B), 4 * magnetic_energy_density(B)\n )\n\n assert_quantity_allclose(magnetic_energy_density(B).value, 397887.35772973835)\n\n assert_quantity_allclose(\n magnetic_energy_density(B), magnetic_energy_density(B.to(u.G))\n )\n\n assert isinstance(magnetic_energy_density(B_arr), u.Quantity)\n\n with pytest.warns(u.UnitsWarning):\n magnetic_energy_density(5)\n\n with pytest.raises(u.UnitTypeError):\n magnetic_energy_density(5 * u.m)\n\n assert np.isnan(magnetic_energy_density(np.nan * u.T))\n\n with pytest.raises(ValueError):\n 
magnetic_energy_density(5j * u.T)\n\n assert np.isnan(magnetic_energy_density(B_nanarr)[-1])\n\n with pytest.warns(u.UnitsWarning):\n assert magnetic_energy_density(22.2) == magnetic_energy_density(22.2 * u.T)\n\n assert_can_handle_nparray(magnetic_energy_density)\n\n\ndef test_upper_hybrid_frequency():\n r\"\"\"Test the upper_hybrid_frequency function in parameters.py.\"\"\"\n\n omega_uh = upper_hybrid_frequency(B, n_e=n_e)\n omega_uh_hz = upper_hybrid_frequency(B, n_e=n_e, to_hz=True)\n omega_ce = gyrofrequency(B, \"e-\")\n omega_pe = plasma_frequency(n=n_e, particle=\"e-\")\n assert omega_ce.unit.is_equivalent(u.rad / u.s)\n assert omega_pe.unit.is_equivalent(u.rad / u.s)\n assert omega_uh.unit.is_equivalent(u.rad / u.s)\n assert omega_uh_hz.unit.is_equivalent(u.Hz)\n left_hand_side = omega_uh ** 2\n right_hand_side = omega_ce ** 2 + omega_pe ** 2\n assert np.isclose(left_hand_side.value, right_hand_side.value)\n\n assert np.isclose(omega_uh_hz.value, 69385868857.90918)\n\n with pytest.raises(ValueError):\n upper_hybrid_frequency(5 * u.T, n_e=-1 * u.m ** -3)\n\n with pytest.warns(u.UnitsWarning):\n assert upper_hybrid_frequency(1.2, 1.3) == upper_hybrid_frequency(\n 1.2 * u.T, 1.3 * u.m ** -3\n )\n\n with pytest.warns(u.UnitsWarning):\n assert upper_hybrid_frequency(1.4 * u.T, 1.3) == upper_hybrid_frequency(\n 1.4, 1.3 * u.m ** -3\n )\n\n assert_can_handle_nparray(upper_hybrid_frequency)\n\n\ndef test_lower_hybrid_frequency():\n r\"\"\"Test the lower_hybrid_frequency function in parameters.py.\"\"\"\n\n ion = \"He-4 1+\"\n omega_ci = gyrofrequency(B, particle=ion)\n omega_pi = plasma_frequency(n=n_i, particle=ion)\n omega_ce = gyrofrequency(B, \"e-\")\n omega_lh = lower_hybrid_frequency(B, n_i=n_i, ion=ion)\n omega_lh_hz = lower_hybrid_frequency(B, n_i=n_i, ion=ion, to_hz=True)\n assert omega_ci.unit.is_equivalent(u.rad / u.s)\n assert omega_pi.unit.is_equivalent(u.rad / u.s)\n assert omega_ce.unit.is_equivalent(u.rad / u.s)\n assert omega_lh.unit.is_equivalent(u.rad / u.s)\n left_hand_side = omega_lh ** -2\n right_hand_side = (\n 1 / (omega_ci ** 2 + omega_pi ** 2) + omega_ci ** -1 * omega_ce ** -1\n )\n assert np.isclose(left_hand_side.value, right_hand_side.value)\n\n assert np.isclose(omega_lh_hz.value, 299878691.3223296)\n\n with pytest.raises(ValueError):\n lower_hybrid_frequency(0.2 * u.T, n_i=5e19 * u.m ** -3, ion=\"asdfasd\")\n\n with pytest.raises(ValueError):\n lower_hybrid_frequency(0.2 * u.T, n_i=-5e19 * u.m ** -3, ion=\"asdfasd\")\n\n with pytest.raises(ValueError):\n lower_hybrid_frequency(np.nan * u.T, n_i=-5e19 * u.m ** -3, ion=\"asdfasd\")\n\n with pytest.warns(u.UnitsWarning):\n assert lower_hybrid_frequency(1.3, 1e19, \"p+\") == lower_hybrid_frequency(\n 1.3 * u.T, 1e19 * u.m ** -3, \"p+\"\n )\n assert_can_handle_nparray(lower_hybrid_frequency)\n\n\ndef test_Bohm_diffusion():\n r\"\"\"Test Mag_Reynolds in dimensionless.py\"\"\"\n\n T_e = 5000 * u.K\n B = 10 * u.T\n\n assert (Bohm_diffusion(T_e, B)).unit == u.m ** 2 / u.s\n\n with pytest.warns(u.UnitsWarning):\n Bohm_diffusion(5000, B)\n\n with pytest.raises(u.UnitTypeError):\n Bohm_diffusion(2.2 * u.kg, B)\n\n\[email protected](\n \"alias, parent\",\n [\n (rho_, mass_density),\n (va_, Alfven_speed),\n (cs_, ion_sound_speed),\n (pth_, thermal_pressure),\n (betaH_, Hall_parameter),\n (oc_, gyrofrequency),\n (wc_, gyrofrequency),\n (rc_, gyroradius),\n (rhoc_, gyroradius),\n (wp_, plasma_frequency),\n (lambdaD_, Debye_length),\n (nD_, Debye_number),\n (cwp_, inertial_length),\n (pmag_, magnetic_pressure),\n (ub_, 
magnetic_energy_density),\n (wuh_, upper_hybrid_frequency),\n (wlh_, lower_hybrid_frequency),\n (DB_, Bohm_diffusion),\n ],\n)\ndef test_parameters_aliases(alias, parent):\n \"\"\"Test all aliases defined in parameters.py\"\"\"\n assert alias is parent\n" ]
[ [ "numpy.isclose", "numpy.abs", "numpy.logical_not", "numpy.all", "numpy.isnan", "numpy.array", "numpy.isscalar" ] ]
seo-95/elvis
[ "a89c759acdf6ce64c7e6863aeb68dc0ba3293fed" ]
[ "elvis/modeling/meta_arch/vl_pretrainer.py" ]
[ "import copy\nimport os\nimport pdb\nimport random\nfrom typing import Dict, List, Text, TypeVar\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom elvis.modeling.models import build_net\nfrom elvis.modeling.models.layers import FC, MLP\nfrom elvis.utils.vlp_objectives import optimal_transport_dist\n\nfrom .base import MetaArch\nfrom .build import ARCH_REGISTRY\n\nTensor = TypeVar('torch.tensor')\n\n\n__all__ = ['AlignmentVLP',\n 'build_align_vlp']\n\n\nclass AlignmentVLP(MetaArch):\n \"\"\"Meta architecture for Visual Language Pretraining (VLP) based on image-caption alignment\n \"\"\"\n def __init__(self, model, max_visual, max_tokens, tasks_dict) -> None:\n super().__init__()\n self.model = model\n self.max_visual = max_visual\n self.max_tokens = max_tokens+2 #take into account [CLS] and [SEP]\n self.tasks_dict = tasks_dict\n\n self.lm_mlp = MLP(in_features=self.model.embed_dim,\n hidden_dim=self.model.embed_dim,\n out_features=len(self.model.tokenizer)-1,\n dropout_p=.1)\n self.itm_fc = FC(in_features=self.model.embed_dim, out_features=2)\n \n def forward(self, vis_in, txt_in, vis_mask, txt_mask, **kwargs) -> Dict:\n cntx_emb = self.model(vis_in=vis_in, vis_mask=vis_mask, txt_in=txt_in, txt_mask=txt_mask)\n txt_emb = cntx_emb[:, :self.max_tokens]\n\n itm_logits = self.itm_fc(txt_emb[:, 0, :]) #pass everything but use only [CLS]: better parallelization of loss computation\n lm_logits = self.lm_mlp(txt_emb[:, 1:, :])\n\n #? exclude special tokens from ot computation\n vis_mask = torch.cat(\n (torch.ones((vis_mask.shape[0], 1), device=vis_mask.device), vis_mask),\n dim=-1) #add attention for [IMG]\n ot_dist = optimal_transport_dist(txt_emb=cntx_emb[:, :self.max_tokens, :].float(),\n img_emb=cntx_emb[:, self.max_tokens:, :].float(),\n txt_pad=~txt_mask.bool(),\n img_pad=~vis_mask.bool()\n )\n\n return {'lm_logits': lm_logits, 'itm_logits': itm_logits, 'ot_dist': ot_dist}\n\n def compute_loss(self, lm_logits, itm_logits, lm_targets, itm_targets, **kwargs) -> Dict:\n B = lm_logits.shape[0]\n n_mlm = sum([t == 'MLM' for t in kwargs['tasks']])\n n_itm = len(kwargs['tasks']) - n_mlm\n loss_dict = {}\n\n #compute lm loss (compute it also if n_mlm > 0 otherwise the DDP will raise an exception)\n lm_loss = F.cross_entropy(lm_logits.transpose(1, 2), lm_targets[:, 1:], reduction='sum')\n if n_mlm > 0:\n lm_loss /= n_mlm\n loss_dict['lm_loss'] = lm_loss\n\n #compute itm loss (compute it also if n_itm > 0 otherwise the DDP will raise an exception)\n itm_loss = F.cross_entropy(itm_logits, itm_targets[:, 0], reduction='sum')\n ot_pos = kwargs['ot_dist'].masked_select(itm_targets[:, 0] == 1)\n ot_neg = kwargs['ot_dist'].masked_select(itm_targets[:, 0] == 0)\n #we want to maximize the OT distance for negative pairs and minimize OT distance for positive ones\n ot_loss = ot_pos.sum() - ot_neg.sum()\n itm_loss = (itm_loss + 0.1 * ot_loss)\n if n_itm > 0:\n itm_loss /= n_itm\n loss_dict['itm_loss'] = itm_loss\n\n loss_dict['loss'] = sum(loss_dict.values())\n return loss_dict\n\n def save_on_disk(self, path):\n state_dict = copy.deepcopy(self).cpu().state_dict()\n ckp_file = os.path.join(path, 'state_dict.pt')\n torch.save(state_dict, ckp_file)\n\n\n\n@ARCH_REGISTRY.register()\ndef build_align_vlp(cfg):\n model, data_interface = build_net(cfg.MODEL, get_interface='vlp')\n vlp = AlignmentVLP(model,\n max_visual=cfg.MODEL.MAX_N_VISUAL,\n max_tokens=cfg.MODEL.MAX_N_TOKENS,\n tasks_dict=cfg.MODEL.TASKS.get_as_dict())\n return vlp, data_interface\n\n" ]
[ [ "torch.ones", "torch.save", "torch.nn.functional.cross_entropy" ] ]
keisuke-umezawa/backlight
[ "db49a966fdb38de693bb8157cec88d98620f9946" ]
[ "tests/portfolio/test_portfolio.py" ]
[ "import pytest\nimport pandas as pd\nimport numpy as np\n\nimport backlight\nfrom backlight.portfolio.portfolio import create_portfolio as module\nfrom backlight.portfolio.portfolio import _fusion_positions\nimport backlight.positions.positions\nfrom backlight.trades.trades import make_trades\nfrom backlight.asset.currency import Currency\n\n\[email protected]\ndef trades():\n trades = []\n index = [\n \"2018-06-06 00:00:00\",\n \"2018-06-06 00:01:00\",\n \"2018-06-06 00:02:00\",\n \"2018-06-06 00:03:00\",\n \"2018-06-06 00:03:00\",\n \"2018-06-06 00:04:00 \",\n \"2018-06-06 00:05:00\",\n \"2018-06-06 00:05:00\",\n \"2018-06-06 00:06:00 \",\n \"2018-06-06 00:06:00 \",\n \"2018-06-06 00:07:00 \",\n \"2018-06-06 00:08:00 \",\n \"2018-06-06 00:09:00 \",\n \"2018-06-06 00:09:00 \",\n ]\n\n trade = pd.Series(\n index=pd.to_datetime(index),\n data=[1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1],\n name=\"amount\",\n )\n ids = [0, 1, 0, 1, 2, 3, 2, 4, 3, 5, 4, 5, 6, 6]\n currency_unit = Currency.JPY\n\n trades.append(make_trades(\"USDJPY\", [trade], currency_unit, [ids]))\n trades.append(make_trades(\"EURJPY\", [trade], currency_unit, [ids]))\n trades.append(make_trades(\"USDJPY\", [trade], currency_unit, [ids]))\n return trades\n\n\[email protected]\ndef markets():\n markets = []\n symbol = \"USDJPY\"\n currency_unit = Currency.JPY\n quote_currency = Currency.USD\n periods = 13\n df = pd.DataFrame(\n index=pd.date_range(start=\"2018-06-05 23:57:00\", freq=\"1min\", periods=periods),\n data=np.repeat(2, periods)[:, None],\n columns=[\"mid\"],\n )\n markets.append(\n backlight.datasource.from_dataframe(\n df, symbol, currency_unit, quote_currency=quote_currency\n )\n )\n\n symbol = \"EURJPY\"\n currency_unit = Currency.JPY\n quote_currency = Currency.EUR\n df = pd.DataFrame(\n index=pd.date_range(start=\"2018-06-05 23:57:00\", freq=\"1min\", periods=periods),\n data=np.repeat(4, periods)[:, None],\n columns=[\"mid\"],\n )\n markets.append(\n backlight.datasource.from_dataframe(\n df, symbol, currency_unit, quote_currency=quote_currency\n )\n )\n return markets\n\n\[email protected]\ndef principal():\n return {\"USDJPY\": 10, \"EURJPY\": 10}\n\n\[email protected]\ndef lot_size():\n return {\"USDJPY\": 2, \"EURJPY\": 2}\n\n\ndef test_create_portfolio(trades, markets, principal, lot_size):\n portfolio = module(trades, markets, principal, lot_size, Currency.USD)\n\n index = [\n \"2018-06-05 23:59:00\",\n \"2018-06-06 00:00:00\",\n \"2018-06-06 00:01:00\",\n \"2018-06-06 00:02:00\",\n \"2018-06-06 00:03:00\",\n \"2018-06-06 00:04:00 \",\n \"2018-06-06 00:05:00\",\n \"2018-06-06 00:06:00 \",\n \"2018-06-06 00:07:00 \",\n \"2018-06-06 00:08:00 \",\n \"2018-06-06 00:09:00 \",\n ]\n\n data1 = [\n [0.0, 0.0, 5.0],\n [2.0, 2.0, 1.0],\n [0.0, 2.0, 5.0],\n [-2.0, 2.0, 9.0],\n [2.0, 2.0, 1.0],\n [4.0, 2.0, -3.0],\n [0.0, 2.0, 5.0],\n [-4.0, 2.0, 13.0],\n [-2.0, 2.0, 9.0],\n [0.0, 2.0, 5.0],\n [0.0, 2.0, 5.0],\n ]\n\n data2 = [\n [0.0, 0.0, 10.0],\n [4.0, 2.0, 6.0],\n [0.0, 2.0, 10.0],\n [-4.0, 2.0, 14.0],\n [4.0, 2.0, 6.0],\n [8.0, 2.0, 2.0],\n [0.0, 2.0, 10.0],\n [-8.0, 2.0, 18.0],\n [-4.0, 2.0, 14.0],\n [0.0, 2.0, 10.0],\n [0.0, 2.0, 10.0],\n ]\n\n data = [data1, data2]\n\n for (position, d) in zip(portfolio._positions, data):\n\n expected = pd.DataFrame(\n index=pd.to_datetime(index),\n data=d,\n columns=[\"amount\", \"price\", \"principal\"],\n )\n assert ((expected == position).all()).all()\n\n\ndef test_fusion_positions():\n periods = 3\n data = np.arange(periods * 3).reshape((periods, 3))\n columns = 
[\"amount\", \"price\", \"principal\"]\n currency_unit = Currency.JPY\n\n positions_list = []\n df = pd.DataFrame(\n data=data,\n index=pd.date_range(\"2012-1-1\", periods=periods, freq=\"D\"),\n columns=columns,\n )\n symbol = \"USDJPY\"\n positions_list.append(\n backlight.positions.positions.from_dataframe(df, symbol, currency_unit)\n )\n\n df = pd.DataFrame(\n data=data,\n index=pd.date_range(\"2012-1-2\", periods=periods, freq=\"D\"),\n columns=columns,\n )\n symbol = \"USDJPY\"\n positions_list.append(\n backlight.positions.positions.from_dataframe(df, symbol, currency_unit)\n )\n\n df = pd.DataFrame(\n data=data,\n index=pd.date_range(\"2012-1-4\", periods=periods, freq=\"D\"),\n columns=columns,\n )\n symbol = \"EURJPY\"\n positions_list.append(\n backlight.positions.positions.from_dataframe(df, symbol, currency_unit)\n )\n\n fusioned = _fusion_positions(positions_list)\n\n data1 = np.arange(periods * 3).reshape((periods, 3))\n data2 = [[0, 1, 2], [3, 5, 7], [9, 11, 13], [6, 7, 8]]\n\n df1 = pd.DataFrame(\n data=data1,\n index=pd.date_range(\"2012-1-1\", periods=periods, freq=\"D\"),\n columns=columns,\n )\n df2 = pd.DataFrame(\n data=data2,\n index=pd.date_range(\"2012-1-1\", periods=periods + 1, freq=\"D\"),\n columns=columns,\n )\n\n expected = [df1, df2]\n\n for exp, fus in zip(expected, fusioned):\n assert exp.all().all() == fus.all().all()\n" ]
[ [ "numpy.arange", "pandas.to_datetime", "numpy.repeat", "pandas.date_range" ] ]
twice154/Spatial-Self-modulation-on-BigGAN
[ "6ca691231bf7e8fd388a08b5ce6b4e30a50dd57b" ]
[ "BigGAN-PyTorch/BigGAN_remove_condbn+++++.py" ]
[ "import numpy as np\nimport math\nimport functools\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.nn import Parameter as P\n\nimport layers\nfrom sync_batchnorm import SynchronizedBatchNorm2d as SyncBatchNorm2d\n\n\n# Architectures for G\n# Attention is passed in in the format '32_64' to mean applying an attention\n# block at both resolution 32x32 and 64x64. Just '64' will apply at 64x64.\ndef G_arch(ch=64, attention='64', ksize='333333', dilation='111111'):\n arch = {}\n arch[512] = {'in_channels' : [ch * item for item in [16, 16, 8, 8, 4, 2, 1]],\n 'out_channels' : [ch * item for item in [16, 8, 8, 4, 2, 1, 1]],\n 'upsample' : [True] * 7,\n 'resolution' : [8, 16, 32, 64, 128, 256, 512],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,10)}}\n arch[256] = {'in_channels' : [ch * item for item in [16, 16, 8, 8, 4, 2]],\n 'out_channels' : [ch * item for item in [16, 8, 8, 4, 2, 1]],\n 'upsample' : [True] * 6,\n 'resolution' : [8, 16, 32, 64, 128, 256],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,9)}}\n arch[128] = {'in_channels' : [ch * item for item in [16, 16, 8, 4, 2]],\n 'out_channels' : [ch * item for item in [16, 8, 4, 2, 1]],\n 'upsample' : [True] * 5,\n 'resolution' : [8, 16, 32, 64, 128],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,8)}}\n arch[64] = {'in_channels' : [ch * item for item in [16, 16, 8, 4]],\n 'out_channels' : [ch * item for item in [16, 8, 4, 2]],\n 'upsample' : [True] * 4,\n 'resolution' : [8, 16, 32, 64],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,7)}}\n arch[32] = {'in_channels' : [ch * item for item in [4, 4, 4]],\n 'out_channels' : [ch * item for item in [4, 4, 4]],\n 'upsample' : [True] * 3,\n 'resolution' : [8, 16, 32],\n 'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])\n for i in range(3,6)}}\n\n return arch\n\nclass Generator(nn.Module):\n def __init__(self, G_ch=64, dim_z=128, bottom_width=4, resolution=128,\n G_kernel_size=3, G_attn='64', n_classes=1000,\n num_G_SVs=1, num_G_SV_itrs=1,\n G_shared=True, shared_dim=0, hier=False,\n cross_replica=False, mybn=False,\n G_activation=nn.ReLU(inplace=False),\n G_lr=5e-5, G_B1=0.0, G_B2=0.999, adam_eps=1e-8,\n BN_eps=1e-5, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False,\n G_init='ortho', skip_init=False, no_optim=False,\n G_param='SN', norm_style='bn',\n **kwargs):\n super(Generator, self).__init__()\n # Channel width mulitplier\n self.ch = G_ch\n # Dimensionality of the latent space\n self.dim_z = dim_z\n # The initial spatial dimensions\n self.bottom_width = bottom_width\n # Resolution of the output\n self.resolution = resolution\n # Kernel size?\n self.kernel_size = G_kernel_size\n # Attention?\n self.attention = G_attn\n # number of classes, for use in categorical conditional generation\n self.n_classes = n_classes\n # Use shared embeddings?\n self.G_shared = G_shared\n # Dimensionality of the shared embedding? 
Unused if not using G_shared\n self.shared_dim = shared_dim if shared_dim > 0 else dim_z\n # Hierarchical latent space?\n self.hier = hier\n # Cross replica batchnorm?\n self.cross_replica = cross_replica\n # Use my batchnorm?\n self.mybn = mybn\n # nonlinearity for residual blocks\n self.activation = G_activation\n # Initialization style\n self.init = G_init\n # Parameterization style\n self.G_param = G_param\n # Normalization style\n self.norm_style = norm_style\n # Epsilon for BatchNorm?\n self.BN_eps = BN_eps\n # Epsilon for Spectral Norm?\n self.SN_eps = SN_eps\n # fp16?\n self.fp16 = G_fp16\n # Architecture dict\n self.arch = G_arch(self.ch, self.attention)[resolution]\n\n # If using hierarchical latents, adjust z\n if self.hier:\n # Number of places z slots into\n self.num_slots = len(self.arch['in_channels']) + 1\n self.z_chunk_size = (self.dim_z // self.num_slots)\n # Recalculate latent dimensionality for even splitting into chunks\n self.dim_z = self.z_chunk_size * self.num_slots\n else:\n self.num_slots = 1\n self.z_chunk_size = 0\n\n # Which convs, batchnorms, and linear layers to use\n if self.G_param == 'SN':\n self.which_conv = functools.partial(layers.SNConv2d,\n kernel_size=3, padding=1,\n num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,\n eps=self.SN_eps)\n self.which_linear = functools.partial(layers.SNLinear,\n num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,\n eps=self.SN_eps)\n else:\n self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)\n self.which_linear = nn.Linear\n \n # We use a non-spectral-normed embedding here regardless;\n # For some reason applying SN to G's embedding seems to randomly cripple G\n self.which_embedding = nn.Embedding\n # bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared\n # else self.which_embedding)\n self.which_bn = functools.partial(layers.bn,\n # which_linear=bn_linear,\n cross_replica=self.cross_replica,\n mybn=self.mybn,\n # input_size=(self.shared_dim + self.z_chunk_size if self.G_shared\n # else self.n_classes),\n # norm_style=self.norm_style,\n eps=self.BN_eps)\n\n\n # Prepare model\n # If not using shared embeddings, self.shared is just a passthrough\n self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared \n else layers.identity())\n # First linear layer\n self.linear = self.which_linear(self.dim_z // self.num_slots,\n self.arch['in_channels'][0] * (self.bottom_width **2))\n\n # self.blocks is a doubly-nested list of modules, the outer loop intended\n # to be over blocks at a given resolution (resblocks and/or self-attention)\n # while the inner loop is over a given block\n self.blocks = []\n for index in range(len(self.arch['out_channels'])):\n self.blocks += [[layers.GBlock(in_channels=self.arch['in_channels'][index],\n out_channels=self.arch['out_channels'][index],\n which_conv=self.which_conv,\n which_bn=self.which_bn,\n activation=self.activation,\n upsample=(functools.partial(F.interpolate, scale_factor=2)\n if self.arch['upsample'][index] else None))]]\n\n # If attention on this block, attach it to the end\n if self.arch['attention'][self.arch['resolution'][index]]:\n print('Adding attention layer in G at resolution %d' % self.arch['resolution'][index])\n self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]\n\n # Turn self.blocks into a ModuleList so that it's all properly registered.\n self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])\n\n # output layer: batchnorm-relu-conv.\n # Consider using a 
non-spectral conv here\n self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][-1],\n cross_replica=self.cross_replica,\n mybn=self.mybn),\n self.activation,\n self.which_conv(self.arch['out_channels'][-1], 3))\n\n\n # Prepare spatial modulation model\n # If not using shared embeddings, self.shared is just a passthrough\n self.spatial_modulation_shared = (self.which_embedding(n_classes, self.shared_dim))\n # First linear layer\n self.spatial_modulation_linear = self.which_linear(self.dim_z + self.shared_dim,\n self.arch['in_channels'][0] * (self.bottom_width **2))\n\n # self.blocks is a doubly-nested list of modules, the outer loop intended\n # to be over blocks at a given resolution (resblocks and/or self-attention)\n # while the inner loop is over a given block\n self.spatial_modulation_blocks = []\n for index in range(len(self.arch['out_channels'])):\n self.spatial_modulation_blocks += [[layers.SpatialModulationGBlock(in_channels=self.arch['in_channels'][index],\n out_channels=self.arch['out_channels'][index],\n which_conv=self.which_conv,\n which_bn=self.which_bn,\n activation=self.activation,\n upsample=(functools.partial(F.interpolate, scale_factor=2)\n if self.arch['upsample'][index] else None))]]\n\n # Turn self.blocks into a ModuleList so that it's all properly registered.\n self.spatial_modulation_blocks = nn.ModuleList([nn.ModuleList(block) for block in self.spatial_modulation_blocks])\n\n\n # Initialize weights. Optionally skip init for testing.\n if not skip_init:\n self.init_weights()\n\n # Set up optimizer\n # If this is an EMA copy, no need for an optim, so just return now\n if no_optim:\n return\n self.lr, self.B1, self.B2, self.adam_eps = G_lr, G_B1, G_B2, adam_eps\n if G_mixed_precision:\n print('Using fp16 adam in G...')\n import utils\n self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0,\n eps=self.adam_eps)\n else:\n self.optim = optim.Adam(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0,\n eps=self.adam_eps)\n\n # LR scheduling, left here for forward compatibility\n # self.lr_sched = {'itr' : 0}# if self.progressive else {}\n # self.j = 0\n\n # Initialize\n def init_weights(self):\n self.param_count = 0\n for module in self.modules():\n if (isinstance(module, nn.Conv2d) \n or isinstance(module, nn.Linear) \n or isinstance(module, nn.Embedding)):\n if self.init == 'ortho':\n init.orthogonal_(module.weight)\n elif self.init == 'N02':\n init.normal_(module.weight, 0, 0.02)\n elif self.init in ['glorot', 'xavier']:\n init.xavier_uniform_(module.weight)\n else:\n print('Init style not recognized...')\n self.param_count += sum([p.data.nelement() for p in module.parameters()])\n print('Param count for G''s initialized parameters: %d' % self.param_count)\n\n # Note on this forward function: we pass in a y vector which has\n # already been passed through G.shared to enable easy class-wise\n # interpolation later. 
If we passed in the one-hot and then ran it through\n # G.shared in this forward function, it would be harder to handle.\n def forward(self, z, y):\n # If hierarchical, concatenate zs and ys\n if self.hier:\n zs = torch.split(z, self.z_chunk_size, 1)\n z = zs[0]\n ys = [torch.cat([y, item], 1) for item in zs[1:]]\n\n # Class embedding layer\n # spatial_c = self.spatial_modulation_shared(y)\n # Mixing layer\n spatial_h = self.spatial_modulation_linear(torch.cat([y, z], 1))\n # Reshape\n spatial_h = spatial_h.view(spatial_h.size(0), -1, self.bottom_width, self.bottom_width)\n else:\n ys = [y] * len(self.blocks)\n\n # Class embedding layer\n spatial_c = self.spatial_modulation_shared(y)\n # Mixing layer\n if len(spatial_c.shape) == 3:\n spatial_c = torch.squeeze(spatial_c, dim=1)\n spatial_h = self.spatial_modulation_linear(torch.cat([spatial_c, z], 1))\n # Reshape\n spatial_h = spatial_h.view(spatial_h.size(0), -1, self.bottom_width, self.bottom_width)\n \n # First linear layer\n h = self.linear(z)\n # Reshape\n h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)\n \n # Loop over blocks\n for index, blocklist in enumerate(self.blocks):\n # Spatial modulation calculation\n spatial_h, voxelwise_a_mod, voxelwise_b_mod = self.spatial_modulation_blocks[index][0](spatial_h)\n # Second inner loop in case block has multiple layers\n for block in blocklist:\n # Main layer forward\n h = block(h, ys[index])\n # Most coarse modulation\n # h = (h - torch.mean(h, dim=(2, 3), keepdim=True)) / torch.std(h, dim=(2, 3), keepdim=True)\n # h = h * (1 + global_a_mod.repeat(1, 1, h.shape[2], h.shape[3])) + global_b_mod.repeat(1, 1, h.shape[2], h.shape[3])\n # Most fine modulation\n h = (h - torch.mean(h, dim=(1, 2, 3), keepdim=True)) / torch.std(h, dim=(1, 2, 3), keepdim=True)\n h = h * (1 + voxelwise_a_mod) + voxelwise_b_mod\n \n # Apply batchnorm-relu-conv-tanh at output\n return torch.tanh(self.output_layer(h))\n\n\n# Discriminator architecture, same paradigm as G's above\ndef D_arch(ch=64, attention='64',ksize='333333', dilation='111111'):\n arch = {}\n arch[256] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8, 8, 16]],\n 'out_channels' : [item * ch for item in [1, 2, 4, 8, 8, 16, 16]],\n 'downsample' : [True] * 6 + [False],\n 'resolution' : [128, 64, 32, 16, 8, 4, 4 ],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,8)}}\n arch[128] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8, 16]],\n 'out_channels' : [item * ch for item in [1, 2, 4, 8, 16, 16]],\n 'downsample' : [True] * 5 + [False],\n 'resolution' : [64, 32, 16, 8, 4, 4],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,8)}}\n arch[64] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8]],\n 'out_channels' : [item * ch for item in [1, 2, 4, 8, 16]],\n 'downsample' : [True] * 4 + [False],\n 'resolution' : [32, 16, 8, 4, 4],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,7)}}\n arch[32] = {'in_channels' : [3] + [item * ch for item in [4, 4, 4]],\n 'out_channels' : [item * ch for item in [4, 4, 4, 4]],\n 'downsample' : [True, True, False, False],\n 'resolution' : [16, 16, 16, 16],\n 'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]\n for i in range(2,6)}}\n return arch\n\nclass Discriminator(nn.Module):\n\n def __init__(self, D_ch=64, D_wide=True, resolution=128,\n D_kernel_size=3, D_attn='64', n_classes=1000,\n num_D_SVs=1, num_D_SV_itrs=1, 
D_activation=nn.ReLU(inplace=False),\n D_lr=2e-4, D_B1=0.0, D_B2=0.999, adam_eps=1e-8,\n SN_eps=1e-12, output_dim=1, D_mixed_precision=False, D_fp16=False,\n D_init='ortho', skip_init=False, D_param='SN', **kwargs):\n super(Discriminator, self).__init__()\n # Width multiplier\n self.ch = D_ch\n # Use Wide D as in BigGAN and SA-GAN or skinny D as in SN-GAN?\n self.D_wide = D_wide\n # Resolution\n self.resolution = resolution\n # Kernel size\n self.kernel_size = D_kernel_size\n # Attention?\n self.attention = D_attn\n # Number of classes\n self.n_classes = n_classes\n # Activation\n self.activation = D_activation\n # Initialization style\n self.init = D_init\n # Parameterization style\n self.D_param = D_param\n # Epsilon for Spectral Norm?\n self.SN_eps = SN_eps\n # Fp16?\n self.fp16 = D_fp16\n # Architecture\n self.arch = D_arch(self.ch, self.attention)[resolution]\n\n # Which convs, batchnorms, and linear layers to use\n # No option to turn off SN in D right now\n if self.D_param == 'SN':\n self.which_conv = functools.partial(layers.SNConv2d,\n kernel_size=3, padding=1,\n num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,\n eps=self.SN_eps)\n self.which_linear = functools.partial(layers.SNLinear,\n num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,\n eps=self.SN_eps)\n self.which_embedding = functools.partial(layers.SNEmbedding,\n num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,\n eps=self.SN_eps)\n # Prepare model\n # self.blocks is a doubly-nested list of modules, the outer loop intended\n # to be over blocks at a given resolution (resblocks and/or self-attention)\n self.blocks = []\n for index in range(len(self.arch['out_channels'])):\n self.blocks += [[layers.DBlock(in_channels=self.arch['in_channels'][index],\n out_channels=self.arch['out_channels'][index],\n which_conv=self.which_conv,\n wide=self.D_wide,\n activation=self.activation,\n preactivation=(index > 0),\n downsample=(nn.AvgPool2d(2) if self.arch['downsample'][index] else None))]]\n # If attention on this block, attach it to the end\n if self.arch['attention'][self.arch['resolution'][index]]:\n print('Adding attention layer in D at resolution %d' % self.arch['resolution'][index])\n self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index],\n self.which_conv)]\n # Turn self.blocks into a ModuleList so that it's all properly registered.\n self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])\n # Linear output layer. The output dimension is typically 1, but may be\n # larger if we're e.g. 
turning this into a VAE with an inference output\n self.linear = self.which_linear(self.arch['out_channels'][-1], output_dim)\n # Embedding for projection discrimination\n self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][-1])\n\n # Initialize weights\n if not skip_init:\n self.init_weights()\n\n # Set up optimizer\n self.lr, self.B1, self.B2, self.adam_eps = D_lr, D_B1, D_B2, adam_eps\n if D_mixed_precision:\n print('Using fp16 adam in D...')\n import utils\n self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)\n else:\n self.optim = optim.Adam(params=self.parameters(), lr=self.lr,\n betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)\n # LR scheduling, left here for forward compatibility\n # self.lr_sched = {'itr' : 0}# if self.progressive else {}\n # self.j = 0\n\n # Initialize\n def init_weights(self):\n self.param_count = 0\n for module in self.modules():\n if (isinstance(module, nn.Conv2d)\n or isinstance(module, nn.Linear)\n or isinstance(module, nn.Embedding)):\n if self.init == 'ortho':\n init.orthogonal_(module.weight)\n elif self.init == 'N02':\n init.normal_(module.weight, 0, 0.02)\n elif self.init in ['glorot', 'xavier']:\n init.xavier_uniform_(module.weight)\n else:\n print('Init style not recognized...')\n self.param_count += sum([p.data.nelement() for p in module.parameters()])\n print('Param count for D''s initialized parameters: %d' % self.param_count)\n\n def forward(self, x, y=None):\n # Stick x into h for cleaner for loops without flow control\n h = x\n # Loop over blocks\n for index, blocklist in enumerate(self.blocks):\n for block in blocklist:\n h = block(h)\n # Apply global sum pooling as in SN-GAN\n h = torch.sum(self.activation(h), [2, 3])\n # Get initial class-unconditional output\n out = self.linear(h)\n # Get projection of final featureset onto class vectors and add to evidence\n out = out + torch.sum(self.embed(y) * h, 1, keepdim=True)\n return out\n\n# Parallelized G_D to minimize cross-gpu communication\n# Without this, Generator outputs would get all-gathered and then rebroadcast.\nclass G_D(nn.Module):\n def __init__(self, G, D):\n super(G_D, self).__init__()\n self.G = G\n self.D = D\n\n def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False,\n split_D=False): \n # If training G, enable grad tape\n with torch.set_grad_enabled(train_G):\n # Get Generator output given noise\n G_z = self.G(z, self.G.shared(gy))\n # Cast as necessary\n if self.G.fp16 and not self.D.fp16:\n G_z = G_z.float()\n if self.D.fp16 and not self.G.fp16:\n G_z = G_z.half()\n # Split_D means to run D once with real data and once with fake,\n # rather than concatenating along the batch dimension.\n if split_D:\n D_fake = self.D(G_z, gy)\n if x is not None:\n D_real = self.D(x, dy)\n return D_fake, D_real\n else:\n if return_G_z:\n return D_fake, G_z\n else:\n return D_fake\n # If real data is provided, concatenate it with the Generator's output\n # along the batch dimension for improved efficiency.\n else:\n D_input = torch.cat([G_z, x], 0) if x is not None else G_z\n D_class = torch.cat([gy, dy], 0) if dy is not None else gy\n # Get Discriminator output\n D_out = self.D(D_input, D_class)\n if x is not None:\n return torch.split(D_out, [G_z.shape[0], x.shape[0]]) # D_fake, D_real\n else:\n if return_G_z:\n return D_out, G_z\n else:\n return D_out\n" ]
[ [ "torch.nn.init.xavier_uniform_", "torch.split", "torch.set_grad_enabled", "torch.std", "torch.nn.init.normal_", "torch.nn.ReLU", "torch.nn.ModuleList", "torch.nn.AvgPool2d", "torch.mean", "torch.cat", "torch.nn.init.orthogonal_", "torch.squeeze" ] ]
Ericonaldo/ILSwiss
[ "efd25d457fd1578005c6fbc45cae29e9ab64a99d" ]
[ "rlkit/core/eval_util.py" ]
[ "\"\"\"\nCommon evaluation utilities.\n\"\"\"\n\nfrom collections import OrderedDict\nfrom numbers import Number\nimport os\nimport json\n\nimport numpy as np\n\nfrom rlkit.core.vistools import plot_returns_on_same_plot, save_plot\n\n\ndef get_generic_path_information(paths, stat_prefix=\"\"):\n \"\"\"\n Get an OrderedDict with a bunch of statistic names and values.\n \"\"\"\n statistics = OrderedDict()\n returns = [sum(path[\"rewards\"]) for path in paths]\n # rewards = np.vstack([path[\"rewards\"] for path in paths])\n rewards = np.concatenate([path[\"rewards\"] for path in paths])\n statistics.update(\n create_stats_ordered_dict(\n \"Rewards\", rewards, stat_prefix=stat_prefix, always_show_all_stats=True\n )\n )\n statistics.update(\n create_stats_ordered_dict(\n \"Returns\", returns, stat_prefix=stat_prefix, always_show_all_stats=True\n )\n )\n # print(paths[0][\"env_infos\"])\n if \"is_success\" in paths[0][\"env_infos\"][0].keys():\n acc_sum = [(np.sum([x['is_success'] for x in path[\"env_infos\"]])>0).astype(float) for path in paths]\n acc = np.sum(acc_sum) * 1.0 / len(paths)\n statistics.update(\n create_stats_ordered_dict(\n \"Success Num\", np.sum(acc_sum), stat_prefix=stat_prefix, always_show_all_stats=True\n )\n )\n statistics.update(\n create_stats_ordered_dict(\n \"Traj Num\", len(paths), stat_prefix=stat_prefix, always_show_all_stats=True\n )\n )\n statistics.update(\n create_stats_ordered_dict(\n \"Success Rate\", acc, stat_prefix=stat_prefix, always_show_all_stats=True\n )\n )\n actions = [path[\"actions\"] for path in paths]\n # if isinstance(actions[0][0], np.ndarray):\n # actions = np.vstack([path[\"actions\"] for path in paths])\n # else:\n # actions = np.hstack([path[\"actions\"] for path in paths])\n statistics.update(\n create_stats_ordered_dict(\n \"Actions\", actions, stat_prefix=stat_prefix, always_show_all_stats=True\n )\n )\n statistics.update(\n create_stats_ordered_dict(\n \"Ep. 
Len.\",\n np.array([len(path[\"terminals\"]) for path in paths]),\n stat_prefix=stat_prefix,\n always_show_all_stats=True,\n )\n )\n statistics[\"Num Paths\"] = len(paths)\n\n return statistics\n\n\ndef get_average_returns(paths, std=False):\n returns = [sum(path[\"rewards\"]) for path in paths]\n if std:\n return np.mean(returns), np.std(returns)\n\n return np.mean(returns)\n\n\ndef create_stats_ordered_dict(\n name,\n data,\n stat_prefix=None,\n always_show_all_stats=False,\n exclude_max_min=False,\n):\n # print('\\n<<<< STAT FOR {} {} >>>>'.format(stat_prefix, name))\n if stat_prefix is not None:\n name = \"{} {}\".format(stat_prefix, name)\n if isinstance(data, Number):\n # print('was a Number')\n return OrderedDict({name: data})\n\n if len(data) == 0:\n return OrderedDict()\n\n if isinstance(data, tuple):\n # print('was a tuple')\n ordered_dict = OrderedDict()\n for number, d in enumerate(data):\n sub_dict = create_stats_ordered_dict(\n \"{0}_{1}\".format(name, number),\n d,\n )\n ordered_dict.update(sub_dict)\n return ordered_dict\n\n if isinstance(data, list):\n # print('was a list')\n try:\n iter(data[0])\n except TypeError:\n pass\n else:\n data = np.concatenate(data)\n\n if isinstance(data, np.ndarray) and data.size == 1 and not always_show_all_stats:\n # print('was a numpy array of data.size==1')\n return OrderedDict({name: float(data)})\n\n # print('was a numpy array NOT of data.size==1')\n stats = OrderedDict(\n [\n (name + \" Mean\", np.mean(data)),\n (name + \" Std\", np.std(data)),\n ]\n )\n if not exclude_max_min:\n stats[name + \" Max\"] = np.max(data)\n stats[name + \" Min\"] = np.min(data)\n return stats\n\n\n# I (Kamyar) will be adding my own eval utils here too\ndef plot_experiment_returns(\n exp_path,\n title,\n save_path,\n column_name=\"Test_Returns_Mean\",\n x_axis_lims=None,\n y_axis_lims=None,\n constraints=None,\n plot_mean=False,\n plot_horizontal_lines_at=None,\n horizontal_lines_names=None,\n):\n \"\"\"\n plots the Test Returns Mean of all the\n \"\"\"\n arr_list = []\n names = []\n\n dir_path = os.path.split(save_path)[0]\n os.makedirs(dir_path, exist_ok=True)\n\n # print(exp_path)\n\n for sub_exp_dir in os.listdir(exp_path):\n try:\n sub_exp_path = os.path.join(exp_path, sub_exp_dir)\n if not os.path.isdir(sub_exp_path):\n continue\n if constraints is not None:\n constraints_satisfied = True\n with open(os.path.join(sub_exp_path, \"variant.json\"), \"r\") as j:\n d = json.load(j)\n for k, v in constraints.items():\n k = k.split(\".\")\n d_v = d[k[0]]\n for sub_k in k[1:]:\n d_v = d_v[sub_k]\n if d_v != v:\n constraints_satisfied = False\n break\n if not constraints_satisfied:\n # for debugging\n # print('\\nconstraints')\n # print(constraints)\n # print('\\nthis dict')\n # print(d)\n continue\n\n csv_full_path = os.path.join(sub_exp_path, \"progress.csv\")\n # print(csv_full_path)\n try:\n progress_csv = np.genfromtxt(\n csv_full_path, skip_header=0, delimiter=\",\", names=True\n )\n # print(progress_csv.dtype)\n if isinstance(column_name, str):\n column_name = [column_name]\n for c_name in column_name:\n if \"+\" in c_name:\n first, second = c_name.split(\"+\")\n returns = progress_csv[first] + progress_csv[second]\n elif \"-\" in c_name:\n first, second = c_name.split(\"-\")\n returns = progress_csv[first] - progress_csv[second]\n else:\n returns = progress_csv[c_name]\n arr_list.append(returns)\n names.append(c_name + \"_\" + sub_exp_dir)\n # print(csv_full_path)\n except:\n pass\n except:\n pass\n\n if plot_mean:\n min_len = min(map(lambda a: 
a.shape[0], arr_list))\n arr_list = list(map(lambda a: a[:min_len], arr_list))\n returns = np.stack(arr_list)\n mean = np.mean(returns, 0)\n std = np.std(returns, 0)\n x = np.arange(min_len)\n # save_plot(x, mean, title, save_path, color='cyan', x_axis_lims=x_axis_lims, y_axis_lims=y_axis_lims)\n plot_returns_on_same_plot(\n [mean, mean + std, mean - std],\n [\"mean\", \"mean+std\", \"mean-std\"],\n title,\n save_path,\n x_axis_lims=x_axis_lims,\n y_axis_lims=y_axis_lims,\n )\n else:\n if len(arr_list) == 0:\n print(0)\n if plot_horizontal_lines_at is not None:\n max_len = max(map(lambda a: a.shape[0], arr_list))\n arr_list += [np.ones(max_len) * y_val for y_val in plot_horizontal_lines_at]\n names += horizontal_lines_names\n try:\n # print(len(arr_list))\n plot_returns_on_same_plot(\n arr_list,\n names,\n title,\n save_path,\n x_axis_lims=x_axis_lims,\n y_axis_lims=y_axis_lims,\n )\n except Exception as e:\n print(\"Failed to plot:\")\n print(arr_list)\n print(title)\n print(exp_path)\n print(constraints)\n # raise e\n" ]
[ [ "numpy.sum", "numpy.ones", "numpy.genfromtxt", "numpy.arange", "numpy.max", "numpy.min", "numpy.stack", "numpy.std", "numpy.concatenate", "numpy.mean" ] ]
shelleyHLX/ai-server
[ "12c4a654a686462b8b725fa0641cc967d2f80e14" ]
[ "model/nlp/topic.py" ]
[ "# -*- coding: utf-8 -*-\n# Author: XuMing <[email protected]>\n# Brief: \nimport operator\nimport os\n\nimport tensorflow as tf\nfrom keras.models import load_model\n\nfrom model.nlp.keras_data_reader import load_dict\nfrom model.nlp.keras_data_reader import pad_sequence\nfrom model.nlp.keras_data_reader import vectorize_words\nfrom utils.io_util import get_logger\n\nlogger = get_logger(__file__)\n\nlabel_revserv_dict = {0: '人类作者',\n 1: '机器作者',\n 2: '机器翻译',\n 3: '自动摘要'}\n\n\nclass Topic(object):\n topic_model = None\n\n def __init__(self, model_path, word_dict_path, maxlen=400):\n self.name = 'topic'\n self.maxlen = maxlen\n # load dict\n pwd_path = os.path.abspath(os.path.dirname(__file__))\n if word_dict_path:\n try:\n self.word_ids_dict = load_dict(word_dict_path)\n except IOError:\n word_dict_path = os.path.join(pwd_path, '../..', word_dict_path)\n self.word_ids_dict = load_dict(word_dict_path)\n\n # load parrots_model by file\n if model_path:\n try:\n self.topic_model = load_model(model_path)\n except IOError:\n model_path = os.path.join(pwd_path, '../..', model_path)\n self.topic_model = load_model(model_path)\n logger.info(\"Load topic model ok, path: \" + model_path)\n # self.topic_model._make_predict_function() # have to initialize before threading\n self.graph = tf.get_default_graph()\n else:\n logger.warn('topic model file is need')\n raise Exception('topic model file need')\n\n @classmethod\n def get_instance(cls, model_path, word_dict_path, maxlen=400):\n if cls.topic_model:\n return cls.topic_model\n else:\n obj = cls(model_path, word_dict_path, maxlen=maxlen)\n cls.topic_model = obj\n return obj\n\n def get_topic(self, text):\n # read data to index\n test_text_words = [list(text)]\n word_ids = vectorize_words(test_text_words, self.word_ids_dict)\n # pad sequence\n word_seq = pad_sequence(word_ids, self.maxlen)\n\n with self.graph.as_default():\n # predict prob\n predict_probs = self.topic_model.predict(word_seq)\n # get prob for one line test text\n probs = predict_probs[0]\n probs_dict = dict((idx, prob) for idx, prob in enumerate(probs))\n probs_order_dict = sorted(probs_dict.items(), key=operator.itemgetter(1), reverse=True)\n return probs_order_dict\n\n def check(self, text):\n \"\"\"\n Args:\n text: 欧洲冠军联赛是欧洲足球协会联盟主办的年度足球比赛\n Returns:\n {\n \"log_id\": 3591049593939822907,\n \"items\": {\n \"lv2_tag_list\": [\n {\n \"score\": 0.877436,\n \"tag\": \"足球\"\n },\n {\n \"score\": 0.793682,\n \"tag\": \"国际足球\"\n },\n {\n \"score\": 0.775911,\n \"tag\": \"英超\"\n }\n ],\n \"lv1_tag_list\": [\n {\n \"score\": 0.824329,\n \"tag\": \"体育\"\n }\n ]\n }\n }\n \"\"\"\n result_dict = {\"text\": text}\n topics = self.get_topic(text)\n items_list = []\n for idx, prob in topics:\n # get top 3\n if len(items_list) > 2:\n continue\n items = dict()\n items[\"score\"] = prob\n items[\"tag\"] = label_revserv_dict[idx]\n items_list.append(items)\n result_dict['items'] = items_list\n return result_dict\n" ]
[ [ "tensorflow.get_default_graph" ] ]
skohtz1/web-scrapingHW
[ "11cf4686286a4fa51ef23a9e0afc5adca21f40c1" ]
[ "scrape_mars.py" ]
[ "from bs4 import BeautifulSoup\nimport requests\nfrom splinter import Browser\nimport pandas as pd\nimport time\n\ndef init_browser():\n # @NOTE: Replace the path with your actual path to the chromedriver\n executable_path = {\"executable_path\": \"./chromedriver\"}\n return Browser(\"chrome\", **executable_path, headless=False)\n\n\ndef scrape():\n browser = init_browser()\n url_nasa = \"https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest\"\n # Retrieve page with the requests module\n response_nasa = requests.get(url_nasa)\n # Create BeautifulSoup object; parse with 'html.parser'\n soup_nasa = BeautifulSoup(response_nasa.text, 'html.parser')\n \n ##finding the title and summary of first article\n results_titles = soup_nasa.find_all('div', class_='content_title')\n summaries = soup_nasa.find_all(\"div\", class_ = \"rollover_description_inner\")\n title_first = results_titles[0].text.strip()\n summaries_first = summaries[0].text.strip()\n \n ##finding feature image url\n url_mars_img = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n browser.visit(url_mars_img)\n browser.click_link_by_partial_text('FULL IMAGE')\n time.sleep(5)\n browser.click_link_by_partial_text('more info')\n time.sleep(5)\n browser.click_link_by_partial_href('spaceimages/images')\n feature_image_url = browser.url\n time.sleep(5)\n \n ##getting the twitter weather\n url_twitter = \"https://twitter.com/marswxreport?lang=en\"\n # Retrieve page with the requests module\n response_twitter = requests.get(url_twitter)\n # Create BeautifulSoup object; parse with 'html.parser'\n soup3 = BeautifulSoup(response_twitter.text, 'html.parser')\n mars_weather = soup3.find_all(\"p\",class_ = \"TweetTextSize TweetTextSize--normal js-tweet-text tweet-text\")[0].text\n \n ##scraping Mars facts\n url_facts = \"https://space-facts.com/mars/\"\n tables = pd.read_html(url_facts)\n df = tables[0]\n df.columns = [\"Parameter\", \"Values\"]\n mars_data_df = df.set_index([\"Parameter\"])\n mars_data_df.to_html(\"mars_facts.html\")\n mars_data_html = mars_data_df.to_html()\n mars_data_html = mars_data_html.replace(\"\\n\", \"\")\n \n \n ##hemisphere\n url_hemis = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n browser.visit(url_hemis)\n time.sleep(5)\n html4 = browser.html\n soup4 = BeautifulSoup(html4, 'html.parser')\n \n links = []\n\n for link in soup4.find_all('a'):\n finds = link.get(\"href\")\n if (\"/search/map/Mars\" in finds):\n links.append(finds)\n \n links = list(set(links))\n \n hemisphere_image_urls = []\n \n for i in range(len(links)):\n dicts1 = {}\n dicts1[\"title\"] = soup4.find_all(\"h3\")[i].text\n browser.click_link_by_partial_text(soup4.find_all(\"h3\")[i].text)\n time.sleep(5)\n n_html = browser.html\n soup5 = BeautifulSoup(n_html, \"html.parser\")\n for link in soup5.find_all(\"a\"):\n finds = link.get(\"href\")\n if (\"/full.jpg\" in finds):\n dicts1[\"img_url\"] = finds\n \n hemisphere_image_urls.append(dicts1)\n browser.back()\n \n \n print(hemisphere_image_urls)\n \n \n mars_data_dict = {\"weather\":mars_weather,\"mars_facts\":mars_data_html,\"hemisphere\":hemisphere_image_urls,\"feature_image\": feature_image_url,\"title_feature\":title_first,\"summary_feature\":summaries_first}\n \n return mars_data_dict\n \n \n\n \n \n \n \n\n" ]
[ [ "pandas.read_html" ] ]
Zosit/Useful-Reusable-Code
[ "e5eab12f1ebcc6f16e456a7515ff8cc068b5ab16" ]
[ "Class Projects/CS545(MachineLearning)/qLearning/qlearn.py" ]
[ "print(__doc__)\n\nimport matplotlib\n# Force matplotlib to not use any Xwindows backend.\nmatplotlib.use('Agg')\n\nimport math\nfrom decimal import *\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\n\nimport pandas as pd\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport csv\n\nfrom random import randint\n\n\n\n#init data\nEPISODE_COUNT = 5000\nACTION_COUNT = 200\ntrainingreward = np.zeros(50)\n\n#init Q array (5 state value dimensions, 5 action dimension)\nQarr = np.zeros((3, 3, 3, 3, 3, 5))\n\n#greedy selection variable (multiplied by 100 for simplicity)\nepsilon = 100\n\n\nfor i in range(0, EPISODE_COUNT):\n\t#init board (0 wall, 1 blank, 2 can)\n\tboard = np.zeros((12, 12))\n\tfor j in range(0, 10):\n\t\tfor k in range(0, 10):\n\t\t\tboard[j+1, k+1] = randint(1, 2)\n\t#init bot location (horizontal 0, vertical 1 from top left)\n\tbotloc = np.zeros(2)\n\tbotloc[0] = randint(1, 10)\n\tbotloc[1] = randint(1, 10)\n\n\tepisodereward = 0\n\tfor j in range(0, ACTION_COUNT):\n\t\t#observestate (self, up, left, right, down)\n\t\tstate = np.zeros(5)\n\t\t#self\n\t\tstate[0] = board[int(botloc[0]), int(botloc[1])]\n\t\tstate[1] = board[int(botloc[0]), int(botloc[1] - 1)]\n\t\tstate[2] = board[int(botloc[0] - 1), int(botloc[1])]\n\t\tstate[3] = board[int(botloc[0] + 1), int(botloc[1])]\n\t\tstate[4] = board[int(botloc[0]), int(botloc[1] + 1)]\n\t\t#greedy action selection\n\t\tif (randint(0, 100) > epsilon):\n\t\t\t#do greedy\n\t\t\trandoma = np.random.choice(np.where(Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :] == Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :].max())[0])\n\t\telse:\n\t\t\t#do random action (0 can, 1 up, 2 left, 3 right, 4 down)\n\t\t\trandoma = randint(0, 4)\n\t\t#save qtable location\n\t\toldq = np.zeros(6)\n\t\toldq[0] = state[0]\n\t\toldq[1] = state[1]\n\t\toldq[2] = state[2]\n\t\toldq[3] = state[3]\n\t\toldq[4] = state[4]\n\t\toldq[5] = randoma\n\t\t#take action get reward\n\t\t\t#can grab\n\t\tif(randoma == 0):\n\t\t\t#can grabbed\n\t\t\tif(state[0] == 2):\n\t\t\t\t#remove can\n\t\t\t\tboard[int(botloc[0]), int(botloc[1])] = 1\n\t\t\t\treward = 10\n\t\t\t#can not grabbed\n\t\t\telse:\n\t\t\t\treward = -1\n\t\t#move up\n\t\tif(randoma == 1):\n\t\t\t#wall\n\t\t\tif(state[1] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[1] = botloc[1] - 1\n\t\t\t\treward = 0\n\t\t#move left\n\t\tif(randoma == 2):\n\t\t\t#wall\n\t\t\tif(state[2] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[0] = botloc[0] - 1\n\t\t\t\treward = 0\n\t\t#move right\n\t\tif(randoma == 3):\n\t\t\t#wall\n\t\t\tif(state[3] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[0] = botloc[0] + 1\n\t\t\t\treward = 0\n\t\t#move down\n\t\tif(randoma == 4):\n\t\t\t#wall\n\t\t\tif(state[4] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[1] = botloc[1] + 1\n\t\t\t\treward = 0\n\t\t#print \"movement data\"\n\t\t#print state\n\t\t#print randoma\n\t\t#updatestate\n\t\tstate = np.zeros(5)\n\t\t#self\n\t\tstate[0] = board[int(botloc[0]), int(botloc[1])]\n\t\tstate[1] = board[int(botloc[0]), int(botloc[1] - 1)]\n\t\tstate[2] = board[int(botloc[0] - 1), int(botloc[1])]\n\t\tstate[3] = board[int(botloc[0] + 1), int(botloc[1])]\n\t\tstate[4] = board[int(botloc[0]), int(botloc[1] + 1)]\n\t\t#calculate best Qtable action value in new state\n\t\tmaxq = 
Qarr[int(state[0]),int(state[1]),int(state[2]),int(state[3]),int(state[4]),:].max()\n\t\t#update Q table\n\t\t#if(oldq[0] == 1 and Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), 0] == Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), :].max()):\n\t\t#\tprint \"ERROR\"\n\t\tQarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])] = Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])] + 0.2 * (reward + 0.5 * maxq - Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])])\n\t\tepisodereward = episodereward + reward\n\t#decrement epsilon\n\tif(i % 50 == 49 and epsilon > 10):\n\t\tepsilon = epsilon - 1\n\tif(i % 100 == 99 ):\n\t\ttrainingreward[(int(i / 100))] = int(episodereward)\n#save Training reward data\n#trainingreward.to_csv('TrainReward.csv')\nnp.savetxt('TrainReward.csv', trainingreward, delimiter=',')\nQold = Qarr\n#Test runs\ntestrewards = np.zeros(EPISODE_COUNT)\nfor i in range(0, EPISODE_COUNT):\n\t#init board (0 wall, 1 blank, 2 can)\n\tboard = np.zeros((12, 12))\n\tfor j in range(0, 10):\n\t\tfor k in range(0, 10):\n\t\t\tboard[j+1, k+1] = randint(1, 2)\n\t#init bot location (horizontal 0, vertical 1 from top left)\n\tbotloc = np.zeros(2)\n\tbotloc[0] = randint(1, 10)\n\tbotloc[1] = randint(1, 10)\n\n\tepisodereward = 0\n\tfor j in range(0, ACTION_COUNT):\n\t\t#observestate (self, up, left, right, down)\n\t\tstate = np.zeros(5)\n\t\t#self\n\t\tstate[0] = board[int(botloc[0]), int(botloc[1])]\n\t\tstate[1] = board[int(botloc[0]), int(botloc[1] - 1)]\n\t\tstate[2] = board[int(botloc[0] - 1), int(botloc[1])]\n\t\tstate[3] = board[int(botloc[0] + 1), int(botloc[1])]\n\t\tstate[4] = board[int(botloc[0]), int(botloc[1] + 1)]\n\t\t#greedy action selection\n\t\trandoma = np.random.choice(np.where(Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :] == Qarr[int(state[0]), int(state[1]), int(state[2]), int(state[3]), int(state[4]), :].max())[0])\n\t\t#save qtable location\n\t\toldq = np.zeros(6)\n\t\toldq[0] = state[0]\n\t\toldq[1] = state[1]\n\t\toldq[2] = state[2]\n\t\toldq[3] = state[3]\n\t\toldq[4] = state[4]\n\t\toldq[5] = randoma\n\t\t#take action get reward\n\t\t\t#can grab\n\t\tif(randoma == 0):\n\t\t\t#can grabbed\n\t\t\tif(state[0] == 2):\n\t\t\t\t#remove can\n\t\t\t\tboard[int(botloc[0]), int(botloc[1])] = 1\n\t\t\t\treward = 10\n\t\t\t#can not grabbed\n\t\t\telse:\n\t\t\t\treward = -1\n\t\t#move up\n\t\telif(randoma == 1):\n\t\t\t#wall\n\t\t\tif(state[1] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[1] = botloc[1] - 1\n\t\t\t\treward = 0\n\t\t#move left\n\t\telif(randoma == 2):\n\t\t\t#wall\n\t\t\tif(state[2] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[0] = botloc[0] - 1\n\t\t\t\treward = 0\n\t\t#move right\n\t\telif(randoma == 3):\n\t\t\t#wall\n\t\t\tif(state[3] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[0] = botloc[0] + 1\n\t\t\t\treward = 0\n\t\t#move down\n\t\telif(randoma == 4):\n\t\t\t#wall\n\t\t\tif(state[4] == 0):\n\t\t\t\treward = -5\n\t\t\t#no wall\n\t\t\telse:\n\t\t\t\tbotloc[1] = botloc[1] + 1\n\t\t\t\treward = 0\n\t\telse:\n\t\t\tprint(\"ERROR\")\n\t\t#print \"movement data\"\n\t\t#print state\n\t\t#print randoma\n\t\t#updatestate\n\t\tstate = np.zeros(5)\n\t\t#self\n\t\tstate[0] = board[int(botloc[0]), int(botloc[1])]\n\t\tstate[1] = board[int(botloc[0]), int(botloc[1] - 1)]\n\t\tstate[2] = 
board[int(botloc[0] - 1), int(botloc[1])]\n\t\tstate[3] = board[int(botloc[0] + 1), int(botloc[1])]\n\t\tstate[4] = board[int(botloc[0]), int(botloc[1] + 1)]\n\t\t#maxq = max(Qarr[int(state[0]),int(state[1]),int(state[2]),int(state[3]),int(state[4]),:])\n\t\t#update Q table\n\t\t#Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])] = Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])] + 0.01 * (reward + 0.9 * maxq - Qarr[int(oldq[0]), int(oldq[1]), int(oldq[2]), int(oldq[3]), int(oldq[4]), int(oldq[5])])\t\n\t\tepisodereward = episodereward + reward\n\ttestrewards[i] = episodereward\n\nprint(np.mean(testrewards))\nprint(np.std(testrewards))\n" ]
[ [ "numpy.zeros", "numpy.savetxt", "matplotlib.use", "numpy.std", "numpy.mean" ] ]
xhades/rates_classify
[ "225627dad22c162023bc6b5e4d8f5881c5a6f354" ]
[ "rates_classify/rdf.py" ]
[ "# !/usr/bin/env python\n# -*-coding:utf-8-*-\n\n\"\"\"\n@author: xhades\n@Date: 2017/12/28\n\n\"\"\"\n\n# 随机森林分类器\n\nimport numpy as np\nfrom numpy import *\nfrom numpy import array, argmax\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nimport pickle\nfrom sklearn.ensemble import RandomForestClassifier as RDF\n\n\nnp.set_printoptions(threshold=np.inf)\n\n\n# 训练集测试集 3/7分割\ndef train(xFile, yFile):\n with open(xFile, \"rb\") as file_r:\n X = pickle.load(file_r)\n\n X = reshape(X, (212841, -1)) # reshape一下 (212841, 30*128)\n\n # 读取label数据,并且encodig\n with open(yFile, \"r\") as yFile_r:\n labelLines = [_.strip(\"\\n\") for _ in yFile_r.readlines()]\n values = array(labelLines)\n labelEncoder = LabelEncoder()\n integerEncoded = labelEncoder.fit_transform(values)\n integerEncoded = integerEncoded.reshape(len(integerEncoded), 1)\n # print(integerEncoded)\n\n # 获得label 编码\n Y = integerEncoded.reshape(212841, )\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=42)\n\n # 随机森林分类器\n clf = RDF(criterion=\"gini\")\n # criterion 可以使用\"gini\"或者\"entropy\",前者代表基尼系数,后者代表信息增益。一般说使用默认的基尼系数\"gini\"就可以了,即CART算法。除非你更喜欢类似ID3, C4.5的最优特征选择方法。\n\n clf.fit(X_train, Y_train)\n\n # 测试数据\n predict = clf.predict(X_test)\n count = 0\n for p, t in zip(predict, Y_test):\n if p == t:\n count += 1\n print(\"RandomForest Accuracy is:\", count/len(Y_test))\n\n\nif __name__ == \"__main__\":\n xFile = \"Res/char_embedded.pkl\"\n yFile = \"data/label.txt\"\n print(\"Start Training.....\")\n train(xFile, yFile)\n print(\"End.....\")\n" ]
[ [ "numpy.set_printoptions", "sklearn.ensemble.RandomForestClassifier", "sklearn.preprocessing.LabelEncoder", "numpy.array", "sklearn.model_selection.train_test_split" ] ]
TomeRozen/IML.HUJI
[ "84b0d835a2a4dd4f52ea415e36382cb25a9eebdc" ]
[ "IMLearn/learners/regressors/linear_regression.py" ]
[ "from __future__ import annotations\nfrom typing import NoReturn\nfrom ...base import BaseEstimator\nimport numpy as np\nfrom numpy.linalg import pinv\n\nclass LinearRegression(BaseEstimator):\n \"\"\"\n Linear Regression Estimator\n\n Solving Ordinary Least Squares optimization problem\n \"\"\"\n\n def __init__(self, include_intercept: bool = True) -> LinearRegression:\n \"\"\"\n Instantiate a linear regression estimator\n\n Parameters\n ----------\n include_intercept: bool, default=True\n Should fitted model include an intercept or not\n\n Attributes\n ----------\n include_intercept_: bool\n Should fitted model include an intercept or not\n\n coefs_: ndarray of shape (n_features,) or (n_features+1,)\n Coefficients vector fitted by linear regression. To be set in\n `LinearRegression.fit` function.\n \"\"\"\n super().__init__()\n self.include_intercept_, self.coefs_ = include_intercept, None\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n Fit Least Squares model to given samples\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n\n Notes\n -----\n Fits model with or without an intercept depending on value of `self.include_intercept_`\n \"\"\"\n if self.include_intercept_:\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n self.coefs = pinv(X)@y\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n \"\"\"\n if self.include_intercept_:\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n return X @ self.coefs\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under MSE loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under MSE loss function\n \"\"\"\n return mean_square_error(y, self.predict(X))\n" ]
[ [ "numpy.linalg.pinv", "numpy.ones" ] ]
xizaoqu/mmdetection3d
[ "1809f9650de95d7bc80035787b09e3b69390b702" ]
[ "mmdet3d/datasets/pipelines/transforms_3d.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport warnings\nfrom mmcv import is_tuple_of\nfrom mmcv.utils import build_from_cfg\n\nfrom mmdet3d.core import VoxelGenerator\nfrom mmdet3d.core.bbox import (CameraInstance3DBoxes, DepthInstance3DBoxes,\n LiDARInstance3DBoxes, box_np_ops)\nfrom mmdet.datasets.builder import PIPELINES\nfrom mmdet.datasets.pipelines import RandomFlip\nfrom ..builder import OBJECTSAMPLERS\nfrom .data_augment_utils import noise_per_object_v3_\n\n\[email protected]_module()\nclass RandomDropPointsColor(object):\n r\"\"\"Randomly set the color of points to all zeros.\n\n Once this transform is executed, all the points' color will be dropped.\n Refer to `PAConv <https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/\n util/transform.py#L223>`_ for more details.\n\n Args:\n drop_ratio (float): The probability of dropping point colors.\n Defaults to 0.2.\n \"\"\"\n\n def __init__(self, drop_ratio=0.2):\n assert isinstance(drop_ratio, (int, float)) and 0 <= drop_ratio <= 1, \\\n f'invalid drop_ratio value {drop_ratio}'\n self.drop_ratio = drop_ratio\n\n def __call__(self, input_dict):\n \"\"\"Call function to drop point colors.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after color dropping, \\\n 'points' key is updated in the result dict.\n \"\"\"\n points = input_dict['points']\n assert points.attribute_dims is not None and \\\n 'color' in points.attribute_dims, \\\n 'Expect points have color attribute'\n\n # this if-expression is a bit strange\n # `RandomDropPointsColor` is used in training 3D segmentor PAConv\n # we discovered in our experiments that, using\n # `if np.random.rand() > 1.0 - self.drop_ratio` consistently leads to\n # better results than using `if np.random.rand() < self.drop_ratio`\n # so we keep this hack in our codebase\n if np.random.rand() > 1.0 - self.drop_ratio:\n points.color = points.color * 0.0\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(drop_ratio={self.drop_ratio})'\n return repr_str\n\n\[email protected]_module()\nclass RandomFlip3D(RandomFlip):\n \"\"\"Flip the points & bbox.\n\n If the input dict contains the key \"flip\", then the flag will be used,\n otherwise it will be randomly decided by a ratio specified in the init\n method.\n\n Args:\n sync_2d (bool, optional): Whether to apply flip according to the 2D\n images. If True, it will apply the same flip as that to 2D images.\n If False, it will decide whether to flip randomly and independently\n to that of 2D images. Defaults to True.\n flip_ratio_bev_horizontal (float, optional): The flipping probability\n in horizontal direction. Defaults to 0.0.\n flip_ratio_bev_vertical (float, optional): The flipping probability\n in vertical direction. 
Defaults to 0.0.\n \"\"\"\n\n def __init__(self,\n sync_2d=True,\n flip_ratio_bev_horizontal=0.0,\n flip_ratio_bev_vertical=0.0,\n **kwargs):\n super(RandomFlip3D, self).__init__(\n flip_ratio=flip_ratio_bev_horizontal, **kwargs)\n self.sync_2d = sync_2d\n self.flip_ratio_bev_vertical = flip_ratio_bev_vertical\n if flip_ratio_bev_horizontal is not None:\n assert isinstance(\n flip_ratio_bev_horizontal,\n (int, float)) and 0 <= flip_ratio_bev_horizontal <= 1\n if flip_ratio_bev_vertical is not None:\n assert isinstance(\n flip_ratio_bev_vertical,\n (int, float)) and 0 <= flip_ratio_bev_vertical <= 1\n\n def random_flip_data_3d(self, input_dict, direction='horizontal'):\n \"\"\"Flip 3D data randomly.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n direction (str): Flip direction. Default: horizontal.\n\n Returns:\n dict: Flipped results, 'points', 'bbox3d_fields' keys are \\\n updated in the result dict.\n \"\"\"\n assert direction in ['horizontal', 'vertical']\n if len(input_dict['bbox3d_fields']) == 0: # test mode\n input_dict['bbox3d_fields'].append('empty_box3d')\n input_dict['empty_box3d'] = input_dict['box_type_3d'](\n np.array([], dtype=np.float32))\n assert len(input_dict['bbox3d_fields']) == 1\n for key in input_dict['bbox3d_fields']:\n if 'points' in input_dict:\n input_dict['points'] = input_dict[key].flip(\n direction, points=input_dict['points'])\n else:\n input_dict[key].flip(direction)\n if 'centers2d' in input_dict:\n assert self.sync_2d is True and direction == 'horizontal', \\\n 'Only support sync_2d=True and horizontal flip with images'\n w = input_dict['ori_shape'][1]\n input_dict['centers2d'][..., 0] = \\\n w - input_dict['centers2d'][..., 0]\n # need to modify the horizontal position of camera center\n # along u-axis in the image (flip like centers2d)\n # ['cam2img'][0][2] = c_u\n # see more details and examples at\n # https://github.com/open-mmlab/mmdetection3d/pull/744\n input_dict['cam2img'][0][2] = w - input_dict['cam2img'][0][2]\n\n def __call__(self, input_dict):\n \"\"\"Call function to flip points, values in the ``bbox3d_fields`` and \\\n also flip 2D image and its annotations.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Flipped results, 'flip', 'flip_direction', \\\n 'pcd_horizontal_flip' and 'pcd_vertical_flip' keys are added \\\n into result dict.\n \"\"\"\n # filp 2D image and its annotations\n super(RandomFlip3D, self).__call__(input_dict)\n\n if self.sync_2d:\n input_dict['pcd_horizontal_flip'] = input_dict['flip']\n input_dict['pcd_vertical_flip'] = False\n else:\n if 'pcd_horizontal_flip' not in input_dict:\n flip_horizontal = True if np.random.rand(\n ) < self.flip_ratio else False\n input_dict['pcd_horizontal_flip'] = flip_horizontal\n if 'pcd_vertical_flip' not in input_dict:\n flip_vertical = True if np.random.rand(\n ) < self.flip_ratio_bev_vertical else False\n input_dict['pcd_vertical_flip'] = flip_vertical\n\n if 'transformation_3d_flow' not in input_dict:\n input_dict['transformation_3d_flow'] = []\n\n if input_dict['pcd_horizontal_flip']:\n self.random_flip_data_3d(input_dict, 'horizontal')\n input_dict['transformation_3d_flow'].extend(['HF'])\n if input_dict['pcd_vertical_flip']:\n self.random_flip_data_3d(input_dict, 'vertical')\n input_dict['transformation_3d_flow'].extend(['VF'])\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(sync_2d={self.sync_2d},'\n repr_str += f' 
flip_ratio_bev_vertical={self.flip_ratio_bev_vertical})'\n return repr_str\n\n\[email protected]_module()\nclass RandomJitterPoints(object):\n \"\"\"Randomly jitter point coordinates.\n\n Different from the global translation in ``GlobalRotScaleTrans``, here we \\\n apply different noises to each point in a scene.\n\n Args:\n jitter_std (list[float]): The standard deviation of jittering noise.\n This applies random noise to all points in a 3D scene, which is \\\n sampled from a gaussian distribution whose standard deviation is \\\n set by ``jitter_std``. Defaults to [0.01, 0.01, 0.01]\n clip_range (list[float] | None): Clip the randomly generated jitter \\\n noise into this range. If None is given, don't perform clipping.\n Defaults to [-0.05, 0.05]\n\n Note:\n This transform should only be used in point cloud segmentation tasks \\\n because we don't transform ground-truth bboxes accordingly.\n For similar transform in detection task, please refer to `ObjectNoise`.\n \"\"\"\n\n def __init__(self,\n jitter_std=[0.01, 0.01, 0.01],\n clip_range=[-0.05, 0.05]):\n seq_types = (list, tuple, np.ndarray)\n if not isinstance(jitter_std, seq_types):\n assert isinstance(jitter_std, (int, float)), \\\n f'unsupported jitter_std type {type(jitter_std)}'\n jitter_std = [jitter_std, jitter_std, jitter_std]\n self.jitter_std = jitter_std\n\n if clip_range is not None:\n if not isinstance(clip_range, seq_types):\n assert isinstance(clip_range, (int, float)), \\\n f'unsupported clip_range type {type(clip_range)}'\n clip_range = [-clip_range, clip_range]\n self.clip_range = clip_range\n\n def __call__(self, input_dict):\n \"\"\"Call function to jitter all the points in the scene.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after adding noise to each point, \\\n 'points' key is updated in the result dict.\n \"\"\"\n points = input_dict['points']\n jitter_std = np.array(self.jitter_std, dtype=np.float32)\n jitter_noise = \\\n np.random.randn(points.shape[0], 3) * jitter_std[None, :]\n if self.clip_range is not None:\n jitter_noise = np.clip(jitter_noise, self.clip_range[0],\n self.clip_range[1])\n\n points.translate(jitter_noise)\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(jitter_std={self.jitter_std},'\n repr_str += f' clip_range={self.clip_range})'\n return repr_str\n\n\[email protected]_module()\nclass ObjectSample(object):\n \"\"\"Sample GT objects to the data.\n\n Args:\n db_sampler (dict): Config dict of the database sampler.\n sample_2d (bool): Whether to also paste 2D image patch to the images\n This should be true when applying multi-modality cut-and-paste.\n Defaults to False.\n \"\"\"\n\n def __init__(self, db_sampler, sample_2d=False):\n self.sampler_cfg = db_sampler\n self.sample_2d = sample_2d\n if 'type' not in db_sampler.keys():\n db_sampler['type'] = 'DataBaseSampler'\n self.db_sampler = build_from_cfg(db_sampler, OBJECTSAMPLERS)\n\n @staticmethod\n def remove_points_in_boxes(points, boxes):\n \"\"\"Remove the points in the sampled bounding boxes.\n\n Args:\n points (:obj:`BasePoints`): Input point cloud array.\n boxes (np.ndarray): Sampled ground truth boxes.\n\n Returns:\n np.ndarray: Points with those in the boxes removed.\n \"\"\"\n masks = box_np_ops.points_in_rbbox(points.coord.numpy(), boxes)\n points = points[np.logical_not(masks.any(-1))]\n return points\n\n def __call__(self, input_dict):\n \"\"\"Call function to sample 
ground truth objects to the data.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after object sampling augmentation, \\\n 'points', 'gt_bboxes_3d', 'gt_labels_3d' keys are updated \\\n in the result dict.\n \"\"\"\n gt_bboxes_3d = input_dict['gt_bboxes_3d']\n gt_labels_3d = input_dict['gt_labels_3d']\n\n # change to float for blending operation\n points = input_dict['points']\n if self.sample_2d:\n img = input_dict['img']\n gt_bboxes_2d = input_dict['gt_bboxes']\n # Assume for now 3D & 2D bboxes are the same\n sampled_dict = self.db_sampler.sample_all(\n gt_bboxes_3d.tensor.numpy(),\n gt_labels_3d,\n gt_bboxes_2d=gt_bboxes_2d,\n img=img)\n else:\n sampled_dict = self.db_sampler.sample_all(\n gt_bboxes_3d.tensor.numpy(), gt_labels_3d, img=None)\n\n if sampled_dict is not None:\n sampled_gt_bboxes_3d = sampled_dict['gt_bboxes_3d']\n sampled_points = sampled_dict['points']\n sampled_gt_labels = sampled_dict['gt_labels_3d']\n\n gt_labels_3d = np.concatenate([gt_labels_3d, sampled_gt_labels],\n axis=0)\n gt_bboxes_3d = gt_bboxes_3d.new_box(\n np.concatenate(\n [gt_bboxes_3d.tensor.numpy(), sampled_gt_bboxes_3d]))\n\n points = self.remove_points_in_boxes(points, sampled_gt_bboxes_3d)\n # check the points dimension\n points = points.cat([sampled_points, points])\n\n if self.sample_2d:\n sampled_gt_bboxes_2d = sampled_dict['gt_bboxes_2d']\n gt_bboxes_2d = np.concatenate(\n [gt_bboxes_2d, sampled_gt_bboxes_2d]).astype(np.float32)\n\n input_dict['gt_bboxes'] = gt_bboxes_2d\n input_dict['img'] = sampled_dict['img']\n\n input_dict['gt_bboxes_3d'] = gt_bboxes_3d\n input_dict['gt_labels_3d'] = gt_labels_3d.astype(np.long)\n input_dict['points'] = points\n\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f' sample_2d={self.sample_2d},'\n repr_str += f' data_root={self.sampler_cfg.data_root},'\n repr_str += f' info_path={self.sampler_cfg.info_path},'\n repr_str += f' rate={self.sampler_cfg.rate},'\n repr_str += f' prepare={self.sampler_cfg.prepare},'\n repr_str += f' classes={self.sampler_cfg.classes},'\n repr_str += f' sample_groups={self.sampler_cfg.sample_groups}'\n return repr_str\n\n\[email protected]_module()\nclass ObjectNoise(object):\n \"\"\"Apply noise to each GT objects in the scene.\n\n Args:\n translation_std (list[float], optional): Standard deviation of the\n distribution where translation noise are sampled from.\n Defaults to [0.25, 0.25, 0.25].\n global_rot_range (list[float], optional): Global rotation to the scene.\n Defaults to [0.0, 0.0].\n rot_range (list[float], optional): Object rotation range.\n Defaults to [-0.15707963267, 0.15707963267].\n num_try (int, optional): Number of times to try if the noise applied is\n invalid. 
Defaults to 100.\n \"\"\"\n\n def __init__(self,\n translation_std=[0.25, 0.25, 0.25],\n global_rot_range=[0.0, 0.0],\n rot_range=[-0.15707963267, 0.15707963267],\n num_try=100):\n self.translation_std = translation_std\n self.global_rot_range = global_rot_range\n self.rot_range = rot_range\n self.num_try = num_try\n\n def __call__(self, input_dict):\n \"\"\"Call function to apply noise to each ground truth in the scene.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after adding noise to each object, \\\n 'points', 'gt_bboxes_3d' keys are updated in the result dict.\n \"\"\"\n gt_bboxes_3d = input_dict['gt_bboxes_3d']\n points = input_dict['points']\n\n # TODO: check this inplace function\n numpy_box = gt_bboxes_3d.tensor.numpy()\n numpy_points = points.tensor.numpy()\n\n noise_per_object_v3_(\n numpy_box,\n numpy_points,\n rotation_perturb=self.rot_range,\n center_noise_std=self.translation_std,\n global_random_rot_range=self.global_rot_range,\n num_try=self.num_try)\n\n input_dict['gt_bboxes_3d'] = gt_bboxes_3d.new_box(numpy_box)\n input_dict['points'] = points.new_point(numpy_points)\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(num_try={self.num_try},'\n repr_str += f' translation_std={self.translation_std},'\n repr_str += f' global_rot_range={self.global_rot_range},'\n repr_str += f' rot_range={self.rot_range})'\n return repr_str\n\n\[email protected]_module()\nclass GlobalAlignment(object):\n \"\"\"Apply global alignment to 3D scene points by rotation and translation.\n\n Args:\n rotation_axis (int): Rotation axis for points and bboxes rotation.\n\n Note:\n We do not record the applied rotation and translation as in \\\n GlobalRotScaleTrans. 
Because usually, we do not need to reverse \\\n the alignment step.\n For example, ScanNet 3D detection task uses aligned ground-truth \\\n bounding boxes for evaluation.\n \"\"\"\n\n def __init__(self, rotation_axis):\n self.rotation_axis = rotation_axis\n\n def _trans_points(self, input_dict, trans_factor):\n \"\"\"Private function to translate points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n trans_factor (np.ndarray): Translation vector to be applied.\n\n Returns:\n dict: Results after translation, 'points' is updated in the dict.\n \"\"\"\n input_dict['points'].translate(trans_factor)\n\n def _rot_points(self, input_dict, rot_mat):\n \"\"\"Private function to rotate bounding boxes and points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n rot_mat (np.ndarray): Rotation matrix to be applied.\n\n Returns:\n dict: Results after rotation, 'points' is updated in the dict.\n \"\"\"\n # input should be rot_mat_T so I transpose it here\n input_dict['points'].rotate(rot_mat.T)\n\n def _check_rot_mat(self, rot_mat):\n \"\"\"Check if rotation matrix is valid for self.rotation_axis.\n\n Args:\n rot_mat (np.ndarray): Rotation matrix to be applied.\n \"\"\"\n is_valid = np.allclose(np.linalg.det(rot_mat), 1.0)\n valid_array = np.zeros(3)\n valid_array[self.rotation_axis] = 1.0\n is_valid &= (rot_mat[self.rotation_axis, :] == valid_array).all()\n is_valid &= (rot_mat[:, self.rotation_axis] == valid_array).all()\n assert is_valid, f'invalid rotation matrix {rot_mat}'\n\n def __call__(self, input_dict):\n \"\"\"Call function to shuffle points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after global alignment, 'points' and keys in \\\n input_dict['bbox3d_fields'] are updated in the result dict.\n \"\"\"\n assert 'axis_align_matrix' in input_dict['ann_info'].keys(), \\\n 'axis_align_matrix is not provided in GlobalAlignment'\n\n axis_align_matrix = input_dict['ann_info']['axis_align_matrix']\n assert axis_align_matrix.shape == (4, 4), \\\n f'invalid shape {axis_align_matrix.shape} for axis_align_matrix'\n rot_mat = axis_align_matrix[:3, :3]\n trans_vec = axis_align_matrix[:3, -1]\n\n self._check_rot_mat(rot_mat)\n self._rot_points(input_dict, rot_mat)\n self._trans_points(input_dict, trans_vec)\n\n return input_dict\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(rotation_axis={self.rotation_axis})'\n return repr_str\n\n\[email protected]_module()\nclass GlobalRotScaleTrans(object):\n \"\"\"Apply global rotation, scaling and translation to a 3D scene.\n\n Args:\n rot_range (list[float]): Range of rotation angle.\n Defaults to [-0.78539816, 0.78539816] (close to [-pi/4, pi/4]).\n scale_ratio_range (list[float]): Range of scale ratio.\n Defaults to [0.95, 1.05].\n translation_std (list[float]): The standard deviation of translation\n noise. This applies random translation to a scene by a noise, which\n is sampled from a gaussian distribution whose standard deviation\n is set by ``translation_std``. 
Defaults to [0, 0, 0]\n shift_height (bool): Whether to shift height.\n (the fourth dimension of indoor points) when scaling.\n Defaults to False.\n \"\"\"\n\n def __init__(self,\n rot_range=[-0.78539816, 0.78539816],\n scale_ratio_range=[0.95, 1.05],\n translation_std=[0, 0, 0],\n shift_height=False):\n seq_types = (list, tuple, np.ndarray)\n if not isinstance(rot_range, seq_types):\n assert isinstance(rot_range, (int, float)), \\\n f'unsupported rot_range type {type(rot_range)}'\n rot_range = [-rot_range, rot_range]\n self.rot_range = rot_range\n\n assert isinstance(scale_ratio_range, seq_types), \\\n f'unsupported scale_ratio_range type {type(scale_ratio_range)}'\n self.scale_ratio_range = scale_ratio_range\n\n if not isinstance(translation_std, seq_types):\n assert isinstance(translation_std, (int, float)), \\\n f'unsupported translation_std type {type(translation_std)}'\n translation_std = [\n translation_std, translation_std, translation_std\n ]\n assert all([std >= 0 for std in translation_std]), \\\n 'translation_std should be positive'\n self.translation_std = translation_std\n self.shift_height = shift_height\n\n def _trans_bbox_points(self, input_dict):\n \"\"\"Private function to translate bounding boxes and points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after translation, 'points', 'pcd_trans' \\\n and keys in input_dict['bbox3d_fields'] are updated \\\n in the result dict.\n \"\"\"\n translation_std = np.array(self.translation_std, dtype=np.float32)\n trans_factor = np.random.normal(scale=translation_std, size=3).T\n\n input_dict['points'].translate(trans_factor)\n input_dict['pcd_trans'] = trans_factor\n for key in input_dict['bbox3d_fields']:\n input_dict[key].translate(trans_factor)\n\n def _rot_bbox_points(self, input_dict):\n \"\"\"Private function to rotate bounding boxes and points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after rotation, 'points', 'pcd_rotation' \\\n and keys in input_dict['bbox3d_fields'] are updated \\\n in the result dict.\n \"\"\"\n rotation = self.rot_range\n noise_rotation = np.random.uniform(rotation[0], rotation[1])\n\n # if no bbox in input_dict, only rotate points\n if len(input_dict['bbox3d_fields']) == 0:\n rot_mat_T = input_dict['points'].rotate(noise_rotation)\n input_dict['pcd_rotation'] = rot_mat_T\n return\n\n # rotate points with bboxes\n for key in input_dict['bbox3d_fields']:\n if len(input_dict[key].tensor) != 0:\n points, rot_mat_T = input_dict[key].rotate(\n noise_rotation, input_dict['points'])\n input_dict['points'] = points\n input_dict['pcd_rotation'] = rot_mat_T\n\n def _scale_bbox_points(self, input_dict):\n \"\"\"Private function to scale bounding boxes and points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after scaling, 'points'and keys in \\\n input_dict['bbox3d_fields'] are updated in the result dict.\n \"\"\"\n scale = input_dict['pcd_scale_factor']\n points = input_dict['points']\n points.scale(scale)\n if self.shift_height:\n assert 'height' in points.attribute_dims.keys(), \\\n 'setting shift_height=True but points have no height attribute'\n points.tensor[:, points.attribute_dims['height']] *= scale\n input_dict['points'] = points\n\n for key in input_dict['bbox3d_fields']:\n input_dict[key].scale(scale)\n\n def _random_scale(self, input_dict):\n \"\"\"Private function to randomly set the scale factor.\n\n Args:\n input_dict (dict): Result dict from 
loading pipeline.\n\n Returns:\n dict: Results after scaling, 'pcd_scale_factor' are updated \\\n in the result dict.\n \"\"\"\n scale_factor = np.random.uniform(self.scale_ratio_range[0],\n self.scale_ratio_range[1])\n input_dict['pcd_scale_factor'] = scale_factor\n\n def __call__(self, input_dict):\n \"\"\"Private function to rotate, scale and translate bounding boxes and \\\n points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after scaling, 'points', 'pcd_rotation',\n 'pcd_scale_factor', 'pcd_trans' and keys in \\\n input_dict['bbox3d_fields'] are updated in the result dict.\n \"\"\"\n if 'transformation_3d_flow' not in input_dict:\n input_dict['transformation_3d_flow'] = []\n\n self._rot_bbox_points(input_dict)\n\n if 'pcd_scale_factor' not in input_dict:\n self._random_scale(input_dict)\n self._scale_bbox_points(input_dict)\n\n self._trans_bbox_points(input_dict)\n\n input_dict['transformation_3d_flow'].extend(['R', 'S', 'T'])\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(rot_range={self.rot_range},'\n repr_str += f' scale_ratio_range={self.scale_ratio_range},'\n repr_str += f' translation_std={self.translation_std},'\n repr_str += f' shift_height={self.shift_height})'\n return repr_str\n\[email protected]_module()\nclass RotFlipScaleTrans(object):\n def __init__(self,\n scale_ratio_range=[0.95, 1.05],\n translation_std=[0, 0, 0],\n #TODO\n ):\n seq_types = (list, tuple, np.ndarray)\n if not isinstance(rot_range, seq_types):\n assert isinstance(rot_range, (int, float)), \\\n f'unsupported rot_range type {type(rot_range)}'\n rot_range = [-rot_range, rot_range]\n self.rot_range = rot_range\n\n assert isinstance(scale_ratio_range, seq_types), \\\n f'unsupported scale_ratio_range type {type(scale_ratio_range)}'\n self.scale_ratio_range = scale_ratio_range\n\n if not isinstance(translation_std, seq_types):\n assert isinstance(translation_std, (int, float)), \\\n f'unsupported translation_std type {type(translation_std)}'\n translation_std = [\n translation_std, translation_std, translation_std\n ]\n assert all([std >= 0 for std in translation_std]), \\\n 'translation_std should be positive'\n self.translation_std = translation_std\n self.shift_height = shift_height\n\n def _trans_bbox_points(self, input_dict):\n \"\"\"Private function to translate bounding boxes and points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after translation, 'points', 'pcd_trans' \\\n and keys in input_dict['bbox3d_fields'] are updated \\\n in the result dict.\n \"\"\"\n translation_std = np.array(self.translation_std, dtype=np.float32)\n trans_factor = np.random.normal(scale=translation_std, size=3).T\n\n input_dict['points'].translate(trans_factor)\n input_dict['pcd_trans'] = trans_factor\n for key in input_dict['bbox3d_fields']:\n input_dict[key].translate(trans_factor)\n\n def _rot_bbox_points(self, input_dict):\n \"\"\"Private function to rotate bounding boxes and points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after rotation, 'points', 'pcd_rotation' \\\n and keys in input_dict['bbox3d_fields'] are updated \\\n in the result dict.\n \"\"\"\n rotation = self.rot_range\n noise_rotation = np.random.uniform(rotation[0], rotation[1])\n\n # if no bbox in input_dict, only rotate points\n if len(input_dict['bbox3d_fields']) == 0:\n rot_mat_T = 
input_dict['points'].rotate(noise_rotation)\n input_dict['pcd_rotation'] = rot_mat_T\n return\n\n # rotate points with bboxes\n for key in input_dict['bbox3d_fields']:\n if len(input_dict[key].tensor) != 0:\n points, rot_mat_T = input_dict[key].rotate(\n noise_rotation, input_dict['points'])\n input_dict['points'] = points\n input_dict['pcd_rotation'] = rot_mat_T\n\n def _scale_bbox_points(self, input_dict):\n \"\"\"Private function to scale bounding boxes and points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after scaling, 'points'and keys in \\\n input_dict['bbox3d_fields'] are updated in the result dict.\n \"\"\"\n scale = input_dict['pcd_scale_factor']\n points = input_dict['points']\n points.scale(scale)\n if self.shift_height:\n assert 'height' in points.attribute_dims.keys(), \\\n 'setting shift_height=True but points have no height attribute'\n points.tensor[:, points.attribute_dims['height']] *= scale\n input_dict['points'] = points\n\n for key in input_dict['bbox3d_fields']:\n input_dict[key].scale(scale)\n\n def _random_scale(self, input_dict):\n \"\"\"Private function to randomly set the scale factor.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after scaling, 'pcd_scale_factor' are updated \\\n in the result dict.\n \"\"\"\n scale_factor = np.random.uniform(self.scale_ratio_range[0],\n self.scale_ratio_range[1])\n input_dict['pcd_scale_factor'] = scale_factor\n\n def __call__(self, input_dict):\n \"\"\"Private function to rotate, scale and translate bounding boxes and \\\n points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after scaling, 'points', 'pcd_rotation',\n 'pcd_scale_factor', 'pcd_trans' and keys in \\\n input_dict['bbox3d_fields'] are updated in the result dict.\n \"\"\"\n if 'transformation_3d_flow' not in input_dict:\n input_dict['transformation_3d_flow'] = []\n\n self._rot_bbox_points(input_dict)\n\n if 'pcd_scale_factor' not in input_dict:\n self._random_scale(input_dict)\n self._scale_bbox_points(input_dict)\n\n self._trans_bbox_points(input_dict)\n\n input_dict['transformation_3d_flow'].extend(['R', 'S', 'T'])\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(rot_range={self.rot_range},'\n repr_str += f' scale_ratio_range={self.scale_ratio_range},'\n repr_str += f' translation_std={self.translation_std},'\n repr_str += f' shift_height={self.shift_height})'\n return repr_str\n\[email protected]_module()\nclass PointShuffle(object):\n \"\"\"Shuffle input points.\"\"\"\n\n def __call__(self, input_dict):\n \"\"\"Call function to shuffle points.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after filtering, 'points', 'pts_instance_mask' \\\n and 'pts_semantic_mask' keys are updated in the result dict.\n \"\"\"\n idx = input_dict['points'].shuffle()\n idx = idx.numpy()\n\n pts_instance_mask = input_dict.get('pts_instance_mask', None)\n pts_semantic_mask = input_dict.get('pts_semantic_mask', None)\n\n if pts_instance_mask is not None:\n input_dict['pts_instance_mask'] = pts_instance_mask[idx]\n\n if pts_semantic_mask is not None:\n input_dict['pts_semantic_mask'] = pts_semantic_mask[idx]\n\n return input_dict\n\n def __repr__(self):\n return self.__class__.__name__\n\n\[email protected]_module()\nclass ObjectRangeFilter(object):\n \"\"\"Filter objects by the 
range.\n\n Args:\n point_cloud_range (list[float]): Point cloud range.\n \"\"\"\n\n def __init__(self, point_cloud_range):\n self.pcd_range = np.array(point_cloud_range, dtype=np.float32)\n\n def __call__(self, input_dict):\n \"\"\"Call function to filter objects by the range.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' \\\n keys are updated in the result dict.\n \"\"\"\n # Check points instance type and initialise bev_range\n if isinstance(input_dict['gt_bboxes_3d'],\n (LiDARInstance3DBoxes, DepthInstance3DBoxes)):\n bev_range = self.pcd_range[[0, 1, 3, 4]]\n elif isinstance(input_dict['gt_bboxes_3d'], CameraInstance3DBoxes):\n bev_range = self.pcd_range[[0, 2, 3, 5]]\n\n gt_bboxes_3d = input_dict['gt_bboxes_3d']\n gt_labels_3d = input_dict['gt_labels_3d']\n mask = gt_bboxes_3d.in_range_bev(bev_range)\n gt_bboxes_3d = gt_bboxes_3d[mask]\n # mask is a torch tensor but gt_labels_3d is still numpy array\n # using mask to index gt_labels_3d will cause bug when\n # len(gt_labels_3d) == 1, where mask=1 will be interpreted\n # as gt_labels_3d[1] and cause out of index error\n gt_labels_3d = gt_labels_3d[mask.numpy().astype(np.bool)]\n\n # limit rad to [-pi, pi]\n gt_bboxes_3d.limit_yaw(offset=0.5, period=2 * np.pi)\n input_dict['gt_bboxes_3d'] = gt_bboxes_3d\n input_dict['gt_labels_3d'] = gt_labels_3d\n\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(point_cloud_range={self.pcd_range.tolist()})'\n return repr_str\n\n\[email protected]_module()\nclass PointsRangeFilter(object):\n \"\"\"Filter points by the range.\n\n Args:\n point_cloud_range (list[float]): Point cloud range.\n \"\"\"\n\n def __init__(self, point_cloud_range):\n self.pcd_range = np.array(point_cloud_range, dtype=np.float32)\n\n def __call__(self, input_dict):\n \"\"\"Call function to filter points by the range.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after filtering, 'points', 'pts_instance_mask' \\\n and 'pts_semantic_mask' keys are updated in the result dict.\n \"\"\"\n points = input_dict['points']\n points_mask = points.in_range_3d(self.pcd_range)\n clean_points = points[points_mask]\n input_dict['points'] = clean_points\n points_mask = points_mask.numpy()\n\n pts_instance_mask = input_dict.get('pts_instance_mask', None)\n pts_semantic_mask = input_dict.get('pts_semantic_mask', None)\n\n if pts_instance_mask is not None:\n input_dict['pts_instance_mask'] = pts_instance_mask[points_mask]\n\n if pts_semantic_mask is not None:\n input_dict['pts_semantic_mask'] = pts_semantic_mask[points_mask]\n\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(point_cloud_range={self.pcd_range.tolist()})'\n return repr_str\n\n\[email protected]_module()\nclass ObjectNameFilter(object):\n \"\"\"Filter GT objects by their names.\n\n Args:\n classes (list[str]): List of class names to be kept for training.\n \"\"\"\n\n def __init__(self, classes):\n self.classes = classes\n self.labels = list(range(len(self.classes)))\n\n def __call__(self, input_dict):\n \"\"\"Call function to filter objects by their names.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' \\\n keys are updated in the 
result dict.\n \"\"\"\n gt_labels_3d = input_dict['gt_labels_3d']\n gt_bboxes_mask = np.array([n in self.labels for n in gt_labels_3d],\n dtype=np.bool_)\n input_dict['gt_bboxes_3d'] = input_dict['gt_bboxes_3d'][gt_bboxes_mask]\n input_dict['gt_labels_3d'] = input_dict['gt_labels_3d'][gt_bboxes_mask]\n\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(classes={self.classes})'\n return repr_str\n\n\[email protected]_module()\nclass PointSample(object):\n \"\"\"Point sample.\n\n Sampling data to a certain number.\n\n Args:\n num_points (int): Number of points to be sampled.\n sample_range (float, optional): The range where to sample points.\n If not None, the points with depth larger than `sample_range` are\n prior to be sampled. Defaults to None.\n replace (bool, optional): Whether the sampling is with or without\n replacement. Defaults to False.\n \"\"\"\n\n def __init__(self, num_points, sample_range=None, replace=False):\n self.num_points = num_points\n self.sample_range = sample_range\n self.replace = replace\n\n def _points_random_sampling(self,\n points,\n num_samples,\n sample_range=None,\n replace=False,\n return_choices=False):\n \"\"\"Points random sampling.\n\n Sample points to a certain number.\n\n Args:\n points (np.ndarray | :obj:`BasePoints`): 3D Points.\n num_samples (int): Number of samples to be sampled.\n sample_range (float, optional): Indicating the range where the\n points will be sampled. Defaults to None.\n replace (bool, optional): Sampling with or without replacement.\n Defaults to None.\n return_choices (bool, optional): Whether return choice.\n Defaults to False.\n Returns:\n tuple[np.ndarray] | np.ndarray:\n - points (np.ndarray | :obj:`BasePoints`): 3D Points.\n - choices (np.ndarray, optional): The generated random samples.\n \"\"\"\n if not replace:\n replace = (points.shape[0] < num_samples)\n point_range = range(len(points))\n if sample_range is not None and not replace:\n # Only sampling the near points when len(points) >= num_samples\n depth = np.linalg.norm(points.tensor, axis=1)\n far_inds = np.where(depth > sample_range)[0]\n near_inds = np.where(depth <= sample_range)[0]\n # in case there are too many far points\n if len(far_inds) > num_samples:\n far_inds = np.random.choice(\n far_inds, num_samples, replace=False)\n point_range = near_inds\n num_samples -= len(far_inds)\n choices = np.random.choice(point_range, num_samples, replace=replace)\n if sample_range is not None and not replace:\n choices = np.concatenate((far_inds, choices))\n # Shuffle points after sampling\n np.random.shuffle(choices)\n if return_choices:\n return points[choices], choices\n else:\n return points[choices]\n\n def __call__(self, results):\n \"\"\"Call function to sample points to in indoor scenes.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n Returns:\n dict: Results after sampling, 'points', 'pts_instance_mask' \\\n and 'pts_semantic_mask' keys are updated in the result dict.\n \"\"\"\n points = results['points']\n # Points in Camera coord can provide the depth information.\n # TODO: Need to suport distance-based sampling for other coord system.\n if self.sample_range is not None:\n from mmdet3d.core.points import CameraPoints\n assert isinstance(points, CameraPoints), \\\n 'Sampling based on distance is only appliable for CAMERA coord'\n points, choices = self._points_random_sampling(\n points,\n self.num_points,\n self.sample_range,\n 
self.replace,\n return_choices=True)\n results['points'] = points\n\n pts_instance_mask = results.get('pts_instance_mask', None)\n pts_semantic_mask = results.get('pts_semantic_mask', None)\n\n if pts_instance_mask is not None:\n pts_instance_mask = pts_instance_mask[choices]\n results['pts_instance_mask'] = pts_instance_mask\n\n if pts_semantic_mask is not None:\n pts_semantic_mask = pts_semantic_mask[choices]\n results['pts_semantic_mask'] = pts_semantic_mask\n\n return results\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(num_points={self.num_points},'\n repr_str += f' sample_range={self.sample_range},'\n repr_str += f' replace={self.replace})'\n\n return repr_str\n\n\[email protected]_module()\nclass IndoorPointSample(PointSample):\n \"\"\"Indoor point sample.\n\n Sampling data to a certain number.\n NOTE: IndoorPointSample is deprecated in favor of PointSample\n\n Args:\n num_points (int): Number of points to be sampled.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n warnings.warn(\n 'IndoorPointSample is deprecated in favor of PointSample')\n super(IndoorPointSample, self).__init__(*args, **kwargs)\n\n\[email protected]_module()\nclass IndoorPatchPointSample(object):\n r\"\"\"Indoor point sample within a patch. Modified from `PointNet++ <https://\n github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py>`_.\n\n Sampling data to a certain number for semantic segmentation.\n\n Args:\n num_points (int): Number of points to be sampled.\n block_size (float, optional): Size of a block to sample points from.\n Defaults to 1.5.\n sample_rate (float, optional): Stride used in sliding patch generation.\n This parameter is unused in `IndoorPatchPointSample` and thus has\n been deprecated. We plan to remove it in the future.\n Defaults to None.\n ignore_index (int, optional): Label index that won't be used for the\n segmentation task. This is set in PointSegClassMapping as neg_cls.\n If not None, will be used as a patch selection criterion.\n Defaults to None.\n use_normalized_coord (bool, optional): Whether to use normalized xyz as\n additional features. Defaults to False.\n num_try (int, optional): Number of times to try if the patch selected\n is invalid. Defaults to 10.\n enlarge_size (float | None, optional): Enlarge the sampled patch to\n [-block_size / 2 - enlarge_size, block_size / 2 + enlarge_size] as\n an augmentation. If None, set it as 0. Defaults to 0.2.\n min_unique_num (int | None, optional): Minimum number of unique points\n the sampled patch should contain. If None, use PointNet++'s method\n to judge uniqueness. Defaults to None.\n eps (float, optional): A value added to patch boundary to guarantee\n points coverage. Defaults to 1e-2.\n\n Note:\n This transform should only be used in the training process of point\n cloud segmentation tasks. 
For the sliding patch generation and\n inference process in testing, please refer to the `slide_inference`\n function of `EncoderDecoder3D` class.\n \"\"\"\n\n def __init__(self,\n num_points,\n block_size=1.5,\n sample_rate=None,\n ignore_index=None,\n use_normalized_coord=False,\n num_try=10,\n enlarge_size=0.2,\n min_unique_num=None,\n eps=1e-2):\n self.num_points = num_points\n self.block_size = block_size\n self.ignore_index = ignore_index\n self.use_normalized_coord = use_normalized_coord\n self.num_try = num_try\n self.enlarge_size = enlarge_size if enlarge_size is not None else 0.0\n self.min_unique_num = min_unique_num\n self.eps = eps\n\n if sample_rate is not None:\n warnings.warn(\n \"'sample_rate' has been deprecated and will be removed in \"\n 'the future. Please remove them from your code.')\n\n def _input_generation(self, coords, patch_center, coord_max, attributes,\n attribute_dims, point_type):\n \"\"\"Generating model input.\n\n Generate input by subtracting patch center and adding additional \\\n features. Currently support colors and normalized xyz as features.\n\n Args:\n coords (np.ndarray): Sampled 3D Points.\n patch_center (np.ndarray): Center coordinate of the selected patch.\n coord_max (np.ndarray): Max coordinate of all 3D Points.\n attributes (np.ndarray): features of input points.\n attribute_dims (dict): Dictionary to indicate the meaning of extra\n dimension.\n point_type (type): class of input points inherited from BasePoints.\n\n Returns:\n :obj:`BasePoints`: The generated input data.\n \"\"\"\n # subtract patch center, the z dimension is not centered\n centered_coords = coords.copy()\n centered_coords[:, 0] -= patch_center[0]\n centered_coords[:, 1] -= patch_center[1]\n\n if self.use_normalized_coord:\n normalized_coord = coords / coord_max\n attributes = np.concatenate([attributes, normalized_coord], axis=1)\n if attribute_dims is None:\n attribute_dims = dict()\n attribute_dims.update(\n dict(normalized_coord=[\n attributes.shape[1], attributes.shape[1] +\n 1, attributes.shape[1] + 2\n ]))\n\n points = np.concatenate([centered_coords, attributes], axis=1)\n points = point_type(\n points, points_dim=points.shape[1], attribute_dims=attribute_dims)\n\n return points\n\n def _patch_points_sampling(self, points, sem_mask):\n \"\"\"Patch points sampling.\n\n First sample a valid patch.\n Then sample points within that patch to a certain number.\n\n Args:\n points (:obj:`BasePoints`): 3D Points.\n sem_mask (np.ndarray): semantic segmentation mask for input points.\n\n Returns:\n tuple[:obj:`BasePoints`, np.ndarray] | :obj:`BasePoints`:\n\n - points (:obj:`BasePoints`): 3D Points.\n - choices (np.ndarray): The generated random samples.\n \"\"\"\n coords = points.coord.numpy()\n attributes = points.tensor[:, 3:].numpy()\n attribute_dims = points.attribute_dims\n point_type = type(points)\n\n coord_max = np.amax(coords, axis=0)\n coord_min = np.amin(coords, axis=0)\n\n for _ in range(self.num_try):\n # random sample a point as patch center\n cur_center = coords[np.random.choice(coords.shape[0])]\n\n # boundary of a patch, which would be enlarged by\n # `self.enlarge_size` as an augmentation\n cur_max = cur_center + np.array(\n [self.block_size / 2.0, self.block_size / 2.0, 0.0])\n cur_min = cur_center - np.array(\n [self.block_size / 2.0, self.block_size / 2.0, 0.0])\n cur_max[2] = coord_max[2]\n cur_min[2] = coord_min[2]\n cur_choice = np.sum(\n (coords >= (cur_min - self.enlarge_size)) *\n (coords <= (cur_max + self.enlarge_size)),\n axis=1) == 3\n\n if not 
cur_choice.any(): # no points in this patch\n continue\n\n cur_coords = coords[cur_choice, :]\n cur_sem_mask = sem_mask[cur_choice]\n point_idxs = np.where(cur_choice)[0]\n mask = np.sum(\n (cur_coords >= (cur_min - self.eps)) * (cur_coords <=\n (cur_max + self.eps)),\n axis=1) == 3\n\n # two criteria for patch sampling, adopted from PointNet++\n # 1. selected patch should contain enough unique points\n if self.min_unique_num is None:\n # use PointNet++'s method as default\n # [31, 31, 62] are just some big values used to transform\n # coords from 3d array to 1d and then check their uniqueness\n # this is used in all the ScanNet code following PointNet++\n vidx = np.ceil(\n (cur_coords[mask, :] - cur_min) / (cur_max - cur_min) *\n np.array([31.0, 31.0, 62.0]))\n vidx = np.unique(vidx[:, 0] * 31.0 * 62.0 + vidx[:, 1] * 62.0 +\n vidx[:, 2])\n flag1 = len(vidx) / 31.0 / 31.0 / 62.0 >= 0.02\n else:\n # if `min_unique_num` is provided, directly compare with it\n flag1 = mask.sum() >= self.min_unique_num\n\n # 2. selected patch should contain enough annotated points\n if self.ignore_index is None:\n flag2 = True\n else:\n flag2 = np.sum(cur_sem_mask != self.ignore_index) / \\\n len(cur_sem_mask) >= 0.7\n\n if flag1 and flag2:\n break\n\n # sample idx to `self.num_points`\n if point_idxs.size >= self.num_points:\n # no duplicate in sub-sampling\n choices = np.random.choice(\n point_idxs, self.num_points, replace=False)\n else:\n # do not use random choice here to avoid some points not counted\n dup = np.random.choice(point_idxs.size,\n self.num_points - point_idxs.size)\n idx_dup = np.concatenate(\n [np.arange(point_idxs.size),\n np.array(dup)], 0)\n choices = point_idxs[idx_dup]\n\n # construct model input\n points = self._input_generation(coords[choices], cur_center, coord_max,\n attributes[choices], attribute_dims,\n point_type)\n\n return points, choices\n\n def __call__(self, results):\n \"\"\"Call function to sample points to in indoor scenes.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after sampling, 'points', 'pts_instance_mask' \\\n and 'pts_semantic_mask' keys are updated in the result dict.\n \"\"\"\n points = results['points']\n\n assert 'pts_semantic_mask' in results.keys(), \\\n 'semantic mask should be provided in training and evaluation'\n pts_semantic_mask = results['pts_semantic_mask']\n\n points, choices = self._patch_points_sampling(points,\n pts_semantic_mask)\n\n results['points'] = points\n results['pts_semantic_mask'] = pts_semantic_mask[choices]\n pts_instance_mask = results.get('pts_instance_mask', None)\n if pts_instance_mask is not None:\n results['pts_instance_mask'] = pts_instance_mask[choices]\n\n return results\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(num_points={self.num_points},'\n repr_str += f' block_size={self.block_size},'\n repr_str += f' ignore_index={self.ignore_index},'\n repr_str += f' use_normalized_coord={self.use_normalized_coord},'\n repr_str += f' num_try={self.num_try},'\n repr_str += f' enlarge_size={self.enlarge_size},'\n repr_str += f' min_unique_num={self.min_unique_num},'\n repr_str += f' eps={self.eps})'\n return repr_str\n\n\[email protected]_module()\nclass BackgroundPointsFilter(object):\n \"\"\"Filter background points near the bounding box.\n\n Args:\n bbox_enlarge_range (tuple[float], float): Bbox enlarge range.\n \"\"\"\n\n def __init__(self, bbox_enlarge_range):\n assert 
(is_tuple_of(bbox_enlarge_range, float)\n and len(bbox_enlarge_range) == 3) \\\n or isinstance(bbox_enlarge_range, float), \\\n f'Invalid arguments bbox_enlarge_range {bbox_enlarge_range}'\n\n if isinstance(bbox_enlarge_range, float):\n bbox_enlarge_range = [bbox_enlarge_range] * 3\n self.bbox_enlarge_range = np.array(\n bbox_enlarge_range, dtype=np.float32)[np.newaxis, :]\n\n def __call__(self, input_dict):\n \"\"\"Call function to filter points by the range.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after filtering, 'points', 'pts_instance_mask' \\\n and 'pts_semantic_mask' keys are updated in the result dict.\n \"\"\"\n points = input_dict['points']\n gt_bboxes_3d = input_dict['gt_bboxes_3d']\n\n # avoid groundtruth being modified\n gt_bboxes_3d_np = gt_bboxes_3d.tensor.clone().numpy()\n gt_bboxes_3d_np[:, :3] = gt_bboxes_3d.gravity_center.clone().numpy()\n\n enlarged_gt_bboxes_3d = gt_bboxes_3d_np.copy()\n enlarged_gt_bboxes_3d[:, 3:6] += self.bbox_enlarge_range\n points_numpy = points.tensor.clone().numpy()\n foreground_masks = box_np_ops.points_in_rbbox(\n points_numpy, gt_bboxes_3d_np, origin=(0.5, 0.5, 0.5))\n enlarge_foreground_masks = box_np_ops.points_in_rbbox(\n points_numpy, enlarged_gt_bboxes_3d, origin=(0.5, 0.5, 0.5))\n foreground_masks = foreground_masks.max(1)\n enlarge_foreground_masks = enlarge_foreground_masks.max(1)\n valid_masks = ~np.logical_and(~foreground_masks,\n enlarge_foreground_masks)\n\n input_dict['points'] = points[valid_masks]\n pts_instance_mask = input_dict.get('pts_instance_mask', None)\n if pts_instance_mask is not None:\n input_dict['pts_instance_mask'] = pts_instance_mask[valid_masks]\n\n pts_semantic_mask = input_dict.get('pts_semantic_mask', None)\n if pts_semantic_mask is not None:\n input_dict['pts_semantic_mask'] = pts_semantic_mask[valid_masks]\n return input_dict\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n repr_str = self.__class__.__name__\n repr_str += f'(bbox_enlarge_range={self.bbox_enlarge_range.tolist()})'\n return repr_str\n\n\[email protected]_module()\nclass VoxelBasedPointSampler(object):\n \"\"\"Voxel based point sampler.\n\n Apply voxel sampling to multiple sweep points.\n\n Args:\n cur_sweep_cfg (dict): Config for sampling current points.\n prev_sweep_cfg (dict): Config for sampling previous points.\n time_dim (int): Index that indicate the time dimention\n for input points.\n \"\"\"\n\n def __init__(self, cur_sweep_cfg, prev_sweep_cfg=None, time_dim=3):\n self.cur_voxel_generator = VoxelGenerator(**cur_sweep_cfg)\n self.cur_voxel_num = self.cur_voxel_generator._max_voxels\n self.time_dim = time_dim\n if prev_sweep_cfg is not None:\n assert prev_sweep_cfg['max_num_points'] == \\\n cur_sweep_cfg['max_num_points']\n self.prev_voxel_generator = VoxelGenerator(**prev_sweep_cfg)\n self.prev_voxel_num = self.prev_voxel_generator._max_voxels\n else:\n self.prev_voxel_generator = None\n self.prev_voxel_num = 0\n\n def _sample_points(self, points, sampler, point_dim):\n \"\"\"Sample points for each points subset.\n\n Args:\n points (np.ndarray): Points subset to be sampled.\n sampler (VoxelGenerator): Voxel based sampler for\n each points subset.\n point_dim (int): The dimention of each points\n\n Returns:\n np.ndarray: Sampled points.\n \"\"\"\n voxels, coors, num_points_per_voxel = sampler.generate(points)\n if voxels.shape[0] < sampler._max_voxels:\n padding_points = np.zeros([\n sampler._max_voxels - voxels.shape[0], 
sampler._max_num_points,\n point_dim\n ],\n dtype=points.dtype)\n padding_points[:] = voxels[0]\n sample_points = np.concatenate([voxels, padding_points], axis=0)\n else:\n sample_points = voxels\n\n return sample_points\n\n def __call__(self, results):\n \"\"\"Call function to sample points from multiple sweeps.\n\n Args:\n input_dict (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Results after sampling, 'points', 'pts_instance_mask' \\\n and 'pts_semantic_mask' keys are updated in the result dict.\n \"\"\"\n points = results['points']\n original_dim = points.shape[1]\n\n # TODO: process instance and semantic mask while _max_num_points\n # is larger than 1\n # Extend points with seg and mask fields\n map_fields2dim = []\n start_dim = original_dim\n points_numpy = points.tensor.numpy()\n extra_channel = [points_numpy]\n for idx, key in enumerate(results['pts_mask_fields']):\n map_fields2dim.append((key, idx + start_dim))\n extra_channel.append(results[key][..., None])\n\n start_dim += len(results['pts_mask_fields'])\n for idx, key in enumerate(results['pts_seg_fields']):\n map_fields2dim.append((key, idx + start_dim))\n extra_channel.append(results[key][..., None])\n\n points_numpy = np.concatenate(extra_channel, axis=-1)\n\n # Split points into two part, current sweep points and\n # previous sweeps points.\n # TODO: support different sampling methods for next sweeps points\n # and previous sweeps points.\n cur_points_flag = (points_numpy[:, self.time_dim] == 0)\n cur_sweep_points = points_numpy[cur_points_flag]\n prev_sweeps_points = points_numpy[~cur_points_flag]\n if prev_sweeps_points.shape[0] == 0:\n prev_sweeps_points = cur_sweep_points\n\n # Shuffle points before sampling\n np.random.shuffle(cur_sweep_points)\n np.random.shuffle(prev_sweeps_points)\n\n cur_sweep_points = self._sample_points(cur_sweep_points,\n self.cur_voxel_generator,\n points_numpy.shape[1])\n if self.prev_voxel_generator is not None:\n prev_sweeps_points = self._sample_points(prev_sweeps_points,\n self.prev_voxel_generator,\n points_numpy.shape[1])\n\n points_numpy = np.concatenate(\n [cur_sweep_points, prev_sweeps_points], 0)\n else:\n points_numpy = cur_sweep_points\n\n if self.cur_voxel_generator._max_num_points == 1:\n points_numpy = points_numpy.squeeze(1)\n results['points'] = points.new_point(points_numpy[..., :original_dim])\n\n # Restore the correspoinding seg and mask fields\n for key, dim_index in map_fields2dim:\n results[key] = points_numpy[..., dim_index]\n\n return results\n\n def __repr__(self):\n \"\"\"str: Return a string that describes the module.\"\"\"\n\n def _auto_indent(repr_str, indent):\n repr_str = repr_str.split('\\n')\n repr_str = [' ' * indent + t + '\\n' for t in repr_str]\n repr_str = ''.join(repr_str)[:-1]\n return repr_str\n\n repr_str = self.__class__.__name__\n indent = 4\n repr_str += '(\\n'\n repr_str += ' ' * indent + f'num_cur_sweep={self.cur_voxel_num},\\n'\n repr_str += ' ' * indent + f'num_prev_sweep={self.prev_voxel_num},\\n'\n repr_str += ' ' * indent + f'time_dim={self.time_dim},\\n'\n repr_str += ' ' * indent + 'cur_voxel_generator=\\n'\n repr_str += f'{_auto_indent(repr(self.cur_voxel_generator), 8)},\\n'\n repr_str += ' ' * indent + 'prev_voxel_generator=\\n'\n repr_str += f'{_auto_indent(repr(self.prev_voxel_generator), 8)})'\n return repr_str\n" ]
[ [ "numpy.sum", "numpy.amax", "numpy.logical_and", "numpy.random.choice", "numpy.random.rand", "numpy.where", "numpy.unique", "numpy.random.uniform", "numpy.zeros", "numpy.linalg.det", "numpy.arange", "numpy.array", "numpy.linalg.norm", "numpy.random.shuffle", "numpy.random.randn", "numpy.amin", "numpy.clip", "numpy.random.normal", "numpy.concatenate" ] ]
AidenPearce7/python-tictactoe
[ "1f5aaaca87bfb8487a1366b4bc2bd567df8feb5e" ]
[ "src/opencv_backend/ui.py" ]
[ "\"\"\"UI class\"\"\"\nimport cv2 as cv\nimport numpy as np\n\n\nclass UI:\n \"\"\"Handles UI drawing and managing\"\"\"\n\n def __init__(self, frame):\n height, width, channels = frame.shape\n self.width = width\n self.height = height\n self.separators = {\n \"y\": (0, height // 3, 2 * height // 3),\n \"x\": (0, width // 3, 2 * width // 3),\n }\n self.figure = np.zeros((height, width, channels), dtype=np.uint8)\n self.grid_drawn = False\n\n def draw_grid(self, color=(255, 0, 0), thickness=9):\n \"\"\"Draws a 3 by 3 grid on the frame\"\"\"\n if not self.grid_drawn:\n for i in range(1, 3):\n startpoint_height = (0, self.separators[\"y\"][i])\n startpoint_width = (self.separators[\"x\"][i], 0)\n endpoint_height = (self.width, self.separators[\"y\"][i])\n endpoint_width = (self.separators[\"x\"][i], self.height)\n self.figure = cv.line(\n self.figure, startpoint_height, endpoint_height, color, thickness\n )\n self.figure = cv.line(\n self.figure, startpoint_width, endpoint_width, color, thickness\n )\n self.grid_drawn = True\n\n def _draw_x(self, x, y, color, thickness):\n \"\"\"Draws X on the selected grid marker.\\n\n location should be a tuple with two numbers indicating place on the grid\"\"\"\n width_offset = self.separators[\"x\"][1] * 0.25\n height_offset = self.separators[\"y\"][1] * 0.25\n\n left = int(self.separators[\"x\"][x] + width_offset)\n up = int(self.separators[\"y\"][y] + height_offset)\n right = int(self.separators[\"x\"][x] + width_offset * 3)\n down = int(self.separators[\"y\"][y] + height_offset * 3)\n self.figure = cv.line(self.figure, (left, up), (right, down), color, thickness)\n self.figure = cv.line(self.figure, (left, down), (right, up), color, thickness)\n\n def _draw_circle(self, x, y, color, thickness):\n \"\"\"Draws circle on the selected grid marker.\\n\n location should be a tuple with two numbers indicating place on the grid\"\"\"\n width_offset = self.separators[\"x\"][1] * 0.5\n height_offset = self.separators[\"y\"][1] * 0.5\n center = (\n int(self.separators[\"x\"][x] + width_offset),\n int(self.separators[\"y\"][y] + height_offset),\n )\n radius = int(height_offset * 0.75)\n self.figure = cv.circle(self.figure, center, radius, color, thickness)\n\n def draw_move(self, coords, color=(0, 0, 255), thickness=7):\n \"\"\"Draws a shape based on the coordinate object\"\"\"\n if coords.symbol == \"x\":\n self._draw_x(coords.x, coords.y, color, thickness)\n else:\n self._draw_circle(coords.x, coords.y, color, thickness)\n\n def get_separators(self):\n \"\"\"Returns the separators used for the processing\"\"\"\n return self.separators\n\n def overlay(self, frame):\n \"\"\"Returns the frame with added figure array\"\"\"\n return cv.add(frame, self.figure)\n" ]
[ [ "numpy.zeros" ] ]
gabemery/gammapy
[ "99e5c5d38e4920dddd7bca41fb1539ccda8bea2d" ]
[ "gammapy/astro/population/tests/test_simulate.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom numpy.testing import assert_allclose\nfrom astropy.table import Table\nimport astropy.units as u\nfrom ....utils.testing import requires_dependency\nfrom ...population import (\n make_base_catalog_galactic,\n make_catalog_random_positions_cube,\n make_catalog_random_positions_sphere,\n add_snr_parameters,\n add_pulsar_parameters,\n add_pwn_parameters,\n add_observed_parameters,\n add_observed_source_parameters,\n)\n\n\ndef test_make_catalog_random_positions_cube():\n size = 100\n table = make_catalog_random_positions_cube(size=size)\n assert len(table) == size\n\n\ndef test_make_catalog_random_positions_sphere():\n size = 100\n table = make_catalog_random_positions_sphere(size=size,\n center='Milky Way')\n assert len(table) == size\n\n\ndef test_make_base_catalog_galactic():\n \"\"\"Test that make_base_catalog_galactic uses random_state correctly.\n\n Calling with a given seed should always give the same output.\n\n Regression test for https://github.com/gammapy/gammapy/issues/959\n \"\"\"\n table = make_base_catalog_galactic(n_sources=10, random_state=0)\n assert len(table) == 10\n assert table.colnames == [\n 'age', 'n_ISM', 'spiralarm',\n 'x_birth', 'y_birth', 'z_birth',\n 'x', 'y', 'z',\n 'vx', 'vy', 'vz', 'v_abs',\n ]\n\n d = table[0]\n\n assert_allclose(d['age'], 548813.50392732478)\n assert_allclose(d['n_ISM'], 1.0)\n assert d['spiralarm'] == 'Crux Scutum'\n\n assert_allclose(d['x_birth'], 0.58513884292018437)\n assert_allclose(d['y_birth'], -11.682838052120154)\n assert_allclose(d['z_birth'], 0.15710279448905115)\n assert_allclose(d['x'], 0.5828226720259867)\n assert_allclose(d['y'], -11.658959390801584)\n assert_allclose(d['z'], 0.35098629652725671)\n assert_allclose(d['vx'], -4.1266001441394655)\n assert_allclose(d['vy'], 42.543357869627776)\n assert_allclose(d['vz'], 345.43206179709432)\n assert_allclose(d['v_abs'], 348.06648135803658)\n\n\ndef test_add_observed_parameters():\n table = make_base_catalog_galactic(n_sources=10, random_state=0)\n table = add_observed_parameters(table)\n\n assert len(table) == 10\n assert set(table.colnames).issuperset([\n 'distance', 'GLON', 'GLAT', 'VGLON', 'VGLAT', 'RA', 'DEC',\n ])\n\n d = table[0]\n\n assert_allclose(d['distance'], 3231.392591455106)\n assert_allclose(d['GLON'], 169.54657778189639)\n assert_allclose(d['GLAT'], 6.2356357665816162)\n assert_allclose(d['VGLON'], 0.066778795313076678)\n assert_allclose(d['VGLAT'], 5.6115948931932174)\n assert_allclose(d['RA'], 86.308826288823127)\n assert_allclose(d['DEC'], 41.090120056648828)\n\n\ndef test_add_snr_parameters():\n table = Table()\n table['age'] = [100, 1000] * u.yr\n table['n_ISM'] = u.Quantity(1, 'cm-3')\n\n table = add_snr_parameters(table)\n\n assert len(table) == 2\n assert table.colnames == ['age', 'n_ISM', 'E_SN', 'r_out', 'r_in', 'L_SNR']\n\n assert_allclose(table['E_SN'], 1e51)\n assert_allclose(table['r_out'], [1, 3.80730787743])\n assert_allclose(table['r_in'], [0.9086, 3.45931993743])\n assert_allclose(table['L_SNR'], [0, 1.0768e+33])\n\n\ndef test_add_pulsar_parameters():\n table = Table()\n table['age'] = [100, 1000] * u.yr\n\n table = add_pulsar_parameters(table, random_state=0)\n\n assert len(table) == 2\n assert table.colnames == ['age', 'P0', 'P1', 'P0_birth', 'P1_birth', 'CharAge',\n 'Tau0', 'L_PSR', 'L0_PSR', 'logB']\n\n assert_allclose(table['P0'], [0.322829453422, 0.51352778881])\n 
assert_allclose(table['P1'], [4.54295751161e-14, 6.98423128444e-13])\n assert_allclose(table['P0_birth'], [0.322254715288, 0.388110930459])\n assert_allclose(table['P1_birth'], [4.55105983192e-14, 9.24116423053e-13])\n assert_allclose(table['CharAge'], [2.32368825638e-22, 5.6826197937e-21])\n assert_allclose(table['Tau0'], [112189.64476, 6654.19039158])\n assert_allclose(table['L_PSR'], [5.37834069771e+34, 8.25708734631e+35])\n assert_allclose(table['L0_PSR'], [5.36876555682e+34, 6.24049160082e+35])\n assert_allclose(table['logB'], [12.5883058913, 13.2824912596])\n\n\n@requires_dependency('scipy')\ndef test_add_pwn_parameters():\n table = make_base_catalog_galactic(n_sources=10, random_state=0)\n # To compute PWN parameters we need PSR and SNR parameters first\n table = add_snr_parameters(table)\n table = add_pulsar_parameters(table, random_state=0)\n table = add_pwn_parameters(table)\n assert len(table) == 10\n\n d = table[0]\n assert_allclose(d['r_out_PWN'], 0.5892196771927385, atol=1e-3)\n assert_allclose(d['L_PWN'], 7.057857699785925e+45)\n\n\n@requires_dependency('scipy')\ndef test_chain_all():\n \"\"\"\n Test that running the simulation functions in chain works\n \"\"\"\n table = make_base_catalog_galactic(n_sources=10, random_state=0)\n table = add_snr_parameters(table)\n table = add_pulsar_parameters(table, random_state=0)\n table = add_pwn_parameters(table)\n table = add_observed_parameters(table)\n table = add_observed_source_parameters(table)\n\n # Note: the individual functions are tested above.\n # Here we just run them in a chain and do very basic asserts\n # on the output so that we make sure we notice changes.\n assert len(table) == 10\n assert len(table.colnames) == 43\n d = table[0]\n assert_allclose(d['r_out_PWN'], 0.5892196771927385, atol=1e-3)\n assert_allclose(d['RA'], 86.308826288823127)\n" ]
[ [ "numpy.testing.assert_allclose" ] ]
joelfrederico/mytools
[ "7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f" ]
[ "scisalt/matplotlib/plot.py" ]
[ "import os as _os\n_on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'\nif not _on_rtd:\n import matplotlib.pyplot as _plt\n import numpy as _np\n\nfrom .setup_axes import setup_axes as _setup_axes\n\n\ndef plot(*args, ax=None, **kwargs):\n \"\"\"\n Plots but automatically resizes x axis.\n\n .. versionadded:: 1.4\n\n Parameters\n ----------\n args\n Passed on to :meth:`matplotlib.axis.Axis.plot`.\n ax : :class:`matplotlib.axis.Axis`, optional\n The axis to plot to.\n kwargs\n Passed on to :meth:`matplotlib.axis.Axis.plot`.\n\n \"\"\"\n if ax is None:\n fig, ax = _setup_axes()\n\n pl = ax.plot(*args, **kwargs)\n\n if _np.shape(args)[0] > 1:\n if type(args[1]) is not str:\n min_x = min(args[0])\n max_x = max(args[0])\n ax.set_xlim((min_x, max_x))\n\n return pl\n" ]
[ [ "numpy.shape" ] ]
purnendu91/allennlp
[ "7bdc142f3fba9b4b751be4de51299858613f134f" ]
[ "allennlp/data/fields/sequence_label_field.py" ]
[ "from typing import Dict, List, Union, Set\nimport logging\n\nfrom overrides import overrides\nimport torch\nfrom torch.autograd import Variable\n\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.common.util import pad_sequence_to_length\nfrom allennlp.data.fields.field import Field\nfrom allennlp.data.fields.sequence_field import SequenceField\nfrom allennlp.data.vocabulary import Vocabulary\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\nclass SequenceLabelField(Field[torch.Tensor]):\n \"\"\"\n A ``SequenceLabelField`` assigns a categorical label to each element in a\n :class:`~allennlp.data.fields.sequence_field.SequenceField`.\n Because it's a labeling of some other field, we take that field as input here, and we use it to\n determine our padding and other things.\n\n This field will get converted into a list of integer class ids, representing the correct class\n for each element in the sequence.\n\n Parameters\n ----------\n labels : ``Union[List[str], List[int]]``\n A sequence of categorical labels, encoded as strings or integers. These could be POS tags like [NN,\n JJ, ...], BIO tags like [B-PERS, I-PERS, O, O, ...], or any other categorical tag sequence. If the\n labels are encoded as integers, they will not be indexed using a vocab.\n sequence_field : ``SequenceField``\n A field containing the sequence that this ``SequenceLabelField`` is labeling. Most often, this is a\n ``TextField``, for tagging individual tokens in a sentence.\n label_namespace : ``str``, optional (default='labels')\n The namespace to use for converting tag strings into integers. We convert tag strings to\n integers for you, and this parameter tells the ``Vocabulary`` object which mapping from\n strings to integers to use (so that \"O\" as a tag doesn't get the same id as \"O\" as a word).\n \"\"\"\n # It is possible that users want to use this field with a namespace which uses OOV/PAD tokens.\n # This warning will be repeated for every instantiation of this class (i.e for every data\n # instance), spewing a lot of warnings so this class variable is used to only log a single\n # warning per namespace.\n _already_warned_namespaces: Set[str] = set()\n\n def __init__(self,\n labels: Union[List[str], List[int]],\n sequence_field: SequenceField,\n label_namespace: str = 'labels') -> None:\n self.labels = labels\n self.sequence_field = sequence_field\n self._label_namespace = label_namespace\n self._indexed_labels = None\n self._maybe_warn_for_namespace(label_namespace)\n if len(labels) != sequence_field.sequence_length():\n raise ConfigurationError(\"Label length and sequence length \"\n \"don't match: %d and %d\" % (len(labels), sequence_field.sequence_length()))\n\n if all([isinstance(x, int) for x in labels]):\n self._indexed_labels = labels\n\n elif not all([isinstance(x, str) for x in labels]):\n raise ConfigurationError(\"SequenceLabelFields must be passed either all \"\n \"strings or all ints. Found labels {} with \"\n \"types: {}.\".format(labels, [type(x) for x in labels]))\n\n def _maybe_warn_for_namespace(self, label_namespace: str) -> None:\n if not (self._label_namespace.endswith(\"labels\") or self._label_namespace.endswith(\"tags\")):\n if label_namespace not in self._already_warned_namespaces:\n logger.warning(\"Your label namespace was '%s'. We recommend you use a namespace \"\n \"ending with 'labels' or 'tags', so we don't add UNK and PAD tokens by \"\n \"default to your vocabulary. 
See documentation for \"\n \"`non_padded_namespaces` parameter in Vocabulary.\",\n self._label_namespace)\n self._already_warned_namespaces.add(label_namespace)\n\n @overrides\n def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):\n if self._indexed_labels is None:\n for label in self.labels:\n counter[self._label_namespace][label] += 1 # type: ignore\n\n @overrides\n def index(self, vocab: Vocabulary):\n if self._indexed_labels is None:\n self._indexed_labels = [vocab.get_token_index(label, self._label_namespace) # type: ignore\n for label in self.labels]\n\n @overrides\n def get_padding_lengths(self) -> Dict[str, int]:\n return {'num_tokens': self.sequence_field.sequence_length()}\n\n @overrides\n def as_tensor(self,\n padding_lengths: Dict[str, int],\n cuda_device: int = -1,\n for_training: bool = True) -> torch.Tensor:\n desired_num_tokens = padding_lengths['num_tokens']\n padded_tags = pad_sequence_to_length(self._indexed_labels, desired_num_tokens)\n tensor = Variable(torch.LongTensor(padded_tags), volatile=not for_training)\n return tensor if cuda_device == -1 else tensor.cuda(cuda_device)\n\n @overrides\n def empty_field(self): # pylint: disable=no-self-use\n # pylint: disable=protected-access\n sequence_label_field = SequenceLabelField([], self.sequence_field.empty_field())\n sequence_label_field._indexed_labels = []\n return sequence_label_field\n" ]
[ [ "torch.LongTensor" ] ]
sssssch/jupyter-examples
[ "cf9e26e22dcfa263bcd26323527911cdbcc2cd61" ]
[ "Project_google_task_usage/task_uasge_500_preprocess/data_inverse.py" ]
[ "# -*-coding:utf-8-*-\nimport pandas as pd\nfrom numpy import *\n\ndataset = pd.read_csv(\n 'test_data.csv', header=None)\ndataset = round(dataset, 8)\nList_data = mat(dataset)\nInverse = List_data.T\nprint(Inverse)\nname = [\n 'cpu',\n 'cmui',\n 'amui',\n 'upcmui',\n 'tpcmui',\n 'mmui',\n 'mditi',\n 'mldsui',\n 'mcui',\n 'scui'\n]\n\ntest = pd.DataFrame(columns=name, data=Inverse)\ntest.to_csv('test_data_inversed_bycode.csv', encoding='gbk', header=None)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
fredmontet/timeatlas
[ "9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e" ]
[ "src/timeatlas/time_series/component_handler.py" ]
[ "from typing import List, Union, NoReturn\nfrom copy import deepcopy, copy\n\nfrom pandas import Index\n\nfrom .component import Component\n\n\nclass ComponentHandler:\n \"\"\" Helper class to manage many components\n\n The purpose of this class is to make the management of components in a\n time series as simple as possible, with one or many components.\n\n The underlying data structure is a simple list where component are stored.\n \"\"\"\n\n def __init__(self, components: Union[List[Component], Component] = None):\n if isinstance(components, Component):\n components = [components]\n self.components = components if components is not None else []\n\n def __getitem__(self, item: Union[int, str, List[int], List[str]]):\n # handler[0]\n if isinstance(item, int):\n new_components = self.components[item]\n # handler[\"0_foo\"]\n elif isinstance(item, str):\n new_components = self.get_component_by_name(item)\n\n elif isinstance(item, list):\n\n # handler[[0,3,5]]\n if all(isinstance(i, int) for i in item):\n new_components = [self.components[i] for i in item]\n\n # handler[[\"0_foo\",\"1_bar\"]]\n elif all(isinstance(i, str) for i in item):\n new_components = [self.get_component_by_name(i_n)\n for i_n in item]\n else:\n raise TypeError(f\"ComponentHandler list indices must be int or \"\n f\"str, not {type(item)}\")\n else:\n raise TypeError(f\"ComponentHandler indices must be int, str or list,\"\n f\" not {type(item)}\")\n\n return ComponentHandler(new_components)\n\n def __delitem__(self, key: Union[int, str]) -> NoReturn:\n \"\"\" Delete an item from the ComponentHandler\n\n Args:\n key: int or str of the item to delete\n \"\"\"\n if isinstance(key, int):\n del self.components[key]\n elif isinstance(key, str):\n i = self.get_component_id_by_name(key)\n del self.components[i]\n\n def __len__(self) -> int:\n \"\"\" Get the number of item in the ComponentHandler\n\n Returns:\n int\n \"\"\"\n return len(self.components)\n\n def __str__(self):\n \"\"\" get the str representation of a ComponentHandler\n\n Returns:\n str\n \"\"\"\n return str(self.get_columns().to_list())\n\n def append(self, component: Component) -> NoReturn:\n \"\"\" Append a Component to the ComponentHandler\n\n Args:\n component: Component to append\n \"\"\"\n self.components.append(component)\n\n def clear(self):\n \"\"\" Removes all Components from the ComponentHandler\n \"\"\"\n self.components.clear()\n\n def get_component_id_by_name(self, name: str) -> int:\n \"\"\" Get a Component ID by its name\n\n Args:\n name: str of the name of the Component, including the ID (lol)\n e.g. \"0_temperature\"\n\n Returns:\n int\n \"\"\"\n for i, c in enumerate(self.get_columns().to_list()):\n if name == c:\n return i\n # if no component are found throughout the for loop\n raise KeyError(f\"Component with name '{name}' does not exist.\")\n\n def get_component_by_name(self, name: str):\n \"\"\" Get a Component by its name\n\n Args:\n name: str of the name of the Component, including the ID\n e.g. 
\"0_temperature\"\n\n Returns:\n Component\n \"\"\"\n for i, c in enumerate(self.components):\n component_name = self.__format_main_series(i, c.get_main())\n if name == component_name:\n return c\n raise KeyError(f\"Component with name '{name}' does not exist.\")\n\n def get_column_by_id(self, index: int) -> Index:\n \"\"\" Get a the name of a column by its Component ID\n\n Get Pandas Index of a Component from the ComponentHandler by its\n positional identifier\n\n Args:\n index: int of the index of the component in the ComponentHandler\n with_meta: bool to include or not meta series in the return value\n\n Returns:\n Pandas Index of the names of the component\n \"\"\"\n c = self.components[index]\n cols = [self.__format_main_series(index, c.get_main())]\n return Index(cols)\n\n def get_column_by_name(self, name: str) -> Index:\n \"\"\" Get the name of a column by its Component name\n\n Args:\n name: str if the name of the component in the ComponentHandler\n e.g: \"0_temperature\"\n\n Returns:\n Pandas Index of the names of the component\n \"\"\"\n for i, c in enumerate(self.get_columns().to_list()):\n if name == c:\n return self.get_column_by_id(i)\n # if no component are found throughout the for loop\n raise KeyError(f\"Component with name '{name}' does not exist.\")\n\n def get_columns(self) -> Index:\n \"\"\" Get names of all the Components columns\n\n Get Pandas Index of a Component from the ComponentHandler by its\n positional identifier\n\n Args:\n index: int of the index of the component in the ComponentHandler\n\n Returns:\n Pandas Index of the names of the component\n \"\"\"\n cols = []\n for i, c in enumerate(self.components):\n cols.extend(self.get_column_by_id(i).to_list())\n return Index(cols)\n\n def copy(self, deep=True) -> 'ComponentHandler':\n \"\"\" Copy function, deep by default\n\n Args:\n deep: bool if deep copy or not\n\n Returns:\n ComponentHandler\n \"\"\"\n return deepcopy(self) if deep else copy(self)\n\n @staticmethod\n def __format_main_series(index: int, value: Union[str, list]):\n \"\"\" Format a main series name\n\n Args:\n index: int of the position of the main series\n value: list with the main series name\n\n Returns:\n list with the formatted str of the series\n \"\"\"\n if isinstance(value, str):\n return f\"{index}_{value}\"\n elif isinstance(value, list):\n return [f\"{index}_{v}\" for v in value]\n else:\n TypeError(f\"Type {value} isn't accepted\")\n" ]
[ [ "pandas.Index" ] ]
cyente/OFA
[ "291a0abb76559a6379f1a7ebbdfdf1350c94a9f4" ]
[ "data/rec_data/rec_nextitem_dataset.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom io import BytesIO\n\nimport logging\nimport warnings\nimport string\n\nimport numpy as np\nimport torch\nimport base64\nfrom torchvision import transforms\n\nfrom PIL import Image, ImageFile\n\nfrom data import data_utils\nfrom data.ofa_dataset import OFADataset\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\nImageFile.MAX_IMAGE_PIXELS = None\nImage.MAX_IMAGE_PIXELS = None\n\nlogger = logging.getLogger(__name__)\nwarnings.filterwarnings(\"ignore\", \"(Possibly )?corrupt EXIF data\", UserWarning)\n\nIMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)\nIMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)\n\n\ndef collate(samples, pad_idx, eos_idx):\n if len(samples) == 0:\n return {}\n\n def merge(key):\n return data_utils.collate_tokens(\n [s[key] for s in samples],\n pad_idx,\n eos_idx=eos_idx,\n )\n\n id = np.array([s[\"id\"] for s in samples])\n src_tokens = merge(\"source\")\n src_lengths = torch.LongTensor([s[\"source\"].ne(pad_idx).long().sum() for s in samples])\n\n # patch_images = torch.stack([sample['patch_image'] for sample in samples], dim=0)\n # patch_masks = torch.cat([sample['patch_mask'] for sample in samples])\n\n prev_output_tokens = None\n target = None\n if samples[0].get(\"target\", None) is not None:\n target = merge(\"target\")\n tgt_lengths = torch.LongTensor([s[\"target\"].ne(pad_idx).long().sum() for s in samples])\n ntokens = tgt_lengths.sum().item()\n\n if samples[0].get(\"prev_output_tokens\", None) is not None:\n prev_output_tokens = merge(\"prev_output_tokens\")\n else:\n ntokens = src_lengths.sum().item()\n\n batch = {\n \"id\": id,\n \"nsentences\": len(samples),\n \"ntokens\": ntokens,\n \"net_input\": {\n \"src_tokens\": src_tokens,\n \"src_lengths\": src_lengths,\n # \"patch_images\": patch_images,\n # \"patch_masks\": patch_masks,\n \"prev_output_tokens\": prev_output_tokens\n },\n \"target\": target,\n }\n\n return batch\n\n\nclass Rec_nextitemDataset(OFADataset):\n def __init__(\n self,\n split,\n dataset,\n bpe,\n src_dict,\n tgt_dict=None,\n max_src_length=128,\n max_tgt_length=30,\n # patch_image_size=224,\n # imagenet_default_mean_and_std=False,\n scst=False\n ):\n super().__init__(split, dataset, bpe, src_dict, tgt_dict)\n self.max_src_length = max_src_length\n self.max_tgt_length = max_tgt_length\n # self.patch_image_size = patch_image_size\n self.scst = scst\n\n self.transtab = str.maketrans({key: None for key in string.punctuation})\n\n # if imagenet_default_mean_and_std:\n # mean = IMAGENET_DEFAULT_MEAN\n # std = IMAGENET_DEFAULT_STD\n # else:\n # mean = [0.5, 0.5, 0.5]\n # std = [0.5, 0.5, 0.5]\n\n # self.patch_resize_transform = transforms.Compose([\n # lambda image: image.convert(\"RGB\"),\n # transforms.Resize((patch_image_size, patch_image_size), interpolation=Image.BICUBIC),\n # transforms.ToTensor(),\n # transforms.Normalize(mean=mean, std=std),\n # ])\n print(\"self.max_tgt_length\", self.max_tgt_length)\n\n def __getitem__(self, index):\n uniq_id, user_behavior, target_item, rating = self.dataset[index]\n\n while target_item.translate(self.transtab).strip() == \"\":\n uniq_id, user_behavior, target_item, rating = self.dataset[index]\n\n if len(user_behavior) >= self.max_src_length - 20:\n user_behavior = user_behavior[:self.max_src_length - 20]\n if user_behavior[-1] != \",\":\n user_behavior = ','.join(user_behavior.split(\",\")[:-1])\n else:\n user_behavior = 
user_behavior[:-1]\n\n if self.split == 'train' and not self.scst:\n target_item = target_item.translate(self.transtab).strip()\n target_item_token_list = target_item.strip().split(\" \")\n tgt_explain = ' '.join(target_item_token_list[:self.max_tgt_length])\n else:\n target_item = ' '.join(target_item.strip().split(\" \")[:self.max_tgt_length])\n target_item_list = [target_item.translate(self.transtab).strip() for explain in target_item.strip().split('&&')]\n tgt_explain = '&&'.join(target_item_list)\n\n print(\"user_behavior\", user_behavior)\n src_text = \"If you liked \" + user_behavior + \\\n \", you will also like \"\n\n assert len(src_text.split(\" \")) <= self.max_src_length\n src_item = self.encode_text(src_text)\n tgt_item = self.encode_text(\" {}\".format(tgt_explain))\n\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n example = {\n \"id\": uniq_id,\n \"source\": src_item,\n # \"patch_image\": patch_image,\n # \"patch_mask\": patch_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item\n }\n return example\n\n def collater(self, samples, pad_to_length=None):\n \"\"\"Merge a list of samples to form a mini-batch.\n Args:\n samples (List[dict]): samples to collate\n Returns:\n dict: a mini-batch with the following keys:\n \"\"\"\n return collate(samples, pad_idx=self.pad, eos_idx=self.eos)\n\n\n def __getitem__2(self, index):\n # uniq_id, user_behavior, explaination, fea, opt = self.dataset[index]\n # print(\"user_behavior\", user_behavior)\n # print(\"fea\", fea, \"opt\", opt)\n # print(\"user_behavior\", user_behavior)\n tgt_explain = \"asdasss ssa\"\n while tgt_explain.strip() != \"\":\n uniq_id, user_behavior, explaination, fea, opt = self.dataset[index]\n while explaination.translate(self.transtab).strip() == \"\":\n uniq_id, user_behavior, explaination, fea, opt = self.dataset[index]\n print(\"explaination begin\", explaination)\n tmp_user_beha = user_behavior.split(\" Right now, \")\n len_context = len(tmp_user_beha[1].split(\" \"))\n behavior_list = tmp_user_beha[0].split(\" \")[0: self.max_src_length - 40 - len_context]\n behavior_ = \" \".join(behavior_list)\n if behavior_[-1] == \",\":\n behavior_ = behavior_[:-1] + '.'\n if behavior_[-1] != \".\":\n behavior_ = ','.join(behavior_.split(\",\")[:-1]) + '.'\n\n user_behavior = \" right now, \".join([behavior_, tmp_user_beha[1]])\n\n user_behavior += \\\n \" the user cares about {} and the item is {}.\".format(fea, opt)\n\n\n # image = Image.open(BytesIO(base64.urlsafe_b64decode(image)))\n # patch_image = self.patch_resize_transform(image)\n # patch_mask = torch.tensor([True])\n\n if self.split == 'train' and not self.scst:\n explaination = explaination.translate(self.transtab).strip()\n print(\"explaination.translate(self.transtab).strip()\", explaination.translate(self.transtab).strip())\n explaination_token_list = explaination.strip().split(\" \")\n tgt_explain = ' '.join(explaination_token_list[:self.max_tgt_length])\n else:\n explaination = ' '.join(explaination.strip().split(\" \")[:self.max_tgt_length])\n explain_list = [explain.translate(self.transtab).strip() for explain in explaination.strip().split('&&')]\n tgt_explain = '&&'.join(explain_list)\n print(\"explaination\", explaination)\n print(\"tgt_explain\", tgt_explain)\n assert False\n src_text = user_behavior + \\\n \" how to persuade the user to buy the item?\"\n # print(\"src_text\", src_text.split(\" 
\")[0:320])\n assert len(src_text.split(\" \")) <= self.max_src_length\n src_item = self.encode_text(src_text)\n tgt_item = self.encode_text(\" {}\".format(tgt_explain))\n\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n example = {\n \"id\": uniq_id,\n \"source\": src_item,\n # \"patch_image\": patch_image,\n # \"patch_mask\": patch_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item\n }\n return example\n" ]
[ [ "numpy.array", "torch.cat" ] ]
Khumayun/FairDeepLearning
[ "e19947c17c282ce1e89ad105cc241ffc07190628" ]
[ "dataloaders/adult_loader.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import Dataset\nfrom dataloaders.adult_process import get_adult_data\n\n\nclass AdultDataset(Dataset):\n \"\"\"\n The UCI Adult dataset.\n \"\"\"\n\n def __init__(self, root_dir, phase, tar_attr, priv_attr, clr_ratio):\n self.tar_attr = tar_attr\n self.priv_attr = priv_attr\n\n self.data = get_adult_data(tar_attr, priv_attr, clr_ratio)\n if phase not in [\"train\", \"val\", \"test\"]:\n raise NotImplementedError\n\n if phase == \"train\":\n self.X = self.data[f\"x_train\"][self.data[\"train_inds\"]]\n self.Y = self.data[f\"y_train\"][self.data[\"train_inds\"]]\n self.A = self.data[f\"attr_train\"][self.data[\"train_inds\"]]\n elif phase == \"val\":\n self.X = self.data[f\"x_train\"][self.data[\"valid_inds\"]]\n self.Y = self.data[f\"y_train\"][self.data[\"valid_inds\"]]\n self.A = self.data[f\"attr_train\"][self.data[\"valid_inds\"]]\n elif phase == \"test\":\n self.X = self.data[f\"x_test\"]\n self.Y = self.data[f\"y_test\"]\n self.A = self.data[f\"attr_test\"]\n else:\n raise Exception(\"Wrong phase\")\n\n self.input_shape = self.X.shape\n self.num_samples = self.input_shape[0]\n self.xdim = self.X.shape[1]\n self.ydim = 1\n self.adim = 1\n\n def __len__(self):\n return len(self.X)\n\n def __getitem__(self, idx):\n if self.ydim == 1 and len(self.Y.shape) == 2: # binary classification\n return (\n torch.from_numpy(self.X[idx]).float(),\n torch.from_numpy(self.Y[idx]),\n torch.from_numpy(self.A[idx]),\n )\n raise NotImplementedError\n\n def onehot_2_int(self, ts):\n if len(ts.shape) == 2:\n return torch.argmax(ts, dim=1)\n if len(ts.shape) == 1:\n return torch.argmax(ts, dim=0)\n raise NotImplementedError\n\n def get_A_proportions(self):\n \"\"\"for catergorical attribute\"\"\"\n assert len(self.A.shape) == 2\n num_class = self.A.shape[1]\n\n A_label = np.argmax(self.A, axis=1)\n A_proportions = []\n for cls_idx in range(num_class):\n A_proportion = np.sum(cls_idx == A_label)\n A_proportions.append(A_proportion)\n A_proportions = [a_prop * 1.0 / len(A_label) for a_prop in A_proportions]\n return A_proportions\n\n def get_Y_proportions(self):\n \"\"\"for catergorical attribute\"\"\"\n assert len(self.Y.shape) == 2\n num_class = self.Y.shape[1]\n\n Y_label = np.argmax(self.Y, axis=1)\n Y_proportions = []\n for cls_idx in range(num_class):\n Y_proportion = np.sum(cls_idx == Y_label)\n Y_proportions.append(Y_proportion)\n Y_proportions = [y_prop * 1.0 / len(Y_label) for y_prop in Y_proportions]\n return Y_proportions\n\n def get_AY_proportions(self):\n \"\"\"for catergorical attributes\"\"\"\n assert len(self.Y.shape) == len(self.A.shape) == 2\n A_num_class = self.A.shape[1]\n Y_num_class = self.Y.shape[1]\n A_label = np.argmax(self.A, axis=1)\n Y_label = np.argmax(self.Y, axis=1)\n AY_proportions = []\n for A_cls_idx in range(A_num_class):\n Y_proportions = []\n for Y_cls_idx in range(Y_num_class):\n AY_proprtion = np.sum(\n np.logical_and(Y_cls_idx == Y_label, A_cls_idx == A_label)\n )\n Y_proportions.append(AY_proprtion)\n Y_proportions = [y_prop * 1.0 / len(Y_label) for y_prop in Y_proportions]\n AY_proportions.append(Y_proportions)\n return AY_proportions\n" ]
[ [ "numpy.sum", "torch.argmax", "numpy.logical_and", "numpy.argmax", "torch.from_numpy" ] ]
MarsBighead/mustang
[ "ffbaf109931557e40da2d97e4eb914bc1c0aba0d" ]
[ "Python/npr.py" ]
[ "#!/usr/local/bin/python3\nimport numpy as np \n\nimport numpy.random as npr \nimport matplotlib.pyplot as plt\n\nprint (npr.rand(5,5))\na=5.\nb=10.\nprint (npr.rand(10)*(b-a)+a )\n\n\nsample_size =500 \nrn1 = npr.rand(sample_size,3) \nrn2 = npr.randint(0,10,sample_size) \nrn3 = npr.sample(size=sample_size) \na =[0, 25, 50, 75, 100] \nrn4=npr.choice(a, size=sample_size) \n\nfig, ((ax1,ax2),(ax3,ax4))= plt.subplots(\n nrows=2,\n ncols=2,\n figsize=(7,7)\n)\nax1.hist(rn1, bins=25, stacked=True)\nax1.set_title('rand')\nax1.set_ylabel('frequency')\nax1.grid(True)\n\nax2.hist(rn2, bins=25)\nax2.set_title('randint')\nax2.grid(True)\n\nax3.hist(rn3, bins=25)\nax3.set_title('sample')\nax3.set_ylabel('frequency')\nax3.grid(True)\n\nax4.hist(rn4, bins=25) \nax4.set_title('choice')\nax4.grid(True)\n\n#print (fig)\n#plt.show()\nfig.savefig(\"random-statistics.png\", bbox_inches='tight')\n\nplt.close(\"all\")\n\nsample_size =500 \nrn1 = npr.standard_normal(sample_size) \nrn2 = npr.normal(100,20,sample_size) \nrn3 = npr.chisquare(df=0.5, size=sample_size) \na =[0, 25, 50, 75, 100] \nrn4=npr.poisson(lam=1.0, size=sample_size) \n\nfig, ((ax1,ax2),(ax3,ax4))= plt.subplots(\n nrows=2,\n ncols=2,\n figsize=(7,7)\n)\nax1.hist(rn1, bins=25, stacked=True)\nax1.set_title('standard normal')\nax1.set_ylabel('frequency')\nax1.grid(True)\n\nax2.hist(rn2, bins=25)\nax2.set_title('normal(100, 20)')\nax2.grid(True)\n\nax3.hist(rn3, bins=25)\nax3.set_title('chi square')\nax3.set_ylabel('frequency')\nax3.grid(True)\n\nax4.hist(rn4, bins=25) \nax4.set_title('Poisson')\nax4.grid(True)\nfig.savefig(\"high-statistics.png\", bbox_inches='tight')\nplt.show()" ]
[ [ "numpy.random.standard_normal", "numpy.random.choice", "matplotlib.pyplot.subplots", "numpy.random.poisson", "matplotlib.pyplot.show", "numpy.random.rand", "matplotlib.pyplot.close", "numpy.random.normal", "numpy.random.chisquare", "numpy.random.randint", "numpy.random.sample" ] ]
mkturkcan/FC.AntennalLobe
[ "6a0e124f68c249fcb067c571b5170002b3335efc" ]
[ "feedbackcircuits/NDComponents/AntennalLobe/AlphaSpike.py" ]
[ "# pylint:disable=no-member\nimport os\nfrom collections import OrderedDict\nimport numpy as np\nimport pycuda.gpuarray as garray\nfrom pycuda.tools import dtype_to_ctype\nimport pycuda.driver as drv\nfrom pycuda.compiler import SourceModule\nfrom neurokernel.LPU.NDComponents.NDComponent import NDComponent\n\nCUDA_SRC = \"\"\"\n\n\n#define G_MIN\t\t0.0\n#define G_MAX\t\t50000.0\n\nstruct States {\n double s;\n double u;\n double g;\n};\n\nstruct Derivatives {\n double s;\n double u;\n};\n\n\n__device__ void clip(States &states)\n{\n states.g = fmax(states.g, G_MIN);\n states.g = fmin(states.g, G_MAX);\n}\n\n__device__ void forward(\n States &states,\n Derivatives &gstates,\n double dt\n)\n{\n states.s += dt * gstates.s;\n states.u += dt * gstates.u;\n}\n\n__device__ int ode(\n States &states,\n Derivatives &gstates,\n double AD,\n double AR,\n double GMAX,\n double &spike\n)\n{\n\n gstates.s = states.u;\n gstates.u = (((-(AR + AD)) * states.u) - ((AR * AD) * states.s));\n if (spike) {\n states.u = (states.u + (AR * AD));\n }\n states.g = (states.s * GMAX);\n return 0;\n}\n\n\n\n__global__ void run_step (\n int num_thread,\n double dt,\n double *g_state_s,\n double *g_state_u,\n double *g_state_g,\n double *g_param_ad,\n double *g_param_ar,\n double *g_param_gmax,\n double *g_input_spike,\n double *g_output_g\n)\n{\n /* TODO: option for 1-D or 2-D */\n int tid = blockIdx.x * blockDim.x + threadIdx.x;\n int total_threads = gridDim.x * blockDim.x;\n\n for (int nid = tid; nid < num_thread; nid += total_threads) {\n\n States states;\n Derivatives gstates;\n\n /* import data */\n states.s = g_state_s[nid];\n states.u = g_state_u[nid];\n states.g = g_state_g[nid];\n double param_AD = g_param_ad[nid];\n double param_AR = g_param_ar[nid];\n double param_GMAX = g_param_gmax[nid];\n double input_spike = g_input_spike[nid];\n\n \n \n /* compute gradient */\n ode(states, gstates, param_AD, param_AR, param_GMAX, input_spike);\n\n /* solve ode */\n forward(states, gstates, dt);\n\n /* clip */\n clip(states);\n\n \n\n /* export state (internals) data */\n g_state_s[nid] = states.s;\n g_state_u[nid] = states.u;\n g_state_g[nid] = states.g;\n\n /* export output (updates) data */\n g_output_g[nid] = states.g;\n }\n\n return;\n}\n\n\n\"\"\"\n\n\nclass AlphaSpike(NDComponent):\n \"\"\"AlphaSpike\n\n Attributes:\n accesses (list): list of input variables\n updates (list): list of output variables\n params (list): list of parameters\n params_default (dict): default values of the parameters\n internals (OrderedDict): internal variables of the model and initial value\n time_scale (float): scaling factor of the `dt`\n \"\"\"\n\n accesses = [\n \"spike\",\n ]\n updates = [\n \"g\",\n ]\n params = [\n \"ad\",\n \"ar\",\n \"gmax\",\n ]\n params_default = dict(\n ar=12.5,\n ad=12.19,\n gmax=0.1,\n )\n internals = OrderedDict(\n [\n (\"s\", 0.0),\n (\"u\", 0.0),\n (\"g\", 0.0),\n ]\n )\n time_scale = 1.0 # scales dt\n _has_rand = False\n\n def maximum_dt_allowed(self):\n return np.inf\n\n def __init__(\n self,\n params_dict,\n access_buffers,\n dt,\n LPU_id=None,\n debug=False,\n cuda_verbose=False,\n ):\n if cuda_verbose:\n self.compile_options = [\"--ptxas-options=-v\", \"--expt-relaxed-constexpr\"]\n else:\n self.compile_options = [\"--expt-relaxed-constexpr\"]\n\n self.debug = debug\n self.LPU_id = LPU_id\n self.num_comps = params_dict[self.params[0]].size\n self.dtype = params_dict[self.params[0]].dtype\n\n self.dt = dt * self.time_scale\n self.params_dict = params_dict\n self.access_buffers = access_buffers\n\n 
self.internal_states = {\n c: garray.zeros(self.num_comps, dtype=self.dtype) + self.internals[c]\n for c in self.internals\n }\n\n self.inputs = {\n k: garray.empty(self.num_comps, dtype=self.access_buffers[k].dtype)\n for k in self.accesses\n }\n\n # make all dtypes consistent\n dtypes = {\"dt\": self.dtype}\n dtypes.update(\n {\"state_\" + k: self.internal_states[k].dtype for k in self.internals}\n )\n dtypes.update({\"param_\" + k: self.params_dict[k].dtype for k in self.params})\n dtypes.update(\n {\"input_\" + k.format(k): self.inputs[k].dtype for k in self.accesses}\n )\n dtypes.update({\"output_\" + k: self.dtype for k in self.updates})\n self.update_func = self.get_update_func(dtypes)\n\n if self._has_rand:\n import neurokernel.LPU.utils.curand as curand\n\n self.randState = curand.curand_setup(\n self.num_comps, np.random.randint(10000)\n )\n dtypes.update({\"rand\": self.dtype})\n\n def run_step(self, update_pointers, st=None):\n for k in self.inputs:\n self.sum_in_variable(k, self.inputs[k], st=st)\n args = (\n [self.internal_states[k].gpudata for k in self.internals]\n + [self.params_dict[k].gpudata for k in self.params]\n + [self.inputs[k].gpudata for k in self.accesses]\n + [update_pointers[k] for k in self.updates]\n )\n if self._has_rand:\n args += [self.randState.gpudata]\n\n self.update_func.prepared_async_call(\n self.update_func.grid,\n self.update_func.block,\n st,\n self.num_comps,\n self.dt,\n *args\n )\n\n def get_update_func(self, dtypes):\n from pycuda.compiler import SourceModule\n\n mod = SourceModule(\n CUDA_SRC,\n options=self.compile_options,\n no_extern_c=self._has_rand,\n )\n func = mod.get_function(\"run_step\")\n type_dict = {k: dtype_to_ctype(dtypes[k]) for k in dtypes}\n\n func.prepare(\"i\" + np.dtype(self.dtype).char + \"P\" * (len(type_dict) - 1))\n func.block = (256, 1, 1)\n func.grid = (\n min(\n 6 * drv.Context.get_device().MULTIPROCESSOR_COUNT,\n (self.num_comps - 1) // 256 + 1,\n ),\n 1,\n )\n return func\n" ]
[ [ "numpy.dtype", "numpy.random.randint" ] ]
lizhe960118/CenterNet
[ "d1a0d13974e2316c6d127ca7860866cdd93bcfa7" ]
[ "tools/test_file_dir/voc_test.py" ]
[ "import argparse\nimport os\nimport os.path as osp\nimport shutil\nimport tempfile\n\nimport mmcv\nimport torch\nimport torch.distributed as dist\nfrom mmcv.runner import load_checkpoint, get_dist_info\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\n\nfrom mmdet.apis import init_dist\nfrom mmdet.core import results2json\n# , coco_eval, \nfrom txt_val import txt_eval\nfrom mmdet.core import wrap_fp16_model\nfrom mmdet.datasets import build_dataloader, build_dataset\nfrom mmdet.models import build_detector\nfrom mmdet import datasets\n\n\ndef single_gpu_test(model, data_loader, show=False):\n model.eval()\n results = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=not show, **data)\n results.append(result)\n\n if show:\n model.module.show_result(data, result, dataset.img_norm_cfg)\n\n# batch_size = data['img'][0].size(0)\n batch_size = 1\n for _ in range(batch_size):\n prog_bar.update()\n return results\n\n\ndef multi_gpu_test(model, data_loader, tmpdir=None):\n model.eval()\n results = []\n dataset = data_loader.dataset\n rank, world_size = get_dist_info()\n if rank == 0:\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n results.append(result)\n\n if rank == 0:\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size * world_size):\n prog_bar.update()\n\n # collect results from all ranks\n results = collect_results(results, len(dataset), tmpdir)\n\n return results\n\n\ndef collect_results(result_part, size, tmpdir=None):\n rank, world_size = get_dist_info()\n # create a tmp dir if it is not specified\n if tmpdir is None:\n MAX_LEN = 512\n # 32 is whitespace\n dir_tensor = torch.full((MAX_LEN, ),\n 32,\n dtype=torch.uint8,\n device='cuda')\n if rank == 0:\n tmpdir = tempfile.mkdtemp()\n tmpdir = torch.tensor(\n bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')\n dir_tensor[:len(tmpdir)] = tmpdir\n dist.broadcast(dir_tensor, 0)\n tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()\n else:\n mmcv.mkdir_or_exist(tmpdir)\n # dump the part result to the dir\n mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))\n dist.barrier()\n # collect all parts\n if rank != 0:\n return None\n else:\n # load results of all parts from tmp dir\n part_list = []\n for i in range(world_size):\n part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))\n part_list.append(mmcv.load(part_file))\n # sort the results\n ordered_results = []\n for res in zip(*part_list):\n ordered_results.extend(list(res))\n # the dataloader may pad some samples\n ordered_results = ordered_results[:size]\n # remove tmp dir\n shutil.rmtree(tmpdir)\n return ordered_results\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='MMDet test detector')\n parser.add_argument('config', help='test config file path')\n parser.add_argument('checkpoint', help='checkpoint file')\n parser.add_argument('--out', help='output result file')\n# parser.add_argument(\n# '--eval',\n# type=str,\n# nargs='+',\n# choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],\n# help='eval types')\n parser.add_argument('--show', action='store_true', help='show results')\n parser.add_argument('--tmpdir', help='tmp dir for writing some results')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 
'mpi'],\n default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument('--iou_thr', type=float, default=0.5)\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n return args\n\n\ndef main():\n #os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n args = parse_args()\n\n assert args.out or args.show, \\\n ('Please specify at least one operation (save or show the results) '\n 'with the argument \"--out\" or \"--show\"')\n\n if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):\n raise ValueError('The output file must be a pkl file.')\n\n cfg = mmcv.Config.fromfile(args.config)\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n cfg.model.pretrained = None\n cfg.data.test.test_mode = True\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n distributed = True\n init_dist(args.launcher, **cfg.dist_params)\n\n # build the dataloader\n # TODO: support multiple images per gpu (only minor changes are needed)\n dataset = build_dataset(cfg.data.test)\n data_loader = build_dataloader(\n dataset,\n imgs_per_gpu=1,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n\n # build the model and load checkpoint\n model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n wrap_fp16_model(model)\n checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')\n # old versions did not save class info in checkpoints, this walkaround is\n # for backward compatibility\n if 'CLASSES' in checkpoint['meta']:\n model.CLASSES = checkpoint['meta']['CLASSES']\n else:\n model.CLASSES = dataset.CLASSES\n\n if not distributed:\n model = MMDataParallel(model, device_ids=[0])\n outputs = single_gpu_test(model, data_loader, args.show)\n else:\n model = MMDistributedDataParallel(model.cuda())\n outputs = multi_gpu_test(model, data_loader, args.tmpdir)\n\n rank, _ = get_dist_info()\n if args.out and rank == 0:\n print('\\nwriting results to {}'.format(args.out))\n mmcv.dump(outputs, args.out)\n result_file = args.out\n# args = parser.parse_args()\n# cfg = mmcv.Config.fromfile(args.config)\n# test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets)\n# txt_eval(args.result, test_dataset, args.iou_thr)\n txt_eval(result_file, dataset, iou_thr=args.iou_thr)\n \n# eval_types = args.eval\n# if eval_types:\n# print('Starting evaluate {}'.format(' and '.join(eval_types)))\n# if eval_types == ['proposal_fast']:\n# result_file = args.out\n# coco_eval(result_file, eval_types, dataset.coco)\n# else:\n# if not isinstance(outputs[0], dict):\n# result_files = results2json(dataset, outputs, args.out)\n# coco_eval(result_files, eval_types, dataset.coco)\n# else:\n# for name in outputs[0]:\n# print('\\nEvaluating {}'.format(name))\n# outputs_ = [out[name] for out in outputs]\n# result_file = args.out + '.{}'.format(name)\n# result_files = results2json(dataset, outputs_,\n# result_file)\n# coco_eval(result_files, eval_types, dataset.coco)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.distributed.barrier", "torch.no_grad", "torch.full", "torch.distributed.broadcast" ] ]
Livioni/Cloud-Workflow-Scheduling-base-on-Deep-Reinforcement-Learning
[ "eb246ebba160567277c9c1aa226e359f48629dac" ]
[ "AblationExperiment.py" ]
[ "import gym, torch, copy, os, xlwt, random\nimport torch.nn as nn\nfrom datetime import datetime\nimport numpy as np\n\nenv = gym.make(\"clusterEnv-v0\").unwrapped\nstate_dim, action_dim = env.return_dim_info()\n\n####### initialize environment hyperparameters ######\nmax_ep_len = 1000 # max timesteps in one episode\nauto_save = 1\ntotal_test_episodes = 100 * auto_save # total num of testing episodes\n\n\ndef initial_excel():\n global worksheet, workbook\n # xlwt 库将数据导入Excel并设置默认字符编码为ascii\n workbook = xlwt.Workbook(encoding='ascii')\n # 添加一个表 参数为表名\n worksheet = workbook.add_sheet('makespan')\n # 生成单元格样式的方法\n # 设置列宽, 3为列的数目, 12为列的宽度, 256为固定值\n for i in range(3):\n worksheet.col(i).width = 256 * 12\n # 设置单元格行高, 25为行高, 20为固定值\n worksheet.row(1).height_mismatch = True\n worksheet.row(1).height = 20 * 25\n # 保存excel文件\n workbook.save('data/makespan_MCTSAE.xls')\n\n\ndef read_current_state():\n '''\n 读取当前env的状态\n :return: 当前env的状态\n '''\n state = copy.deepcopy(env.state)\n ready_list = copy.deepcopy(env.ready_list)\n done_job = copy.deepcopy(env.done_job)\n tasks = copy.deepcopy(env.tasks)\n wait_duration = copy.deepcopy(env.wait_duration)\n cpu_demand = copy.deepcopy(env.cpu_demand)\n memory_demand = copy.deepcopy(env.memory_demand)\n tasks_remaing_time = copy.deepcopy(env.tasks_remaing_time)\n time = env.time\n cpu_res = env.cpu_res\n memory_res = env.memory_res\n return state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand, tasks_remaing_time, cpu_res, memory_res, time\n\n\ndef load_current_state(state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand, tasks_remaing_time,\n cpu_res, memory_res, time):\n env.set_state(state[:])\n env.set_ready_list(ready_list[:])\n env.set_done_job(done_job[:])\n env.set_tasks(tasks[:])\n env.set_wait_duration(wait_duration[:])\n env.set_cpu_demand(cpu_demand[:])\n env.set_memory_demand(memory_demand[:])\n env.set_tasks_remaing_time(tasks_remaing_time)\n env.set_cpu_res(cpu_res)\n env.set_memory_res(memory_res)\n env.set_time(time)\n return\n\n\nclass TreeNode(object):\n def __init__(self, parent, state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand,\n tasks_remaing_time, cpu_res, memory_res, time):\n self._parent = parent\n self._children = {} # a map from action to TreeNode\n self._n_visits = 0\n self._makespan = 0\n self._total_makespan = 0\n self._state = state\n self._ready_list = ready_list\n self._done_job = done_job\n self._tasks = tasks\n self._wait_duration = wait_duration\n self._cpu_demand = cpu_demand\n self._memory_demand = memory_demand\n self._tasks_remaing_time = tasks_remaing_time\n self._cpu_res = cpu_res\n self._memory_res = memory_res\n self._time = time\n self._c = 40\n self._value = 0\n if self._parent != None:\n self.get_value()\n\n def expand(self):\n '''\n 扩展树\n '''\n load_current_state(self._state, self._ready_list, self._done_job, self._tasks, self._wait_duration,\n self._cpu_demand, self._memory_demand, self._tasks_remaing_time, self._cpu_res,\n self._memory_res, self._time)\n available_action = env.return_action_list()\n if available_action:\n for action in available_action:\n load_current_state(self._state, self._ready_list, self._done_job, self._tasks, self._wait_duration,\n self._cpu_demand, self._memory_demand, self._tasks_remaing_time, self._cpu_res,\n self._memory_res, self._time)\n if action not in self._children:\n env.step(action)\n state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand, tasks_remaing_time, cpu_res, memory_res, 
time = read_current_state()\n self._children[action] = TreeNode(self, state, ready_list, done_job, tasks, wait_duration,\n cpu_demand, memory_demand, tasks_remaing_time, cpu_res,\n memory_res, time)\n else:\n print(\"done\")\n\n def get_average_makespan(self):\n return self._makespan\n\n def get_value(self):\n self._value = self._makespan + self._c * np.sqrt(np.log(self._parent._n_visits + 1) / (self._n_visits + 1))\n return self._value\n\n def select(self):\n '''\n 在子节中选择具有搜索价值的点\n '''\n return max(self._children.items(), key=lambda act_node: act_node[1].get_value())[1]\n\n def update(self, makespan):\n # Count visit.\n self._n_visits += 1\n if self._makespan == 0:\n self._makespan = -makespan\n else:\n if -makespan > self._makespan:\n self._makespan = -makespan\n if self._parent != None:\n self._value = self.get_value()\n\n def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(leaf_value)\n self.update(leaf_value)\n\n def is_leaf(self):\n return self._children == {}\n\n def is_root(self):\n return self._parent is None\n\n\nclass MCTS(object):\n def __init__(self, state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand, tasks_remaing_time,\n cpu_res, memory_res, time, depth):\n self._root = TreeNode(None, state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand,\n tasks_remaing_time, cpu_res, memory_res, time)\n self._root.expand() # 初始化扩展\n self._initial_buget = 100\n self._min_buget = 10\n self._depth = depth\n\n def playout(self):\n buget = max(self._initial_buget / self._depth, self._min_buget)\n for j in range(int(buget)):\n node = self._root\n while True:\n if node.is_leaf():\n if node._n_visits == 0:\n cur_state, cur_ready_list, cur_done_job, cur_tasks, cur_wait_duration, cur_cpu_demand, cur_memory_demand, cur_tasks_remaing_time, cur_cpu_res, cur_memory_res, cur_time = node._state, node._ready_list, node._done_job, node._tasks, node._wait_duration, node._cpu_demand, node._memory_demand, node._tasks_remaing_time, node._cpu_res, node._memory_res, node._time\n makespan = self._roll_out(cur_state, cur_ready_list, cur_done_job, cur_tasks, cur_wait_duration,\n cur_cpu_demand, cur_memory_demand, cur_tasks_remaing_time,\n cur_cpu_res, cur_memory_res, cur_time)\n node.update_recursive(makespan)\n break\n else:\n node.expand()\n node = node.select()\n else:\n node = node.select()\n node = self._root\n return max(node._children.items(), key=lambda act_node: act_node[1].get_average_makespan())[0]\n\n def _roll_out(self, cur_state, cur_ready_list, cur_done_job, cur_tasks, cur_wait_duration, cur_cpu_demand,\n cur_memory_demand, cur_tasks_remaing_time, cur_cpu_res, cur_memory_res, cur_time):\n load_current_state(cur_state, cur_ready_list, cur_done_job, cur_tasks, cur_wait_duration, cur_cpu_demand,\n cur_memory_demand, cur_tasks_remaing_time, cur_cpu_res, cur_memory_res, cur_time)\n state = cur_state\n max_ep_len = 1000 # max timesteps in one episode\n for t in range(1, max_ep_len + 1):\n action = random.choice(range(action_dim)) - 1\n state, reward, done, info = env.step(action)\n while (info[0] == False):\n action = random.choice(range(action_dim)) - 1\n state, reward, done, info = env.step(action) # 输入step的都是\n next_state, reward, done, _ = state, reward, done, info\n # break; if the episode is over\n state = next_state\n if done:\n makespan = state[0]\n break\n return makespan\n\n\nif __name__ == '__main__':\n initial_excel()\n makespans = []\n line = 
0\n start_time = datetime.now().replace(microsecond=0)\n print(\"Started training at (GMT) : \", start_time)\n print(\"============================================================================================\")\n for ep in range(1, total_test_episodes + 1):\n initial_state = env.reset()\n state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand, tasks_remaing_time, cpu_res, memory_res, time = read_current_state()\n for depth in range(1, max_ep_len + 1):\n tree = MCTS(state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand,\n tasks_remaing_time, cpu_res, memory_res, time, depth=depth)\n best_action = tree.playout()\n load_current_state(tree._root._state, tree._root._ready_list, tree._root._done_job, tree._root._tasks,\n tree._root._wait_duration, tree._root._cpu_demand, tree._root._memory_demand,\n tree._root._tasks_remaing_time, tree._root._cpu_res, tree._root._memory_res,\n tree._root._time)\n observation, reward, done, info = env.step(best_action)\n state, ready_list, done_job, tasks, wait_duration, cpu_demand, memory_demand, tasks_remaing_time, cpu_res, memory_res, time = read_current_state()\n del tree\n if done:\n makespan = observation[0]\n makespans.append(makespan)\n print(\"Episode:\", ep, \"Makespan:\", makespan)\n if ep % auto_save == 0:\n average_makespan = np.mean(makespans)\n worksheet.write(line, 1, float(average_makespan))\n workbook.save('data/makespan_MCTSAE.xls')\n print('MCTS : Episode: {}, Makespan: {:.3f}s'.format((line + 1) * auto_save, average_makespan))\n line += 1\n makespans = []\n end_time = datetime.now().replace(microsecond=0)\n print(\"Finished testing at (GMT) : \", end_time)\n print(\"Total testing time : \", end_time - start_time)\n start_time = end_time\n break\n workbook.save('data/makespan_MCTSAE.xls')\n env.close()\n" ]
[ [ "numpy.log", "numpy.mean" ] ]
OuyangChao/Paddle
[ "311b3b44fc7d51d4d66d90ab8a3fc0d42231afda", "311b3b44fc7d51d4d66d90ab8a3fc0d42231afda" ]
[ "python/paddle/fluid/tests/unittests/test_transpose_op.py", "python/paddle/fluid/tests/unittests/test_pixel_shuffle.py" ]
[ "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\nfrom op_test import OpTest\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid import Program, program_guard\n\npaddle.enable_static()\n\nclass TestTransposeOp(OpTest):\n def setUp(self):\n self.init_op_type()\n self.initTestCase()\n self.inputs = {'X': np.random.random(self.shape).astype(\"float64\")}\n self.attrs = {\n 'axis': list(self.axis),\n 'use_mkldnn': self.use_mkldnn,\n }\n self.outputs = {\n 'XShape': np.random.random(self.shape).astype(\"float64\"),\n 'Out': self.inputs['X'].transpose(self.axis)\n }\n\n def init_op_type(self):\n self.op_type = \"transpose2\"\n self.use_mkldnn = False\n\n def test_check_output(self):\n self.check_output(no_check_set=['XShape'])\n\n def test_check_grad(self):\n self.check_grad(['X'], 'Out')\n\n def initTestCase(self):\n self.shape = (3, 40)\n self.axis = (1, 0)\n\n\nclass TestCase0(TestTransposeOp):\n def initTestCase(self):\n self.shape = (100, )\n self.axis = (0, )\n\n\nclass TestCase1(TestTransposeOp):\n def initTestCase(self):\n self.shape = (3, 4, 10)\n self.axis = (0, 2, 1)\n\n\nclass TestCase2(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 3, 4, 5)\n self.axis = (0, 2, 3, 1)\n\n\nclass TestCase3(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 3, 4, 5, 6)\n self.axis = (4, 2, 3, 1, 0)\n\n\nclass TestCase4(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 3, 4, 5, 6, 1)\n self.axis = (4, 2, 3, 1, 0, 5)\n\n\nclass TestCase5(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 16, 96)\n self.axis = (0, 2, 1)\n\n\nclass TestCase6(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 10, 12, 16)\n self.axis = (3, 1, 2, 0)\n\n\nclass TestCase7(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 10, 2, 16)\n self.axis = (0, 1, 3, 2)\n\n\nclass TestCase8(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 3, 2, 3, 2, 4, 3, 3)\n self.axis = (0, 1, 3, 2, 4, 5, 6, 7)\n\n\nclass TestCase9(TestTransposeOp):\n def initTestCase(self):\n self.shape = (2, 3, 2, 3, 2, 4, 3, 3)\n self.axis = (6, 1, 3, 5, 0, 2, 4, 7)\n\n\nclass TestTransposeOpError(unittest.TestCase):\n def test_errors(self):\n paddle.enable_static()\n with program_guard(Program(), Program()):\n x = fluid.layers.data(name='x', shape=[10, 5, 3], dtype='float64')\n\n def test_x_Variable_check():\n # the Input(x)'s type must be Variable\n fluid.layers.transpose(\"not_variable\", perm=[1, 0, 2])\n\n self.assertRaises(TypeError, test_x_Variable_check)\n\n def test_x_dtype_check():\n # the Input(x)'s dtype must be one of [float16, float32, float64, int32, int64]\n x1 = fluid.layers.data(\n name='x1', shape=[10, 5, 3], dtype='bool')\n fluid.layers.transpose(x1, perm=[1, 0, 2])\n\n self.assertRaises(TypeError, test_x_dtype_check)\n\n def test_perm_list_check():\n # Input(perm)'s type must be list\n 
fluid.layers.transpose(x, perm=\"[1, 0, 2]\")\n\n self.assertRaises(TypeError, test_perm_list_check)\n\n def test_perm_length_and_x_dim_check():\n # Input(perm) is the permutation of dimensions of Input(input)\n # its length should be equal to dimensions of Input(input)\n fluid.layers.transpose(x, perm=[1, 0, 2, 3, 4])\n\n self.assertRaises(ValueError, test_perm_length_and_x_dim_check)\n\n def test_each_elem_value_check():\n # Each element in Input(perm) should be less than Input(x)'s dimension\n fluid.layers.transpose(x, perm=[3, 5, 7])\n\n self.assertRaises(ValueError, test_each_elem_value_check)\n\nclass TestTransposeApi(unittest.TestCase):\n def test_static_out(self):\n paddle.enable_static()\n with paddle.static.program_guard(paddle.static.Program()):\n x = paddle.static.data(name='x', shape=[2, 3, 4], dtype='float32')\n x_trans1 = paddle.transpose(x, perm=[1, 0, 2])\n x_trans2 = paddle.transpose(x, perm=(2, 1, 0))\n place = paddle.CPUPlace()\n exe = paddle.static.Executor(place)\n x_np = np.random.random([2, 3, 4]).astype(\"float32\")\n result1, result2 = exe.run(feed={\"x\": x_np}, fetch_list=[x_trans1, x_trans2])\n expected_result1 = np.transpose(x_np, [1, 0, 2])\n expected_result2 = np.transpose(x_np, (2, 1, 0))\n \n np.testing.assert_array_equal(result1, expected_result1)\n np.testing.assert_array_equal(result2, expected_result2)\n\n def test_dygraph_out(self):\n # This is an old test before 2.0 API so we need to disable static\n # to trigger dygraph\n paddle.disable_static()\n x = paddle.randn([2, 3, 4])\n x_trans1 = paddle.transpose(x, perm=[1, 0, 2])\n x_trans2 = paddle.transpose(x, perm=(2, 1, 0))\n x_np = x.numpy()\n expected_result1 = np.transpose(x_np, [1, 0, 2])\n expected_result2 = np.transpose(x_np, (2, 1, 0))\n\n np.testing.assert_array_equal(x_trans1.numpy(), expected_result1)\n np.testing.assert_array_equal(x_trans2.numpy(), expected_result2)\n # This is an old test before 2.0 API so we enable static again after\n # dygraph test\n paddle.enable_static()\n\nclass TestTAPI(unittest.TestCase):\n def test_out(self):\n with fluid.program_guard(fluid.Program()):\n data = fluid.data(shape=[10], dtype=\"float64\", name=\"data\")\n data_t = paddle.t(data)\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n data_np = np.random.random([10]).astype(\"float64\")\n result, = exe.run(feed={\"data\": data_np}, fetch_list=[data_t])\n expected_result = np.transpose(data_np)\n self.assertEqual((result == expected_result).all(), True)\n\n with fluid.program_guard(fluid.Program()):\n data = fluid.data(shape=[10, 5], dtype=\"float64\", name=\"data\")\n data_t = paddle.t(data)\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n data_np = np.random.random([10, 5]).astype(\"float64\")\n result, = exe.run(feed={\"data\": data_np}, fetch_list=[data_t])\n expected_result = np.transpose(data_np)\n self.assertEqual((result == expected_result).all(), True)\n\n with fluid.program_guard(fluid.Program()):\n data = fluid.data(shape=[1, 5], dtype=\"float64\", name=\"data\")\n data_t = paddle.t(data)\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n data_np = np.random.random([1, 5]).astype(\"float64\")\n result, = exe.run(feed={\"data\": data_np}, fetch_list=[data_t])\n expected_result = np.transpose(data_np)\n self.assertEqual((result == expected_result).all(), True)\n\n with fluid.dygraph.guard():\n np_x = np.random.random([10]).astype(\"float64\")\n data = fluid.dygraph.to_variable(np_x)\n z = paddle.t(data)\n np_z = z.numpy()\n z_expected = np.array(np.transpose(np_x))\n 
self.assertEqual((np_z == z_expected).all(), True)\n\n with fluid.dygraph.guard():\n np_x = np.random.random([10, 5]).astype(\"float64\")\n data = fluid.dygraph.to_variable(np_x)\n z = paddle.t(data)\n np_z = z.numpy()\n z_expected = np.array(np.transpose(np_x))\n self.assertEqual((np_z == z_expected).all(), True)\n\n with fluid.dygraph.guard():\n np_x = np.random.random([1, 5]).astype(\"float64\")\n data = fluid.dygraph.to_variable(np_x)\n z = paddle.t(data)\n np_z = z.numpy()\n z_expected = np.array(np.transpose(np_x))\n self.assertEqual((np_z == z_expected).all(), True)\n\n def test_errors(self):\n with fluid.program_guard(fluid.Program()):\n x = fluid.data(name='x', shape=[10, 5, 3], dtype='float64')\n\n def test_x_dimension_check():\n paddle.t(x)\n\n self.assertRaises(ValueError, test_x_dimension_check)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\n\nfrom op_test import OpTest\nimport paddle\nimport paddle.nn.functional as F\nimport paddle.fluid.core as core\nimport paddle.fluid as fluid\n\n\ndef pixel_shuffle_np(x, up_factor, data_format=\"NCHW\"):\n if data_format == \"NCHW\":\n n, c, h, w = x.shape\n new_shape = (n, c // (up_factor * up_factor), up_factor, up_factor, h,\n w)\n # reshape to (num,output_channel,upscale_factor,upscale_factor,h,w)\n npresult = np.reshape(x, new_shape)\n # transpose to (num,output_channel,h,upscale_factor,w,upscale_factor)\n npresult = npresult.transpose(0, 1, 4, 2, 5, 3)\n oshape = [n, c // (up_factor * up_factor), h * up_factor, w * up_factor]\n npresult = np.reshape(npresult, oshape)\n return npresult\n else:\n n, h, w, c = x.shape\n new_shape = (n, h, w, c // (up_factor * up_factor), up_factor,\n up_factor)\n # reshape to (num,h,w,output_channel,upscale_factor,upscale_factor)\n npresult = np.reshape(x, new_shape)\n # transpose to (num,h,upscale_factor,w,upscale_factor,output_channel)\n npresult = npresult.transpose(0, 1, 4, 2, 5, 3)\n oshape = [n, h * up_factor, w * up_factor, c // (up_factor * up_factor)]\n npresult = np.reshape(npresult, oshape)\n return npresult\n\n\nclass TestPixelShuffleOp(OpTest):\n def setUp(self):\n self.op_type = \"pixel_shuffle\"\n self.init_data_format()\n n, c, h, w = 2, 9, 4, 4\n\n if self.format == \"NCHW\":\n shape = [n, c, h, w]\n if self.format == \"NHWC\":\n shape = [n, h, w, c]\n\n up_factor = 3\n\n x = np.random.random(shape).astype(\"float64\")\n npresult = pixel_shuffle_np(x, up_factor, self.format)\n\n self.inputs = {'X': x}\n self.outputs = {'Out': npresult}\n self.attrs = {'upscale_factor': up_factor, \"data_format\": self.format}\n\n def init_data_format(self):\n self.format = \"NCHW\"\n\n def test_check_output(self):\n self.check_output()\n\n def test_check_grad(self):\n self.check_grad(['X'], 'Out')\n\n\nclass TestChannelLast(TestPixelShuffleOp):\n def init_data_format(self):\n self.format = 
\"NHWC\"\n\n\nclass TestPixelShuffleAPI(unittest.TestCase):\n def setUp(self):\n self.x_1_np = np.random.random([2, 9, 4, 4]).astype(\"float64\")\n self.x_2_np = np.random.random([2, 4, 4, 9]).astype(\"float64\")\n self.out_1_np = pixel_shuffle_np(self.x_1_np, 3)\n self.out_2_np = pixel_shuffle_np(self.x_2_np, 3, \"NHWC\")\n\n def test_static_graph_functional(self):\n for use_cuda in ([False, True]\n if core.is_compiled_with_cuda() else [False]):\n place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()\n\n paddle.enable_static()\n x_1 = paddle.fluid.data(name=\"x\", shape=[2, 9, 4, 4], dtype=\"float64\")\n x_2 = paddle.fluid.data(name=\"x2\", shape=[2, 4, 4, 9], dtype=\"float64\")\n out_1 = F.pixel_shuffle(x_1, 3)\n out_2 = F.pixel_shuffle(x_2, 3, \"NHWC\")\n\n exe = paddle.static.Executor(place=place)\n res_1 = exe.run(fluid.default_main_program(),\n feed={\"x\": self.x_1_np},\n fetch_list=out_1,\n use_prune=True)\n\n res_2 = exe.run(fluid.default_main_program(),\n feed={\"x2\": self.x_2_np},\n fetch_list=out_2,\n use_prune=True)\n\n assert np.allclose(res_1, self.out_1_np)\n assert np.allclose(res_2, self.out_2_np)\n\n # same test between layer and functional in this op.\n def test_static_graph_layer(self):\n for use_cuda in ([False, True]\n if core.is_compiled_with_cuda() else [False]):\n place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()\n\n paddle.enable_static()\n x_1 = paddle.fluid.data(name=\"x\", shape=[2, 9, 4, 4], dtype=\"float64\")\n x_2 = paddle.fluid.data(name=\"x2\", shape=[2, 4, 4, 9], dtype=\"float64\")\n # init instance\n ps_1 = paddle.nn.PixelShuffle(3)\n ps_2 = paddle.nn.PixelShuffle(3, \"NHWC\")\n out_1 = ps_1(x_1)\n out_2 = ps_2(x_2)\n out_1_np = pixel_shuffle_np(self.x_1_np, 3)\n out_2_np = pixel_shuffle_np(self.x_2_np, 3, \"NHWC\")\n\n exe = paddle.static.Executor(place=place)\n res_1 = exe.run(fluid.default_main_program(),\n feed={\"x\": self.x_1_np},\n fetch_list=out_1,\n use_prune=True)\n\n res_2 = exe.run(fluid.default_main_program(),\n feed={\"x2\": self.x_2_np},\n fetch_list=out_2,\n use_prune=True)\n\n assert np.allclose(res_1, out_1_np)\n assert np.allclose(res_2, out_2_np)\n\n def run_dygraph(self, up_factor, data_format):\n\n n, c, h, w = 2, 9, 4, 4\n\n if data_format == \"NCHW\":\n shape = [n, c, h, w]\n if data_format == \"NHWC\":\n shape = [n, h, w, c]\n\n x = np.random.random(shape).astype(\"float64\")\n\n npresult = pixel_shuffle_np(x, up_factor, data_format)\n\n for use_cuda in ([False, True]\n if core.is_compiled_with_cuda() else [False]):\n place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()\n\n paddle.disable_static(place=place)\n\n pixel_shuffle = paddle.nn.PixelShuffle(\n up_factor, data_format=data_format)\n result = pixel_shuffle(paddle.to_tensor(x))\n\n self.assertTrue(np.allclose(result.numpy(), npresult))\n\n result_functional = F.pixel_shuffle(\n paddle.to_tensor(x), 3, data_format)\n self.assertTrue(np.allclose(result_functional.numpy(), npresult))\n\n def test_dygraph1(self):\n self.run_dygraph(3, \"NCHW\")\n\n def test_dygraph2(self):\n self.run_dygraph(3, \"NHWC\")\n\n\nclass TestPixelShuffleError(unittest.TestCase):\n def test_error_functional(self):\n def error_upscale_factor():\n with paddle.fluid.dygraph.guard():\n x = np.random.random([2, 9, 4, 4]).astype(\"float64\")\n pixel_shuffle = F.pixel_shuffle(paddle.to_tensor(x), 3.33)\n\n self.assertRaises(TypeError, error_upscale_factor)\n\n def error_data_format():\n with paddle.fluid.dygraph.guard():\n x = np.random.random([2, 9, 4, 
4]).astype(\"float64\")\n pixel_shuffle = F.pixel_shuffle(paddle.to_tensor(x), 3, \"WOW\")\n\n self.assertRaises(ValueError, error_data_format)\n\n def test_error_layer(self):\n def error_upscale_factor_layer():\n with paddle.fluid.dygraph.guard():\n x = np.random.random([2, 9, 4, 4]).astype(\"float64\")\n ps = paddle.nn.PixelShuffle(3.33)\n\n self.assertRaises(TypeError, error_upscale_factor_layer)\n\n def error_data_format_layer():\n with paddle.fluid.dygraph.guard():\n x = np.random.random([2, 9, 4, 4]).astype(\"float64\")\n ps = paddle.nn.PixelShuffle(3, \"MEOW\")\n\n self.assertRaises(ValueError, error_data_format_layer)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.random.random", "numpy.transpose", "numpy.testing.assert_array_equal" ], [ "numpy.random.random", "numpy.allclose", "numpy.reshape" ] ]
BreastGAN/augmentation
[ "0e1bcb7175e2b2a45cd8084bb14521e26b68caea" ]
[ "models/breast_cycle_gan/custom/conv/contrib.py" ]
[ "# Copyright 2019 Lukas Jendele and Ondrej Skopek.\n# Adapted from The TensorFlow Authors, under the ASL 2.0.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# This part is copied from:\n# https://github.com/tensorflow/tensorflow/blob/r1.11/tensorflow/contrib/layers/python/layers/layers.py\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.framework.python.ops import add_arg_scope\n# from tensorflow.contrib.framework.python.ops import variables\nfrom tensorflow.contrib.layers.python.layers import initializers\nfrom tensorflow.contrib.layers.python.layers import utils\n# from tensorflow.python.eager import context\n# from tensorflow.python.framework import constant_op\n# from tensorflow.python.framework import dtypes\n# from tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\n# from tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.layers import convolutional as convolutional_layers\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import variable_scope\n\n# My imports\nfrom tensorflow.contrib.layers.python.layers.layers import _build_variable_getter, _add_variable_to_collections\nfrom models.breast_cycle_gan.custom.conv.layers import MyConv2D\nimport tensorflow as tf\n# This part is copied from:\n# https://github.com/tensorflow/tensorflow/blob/r1.11/tensorflow/contrib/layers/python/layers/layers.py\n\n\n@add_arg_scope\ndef convolution2d(inputs,\n num_outputs,\n kernel_size,\n stride=1,\n padding='SAME',\n data_format=None,\n rate=1,\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer(),\n biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n use_spectral_norm=False,\n is_training=False,\n self_attention=False,\n scope=None):\n h = convolution(\n inputs,\n num_outputs,\n kernel_size,\n stride,\n padding,\n data_format,\n rate,\n activation_fn,\n normalizer_fn,\n normalizer_params,\n weights_initializer,\n weights_regularizer,\n biases_initializer,\n biases_regularizer,\n reuse,\n variables_collections,\n outputs_collections,\n trainable,\n use_spectral_norm,\n is_training,\n scope,\n conv_dims=2)\n if not self_attention:\n return h\n with tf.variable_scope(\"self_attention\"):\n with tf.variable_scope(\"f\"):\n f = convolution(\n inputs,\n num_outputs // 8,\n kernel_size,\n stride,\n padding,\n data_format,\n rate,\n activation_fn,\n normalizer_fn,\n normalizer_params,\n weights_initializer,\n weights_regularizer,\n biases_initializer,\n biases_regularizer,\n reuse,\n variables_collections,\n outputs_collections,\n trainable,\n use_spectral_norm,\n is_training,\n None,\n conv_dims=2)\n 
with tf.variable_scope(\"g\"):\n g = convolution(\n inputs,\n num_outputs // 8,\n kernel_size,\n stride,\n padding,\n data_format,\n rate,\n activation_fn,\n normalizer_fn,\n normalizer_params,\n weights_initializer,\n weights_regularizer,\n biases_initializer,\n biases_regularizer,\n reuse,\n variables_collections,\n outputs_collections,\n trainable,\n use_spectral_norm,\n is_training,\n None,\n conv_dims=2)\n\n def hw_flatten(x):\n return tf.reshape(x, shape=[x.shape[0], -1, x.shape[-1]])\n\n # N = h * w\n s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True) # # [bs, N, N]\n\n beta = tf.nn.softmax(s, axis=-1) # attention map\n\n o = tf.matmul(beta, hw_flatten(h)) # [bs, N, C]\n gamma = tf.get_variable(\"gamma\", [1], initializer=tf.constant_initializer(0.0))\n\n o = tf.reshape(o, shape=inputs.shape) # [bs, h, w, C]\n x = gamma * o + inputs\n\n return x\n\n\n@add_arg_scope\ndef convolution(inputs,\n num_outputs,\n kernel_size,\n stride=1,\n padding='SAME',\n data_format=None,\n rate=1,\n activation_fn=nn.relu,\n normalizer_fn=None,\n normalizer_params=None,\n weights_initializer=initializers.xavier_initializer(),\n weights_regularizer=None,\n biases_initializer=init_ops.zeros_initializer(),\n biases_regularizer=None,\n reuse=None,\n variables_collections=None,\n outputs_collections=None,\n trainable=True,\n use_spectral_norm=False,\n is_training=False,\n scope=None,\n conv_dims=None):\n \"\"\"Adds an N-D convolution followed by an optional batch_norm layer.\n It is required that 1 <= N <= 3.\n `convolution` creates a variable called `weights`, representing the\n convolutional kernel, that is convolved (actually cross-correlated) with the\n `inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is\n provided (such as `batch_norm`), it is then applied. Otherwise, if\n `normalizer_fn` is None and a `biases_initializer` is provided then a `biases`\n variable would be created and added the activations. Finally, if\n `activation_fn` is not `None`, it is applied to the activations as well.\n Performs atrous convolution with input stride/dilation rate equal to `rate`\n if a value > 1 for any dimension of `rate` is specified. In this case\n `stride` values != 1 are not supported.\n Args:\n inputs: A Tensor of rank N+2 of shape\n `[batch_size] + input_spatial_shape + [in_channels]` if data_format does\n not start with \"NC\" (default), or\n `[batch_size, in_channels] + input_spatial_shape` if data_format starts\n with \"NC\".\n num_outputs: Integer, the number of output filters.\n kernel_size: A sequence of N positive integers specifying the spatial\n dimensions of the filters. Can be a single integer to specify the same\n value for all spatial dimensions.\n stride: A sequence of N positive integers specifying the stride at which to\n compute output. Can be a single integer to specify the same value for all\n spatial dimensions. Specifying any `stride` value != 1 is incompatible\n with specifying any `rate` value != 1.\n padding: One of `\"VALID\"` or `\"SAME\"`.\n data_format: A string or None. Specifies whether the channel dimension of\n the `input` and output is the last dimension (default, or if `data_format`\n does not start with \"NC\"), or the second dimension (if `data_format`\n starts with \"NC\"). For N=1, the valid values are \"NWC\" (default) and\n \"NCW\". 
For N=2, the valid values are \"NHWC\" (default) and \"NCHW\".\n For N=3, the valid values are \"NDHWC\" (default) and \"NCDHW\".\n rate: A sequence of N positive integers specifying the dilation rate to use\n for atrous convolution. Can be a single integer to specify the same\n value for all spatial dimensions. Specifying any `rate` value != 1 is\n incompatible with specifying any `stride` value != 1.\n activation_fn: Activation function. The default value is a ReLU function.\n Explicitly set it to None to skip it and maintain a linear activation.\n normalizer_fn: Normalization function to use instead of `biases`. If\n `normalizer_fn` is provided then `biases_initializer` and\n `biases_regularizer` are ignored and `biases` are not created nor added.\n default set to None for no normalizer function\n normalizer_params: Normalization function parameters.\n weights_initializer: An initializer for the weights.\n weights_regularizer: Optional regularizer for the weights.\n biases_initializer: An initializer for the biases. If None skip biases.\n biases_regularizer: Optional regularizer for the biases.\n reuse: Whether or not the layer and its variables should be reused. To be\n able to reuse the layer scope must be given.\n variables_collections: Optional list of collections for all the variables or\n a dictionary containing a different list of collection per variable.\n outputs_collections: Collection to add the outputs.\n trainable: If `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).\n scope: Optional scope for `variable_scope`.\n conv_dims: Optional convolution dimensionality, when set it would use the\n corresponding convolution (e.g. 2 for Conv 2D, 3 for Conv 3D, ..). When\n leaved to None it would select the convolution dimensionality based on\n the input rank (i.e. 
Conv ND, with N = input_rank - 2).\n Returns:\n A tensor representing the output of the operation.\n Raises:\n ValueError: If `data_format` is invalid.\n ValueError: Both 'rate' and `stride` are not uniformly 1.\n \"\"\"\n if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:\n raise ValueError('Invalid data_format: %r' % (data_format,))\n\n layer_variable_getter = _build_variable_getter({'bias': 'biases', 'kernel': 'weights'})\n\n with variable_scope.variable_scope(scope, 'Conv', [inputs], reuse=reuse, custom_getter=layer_variable_getter) as sc:\n inputs = ops.convert_to_tensor(inputs)\n input_rank = inputs.get_shape().ndims\n\n if conv_dims is not None and conv_dims + 2 != input_rank:\n raise ValueError('Convolution expects input with rank %d, got %d' % (conv_dims + 2, input_rank))\n if input_rank == 3:\n layer_class = convolutional_layers.Convolution1D\n elif input_rank == 4:\n layer_class = MyConv2D\n elif input_rank == 5:\n layer_class = convolutional_layers.Convolution3D\n else:\n raise ValueError('Convolution not supported for input with rank', input_rank)\n\n df = ('channels_first' if data_format and data_format.startswith('NC') else 'channels_last')\n layer = layer_class(\n filters=num_outputs,\n kernel_size=kernel_size,\n strides=stride,\n padding=padding,\n data_format=df,\n dilation_rate=rate,\n activation=None,\n use_bias=not normalizer_fn and biases_initializer,\n kernel_initializer=weights_initializer,\n bias_initializer=biases_initializer,\n kernel_regularizer=weights_regularizer,\n bias_regularizer=biases_regularizer,\n activity_regularizer=None,\n use_spectral_norm=use_spectral_norm,\n is_training=is_training,\n trainable=trainable,\n name=sc.name,\n dtype=inputs.dtype.base_dtype,\n _scope=sc,\n _reuse=reuse)\n outputs = layer.apply(inputs)\n\n # Add variables to collections.\n _add_variable_to_collections(layer.kernel, variables_collections, 'weights')\n if layer.use_bias:\n _add_variable_to_collections(layer.bias, variables_collections, 'biases')\n\n if normalizer_fn is not None:\n normalizer_params = normalizer_params or {}\n outputs = normalizer_fn(outputs, **normalizer_params)\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return utils.collect_named_outputs(outputs_collections, sc.name, outputs)\n" ]
[ [ "tensorflow.constant_initializer", "tensorflow.reshape", "tensorflow.variable_scope", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.contrib.layers.python.layers.layers._add_variable_to_collections", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.contrib.layers.python.layers.utils.collect_named_outputs", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.contrib.layers.python.layers.initializers.xavier_initializer", "tensorflow.nn.softmax", "tensorflow.contrib.layers.python.layers.layers._build_variable_getter" ] ]
csp-inc/fluvius
[ "8eb8c3caee2b98720ae17bef384302d6fa88c828" ]
[ "bin/02-preprocess-data.py" ]
[ "import os\nimport pandas as pd\nimport fsspec\nimport argparse\nfrom src.defaults import args_info\n\nenv_vars = open(\"/content/credentials\",\"r\").read().split('\\n')\n\nfor var in env_vars[:-1]:\n key, value = var.split(' = ')\n os.environ[key] = value\n\nstorage_options={'account_name':os.environ['ACCOUNT_NAME'],\\\n 'account_key':os.environ['BLOB_KEY']}\nfs = fsspec.filesystem('az', account_name=storage_options['account_name'], account_key=storage_options['account_key'])\n\n##env data acquired\n\ndef return_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data-src',\n type=args_info[\"data_src\"][\"type\"],\n help=args_info[\"data_src\"][\"help\"])\n parser.add_argument('--write-to-csv',\n action=args_info[\"write_to_csv\"][\"action\"],\n help=args_info[\"write_to_csv\"][\"help\"])\n return parser\n\nif __name__ == \"__main__\":\n\n args = return_parser().parse_args()\n\n if args.data_src == 'usgs':\n #USGS DATA PROCESS\n data_src = 'usgs'\n container = 'usgs-data'\n\n station_url = f'az://{container}/{args.data_src}_station_metadata_raw.csv'\n station_df = pd.read_csv(station_url, storage_options=storage_options)\n\n sites_str = [str(f).zfill(8) for f in station_df.site_no]\n station_df['sites_str'] = sites_str\n\n query = []\n for f in fs.ls(f'{container}/stations'):\n station = os.path.basename(f).split('_')[0]\n query.append(station)\n q = pd.DataFrame({'sites_str':query})\n out = station_df.merge(q, on='sites_str')\n out['site_no'] = out['sites_str']\n out = out[['site_no','site_name', 'Latitude', 'Longitude','geometry']]\n if args.write_to_csv:\n out.to_csv(f'az://{container}/usgs_station_metadata.csv',index=False, storage_options=storage_options)\n\n if args.data_src == 'ana':\n container = 'ana-data'\n station_url = f'az://{container}/ana_station_metadata.csv'\n station_df = pd.read_csv(station_url, storage_options=storage_options)\n for site_no in station_df.site_no:\n station_url = f'az://{container}/{site_no}.csv'\n station_url2 = f'az://{container}/{site_no}_2.csv'\n site_df1_raw = pd.read_csv(station_url, delimiter=',', skiprows=10, storage_options=storage_options)\n translation = pd.read_csv(f'az://{container}/ana_translations.csv', storage_options=storage_options)\n trans = {p:e for p,e in zip(translation.Portuguese, translation.English)}\n site_df1 = site_df1_raw.rename(columns=trans)\n site_df1 = site_df1.dropna(subset=['Date'])\n site_df1['TimeL'] = site_df1['TimeL'].fillna('01/01/1900 01:00')\n site_df1['Date-Time'] = [d for d in site_df1['Date']]\n site_df1['Date-Time'] = pd.to_datetime(site_df1['Date-Time'],\\\n format='%d/%m/%Y')\n\n site_df2_raw = pd.read_csv(station_url2, delimiter=',', skiprows=14, storage_options=storage_options)\n site_df2_raw = site_df2_raw.replace('01/01/1900', '01/01/1900 01:00')\n translation2 = {'Data':'Date','Hora':'Hour','Turbidez':'Turbidity'}\n site_df2 = site_df2_raw.rename(columns=translation2)\n site_df2 = site_df2.dropna(subset=['Date'])\n site_df2['Date-Time-HM'] = [f\"{d} {t.split(' ')[1]}\" for d,t in zip(site_df2['Date'],site_df2['Hour'])]\n site_df2['Date-Time'] = [d for d in site_df2['Date']]\n site_df2['Date-Time'] = pd.to_datetime(site_df2['Date-Time'],\\\n format='%d/%m/%Y')\n site_df2 = site_df2[['Date', 'Hour', 'Date-Time','Turbidity']]\n\n selection = ['Date-Time', 'Discharge', 'Suspended Sediment Concentration (mg/L)', 'Turbidity']\n site_df = site_df1.merge(site_df2, on='Date', how='outer', suffixes=('_',''))\n site_df['Date-Time'] = site_df['Date-Time'].fillna(site_df['Date-Time_'])\n 
#site_df['Hour'] = site_df['Hour'].fillna(site_df['Hour_'])\n site_df = site_df[selection]\n s = str(site_no).zfill(8)\n write_filename = f'az://{container}/stations/{str(site_no)}.csv'\n print(f'writing to {write_filename}')\n if args.write_to_csv:\n site_df.to_csv(write_filename, index=False, storage_options=storage_options)\n \n if args.data_src == 'itv':\n container = 'itv-data'\n station_url = f'az://{container}/itv_station_metadata.csv'\n station_df = pd.read_csv(station_url, storage_options=storage_options)\n for site_no in station_df.site_no:\n station_url = f'az://{container}/{site_no}.csv'\n site_df = pd.read_csv(station_url,\\\n storage_options=storage_options,\\\n delimiter=',')\n\n site_df['Date-Time'] = pd.to_datetime(site_df['Campaign Date'], \\\n format='%d/%m/%Y')\n\n if args.write_to_csv:\n write_filename = f'az://{container}/stations/{site_no}.csv'\n site_df.to_csv(write_filename, storage_options=storage_options,\\\n index=False)\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "pandas.DataFrame" ] ]
tianyapiaozi/tensorflow
[ "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae", "7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae" ]
[ "tensorflow/contrib/training/python/training/training.py", "tensorflow/contrib/autograph/utils/multiple_dispatch_test.py", "tensorflow/contrib/eager/python/examples/revnet/cifar_tfrecords.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains various routines and helper functions for training models.\n\nThis script contains various functions for training models. These include\nmanipulating gradients, creating a `train_op` (an operation that computes the\nloss and applies the gradients) and a training loop function. The training loop\nallows the user to pass in the `train_op` and runs the optimization according\nto user-specified arguments.\n\n************************************\n* A simple working training script *\n************************************\n\n # Load data and create the model:\n images, labels = LoadData(...)\n predictions = MyModel(images)\n\n # Define the loss:\n tf.contrib.losses.log_loss(predictions, labels)\n total_loss = tf.contrib.losses.get_total_loss()\n\n # Define the optimizer:\n optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)\n\n # Create the train_op\n train_op = tf.contrib.training.create_train_op(total_loss, optimizer)\n\n # Run training.\n tf.contrib.training.train(train_op, my_log_dir)\n\n*************************\n* Creating the train_op *\n*************************\n\nIn order to use the `train` function, one needs a train_op: an `Operation` that\n(a) computes the loss, (b) applies the gradients to update the weights and\n(c) returns the value of the loss. tf.contrib.training.create_train_op creates\nsuch an `Operation`. This function also provides the ability to manipulate\nthe gradients using a few arguments:\n\n # Create the train_op and clip the gradient norms:\n train_op = tf.contrib.training.create_train_op(\n total_loss,\n optimizer,\n transform_grads_fn=clip_gradient_norms_fn(3))\n\n # Create the train_op and scale the gradients by providing a map from variable\n # name (or variable) to a scaling coefficient:\n def transform_grads_fn(grads):\n gradient_multipliers = {\n 'conv0/weights': 1.2,\n 'fc8/weights': 3.4,\n }\n return tf.contrib.training.multiply_gradients(\n grads, gradient_multipliers)\n\n train_op = tf.contrib.training.create_train_op(\n total_loss,\n optimizer,\n transform_grads_fn=transform_grads_fn)\n\n****************************************************************\n* Performing additional (non-gradient) updates during training *\n****************************************************************\n\nMany networks utilize modules, like BatchNorm, that require performing a series\nof non-gradient updates during training. tf.contrib.training.create_train_op\nallows a user to pass in a list of update_ops to call along with the gradient\nupdates.\n\n train_op = tf.contrib.training.create_train_op(\n total_loss, optimizer, update_ops)\n\nBy default, tf.contrib.training.create_train_op includes all update ops that are\npart of the `tf.GraphKeys.UPDATE_OPS` collection. 
Additionally, the\ntf.contrib.layers.batch_norm function adds the moving mean and moving variance\nupdates to this collection. Consequently, users who want to use\ntf.contrib.layers.batch_norm will not need to take any additional steps in order\nto have the moving mean and moving variance updates be computed.\n\nHowever, users with additional, specialized updates can either override the\ndefault update ops or simply add additional update ops to the\n`tf.GraphKeys.UPDATE_OPS` collection:\n\n # Force `create_train_op` to NOT use ANY update_ops:\n train_op = tf.contrib.training.create_train_op(\n total_loss,\n optimizer,\n update_ops=[])\n\n # Use an alternative set of update ops:\n train_op = tf.contrib.training.create_train_op(\n total_loss,\n optimizer,\n update_ops=my_other_update_ops)\n\n # Use a set of update ops in addition to the default updates:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update0)\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update1)\n\n train_op = tf.contrib.training.create_train_op(\n total_loss,\n optimizer)\n\n # Which is the same as:\n train_op = tf.contrib.training.create_train_op(\n total_loss,\n optimizer,\n update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS))\n\n******************************************\n* Initializing a model from a checkpoint *\n******************************************\n\nIt is common to want to 'warm-start' a model from a pre-trained checkpoint.\nOne can use a tf.Scaffold and an initializing function to do so.\n\n ...\n\n # Create the train_op\n train_op = tf.contrib.training.create_train_op(total_loss, optimizer)\n\n # Create the initial assignment op\n checkpoint_path = '/path/to/old_model_checkpoint'\n variables_to_restore = tf.contrib.framework.get_model_variables()\n init_fn = tf.contrib.framework.assign_from_checkpoint_fn(\n checkpoint_path, variables_to_restore)\n\n # Run training.\n scaffold = tf.Scaffold(init_fn=init_fn)\n tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)\n\n***************************************************************************\n* Initializing a model from a checkpoint whose variable names don't match *\n***************************************************************************\n\nAt times, a user may want to initialize a new model with values from a\ncheckpoint whose variable names do not match those of the current model. In this\ncase, one needs to create a mapping from the checkpoint variable names to the\ncurrent model variables. This requires only a small modification of the code\nabove:\n ...\n # Creates a model with two variables, var0 and var1\n predictions = MyModel(images)\n ...\n\n # Create the train_op\n train_op = tf.contrib.training.create_train_op(total_loss, optimizer)\n\n checkpoint_path = '/path/to/old_model_checkpoint'\n\n # Create the mapping:\n variables_to_restore = {\n 'name_var_0_in_checkpoint':\n tf.contrib.framework.get_unique_variable('var0'),\n 'name_var_1_in_checkpoint':\n tf.contrib.framework.get_unique_variable('var1')\n }\n init_fn = tf.contrib.framework.assign_from_checkpoint_fn(\n checkpoint_path, variables_to_restore)\n scaffold = tf.Scaffold(init_fn=init_fn)\n\n # Run training.\n tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)\n\n\n*************************************************\n* Fine-Tuning Part of a model from a checkpoint *\n*************************************************\n\nRather than initializing all of the weights of a given model, we sometimes\nonly want to restore some of the weights from a checkpoint. 
To do this, one\nneed only filter those variables to initialize as follows:\n\n ...\n\n # Create the train_op\n train_op = tf.contrib.training.create_train_op(total_loss, optimizer)\n\n checkpoint_path = '/path/to/old_model_checkpoint'\n\n # Specify the variables to restore via a list of inclusion or exclusion\n # patterns:\n variables_to_restore = tf.contrib.framework.get_variables_to_restore(\n include=[\"conv\"], exclude=[\"fc8\", \"fc9])\n # or\n variables_to_restore = tf.contrib.framework.get_variables_to_restore(\n exclude=[\"conv\"])\n\n init_fn = tf.contrib.framework.assign_from_checkpoint_fn(\n checkpoint_path, variables_to_restore)\n scaffold = tf.Scaffold(init_fn=init_fn)\n\n # Run training.\n tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)\n\n******************************************************\n* Initializing model variables from values in memory *\n******************************************************\n\nOne may want to initialize the weights of a model from values coming from an\narbitrary source (a text document, matlab file, etc). While this is technically\nfeasible using assign operations, this strategy results in the values of your\nweights being stored in the graph. For large models, this becomes prohibitively\nlarge. However, it's possible to perform this initial assignment without having\nto store the values of the initial model in the graph itself by using\nplaceholders and a feed dictionary:\n\n ...\n\n # Create the train_op\n train_op = tf.contrib.training.create_train_op(total_loss, optimizer)\n\n # Create the mapping from variable names to values:\n var0_initial_value = ReadFromDisk(...)\n var1_initial_value = ReadFromDisk(...)\n\n var_names_to_values = {\n 'var0': var0_initial_value,\n 'var1': var1_initial_value,\n }\n\n init_fn = tf.contrib.framework.assign_from_values_fn(var_names_to_values)\n scaffold = tf.Scaffold(init_fn=init_fn)\n\n # Run training.\n tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import optimizer as tf_optimizer\nfrom tensorflow.python.training import training_util\n\n# TODO(nsilberman): move add_gradients_summaries, clip_gradient_norms and\n# multiply_gradients into contrib/summaries and contrib/optimizers.py\n__all__ = [\n 'add_gradients_summaries',\n 'clip_gradient_norms',\n 'clip_gradient_norms_fn',\n 'create_train_op',\n 'multiply_gradients',\n 'train',\n]\n\n\ndef add_gradients_summaries(grads_and_vars):\n \"\"\"Add summaries to gradients.\n\n Args:\n grads_and_vars: A list of gradient to variable pairs (tuples).\n\n Returns:\n The list of created summaries.\n \"\"\"\n summaries = []\n for grad, var in grads_and_vars:\n if grad is not None:\n if isinstance(grad, ops.IndexedSlices):\n grad_values = grad.values\n else:\n grad_values = grad\n summaries.append(\n summary.histogram(var.op.name + '_gradient', grad_values))\n summaries.append(\n summary.scalar(var.op.name + 
'_gradient_norm',\n clip_ops.global_norm([grad_values])))\n else:\n logging.info('Var %s has no gradient', var.op.name)\n\n return summaries\n\n\ndef clip_gradient_norms(gradients_to_variables, max_norm):\n \"\"\"Clips the gradients by the given value.\n\n Args:\n gradients_to_variables: A list of gradient to variable pairs (tuples).\n max_norm: the maximum norm value.\n\n Returns:\n A list of clipped gradient to variable pairs.\n \"\"\"\n clipped_grads_and_vars = []\n for grad, var in gradients_to_variables:\n if grad is not None:\n if isinstance(grad, ops.IndexedSlices):\n tmp = clip_ops.clip_by_norm(grad.values, max_norm)\n grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)\n else:\n grad = clip_ops.clip_by_norm(grad, max_norm)\n clipped_grads_and_vars.append((grad, var))\n return clipped_grads_and_vars\n\n\ndef clip_gradient_norms_fn(max_norm):\n \"\"\"Returns a `transform_grads_fn` function for gradient clipping.\"\"\"\n def clip_norms(gradients_to_variables):\n return clip_gradient_norms(gradients_to_variables, max_norm)\n return clip_norms\n\n\ndef multiply_gradients(grads_and_vars, gradient_multipliers):\n \"\"\"Multiply specified gradients.\n\n Args:\n grads_and_vars: A list of gradient to variable pairs (tuples).\n gradient_multipliers: A map from either `Variables` or `Variable` op names\n to the coefficient by which the associated gradient should be scaled.\n\n Returns:\n The updated list of gradient to variable pairs.\n\n Raises:\n ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`\n is empty or None or if `gradient_multipliers` is not a dictionary.\n \"\"\"\n if not isinstance(grads_and_vars, list):\n raise ValueError('`grads_and_vars` must be a list.')\n if not gradient_multipliers:\n raise ValueError('`gradient_multipliers` is empty.')\n if not isinstance(gradient_multipliers, dict):\n raise ValueError('`gradient_multipliers` must be a dict.')\n\n multiplied_grads_and_vars = []\n for grad, var in grads_and_vars:\n if var in gradient_multipliers or var.op.name in gradient_multipliers:\n key = var if var in gradient_multipliers else var.op.name\n if grad is None:\n raise ValueError('Requested multiple of `None` gradient.')\n\n if isinstance(grad, ops.IndexedSlices):\n tmp = grad.values * constant_op.constant(\n gradient_multipliers[key], dtype=grad.dtype)\n grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)\n else:\n grad *= constant_op.constant(\n gradient_multipliers[key], dtype=grad.dtype)\n multiplied_grads_and_vars.append((grad, var))\n return multiplied_grads_and_vars\n\n\n_USE_GLOBAL_STEP = 0\n\n\ndef create_train_op(total_loss,\n optimizer,\n global_step=_USE_GLOBAL_STEP,\n update_ops=None,\n variables_to_train=None,\n transform_grads_fn=None,\n summarize_gradients=False,\n gate_gradients=tf_optimizer.Optimizer.GATE_OP,\n aggregation_method=None,\n colocate_gradients_with_ops=False,\n check_numerics=True):\n \"\"\"Creates an `Operation` that evaluates the gradients and returns the loss.\n\n Args:\n total_loss: A `Tensor` representing the total loss.\n optimizer: A tf.Optimizer to use for computing the gradients.\n global_step: A `Tensor` representing the global step variable. If left as\n `_USE_GLOBAL_STEP`, then tf.contrib.framework.global_step() is used.\n update_ops: An optional list of updates to execute. If `update_ops` is\n `None`, then the update ops are set to the contents of the\n `tf.GraphKeys.UPDATE_OPS` collection. 
If `update_ops` is not `None`, but\n it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`,\n a warning will be displayed.\n variables_to_train: an optional list of variables to train. If None, it will\n default to all tf.trainable_variables().\n transform_grads_fn: A function which takes a single argument, a list of\n gradient to variable pairs (tuples), performs any requested gradient\n updates, such as gradient clipping or multipliers, and returns the updated\n list.\n summarize_gradients: Whether or not add summaries for each gradient.\n gate_gradients: How to gate the computation of gradients. See tf.Optimizer.\n aggregation_method: Specifies the method used to combine gradient terms.\n Valid values are defined in the class `AggregationMethod`.\n colocate_gradients_with_ops: Whether or not to try colocating the gradients\n with the ops that generated them.\n check_numerics: Whether or not we apply check_numerics.\n\n Returns:\n A `Tensor` that when evaluated, computes the gradients and returns the total\n loss value.\n \"\"\"\n if global_step is _USE_GLOBAL_STEP:\n global_step = training_util.get_or_create_global_step()\n\n # Update ops use GraphKeys.UPDATE_OPS collection if update_ops is None.\n global_update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))\n if update_ops is None:\n update_ops = global_update_ops\n else:\n update_ops = set(update_ops)\n if not global_update_ops.issubset(update_ops):\n logging.warning('update_ops in create_train_op does not contain all the '\n ' update_ops in GraphKeys.UPDATE_OPS')\n\n # Make sure update_ops are computed before total_loss.\n if update_ops:\n with ops.control_dependencies(update_ops):\n barrier = control_flow_ops.no_op(name='update_barrier')\n total_loss = control_flow_ops.with_dependencies([barrier], total_loss)\n\n if variables_to_train is None:\n # Default to tf.trainable_variables()\n variables_to_train = tf_variables.trainable_variables()\n else:\n # Make sure that variables_to_train are in tf.trainable_variables()\n for v in variables_to_train:\n assert v in tf_variables.trainable_variables()\n\n assert variables_to_train\n\n # Create the gradients. 
Note that apply_gradients adds the gradient\n # computation to the current graph.\n grads = optimizer.compute_gradients(\n total_loss,\n variables_to_train,\n gate_gradients=gate_gradients,\n aggregation_method=aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops)\n\n if transform_grads_fn:\n grads = transform_grads_fn(grads)\n\n # Summarize gradients.\n if summarize_gradients:\n with ops.name_scope('summarize_grads'):\n add_gradients_summaries(grads)\n\n # Create gradient updates.\n grad_updates = optimizer.apply_gradients(grads, global_step=global_step)\n\n with ops.name_scope('train_op'):\n # Make sure total_loss is valid.\n if check_numerics:\n total_loss = array_ops.check_numerics(total_loss,\n 'LossTensor is inf or nan')\n\n # Ensure the train_tensor computes grad_updates.\n train_op = control_flow_ops.with_dependencies([grad_updates], total_loss)\n\n # Add the operation used for training to the 'train_op' collection\n train_ops = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)\n if train_op not in train_ops:\n train_ops.append(train_op)\n\n return train_op\n\n\ndef train(train_op,\n logdir,\n master='',\n is_chief=True,\n scaffold=None,\n hooks=None,\n chief_only_hooks=None,\n save_checkpoint_secs=600,\n save_summaries_steps=100,\n config=None,\n max_wait_secs=7200):\n \"\"\"Runs the training loop.\n\n Args:\n train_op: A `Tensor` that, when executed, will apply the gradients and\n return the loss value.\n logdir: The directory where the graph and checkpoints are saved.\n master: The URL of the master.\n is_chief: Specifies whether or not the training is being run by the primary\n replica during replica training.\n scaffold: An tf.train.Scaffold instance.\n hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the\n training loop.\n chief_only_hooks: List of `tf.train.SessionRunHook` instances which are run\n inside the training loop for the chief trainer only.\n save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved\n using a default checkpoint saver. If `save_checkpoint_secs` is set to\n `None`, then the default checkpoint saver isn't used.\n save_summaries_steps: The frequency, in number of global steps, that the\n summaries are written to disk using a default summary saver. If\n `save_summaries_steps` is set to `None`, then the default summary saver\n isn't used.\n config: An instance of `tf.ConfigProto`.\n max_wait_secs: Maximum time workers should wait for the session to\n become available. 
This should be kept relatively short to help detect\n incorrect code, but sometimes may need to be increased if the chief takes\n a while to start up.\n\n Returns:\n the value of the loss function after training.\n\n Raises:\n ValueError: if `logdir` is `None` and either `save_checkpoint_secs` or\n `save_summaries_steps` are `None.\n \"\"\"\n if logdir is None and is_chief:\n if save_summaries_steps:\n raise ValueError(\n 'logdir cannot be None when save_summaries_steps is not None')\n\n if save_checkpoint_secs:\n raise ValueError(\n 'logdir cannot be None when save_checkpoint_secs is not None')\n\n with monitored_session.MonitoredTrainingSession(\n master=master,\n is_chief=is_chief,\n checkpoint_dir=logdir,\n scaffold=scaffold,\n hooks=hooks,\n chief_only_hooks=chief_only_hooks,\n save_checkpoint_secs=save_checkpoint_secs,\n save_summaries_steps=save_summaries_steps,\n config=config,\n max_wait_secs=max_wait_secs) as session:\n loss = None\n while not session.should_stop():\n loss = session.run(train_op)\n return loss\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for multiple_dispatch.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.autograph.utils import multiple_dispatch\nfrom tensorflow.python.client.session import Session\nfrom tensorflow.python.framework.constant_op import constant\nfrom tensorflow.python.platform import test\n\n\nclass MultipleDispatchTest(test.TestCase):\n\n def test_dynamic_is_python(self):\n a = np.eye(3)\n also_a = a\n not_actually_a = np.eye(3)\n should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)\n should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)\n should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)\n should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)\n self.assertTrue(should_be_true1)\n self.assertTrue(should_be_true2)\n self.assertFalse(should_be_false1)\n self.assertFalse(should_be_false2)\n\n def test_dynamic_is_tf(self):\n with Session().as_default():\n a = constant([2.0])\n also_a = a\n not_actually_a = constant([2.0])\n should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)\n should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)\n should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)\n should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)\n self.assertTrue(should_be_true1)\n self.assertTrue(should_be_true2)\n self.assertFalse(should_be_false1)\n self.assertFalse(should_be_false2)\n\n def test_run_cond_python(self):\n true_fn = lambda: (2,)\n false_fn = lambda: (3,)\n self.assertEqual(multiple_dispatch.run_cond(True, true_fn, false_fn), 2)\n self.assertEqual(multiple_dispatch.run_cond(False, true_fn, false_fn), 3)\n\n def test_run_cond_tf(self):\n true_fn = lambda: 
(constant(2),)\n false_fn = lambda: (constant(3),)\n with Session() as sess:\n out = multiple_dispatch.run_cond(constant(True), true_fn, false_fn)\n self.assertEqual(sess.run(out), 2)\n out = multiple_dispatch.run_cond(constant(False), true_fn, false_fn)\n self.assertEqual(sess.run(out), 3)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Read CIFAR-10 data from pickled numpy arrays and writes TFRecords.\n\nGenerates tf.train.Example protos and writes them to TFRecord files from the\npython version of the CIFAR-10 dataset downloaded from\nhttps://www.cs.toronto.edu/~kriz/cifar.html.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport tarfile\n\nfrom absl import flags\nfrom six.moves import cPickle as pickle\nfrom six.moves import urllib\nimport tensorflow as tf\n\nCIFAR_FILENAME = 'cifar-10-python.tar.gz'\nCIFAR_DOWNLOAD_URL = 'https://www.cs.toronto.edu/~kriz/' + CIFAR_FILENAME\nCIFAR_LOCAL_FOLDER = 'cifar-10-batches-py'\n\n\ndef download_and_extract(data_dir):\n \"\"\"Download CIFAR-10 if not already downloaded.\"\"\"\n filepath = os.path.join(data_dir, CIFAR_FILENAME)\n if tf.gfile.Exists(filepath):\n return filepath\n if not tf.gfile.Exists(data_dir):\n tf.gfile.MakeDirs(data_dir)\n\n urllib.request.urlretrieve(CIFAR_DOWNLOAD_URL, filepath)\n tarfile.open(os.path.join(filepath), 'r:gz').extractall(data_dir)\n return filepath\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _get_file_names():\n \"\"\"Returns the file names expected to exist in the input_dir.\"\"\"\n file_names = {}\n file_names['train'] = ['data_batch_%d' % i for i in range(1, 5)]\n file_names['validation'] = ['data_batch_5']\n file_names['test'] = ['test_batch']\n return file_names\n\n\ndef read_pickle_from_file(filename):\n with tf.gfile.Open(filename, 'rb') as f:\n if sys.version_info >= (3, 0):\n data_dict = pickle.load(f, encoding='bytes')\n else:\n data_dict = pickle.load(f)\n return data_dict\n\n\ndef convert_to_tfrecord(input_files, output_file):\n \"\"\"Converts files with pickled data to TFRecords.\"\"\"\n print('Generating %s' % output_file)\n with tf.python_io.TFRecordWriter(output_file) as record_writer:\n for input_file in input_files:\n data_dict = read_pickle_from_file(input_file)\n data = data_dict[b'data']\n labels = data_dict[b'labels']\n num_entries_in_batch = len(labels)\n\n for i in range(num_entries_in_batch):\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'image': _bytes_feature(data[i].tobytes()),\n 'label': _int64_feature(labels[i])\n }))\n record_writer.write(example.SerializeToString())\n\n\ndef 
main(_):\n print('Download from {} and extract.'.format(CIFAR_DOWNLOAD_URL))\n download_and_extract(FLAGS.data_dir)\n file_names = _get_file_names()\n input_dir = os.path.join(FLAGS.data_dir, CIFAR_LOCAL_FOLDER)\n\n for mode, files in file_names.items():\n input_files = [os.path.join(input_dir, f) for f in files]\n output_file = os.path.join(FLAGS.data_dir, mode + '.tfrecords')\n try:\n os.remove(output_file)\n except OSError:\n pass\n convert_to_tfrecord(input_files, output_file)\n print('Done!')\n\n\nif __name__ == '__main__':\n FLAGS = flags.FLAGS\n flags.DEFINE_string(\n 'data_dir',\n default=None,\n help='Directory to download and extract CIFAR-10 to.')\n\n tf.app.run(main)\n" ]
[ [ "tensorflow.python.training.monitored_session.MonitoredTrainingSession", "tensorflow.python.framework.ops.IndexedSlices", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.training.training_util.get_or_create_global_step", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.ops.variables.trainable_variables", "tensorflow.python.summary.summary.histogram", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.clip_ops.global_norm", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.python.ops.clip_ops.clip_by_norm", "tensorflow.python.ops.control_flow_ops.with_dependencies", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.framework.ops.get_collection_ref", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.ops.array_ops.check_numerics" ], [ "numpy.eye", "tensorflow.contrib.autograph.utils.multiple_dispatch.dynamic_is", "tensorflow.contrib.autograph.utils.multiple_dispatch.run_cond", "tensorflow.python.platform.test.main", "tensorflow.python.client.session.Session", "tensorflow.contrib.autograph.utils.multiple_dispatch.dynamic_is_not", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.app.run", "tensorflow.train.BytesList", "tensorflow.train.Int64List", "tensorflow.python_io.TFRecordWriter", "tensorflow.gfile.MakeDirs", "tensorflow.gfile.Exists", "tensorflow.gfile.Open" ] ]
MathijsMul/babyai-emergent-guidance
[ "9e37535134c89bd019affa51c7f199d1672811b6" ]
[ "babyai/arguments.py" ]
[ "\"\"\"\nCommon arguments for BabyAI training scripts\n\"\"\"\n\nimport os\nimport argparse\nimport numpy as np\n\n\nclass ArgumentParser(argparse.ArgumentParser):\n\n def __init__(self):\n super().__init__()\n\n # Base arguments\n self.add_argument(\"--env\", default=None,\n help=\"name of the environment to train on (REQUIRED)\")\n self.add_argument(\"--model\", default=None,\n help=\"name of the model (default: ENV_ALGO_TIME)\")\n self.add_argument(\"--pretrained-model\", default=None,\n help='If you\\'re using a pre-trained model and want the fine-tuned one to have a new name')\n self.add_argument(\"--seed\", type=int, default=1,\n help=\"random seed; if 0, a random random seed will be used (default: 1)\")\n self.add_argument(\"--task-id-seed\", action='store_true',\n help=\"use the task id within a Slurm job array as the seed\")\n self.add_argument(\"--procs\", type=int, default=64,\n help=\"number of processes (default: 64)\")\n self.add_argument(\"--tb\", action=\"store_true\", default=False,\n help=\"log into Tensorboard\")\n\n # Training arguments\n self.add_argument(\"--log-interval\", type=int, default=1,\n help=\"number of updates between two logs (default(Mathijs): 1, used to be 10)\")\n self.add_argument(\"--save-interval\", type=int, default=1000,\n help=\"number of updates between two saves (default: 1000, 0 means no saving)\")\n self.add_argument(\"--frames\", type=int, default=int(9e10),\n help=\"number of frames of training (default: 9e10)\")\n self.add_argument(\"--patience\", type=int, default=100,\n help=\"patience for early stopping (default: 100)\")\n self.add_argument(\"--epochs\", type=int, default=1000000,\n help=\"maximum number of epochs\")\n self.add_argument(\"--frames-per-proc\", type=int, default=40,\n help=\"number of frames per process before update (default: 40)\")\n self.add_argument(\"--lr\", type=float, default=1e-4,\n help=\"learning rate (default: 1e-4)\")\n self.add_argument(\"--beta1\", type=float, default=0.9,\n help=\"beta1 for Adam (default: 0.9)\")\n self.add_argument(\"--beta2\", type=float, default=0.999,\n help=\"beta2 for Adam (default: 0.999)\")\n self.add_argument(\"--recurrence\", type=int, default=20,\n help=\"number of timesteps gradient is backpropagated (default: 20)\")\n self.add_argument(\"--optim-eps\", type=float, default=1e-5,\n help=\"Adam and RMSprop optimizer epsilon (default: 1e-5)\")\n self.add_argument(\"--optim-alpha\", type=float, default=0.99,\n help=\"RMSprop optimizer apha (default: 0.99)\")\n self.add_argument(\"--batch-size\", type=int, default=1280,\n help=\"batch size for PPO (default: 1280)\")\n self.add_argument(\"--entropy-coef\", type=float, default=0.01,\n help=\"entropy term coefficient (default: 0.01)\")\n self.add_argument(\"--dropout\", type=float, default=0.5,\n help=\"dropout probability for processed corrections (default: 0.5)\")\n\n self.add_argument(\"--save-each-epoch\", action=\"store_true\", default=False,\n help=\"store model at each epoch\")\n self.add_argument(\"--class-weights\", action=\"store_true\", default=False,\n help=\"use class weights in loss function\")\n self.add_argument(\"--compute-cic\", action=\"store_true\", default=False,\n help=\"compute and log causal influence of communication metric after each epoch\")\n\n # Model parameters\n self.add_argument(\"--image-dim\", type=int, default=128,\n help=\"dimensionality of the image embedding\")\n self.add_argument(\"--memory-dim\", type=int, default=128,\n help=\"dimensionality of the memory LSTM\")\n 
self.add_argument(\"--instr-dim\", type=int, default=128,\n help=\"dimensionality of the memory LSTM\")\n self.add_argument(\"--no-instr\", action=\"store_true\", default=False,\n help=\"don't use instructions in the model\")\n self.add_argument(\"--instr-arch\", default=\"gru\",\n help=\"arch to encode instructions, possible values: gru, bigru, conv, bow (default: gru)\")\n self.add_argument(\"--no-mem\", action=\"store_true\", default=False,\n help=\"don't use memory in the model\")\n self.add_argument(\"--arch\", default='expert_filmcnn',\n help=\"image embedding architecture\")\n self.add_argument(\"--learner\", action=\"store_true\", default=False,\n help=\"use ordinary learner\")\n\n # Corrector parameters\n self.add_argument(\"--corrector\", action=\"store_true\", default=False,\n help=\"use correction module\")\n self.add_argument(\"--corr-length\", type=int, default=2,\n help=\"length of correction messages (max length if --var-corr-length true)\")\n self.add_argument(\"--corr-own-vocab\", action=\"store_true\", default=False,\n help=\"corrector uses its own vocabulary instead of instruction vocabulary\")\n self.add_argument(\"--corr-embedding-dim\", type=int, default=0,\n help=\"embedding dimensionality for corrector\")\n self.add_argument(\"--corr-vocab-size\", type=int, default=3,\n help=\"vocabulary size of corrector\")\n self.add_argument(\"--pretrained-corrector\", type=str, default=None,\n help=\"location of pretrained corrector to use and freeze\")\n self.add_argument(\"--show-corrections\", action=\"store_true\", default=False,\n help=\"show correction messages\")\n self.add_argument(\"--corrector-frozen\", action=\"store_true\", default=False,\n help=\"freeze pretrained corrector\")\n self.add_argument(\"--random-corrector\", action=\"store_true\", default=False,\n help=\"randomize correction messages\")\n self.add_argument(\"--var-corr-length\", action=\"store_true\", default=False,\n help=\"variable length correction messages with penalty for longer ones\")\n self.add_argument(\"--corr-loss-coef\", type=float, default=0.1,\n help=\"correction loss coefficient (untested default: 0.1)\")\n self.add_argument(\"--weigh-corrections\", action=\"store_true\", default=False,\n help=\"weigh corrections depending on entropy of previous timestep\")\n self.add_argument(\"--correction-weight-loss-coef\", type=float, default=1.0,\n help=\"coefficient for correction weight loss\")\n\n # Validation parameters\n self.add_argument(\"--val-seed\", type=int, default=0,\n help=\"seed for environment used for validation (default: 0)\")\n self.add_argument(\"--val-interval\", type=int, default=1,\n help=\"number of epochs between two validation checks (default: 1)\")\n self.add_argument(\"--val-episodes\", type=int, default=500,\n help=\"number of episodes used to evaluate the agent, and to evaluate validation accuracy\")\n\n def parse_args(self):\n \"\"\"\n Parse the arguments and perform some basic validation\n \"\"\"\n\n args = super().parse_args()\n\n # Set seed for all randomness sources\n if args.seed == 0:\n args.seed = np.random.randint(10000)\n if args.task_id_seed:\n args.seed = int(os.environ['SLURM_ARRAY_TASK_ID'])\n print('set seed to {}'.format(args.seed))\n\n # TODO: more validation\n\n return args\n" ]
[ [ "numpy.random.randint" ] ]
jkxing/pytorch3d
[ "71dbebe8010a0dac3e56be464778aa48fbd3bcd3" ]
[ "tests/test_laplacian_matrices.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport unittest\n\nimport torch\nfrom common_testing import TestCaseMixin, get_random_cuda_device\nfrom pytorch3d.ops import cot_laplacian, laplacian, norm_laplacian\nfrom pytorch3d.structures.meshes import Meshes\n\n\nclass TestLaplacianMatrices(TestCaseMixin, unittest.TestCase):\n def setUp(self) -> None:\n super().setUp()\n torch.manual_seed(1)\n\n def init_mesh(self) -> Meshes:\n V, F = 32, 64\n device = get_random_cuda_device()\n # random vertices\n verts = torch.rand((V, 3), dtype=torch.float32, device=device)\n # random valid faces (no self circles, e.g. (v0, v0, v1))\n faces = torch.stack([torch.randperm(V) for f in range(F)], dim=0)[:, :3]\n faces = faces.to(device=device)\n return Meshes(verts=[verts], faces=[faces])\n\n def test_laplacian(self):\n mesh = self.init_mesh()\n verts = mesh.verts_packed()\n edges = mesh.edges_packed()\n V, E = verts.shape[0], edges.shape[0]\n\n L = laplacian(verts, edges)\n\n Lnaive = torch.zeros((V, V), dtype=torch.float32, device=verts.device)\n for e in range(E):\n e0, e1 = edges[e]\n Lnaive[e0, e1] = 1\n # symetric\n Lnaive[e1, e0] = 1\n\n deg = Lnaive.sum(1).view(-1, 1)\n deg[deg > 0] = 1.0 / deg[deg > 0]\n Lnaive = Lnaive * deg\n diag = torch.eye(V, dtype=torch.float32, device=mesh.device)\n Lnaive.masked_fill_(diag > 0, -1)\n\n self.assertClose(L.to_dense(), Lnaive)\n\n def test_cot_laplacian(self):\n mesh = self.init_mesh()\n verts = mesh.verts_packed()\n faces = mesh.faces_packed()\n V = verts.shape[0]\n\n eps = 1e-12\n\n L, inv_areas = cot_laplacian(verts, faces, eps=eps)\n\n Lnaive = torch.zeros((V, V), dtype=torch.float32, device=verts.device)\n inv_areas_naive = torch.zeros((V, 1), dtype=torch.float32, device=verts.device)\n\n for f in faces:\n v0 = verts[f[0], :]\n v1 = verts[f[1], :]\n v2 = verts[f[2], :]\n A = (v1 - v2).norm()\n B = (v0 - v2).norm()\n C = (v0 - v1).norm()\n s = 0.5 * (A + B + C)\n\n face_area = (s * (s - A) * (s - B) * (s - C)).clamp_(min=1e-12).sqrt()\n inv_areas_naive[f[0]] += face_area\n inv_areas_naive[f[1]] += face_area\n inv_areas_naive[f[2]] += face_area\n\n A2, B2, C2 = A * A, B * B, C * C\n cota = (B2 + C2 - A2) / face_area / 4.0\n cotb = (A2 + C2 - B2) / face_area / 4.0\n cotc = (A2 + B2 - C2) / face_area / 4.0\n\n Lnaive[f[1], f[2]] += cota\n Lnaive[f[2], f[0]] += cotb\n Lnaive[f[0], f[1]] += cotc\n # symetric\n Lnaive[f[2], f[1]] += cota\n Lnaive[f[0], f[2]] += cotb\n Lnaive[f[1], f[0]] += cotc\n\n idx = inv_areas_naive > 0\n inv_areas_naive[idx] = 1.0 / inv_areas_naive[idx]\n\n self.assertClose(inv_areas, inv_areas_naive)\n self.assertClose(L.to_dense(), Lnaive)\n\n def test_norm_laplacian(self):\n mesh = self.init_mesh()\n verts = mesh.verts_packed()\n edges = mesh.edges_packed()\n V, E = verts.shape[0], edges.shape[0]\n\n eps = 1e-12\n\n L = norm_laplacian(verts, edges, eps=eps)\n\n Lnaive = torch.zeros((V, V), dtype=torch.float32, device=verts.device)\n for e in range(E):\n e0, e1 = edges[e]\n v0 = verts[e0]\n v1 = verts[e1]\n\n w01 = 1.0 / ((v0 - v1).norm() + eps)\n Lnaive[e0, e1] += w01\n Lnaive[e1, e0] += w01\n\n self.assertClose(L.to_dense(), Lnaive)\n" ]
[ [ "torch.manual_seed", "torch.rand", "torch.randperm", "torch.zeros", "torch.eye" ] ]
leimao/Logistic_Regression_Python
[ "a64ed85d0bea8010d85e9c1e056a3af09b2e43c4" ]
[ "utils.py" ]
[ "\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef train_test_splitter(X, y, ratio = 0.8, random_seed = 0):\n\n assert(len(X) == len(y)), \"The number of points in feature matrix and target vector should be the same.\"\n np.random.seed(random_seed)\n \n n = len(y)\n idx = np.arange(n)\n np.random.shuffle(idx)\n\n train_idx = idx[:int(n * ratio)]\n test_idx = idx[int(n * ratio):]\n\n return X[train_idx,:], X[test_idx,:], y[train_idx], y[test_idx]\n\ndef error_rate(y, y_predicted):\n \n assert len(y) == len(y_predicted), \"The number of targets and predictions should be the same.\"\n assert len(y) != 0, \"The number of targets and predictions should not be zero.\"\n \n return np.sum(np.array(y) != np.array(y_predicted)) / len(y)\n\ndef plot_losses(losses, savefig = False, showfig = False, filename = 'loss.png'):\n\n fig = plt.figure(figsize = (12,8))\n plt.plot(np.arange(len(losses)), losses, color = 'r', marker = 'o', label = 'Loss')\n plt.legend()\n plt.ylabel('Loss')\n plt.xlabel('Number of Iterations')\n\n if savefig:\n fig.savefig(filename, format = 'png', dpi = 600, bbox_inches = 'tight')\n if showfig:\n plt.show()\n plt.close()\n\n return " ]
[ [ "numpy.random.shuffle", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "numpy.random.seed", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.close", "numpy.array", "matplotlib.pyplot.xlabel" ] ]
prkhrv/Python_and_the_Web
[ "6846334c4151ee94107ef393cbb5e8bc8f6a2e4b" ]
[ "Scripts/Web_Scrappers/cricketmonthly_articles/main.py" ]
[ "import pandas as pd\nimport re\nimport requests as rq\nfrom bs4 import BeautifulSoup\n\nheader = {'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'}\nr = rq.get(\"https://www.thecricketmonthly.com/\", headers=header)\nsoup = BeautifulSoup(r.content, 'html.parser')\nmain_sec = soup.find('section', attrs={'class' : re.compile('col-lhs lhs_content')})\narticle = main_sec.find_all('article', attrs={'class' : re.compile('col-1-1 module')})\nabout=[]\nlink=[]\nsummary=[]\nprint('Fetching Latest Articles...')\nfor a in article:\n tag = a.find('h1')\n about.append(tag.text)\n link.append('https://www.thecricketmonthly.com'+tag.a['href'])\n tag = a.find('p')\n summary.append(tag.text)\nprint('Done!')\n\nmain_sec = soup.find('ul', attrs={'class' : re.compile('writer-ul')})\nli = main_sec.find_all('li')\nlinkauth=[]\nauth=[]\nheadline=[]\nsubhead=[]\nprint('Fetching articles of top Writers...')\nfor l in li:\n linkauth.append(l.a['href'])\n spn = l.find('span', attrs={'class' : re.compile('wname')})\n auth.append(spn.text)\n headline.append(l.a.text)\n spn = l.find('span', attrs={'class' : re.compile('subheadline')})\n subhead.append(spn.text)\nprint('Done!')\n\nprint('Processing Data...')\nla = {'About' : about, 'Short Summary' : summary, 'Further Reading' : link}\ntw = {'Writer' : auth, 'Headline' : headline, 'Sub-headline' : subhead, 'Further Reading' : linkauth}\nlatest_articles = pd.DataFrame.from_dict(la)\ntop_writers = pd.DataFrame.from_dict(tw)\nprint('Publishing csv...')\ntop_writers.to_csv('Articles from Top Writers.csv', index=False)\nlatest_articles.to_csv('Latest Articles from Cricket Monthly.csv', index=False)\nprint(\"Your output can be found in form of two files 'Articles from Top Writers.csv' and 'Latest Articles from Cricket Monthly.csv'\")\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
freshjang/MyKiwoom
[ "6342ec7ba8da55194bb473f9052d87f7fa1a640e" ]
[ "trader/strategy.py" ]
[ "import os\nimport sys\nimport psutil\nimport numpy as np\nimport pandas as pd\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom utility.setting import ui_num, DICT_SET, columns_gj\nfrom utility.static import now, timedelta_sec, thread_decorator, strf_time, float2str1p6\n\n\nclass Strategy:\n def __init__(self, qlist):\n \"\"\"\n 0 1 2 3 4 5 6 7 8 9 10 11\n windowQ, traderQ, receivQ, stgQ, soundQ, queryQ, teleQ, hoga1Q, hoga2Q, chart1Q, chart2Q, chart3Q,\n chart4Q, chart5Q, chart6Q, chart7Q, chart8Q, chart9Q, chart10Q, tick1Q, tick2Q, tick3Q, tick4Q\n 12 13 14 15 16 17 18 19 20 21 22\n \"\"\"\n self.windowQ = qlist[0]\n self.traderQ = qlist[1]\n self.stgQ = qlist[3]\n\n self.list_buy = [] # 매수주문리스트\n self.list_sell = [] # 매도주문리스트\n self.int_tujagm = 0 # 종목당 투자금\n self.startjjstg = False # 장중전략\n\n self.dict_gsjm = {} # key: 종목코드, value: DataFrame\n self.dict_data = {} # key: 종목코드, value: list\n self.dict_high = {} # key: 종목코드, value: float\n self.dict_time = {\n '관심종목': now(),\n '부가정보': now(),\n '연산시간': now()\n }\n self.dict_intg = {\n '스레드': 0,\n '시피유': 0.,\n '메모리': 0.\n }\n\n self.Start()\n\n def Start(self):\n while True:\n data = self.stgQ.get()\n if type(data) == int:\n self.int_tujagm = data\n elif type(data) == list:\n if len(data) == 2:\n self.UpdateList(data[0], data[1])\n elif len(data) == 38:\n self.BuyStrategy(data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8],\n data[9], data[10], data[11], data[12], data[13], data[14], data[15], data[16],\n data[17], data[18], data[19], data[20], data[21], data[22], data[23], data[24],\n data[25], data[26], data[27], data[28], data[29], data[30], data[31], data[32],\n data[33], data[34], data[35], data[36], data[37])\n elif len(data) == 6:\n self.SellStrategy(data[0], data[1], data[2], data[3], data[4], data[5])\n elif data == '전략프로세스종료':\n break\n\n if now() > self.dict_time['관심종목']:\n self.windowQ.put([ui_num['관심종목'], self.dict_gsjm])\n self.dict_time['관심종목'] = timedelta_sec(1)\n if now() > self.dict_time['부가정보']:\n self.UpdateInfo()\n self.dict_time['부가정보'] = timedelta_sec(2)\n\n self.windowQ.put([1, '시스템 명령 실행 알림 - 전략 연산 프로세스 종료'])\n sys.exit()\n\n def UpdateList(self, gubun, code):\n if '조건진입' in gubun:\n if code not in self.dict_gsjm.keys():\n if int(strf_time('%H%M%S')) < 100000:\n data = np.zeros((DICT_SET['장초평균값계산틱수'] + 2, len(columns_gj))).tolist()\n else:\n data = np.zeros((DICT_SET['장중평균값계산틱수'] + 2, len(columns_gj))).tolist()\n df = pd.DataFrame(data, columns=columns_gj)\n self.dict_gsjm[code] = df.copy()\n elif gubun == '조건이탈':\n if code in self.dict_gsjm.keys():\n del self.dict_gsjm[code]\n elif gubun in ['매수완료', '매수취소']:\n if code in self.list_buy:\n self.list_buy.remove(code)\n elif gubun in ['매도완료', '매도취소']:\n if code in self.list_sell:\n self.list_sell.remove(code)\n if code in self.dict_high.keys():\n del self.dict_high[code]\n\n def BuyStrategy(self, 현재가, 시가, 고가, 저가, 등락율, 당일거래대금, 체결강도,\n 초당매수수량, 초당매도수량, VI해제시간, VI아래5호가, 매도총잔량, 매수총잔량,\n 매도호가5, 매도호가4, 매도호가3, 매도호가2, 매도호가1, 매수호가1, 매수호가2, 매수호가3, 매수호가4, 매수호가5,\n 매도잔량5, 매도잔량4, 매도잔량3, 매도잔량2, 매도잔량1, 매수잔량1, 매수잔량2, 매수잔량3, 매수잔량4, 매수잔량5,\n 종목코드, 체결시간, 틱수신시간, 종목명, 잔고종목):\n if 종목코드 not in self.dict_gsjm.keys():\n return\n\n self.CheckStrategy()\n\n 고저평균 = round((고가 + 저가) / 2)\n 고저평균대비등락율 = round((현재가 / 고저평균 - 1) * 100, 2)\n 직전당일거래대금 = self.dict_gsjm[종목코드]['당일거래대금'][0]\n 초당거래대금 = 0 if 직전당일거래대금 == 0 else int(당일거래대금 - 직전당일거래대금)\n\n 구분 = '장초' if int(strf_time('%H%M%S')) < 100000 else '장중'\n 평균값계산틱수 = DICT_SET[f'{구분}평균값계산틱수']\n 
평균값인덱스 = 평균값계산틱수 + 1\n\n self.dict_gsjm[종목코드] = self.dict_gsjm[종목코드].shift(1)\n self.dict_gsjm[종목코드].at[0] = 등락율, 고저평균대비등락율, 초당거래대금, 당일거래대금, 체결강도, 0.\n if self.dict_gsjm[종목코드]['체결강도'][평균값계산틱수] != 0.:\n 초당거래대금평균 = int(self.dict_gsjm[종목코드]['초당거래대금'][1:평균값인덱스].mean())\n 체결강도평균 = round(self.dict_gsjm[종목코드]['체결강도'][1:평균값인덱스].mean(), 2)\n 최고체결강도 = round(self.dict_gsjm[종목코드]['체결강도'][1:평균값인덱스].max(), 2)\n self.dict_gsjm[종목코드].at[평균값인덱스] = 0., 0., 초당거래대금평균, 0, 체결강도평균, 최고체결강도\n\n 매수 = True\n 직전체결강도 = self.dict_gsjm[종목코드]['체결강도'][1]\n self.dict_data[종목코드] = [\n 현재가, 시가, 고가, 저가, 등락율, 고저평균대비등락율, 당일거래대금, 초당거래대금, 초당거래대금평균, 체결강도,\n 체결강도평균, 최고체결강도, 직전체결강도, 초당매수수량, 초당매도수량, VI해제시간, VI아래5호가, 매도총잔량, 매수총잔량,\n 매도호가5, 매도호가4, 매도호가3, 매도호가2, 매도호가1, 매수호가1, 매수호가2, 매수호가3, 매수호가4, 매수호가5,\n 매도잔량5, 매도잔량4, 매도잔량3, 매도잔량2, 매도잔량1, 매수잔량1, 매수잔량2, 매수잔량3, 매수잔량4, 매수잔량5\n ]\n\n if 잔고종목:\n return\n if 종목코드 in self.list_buy:\n return\n\n # 전략 비공개\n\n if 매수:\n 매수수량 = int(self.int_tujagm / 현재가)\n if 매수수량 > 0:\n 남은수량 = 매수수량\n 직전남은수량 = 매수수량\n 매수금액 = 0\n 호가정보 = {매도호가1: 매도잔량1}\n for 매도호가, 매도잔량 in 호가정보.items():\n 남은수량 -= 매도잔량\n if 남은수량 <= 0:\n 매수금액 += 매도호가 * 직전남은수량\n break\n else:\n 매수금액 += 매도호가 * 매도잔량\n 직전남은수량 = 남은수량\n if 남은수량 <= 0:\n 예상체결가 = round(매수금액 / 매수수량, 2)\n self.list_buy.append(종목코드)\n self.traderQ.put(['매수', 종목코드, 종목명, 예상체결가, 매수수량])\n\n if now() > self.dict_time['연산시간']:\n gap = float2str1p6((now() - 틱수신시간).total_seconds())\n self.windowQ.put([1, f'전략스 연산 시간 알림 - 수신시간과 연산시간의 차이는 [{gap}]초입니다.'])\n self.dict_time['연산시간'] = timedelta_sec(60)\n\n def SellStrategy(self, 종목코드, 종목명, 수익률, 보유수량, 현재가, 매수시간):\n if 종목코드 not in self.dict_gsjm.keys() or 종목코드 not in self.dict_data.keys():\n return\n if 종목코드 in self.list_sell:\n return\n\n 매도 = False\n 구분 = '장초' if int(strf_time('%H%M%S')) < 100000 else '장중'\n 현재가, 시가, 고가, 저가, 등락율, 고저평균대비등락율, 당일거래대금, 초당거래대금, 초당거래대금평균, 체결강도, \\\n 체결강도평균, 최고체결강도, 직전체결강도, 초당매수수량, 초당매도수량, VI해제시간, VI아래5호가, 매도총잔량, 매수총잔량, \\\n 매도호가5, 매도호가4, 매도호가3, 매도호가2, 매도호가1, 매수호가1, 매수호가2, 매수호가3, 매수호가4, 매수호가5, \\\n 매도잔량5, 매도잔량4, 매도잔량3, 매도잔량2, 매도잔량1, 매수잔량1, 매수잔량2, 매수잔량3, 매수잔량4, 매수잔량5 = \\\n self.dict_data[종목코드]\n\n if 종목코드 not in self.dict_high.keys():\n self.dict_high[종목코드] = 수익률\n elif 수익률 > self.dict_high[종목코드]:\n self.dict_high[종목코드] = 수익률\n 최고수익률 = self.dict_high[종목코드]\n\n \"\"\" 매도 조건 예시 \"\"\"\n if 수익률 <= -2 or 수익률 >= 3:\n 매도 = True\n\n # 전략 비공개\n\n if 매도:\n 남은수량 = 보유수량\n 직전남은수량 = 보유수량\n 매도금액 = 0\n 호가정보 = {매수호가1: 매수잔량1, 매수호가2: 매수잔량2, 매수호가3: 매수잔량3, 매수호가4: 매수잔량4, 매수호가5: 매수잔량5}\n for 매수호가, 매수잔량 in 호가정보.items():\n 남은수량 -= 매수잔량\n if 남은수량 <= 0:\n 매도금액 += 매수호가 * 직전남은수량\n break\n else:\n 매도금액 += 매수호가 * 매수잔량\n 직전남은수량 = 남은수량\n if 남은수량 <= 0:\n 예상체결가 = round(매도금액 / 보유수량, 2)\n self.list_sell.append(종목코드)\n self.traderQ.put(['매도', 종목코드, 종목명, 예상체결가, 보유수량])\n\n def CheckStrategy(self):\n if int(strf_time('%H%M%S')) >= 100000 and not self.startjjstg:\n for code in list(self.dict_gsjm.keys()):\n data = np.zeros((DICT_SET['장중평균값계산틱수'] + 2, len(columns_gj))).tolist()\n df = pd.DataFrame(data, columns=columns_gj)\n self.dict_gsjm[code] = df.copy()\n self.startjjstg = True\n\n @thread_decorator\n def UpdateInfo(self):\n info = [6, self.dict_intg['메모리'], self.dict_intg['스레드'], self.dict_intg['시피유']]\n self.windowQ.put(info)\n self.UpdateSysinfo()\n\n def UpdateSysinfo(self):\n p = psutil.Process(os.getpid())\n self.dict_intg['메모리'] = round(p.memory_info()[0] / 2 ** 20.86, 2)\n self.dict_intg['스레드'] = p.num_threads()\n self.dict_intg['시피유'] = round(p.cpu_percent(interval=2) / 2, 2)\n" ]
[ [ "pandas.DataFrame" ] ]
Khan-Xu/Pyrod
[ "3ee62e3d6037328a010d9340bf1e8ff991f48414" ]
[ "tool/tools.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 15 21:50:58 2018\r\n\r\n@author: USER\r\n\"\"\"\r\n\r\n# Codes are free to use. Do whatever you want\r\n\r\nfrom __future__ import absolute_import\r\n\r\n\"\"\"Read raw data\"\"\"\r\n\r\n####################### LIBRARY #############################\r\n\r\n# exceptions library\r\nfrom exceptions import (Data_Format_Exception,\r\n Data_Match_Exception)\r\n\r\n# Python stdlib imports\r\nimport datetime\r\nfrom math import factorial\r\n\r\n# data processing library\r\nimport numpy as np\r\n\r\n# pyrod library\r\n\r\n####################### CONSTANT ############################\r\n\r\n# constant \r\n\r\n####################### FUNCTIONS ###########################\r\n\r\n'.......................optimise.........................'\r\n\r\n# f - fitting data\r\n# y - experiment data\r\n# mask - mask data\r\n\r\ndef R_square(f, y, mask):\r\n \r\n if not len(f) == len(y) == len(mask):\r\n raise Data_Match_Exception('Please input equal length')\r\n \r\n def nplist(data):\r\n \r\n # check and transform data\r\n try:\r\n \r\n # check np array\r\n if isinstance(data, np.ndarray):\r\n pass\r\n # check list\r\n elif isinstance(data, list):\r\n rl = np.array(data)\r\n # check np mat\r\n elif isinstance(data, np.matrix):\r\n rl = np.asarray(data).reshape(-1)\r\n # for other unpoackable datatype\r\n else:\r\n # init a list first\r\n l = []\r\n # unpack raw data with for\r\n for e in data:\r\n l.append(e)\r\n # trans to np array\r\n rl = np.array(l)\r\n \r\n # unknown type\r\n except Data_Format_Exception:\r\n \r\n print('unknown data type')\r\n \r\n return rl\r\n\r\n # tranform to np array; apply mask \r\n rf, ry = nplist(f)*nplist(mask), nplist(y)*nplist(mask)\r\n\r\n # calculate r square\r\n ss_tot = np.sum((ry - np.sum(ry)/len(ry))**2)\r\n ss_res = np.sum((ry - rf)**2)\r\n \r\n r2 = 1 - ss_res/ss_tot\r\n \r\n return r2\r\n\r\n\r\ndef opt_step_brute(func,x0_range,grid_size = 10,step = 2):\r\n \r\n \"\"\"\r\n Brute method is much too slow and big.\r\n However, its usefull and simple. 
To improve it, we try to step it\r\n \r\n x0_range: range of variable, [x1-,x1+],[x2-,x2+]\r\n currently,only two axes are avaialble\r\n \"\"\"\r\n # current step is 3\r\n step = 3\r\n \r\n # grid_size and step have to be integer\r\n try:\r\n grid_size = int(grid_size)\r\n step = int(step)\r\n \r\n except ValueError:\r\n raise ValueError(\"grid_size and step have to be of type int\")\r\n \r\n # one dimensional step brute method\r\n if len(x0_range) == 1:\r\n \r\n # store func(grid_data) result\r\n grid_list0 = []\r\n x0 = np.linspace(x0_range[0][0],x0_range[0][1],grid_size)\r\n \r\n # func(grid_data)\r\n for px in range(grid_size):\r\n grid_list0.append(func(x0[px]))\r\n # store min in step1\r\n min_idx = np.argmin(grid_list0)\r\n \r\n # continue step2\r\n grid_list1 = []\r\n x1 = x0[min_idx]\r\n delta = (abs(x0_range[0][1] - x0_range[0][0]))/grid_size\r\n \r\n x2 = np.linspace(x1-delta,x1+delta,grid_size)\r\n for sx in range(grid_size):\r\n grid_list1.append(func(x2[sx]))\r\n \r\n min_step2 = x2[np.argmin(grid_list1)]\r\n \r\n elif len(x0_range) == 2:\r\n \r\n # step1: grid the x0_range\r\n min_step1 = []\r\n au = np.linspace(x0_range[0][0],x0_range[0][1],grid_size)\r\n av = np.linspace(x0_range[1][0],x0_range[1][1],grid_size)\r\n \r\n # find minimum in xu and xv grid\r\n def grid_min(xu,xv):\r\n \r\n x0_grid = np.meshgrid(xu, xv)\r\n \r\n #grid list\r\n grid_list = np.mat(np.zeros([grid_size**2,3]))\r\n idx = 0\r\n \r\n # pu-- for postion in u axes\r\n for pu in range(grid_size):\r\n # pv--for postion in v axes\r\n for pv in range(grid_size):\r\n \r\n grid_list[idx,0] = x0_grid[0][pu,pv]\r\n grid_list[idx,1] = x0_grid[1][pu,pv]\r\n grid_list[idx,2] = func([x0_grid[0][pu,pv],\r\n x0_grid[1][pu,pv]])\r\n idx = idx + 1\r\n # find the minimum in step1\r\n min_idx = np.argmin(grid_list[:,2])\r\n \r\n return grid_list[min_idx,:]\r\n \r\n # append the firt minimum before rocking\r\n min_step1.append(grid_min(au,av))\r\n \r\n # start rocking, try to avoid local minmum\r\n bu = au - (au[1]-au[0])/2\r\n bv = av - (av[1]-av[0])/2\r\n \r\n min_step1.append(grid_min(bu,bv))\r\n \r\n # step 2\r\n # step 2 new x range\r\n u_min = np.min([min_step1[0][0,0],\r\n min_step1[1][0,0]])\r\n u_max = np.max([min_step1[0][0,0],\r\n min_step1[1][0,0]])\r\n deta_u = u_max - u_min\r\n v_min = np.min([min_step1[0][0,1],\r\n min_step1[1][0,1]])\r\n v_max = np.max([min_step1[0][0,1],\r\n min_step1[1][0,1]])\r\n deta_v = v_max - v_min\r\n # new u and v\r\n cu = np.linspace(u_min-deta_u, u_min+deta_u, grid_size)\r\n cv = np.linspace(v_min-deta_v, v_min+deta_v, grid_size)\r\n \r\n min_step2 = grid_min(cu,cv).tolist()\r\n \r\n return min_step2\r\n \r\n \r\n'......................smooth.........................'\r\n\r\ndef savitzky_golay(y, window_size, order, deriv=0, rate=1):\r\n \r\n \"\"\" \r\n Smooth (and optionally differentiate) data with a Savitzky-Golay filter.\r\n The Savitzky-Golay filter removes high frequency noise from data.\r\n It has the advantage of preserving the original shape and\r\n features of the signal better than other types of filtering\r\n approaches, such as moving averages techniques.\r\n\r\n ----------\r\n .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of\r\n Data by Simplified Least Squares Procedures. Analytical\r\n Chemistry, 1964, 36 (8), pp 1627-1639.\r\n .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing\r\n W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. 
Flannery\r\n Cambridge University Press ISBN-13: 9780521880688\r\n \"\"\"\r\n \r\n # integer value\r\n try:\r\n window_size = np.abs(np.int(window_size))\r\n order = np.abs(np.int(order))\r\n except ValueError:\r\n raise ValueError(\"window_size and order have to be of type int\")\r\n \r\n if window_size % 2 != 1 or window_size < 1:\r\n raise TypeError(\"window_size size must be a positive odd number\")\r\n if window_size < order + 2:\r\n raise TypeError(\"window_size is too small for the polynomials order\")\r\n \r\n order_range = range(order+1)\r\n half_window = (window_size -1) // 2\r\n \r\n # precompute coefficients\r\n \r\n b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])\r\n m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)\r\n \r\n # pad the signal at the extremes with\r\n # values taken from the signal itself\r\n \r\n firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )\r\n lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])\r\n y = np.concatenate((firstvals, y, lastvals))\r\n \r\n return np.convolve( m[::-1], y, mode='valid')\r\n\r\n######################## CLASSS #############################\r\n\r\n " ]
[ [ "numpy.sum", "numpy.zeros", "numpy.argmin", "numpy.abs", "numpy.asarray", "numpy.max", "numpy.min", "numpy.array", "numpy.linalg.pinv", "numpy.concatenate", "numpy.meshgrid", "numpy.linspace", "numpy.convolve", "numpy.int" ] ]
miguelusque/NVTabular
[ "76e63d9df7b90433d552606e9cf87bd61d7eee3b" ]
[ "nvtabular/io/csv.py" ]
[ "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport functools\n\nimport dask.dataframe as dd\nimport dask_cudf\nimport numpy as np\nfrom dask.bytes import read_bytes\nfrom dask.utils import parse_bytes\nfrom fsspec.core import get_fs_token_paths\nfrom fsspec.utils import infer_compression\n\nfrom .dataset_engine import DatasetEngine\n\n\nclass CSVDatasetEngine(DatasetEngine):\n \"\"\"CSVDatasetEngine\n\n Thin wrapper around dask_cudf.read_csv.\n \"\"\"\n\n def __init__(self, paths, part_size, storage_options=None, cpu=False, **kwargs):\n super().__init__(paths, part_size, cpu=cpu, storage_options=storage_options)\n self._meta = {}\n self.csv_kwargs = kwargs\n self.csv_kwargs[\"storage_options\"] = storage_options\n\n # CSV reader needs a list of files\n # (Assume flat directory structure if this is a dir)\n if len(self.paths) == 1 and self.fs.isdir(self.paths[0]):\n self.paths = self.fs.glob(self.fs.sep.join([self.paths[0], \"*\"]))\n\n def to_ddf(self, columns=None, cpu=None):\n\n # Check if we are using cpu\n cpu = self.cpu if cpu is None else cpu\n if cpu:\n ddf = dd.read_csv(self.paths, blocksize=self.part_size, **self.csv_kwargs)\n else:\n ddf = dask_cudf.read_csv(self.paths, chunksize=self.part_size, **self.csv_kwargs)\n if columns:\n ddf = ddf[columns]\n return ddf\n\n @property\n @functools.lru_cache(1)\n def _file_partition_map(self):\n ind = 0\n _pp_map = {}\n for path, blocks in zip(\n *_byte_block_counts(\n self.paths,\n self.part_size,\n **self.csv_kwargs,\n )\n ):\n _pp_map[path.split(self.fs.sep)[-1]] = np.arange(ind, ind + blocks)\n ind += blocks\n return _pp_map\n\n def to_cpu(self):\n self.cpu = True\n\n def to_gpu(self):\n self.cpu = False\n\n\ndef _byte_block_counts(\n urlpath,\n blocksize,\n lineterminator=None,\n compression=\"infer\",\n storage_options=None,\n **kwargs,\n):\n \"\"\"Return a list of paths and block counts.\n\n Logic copied from dask.bytes.read_bytes\n \"\"\"\n\n if lineterminator is not None and len(lineterminator) == 1:\n kwargs[\"lineterminator\"] = lineterminator\n else:\n lineterminator = \"\\n\"\n\n if compression == \"infer\":\n paths = get_fs_token_paths(urlpath, mode=\"rb\", storage_options=storage_options)[2]\n compression = infer_compression(paths[0])\n\n if isinstance(blocksize, str):\n blocksize = parse_bytes(blocksize)\n if blocksize and compression:\n blocksize = None\n\n b_out = read_bytes(\n urlpath,\n delimiter=lineterminator.encode(),\n blocksize=blocksize,\n sample=False,\n compression=compression,\n include_path=True,\n **(storage_options or {}),\n )\n _, values, paths = b_out\n\n if not isinstance(values[0], (tuple, list)):\n values = [values]\n\n return paths, [len(v) for v in values]\n" ]
[ [ "numpy.arange" ] ]
raamana/cca_zoo
[ "7137918a6bac098ec20ba998d1774d5335c178dd" ]
[ "cca_zoo/data/simulated.py" ]
[ "import itertools\nfrom typing import List, Union\n\nimport numpy as np\nfrom scipy import linalg\nfrom scipy.linalg import block_diag\n\nfrom ..utils.check_values import _process_parameter\n\n\ndef generate_covariance_data(n: int, view_features: List[int], latent_dims: int = 1,\n view_sparsity: List[Union[int, float]] = None,\n correlation: Union[List[float], float] = 1,\n structure: Union[str, List[str]] = None, sigma: List[float] = None, decay: float = 0.5,\n positive=None):\n \"\"\"\n Function to generate CCA dataset with defined population correlation\n\n :param view_sparsity: level of sparsity in features in each view either as number of active variables or percentage active\n :param view_features: number of features in each view\n :param n: number of samples\n :param latent_dims: number of latent dimensions\n :param signal: correlation\n :param structure: within view covariance structure\n :param sigma: gaussian sigma\n :param decay: ratio of second signal to first signal\n :return: tuple of numpy arrays: view_1, view_2, true weights from view 1, true weights from view 2, overall covariance structure\n\n :Example:\n\n >>> from cca_zoo.data import generate_covariance_data\n >>> [train_view_1,train_view_2],[true_weights_1,true_weights_2]=generate_covariance_data(200,[10,10],latent_dims=1,correlation=1)\n \"\"\"\n structure = _process_parameter('structure', structure, 'identity', len(view_features))\n view_sparsity = _process_parameter('view_sparsity', view_sparsity, 1, len(view_features))\n positive = _process_parameter('positive', positive, False, len(view_features))\n sigma = _process_parameter('sigma', sigma, 0.5, len(view_features))\n completed = False\n while not completed:\n try:\n mean = np.zeros(sum(view_features))\n if not isinstance(correlation, list):\n p = np.arange(0, latent_dims)\n correlation = correlation * decay ** p\n covs = []\n true_features = []\n for view_p, sparsity, view_structure, view_positive, view_sigma in zip(view_features, view_sparsity,\n structure,\n positive, sigma):\n # Covariance Bit\n if view_structure == 'identity':\n cov_ = np.eye(view_p)\n elif view_structure == 'gaussian':\n cov_ = _generate_gaussian_cov(view_p, view_sigma)\n elif view_structure == 'toeplitz':\n cov_ = _generate_toeplitz_cov(view_p, view_sigma)\n elif view_structure == 'random':\n cov_ = _generate_random_cov(view_p)\n else:\n completed = True\n print(\"invalid structure\")\n break\n weights = np.random.normal(size=(view_p, latent_dims))\n if sparsity <= 1:\n sparsity = np.ceil(sparsity * view_p).astype('int')\n if sparsity < view_p:\n mask = np.stack(\n (np.concatenate(([0] * (view_p - sparsity), [1] * sparsity)).astype(bool),) * latent_dims,\n axis=0).T\n np.random.shuffle(mask)\n while np.sum(np.unique(mask, axis=1, return_counts=True)[1] > 1) > 0 or np.sum(\n np.sum(mask, axis=0) == 0) > 0:\n np.random.shuffle(mask)\n weights = weights * mask\n if view_positive:\n weights[weights < 0] = 0\n weights = _decorrelate_dims(weights, cov_)\n weights /= np.sqrt(np.diag((weights.T @ cov_ @ weights)))\n true_features.append(weights)\n covs.append(cov_)\n\n cov = block_diag(*covs)\n\n splits = np.concatenate(([0], np.cumsum(view_features)))\n\n for i, j in itertools.combinations(range(len(splits) - 1), 2):\n cross = np.zeros((view_features[i], view_features[j]))\n for _ in range(latent_dims):\n A = correlation[_] * np.outer(true_features[i][:, _], true_features[j][:, _])\n # Cross Bit\n cross += covs[i] @ A @ covs[j]\n cov[splits[i]: splits[i] + view_features[i], splits[j]: splits[j] 
+ view_features[j]] = cross\n cov[splits[j]: splits[j] + view_features[j], splits[i]: splits[i] + view_features[i]] = cross.T\n\n X = np.zeros((n, sum(view_features)))\n chol = np.linalg.cholesky(cov)\n for _ in range(n):\n X[_, :] = _chol_sample(mean, chol)\n views = np.split(X, np.cumsum(view_features)[:-1], axis=1)\n completed = True\n except:\n completed = False\n return views, true_features\n\n\ndef generate_simple_data(n: int, view_features: List[int], view_sparsity: List[int] = None,\n eps: float = 0):\n \"\"\"\n\n :param n: number of samples\n :param view_features: number of features view 1\n :param view_sparsity: number of features view 2\n :param eps: gaussian noise std\n :return: view1 matrix, view2 matrix, true weights view 1, true weights view 2\n\n :Example:\n\n >>> from cca_zoo.data import generate_simple_data\n >>> [train_view_1,train_view_2],[true_weights_1,true_weights_2]=generate_covariance_data(200,[10,10])\n \"\"\"\n z = np.random.normal(0, 1, n)\n views = []\n true_features = []\n for p, sparsity in zip(view_features, view_sparsity):\n weights = np.random.normal(size=(p, 1))\n if sparsity > 0:\n if sparsity < 1:\n sparsity = np.ceil(sparsity * p).astype('int')\n weights[np.random.choice(np.arange(p), p - sparsity, replace=False)] = 0\n\n gaussian_x = np.random.normal(0, eps, (n, p))\n view = np.outer(z, weights)\n view += gaussian_x\n views.append(view)\n true_features.append(weights)\n return views, true_features\n\n\ndef _decorrelate_dims(up, cov):\n A = up.T @ cov @ up\n for k in range(1, A.shape[0]):\n up[:, k:] -= np.outer(up[:, k - 1], A[k - 1, k:] / A[k - 1, k - 1])\n A = up.T @ cov @ up\n return up\n\n\ndef _chol_sample(mean, chol):\n return mean + chol @ np.random.standard_normal(mean.size)\n\n\ndef _gaussian(x, mu, sig, dn):\n \"\"\"\n Generate a gaussian covariance matrix\n\n :param x:\n :param mu:\n :param sig:\n :param dn:\n \"\"\"\n return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.))) * dn / (np.sqrt(2 * np.pi) * sig)\n\n\ndef _generate_gaussian_cov(p, sigma):\n x = np.linspace(-1, 1, p)\n x_tile = np.tile(x, (p, 1))\n mu_tile = np.transpose(x_tile)\n dn = 2 / (p - 1)\n cov = _gaussian(x_tile, mu_tile, sigma, dn)\n cov /= cov.max()\n return cov\n\n\ndef _generate_toeplitz_cov(p, sigma):\n c = np.arange(0, p)\n c = sigma ** c\n cov = linalg.toeplitz(c, c)\n return cov\n\n\ndef _generate_random_cov(p):\n cov_ = np.random.rand(p, p)\n U, S, Vt = np.linalg.svd(cov_.T @ cov_)\n cov = U @ (1 + np.diag(np.random.rand(p))) @ Vt\n return cov\n" ]
[ [ "numpy.sum", "numpy.random.standard_normal", "numpy.diag", "numpy.transpose", "numpy.random.rand", "numpy.linspace", "numpy.unique", "numpy.tile", "numpy.eye", "numpy.ceil", "numpy.zeros", "numpy.random.normal", "numpy.arange", "scipy.linalg.block_diag", "numpy.linalg.cholesky", "numpy.power", "numpy.random.shuffle", "numpy.cumsum", "scipy.linalg.toeplitz", "numpy.linalg.svd", "numpy.sqrt", "numpy.concatenate", "numpy.outer" ] ]
Holldean/Recommender-System
[ "6a93e6ee970b32c76e2f71043383bf24a7e865d5" ]
[ "Recommender_System/algorithm/NeuMF/train.py" ]
[ "from Recommender_System.algorithm.NeuMF.model import NeuMF_model\r\nfrom Recommender_System.algorithm.train import train, test\r\nimport tensorflow as tf\r\n\r\n\r\ndef train_with_pretrain(n_user, n_item, train_data, test_data, topk_data, gmf_dim, mlp_dim, layers, l2):\r\n neumf_model, gmf_model, mlp_model = NeuMF_model(n_user, n_item, gmf_dim=gmf_dim, mlp_dim=mlp_dim, layers=layers, l2=l2)\r\n print('预训练GMF部分')\r\n train(gmf_model, train_data, test_data, topk_data, epochs=10, batch=512)\r\n print('预训练MLP部分')\r\n train(mlp_model, train_data, test_data, topk_data, epochs=10, batch=512)\r\n\r\n out_kernel = tf.concat((gmf_model.get_layer('gmf_out').get_weights()[0], mlp_model.get_layer('mlp_out').get_weights()[0]), 0)\r\n out_bias = gmf_model.get_layer('gmf_out').get_weights()[1] + mlp_model.get_layer('mlp_out').get_weights()[1]\r\n neumf_model.get_layer('out').set_weights([out_kernel * 0.5, out_bias * 0.5])\r\n\r\n test(neumf_model, train_data, test_data, topk_data, batch=512)\r\n train(neumf_model, train_data, test_data, topk_data, optimizer=tf.keras.optimizers.SGD(0.0001), epochs=10, batch=512)\r\n\r\n\r\ndef train_without_pretrain(n_user, n_item, train_data, test_data, topk_data, gmf_dim, mlp_dim, layers, l2):\r\n neumf_model, _, _ = NeuMF_model(n_user, n_item, gmf_dim=gmf_dim, mlp_dim=mlp_dim, layers=layers, l2=l2)\r\n train(neumf_model, train_data, test_data, topk_data, epochs=10, batch=512)\r\n" ]
[ [ "tensorflow.keras.optimizers.SGD" ] ]
dingjr27/nerf
[ "b0e0554022f66d65705d3134c4cfdd71429eb574" ]
[ "test_nerf.py" ]
[ "import os, sys\n# os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\nimport tensorflow as tf\ntf.compat.v1.enable_eager_execution()\n\nsys.path.append(r'/home/luca/Desktop/NERFPosit/Inference')\n\nimport numpy as np\nimport imageio\nimport json\nimport random\nimport time\nimport pprint\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nimport matplotlib.pyplot as plt\n\nimport run_nerf\n\nfrom load_llff import load_llff_data\nfrom load_deepvoxels import load_dv_data\nfrom load_blender import load_blender_data\n\nbasedir = './logs'\nexpname = 'fern_example'\n\nconfig = os.path.join(basedir, expname, 'config.txt')\nprint('Args:')\nprint(open(config, 'r').read())\nparser = run_nerf.config_parser()\n\nargs = parser.parse_args('--config {} --ft_path {}'.format(config, os.path.join(basedir, expname, 'model_200000.npy')))\nprint('loaded args')\n\nimages, poses, bds, render_poses, i_test = load_llff_data(args.datadir, args.factor,\n recenter=True, bd_factor=.75,\n spherify=args.spherify)\nH, W, focal = poses[0,:3,-1].astype(np.float32)\n\nH = int(H)\nW = int(W)\nhwf = [H, W, focal]\n\nimages = images.astype(np.float32)\nposes = poses.astype(np.float32)\n\nif args.no_ndc:\n near = tf.reduce_min(bds) * .9\n far = tf.reduce_max(bds) * 1.\nelse:\n near = 0.\n far = 1.\n\n# Create nerf model\n_, render_kwargs_test, start, grad_vars, models = run_nerf.create_nerf(args)\n\nprint(models['model'].input)\nmodel = models['model']\nprint(model.summary())\n#extractor = keras.Model(inputs=model.inputs,\n # outputs=model.layers[1].output)\n#embed_fn, input_ch = run_nerf.get_embedder(10,1)\n#embed_fn1, input_ch = run_nerf.get_embedder(4,1)\n#a = embed_fn(tf.constant([[0.5,0.5,0.5]]))\n#b = embed_fn1(tf.constant([[0.5,0.5,0.5]]))\n#c = tf.concat([a,b],1)\n#print(c.shape)\n#print(extractor.predict(c))\n#exit(0)\n#features = extractor()\n\nbds_dict = {\n 'near' : tf.cast(near, tf.float32),\n 'far' : tf.cast(far, tf.float32),\n}\nrender_kwargs_test.update(bds_dict)\n\nprint('Render kwargs:')\npprint.pprint(render_kwargs_test)\n\n\ndown = 4\nrender_kwargs_fast = {k : render_kwargs_test[k] for k in render_kwargs_test}\nrender_kwargs_fast['N_importance'] = 0\n\nc2w = np.eye(4)[:3,:4].astype(np.float32) # identity pose matrix\ntest = run_nerf.render(H//down, W//down, focal/down, c2w=c2w, **render_kwargs_fast)\n\nimg = np.clip(test[0],0,1)\nplt.imshow(img)\nplt.show()\n\n" ]
[ [ "numpy.eye", "tensorflow.reduce_min", "tensorflow.reduce_max", "tensorflow.cast", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "numpy.clip", "tensorflow.compat.v1.enable_eager_execution" ] ]
kyeeh/holbertonschool-machine_learning
[ "8e4894c2b036ec7f4750de5bf99b95aee5b94449" ]
[ "math/0x06-multivariate_prob/3-main.py" ]
[ "#!/usr/bin/env python3\n\nif __name__ == '__main__':\n import numpy as np\n from multinormal import MultiNormal\n\n np.random.seed(0)\n data = np.random.multivariate_normal([12, 30, 10], [[36, -30, 15], [-30, 100, -20], [15, -20, 25]], 10000).T\n mn = MultiNormal(data)\n x = np.random.multivariate_normal([12, 30, 10], [[36, -30, 15], [-30, 100, -20], [15, -20, 25]], 1).T\n print(x)\n print(mn.pdf(x))\n" ]
[ [ "numpy.random.multivariate_normal", "numpy.random.seed" ] ]
mbonnema/SWAV
[ "d5dd4dd1a88de008f27b0232c536491c7dc84623" ]
[ "src/an_FilterS1.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 28 11:35:01 2021\n\n@author: mbonnema\n\"\"\"\n\nimport numpy as np\ndef FilterS1(D,A,WE,LE):\n D_f = {}\n A_f = {}\n WE_f = {}\n LE_f = {}\n for key in D:\n dates = D[key]\n areas = A[key]\n werrors = WE[key]\n lerrors = LE[key]\n \n \n d_f = []\n a_f = []\n we_f = []\n le_f = []\n \n for d,a,we,le in zip(dates,areas,werrors,lerrors):\n #print(a)\n if we < 0:\n we = 0\n if le < 0:\n le = 0\n if a > 0:\n if we/a > 0.1:\n #print('fail 1')\n continue\n if a > 0:\n if le/a > 0.1:\n #print('fail 2')\n continue\n #print('passed')\n d_f.append(d)\n a_f.append(a)\n we_f.append(we)\n le_f.append(le)\n a_std = np.std(np.array(a_f))\n a_mean = np.mean(np.array(a_f))\n d_f = np.array(d_f)[np.array([a_f<=(a_mean+a_std*3),a_f>=(a_mean-a_std*3)]).all(axis=0)]\n we_f = np.array(we_f)[np.array([a_f<=(a_mean+a_std*3),a_f>=(a_mean-a_std*3)]).all(axis=0)]\n le_f = np.array(le_f)[np.array([a_f<=(a_mean+a_std*3),a_f>=(a_mean-a_std*3)]).all(axis=0)]\n a_f = np.array(a_f)[np.array([a_f<=(a_mean+a_std*3),a_f>=(a_mean-a_std*3)]).all(axis=0)]\n D_f[key] = d_f\n A_f[key] = a_f\n WE_f[key] = we_f\n LE_f[key] = le_f\n \n return(D_f,A_f,WE_f,LE_f)\n \n " ]
[ [ "numpy.array" ] ]
jialuechen/augustus
[ "d4fbda427e3d9c60896b0e22c06cd593b484ef9d" ]
[ "augustus/custom/trade_log_analysis.py" ]
[ "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table_experiments as dt\nimport pandas as pd\nimport plotly\nfrom dash.dependencies import Input, Output, State\nfrom plotly import graph_objs as go\n\nfrom augustus.systemabase_env import augustusEnvBase\n\nTRADE_LOG = augustusEnvBase.full_trade_log\n\nAPP = dash.Dash()\nAPP.scripts.config.serve_locally = True\n\nAPP.layout = html.Div([\n html.H4('augustus Trade Log Analysis'),\n dt.DataTable(\n rows=TRADE_LOG.to_dict('records'),\n\n row_selectable=True,\n filterable=True,\n sortable=True,\n selected_row_indices=[],\n id='trade_log'\n ),\n\n dcc.Graph(\n id='drawdown_pnl'\n ),\n\n dcc.Graph(\n id='run_up_pnl'\n ),\n\n], className=\"container\")\n\n\[email protected](\n Output('trade_log', 'selected_row_indices'),\n [Input('drawdown_pnl', 'clickData')],\n [State('trade_log', 'selected_row_indices')])\ndef update_selected_row_indices(clickData, selected_row_indices):\n if clickData:\n for point in clickData['points']:\n if point['pointNumber'] in selected_row_indices:\n selected_row_indices.remove(point['pointNumber'])\n else:\n selected_row_indices.append(point['pointNumber'])\n\n return selected_row_indices\n\n\[email protected](\n Output('drawdown_pnl', 'figure'),\n [Input('trade_log', 'rows'),\n Input('trade_log', 'selected_row_indices')])\ndef update_run_up_figure(rows, selected_row_indices):\n\n dff = pd.DataFrame(rows)\n profit_diff = dff.loc[dff.returns_diff > 0]\n loss_diff = dff.loc[dff.returns_diff < 0]\n\n fig = plotly.tools.make_subplots(\n rows=1, cols=1,\n shared_xaxes=True)\n\n fig['layout'].update(dict(title='Profit & Loss vs Run-up'))\n fig['layout']['xaxis'].update(dict(title='Run-up(%)'))\n fig['layout']['yaxis'].update(dict(title='Profit & Loss(%)'))\n\n fig.append_trace({\n 'x': profit_diff['run_up']*100,\n 'y': profit_diff['returns_diff']*100,\n 'text': profit_diff.entry_date + ' to ' + profit_diff.exit_date,\n 'type': 'scatter',\n 'marker': dict(color='black'),\n 'mode': 'markers',\n 'name': 'win',\n 'line': {'width': 1}\n }, 1, 1)\n fig.append_trace({\n 'x': loss_diff['run_up']*100,\n 'y': -loss_diff['returns_diff']*100,\n 'type': 'scatter',\n 'text': loss_diff.entry_date + ' to ' + loss_diff.exit_date,\n 'marker': dict(color='red'),\n 'mode': 'markers',\n 'name': 'lose',\n 'line': {'width': 1}\n }, 1, 1)\n\n fig.append_trace({\n 'x': [0, 10],\n 'y': [0, 10],\n 'type': 'scatter',\n 'mode': 'lines',\n 'name': 'Win diagonal',\n 'line': {'width': 1}\n }, 1, 1)\n\n return fig\n\n\[email protected](\n Output('run_up_pnl', 'figure'),\n [Input('trade_log', 'rows'),\n Input('trade_log', 'selected_row_indices')])\ndef update__drawdown_figure(rows, selected_row_indices):\n\n dff = pd.DataFrame(rows)\n profit_diff = dff.loc[dff.returns_diff > 0]\n loss_diff = dff.loc[dff.returns_diff < 0]\n\n fig = plotly.tools.make_subplots(\n rows=1, cols=1,\n shared_xaxes=True)\n fig['layout'].update(dict(title='Profit & Loss vs Drawdown'))\n fig['layout']['xaxis'].update(dict(title='Drawdown(%)'))\n fig['layout']['yaxis'].update(dict(title='Profit & Loss(%)'))\n\n fig.append_trace({\n 'x': profit_diff['drawdown']*100,\n 'y': profit_diff['returns_diff']*100,\n 'type': 'scatter',\n 'marker': dict(color='black'),\n 'text': profit_diff.entry_date + ' to ' + profit_diff.exit_date,\n 'mode': 'markers',\n 'name': 'win',\n 'line': {'width': 1}\n }, 1, 1)\n\n fig.append_trace({\n 'x': loss_diff['drawdown']*100,\n 'y': -loss_diff['returns_diff']*100,\n 'text': loss_diff.entry_date + ' to ' + 
loss_diff.exit_date,\n 'type': 'scatter',\n 'marker': dict(color='red'),\n 'mode': 'markers',\n 'name': 'lose',\n 'line': {'width': 1}\n }, 1, 1)\n\n fig.append_trace({\n 'x': [0, 10],\n 'y': [0, 10],\n 'type': 'scatter',\n 'mode': 'lines',\n 'name': 'Loss diagonal',\n 'line': {'width': 1}\n }, 1, 1)\n\n return fig\n\n\nif __name__ == '__main__':\n APP.run_server(debug=True)\n" ]
[ [ "pandas.DataFrame" ] ]
tjulitianyi1997/mindspore
[ "c802a8c31fe2b51530d932fdd364824e45264b12" ]
[ "tests/ut/python/parallel/test_reshape.py" ]
[ "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom mindspore.train import Model, ParallelMode\nfrom mindspore.nn.loss import SoftmaxCrossEntropyWithLogits\nfrom mindspore.nn.optim.momentum import Momentum\nfrom mindspore import Tensor\nimport mindspore as ms\nimport numpy as np\nfrom mindspore.ops import operations as P\nimport mindspore.nn as nn\nfrom mindspore.common.parameter import Parameter\nfrom tests.dataset_mock import MindData\nfrom mindspore import context\nfrom tests.ut.python.ops.test_math_ops import VirtualLoss\nfrom mindspore.common.api import _executor\nfrom mindspore.ops import composite as C\nfrom mindspore.ops.operations.comm_ops import _VirtualDataset\nfrom mindspore.ops import functional as F\nfrom mindspore.common.parameter import ParameterTuple\nfrom mindspore.common import dtype as mstype\nfrom mindspore.parallel import set_algo_parameters\ncontext.set_context(mode=context.GRAPH_MODE)\ncontext.reset_auto_parallel_context()\n\nclass Dataset(MindData):\n def __init__(self, predict, label, length=3, input_num=2):\n super(Dataset, self).__init__(size=length)\n self.predict = predict\n self.label = label\n self.index = 0\n self.length = length\n self.input_num = input_num\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.length:\n raise StopIteration\n self.index += 1\n if self.input_num == 2:\n return self.predict, self.label\n else:\n return self.predict,\n\n def reset(self):\n self.index = 0\n\n\nclass ReshapeNet(nn.Cell):\n def __init__(self, strategy0, strategy1, strategy2):\n super(ReshapeNet, self).__init__()\n self.relu = P.ReLU().set_strategy(strategy0)\n self.reshape = P.Reshape().set_strategy(strategy1)\n self.matmul = P.MatMul().set_strategy(strategy2)\n self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name=\"weight\")\n\n def construct(self, x):\n x = self.relu(x)\n x = self.reshape(x, (256, 25088))\n x = self.matmul(x, self.matmul_weight)\n return x\n\n\ndef reshape_net(strategy0, strategy1, strategy2):\n return ReshapeNet(strategy0=strategy0, strategy1=strategy1, strategy2=strategy2)\n\n\ndef reshape_common(parallel_mode, strategy0, strategy1, strategy2, strategy_loss):\n batch_size = 32\n learning_rate = 0.1\n momentum = 0.9\n epoch_size = 2\n\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)\n predict = Tensor(np.ones([32, 512, 7, 7]), dtype=ms.float32)\n label = Tensor(np.ones([32]), dtype=ms.int32)\n dataset = Dataset(predict, label, 2)\n net = reshape_net(strategy0, strategy1, strategy2)\n\n loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)\n loss.softmax_cross_entropy.set_strategy(strategy_loss)\n loss.one_hot.set_strategy(((8,1), (), ()))\n opt = Momentum(net.trainable_params(), learning_rate, momentum)\n model = Model(net, loss, opt)\n model.train(epoch_size, dataset, dataset_sink_mode=False)\n\n\ndef test_reshape1():\n strategy0 = ((8, 1, 1, 1), 
)\n strategy1 = None\n strategy2 = ((8, 1), (1, 1))\n strategy_loss = ((8, 1), (8, 1))\n reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n\n\ndef test_reshape1_strategy_1():\n strategy0 = ((8, 1, 1, 1), )\n strategy1 = ((8, 1, 1, 1), )\n strategy2 = ((8, 1), (1, 1))\n strategy_loss = ((8, 1), (8, 1))\n try:\n reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n except:\n pass\n\n\ndef test_reshape1_strategy_2():\n strategy0 = ((8, 1, 1, 1), )\n strategy1 = ((8, 1, 1, 1), )\n strategy2 = ((8, 1), (1, 1))\n strategy_loss = ((8, 1), (8, 1))\n try:\n reshape_common(ParallelMode.AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n except:\n pass\n\n\ndef test_reshape2():\n strategy0 = ((8, 1, 1, 1), )\n strategy1 = None\n strategy2 = ((8, 1), (1, 1))\n strategy_loss = ((8, 1), (8, 1))\n reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n\n\ndef test_reshape3():\n strategy0 = ((2, 1, 1, 1), )\n strategy1 = None\n strategy2 = ((8, 1), (1, 1))\n strategy_loss = ((8, 1), (8, 1))\n reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n\n\ndef test_reshape4():\n strategy0 = ((1, 1, 1, 1), )\n strategy1 = None\n strategy2 = ((8, 1), (1, 1))\n strategy_loss = ((8, 1), (8, 1))\n reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n\n\ndef test_reshape5():\n strategy0 = ((2, 1, 1, 1), )\n strategy1 = None\n strategy2 = ((1, 8), (8, 1))\n strategy_loss = ((8, 1), (8, 1))\n reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n\n\ndef test_reshape_auto():\n strategy0 = None\n strategy1 = None\n strategy2 = None\n strategy_loss = None\n reshape_common(ParallelMode.AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)\n\n\nclass NetWithLoss(nn.Cell):\n def __init__(self, network):\n super(NetWithLoss, self).__init__()\n self.loss = VirtualLoss()\n self.network = network\n\n def construct(self, x):\n predict = self.network(x)\n return self.loss(predict)\n\n\nclass GradWrap(nn.Cell):\n def __init__(self, network):\n super(GradWrap, self).__init__()\n self.network = network\n\n def construct(self, x):\n return C.grad_all(self.network)(x)\n\n\nclass ReshapeNet1(nn.Cell):\n def __init__(self, strategy0):\n super(ReshapeNet1, self).__init__()\n self.virtual_dataset = _VirtualDataset()\n self.reshape = P.Reshape()\n self.matmul = P.MatMul().set_strategy(strategy0)\n self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name=\"weight\")\n self.reshape2 = P.Reshape()\n\n def construct(self, x):\n x = self.virtual_dataset(x)\n x = self.reshape(x, (256, 25088))\n x = self.matmul(x, self.matmul_weight)\n x = self.reshape2(x, (256 * 256,))\n return x\n\n\nclass ReshapeNet2(nn.Cell):\n def __init__(self, strategy0):\n super(ReshapeNet2, self).__init__()\n self.virtual_dataset = _VirtualDataset()\n self.reshape = P.Reshape()\n self.matmul = P.MatMul().set_strategy(strategy0)\n self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name=\"weight\")\n self.reshape2 = P.Reshape()\n self.reduce_sum = P.ReduceSum(keep_dims=True)\n self.reshape3 = P.Reshape()\n\n def construct(self, x):\n x = self.virtual_dataset(x)\n x = self.reshape(x, (256, 25088))\n x = self.matmul(x, self.matmul_weight)\n x = self.reshape2(x, (256 * 256,))\n x = self.reduce_sum(x, -1)\n x = self.reshape3(x, ())\n return 
x\n\n\nclass ReshapeNet3(nn.Cell):\n def __init__(self, strategy0):\n super(ReshapeNet3, self).__init__()\n self.virtual_dataset = _VirtualDataset()\n self.reshape = P.Reshape()\n self.matmul = P.MatMul().set_strategy(strategy0)\n self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name=\"weight\")\n self.reshape2 = P.Reshape()\n self.reduce_sum = P.ReduceSum(keep_dims=False)\n self.reshape3 = P.Reshape()\n\n def construct(self, x):\n x = self.virtual_dataset(x)\n x = self.reshape(x, (256, 25088))\n x = self.matmul(x, self.matmul_weight)\n x = self.reshape2(x, (256 * 256,))\n x = self.reduce_sum(x, -1)\n x = self.reshape3(x, (1, 1))\n return x\n\n\nclass ReshapeNet4(nn.Cell):\n def __init__(self, strategy0):\n super(ReshapeNet4, self).__init__()\n self.virtual_dataset = _VirtualDataset()\n self.reshape = P.Reshape()\n self.reshape2 = P.Reshape()\n self.matmul = P.MatMul().set_strategy(strategy0)\n self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name=\"weight\")\n\n def construct(self, x):\n x = self.virtual_dataset(x)\n x = self.reshape(x, (256, 25088))\n w = self.reshape2(self.matmul_weight, (25088, 256))\n x = self.matmul(x, w)\n return x\n\n\nclass ReshapeNet5(nn.Cell):\n def __init__(self, strategy0):\n super(ReshapeNet5, self).__init__()\n self.virtual_dataset = _VirtualDataset()\n self.reshape = P.Reshape()\n self.matmul1 = P.MatMul().set_strategy(strategy0)\n self.matmul1_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name=\"weight\")\n self.matmul2 = P.MatMul().set_strategy(strategy0)\n\n def construct(self, x):\n x = self.virtual_dataset(x)\n x = self.reshape(x, (256, 25088))\n matmul1_o = self.matmul1(x, self.matmul1_weight)\n matmul2_o = self.matmul2(matmul1_o, x)\n return matmul2_o\n\n\nclass ReshapeNet6(nn.Cell):\n def __init__(self, strategy0):\n super(ReshapeNet6, self).__init__()\n self.virtual_dataset = _VirtualDataset()\n self.reshape = P.Reshape()\n self.matmul1_1 = P.MatMul().set_strategy(strategy0)\n self.matmul1_2 = P.MatMul().set_strategy(strategy0)\n self.matmul1_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name=\"weight\")\n self.matmul2 = P.MatMul().set_strategy(strategy0)\n self.add = P.TensorAdd()\n\n def construct(self, x):\n x = self.virtual_dataset(x)\n x = self.reshape(x, (256, 25088))\n matmul1_1_o = self.matmul1_1(x, self.matmul1_weight)\n matmul1_2_o = self.matmul1_2(x, self.matmul1_weight)\n matmul1_o = self.add(matmul1_1_o, matmul1_2_o)\n matmul2_o = self.matmul2(matmul1_o, x)\n return matmul2_o\n\n\ndef reshape_net2(backbone):\n batch_size = 16\n device_num = 16\n context.set_auto_parallel_context(device_num=device_num, global_rank=0)\n input = Tensor(np.ones([batch_size * device_num, 512, 7, 7]).astype(np.float32) * 0.01)\n\n net = GradWrap(NetWithLoss(backbone))\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\")\n \n _executor.compile(net, input)\n\n\ndef test_reshape_net1_1():\n reshape_net2(ReshapeNet1(((1, 8), (8, 1))))\n\n\ndef test_reshape_net1_2():\n reshape_net2(ReshapeNet1(((1, 8), (8, 2))))\n\n\ndef test_reshape_net2_1():\n reshape_net2(ReshapeNet2(((1, 8), (8, 1))))\n\n\ndef test_reshape_net2_2():\n reshape_net2(ReshapeNet2(((1, 8), (8, 2))))\n\n\ndef test_reshape_net3_1():\n reshape_net2(ReshapeNet3(((1, 8), (8, 1))))\n\n\ndef test_reshape_net3_2():\n reshape_net2(ReshapeNet3(((1, 8), (8, 2))))\n\n\ndef test_reshape_net4_1():\n try:\n reshape_net2(ReshapeNet4(((1, 8), (8, 1))))\n except:\n pass\n\n\ndef 
test_reshape_net4_2():\n try:\n reshape_net2(ReshapeNet4(((1, 8), (8, 2))))\n except:\n pass\n\n\ndef test_reshape_net5_1():\n reshape_net2(ReshapeNet5(((1, 8), (8, 1))))\n\n\ndef test_reshape_net5_2():\n reshape_net2(ReshapeNet5(((1, 8), (8, 2))))\n\n\ndef test_reshape_net6_1():\n reshape_net2(ReshapeNet6(((1, 8), (8, 1))))\n\n\ndef test_reshape_net6_2():\n reshape_net2(ReshapeNet6(((1, 8), (8, 2))))\n\n\nclass TrainOneStepCell(nn.Cell):\n \"\"\"\n Network training package class.\n\n Append an optimizer to the training network after that the construct function\n can be called to create the backward graph.\n\n Args:\n network (Cell): The training network.\n optimizer (Cell): Optimizer for updating the weights.\n sens (Number): The adjust parameter. Default: 1.0.\n\n Examples:\n >>> net = Net()\n >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()\n >>> optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n >>> loss_net = WithLossCell(net, loss_fn)\n >>> train_net = TrainOneStepCell(loss_net, optim)\n \"\"\"\n def __init__(self, network, optimizer, sens=1.0):\n super(TrainOneStepCell, self).__init__(auto_prefix=False)\n self.network = network\n self.network.add_flags(defer_inline=True)\n self.weights = ParameterTuple(network.trainable_params())\n self.optimizer = optimizer\n self.grad = C.GradOperation('grad',\n get_by_list=True,\n sens_param=True)\n self.sens = sens\n\n def construct(self, data):\n weights = self.weights\n loss = self.network(data)\n sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)\n grads = self.grad(self.network, weights)(data, sens)\n\n return F.depend(loss, self.optimizer(grads))\n\n\ndef reshape_common2(parallel_mode, net):\n batch_size = 16\n learning_rate = 0.1\n momentum = 0.9\n epoch_size = 2\n\n predict = Tensor(np.ones([batch_size, 512, 7, 7]), dtype=ms.float32)\n label = Tensor(np.ones([batch_size]), dtype=ms.int32)\n dataset = Dataset(predict, label, 2, input_num=1)\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=16)\n\n opt = Momentum(net.trainable_params(), learning_rate, momentum)\n train_net = TrainOneStepCell(net, opt).set_train()\n model = Model(train_net)\n model.train(epoch_size, dataset, dataset_sink_mode=False)\n\n\ndef test_reshape_common2_0():\n reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet1(((1, 8), (8, 1))))\n\n\ndef test_reshape_common2_1():\n reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet1(((1, 8), (8, 2))))\n\n\ndef test_reshape_common2_2():\n reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet2(((1, 8), (8, 1))))\n\n\ndef test_reshape_common2_3():\n reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet2(((1, 8), (8, 2))))\n\n\ndef test_reshape_common2_4():\n reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet3(((1, 8), (8, 1))))\n\n\ndef test_reshape_common2_5():\n reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet3(((1, 8), (8, 2))))\n\n\nclass BatchNormReshapeNet(nn.Cell):\n def __init__(self):\n super(BatchNormReshapeNet, self).__init__()\n self.vd = P._VirtualDataset()\n self.batch_norm = nn.BatchNorm1d(512, affine=False)\n self.reshape = P.Reshape()\n self.prelu = nn.PReLU(channel=256)\n\n def construct(self, x):\n x = self.vd(x)\n x = self.batch_norm(x)\n x = self.reshape(x, (512, 256))\n x = self.prelu(x)\n return x\n\n\ndef test_batchnorm_reshape_train():\n batch_size = 16\n device_num = 16\n context.set_auto_parallel_context(device_num=device_num, global_rank=0)\n 
context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\")\n input = Tensor(np.ones([batch_size * device_num, 512]).astype(np.float32) * 0.01)\n\n net = GradWrap(NetWithLoss(BatchNormReshapeNet()))\n \n _executor.compile(net, input)\n\n\ndef bn_with_initialize(out_channels):\n bn = nn.BatchNorm2d(out_channels, momentum=0.3, eps=1e-5).add_flags_recursive(fp32=True)\n return bn\n\n\ndef fc_with_initialize(input_channels, out_channels):\n return nn.Dense(input_channels, out_channels).add_flags_recursive(fp16=True)\n\n\nclass BNReshapeDenseBNNet(nn.Cell):\n def __init__(self):\n super(BNReshapeDenseBNNet, self).__init__()\n self.batch_norm = bn_with_initialize(2)\n self.reshape = P.Reshape()\n self.cast = P.Cast()\n self.batch_norm2 = nn.BatchNorm1d(512, affine=False)\n self.fc = fc_with_initialize(2 * 32 * 32, 512)\n\n def construct(self, x):\n x = self.batch_norm(x)\n x = self.reshape(x, (16, 2*32*32))\n x = self.fc(x)\n x = self.batch_norm2(x)\n return x\n\n\ndef test_bn_reshape_dense_bn_train():\n batch_size = 16\n device_num = 16\n context.set_auto_parallel_context(device_num=device_num, global_rank=0)\n input = Tensor(np.ones([batch_size, 2, 32, 32]).astype(np.float32) * 0.01)\n\n net = GradWrap(NetWithLoss(BNReshapeDenseBNNet()))\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\")\n \n _executor.compile(net, input)\n\n\nclass ParallelReduceMeanNet(nn.Cell):\n def __init__(self, conv_in_channel, conv_out_channel,\n reducemean_keep_dims=False, reducemean_axis=-1, strategy=None):\n super().__init__()\n self.conv = nn.Conv2d(in_channels=conv_in_channel, out_channels=conv_out_channel,\n kernel_size=1, stride=1, pad_mode='valid', has_bias=True,\n weight_init='ones', bias_init='ones')\n self.reduce_mean = P.ReduceMean(keep_dims=reducemean_keep_dims)\n self.flat = nn.Flatten()\n self.reducemean_axis = reducemean_axis\n if strategy is not None:\n self.reduce_mean.set_strategy(strategy)\n\n def construct(self, inputs):\n x = self.conv(inputs)\n x = self.reduce_mean(x, self.reducemean_axis)\n x = self.flat(x)\n return x\n\n\nclass CrossEntropyLoss(nn.Cell):\n def __init__(self, reduction='mean'):\n super(CrossEntropyLoss, self).__init__()\n\n self.reduce_mean = P.ReduceMean()\n self.cross_entropy = SoftmaxCrossEntropyWithLogits()\n self.reduction = reduction\n\n def construct(self, logits, label):\n loss = self.cross_entropy(logits, label)\n if self.reduction == 'mean':\n loss = self.reduce_mean(loss, (-1,))\n return loss\n\n\ndef test_flatten_reshape(parallel_mode=\"auto_parallel\"):\n batch_size = 16\n learning_rate = 0.1\n momentum = 0.9\n epoch_size = 2\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)\n net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_axis=(2, 3), strategy=((4, 2, 1, 1),))\n loss = CrossEntropyLoss()\n predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)\n label = Tensor(np.ones([batch_size, 64]), dtype=ms.float32)\n dataset = Dataset(predict, label, 2, input_num=2)\n\n opt = Momentum(net.trainable_params(), learning_rate, momentum)\n model = Model(net, loss_fn = loss, optimizer=opt)\n model.train(epoch_size, dataset, dataset_sink_mode=False)\n\n\ndef test_flatten_reshape2(parallel_mode=\"auto_parallel\"):\n batch_size = 16\n learning_rate = 0.1\n momentum = 0.9\n epoch_size = 2\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)\n 
set_algo_parameters(fully_use_devices=False)\n net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_axis=(2, 3), strategy=((4, 1, 1, 1),))\n loss = CrossEntropyLoss()\n predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)\n label = Tensor(np.ones([batch_size, 64]), dtype=ms.float32)\n dataset = Dataset(predict, label, 2, input_num=2)\n\n opt = Momentum(net.trainable_params(), learning_rate, momentum)\n model = Model(net, loss_fn = loss, optimizer=opt)\n model.train(epoch_size, dataset, dataset_sink_mode=False)\n\n\nclass ParallelReshapeNet(nn.Cell):\n def __init__(self, dense_in_channel, dense_out_channel, shape, strategy=None):\n super().__init__()\n self.flat = nn.Flatten()\n self.dense = nn.Dense(in_channels=dense_in_channel,\n out_channels=dense_out_channel,\n weight_init='ones',\n bias_init='ones',\n has_bias=True)\n self.reshape = P.Reshape()\n self.shape = shape\n self.reshape.set_strategy(strategy)\n\n def construct(self, inputs):\n x = self.flat(inputs)\n x = self.dense(x)\n x = self.reshape(x, self.shape)\n return x\n\n\n# the shape of input and output of reshape is the same\n# reshape is optimized before step_parallel\ndef test_flatten_reshape3(parallel_mode=\"auto_parallel\"):\n batch_size = 16\n learning_rate = 0.1\n momentum = 0.9\n epoch_size = 2\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)\n set_algo_parameters(fully_use_devices=False)\n net = ParallelReshapeNet(dense_in_channel=2048, dense_out_channel=1000, shape=(128, 1000), strategy=((16, 1),))\n loss = CrossEntropyLoss()\n predict = Tensor(np.ones([batch_size, 1, 2, 1024]), dtype=ms.float32)\n label = Tensor(np.ones([batch_size, 1000]), dtype=ms.float32)\n dataset = Dataset(predict, label, 2, input_num=2)\n\n opt = Momentum(net.trainable_params(), learning_rate, momentum)\n model = Model(net, loss_fn = loss, optimizer=opt)\n model.train(epoch_size, dataset, dataset_sink_mode=False)\n\n\nclass CrossEntropyLoss2(nn.Cell):\n def __init__(self, reduction='mean'):\n super(CrossEntropyLoss2, self).__init__()\n self.cross_entropy = SoftmaxCrossEntropyWithLogits(reduction=reduction)\n\n def construct(self, logits, label):\n loss = self.cross_entropy(logits, label)\n return loss\n\n\ndef test_flatten_reshape4(parallel_mode=\"semi_auto_parallel\"):\n batch_size = 16\n learning_rate = 0.1\n momentum = 0.9\n epoch_size = 2\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)\n set_algo_parameters(fully_use_devices=False)\n net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_keep_dims=True, strategy=((4, 1, 1, 1),))\n loss = CrossEntropyLoss2()\n predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)\n label = Tensor(np.ones([batch_size, 2048]), dtype=ms.float32)\n dataset = Dataset(predict, label, 2, input_num=2)\n\n opt = Momentum(net.trainable_params(), learning_rate, momentum)\n model = Model(net, loss_fn=loss, optimizer=opt)\n model.train(epoch_size, dataset, dataset_sink_mode=False)\n" ]
[ [ "numpy.ones" ] ]
angseung/torch_cifar10
[ "3160f749f3bffd941d6c0fb98ddaad63d4e5641d" ]
[ "models/clnet.py" ]
[ "'''\nCrossLink Network\n'''\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef swish(x):\n return x * x.sigmoid()\n\n\ndef mish(x):\n return x * torch.tanh(F.softplus(x))\n\n\nclass CrossLinkBlock(nn.Module):\n '''Cross-Link Block'''\n\n def __init__(self, in_channels, out_channels, kernel_size, pool_enable):\n super(CrossLinkBlock, self).__init__()\n\n self.pool_enable = pool_enable\n self.ReLU = nn.ReLU()\n\n # basic blocks\n self.dconv1_1 = nn.Conv2d(in_channels,\n in_channels,\n kernel_size=kernel_size[0],\n stride=1,\n padding='same',\n groups=1,\n bias=False)\n\n self.dconv1_2 = nn.Conv2d(in_channels,\n in_channels,\n kernel_size=kernel_size[1],\n stride=1,\n padding='same',\n groups=1,\n bias=False)\n\n self.bn1 = nn.BatchNorm2d(in_channels)\n self.bn2 = nn.BatchNorm2d(in_channels)\n\n self.pconv = nn.Conv2d(in_channels,\n out_channels,\n kernel_size=3,\n stride=1,\n padding='same',\n groups=1,\n bias=False)\n\n self.bn3 = nn.BatchNorm2d(out_channels)\n\n self.maxpool = nn.MaxPool2d(2, 2)\n\n def forward(self, x):\n '''add forward here'''\n\n out1 = self.dconv1_1(x)\n out2 = self.dconv1_2(x)\n\n out1 = torch.mul(out1, self.ReLU(out1))\n out2 = torch.mul(out1, self.ReLU(out2))\n\n out = self.bn1(out1) + self.bn2(out2)\n out = self.bn3(self.pconv(out))\n\n if self.pool_enable:\n out = self.maxpool(out)\n\n return out\n\n\nclass CLNET(nn.Module):\n def __init__(self, cfg, num_classes=10):\n super(CLNET, self).__init__()\n self.cfg = cfg\n\n self.conv1 = nn.Conv2d(3,\n 32,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False)\n\n self.bn1 = nn.BatchNorm2d(32)\n self.pool1 = nn.MaxPool2d(2, 2)\n\n self.conv2 = nn.Conv2d(32,\n 32,\n kernel_size=3,\n stride=1,\n padding=1,\n groups=1,\n bias=False)\n\n self.bn2 = nn.BatchNorm2d(32)\n\n self.conv3 = nn.Conv2d(32,\n 16,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=False)\n\n self.layers = self._make_layers(in_channels=16)\n self.linear = nn.Linear(cfg['out_channels'][-1], num_classes)\n\n def _make_layers(self, in_channels):\n layers = []\n cfg = [self.cfg[k] for k in ['out_channels', 'kernel_size', 'pool_enable']]\n\n for out_channels, kernel_size, pool_enable in zip(*cfg):\n layers.append(\n CrossLinkBlock(in_channels,\n out_channels,\n kernel_size,\n pool_enable))\n in_channels = out_channels\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = mish(self.bn1(self.pool1(self.conv1(x)))) # conv block\n out = self.conv3(swish(self.bn2(self.conv2(out)))) # sep block\n out = self.layers(out)\n out = F.adaptive_avg_pool2d(out, 1)\n out = out.view(out.size(0), -1)\n dropout_rate = self.cfg['dropout_rate']\n if self.training and dropout_rate > 0:\n out = F.dropout(out, p=dropout_rate)\n out = self.linear(out)\n return out\n\n\ndef CLNet_V0(num_classes):\n cfg = {\n 'out_channels': [24, 40, 80, 112, 160],\n 'kernel_size': [(5, 3), (3, 5), (3, 3), (5, 5), (3, 3)],\n 'pool_enable': [True, True, True, True, False],\n 'dropout_rate': 0.2\n }\n return CLNET(cfg, num_classes=num_classes)\n\n\nimport torchinfo\n\n\ndef test():\n net = CLNet_V0(10)\n torchinfo.summary(net, (1, 3, 32, 32))\n x = torch.randn(3, 3, 32, 32, device='cuda')\n y = net(x)\n print(y.shape)\n\n\nif __name__ == '__main__':\n test()\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.functional.dropout", "torch.randn", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.functional.softplus", "torch.nn.ReLU" ] ]
TheVikJ/SUAVE
[ "eff37d167a4318ba8ba77dff873422c89db489b2" ]
[ "JinaAI/utils/get_data.py" ]
[ "import json\nimport requests\nimport pandas as pd\nimport os\n\nbaseurl = \"http://exploreapiswith.tech/api/\"\n\n\ncategories = json.loads(requests.get(\n baseurl + \"category\").text)\n\n\ndef get_category_api(category_name=None):\n category_apis = json.loads(requests.get(\n baseurl + \"category/\" + category_name).text)\n return category_apis\n\n\napi_list = []\nfor category in categories:\n\n api = get_category_api(category)\n api_list += api\n\nif os.path.exists(\"data/apis.json\"):\n os.remove(\"data/apis.json\")\n\n\nif os.path.exists(\"data/apis.csv\"):\n os.remove(\"data/apis.csv\")\n\nwith open(r\"data/apis.json\", \"x\") as f:\n json.dump(api_list, f)\n\n\njson_file = pd.read_json(r\"data/apis.json\")\njson_file.to_csv(r\"data/apis.csv\", index=False)\n" ]
[ [ "pandas.read_json" ] ]
jdailey/EnergyPATHWAYS
[ "0fb0ead475b6395f6b07fc43fe6c85826ee47d0f" ]
[ "energyPATHWAYS/tests/test_time_series.py" ]
[ "# -*- coding: utf-8 -*-\n__author__ = 'Ben, Ryan, Michael'\n\nimport numpy as np\nfrom collections import defaultdict\nimport pandas as pd\nimport energyPATHWAYS\nfrom energyPATHWAYS.time_series import TimeSeries\nimport unittest\nfrom matplotlib import pyplot as plt\n\n\nclass TestTimeSeries(unittest.TestCase):\n def setUp(self):\n self.methods = ('linear_interpolation',\n 'linear_regression',\n 'logistic',\n 'nearest',\n 'quadratic',\n 'cubic',\n 'exponential',\n 'none',\n 'decay_towards_linear_regression',\n 'average')\n\n def _help_test_clean_empty_data(self):\n newindex = np.arange(2000, 2051)\n\n x = np.array([])\n y = np.array([])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_clean_empty_data(self):\n self.assertRaises(IndexError, self._help_test_clean_empty_data)\n\n def test_clean_one_point(self):\n newindex = np.arange(2000, 2051)\n \n x = np.array([2010])\n y = np.array([.1])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_clean_two_points(self):\n newindex = np.arange(2000, 2051)\n \n x = np.array([2010, 2050])\n y = np.array([.1, .5])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_clean_three_points(self):\n newindex = np.arange(2000, 2051)\n \n x = np.array([2010, 2018, 2025])\n y = np.array([.8, .7, .4])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_clean_scurve_points(self):\n newindex = np.arange(2000, 2051)\n \n x = np.array([2010, 2018, 2025, 2040, 2050])\n y = np.array([.8, .7, .4, .35, .34])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_clean_linear_points(self):\n newindex = np.arange(2000, 2051)\n \n x = np.array([2010, 2020, 2030, 2040, 2050])\n y = np.array([.1, .2, .3, .4, .5])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_clean_quadratic_points(self):\n newindex = np.arange(2000, 2051)\n \n x = np.arange(2010, 2030)\n y = (x-2010)**2\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_three_zeros(self):\n # this has been a problem with logistic curve fitting\n newindex = np.arange(2000, 2051)\n\n x = np.array([2010, 2011, 2013])\n y = np.array([0, 0, 0])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def test_two_zeros(self):\n newindex = np.arange(2000, 2051)\n\n x = np.array([2010, 2013])\n y = np.array([0, 0])\n self.run_all_cleaning_methods(x, y, newindex)\n\n def run_all_cleaning_methods(self, x, y, newindex):\n for method in self.methods:\n data = pd.DataFrame(y, index=x)\n newdata = TimeSeries.clean(data,\n newindex=newindex,\n interpolation_method=(None if method=='decay_towards_linear_regression' else method), # not supported for linear regression\n extrapolation_method=method)\n\n\n#newindex = np.arange(2015, 2025)\n\nnewindex = np.arange(2012, 2017)\nx = np.array([2015, 2018, 2020])\ny = np.array([.8, .7, .4])\ndata = pd.DataFrame(y, index=x)\nnewdata = TimeSeries.clean(data, newindex=newindex, interpolation_method='linear_interpolation', extrapolation_method='nearest')\n#\n#\n#newindex = np.arange(2020, 2025)\n#multi_data = pd.concat([data]*3, keys=['a', 'b', 'c'], names=['dummy', 'year'])\n#newdata2 = TimeSeries.clean(multi_data, time_index_name='year', newindex=newindex, interpolation_method='linear_interpolation', extrapolation_method='nearest')\n\n\nnewindex = np.arange(2015, 2050)\nmulti_data = pd.concat([data]*3, keys=['a', 'b', 'c'], names=['dummy', 'year'])\nnewdata2 = TimeSeries.clean(multi_data, time_index_name='year', newindex=newindex, interpolation_method='nearest', extrapolation_method='exponential')\n\n\n#raw_values = 
pd.read_csv('raw_values_example_for_clean_timeseries.csv')\n#raw_values.set_index(['us', 'efficiency_type', 'supply_node', 'year'], inplace=True)\n#raw_values.sort_index(inplace=True)\n#\n#newindex = [2015]\n#newdata3 = TimeSeries.clean(raw_values, time_index_name='year', newindex=newindex, interpolation_method='linear_interpolation', extrapolation_method='nearest')\n#\n#print newdata3\n\n\nnewindex = np.arange(2012, 2030)\nx = np.array([2015, 2016, 2018, 2020, 2021, 2025])\ny = np.array([.8, np.inf, .7, .4, np.inf, np.nan])\ndata = pd.DataFrame(y, index=x)\nnewdata = TimeSeries.clean(data, newindex=newindex, interpolation_method='linear_interpolation', extrapolation_method='exponential')\n\n\n" ]
[ [ "numpy.arange", "pandas.DataFrame", "numpy.array", "pandas.concat" ] ]
ikamensh/scipy
[ "d645404be21b7c0b1e7ba24bf8d525b624aeb848" ]
[ "scipy/io/matlab/mio5.py" ]
[ "''' Classes for read / write of matlab (TM) 5 files\n\nThe matfile specification last found here:\n\nhttps://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf\n\n(as of December 5 2008)\n'''\n'''\n=================================\n Note on functions and mat files\n=================================\n\nThe document above does not give any hints as to the storage of matlab\nfunction handles, or anonymous function handles. I had, therefore, to\nguess the format of matlab arrays of ``mxFUNCTION_CLASS`` and\n``mxOPAQUE_CLASS`` by looking at example mat files.\n\n``mxFUNCTION_CLASS`` stores all types of matlab functions. It seems to\ncontain a struct matrix with a set pattern of fields. For anonymous\nfunctions, a sub-fields of one of these fields seems to contain the\nwell-named ``mxOPAQUE_CLASS``. This seems to contain:\n\n* array flags as for any matlab matrix\n* 3 int8 strings\n* a matrix\n\nIt seems that whenever the mat file contains a ``mxOPAQUE_CLASS``\ninstance, there is also an un-named matrix (name == '') at the end of\nthe mat file. I'll call this the ``__function_workspace__`` matrix.\n\nWhen I saved two anonymous functions in a mat file, or appended another\nanonymous function to the mat file, there was still only one\n``__function_workspace__`` un-named matrix at the end, but larger than\nthat for a mat file with a single anonymous function, suggesting that\nthe workspaces for the two functions had been merged.\n\nThe ``__function_workspace__`` matrix appears to be of double class\n(``mxCLASS_DOUBLE``), but stored as uint8, the memory for which is in\nthe format of a mini .mat file, without the first 124 bytes of the file\nheader (the description and the subsystem_offset), but with the version\nU2 bytes, and the S2 endian test bytes. There follow 4 zero bytes,\npresumably for 8 byte padding, and then a series of ``miMATRIX``\nentries, as in a standard mat file. 
The ``miMATRIX`` entries appear to\nbe series of un-named (name == '') matrices, and may also contain arrays\nof this same mini-mat format.\n\nI guess that:\n\n* saving an anonymous function back to a mat file will need the\n associated ``__function_workspace__`` matrix saved as well for the\n anonymous function to work correctly.\n* appending to a mat file that has a ``__function_workspace__`` would\n involve first pulling off this workspace, appending, checking whether\n there were any more anonymous functions appended, and then somehow\n merging the relevant workspaces, and saving at the end of the mat\n file.\n\nThe mat files I was playing with are in ``tests/data``:\n\n* sqr.mat\n* parabola.mat\n* some_functions.mat\n\nSee ``tests/test_mio.py:test_mio_funcs.py`` for the debugging\nscript I was working with.\n\n'''\n\n# Small fragments of current code adapted from matfile.py by Heiko\n# Henkelmann; parts of the code for simplify_cells=True adapted from\n# http://blog.nephics.com/2019/08/28/better-loadmat-for-scipy/.\n\nimport os\nimport time\nimport sys\nimport zlib\n\nfrom io import BytesIO\n\nimport warnings\n\nimport numpy as np\nfrom numpy.compat import asbytes, asstr\n\nimport scipy.sparse\n\nfrom .byteordercodes import native_code, swapped_code\n\nfrom .miobase import (MatFileReader, docfiller, matdims, read_dtype,\n arr_to_chars, arr_dtype_number, MatWriteError,\n MatReadError, MatReadWarning)\n\n# Reader object for matlab 5 format variables\nfrom .mio5_utils import VarReader5\n\n# Constants and helper objects\nfrom .mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES,\n NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8,\n miUTF8, miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS,\n mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS,\n mxDOUBLE_CLASS, mclass_info, mat_struct)\n\nfrom .streams import ZlibInputStream\n\n\ndef _has_struct(elem):\n \"\"\"Determine if elem is an array and if first array item is a struct.\"\"\"\n return (isinstance(elem, np.ndarray) and (elem.size > 0) and\n isinstance(elem[0], mat_struct))\n\n\ndef _inspect_cell_array(ndarray):\n \"\"\"Construct lists from cell arrays (loaded as numpy ndarrays), recursing\n into items if they contain mat_struct objects.\"\"\"\n elem_list = []\n for sub_elem in ndarray:\n if isinstance(sub_elem, mat_struct):\n elem_list.append(_matstruct_to_dict(sub_elem))\n elif _has_struct(sub_elem):\n elem_list.append(_inspect_cell_array(sub_elem))\n else:\n elem_list.append(sub_elem)\n return elem_list\n\n\ndef _matstruct_to_dict(matobj):\n \"\"\"Construct nested dicts from mat_struct objects.\"\"\"\n d = {}\n for f in matobj._fieldnames:\n elem = matobj.__dict__[f]\n if isinstance(elem, mat_struct):\n d[f] = _matstruct_to_dict(elem)\n elif _has_struct(elem):\n d[f] = _inspect_cell_array(elem)\n else:\n d[f] = elem\n return d\n\n\ndef _simplify_cells(d):\n \"\"\"Convert mat objects in dict to nested dicts.\"\"\"\n for key in d:\n if isinstance(d[key], mat_struct):\n d[key] = _matstruct_to_dict(d[key])\n elif _has_struct(d[key]):\n d[key] = _inspect_cell_array(d[key])\n return d\n\n\nclass MatFile5Reader(MatFileReader):\n ''' Reader for Mat 5 mat files\n Adds the following attribute to base class\n\n uint16_codec - char codec to use for uint16 char arrays\n (defaults to system default codec)\n\n Uses variable reader that has the following stardard interface (see\n abstract class in ``miobase``::\n\n __init__(self, file_reader)\n read_header(self)\n array_from_header(self)\n\n and added interface::\n\n set_stream(self, stream)\n 
read_full_tag(self)\n\n '''\n @docfiller\n def __init__(self,\n mat_stream,\n byte_order=None,\n mat_dtype=False,\n squeeze_me=False,\n chars_as_strings=True,\n matlab_compatible=False,\n struct_as_record=True,\n verify_compressed_data_integrity=True,\n uint16_codec=None,\n simplify_cells=False):\n '''Initializer for matlab 5 file format reader\n\n %(matstream_arg)s\n %(load_args)s\n %(struct_arg)s\n uint16_codec : {None, string}\n Set codec to use for uint16 char arrays (e.g., 'utf-8').\n Use system default codec if None\n '''\n super(MatFile5Reader, self).__init__(\n mat_stream,\n byte_order,\n mat_dtype,\n squeeze_me,\n chars_as_strings,\n matlab_compatible,\n struct_as_record,\n verify_compressed_data_integrity,\n simplify_cells)\n # Set uint16 codec\n if not uint16_codec:\n uint16_codec = sys.getdefaultencoding()\n self.uint16_codec = uint16_codec\n # placeholders for readers - see initialize_read method\n self._file_reader = None\n self._matrix_reader = None\n\n def guess_byte_order(self):\n ''' Guess byte order.\n Sets stream pointer to 0 '''\n self.mat_stream.seek(126)\n mi = self.mat_stream.read(2)\n self.mat_stream.seek(0)\n return mi == b'IM' and '<' or '>'\n\n def read_file_header(self):\n ''' Read in mat 5 file header '''\n hdict = {}\n hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header']\n hdr = read_dtype(self.mat_stream, hdr_dtype)\n hdict['__header__'] = hdr['description'].item().strip(b' \\t\\n\\000')\n v_major = hdr['version'] >> 8\n v_minor = hdr['version'] & 0xFF\n hdict['__version__'] = '%d.%d' % (v_major, v_minor)\n return hdict\n\n def initialize_read(self):\n ''' Run when beginning read of variables\n\n Sets up readers from parameters in `self`\n '''\n # reader for top level stream. We need this extra top-level\n # reader because we use the matrix_reader object to contain\n # compressed matrices (so they have their own stream)\n self._file_reader = VarReader5(self)\n # reader for matrix streams\n self._matrix_reader = VarReader5(self)\n\n def read_var_header(self):\n ''' Read header, return header, next position\n\n Header has to define at least .name and .is_global\n\n Parameters\n ----------\n None\n\n Returns\n -------\n header : object\n object that can be passed to self.read_var_array, and that\n has attributes .name and .is_global\n next_position : int\n position in stream of next variable\n '''\n mdtype, byte_count = self._file_reader.read_full_tag()\n if not byte_count > 0:\n raise ValueError(\"Did not read any bytes\")\n next_pos = self.mat_stream.tell() + byte_count\n if mdtype == miCOMPRESSED:\n # Make new stream from compressed data\n stream = ZlibInputStream(self.mat_stream, byte_count)\n self._matrix_reader.set_stream(stream)\n check_stream_limit = self.verify_compressed_data_integrity\n mdtype, byte_count = self._matrix_reader.read_full_tag()\n else:\n check_stream_limit = False\n self._matrix_reader.set_stream(self.mat_stream)\n if not mdtype == miMATRIX:\n raise TypeError('Expecting miMATRIX type here, got %d' % mdtype)\n header = self._matrix_reader.read_header(check_stream_limit)\n return header, next_pos\n\n def read_var_array(self, header, process=True):\n ''' Read array, given `header`\n\n Parameters\n ----------\n header : header object\n object with fields defining variable header\n process : {True, False} bool, optional\n If True, apply recursive post-processing during loading of\n array.\n\n Returns\n -------\n arr : array\n array with post-processing applied or not according to\n `process`.\n '''\n return 
self._matrix_reader.array_from_header(header, process)\n\n def get_variables(self, variable_names=None):\n ''' get variables from stream as dictionary\n\n variable_names - optional list of variable names to get\n\n If variable_names is None, then get all variables in file\n '''\n if isinstance(variable_names, str):\n variable_names = [variable_names]\n elif variable_names is not None:\n variable_names = list(variable_names)\n\n self.mat_stream.seek(0)\n # Here we pass all the parameters in self to the reading objects\n self.initialize_read()\n mdict = self.read_file_header()\n mdict['__globals__'] = []\n while not self.end_of_stream():\n hdr, next_position = self.read_var_header()\n name = asstr(hdr.name)\n if name in mdict:\n warnings.warn('Duplicate variable name \"%s\" in stream'\n ' - replacing previous with new\\n'\n 'Consider mio5.varmats_from_mat to split '\n 'file into single variable files' % name,\n MatReadWarning, stacklevel=2)\n if name == '':\n # can only be a matlab 7 function workspace\n name = '__function_workspace__'\n # We want to keep this raw because mat_dtype processing\n # will break the format (uint8 as mxDOUBLE_CLASS)\n process = False\n else:\n process = True\n if variable_names is not None and name not in variable_names:\n self.mat_stream.seek(next_position)\n continue\n try:\n res = self.read_var_array(hdr, process)\n except MatReadError as err:\n warnings.warn(\n f'Unreadable variable \"{name}\", because \"{err}\"',\n Warning, stacklevel=2)\n res = f\"Read error: {err}\"\n self.mat_stream.seek(next_position)\n mdict[name] = res\n if hdr.is_global:\n mdict['__globals__'].append(name)\n if variable_names is not None:\n variable_names.remove(name)\n if len(variable_names) == 0:\n break\n if self.simplify_cells:\n return _simplify_cells(mdict)\n else:\n return mdict\n\n def list_variables(self):\n ''' list variables from stream '''\n self.mat_stream.seek(0)\n # Here we pass all the parameters in self to the reading objects\n self.initialize_read()\n self.read_file_header()\n vars = []\n while not self.end_of_stream():\n hdr, next_position = self.read_var_header()\n name = asstr(hdr.name)\n if name == '':\n # can only be a matlab 7 function workspace\n name = '__function_workspace__'\n\n shape = self._matrix_reader.shape_from_header(hdr)\n if hdr.is_logical:\n info = 'logical'\n else:\n info = mclass_info.get(hdr.mclass, 'unknown')\n vars.append((name, shape, info))\n\n self.mat_stream.seek(next_position)\n return vars\n\n\ndef varmats_from_mat(file_obj):\n \"\"\" Pull variables out of mat 5 file as a sequence of mat file objects\n\n This can be useful with a difficult mat file, containing unreadable\n variables. This routine pulls the variables out in raw form and puts them,\n unread, back into a file stream for saving or reading. Another use is the\n pathological case where there is more than one variable of the same name in\n the file; this routine returns the duplicates, whereas the standard reader\n will overwrite duplicates in the returned dictionary.\n\n The file pointer in `file_obj` will be undefined. File pointers for the\n returned file-like objects are set at 0.\n\n Parameters\n ----------\n file_obj : file-like\n file object containing mat file\n\n Returns\n -------\n named_mats : list\n list contains tuples of (name, BytesIO) where BytesIO is a file-like\n object containing mat file contents as for a single variable. The\n BytesIO contains a string with the original header and a single var. 
If\n ``var_file_obj`` is an individual BytesIO instance, then save as a mat\n file with something like ``open('test.mat',\n 'wb').write(var_file_obj.read())``\n\n Examples\n --------\n >>> import scipy.io\n\n BytesIO is from the ``io`` module in Python 3, and is ``cStringIO`` for\n Python < 3.\n\n >>> mat_fileobj = BytesIO()\n >>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'})\n >>> varmats = varmats_from_mat(mat_fileobj)\n >>> sorted([name for name, str_obj in varmats])\n ['a', 'b']\n \"\"\"\n rdr = MatFile5Reader(file_obj)\n file_obj.seek(0)\n # Raw read of top-level file header\n hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize\n raw_hdr = file_obj.read(hdr_len)\n # Initialize variable reading\n file_obj.seek(0)\n rdr.initialize_read()\n rdr.read_file_header()\n next_position = file_obj.tell()\n named_mats = []\n while not rdr.end_of_stream():\n start_position = next_position\n hdr, next_position = rdr.read_var_header()\n name = asstr(hdr.name)\n # Read raw variable string\n file_obj.seek(start_position)\n byte_count = next_position - start_position\n var_str = file_obj.read(byte_count)\n # write to stringio object\n out_obj = BytesIO()\n out_obj.write(raw_hdr)\n out_obj.write(var_str)\n out_obj.seek(0)\n named_mats.append((name, out_obj))\n return named_mats\n\n\nclass EmptyStructMarker(object):\n \"\"\" Class to indicate presence of empty matlab struct on output \"\"\"\n\n\ndef to_writeable(source):\n ''' Convert input object ``source`` to something we can write\n\n Parameters\n ----------\n source : object\n\n Returns\n -------\n arr : None or ndarray or EmptyStructMarker\n If `source` cannot be converted to something we can write to a matfile,\n return None. If `source` is equivalent to an empty dictionary, return\n ``EmptyStructMarker``. 
Otherwise return `source` converted to an\n ndarray with contents for writing to matfile.\n '''\n if isinstance(source, np.ndarray):\n return source\n if source is None:\n return None\n # Objects that implement mappings\n is_mapping = (hasattr(source, 'keys') and hasattr(source, 'values') and\n hasattr(source, 'items'))\n # Objects that don't implement mappings, but do have dicts\n if isinstance(source, np.generic):\n # NumPy scalars are never mappings (PyPy issue workaround)\n pass\n elif not is_mapping and hasattr(source, '__dict__'):\n source = dict((key, value) for key, value in source.__dict__.items()\n if not key.startswith('_'))\n is_mapping = True\n if is_mapping:\n dtype = []\n values = []\n for field, value in source.items():\n if (isinstance(field, str) and\n field[0] not in '_0123456789'):\n dtype.append((str(field), object))\n values.append(value)\n if dtype:\n return np.array([tuple(values)], dtype)\n else:\n return EmptyStructMarker\n # Next try and convert to an array\n narr = np.asanyarray(source)\n if narr.dtype.type in (object, np.object_) and \\\n narr.shape == () and narr == source:\n # No interesting conversion possible\n return None\n return narr\n\n\n# Native byte ordered dtypes for convenience for writers\nNDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header']\nNDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full']\nNDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata']\nNDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags']\n\n\nclass VarWriter5(object):\n ''' Generic matlab matrix writing class '''\n mat_tag = np.zeros((), NDT_TAG_FULL)\n mat_tag['mdtype'] = miMATRIX\n\n def __init__(self, file_writer):\n self.file_stream = file_writer.file_stream\n self.unicode_strings = file_writer.unicode_strings\n self.long_field_names = file_writer.long_field_names\n self.oned_as = file_writer.oned_as\n # These are used for top level writes, and unset after\n self._var_name = None\n self._var_is_global = False\n\n def write_bytes(self, arr):\n self.file_stream.write(arr.tobytes(order='F'))\n\n def write_string(self, s):\n self.file_stream.write(s)\n\n def write_element(self, arr, mdtype=None):\n ''' write tag and data '''\n if mdtype is None:\n mdtype = NP_TO_MTYPES[arr.dtype.str[1:]]\n # Array needs to be in native byte order\n if arr.dtype.byteorder == swapped_code:\n arr = arr.byteswap().newbyteorder()\n byte_count = arr.size*arr.itemsize\n if byte_count <= 4:\n self.write_smalldata_element(arr, mdtype, byte_count)\n else:\n self.write_regular_element(arr, mdtype, byte_count)\n\n def write_smalldata_element(self, arr, mdtype, byte_count):\n # write tag with embedded data\n tag = np.zeros((), NDT_TAG_SMALL)\n tag['byte_count_mdtype'] = (byte_count << 16) + mdtype\n # if arr.tobytes is < 4, the element will be zero-padded as needed.\n tag['data'] = arr.tobytes(order='F')\n self.write_bytes(tag)\n\n def write_regular_element(self, arr, mdtype, byte_count):\n # write tag, data\n tag = np.zeros((), NDT_TAG_FULL)\n tag['mdtype'] = mdtype\n tag['byte_count'] = byte_count\n self.write_bytes(tag)\n self.write_bytes(arr)\n # pad to next 64-bit boundary\n bc_mod_8 = byte_count % 8\n if bc_mod_8:\n self.file_stream.write(b'\\x00' * (8-bc_mod_8))\n\n def write_header(self,\n shape,\n mclass,\n is_complex=False,\n is_logical=False,\n nzmax=0):\n ''' Write header for given data options\n shape : sequence\n array shape\n mclass - mat5 matrix class\n is_complex - True if matrix is complex\n is_logical - True if matrix is logical\n nzmax - max non zero elements 
for sparse arrays\n\n We get the name and the global flag from the object, and reset\n them to defaults after we've used them\n '''\n # get name and is_global from one-shot object store\n name = self._var_name\n is_global = self._var_is_global\n # initialize the top-level matrix tag, store position\n self._mat_tag_pos = self.file_stream.tell()\n self.write_bytes(self.mat_tag)\n # write array flags (complex, global, logical, class, nzmax)\n af = np.zeros((), NDT_ARRAY_FLAGS)\n af['data_type'] = miUINT32\n af['byte_count'] = 8\n flags = is_complex << 3 | is_global << 2 | is_logical << 1\n af['flags_class'] = mclass | flags << 8\n af['nzmax'] = nzmax\n self.write_bytes(af)\n # shape\n self.write_element(np.array(shape, dtype='i4'))\n # write name\n name = np.asarray(name)\n if name == '': # empty string zero-terminated\n self.write_smalldata_element(name, miINT8, 0)\n else:\n self.write_element(name, miINT8)\n # reset the one-shot store to defaults\n self._var_name = ''\n self._var_is_global = False\n\n def update_matrix_tag(self, start_pos):\n curr_pos = self.file_stream.tell()\n self.file_stream.seek(start_pos)\n byte_count = curr_pos - start_pos - 8\n if byte_count >= 2**32:\n raise MatWriteError(\"Matrix too large to save with Matlab \"\n \"5 format\")\n self.mat_tag['byte_count'] = byte_count\n self.write_bytes(self.mat_tag)\n self.file_stream.seek(curr_pos)\n\n def write_top(self, arr, name, is_global):\n \"\"\" Write variable at top level of mat file\n\n Parameters\n ----------\n arr : array_like\n array-like object to create writer for\n name : str, optional\n name as it will appear in matlab workspace\n default is empty string\n is_global : {False, True}, optional\n whether variable will be global on load into matlab\n \"\"\"\n # these are set before the top-level header write, and unset at\n # the end of the same write, because they do not apply for lower levels\n self._var_is_global = is_global\n self._var_name = name\n # write the header and data\n self.write(arr)\n\n def write(self, arr):\n ''' Write `arr` to stream at top and sub levels\n\n Parameters\n ----------\n arr : array_like\n array-like object to create writer for\n '''\n # store position, so we can update the matrix tag\n mat_tag_pos = self.file_stream.tell()\n # First check if these are sparse\n if scipy.sparse.issparse(arr):\n self.write_sparse(arr)\n self.update_matrix_tag(mat_tag_pos)\n return\n # Try to convert things that aren't arrays\n narr = to_writeable(arr)\n if narr is None:\n raise TypeError('Could not convert %s (type %s) to array'\n % (arr, type(arr)))\n if isinstance(narr, MatlabObject):\n self.write_object(narr)\n elif isinstance(narr, MatlabFunction):\n raise MatWriteError('Cannot write matlab functions')\n elif narr is EmptyStructMarker: # empty struct array\n self.write_empty_struct()\n elif narr.dtype.fields: # struct array\n self.write_struct(narr)\n elif narr.dtype.hasobject: # cell array\n self.write_cells(narr)\n elif narr.dtype.kind in ('U', 'S'):\n if self.unicode_strings:\n codec = 'UTF8'\n else:\n codec = 'ascii'\n self.write_char(narr, codec)\n else:\n self.write_numeric(narr)\n self.update_matrix_tag(mat_tag_pos)\n\n def write_numeric(self, arr):\n imagf = arr.dtype.kind == 'c'\n logif = arr.dtype.kind == 'b'\n try:\n mclass = NP_TO_MXTYPES[arr.dtype.str[1:]]\n except KeyError:\n # No matching matlab type, probably complex256 / float128 / float96\n # Cast data to complex128 / float64.\n if imagf:\n arr = arr.astype('c128')\n elif logif:\n arr = arr.astype('i1') # Should only contain 0/1\n 
else:\n arr = arr.astype('f8')\n mclass = mxDOUBLE_CLASS\n self.write_header(matdims(arr, self.oned_as),\n mclass,\n is_complex=imagf,\n is_logical=logif)\n if imagf:\n self.write_element(arr.real)\n self.write_element(arr.imag)\n else:\n self.write_element(arr)\n\n def write_char(self, arr, codec='ascii'):\n ''' Write string array `arr` with given `codec`\n '''\n if arr.size == 0 or np.all(arr == ''):\n # This an empty string array or a string array containing\n # only empty strings. Matlab cannot distinguish between a\n # string array that is empty, and a string array containing\n # only empty strings, because it stores strings as arrays of\n # char. There is no way of having an array of char that is\n # not empty, but contains an empty string. We have to\n # special-case the array-with-empty-strings because even\n # empty strings have zero padding, which would otherwise\n # appear in matlab as a string with a space.\n shape = (0,) * np.max([arr.ndim, 2])\n self.write_header(shape, mxCHAR_CLASS)\n self.write_smalldata_element(arr, miUTF8, 0)\n return\n # non-empty string.\n #\n # Convert to char array\n arr = arr_to_chars(arr)\n # We have to write the shape directly, because we are going\n # recode the characters, and the resulting stream of chars\n # may have a different length\n shape = arr.shape\n self.write_header(shape, mxCHAR_CLASS)\n if arr.dtype.kind == 'U' and arr.size:\n # Make one long string from all the characters. We need to\n # transpose here, because we're flattening the array, before\n # we write the bytes. The bytes have to be written in\n # Fortran order.\n n_chars = np.prod(shape)\n st_arr = np.ndarray(shape=(),\n dtype=arr_dtype_number(arr, n_chars),\n buffer=arr.T.copy()) # Fortran order\n # Recode with codec to give byte string\n st = st_arr.item().encode(codec)\n # Reconstruct as 1-D byte array\n arr = np.ndarray(shape=(len(st),),\n dtype='S1',\n buffer=st)\n self.write_element(arr, mdtype=miUTF8)\n\n def write_sparse(self, arr):\n ''' Sparse matrices are 2D\n '''\n A = arr.tocsc() # convert to sparse CSC format\n A.sort_indices() # MATLAB expects sorted row indices\n is_complex = (A.dtype.kind == 'c')\n is_logical = (A.dtype.kind == 'b')\n nz = A.nnz\n self.write_header(matdims(arr, self.oned_as),\n mxSPARSE_CLASS,\n is_complex=is_complex,\n is_logical=is_logical,\n # matlab won't load file with 0 nzmax\n nzmax=1 if nz == 0 else nz)\n self.write_element(A.indices.astype('i4'))\n self.write_element(A.indptr.astype('i4'))\n self.write_element(A.data.real)\n if is_complex:\n self.write_element(A.data.imag)\n\n def write_cells(self, arr):\n self.write_header(matdims(arr, self.oned_as),\n mxCELL_CLASS)\n # loop over data, column major\n A = np.atleast_2d(arr).flatten('F')\n for el in A:\n self.write(el)\n\n def write_empty_struct(self):\n self.write_header((1, 1), mxSTRUCT_CLASS)\n # max field name length set to 1 in an example matlab struct\n self.write_element(np.array(1, dtype=np.int32))\n # Field names element is empty\n self.write_element(np.array([], dtype=np.int8))\n\n def write_struct(self, arr):\n self.write_header(matdims(arr, self.oned_as),\n mxSTRUCT_CLASS)\n self._write_items(arr)\n\n def _write_items(self, arr):\n # write fieldnames\n fieldnames = [f[0] for f in arr.dtype.descr]\n length = max([len(fieldname) for fieldname in fieldnames])+1\n max_length = (self.long_field_names and 64) or 32\n if length > max_length:\n raise ValueError(\"Field names are restricted to %d characters\" %\n (max_length-1))\n self.write_element(np.array([length], 
dtype='i4'))\n self.write_element(\n np.array(fieldnames, dtype='S%d' % (length)),\n mdtype=miINT8)\n A = np.atleast_2d(arr).flatten('F')\n for el in A:\n for f in fieldnames:\n self.write(el[f])\n\n def write_object(self, arr):\n '''Same as writing structs, except different mx class, and extra\n classname element after header\n '''\n self.write_header(matdims(arr, self.oned_as),\n mxOBJECT_CLASS)\n self.write_element(np.array(arr.classname, dtype='S'),\n mdtype=miINT8)\n self._write_items(arr)\n\n\nclass MatFile5Writer(object):\n ''' Class for writing mat5 files '''\n\n @docfiller\n def __init__(self, file_stream,\n do_compression=False,\n unicode_strings=False,\n global_vars=None,\n long_field_names=False,\n oned_as='row'):\n ''' Initialize writer for matlab 5 format files\n\n Parameters\n ----------\n %(do_compression)s\n %(unicode_strings)s\n global_vars : None or sequence of strings, optional\n Names of variables to be marked as global for matlab\n %(long_fields)s\n %(oned_as)s\n '''\n self.file_stream = file_stream\n self.do_compression = do_compression\n self.unicode_strings = unicode_strings\n if global_vars:\n self.global_vars = global_vars\n else:\n self.global_vars = []\n self.long_field_names = long_field_names\n self.oned_as = oned_as\n self._matrix_writer = None\n\n def write_file_header(self):\n # write header\n hdr = np.zeros((), NDT_FILE_HDR)\n hdr['description'] = 'MATLAB 5.0 MAT-file Platform: %s, Created on: %s' \\\n % (os.name,time.asctime())\n hdr['version'] = 0x0100\n hdr['endian_test'] = np.ndarray(shape=(),\n dtype='S2',\n buffer=np.uint16(0x4d49))\n self.file_stream.write(hdr.tobytes())\n\n def put_variables(self, mdict, write_header=None):\n ''' Write variables in `mdict` to stream\n\n Parameters\n ----------\n mdict : mapping\n mapping with method ``items`` returns name, contents pairs where\n ``name`` which will appear in the matlab workspace in file load, and\n ``contents`` is something writeable to a matlab file, such as a NumPy\n array.\n write_header : {None, True, False}, optional\n If True, then write the matlab file header before writing the\n variables. If None (the default) then write the file header\n if we are at position 0 in the stream. By setting False\n here, and setting the stream position to the end of the file,\n you can append variables to a matlab file\n '''\n # write header if requested, or None and start of file\n if write_header is None:\n write_header = self.file_stream.tell() == 0\n if write_header:\n self.write_file_header()\n self._matrix_writer = VarWriter5(self)\n for name, var in mdict.items():\n if name[0] == '_':\n continue\n is_global = name in self.global_vars\n if self.do_compression:\n stream = BytesIO()\n self._matrix_writer.file_stream = stream\n self._matrix_writer.write_top(var, asbytes(name), is_global)\n out_str = zlib.compress(stream.getvalue())\n tag = np.empty((), NDT_TAG_FULL)\n tag['mdtype'] = miCOMPRESSED\n tag['byte_count'] = len(out_str)\n self.file_stream.write(tag.tobytes())\n self.file_stream.write(out_str)\n else: # not compressing\n self._matrix_writer.write_top(var, asbytes(name), is_global)\n" ]
[ [ "numpy.atleast_2d", "numpy.empty", "numpy.zeros", "numpy.compat.asstr", "numpy.uint16", "numpy.asarray", "numpy.asanyarray", "numpy.compat.asbytes", "numpy.all", "numpy.max", "numpy.prod", "numpy.array" ] ]
FJFranklin/BeesEtAl
[ "3fd21d044e77b4a1df56ac2f405e2084bebd54e1" ]
[ "BeesEtAl/Gholami.py" ]
[ "# *** References ***\n\n# Gholami & Mohammadi, A Novel Combination of Bees and Firefly Algorithm to Optimize Continuous Problems\n\n# Türker Tuncer, LDW-SCSA: Logistic Dynamic Weight based Sine Cosine Search Algorithm for Numerical Functions Optimization \n# https://arxiv.org/ftp/arxiv/papers/1809/1809.03055.pdf\n\n# Hartmut Pohlheim, Examples of Objective Functions\n# http://www.geatbx.com/download/GEATbx_ObjFunExpl_v38.pdf\n\n# Wikipedia, Test functions for optimization\n# https://en.wikipedia.org/wiki/Test_functions_for_optimization\n\nimport numpy as np\n\nfrom .Base_Coster import Base_Coster\n\nclass F1(Base_Coster):\n \"\"\"\n Function F1 from Gholami & Mohammadi FA-BA Hybrid paper\n De Jong / Sphere (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -5.12 * np.ones(Ndim), 5.12 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = sum(np.power(self.XA, 2))\n\n def meso(self):\n None\n\nclass F2(Base_Coster):\n \"\"\"\n Function F2 from Gholami & Mohammadi FA-BA Hybrid paper\n Schwefel 2.22 (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -10 * np.ones(Ndim), 10 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = sum(np.abs(self.XA)) + np.prod(np.abs(self.XA))\n\n def meso(self):\n None\n\nclass F3(Base_Coster):\n \"\"\"\n Function F3 from Gholami & Mohammadi FA-BA Hybrid paper\n Schwefel 1.2 - Rotated hyper-ellipsoid (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -65.536 * np.ones(Ndim), 65.536 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = 0\n for i in range(0, len(self.XA)):\n self.cost = self.cost + (sum(self.XA[0:(i+1)]))**2\n\n def meso(self):\n None\n\nclass F4(Base_Coster):\n \"\"\"\n Function F4 from Gholami & Mohammadi FA-BA Hybrid paper\n Schwefel 2.21 (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -100 * np.ones(Ndim), 100 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = max(np.abs(self.XA))\n\n def meso(self):\n None\n\nclass F5(Base_Coster):\n \"\"\"\n Function F5 from Gholami & Mohammadi FA-BA Hybrid paper\n Rosenbrock (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -2.048 * np.ones(Ndim), 2.048 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = sum(100 * np.power(self.XA[1:len(self.XA)] - np.power(self.XA[0:(len(self.XA)-1)], 2), 2) + np.power(1 - self.XA[0:(len(self.XA)-1)], 2))\n\n def meso(self):\n None\n\nclass F6(Base_Coster):\n \"\"\"\n Function F6 from Gholami & Mohammadi FA-BA Hybrid paper\n Step (ND) cost function; optimum @ (-0.5,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -100 * np.ones(Ndim), 100 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def 
map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = sum(np.floor(np.power(self.XA + 0.5, 2)))\n\n def meso(self):\n None\n\nclass F7(Base_Coster):\n \"\"\"\n Function F7 from Gholami & Mohammadi FA-BA Hybrid paper\n Noise (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -1.28 * np.ones(Ndim), 1.28 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = sum(np.power(self.XA, 4) * np.asarray(range(1, 1 + len(self.XA)))) + np.random.rand(1)\n\n def meso(self):\n None\n\nclass F8(Base_Coster):\n \"\"\"\n Function F8 from Gholami & Mohammadi FA-BA Hybrid paper\n Schwefel (ND) cost function\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -500 * np.ones(Ndim), 500 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = -sum(self.XA * np.sin(np.sqrt(abs(self.XA))))\n\n def meso(self):\n None\n\nclass F9(Base_Coster):\n \"\"\"\n Function F9 from Gholami & Mohammadi FA-BA Hybrid paper\n Rastrigin (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -5.12 * np.ones(Ndim), 5.12 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = sum(np.power(self.XA, 2) - 10 * np.cos(2 * np.pi * self.XA) + 10)\n\n def meso(self):\n None\n\nclass F10(Base_Coster):\n \"\"\"\n Function F10 from Gholami & Mohammadi FA-BA Hybrid paper\n Ackley (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -32.768 * np.ones(Ndim), 32.768 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n @staticmethod\n def rms(X):\n return np.sqrt(X.dot(X) / len(X))\n\n def evaluate_cost(self):\n self.cost = np.exp(1) + 20 * (1 - np.exp(-F10.rms(self.XA) / 5)) - np.exp(sum(np.cos(2 * np.pi * self.XA)) / len(self.XA))\n\n def meso(self):\n None\n\nclass F11(Base_Coster):\n \"\"\"\n Function F11 from Gholami & Mohammadi FA-BA Hybrid paper\n Griewangk (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -600 * np.ones(Ndim), 600 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n def evaluate_cost(self):\n self.cost = sum(np.power(self.XA, 2)) / 4000 - np.prod(np.cos(np.power(self.XA, 2) / np.power(range(1, 1+len(self.XA)), 0.5))) + 1\n\n def meso(self):\n None\n\nclass F12(Base_Coster):\n \"\"\"\n Function F12 from Gholami & Mohammadi FA-BA Hybrid paper\n Generalised Penalised 1 (ND) cost function; optimum @ (0,...\n \"\"\"\n\n @staticmethod\n def extents(Ndim):\n return -50 * np.ones(Ndim), 50 * np.ones(Ndim)\n\n def __init__(self, base_optimiser):\n Base_Coster.__init__(self, base_optimiser)\n\n def map_to_solution_space(self, X):\n return X\n\n @staticmethod\n def u(xi, a, k, m):\n if xi > a:\n v = k * (xi - a)**m\n elif xi < -a:\n v = k * (-xi - a)**m\n else:\n v = 0\n return v\n \n def evaluate_cost(self):\n y = 1 + (self.XA + 1) / 4\n\n c = 0\n for i in range(0, len(self.XA)):\n c = c + F12.u(self.XA[i], 10, 100, 4)\n\n 
self.cost = sum(np.power(y[0:(len(self.XA)-1)] - 1, 2) * (1 + 10 * np.power(np.sin(np.pi * y[1:len(self.XA)]), 2)))\n self.cost = (self.cost + 10 * np.sin(np.pi * y[0]) + (y[len(self.XA)-1] - 1)**2) * np.pi / len(self.XA) + c\n\n def meso(self):\n None\n\ndef Gholami_TestFunction_Extents(number, Ndim=30):\n minima = None\n maxima = None\n\n if number == 1:\n minima, maxima = F1.extents(Ndim)\n if number == 2:\n minima, maxima = F2.extents(Ndim)\n if number == 3:\n minima, maxima = F3.extents(Ndim)\n if number == 4:\n minima, maxima = F4.extents(Ndim)\n if number == 5:\n minima, maxima = F5.extents(Ndim)\n if number == 6:\n minima, maxima = F6.extents(Ndim)\n if number == 7:\n minima, maxima = F7.extents(Ndim)\n if number == 8:\n minima, maxima = F8.extents(Ndim)\n if number == 9:\n minima, maxima = F9.extents(Ndim)\n if number == 10:\n minima, maxima = F10.extents(Ndim)\n if number == 11:\n minima, maxima = F11.extents(Ndim)\n if number == 12:\n minima, maxima = F12.extents(Ndim)\n\n return minima, maxima\n\ndef Gholami_TestFunction_Coster(number, base_optimiser):\n coster = None\n\n if number == 1:\n coster = F1(base_optimiser)\n if number == 2:\n coster = F2(base_optimiser)\n if number == 3:\n coster = F3(base_optimiser)\n if number == 4:\n coster = F4(base_optimiser)\n if number == 5:\n coster = F5(base_optimiser)\n if number == 6:\n coster = F6(base_optimiser)\n if number == 7:\n coster = F7(base_optimiser)\n if number == 8:\n coster = F8(base_optimiser)\n if number == 9:\n coster = F9(base_optimiser)\n if number == 10:\n coster = F10(base_optimiser)\n if number == 11:\n coster = F11(base_optimiser)\n if number == 12:\n coster = F12(base_optimiser)\n\n return coster\n" ]
[ [ "numpy.ones", "numpy.abs", "numpy.cos", "numpy.exp", "numpy.power", "numpy.random.rand", "numpy.sin" ] ]
VenkateshBH99/django_local_library
[ "db834cbe6ec475a2d3224b3ea9b56b1fa3519e9f" ]
[ "predict_risk_1/machine_learning_models/KNN.py" ]
[ "# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport random\n\n# Importing the dataset\ndataset = pd.read_csv('kidney_disease2.csv')\n\nX = dataset.iloc[:,:-1].values\ny = dataset.iloc[:,24].values\n\n\n#handling missing data\n\nfrom sklearn.preprocessing import Imputer\nimputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)\nimputer = imputer.fit(X[:,:24])\nX[:,:24] = imputer.transform(X[:,:24])\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.5, random_state =101)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\ntrain=list(X_train)\nX_train = sc.fit_transform(X_train)\nfrom sklearn.externals import joblib\n# Save it\nscaler_file = \"standard_scalar_KNN.pkl\"\njoblib.dump(sc, scaler_file)\nX_test = sc.transform(X_test)\n\n#EXPLORING THE DATASET\nimport seaborn as sn\nsn.countplot(x='classification',data=dataset)\ndataset.classification.value_counts()\nprint(\"------\",dataset.classification.value_counts(),\"----------\")\n# Fitting Decision Tree Classification to the Training set\nfrom sklearn.neighbors import KNeighborsClassifier\nclassifier = KNeighborsClassifier(n_neighbors=3)\nclassifier.fit(X_train, y_train)\n\nfrom sklearn.externals import joblib\nfilename ='KNN_model.pkl'\njoblib.dump(classifier,filename)\n\n\n# Predicting the Test set results\nprint(X_test)\ny_pred = classifier.predict(X_test)\nprint(y_pred)\nprint(y_test)\n\n#ACCURACY SCORE\nfrom sklearn.metrics import accuracy_score\naccuracy_score(y_test,y_pred)\n\n##CONFUSION MATRIX\nfrom sklearn.metrics import classification_report, confusion_matrix\ncm=confusion_matrix(y_test, y_pred)\n\n#Interpretation:\nfrom sklearn.metrics import classification_report\nprint(classification_report(y_test, y_pred))\n\n#ROC\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import roc_curve\nlogit_roc_auc = roc_auc_score(y_test, classifier.predict(X_test))\nfpr, tpr, thresholds = roc_curve(y_test, classifier.predict_proba(X_test)[:,1])\nplt.figure()\nplt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)\nplt.plot([0, 1], [0, 1],'r--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver operating characteristic')\nplt.legend(loc=\"lower right\")\nplt.savefig('Log_ROC')\nplt.show()\n\n\n##PREDICTION FOR NEW DATASET\n\nNewdataset = pd.read_csv('newdata.csv')\nsca=StandardScaler()\ntrain=sca.fit_transform(train)\nNewdataset=sca.transform(Newdataset)\nprint(Newdataset)\n\nynew=classifier.predict(Newdataset)\nprint(\"---------\",ynew,\"------------\")\n" ]
[ [ "sklearn.metrics.classification_report", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlim", "matplotlib.pyplot.title", "sklearn.preprocessing.Imputer", "pandas.read_csv", "sklearn.metrics.confusion_matrix", "sklearn.metrics.accuracy_score", "matplotlib.pyplot.ylim", "sklearn.preprocessing.StandardScaler", "sklearn.neighbors.KNeighborsClassifier", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.legend", "matplotlib.pyplot.show", "sklearn.externals.joblib.dump", "matplotlib.pyplot.xlabel" ] ]
iamgreaser/fireball
[ "2c5afb3dc5756a3b26da9045278f7e4a2bc036d2" ]
[ "entity.py" ]
[ "\"\"\"\nCopyright 2011 Ben Russell & contributors. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are\npermitted provided that the following conditions are met:\n\n 1. Redistributions of source code must retain the above copyright notice, this list of\n conditions and the following disclaimer.\n\n 2. Redistributions in binary form must reproduce the above copyright notice, this list\n of conditions and the following disclaimer in the documentation and/or other materials\n provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESS OR IMPLIED\nWARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY\nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\nADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nThe views and conclusions contained in the software and documentation are those of the\nauthors and should not be interpreted as representing official policies, either expressed\nor implied, of the contributors.\n\"\"\"\n\nfrom math import *\n\nimport numpy as np\n\nimport pyglet\n\nimport helpers\n\nMOUSE_SENS_X = 0.3\nMOUSE_SENS_Y = 0.3\nPLAYER_SPEED = 3.0*2.0\nOBJECT_GRAVITY = 9.8*2.0\nPLAYER_FRICTION = 0.02\nPLAYER_JUMP_HEIGHT = 10.0\nCOLLISION_TOLERANCE = 0.2\n\nKEY_MOVE_FORWARD_BIT = 0x0001\nKEY_MOVE_BACKWARD_BIT = 0x0002\nKEY_MOVE_LEFT_BIT = 0x0004\nKEY_MOVE_RIGHT_BIT = 0x0008\nKEY_JUMP_BIT = 0x0010\nKEY_CROUCH_BIT = 0x0020\nKEY_CREEP_BIT = 0x0040\nKEY_ZOOM_BIT = 0x0080\n\nclass AbstractEntity(helpers.ArgGenerator):\n\tARGS = []\n\t\n\tdef set_game(self, idx, game):\n\t\tself.idx = idx\n\t\tself.game = game\n\nclass PositionedEntity(AbstractEntity):\n\tARGS = AbstractEntity.ARGS + [\"origin\",\"velocity\",\"orient_x\",\"orient_z\"]\n\nclass PhysicsEntity(PositionedEntity):\n\tARGS = PositionedEntity.ARGS + []\n\tgrounded = False\n\twalkable = False\n\t\n\t# i had to use floor,\n\t# otherwise the player would bounce like mad when it was in the water\n\tdef trace_vector(self, ox,oy,oz, nx,ny,nz, walkable = False):\n\t\t#walkable = False\n\t\t\n\t\t# prep values\n\t\tdx, dy, dz = (n-o for (o,n) in zip((ox,oy,oz),(nx,ny,nz))) # delta\n\t\t\n\t\t(x1,y1,z1), (x2,y2,z2) = self.BBOX\n\t\theight = floor(abs(z2-z1)-0.001)+1\n\t\t\n\t\tx3, y3, z3 = (v1 if d < 0.0 else v2 for (v1,v2,d) in zip(self.BBOX[0], self.BBOX[1], (dx, dy, dz)))\n\t\tx4, y4, z4 = (v2-v1 if d < 0.0 else v1-v2 for (v1,v2,d) in zip(self.BBOX[0], self.BBOX[1], (dx, dy, dz)))\n\t\t\n\t\tz5 = (0.0 if dz < 0.0 else z4)\n\t\t\n\t\tox += x3\n\t\toy += y3\n\t\toz += z3\n\t\t\n\t\tnx += x3\n\t\tny += y3\n\t\tnz += z3\n\t\t\n\t\tsx, sy, sz = (v%1.0 if d < 0.0 else 1.0-(v%1.0) for v,d in zip((ox,oy,oz),(dx,dy,dz))) # sub\n\t\tgx, gy, gz = (-1 if d < 0.0 else 1 for d in (dx, dy, dz)) # direction (\"go\")\n\t\twx, wy, wz = (0.001 if d < 0.0 else 0.999 for d in (dx, dy, dz)) # cell offset when hitting box\n\t\tvx, vy, vz = (max(0.00001,abs(d)) for d in (dx, dy, dz)) # abs velocity\n\t\tcx, cy, cz = (int(floor(v)) for v in (ox, oy, oz)) # cell\n\t\tdcx, dcy, dcz = 
(abs(int(floor(v))-c) for c,v in zip((cx,cy,cz),(nx,ny,nz))) # cell delta / count\n\t\t\n\t\twalkable = walkable and dz < 0.0\n\t\t\n\t\tdef sfix(sx,sy,sz):\n\t\t\treturn tuple(v if d < 0.0 else 1.0-v for (v,d) in zip((sx,sy,sz),(dx,dy,dz)))\n\t\t\n\t\t# flags to indicate if we've screwed with a value\n\t\tkeep_x = True\n\t\tkeep_y = True\n\t\tkeep_z = True\n\t\t\n\t\tdc = dcx+dcy+dcz\n\t\t\n\t\tfor i in xrange(dc):\n\t\t\t# get our lovely factoriffic stuff\n\t\t\tcalc_x = sx/vx\n\t\t\tcalc_y = sy/vy\n\t\t\tcalc_z = sz/vz\n\t\t\t\n\t\t\ttake_x = calc_x < calc_y and calc_x < calc_z\n\t\t\ttake_y = (not take_x) and calc_y < calc_z\n\t\t\ttake_z = (not take_x) and (not take_y)\n\t\t\t\n\t\t\tif take_x:\n\t\t\t\t# X trace\n\t\t\t\tt = sx/vx\n\t\t\t\tsy -= t*vy\n\t\t\t\tsz -= t*vz\n\t\t\t\t\n\t\t\t\tif keep_x:\n\t\t\t\t\tcx += gx\n\t\t\t\tsx = 1.0\n\t\t\telif take_y:\n\t\t\t\t# Y trace\n\t\t\t\tt = sy/vy\n\t\t\t\tsx -= t*vx\n\t\t\t\tsz -= t*vz\n\t\t\t\t\n\t\t\t\tif keep_y:\n\t\t\t\t\tcy += gy\n\t\t\t\tsy = 1.0\n\t\t\telse:\n\t\t\t\t# Z trace\n\t\t\t\tt = sz/vz\n\t\t\t\tsx -= t*vx\n\t\t\t\tsy -= t*vy\n\t\t\t\t\n\t\t\t\tif keep_z:\n\t\t\t\t\tcz += gz\n\t\t\t\tsz = 1.0\n\t\t\t\n\t\t\t# cell check!\n\t\t\t\n\t\t\tax,ay,az = sfix(sx,sy,sz) # add this to cx,cy,cz\n\t\t\tncx,ncy,ncz = cx+ax,cy+ay,cz+az\n\t\t\tif not keep_x:\n\t\t\t\tncx = nx\n\t\t\tif not keep_y:\n\t\t\t\tncy = ny\n\t\t\tif not keep_z:\n\t\t\t\tncz = nz\n\t\t\t\n\t\t\tif take_x:\n\t\t\t\tfloor_check = not self.game.world.solid_check_box(\n\t\t\t\t\tcx+0.5-gx,ncy,ncz+1,\n\t\t\t\t\tcx+0.5,ncy+y4,ncz+z4+1\n\t\t\t\t\t\t)\n\t\t\t\tchecked_out_as_solid = self.game.world.solid_check_box(\n\t\t\t\t\tcx+0.5-gx,ncy,ncz,\n\t\t\t\t\tcx+0.5,ncy+y4,ncz+z4\n\t\t\t\t\t\t)\n\t\t\telif take_y:\n\t\t\t\tfloor_check = not self.game.world.solid_check_box(\n\t\t\t\t\tncx,cy+0.5-gy,ncz+1,\n\t\t\t\t\tncx+x4,cy+0.5,ncz+z4+1\n\t\t\t\t\t\t)\n\t\t\t\tchecked_out_as_solid = self.game.world.solid_check_box(\n\t\t\t\t\tncx,cy+0.5-gy,ncz,\n\t\t\t\t\tncx+x4,cy+0.5,ncz+z4\n\t\t\t\t\t\t)\n\t\t\telse:\n\t\t\t\tchecked_out_as_solid = self.game.world.solid_check_box(\n\t\t\t\t\tncx,ncy,cz+0.5-gz,\n\t\t\t\t\tncx+x4,ncy+y4,cz+0.5\n\t\t\t\t\t\t)\n\t\t\t\n\t\t\t#if self.game.world.test_if_solid(cx,cy,cz):\n\t\t\tif checked_out_as_solid:\n\t\t\t\tif take_x:\n\t\t\t\t\tif walkable and keep_x and floor_check:\n\t\t\t\t\t\tcz += 1\n\t\t\t\t\t\tonz = nz\n\t\t\t\t\t\tnz = cz+0.001\n\t\t\t\t\t\tself.antijerk_stairs += onz-nz\n\t\t\t\t\t\tkeep_x = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tcx -= gx\n\t\t\t\t\t\t#sx = 0.1\n\t\t\t\t\t\tif keep_x:\n\t\t\t\t\t\t\tnx = cx+wx\n\t\t\t\t\t\t\tself.velocity[0] *= -0.1\n\t\t\t\t\t\t\tkeep_x = False\n\t\t\t\telif take_y:\n\t\t\t\t\tif walkable and keep_y and floor_check:\n\t\t\t\t\t\tcz += 1\n\t\t\t\t\t\tonz = nz\n\t\t\t\t\t\tnz = cz+0.001\n\t\t\t\t\t\tself.antijerk_stairs += onz-nz\n\t\t\t\t\t\tkeep_z = False\n\t\t\t\t\telse:\n\t\t\t\t\t\tcy -= gy\n\t\t\t\t\t\t#sy = 0.1\n\t\t\t\t\t\tif keep_y:\n\t\t\t\t\t\t\tny = cy+wy\n\t\t\t\t\t\t\tself.velocity[1] *= -0.1\n\t\t\t\t\t\t\tkeep_y = False\n\t\t\t\telif take_z:\n\t\t\t\t\tcz -= gz\n\t\t\t\t\t#sz = 0.1\n\t\t\t\t\tif keep_z:\n\t\t\t\t\t\tnz = cz+wz\n\t\t\t\t\t\t\n\t\t\t\t\t\tif gz < 0:\n\t\t\t\t\t\t\tself.grounded = True\n\t\t\t\t\t\tself.velocity[2] *= -0.1\n\t\t\t\t\t\tkeep_z = False\n\t\t\n\t\treturn nx-x3, ny-y3, nz-z3\n\t\n\tdef update(self, dt):\n\t\t# get new position\n\t\tnvec = tuple(self.origin[i] + self.velocity[i]*dt for i in xrange(3))\n\t\t\n\t\t(x1, y1, z1), (x2, y2, z2) = 
self.BBOX\n\t\t\n\t\tox, oy, oz = self.origin\n\t\tnx, ny, nz = nvec\n\t\t\n\t\t# trace each corner\n\t\t\n\t\t#for vbase in self.BVEC:\n\t\t#\tvx, vy, vz, walkable = vbase\n\t\t#\ttnx, tny, tnz = self.trace_vector(ox+vx, oy+vy, oz+vz, nx+vx, ny+vy, nz+vz, walkable)\n\t\t#\tnx, ny, nz = (v-vo for (v,vo) in zip((tnx,tny,tnz),(vx,vy,vz)))\n\t\t\n\t\tnx, ny, nz = self.trace_vector(ox, oy, oz, nx, ny, nz, self.walkable)\n\t\t\n\t\tfor i,vt in zip(xrange(3), (nx, ny, nz)):\n\t\t\tself.origin[i] = vt\n\nclass PlayerEntity(PhysicsEntity):\n\tARGS = PhysicsEntity.ARGS + [\"name\",\"keys\"]\n\tBBOX_STAND = ((-0.4, -0.4, -2.4),(0.4, 0.4, 0.4))\n\tBBOX_CROUCH = ((-0.4, -0.4, -1.4),(0.4, 0.4, 0.4))\n\t\n\tBBOX = BBOX_STAND\n\t\n\tdef set_game(self, idx, game):\n\t\tself.idx = idx\n\t\tself.game = game\n\t\t\n\t\tself.target_velocity = [0.0, 0.0, 0.0]\n\t\tself.cam_vx = self.cam_vy = 0.0\n\t\tself.antijerk_stairs = 0.0\n\t\tself.crouching = False\n\t\tself.walkable = True\n\t\t\n\t\tif game != None:\n\t\t\t# init\n\t\t\tif self.origin == None:\n\t\t\t\tx = self.game.world.lx//2 + 0.5\n\t\t\t\ty = self.game.world.ly//2 + 0.5\n\t\t\t\tz = self.game.world.lz + 0.5\n\t\t\t\tself.origin = [x,y,z]\n\t\t\t\n\t\t\tif self.orient_x == None:\n\t\t\t\tself.orient_x = 0.0\n\t\t\tif self.orient_z == None:\n\t\t\t\tself.orient_z = 0.0\n\t\t\t\n\t\t\tif self.velocity == None:\n\t\t\t\tself.velocity = [0.0, 0.0, 0.0]\n\t\t\t\n\t\t\tif self.keys == None:\n\t\t\t\tself.keys = 0\n\t\t\t\n\t\t\tif self.name == None:\n\t\t\t\tself.name = \"Griefer\" + repr(self.idx)\n\t\telse:\n\t\t\t# destroy\n\t\t\tpass\n\t\n\tdef set_camera(self):\n\t\tx,y,z = self.origin\n\t\treturn x,y,z+self.antijerk_stairs,self.orient_z,self.orient_x\n\t\n\tdef update(self, dt):\n\t\t#print dt\n\t\tcam_rmatrix = self.get_cam_matrix_noxrot()\n\t\t\n\t\tself.cam_vx = 0.0\n\t\tself.cam_vy = 0.0\n\t\t\n\t\t# fix antijerk\n\t\tself.antijerk_stairs *= exp(-10.0*dt)\n\t\t\n\t\t# deal with key changes\n\t\tif (self.keys & KEY_JUMP_BIT) and self.grounded and not self.crouching:\n\t\t\tself.velocity[2] = PLAYER_JUMP_HEIGHT\n\t\t\tself.grounded = False\n\t\t\n\t\tif (self.keys & KEY_MOVE_LEFT_BIT):\n\t\t\tif not (self.keys & KEY_MOVE_RIGHT_BIT):\n\t\t\t\tself.cam_vx = -1.0\n\t\telif (self.keys & KEY_MOVE_RIGHT_BIT):\n\t\t\tself.cam_vx = 1.0\n\t\t\n\t\tif (self.keys & KEY_MOVE_BACKWARD_BIT):\n\t\t\tif not (self.keys & KEY_MOVE_FORWARD_BIT):\n\t\t\t\tself.cam_vy = -1.0\n\t\telif (self.keys & KEY_MOVE_FORWARD_BIT):\n\t\t\tself.cam_vy = 1.0\n\t\t\n\t\tbvx = self.cam_vx*PLAYER_SPEED\n\t\tbvy = -self.cam_vy*PLAYER_SPEED\n\t\t\n\t\tif bool(self.keys & KEY_CROUCH_BIT) != self.crouching:\n\t\t\tif self.crouching:\n\t\t\t\t# uncrouch check\n\t\t\t\t(x1,y1,z1),(x2,y2,z2) = self.BBOX_STAND\n\t\t\t\tx,y,z = self.origin\n\t\t\t\t\n\t\t\t\tif not self.game.world.solid_check_box(x1+x,y1+y,z1+z+2,x2+x,y2+y,z2+z+0.1+1):\n\t\t\t\t\tself.origin[2] += 1.0\n\t\t\t\t\tself.BBOX = self.BBOX_STAND\n\t\t\t\t\tself.antijerk_stairs -= 1.0\n\t\t\t\t\tself.crouching = False\n\t\t\t\t\tself.walkable = True \n\t\t\telse:\n\t\t\t\t# crouch - no check needed\n\t\t\t\tself.origin[2] -= 1.0\n\t\t\t\tself.BBOX = self.BBOX_CROUCH\n\t\t\t\tself.antijerk_stairs += 1.0\n\t\t\t\tself.crouching = True\n\t\t\t\tself.walkable = False\n\t\t\n\t\tif (self.keys & KEY_CREEP_BIT) or self.crouching:\n\t\t\tbvx *= 0.5\n\t\t\tbvy *= 0.5\n\t\t\n\t\tq = (np.asmatrix([bvx,bvy,0.0])*cam_rmatrix)\n\t\t#for i in xrange(3):\n\t\t#\tself.velocity[i] *= (1.0-PLAYER_FRICTION*dt)\n\t\t\n\t\tself.target_velocity[0] = 
q[0,0]\n\t\tself.target_velocity[1] = q[0,1]\n\t\tself.target_velocity[2] = q[0,2]\n\t\t\n\t\tfor i in [0,1]: # don't do this with Z.\n\t\t#for i in [0,1,2]: # ok, maybe as a temp measure\n\t\t\t# TODO: get the math behind this right\n\t\t\tself.velocity[i] += (self.target_velocity[i] - self.velocity[i])*(1.0 - exp(-dt*5.0))\n\t\t\n\t\tself.velocity[2] -= OBJECT_GRAVITY*dt\n\t\t\n\t\tPhysicsEntity.update(self, dt)\n\t\n\t\n\tdef get_cam_matrix_noxrot(self):\n\t\tsrz,crz = sin(self.orient_z*pi/180.0),cos(self.orient_z*pi/180.0)\n\t\t\n\t\tcam_rmatrix = np.asmatrix(np.identity(3))\n\t\t\n\t\tcam_rmatrix *= np.asmatrix([\n\t\t\t[crz,srz,0.0],\n\t\t\t[-srz,crz,0.0],\n\t\t\t[0.0,0.0,1.0],\n\t\t])\n\t\t\n\t\treturn cam_rmatrix\n\t\n\tdef get_cam_matrix(self):\n\t\tsrx,crx = sin(self.orient_x*pi/180.0),cos(self.orient_x*pi/180.0)\n\t\tsrz,crz = sin(self.orient_z*pi/180.0),cos(self.orient_z*pi/180.0)\n\t\t\n\t\tcam_rmatrix = np.asmatrix(np.identity(3))\n\t\t\n\t\tcam_rmatrix *= np.asmatrix([\n\t\t\t[1.0,0.0,0.0],\n\t\t\t[0.0,crx,srx],\n\t\t\t[0.0,srx,-crx],\n\t\t])\n\t\t\n\t\tcam_rmatrix *= np.asmatrix([\n\t\t\t[crz,srz,0.0],\n\t\t\t[-srz,crz,0.0],\n\t\t\t[0.0,0.0,1.0],\n\t\t])\n\t\t\n\t\treturn cam_rmatrix\n\t\n\tdef on_mouse_motion(self, x, y, dx, dy):\n\t\tself.orient_z += dx*MOUSE_SENS_X\n\t\tself.orient_x -= dy*MOUSE_SENS_Y\n\t\n\tdef on_key_press(self, key, mod):\n\t\tif key == pyglet.window.key.W:\n\t\t\tself.keys |= KEY_MOVE_FORWARD_BIT\n\t\telif key == pyglet.window.key.S:\n\t\t\tself.keys |= KEY_MOVE_BACKWARD_BIT\n\t\telif key == pyglet.window.key.A:\n\t\t\tself.keys |= KEY_MOVE_LEFT_BIT\n\t\telif key == pyglet.window.key.D:\n\t\t\tself.keys |= KEY_MOVE_RIGHT_BIT\n\t\telif key == pyglet.window.key.SPACE:\n\t\t\tself.keys |= KEY_JUMP_BIT\n\t\telif key == pyglet.window.key.LCTRL:\n\t\t\tself.keys |= KEY_CROUCH_BIT\n\t\telif key == pyglet.window.key.LSHIFT:\n\t\t\tself.keys |= KEY_CREEP_BIT\n\t\n\tdef on_key_release(self, key, mod):\n\t\tif key == pyglet.window.key.W:\n\t\t\tself.keys &= ~KEY_MOVE_FORWARD_BIT\n\t\telif key == pyglet.window.key.S:\n\t\t\tself.keys &= ~KEY_MOVE_BACKWARD_BIT\n\t\telif key == pyglet.window.key.A:\n\t\t\tself.keys &= ~KEY_MOVE_LEFT_BIT\n\t\telif key == pyglet.window.key.D:\n\t\t\tself.keys &= ~KEY_MOVE_RIGHT_BIT\n\t\telif key == pyglet.window.key.SPACE:\n\t\t\tself.keys &= ~KEY_JUMP_BIT\n\t\telif key == pyglet.window.key.LCTRL:\n\t\t\tself.keys &= ~KEY_CROUCH_BIT\n\t\telif key == pyglet.window.key.LSHIFT:\n\t\t\tself.keys &= ~KEY_CREEP_BIT\n" ]
[ [ "numpy.asmatrix", "numpy.identity" ] ]
baheytharwat/tinygrad
[ "acf652c3c524ee3214e9ce58d41113738cb833ae" ]
[ "test/test_ops.py" ]
[ "import os\nimport torch\nimport numpy as np\nimport unittest\nimport timeit\nimport functools\nfrom tinygrad.tensor import Tensor, DEFAULT_DEVICE, Device\n\ndef helper_test_op(shps, torch_fxn, tinygrad_fxn, atol=1e-6, rtol=1e-3, grad_atol=1e-6, grad_rtol=1e-3, forward_only=False, vals=None, a=-0.5, b=20):\n torch.manual_seed(0)\n if shps is None:\n ts = [torch.tensor(x, requires_grad=True) for x in vals]\n else:\n ts = [torch.tensor((np.random.random(size=x).astype(np.float32)+a)*b, requires_grad=True) for x in shps]\n\n tst = [Tensor(x.detach().numpy()) for x in ts]\n out = torch_fxn(*ts)\n ret = tinygrad_fxn(*tst)\n\n np.testing.assert_allclose(ret.cpu().data, out.detach().numpy(), atol=atol, rtol=rtol)\n\n if not forward_only:\n out.mean().backward()\n ret.mean().backward()\n\n for t, tt in zip(ts, tst):\n np.testing.assert_allclose(t.grad, tt.cpu().grad.data, atol=grad_atol, rtol=grad_rtol)\n\n # speed\n torch_fp = timeit.Timer(functools.partial(torch_fxn, *ts)).timeit(5) * 1000/5\n tinygrad_fp = timeit.Timer(functools.partial(tinygrad_fxn, *tst)).timeit(5) * 1000/5\n\n if not forward_only:\n torch_fbp = timeit.Timer(functools.partial(lambda f,x: f(*x).mean().backward(), torch_fxn, ts)).timeit(5) * 1000/5\n tinygrad_fbp = timeit.Timer(functools.partial(lambda f,x: f(*x).mean().backward(), tinygrad_fxn, tst)).timeit(5) * 1000/5\n else:\n torch_fbp, tinygrad_fbp = np.nan, np.nan\n\n print(\"testing %30r torch/tinygrad fp: %.2f / %.2f ms bp: %.2f / %.2f ms\" % (shps, torch_fp, tinygrad_fp, torch_fbp-torch_fp, tinygrad_fbp-tinygrad_fp))\n\nclass TestOps(unittest.TestCase):\n\n def test_add(self):\n helper_test_op([(45,65), (45,65)], lambda x,y: x+y, Tensor.add)\n def test_sub(self):\n helper_test_op([(45,65), (45,65)], lambda x,y: x-y, Tensor.sub)\n def test_mul(self):\n helper_test_op([(45,65), (45,65)], lambda x,y: x*y, Tensor.mul)\n def test_div(self):\n helper_test_op([(45,65), (45,65)], lambda x,y: x/y, Tensor.div)\n def test_pow(self):\n helper_test_op([(45,65), (45,65)], lambda x,y: x**y, Tensor.pow, a=0)\n def test_sqrt(self):\n helper_test_op([(45,65)], lambda x: x.sqrt(), Tensor.sqrt, a=0)\n def test_relu(self):\n helper_test_op([(45,65)], lambda x: x.relu(), Tensor.relu)\n def test_leakyrelu(self):\n helper_test_op([(45,65)], lambda x: torch.nn.functional.leaky_relu(x,0.01), Tensor.leakyrelu)\n def test_abs(self):\n helper_test_op([(45,65)], lambda x: torch.abs(x), Tensor.abs)\n def test_log(self):\n helper_test_op([(45,65)], lambda x: torch.log(x), Tensor.log)\n def test_exp(self):\n helper_test_op([(45,65)], lambda x: torch.exp(x), Tensor.exp)\n def test_sign(self):\n helper_test_op([(45,65)], lambda x: torch.sign(x), Tensor.sign)\n def test_sigmoid(self):\n helper_test_op([(45,65)], lambda x: x.sigmoid(), Tensor.sigmoid)\n def test_softplus(self):\n helper_test_op([(45,65)], lambda x: torch.nn.functional.softplus(x), Tensor.softplus, atol=1e-6, grad_atol=1e-6)\n def test_relu6(self):\n helper_test_op([(45,65)], lambda x: torch.nn.functional.relu6(x), Tensor.relu6)\n def test_hardswish(self):\n helper_test_op([(45,65)], lambda x: torch.nn.functional.hardswish(x), Tensor.hardswish, atol=1e-6, grad_atol=1e-6)\n def test_mish(self):\n def _mish_pytorch(x):\n return x*torch.tanh(torch.nn.functional.softplus(x))\n helper_test_op([(45,65)], _mish_pytorch, Tensor.mish, atol=1e-4)\n def test_dot(self):\n helper_test_op([(45,65), (65,100)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4)\n def test_multidot(self):\n helper_test_op([(10,45,65), (10,65,45)], lambda x,y: x @ y, 
Tensor.dot, atol=1e-4)\n helper_test_op([(3,3,45,65), (3,3,65,45)], lambda x,y: x @ y, Tensor.dot, atol=1e-4)\n def test_sum(self):\n helper_test_op([(45,3)], lambda x: x.sum(), Tensor.sum)\n helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=(1,2)), lambda x: Tensor.sum(x, axis=(1,2)))\n helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=1), lambda x: Tensor.sum(x, axis=1))\n def test_max(self):\n helper_test_op([(45,3)], lambda x: x.max(), Tensor.max)\n helper_test_op([(45,3)], lambda x: x.max().mul(0.5), lambda x: Tensor.max(x).mul(0.5))\n helper_test_op(None, lambda x: x.max().mul(0.5), lambda x: Tensor.max(x).mul(0.5),\n vals=[\n [[1.0,1.0,0.0,1.0]],\n ])\n helper_test_op([(3,4,5,6)], lambda x: x.max(axis=1)[0], lambda x: Tensor.max(x, axis=1))\n def test_mean_axis(self):\n helper_test_op([(3,4,5,6)], lambda x: x.mean(axis=(1,2)), lambda x: Tensor.mean(x, axis=(1,2)))\n def test_logsoftmax(self):\n helper_test_op([(45,65)], lambda x: torch.nn.LogSoftmax(dim=1)(x), Tensor.logsoftmax, atol=1e-7, grad_atol=1e-7)\n def test_tanh(self):\n helper_test_op([(45,65)], lambda x: x.tanh(), Tensor.tanh, atol=1e-6, grad_atol=1e-6)\n def test_topo_sort(self):\n helper_test_op([(45,65)], lambda x: (x+x)*x, lambda x: x.add(x).mul(x), atol=1e-6, grad_atol=1e-6)\n\n def test_scalar_mul(self):\n helper_test_op([(45,65)], lambda x: x*2, lambda x: x*2)\n def test_scalar_rmul(self):\n helper_test_op([(45,65)], lambda x: 2*x, lambda x: 2*x)\n\n def test_scalar_sub(self):\n helper_test_op([(45,65)], lambda x: x-2, lambda x: x-2)\n def test_scalar_rsub(self):\n helper_test_op([(45,65)], lambda x: 2-x, lambda x: 2-x)\n\n def test_broadcast_full(self):\n for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul),\n (torch.div, Tensor.div), (torch.pow, Tensor.pow)]:\n for shapes in [((5,13,24,16), (5,1,24,1)), ((1,3,1,7,1), (2,1,5,1,8))]:\n with self.subTest(op=torch_op.__name__, shapes=shapes):\n helper_test_op(shapes, torch_op, tinygrad_op, a=-0.5 if tinygrad_op != Tensor.pow else 0.0)\n\n\n def test_broadcast_partial(self):\n for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul),\n (torch.div, Tensor.div), (torch.pow, Tensor.pow)]:\n for shapes in [((1,32,32,32), (1,32,1,1)), ((5,13,24,16,2), (1,13,24,1,1)),\n ((4,1), (4,5)), ((1,4), (5,4))]:\n with self.subTest(op=torch_op.__name__, shapes=shapes):\n # NOTE: ANE backwards?\n helper_test_op(shapes, torch_op, tinygrad_op, a=-0.5 if tinygrad_op != Tensor.pow else 0.0)\n\n def test_slice(self):\n helper_test_op([(3,3,3,3)], lambda x: x[1:2], lambda x: x[1:2])\n helper_test_op([(3,3,3,3)], lambda x: x[1:2, 1:2], lambda x: x[1:2, 1:2])\n helper_test_op([(3,3,3,3)], lambda x: x[1:2, 1:2, 0:-1], lambda x: x[1:2, 1:2, 0:-1])\n\n def test_pad2d(self):\n helper_test_op([(3,3,3,3)], lambda x: torch.nn.functional.pad(x, (1,2,3,4)), lambda x: x.pad2d(padding=(1,2,3,4)))\n\n def test_transpose(self):\n helper_test_op([(3,3,3)], lambda x: x.transpose(1,2), lambda x: x.transpose(order=(0,2,1)))\n # This is failing on GPU because the dim is too large\n #helper_test_op([(21,22,23,24)], lambda x: x.movedim((3,0,2,1),(0,1,2,3)), lambda x: x.transpose(order=(3,0,2,1)))\n helper_test_op([(3,4,5,6)], lambda x: x.movedim((3,2,1,0),(0,1,2,3)), lambda x: x.transpose(order=(3,2,1,0)))\n\n def test_reshape(self):\n helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,3,6,6)), lambda x: x.reshape(shape=(-1,3,6,6)))\n helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,1,6,6)), lambda 
x: x.reshape(shape=(-1,1,6,6)))\n\n def test_detach(self):\n helper_test_op([(4,3,6,6)], lambda x: x.detach(), lambda x: x.detach(), forward_only=True)\n\n def test_conv2d(self):\n for bs in [1,8]:\n for cin in [1,3]:\n for groups in [1,3] if cin == 3 else [1]:\n for H in [1,2,5]:\n for W in [1,2,3,5]:\n with self.subTest(batch_size=bs, channels=cin, groups=groups, height=H, width=W):\n helper_test_op([(bs,cin,11,28), (6,cin//groups,H,W)],\n lambda x,w: torch.nn.functional.conv2d(x,w,groups=groups).relu(),\n lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), atol=1e-4, grad_rtol=1e-5)\n\n def test_strided_conv2d(self):\n bs = 4\n cin = 3\n H,W = 3,3\n with self.subTest(stride := 2):\n helper_test_op([(bs,cin,11,28), (4,cin,H,W)],\n lambda x,w: torch.nn.functional.conv2d(x,w,stride=2).relu(),\n lambda x,w: Tensor.conv2d(x,w,stride=stride).relu(), atol=1e-4)\n with self.subTest(stride := (2,1)):\n helper_test_op([(bs,cin,11,28), (4,cin,H,W)],\n lambda x,w: torch.nn.functional.conv2d(x,w,stride=stride).relu(),\n lambda x,w: Tensor.conv2d(x,w,stride=(2,1)).relu(), atol=1e-4)\n\n def test_maxpool2d(self):\n for ksz in [(2,2), (3,3), (3,2), (5,5), (5,1)]:\n with self.subTest(kernel_size=ksz):\n helper_test_op([(32,2,110,28)],\n lambda x: torch.nn.functional.max_pool2d(x, kernel_size=ksz),\n # TODO: why is this tolerance so high?\n lambda x: Tensor.max_pool2d(x, kernel_size=ksz), grad_atol=1e-4)\n\n def test_avgpool2d(self):\n shape = (32,2,111,28)\n for ksz in [(2,2), (3,3), (3,2), (5,5), (5,1), shape[2:]]:\n with self.subTest(kernel_size=ksz):\n helper_test_op([shape],\n lambda x: torch.nn.functional.avg_pool2d(x, kernel_size=ksz),\n lambda x: Tensor.avg_pool2d(x, kernel_size=ksz), rtol=1e-5)\n\n\n def test_upsample2d_nearest(self):\n for sf in [1, 2, 3, 4, 5]:\n with self.subTest(scale_factor=sf):\n helper_test_op([(32,2,110,28)],\n lambda x: torch.nn.functional.interpolate(x, scale_factor=sf, mode='nearest'),\n lambda x: Tensor.upsample_nearest2d(x, scale_factor=sf), forward_only=True)\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n" ]
[ [ "torch.nn.functional.relu6", "torch.nn.functional.hardswish", "torch.nn.functional.avg_pool2d", "torch.nn.functional.max_pool2d", "torch.nn.functional.conv2d", "torch.manual_seed", "torch.nn.functional.pad", "torch.tensor", "torch.nn.LogSoftmax", "torch.exp", "numpy.random.random", "torch.sign", "torch.log", "torch.nn.functional.softplus", "torch.abs", "torch.reshape", "torch.nn.functional.leaky_relu", "torch.nn.functional.interpolate" ] ]
onlyrico/contextualized-topic-models
[ "ac338eab6601cd34475d490ae8072fecb73bb0c2" ]
[ "contextualized_topic_models/evaluation/measures.py" ]
[ "from gensim.corpora.dictionary import Dictionary\nfrom gensim.models.coherencemodel import CoherenceModel\nfrom gensim.models import KeyedVectors\nimport gensim.downloader as api\nfrom scipy.spatial.distance import cosine\nimport abc\n\nfrom contextualized_topic_models.evaluation.rbo import rbo\nimport numpy as np\nimport itertools\n\n\nclass Measure:\n def __init__(self):\n pass\n\n def score(self):\n pass\n\n\nclass TopicDiversity(Measure):\n def __init__(self, topics):\n super().__init__()\n self.topics = topics\n\n def score(self, topk=25):\n \"\"\"\n :param topk: topk words on which the topic diversity will be computed\n :return:\n \"\"\"\n if topk > len(self.topics[0]):\n raise Exception('Words in topics are less than topk')\n else:\n unique_words = set()\n for t in self.topics:\n unique_words = unique_words.union(set(t[:topk]))\n td = len(unique_words) / (topk * len(self.topics))\n return td\n\n\nclass Coherence(abc.ABC):\n \"\"\"\n :param topics: a list of lists of the top-k words\n :param texts: (list of lists of strings) represents the corpus on which the empirical frequencies of words are computed\n \"\"\"\n def __init__(self, topics, texts):\n self.topics = topics\n self.texts = texts\n self.dictionary = Dictionary(self.texts)\n\n @abc.abstractmethod\n def score(self):\n pass\n\n\nclass CoherenceNPMI(Coherence):\n def __init__(self, topics, texts):\n super().__init__(topics, texts)\n\n def score(self, topk=10, per_topic=False):\n \"\"\"\n :param topk: how many most likely words to consider in the evaluation\n :param per_topic: if True, returns the coherence value for each topic (default: False)\n :return: NPMI coherence\n \"\"\"\n if topk > len(self.topics[0]):\n raise Exception('Words in topics are less than topk')\n else:\n npmi = CoherenceModel(topics=self.topics, texts=self.texts, dictionary=self.dictionary,\n coherence='c_npmi', topn=topk)\n if per_topic:\n return npmi.get_coherence_per_topic()\n else:\n return npmi.get_coherence()\n\nclass CoherenceUMASS(Coherence):\n def __init__(self, topics, texts):\n super().__init__(topics, texts)\n\n def score(self, topk=10, per_topic=False):\n \"\"\"\n :param topk: how many most likely words to consider in the evaluation\n :param per_topic: if True, returns the coherence value for each topic (default: False)\n :return: UMass coherence\n \"\"\"\n if topk > len(self.topics[0]):\n raise Exception('Words in topics are less than topk')\n else:\n umass = CoherenceModel(topics=self.topics, texts=self.texts, dictionary=self.dictionary,\n coherence='u_mass', topn=topk)\n if per_topic:\n return umass.get_coherence_per_topic()\n else:\n return umass.get_coherence()\n\nclass CoherenceUCI(Coherence):\n def __init__(self, topics, texts):\n super().__init__(topics, texts)\n\n def score(self, topk=10, per_topic=False):\n \"\"\"\n :param topk: how many most likely words to consider in the evaluation\n :param per_topic: if True, returns the coherence value for each topic (default: False)\n :return: UCI coherence\n \"\"\"\n if topk > len(self.topics[0]):\n raise Exception('Words in topics are less than topk')\n else:\n uci = CoherenceModel(topics=self.topics, texts=self.texts, dictionary=self.dictionary,\n coherence='c_uci', topn=topk)\n if per_topic:\n return uci.get_coherence_per_topic()\n else:\n return uci.get_coherence()\n\nclass CoherenceCV(Coherence):\n def __init__(self, topics, texts):\n super().__init__(topics, texts)\n\n def score(self, topk=10, per_topic=False):\n \"\"\"\n :param topk: how many most likely words to consider in the 
evaluation\n :param per_topic: if True, returns the coherence value for each topic (default: False)\n :return: C_V coherence\n \"\"\"\n if topk > len(self.topics[0]):\n raise Exception('Words in topics are less than topk')\n else:\n cv = CoherenceModel(topics=self.topics, texts=self.texts, dictionary=self.dictionary,\n coherence='c_v', topn=topk)\n if per_topic:\n return cv.get_coherence_per_topic()\n else:\n return cv.get_coherence()\n\n\nclass CoherenceWordEmbeddings(Measure):\n def __init__(self, topics, word2vec_path=None, binary=False):\n \"\"\"\n :param topics: a list of lists of the top-n most likely words\n :param word2vec_path: if word2vec_file is specified, it retrieves the word embeddings file (in word2vec format) to\n compute similarities between words, otherwise 'word2vec-google-news-300' is downloaded\n :param binary: if the word2vec file is binary\n \"\"\"\n super().__init__()\n self.topics = topics\n self.binary = binary\n if word2vec_path is None:\n self.wv = api.load('word2vec-google-news-300')\n else:\n self.wv = KeyedVectors.load_word2vec_format(word2vec_path, binary=binary)\n\n def score(self, topk=10, binary= False):\n \"\"\"\n :param topk: how many most likely words to consider in the evaluation\n :return: topic coherence computed on the word embeddings similarities\n \"\"\"\n if topk > len(self.topics[0]):\n raise Exception('Words in topics are less than topk')\n else:\n arrays = []\n for index, topic in enumerate(self.topics):\n if len(topic) > 0:\n local_simi = []\n for word1, word2 in itertools.combinations(topic[0:topk], 2):\n if word1 in self.wv.vocab and word2 in self.wv.vocab:\n local_simi.append(self.wv.similarity(word1, word2))\n arrays.append(np.mean(local_simi))\n return np.mean(arrays)\n\n\nclass InvertedRBO(Measure):\n def __init__(self, topics):\n \"\"\"\n :param topics: a list of lists of words\n \"\"\"\n super().__init__()\n self.topics = topics\n\n def score(self, topk = 10, weight=0.9):\n \"\"\"\n :param weight: p (float), default 1.0: Weight of each agreement at depth d:\n p**(d-1). 
When set to 1.0, there is no weight, the rbo returns to average overlap.\n :return: rank_biased_overlap over the topics\n \"\"\"\n if topk > len(self.topics[0]):\n raise Exception('Words in topics are less than topk')\n else:\n collect = []\n for list1, list2 in itertools.combinations(self.topics, 2):\n rbo_val = rbo.rbo(list1[:topk], list2[:topk], p=weight)[2]\n collect.append(rbo_val)\n return 1 - np.mean(collect)\n\n\nclass Matches(Measure):\n def __init__(self, doc_distribution_original_language, doc_distribution_unseen_language):\n \"\"\"\n :param doc_distribution_original_language: numpy array of the topical distribution of\n the documents in the original language (dim: num docs x num topics)\n :param doc_distribution_unseen_language: numpy array of the topical distribution of the\n documents in an unseen language (dim: num docs x num topics)\n \"\"\"\n super().__init__()\n self.orig_lang_docs = doc_distribution_original_language\n self.unseen_lang_docs = doc_distribution_unseen_language\n if len(self.orig_lang_docs) != len(self.unseen_lang_docs):\n raise Exception('Distributions of the comparable documents must have the same length')\n\n def score(self):\n \"\"\"\n :return: proportion of matches between the predicted topic in the original language and\n the predicted topic in the unseen language of the document distributions\n \"\"\"\n matches = 0\n for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs):\n if np.argmax(d1) == np.argmax(d2):\n matches = matches + 1\n return matches/len(self.unseen_lang_docs)\n\n\nclass KLDivergence(Measure):\n def __init__(self, doc_distribution_original_language, doc_distribution_unseen_language):\n \"\"\"\n :param doc_distribution_original_language: numpy array of the topical distribution of\n the documents in the original language (dim: num docs x num topics)\n :param doc_distribution_unseen_language: numpy array of the topical distribution of the\n documents in an unseen language (dim: num docs x num topics)\n \"\"\"\n super().__init__()\n self.orig_lang_docs = doc_distribution_original_language\n self.unseen_lang_docs = doc_distribution_unseen_language\n if len(self.orig_lang_docs) != len(self.unseen_lang_docs):\n raise Exception('Distributions of the comparable documents must have the same length')\n\n def score(self):\n \"\"\"\n :return: average kullback leibler divergence between the distributions\n \"\"\"\n kl_mean = 0\n for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs):\n kl_mean = kl_mean + kl_div(d1, d2)\n return kl_mean/len(self.unseen_lang_docs)\n\n\ndef kl_div(a, b):\n a = np.asarray(a, dtype=np.float)\n b = np.asarray(b, dtype=np.float)\n return np.sum(np.where(a != 0, a * np.log(a / b), 0))\n\n\nclass CentroidDistance(Measure):\n def __init__(self, doc_distribution_original_language, doc_distribution_unseen_language, topics, word2vec_path=None,\n binary=True, topk=10):\n \"\"\"\n :param doc_distribution_original_language: numpy array of the topical distribution of the\n documents in the original language (dim: num docs x num topics)\n :param doc_distribution_unseen_language: numpy array of the topical distribution of the\n documents in an unseen language (dim: num docs x num topics)\n :param topics: a list of lists of the top-n most likely words\n :param word2vec_path: if word2vec_file is specified, it retrieves the word embeddings\n file (in word2vec format) to compute similarities between words, otherwise\n 'word2vec-google-news-300' is downloaded\n :param binary: if the word2vec file is binary\n :param topk: max 
number of topical words\n \"\"\"\n super().__init__()\n self.topics = [t[:topk] for t in topics]\n self.orig_lang_docs = doc_distribution_original_language\n self.unseen_lang_docs = doc_distribution_unseen_language\n if len(self.orig_lang_docs) != len(self.unseen_lang_docs):\n raise Exception('Distributions of the comparable documents must have the same length')\n\n if word2vec_path is None:\n self.wv = api.load('word2vec-google-news-300')\n else:\n self.wv = KeyedVectors.load_word2vec_format(word2vec_path, binary=binary)\n\n def score(self):\n \"\"\"\n :return: average centroid distance between the words of the most likely topic of the\n document distributions\n \"\"\"\n cd = 0\n for d1, d2 in zip(self.orig_lang_docs, self.unseen_lang_docs):\n top_words_orig = self.topics[np.argmax(d1)]\n top_words_unseen = self.topics[np.argmax(d2)]\n\n centroid_lang = self.get_centroid(top_words_orig)\n centroid_en = self.get_centroid(top_words_unseen)\n\n cd += (1 - cosine(centroid_lang, centroid_en))\n return cd/len(self.unseen_lang_docs)\n\n def get_centroid(self, word_list):\n vector_list = []\n for word in word_list:\n if word in self.wv.vocab:\n vector_list.append(self.wv.get_vector(word))\n vec = sum(vector_list)\n return vec / np.linalg.norm(vec)\n\n" ]
[ [ "numpy.asarray", "numpy.argmax", "numpy.log", "numpy.linalg.norm", "scipy.spatial.distance.cosine", "numpy.mean" ] ]
drivergroup/beliefs
[ "7e0b2a02d719f5b1c889d72ac1e9421971cc120b" ]
[ "beliefs/factors/discrete_factor.py" ]
[ "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2013-2017 pgmpy\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport copy\nimport numpy as np\n\n\nclass DiscreteFactor:\n\n def __init__(self, variables, cardinality, values=None, state_names=None):\n \"\"\"\n Args\n variables: list,\n variables in the scope of the factor\n cardinality: list,\n cardinalities of each variable, where len(cardinality)=len(variables)\n values: list,\n row vector of values of variables with ordering such that right-most variables\n defined in `variables` cycle through their values the fastest\n state_names: dictionary,\n mapping variables to their states, of format {label_name: ['state1', 'state2']}\n \"\"\"\n self.variables = list(variables)\n self.cardinality = list(cardinality)\n if values is None:\n self._values = None\n else:\n self._values = np.array(values).reshape(self.cardinality)\n self.state_names = state_names\n\n def __mul__(self, other):\n return self.product(other)\n\n def copy(self):\n \"\"\"Return a copy of the factor\"\"\"\n return self.__class__(self.variables,\n self.cardinality,\n self._values,\n copy.deepcopy(self.state_names))\n\n @property\n def values(self):\n return self._values\n\n def update_values(self, new_values):\n \"\"\"We make this available because _values is allowed to be None on init\"\"\"\n self._values = np.array(new_values).reshape(self.cardinality)\n\n def get_value_for_state_vector(self, dict_of_states):\n \"\"\"\n Return the value for a dictionary of variable states.\n\n Args\n dict_of_states: dictionary,\n of format {label_name1: 'state1', label_name2: 'True'}\n Returns\n probability, a float, the factor value for a specific combination of variable states\n \"\"\"\n assert sorted(dict_of_states.keys()) == sorted(self.variables), \\\n \"The keys for the dictionary of states must match the variables in factor scope.\"\n state_coordinates = []\n for var in self.variables:\n var_state = dict_of_states[var]\n idx_in_var_axis = self.state_names[var].index(var_state)\n state_coordinates.append(idx_in_var_axis)\n return self.values[tuple(state_coordinates)]\n\n def add_new_variables_from_other_factor(self, other):\n \"\"\"Add new variables from `other` factor to the factor.\"\"\"\n extra_vars = set(other.variables) - set(self.variables)\n # if all of these variables already exist there is nothing to do\n if len(extra_vars) == 0:\n return\n # otherwise, extend the values array\n slice_ = [slice(None)] * len(self.variables)\n slice_.extend([np.newaxis] * len(extra_vars))\n self._values = 
self._values[slice_]\n self.variables.extend(extra_vars)\n\n new_card_var = other.get_cardinality(extra_vars)\n self.cardinality.extend([new_card_var[var] for var in extra_vars])\n\n def get_cardinality(self, variables):\n return {var: self.cardinality[self.variables.index(var)] for var in variables}\n\n def product(self, other):\n left = self.copy()\n\n if isinstance(other, (int, float)):\n return self.values * other\n else:\n assert isinstance(other, DiscreteFactor), \\\n \"__mul__ is only defined between subclasses of DiscreteFactor\"\n right = other.copy()\n left.add_new_variables_from_other_factor(right)\n right.add_new_variables_from_other_factor(left)\n\n # reorder variables in right factor to match order in left\n source_axes = list(range(right.values.ndim))\n destination_axes = [right.variables.index(var) for var in left.variables]\n right.variables = [right.variables[idx] for idx in destination_axes]\n\n # rearrange values in right factor to correspond to the reordered variables\n right._values = np.moveaxis(right.values, source_axes, destination_axes)\n left._values = left.values * right.values\n return left\n\n def marginalize(self, vars):\n \"\"\"\n Args\n vars: list,\n variables over which to marginalize the factor\n Returns\n DiscreteFactor, whose scope is set(self.variables) - set(vars)\n \"\"\"\n phi = copy.deepcopy(self)\n\n var_indexes = []\n for var in vars:\n if var not in phi.variables:\n raise ValueError('{} not in scope'.format(var))\n else:\n var_indexes.append(self.variables.index(var))\n\n index_to_keep = sorted(set(range(len(self.variables))) - set(var_indexes))\n phi.variables = [self.variables[index] for index in index_to_keep]\n phi.cardinality = [self.cardinality[index] for index in index_to_keep]\n phi._values = np.sum(phi.values, axis=tuple(var_indexes))\n return phi\n" ]
[ [ "numpy.array", "numpy.moveaxis" ] ]
LinZichuan/AdMRL
[ "50a22d4d480e99125cc91cc65dfcc0df4a883ac6" ]
[ "main.py" ]
[ "import sys\nsys.path = ['./rllab/'] + sys.path\nprint (sys.path)\nimport pickle\nimport os,time\nfrom collections import deque\nimport tensorflow as tf\nimport numpy as np\nimport lunzi.nn as nn\nfrom lunzi.Logger import logger\nfrom slbo.utils.average_meter import AverageMeter\nfrom slbo.utils.flags import FLAGS\nfrom slbo.utils.dataset import Dataset, gen_dtype\nfrom slbo.utils.OU_noise import OUNoise\nfrom slbo.utils.normalizer import Normalizers\nfrom slbo.utils.tf_utils import get_tf_config\nfrom slbo.utils.runner import Runner\nfrom slbo.policies.gaussian_mlp_policy import GaussianMLPPolicy\nfrom slbo.envs.virtual_env import VirtualEnv\nfrom slbo.dynamics_model import DynamicsModel\nfrom slbo.v_function.mlp_v_function import MLPVFunction\nfrom slbo.partial_envs import make_env, make_task\nfrom slbo.loss.multi_step_loss import MultiStepLoss\nfrom slbo.algos.TRPO import TRPO\nfrom slbo.algos.ADVTASK import ADVTASK\nfrom slbo.utils.tf_utils import initialize_uninitialized\nimport click\nfrom gym.wrappers.monitor import Monitor\nimport gym\nimport scipy.misc\nimport scipy.ndimage\ndef render(env_, policy=None):\n logger.info('start render video...')\n observation = env_.reset()\n imgs = []\n return_ = 0.\n cnt_ = 0\n obs = []\n for t in range(200):\n cnt_ += 1\n observation = observation.reshape(1, -1)\n obs.append(observation)\n if policy is not None:\n action = policy.get_actions(observation)\n observation, reward, done, info = env_.step(action[0])\n if done: break\n return_ += reward\n else:\n action = env_.action_space.sample()\n observation, reward, done, info = env_.step(action)\n if done: break\n return_ += reward\n logger.info (f\"render {cnt_} steps, return = {return_:.6f}\")\n res = {'obs': obs, 'return': return_}\n return res\n\ndef eval_rollout(runner, p, des):\n logger.info(des)\n runner.reset()\n data, ep_infos = runner.run(p, FLAGS.plan.n_trpo_samples)\n logp = p(data.state).log_prob(data.action).reduce_sum(axis=1).reduce_mean()\n logp = tf.get_default_session().run(logp)\n print (\"state_mean:\", np.mean(data.state))\n print (\"action_mean:\", np.mean(data.action))\n print (\"warmup_logpac_mean:\", logp)\n\ndef testeval(policy, runner):\n runner.reset()\n _, ep_infos = runner.run(policy, FLAGS.rollout.n_test_samples)\n returns = [info['return'] for info in ep_infos]\n returns = np.mean(returns)\n return returns\n\ndef evaluate(settings, tag):\n res = {}\n for runner, policy, name in settings:\n runner.reset()\n _, ep_infos = runner.run(policy, FLAGS.rollout.n_test_samples)\n returns = np.array([ep_info['return'] for ep_info in ep_infos])\n res[name] = np.mean(returns)\n logger.info('Tag = %s, Reward on %s (%d episodes): mean = %.6f, std = %.6f', tag, name,\n len(returns), np.mean(returns), np.std(returns))\n return res['Real Env'], res['Virt Env']\n\n\ndef add_multi_step(src: Dataset, dst: Dataset):\n n_envs = 1\n dst.extend(src[:-n_envs])\n\n ending = src[-n_envs:].copy()\n ending.timeout = True\n dst.extend(ending)\n\n\ndef make_real_runner(n_envs, task_config=None):\n from slbo.envs.batched_env import BatchedEnv\n batched_env = BatchedEnv([make_env(FLAGS.env.id, task_config=task_config) for _ in range(n_envs)])\n return Runner(batched_env, rescale_action=True, **FLAGS.runner.as_dict())\n\n\[email protected]()\[email protected]('--setting', default='default')\[email protected]('--adv', default=1)\[email protected]('--gpu', default=0)\[email protected]('--debug', is_flag=True, default=False)\[email protected]('--taskname', default='Ant2D')\[email 
protected]('--verbose', is_flag=True, default=False)\[email protected]('--test', is_flag=True, default=False)\[email protected]('--warmupent', default=0.005)\[email protected]('--alpha', default=1.0)\[email protected]('--beta', default=1.0)\[email protected]('--snapshot', default=1)\[email protected]('--testadv', default=0)\[email protected]('--seed', default=1)\[email protected]('--nsample', default=10000)\[email protected]('--fixedvel', default=None)\[email protected]('--initnslbo', default=20)\[email protected]('--nslbo', default=3)\[email protected]('--warmniter', default=40)\[email protected]('--slboniter', default=20)\[email protected]('--piter', default=20)\[email protected]('--miter', default=100)\[email protected]('--atype', default='gae') # gae, 1step, ret, adv\[email protected]('--video', is_flag=True, default=False)\[email protected]('--maxstep', default=1)\[email protected]('--genadvstrategy', default=None)\[email protected]('--inittask', default='none')\[email protected]('--decay', default='joint')\[email protected]('--testgiven', default=None)\[email protected]('--testnum', default=1)\[email protected]('--testparam', default='')\ndef main(setting, adv, gpu, debug, taskname, verbose, test, warmupent, alpha, beta, snapshot, testadv, seed, nsample, fixedvel, initnslbo, nslbo, warmniter, slboniter, piter, miter, atype, video, maxstep, genadvstrategy, inittask, decay, testgiven, testnum, testparam):\n print ('warmupent:', warmupent)\n print (\"seed:\", seed)\n setting = os.path.join('./data/', setting)\n #FLAGS.run_id = setting\n FLAGS.rollout.n_train_samples = 10000\n FLAGS.rollout.n_dev_samples = 10000\n FLAGS.rollout.n_test_samples = 10000\n FLAGS.plan.n_trpo_samples = 10000\n if taskname == 'HC':\n FLAGS.env.id = 'HalfCheetahTask-v2'\n elif taskname == 'HC2D':\n FLAGS.env.id = 'HalfCheetah2D-v2'\n elif taskname == 'HClinearstate':\n FLAGS.env.id = 'HalfCheetahLinearState-v2'\n elif taskname == 'HCgoalstate':\n FLAGS.env.id = 'HalfCheetahGoalState-v2'\n elif taskname == 'Hopper2D':\n FLAGS.env.id = 'Hopper2D-v2'\n elif taskname == 'Walker2D':\n FLAGS.env.id = 'Walker2D-v2'\n elif taskname == 'Ant3D':\n FLAGS.env.id = 'Ant3DTask-v2'\n elif taskname == 'Ant2D':\n FLAGS.env.id = 'Ant2DTask-v2'\n else:\n raise Exception(f'Unsupported taskname: {taskname}')\n if not os.path.isdir(setting):\n os.makedirs(setting)\n if not test:\n filename = f'res_{taskname}_adv{adv}.txt'\n infofilename = f'res_{taskname}_adv{adv}.npy'\n filename = setting+'/'+filename\n infofilename = setting+'/'+infofilename\n fout = open(filename, 'w')\n else:\n maxstep = 100\n logger.info(f'fixedvel={fixedvel}')\n if testadv:\n logger.info('Test with adversarial generated tasks!')\n logger.info(f'testadv=1, maxstep={maxstep}, using model revert!')\n else:\n logger.info('We still do not consider this senario: test with random tasks')\n print ('adv=', adv)\n FLAGS.seed = seed\n FLAGS.set_seed()\n FLAGS.freeze()\n print (\"FLAGS.log_dir:\", FLAGS.log_dir)\n if test:\n model_load = f'{FLAGS.log_dir}/{taskname}-stage-{snapshot}.npy'\n else:\n model_load = None\n print (\"model_load:\", model_load)\n\n task = make_task(FLAGS.env.id)\n env = make_env(FLAGS.env.id, task_config=task)\n dim_state = int(np.prod(env.observation_space.shape))\n dim_action = int(np.prod(env.action_space.shape))\n\n env.verify()\n\n normalizers = Normalizers(dim_action=dim_action, dim_state=dim_state)\n normalizers_copy = Normalizers(dim_action=dim_action, dim_state=dim_state)\n normalizers_parameters = normalizers.parameters(trainable=False, 
non_trainable=True)\n normalizers_copy_parameters = normalizers_copy.parameters(trainable=False, non_trainable=True)\n copy_normalizers = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(normalizers_copy_parameters, normalizers_parameters)])\n revert_normalizers = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(normalizers_parameters, normalizers_copy_parameters)])\n\n dtype = gen_dtype(env, 'state action next_state reward done timeout')\n train_set = Dataset(dtype, FLAGS.rollout.max_buf_size)\n dev_set = Dataset(dtype, FLAGS.rollout.max_buf_size)\n task_train_sets = [Dataset(dtype, FLAGS.rollout.max_buf_size) for i in range(100)]\n task_dev_sets = [Dataset(dtype, FLAGS.rollout.max_buf_size) for i in range(100)]\n\n print (\"state and action dim:\", dim_state, dim_action)\n policy = GaussianMLPPolicy(dim_state, dim_action, normalizer=normalizers.state, **FLAGS.policy.as_dict())\n warmup_policy = GaussianMLPPolicy(dim_state, dim_action, normalizer=normalizers.state, **FLAGS.policy.as_dict())\n print (policy.parameters())\n print (warmup_policy.parameters())\n sync_warmup_policy = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(warmup_policy.parameters(), policy.parameters())])\n # batched noises\n noise = OUNoise(env.action_space, theta=FLAGS.OUNoise.theta, sigma=FLAGS.OUNoise.sigma, shape=(1, dim_action))\n vfn = MLPVFunction(dim_state, [64, 64], normalizers.state)\n warmup_vfn = MLPVFunction(dim_state, [64, 64], normalizers.state)\n sync_warmup_vfn = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(warmup_vfn.parameters(), vfn.parameters())])\n\n model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)\n lazy_model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)\n warmup_model = DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes)\n sync_warmup_model = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(warmup_model.parameters(), model.parameters())])\n shadow_models = [DynamicsModel(dim_state, dim_action, normalizers, FLAGS.model.hidden_sizes) for n in range(FLAGS.warmup.n_shadow_models)]\n sync_model_from_lazymodel = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(model.parameters(), lazy_model.parameters())])\n sync_model_to_lazymodel = tf.group(*[tf.assign(w_v, p_v) for w_v, p_v in zip(lazy_model.parameters(), model.parameters())])\n\n virt_env = VirtualEnv(model, make_env(FLAGS.env.id, task_config=task), FLAGS.plan.n_envs, opt_model=FLAGS.slbo.opt_model)\n virt_runner = Runner(virt_env, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})\n virt_env_copy = VirtualEnv(model, make_env(FLAGS.env.id, task_config=task), nsample//FLAGS.plan.max_steps, opt_model=FLAGS.slbo.opt_model)\n virt_runner_copy = Runner(virt_env_copy, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})\n extra_runners = {}\n for sam in [1000, 2000, 4000, 8000, 10000, 16000]:\n extra_runners[f'train{sam}']= Runner(VirtualEnv(model, make_env(FLAGS.env.id, task_config=task), sam//FLAGS.plan.max_steps, opt_model=FLAGS.slbo.opt_model), **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})\n extra_runners[f'collect{sam}'] = make_real_runner(sam//FLAGS.plan.max_steps, task_config=task)\n\n warmup_virt_env = VirtualEnv(warmup_model, make_env(FLAGS.env.id, task_config=task), FLAGS.plan.n_envs, opt_model=FLAGS.slbo.opt_model)\n warmup_virt_runner = Runner(warmup_virt_env, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps})\n\n logger.info('FLAGS.plan.n_envs=%d' % FLAGS.plan.n_envs)\n 
shadow_envs = [VirtualEnv(shadow_model, make_env(FLAGS.env.id, task_config=task), FLAGS.plan.n_envs, opt_model=FLAGS.slbo.opt_model) for shadow_model in shadow_models]\n shadow_runners = [Runner(shadow_env, **{**FLAGS.runner.as_dict(), 'max_steps': FLAGS.plan.max_steps}) for shadow_env in shadow_envs]\n\n criterion_map = {\n 'L1': nn.L1Loss(),\n 'L2': nn.L2Loss(),\n 'MSE': nn.MSELoss(),\n }\n criterion = criterion_map[FLAGS.model.loss]\n loss_mod = MultiStepLoss(model, normalizers, dim_state, dim_action, criterion, FLAGS.model.multi_step)\n loss_mod.build_backward(FLAGS.model.lr, FLAGS.model.weight_decay)\n shadow_loss_mods = [MultiStepLoss(shadow_model, normalizers, dim_state, dim_action, criterion, FLAGS.model.multi_step) for shadow_model in shadow_models]\n for shadow_loss_mod in shadow_loss_mods:\n shadow_loss_mod.build_backward(FLAGS.model.lr, FLAGS.model.weight_decay)\n algo = TRPO(vfn=vfn, policy=policy, dim_state=dim_state, dim_action=dim_action, **FLAGS.TRPO.as_dict())\n advtask = ADVTASK(dim_state, dim_action, policy, vfn, warmup_policy, warmup_vfn, task, alpha=alpha, beta=beta, nsample=nsample, atype=atype)\n tf.get_default_session().run(tf.global_variables_initializer())\n\n print (\"norm params:\", normalizers_parameters)\n print (\"norm_copy params:\", normalizers_copy_parameters)\n norm_before = tf.get_default_session().run(normalizers_parameters)\n print (\"norm_before:\", norm_before)\n\n assert FLAGS.algorithm != 'MF', \"don't support model free for now\"\n\n print (f\"n_envs for task: {nsample}//{FLAGS.plan.max_steps}={nsample//FLAGS.plan.max_steps}\")\n\n runners = {\n 'test': make_real_runner(FLAGS.plan.n_envs, task_config=task),\n 'collect': make_real_runner(FLAGS.plan.n_envs, task_config=task), #1\n 'collect_copy': make_real_runner(nsample//FLAGS.plan.max_steps, task_config=task), #1\n 'dev': make_real_runner(FLAGS.plan.n_envs, task_config=task),\n 'train': make_real_runner(FLAGS.plan.n_envs, task_config=task) if FLAGS.algorithm == 'MF' else virt_runner,\n 'train_copy': make_real_runner(nsample//FLAGS.plan.max_steps, task_config=task) if FLAGS.algorithm == 'MF' else virt_runner_copy,\n 'warmup_train': make_real_runner(FLAGS.plan.n_envs, task_config=task) if FLAGS.algorithm == 'MF' else warmup_virt_runner,\n }\n for name, runner in extra_runners.items():\n runners[name] = runner\n print (\"runner name is \", name)\n settings = [(runners['test'], policy, 'Real Env'), (runners['train'], policy, 'Virt Env')]\n for (i, runner) in enumerate(shadow_runners):\n settings.append((runner, policy, f'Shadow Env-{i}'))\n\n saver = nn.ModuleDict({'policy': policy, 'model': model, 'vfn': vfn, 'normalizers': normalizers}) #, 'loss_mod': loss_mod})\n print(saver)\n\n max_ent_coef = FLAGS.TRPO.ent_coef\n\n skip_metrics = []\n TASK_NUM = 0\n\n if test:\n verbose = True\n else:\n task.init()\n print (f\"task.params_={task.params_}, task.init_goal_vel={task.goal_velocity}\")\n\n if test:\n ITERS = testnum + 1\n warmup_n_iters = warmniter\n warmup_n_policy_iters = piter\n warmup_n_model_iters = miter\n slbo_n_iters = slboniter\n slbo_n_policy_iters = piter\n slbo_n_model_iters = miter\n else:\n ITERS = FLAGS.task.n_iters\n warmup_n_iters = warmniter\n warmup_n_policy_iters = piter\n warmup_n_model_iters = miter\n slbo_n_iters = slboniter\n slbo_n_policy_iters = piter\n slbo_n_model_iters = miter\n print (f\"Total Iters = {ITERS}\")\n alltaskres = []\n generated_adversarial_task = []\n init_generator = False\n logger.info(f'inittask:{inittask}')\n if not test:\n if inittask == 'none':\n 
pass\n elif not (os.path.exists(f'./{inittask}/{taskname}.trainset.task0.slbo0.pkl') and os.path.exists(f'./{inittask}/{taskname}.task0.saver.npy')):\n init_generator = True\n else:\n logger.info('Load the first task dataset!')\n for i in range(20):\n if not os.path.exists(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl'): continue\n traindata = pickle.load(open(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl', 'rb'))\n add_multi_step(traindata, train_set)\n add_multi_step(traindata, task_train_sets[0])\n logger.info(f'load trainset-{i} {len(traindata)}')\n\n for i in range(20):\n if not os.path.exists(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl'): continue\n devdata = pickle.load(open(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl', 'rb'))\n add_multi_step(devdata, task_dev_sets[0])\n logger.info(f'load devset-{i} {len(devdata)}')\n\n logger.info('Load the first task saver!')\n saver.load_state_dict(np.load(f'./{inittask}/{taskname}.task0.saver.npy', allow_pickle=True)[()])\n\n logger.info('Update all copies! (lazymodel, normalizers_copy)')\n tf.get_default_session().run(sync_model_to_lazymodel)\n tf.get_default_session().run(copy_normalizers)\n logger.info('Loaded normalizers:')\n load_norm = tf.get_default_session().run(normalizers_parameters)\n logger.info(load_norm)\n TASK_NUM = 1\n ########################## debug #########################\n #for task_idx in range(TASK_NUM):\n # total_loss = []\n # for scan in range(100):\n # samples = task_train_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)\n # loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)\n # total_loss.append(loss_i.mean())\n # total_loss = np.mean(total_loss)\n # print ('loaded model train loss:', total_loss)\n\n #for task_idx in range(TASK_NUM):\n # total_loss = []\n # for scan in range(100):\n # samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)\n # loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)\n # total_loss.append(loss_i.mean())\n # total_loss = np.mean(total_loss)\n # print ('loaded model val loss:', total_loss)\n ##exit(0)\n ########################## debug #########################\n else:\n test_summary = {\n 'task':[],\n 'random':[],\n 'warmup':[],\n 'warmupprocess':[],\n 'slbo':[],\n }\n logger.info('Testing mode!')\n train_tasknum = snapshot + 1\n test_tasknum = testnum\n logger.info(f'train_tasknum = {train_tasknum}, test_tasknum = {test_tasknum}')\n assert(testgiven is not None)\n if 'noent' in testparam: warmupent = 0.\n have_data = False\n\n task_generator = 'fixed' # random or fixed\n if testgiven[-4:] == '.pkl':\n f = testgiven\n logger.info(f'Load all tasks from {f}!')\n task.fixed_velocities = pickle.load(open(f, 'rb'))\n logger.info(f\"Test on task\")\n logger.info(task.fixed_velocities)\n logger.info(f\"Task number: {np.array(task.fixed_velocities).shape}\")\n else:\n f = f'{testgiven}/all_task_parameter.pkl'\n gen_adv_task = pickle.load(open(f, 'rb'))\n logger.info(f'Load all adversarial task from {f}!')\n task.fixed_velocities = gen_adv_task[train_tasknum: train_tasknum + test_tasknum]\n logger.info(f\"Test random method on task {train_tasknum}~{train_tasknum+test_tasknum}:\")\n logger.info(task.fixed_velocities)\n logger.info(f\"Task number: {np.array(task.fixed_velocities).shape}\")\n\n def load_data_during_test():\n if inittask != 'none':\n logger.info('Load 
the first task dataset!')\n for i in range(20):\n if not os.path.exists(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl'): continue\n traindata = pickle.load(open(f'./{inittask}/{taskname}.trainset.task0.slbo{i}.pkl', 'rb'))\n add_multi_step(traindata, train_set)\n add_multi_step(traindata, task_train_sets[0])\n logger.info(f'load task0 trainset{i} size={len(traindata)}')\n have_data = True\n\n for i in range(20):\n if not os.path.exists(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl'): continue\n devdata = pickle.load(open(f'./{inittask}/{taskname}.devset.task0.slbo{i}.pkl', 'rb'))\n add_multi_step(devdata, task_dev_sets[0])\n logger.info(f'load task0 devset{i} size={len(devdata)}')\n have_data = True\n\n logger.info(f'Load all task dataset from {setting}!')\n for t in range(0,train_tasknum):\n for i in range(20):\n if not os.path.exists(f'./{setting}/{taskname}.trainset.task{t}.slbo{i}.pkl'): continue\n traindata = pickle.load(open(f'./{setting}/{taskname}.trainset.task{t}.slbo{i}.pkl', 'rb'))\n add_multi_step(traindata, train_set)\n add_multi_step(traindata, task_train_sets[t])\n logger.info(f'load task{t} trainset{i} size={len(traindata)}')\n if not os.path.exists(f'./{setting}/{taskname}.devset.task{t}.slbo{i}.pkl'): continue\n devdata = pickle.load(open(f'./{setting}/{taskname}.devset.task{t}.slbo{i}.pkl', 'rb'))\n add_multi_step(devdata, task_dev_sets[t])\n logger.info(f'load task{t} devset{i} size={len(devdata)}')\n have_data = True\n load_data_during_test()\n\n logger.info(f'Load the task{snapshot} saver!')\n saver.load_state_dict(np.load(f'./{setting}/{taskname}.task{snapshot}.saver.npy', allow_pickle=True)[()])\n\n logger.info('Update all copies! (lazymodel, normalizers_copy)')\n tf.get_default_session().run(sync_model_to_lazymodel)\n tf.get_default_session().run(copy_normalizers)\n logger.info('Loaded normalizers:')\n load_norm = tf.get_default_session().run(normalizers_parameters)\n logger.info(load_norm)\n\n TASK_NUM = train_tasknum\n TEST_TASK_NUM = 0\n ########################## debug #########################\n #if have_data:\n # for task_idx in range(TASK_NUM):\n # total_loss = []\n # for scan in range(100):\n # samples = task_train_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)\n # loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)\n # total_loss.append(loss_i.mean())\n # total_loss = np.mean(total_loss)\n # print ('loaded model train loss:', total_loss)\n\n # for task_idx in range(TASK_NUM):\n # total_loss = []\n # for scan in range(100):\n # samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)\n # loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)\n # total_loss.append(loss_i.mean())\n # total_loss = np.mean(total_loss)\n # print ('loaded model val loss:', total_loss)\n ##exit(0)\n ######################### debug #########################\n slbo_n_stages = nslbo\n print (f\"each task will do nslbo = {nslbo}\")\n for param in model.parameters():\n param.invalidate()\n\n all_task_parameter = []\n while (not test and TASK_NUM < ITERS) or (test and TEST_TASK_NUM < ITERS):\n # first task or maxstep, update the model. 
Otherwise, revert the model\n logger.info('Sync model from lazymodel')\n tf.get_default_session().run(sync_model_from_lazymodel)\n taskres = {}\n if 'goal_velocity' not in taskres.keys():\n taskres['goal_velocity'] = []\n if not test and inittask == 'none':\n slbo_n_stages = nslbo\n elif not test and TASK_NUM == 0:\n slbo_n_stages = initnslbo\n elif not test and TASK_NUM > 0:\n slbo_n_stages = nslbo\n\n time_start = time.time()\n trpo_warmup = []\n trpo_slbo = []\n surprisal = []\n train_losses_warmup = deque(maxlen=warmup_n_model_iters // FLAGS.model.validation_freq)\n train_losses_slbo = deque(maxlen=slbo_n_model_iters // FLAGS.model.validation_freq)\n val_losses_warmup = deque(maxlen=warmup_n_model_iters // FLAGS.model.validation_freq)\n val_losses_slbo = deque(maxlen=slbo_n_model_iters // FLAGS.model.validation_freq)\n # NOTE: For each test task, we should reset model to the loaded one, and randomly initialize policy and vfn\n #if test:\n # saver.load_state_dict(np.load(model_load, allow_pickle=True)[()])\n # logger.warning('Load model from %s', model_load)\n if test:\n logger.info(\"################################################## TESTING TASK %d ################################################\", TEST_TASK_NUM)\n logger.info(f'TEST_TASK_NUM={TEST_TASK_NUM}, TASK_NUM={TASK_NUM}')\n logger.warning('Revert model and normalizers')\n tf.get_default_session().run(sync_model_from_lazymodel)\n tf.get_default_session().run(revert_normalizers)\n else:\n logger.info(\"################################################## TRAINING TASK %d ################################################\", TASK_NUM)\n if test:\n test_returns = []\n test_summary['warmupprocess'].append([])\n test_summary['slbo'].append([])\n if not test: #and FLAGS.task.method == 'random':\n if inittask != 'none' and TASK_NUM == 1:\n if 'HClinearstate' in taskname:\n task.init([0.2] * task.n_params)\n else:\n task.init([0.] 
* task.n_params)\n else:\n if TASK_NUM > 0: #fix the 1st tasks during training\n if adv == 0:\n task.random_sample('uniform')\n elif adv == 2:\n task.random_sample('normal')\n elif adv == 1:\n if TASK_NUM == 1 and inittask != 'none':\n task.random_sample()\n print (f\"task.params_={task.params_}, task.init_goal_vel={task.goal_velocity}\")\n task.sample(adv=True)\n logger.info('Task Sampled: %s', task.goal_velocity)\n taskres['goal_velocity'].append(task.goal_velocity)\n all_task_parameter.append(task.goal_velocity)\n print (f\"task.params_={task.params_}, task.init_goal_vel={task.goal_velocity}\")\n if test:\n if task_generator == 'fixed':\n task.goal_velocity = task.fixed_velocities[TEST_TASK_NUM] #TODO\n logger.info('Task Fixed: %s', task.goal_velocity)\n if task_generator == 'random':\n task.sample(adv=False) #sample randomly\n logger.info('Task Sampled: %s', task.goal_velocity)\n if task_generator == 'adv':\n task.sample(adv=True) #sample adversarially\n logger.info('Task Sampled: %s', task.goal_velocity)\n generated_adversarial_task.append(task.goal_velocity)\n logger.info('Tasks dump!')\n assert (task_generator == 'fixed')\n test_summary['task'].append(task.goal_velocity)\n\n if FLAGS.task.reset_policy:\n # NOTE: reset policy and valuefunc\n logger.info(\"Resetting Policy\")\n pol_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())])\n tf.get_default_session().run(tf.variables_initializer(policy.parameters()))\n pol_params_after = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters())])\n print (\"pol_params:\", np.linalg.norm(pol_params), \"pol_params_after_reset:\", np.linalg.norm(pol_params_after))\n logger.info(\"Resetting Valuefunc\")\n tf.get_default_session().run(tf.variables_initializer(vfn.parameters()))\n\n tf.get_default_session().run(tf.variables_initializer(warmup_policy.parameters()))\n tf.get_default_session().run(tf.variables_initializer(warmup_vfn.parameters()))\n for p in warmup_policy.parameters(): p.invalidate()\n for p in warmup_vfn.parameters(): p.invalidate()\n for p in policy.parameters(): p.invalidate()\n for p in vfn.parameters(): p.invalidate()\n\n last_end = None\n drops = []\n\n evaluate(settings, 'pre-warm-up')\n returns_pre_warmup = testeval(policy, runners['collect'])\n if test:\n test_returns.append(returns_pre_warmup)\n test_summary['random'].append(returns_pre_warmup)\n t1 = time.time()\n trpo_time = 0\n\n logger.info('----------------------------- Warmup for %d iterations ------------------------' % warmup_n_iters)\n if decay == 'joint':\n logger.info('Joint train from a joint dataset')\n elif decay == 'taskid':\n Z = np.sum([float(i+1) for i in range(0, TASK_NUM)])\n prop = [float(taskid+1) / Z for taskid in range(TASK_NUM)]\n logger.info(f'Sampling prop={prop}, Z={Z}')\n elif decay == 'none':\n Z = TASK_NUM\n prop = [1. 
/ TASK_NUM for _ in range(TASK_NUM)]\n logger.info(f'Sampling prop={prop}, Z={Z}')\n for i in range(warmup_n_iters):\n #exit(0)\n if TASK_NUM == 0 and not test and not model_load:\n logger.info('Break because TASK_NUM=0')\n break\n\n losses = deque(maxlen=warmup_n_model_iters)\n grad_norm_meter = AverageMeter()\n n_model_iters = warmup_n_model_iters\n drop_plot = 0\n if test and verbose:\n logger.info(f'warmup iter #{i}/{warmup_n_iters}, Do Not train Model during warmup of test time')\n if 'warmup_task_val_loss' not in taskres.keys():\n taskres['warmup_task_val_loss'] = [[] for _ in range(TASK_NUM)]\n\n if verbose: logger.info('Train Model for %d iterations' % n_model_iters)\n model_time = time.time()\n if not test or (test and have_data):\n for _ in range(n_model_iters):\n if decay == 'joint':\n samples = train_set.sample_multi_step(FLAGS.model.train_batch_size, 1, FLAGS.model.multi_step)\n else:\n all_samples = []\n for taskid in range(TASK_NUM):\n samples_i = task_train_sets[taskid].sample_multi_step(int(FLAGS.model.train_batch_size*prop[taskid])+1, 1, FLAGS.model.multi_step)\n all_samples.append(samples_i)\n samples = np.concatenate(all_samples, axis=1).view(np.recarray)\n _, train_loss, grad_norm = loss_mod.get_loss(\n samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout,\n fetch='train loss grad_norm')\n losses.append(train_loss.mean())\n grad_norm_meter.update(grad_norm)\n # ideally, we should define an Optimizer class, which takes parameters as inputs.\n # The `update` method of `Optimizer` will invalidate all parameters during updates.\n for param in model.parameters():\n param.invalidate()\n model_time = time.time() - model_time\n\n if i % FLAGS.model.validation_freq == 0:\n task_val_loss = []\n val_time = time.time()\n for task_idx in range(TASK_NUM):\n total_loss = []\n for scan in range(FLAGS.rollout.n_dev_samples // FLAGS.model.dev_batch_size + 1):\n samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)\n loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)\n total_loss.append(loss_i.mean())\n total_loss = np.mean(total_loss)\n task_val_loss.append(total_loss)\n taskres['warmup_task_val_loss'][task_idx].append(total_loss)\n val_time = time.time() - val_time\n val_loss = np.mean(task_val_loss)\n val_losses_warmup.append(val_loss)\n train_losses_warmup.append(np.mean(losses))\n if np.isnan(val_loss) or np.isnan(np.mean(losses)):\n logger.info('nan! 
%s %s', np.isnan(val_loss), np.isnan(np.mean(losses)))\n logger.info('# Warmup Iter %3d: Loss = [train = %.3f, dev = %.3f], after %d steps, grad_norm = %.6f, drop = %.2f, model_time=%d, trpo_time=%d, val_time=%d',\n i, np.mean(losses), val_loss, n_model_iters, grad_norm_meter.get(), drop_plot, model_time, trpo_time, val_time)\n logger.info(f'# task_val_loss: {task_val_loss}')\n\n if verbose: logger.info('Train policy for %d iterations' % warmup_n_policy_iters)\n trpo_time = time.time()\n for n_updates in range(warmup_n_policy_iters):\n if FLAGS.algorithm != 'MF' and FLAGS.warmup.start == 'buffer':\n runners['train'].set_state(train_set.sample(FLAGS.plan.n_envs).state)\n else:\n runners['train'].reset()\n\n data, ep_infos = runners['train'].run(policy, FLAGS.plan.n_trpo_samples)\n advantages, advantages_params, values, td, coef_mat, coef_mat_returns, reward_ctrl, x_velocity, begin_mark = runners['train'].compute_advantage(vfn, data,task)\n dist_mean, dist_std, vf_loss, plotinfo = algo.train(warmupent, data, advantages, values)\n trpo_warmup.append(plotinfo)\n returns = [info['return'] for info in ep_infos]\n if n_updates == 0:\n if last_end is not None:\n drop_plot = last_end - np.mean(returns)\n drops.append(last_end - np.mean(returns))\n last_end = np.mean(returns)\n if n_updates == warmup_n_policy_iters-1:\n logger.info('[TRPO] # %d: n_episodes = %d, returns: {mean = %.0f, std = %.0f}, '\n 'dist std = %.10f, dist mean = %.10f, vf_loss = %.3f',\n n_updates, len(returns), np.mean(returns), np.std(returns) / np.sqrt(len(returns)),\n dist_std, dist_mean, vf_loss)\n trpo_time = time.time() - trpo_time\n\n if i % FLAGS.warmup.n_evaluate_iters == 0 or i == warmup_n_iters-1:# and i != 0:\n real_eval, virt_eval = evaluate(settings, 'iteration')\n if 'warmup_real_eval' not in taskres.keys(): taskres['warmup_real_eval'] = []\n if 'warmup_virt_eval' not in taskres.keys(): taskres['warmup_virt_eval'] = []\n taskres['warmup_real_eval'].append(real_eval)\n taskres['warmup_virt_eval'].append(virt_eval)\n if test:\n test_summary['warmupprocess'][TEST_TASK_NUM].append(real_eval)\n\n if not test:\n res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f\"./{setting}/{taskname}-task{TASK_NUM}-warmup/\", force=True, video_callable=lambda episode_id: True), policy)\n else:\n res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f\"./{setting}/{taskname}-testtask{TEST_TASK_NUM}-warm{warmup_n_iters}-warmup/\", force=True, video_callable=lambda episode_id: True), policy)\n taskres['warmup_monitor'] = [res]\n\n t2 = time.time()\n warmup_time = t2 - t1\n evaluate(settings, 'post-warm-up')\n returns_post_warmup = testeval(policy, runners['collect'])\n if test:\n test_returns.append(returns_post_warmup)\n test_summary['warmup'].append(returns_post_warmup)\n print (\"warmupprocess:\", test_summary['warmupprocess'][TEST_TASK_NUM])\n\n logger.info('Sync warmup policy and vfn and model')\n tf.get_default_session().run([sync_warmup_policy, sync_warmup_vfn, sync_warmup_model])\n for p in warmup_policy.parameters(): p.invalidate()\n for p in warmup_vfn.parameters(): p.invalidate()\n for p in warmup_model.parameters(): p.invalidate()\n for p in policy.parameters(): p.invalidate()\n task.parameters().invalidate()\n\n pol_params, warm_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters()), nn.utils.parameters_to_vector(warmup_policy.parameters())])\n print (\"After WARMUP, pol_params_norm:\", np.linalg.norm(pol_params), \"warm_params_norm:\", np.linalg.norm(warm_params))\n 
mod, warm_mod = tf.get_default_session().run([nn.utils.parameters_to_vector(model.parameters()), nn.utils.parameters_to_vector(warmup_model.parameters())])\n print (\"mod_norm:\", np.linalg.norm(mod), \"warm_mod_norm:\", np.linalg.norm(warm_mod))\n\n eval_rollout(runners['train'], warmup_policy, 'Use warmup policy to collect data from virtual env')\n warmup_collect_virt = []\n\n eval_rollout(runners['train'], policy, 'Use policy to collect data from virtual env')\n warmup_collect_real = []\n \n logger.info('--------------------------------------------- SLBO for %d outer stages -----------------------------------------' % slbo_n_stages)\n for T in range(slbo_n_stages):\n logger.info('-------- Starting Stage %d ---------', T)\n evaluate(settings, 'episode')\n\n # collect data\n if not test:\n logger.info('-------- Collect data from REAL env for %d samples --------' % FLAGS.rollout.n_train_samples)\n recent_train_set, ep_infos = runners['collect'].run(noise.make(policy), FLAGS.rollout.n_train_samples)\n recent_dev_set, _ = runners['dev'].run(noise.make(policy), FLAGS.rollout.n_dev_samples)\n else:\n logger.info('-------- Collect data from REAL env for %d samples --------' % 2000)\n recent_train_set, ep_infos = runners['collect2000'].run(noise.make(policy), 2000)\n recent_dev_set, _ = runners['dev'].run(noise.make(policy), FLAGS.rollout.n_dev_samples)\n\n logger.info('save setting dataset! trainset and devset!')\n if not test:\n pickle.dump(recent_train_set, open(f'./{setting}/{taskname}.trainset.task{TASK_NUM}.slbo{T}.pkl', 'wb'))\n pickle.dump(recent_dev_set, open(f'./{setting}/{taskname}.devset.task{TASK_NUM}.slbo{T}.pkl', 'wb'))\n\n # Add real data to task_train_sets and task_dev_sets\n #if not test:\n # add_multi_step(recent_train_set, train_set)\n add_multi_step(recent_train_set, task_train_sets[TASK_NUM])\n add_multi_step(recent_dev_set, task_dev_sets[TASK_NUM])\n\n #if not test:\n # states = recent_train_set.state\n # mean = np.mean(states, axis=0)\n # std = np.std(states, axis=0)\n # min_ = np.min(states, axis=0)\n # max_ = np.max(states, axis=0)\n # states_stat = {\"mean\": mean, \"std\": std, \"min\": min_, \"max\": max_}\n\n # evaluate the surprisal of collected real data for model\n new_set = Dataset(dtype, FLAGS.rollout.max_buf_size)\n add_multi_step(recent_train_set, new_set)\n losses_new = []\n for i in range(FLAGS.rollout.n_train_samples // FLAGS.model.dev_batch_size + 1):\n samples = new_set.sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)\n loss = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)\n loss = loss.mean()\n losses_new.append(loss)\n losses_new_mean = np.mean(losses_new)\n surprisal.append(losses_new_mean)\n logger.info(f'(surprisal) model loss on new collected data is {losses_new_mean}')\n\n add_multi_step(recent_train_set, train_set)\n add_multi_step(\n runners['dev'].run(noise.make(policy), FLAGS.rollout.n_dev_samples)[0],\n dev_set,\n )\n\n returns = np.array([ep_info['return'] for ep_info in ep_infos])\n if len(returns) > 0:\n logger.info(\"episode: %s\", np.mean(returns))\n\n if T == 0: # check\n samples = train_set.sample_multi_step(100, 1, FLAGS.model.multi_step)\n for i in range(FLAGS.model.multi_step - 1):\n masks = 1 - (samples.done[i] | samples.timeout[i])[..., np.newaxis]\n assert np.allclose(samples.state[i + 1] * masks, samples.next_state[i] * masks)\n\n normalizers.state.update(recent_train_set.state)\n normalizers.action.update(recent_train_set.action)\n 
normalizers.diff.update(recent_train_set.next_state - recent_train_set.state)\n\n if TASK_NUM == 0: #In the 1st task, no warmup, but we validate loss of the random model\n samples = dev_set.sample_multi_step(FLAGS.model.train_batch_size, 1, FLAGS.model.multi_step)\n loss = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)\n loss = loss.mean()\n val_losses_warmup.append(loss)\n\n logger.info('SLBO for %d inner stages' % slbo_n_iters)\n model_time, trpo_time = 0, 0\n if 'slbo_task_val_loss' not in taskres.keys():\n taskres['slbo_task_val_loss'] = [[] for _ in range(TASK_NUM+1)]\n if decay == 'joint':\n logger.info('Joint train from a joint dataset')\n elif decay == 'taskid':\n Z = np.sum([float(i+1) for i in range(0, TASK_NUM+1)])\n prop = [float(taskid+1) / Z for taskid in range(TASK_NUM+1)]\n logger.info(f'Sampling prop={prop}, Z={Z}')\n elif decay == 'none':\n Z = TASK_NUM+1\n prop = [1. / float(Z) for _ in range(Z)]\n logger.info(f'Sampling prop={prop}, Z={Z}')\n for i in range(slbo_n_iters):\n if i % FLAGS.slbo.n_evaluate_iters == 0 or i == slbo_n_iters-1:# and i != 0:\n # cur_actions = policy.eval('actions_mean actions_std', states=recent_states)\n # kl_old_new = gaussian_kl(*ref_actions, *cur_actions).sum(axis=1).mean()\n # logger.info('KL(old || cur) = %.6f', kl_old_new)\n real_eval, virt_eval = evaluate(settings, 'iteration')\n if 'slbo_real_eval' not in taskres.keys(): taskres['slbo_real_eval'] = []\n if 'slbo_virt_eval' not in taskres.keys(): taskres['slbo_virt_eval'] = []\n taskres['slbo_real_eval'].append(real_eval)\n taskres['slbo_virt_eval'].append(virt_eval)\n\n losses = deque(maxlen=slbo_n_model_iters)\n grad_norm_meter = AverageMeter()\n n_model_iters = slbo_n_model_iters\n if verbose: logger.info('Train model %d iterations'% n_model_iters)\n model_time = time.time()\n for _ in range(n_model_iters):\n if decay == 'joint':\n samples = train_set.sample_multi_step(FLAGS.model.train_batch_size, 1, FLAGS.model.multi_step)\n else:\n all_samples = []\n sample_size = 0\n for taskid in range(TASK_NUM+1):\n samples_i = task_train_sets[taskid].sample_multi_step(int(FLAGS.model.train_batch_size*prop[taskid])+1, 1, FLAGS.model.multi_step)\n all_samples.append(samples_i)\n sample_size += int(FLAGS.model.train_batch_size*prop[taskid])+1\n samples = np.concatenate(all_samples, axis=1).view(np.recarray)\n\n _, train_loss, grad_norm = loss_mod.get_loss(\n samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout,\n fetch='train loss grad_norm')\n losses.append(train_loss.mean())\n grad_norm_meter.update(grad_norm)\n # ideally, we should define an Optimizer class, which takes parameters as inputs.\n # The `update` method of `Optimizer` will invalidate all parameters during updates.\n for param in model.parameters():\n param.invalidate()\n model_time = time.time() - model_time\n\n if i % FLAGS.model.validation_freq == 0:\n task_val_loss = []\n val_time = time.time()\n for task_idx in range(TASK_NUM+1):\n total_loss = []\n for scan in range(FLAGS.rollout.n_dev_samples // FLAGS.model.dev_batch_size + 1):\n samples = task_dev_sets[task_idx].sample_multi_step(FLAGS.model.dev_batch_size, 1, FLAGS.model.multi_step)\n loss_i = loss_mod.get_loss(samples.state, samples.next_state, samples.action, ~samples.done & ~samples.timeout)\n total_loss.append(loss_i.mean())\n total_loss = np.mean(total_loss)\n task_val_loss.append(total_loss)\n taskres['slbo_task_val_loss'][task_idx].append(total_loss)\n val_loss = 
np.mean(task_val_loss)\n val_time = time.time() - val_time\n if np.isnan(val_loss) or np.isnan(np.mean(losses)):\n logger.info('nan! %s %s', np.isnan(val_loss), np.isnan(np.mean(losses)))\n logger.info('# SLBO Inner Iter %3d: Loss = [train = %.3f, dev = %.3f], after %d steps, grad_norm = %.6f, model_time=%d, trpo_time=%d, val_time=%d',\n i, np.mean(losses), val_loss, n_model_iters, grad_norm_meter.get(), model_time, trpo_time, val_time)\n logger.info(f'# task_val_loss: {task_val_loss}')\n model_time, trpo_time = 0, 0\n val_losses_slbo.append(val_loss)\n train_losses_slbo.append(np.mean(losses))\n\n if verbose: logger.info('Train policy %d iterations'% slbo_n_policy_iters)\n trpo_time = time.time()\n for n_updates in range(slbo_n_policy_iters):\n if FLAGS.algorithm != 'MF' and FLAGS.slbo.start == 'buffer':\n runners['train'].set_state(train_set.sample(FLAGS.plan.n_envs).state)\n else:\n runners['train'].reset()\n\n data, ep_infos = runners['train'].run(policy, FLAGS.plan.n_trpo_samples)\n advantages, advantages_params, values, td, coef_mat, coef_mat_returns, reward_ctrl, x_velocity, begin_mark = runners['train'].compute_advantage(vfn, data, task)\n dist_mean, dist_std, vf_loss, plotinfo = algo.train(max_ent_coef, data, advantages, values)\n trpo_slbo.append(plotinfo)\n returns = [info['return'] for info in ep_infos]\n if n_updates == slbo_n_policy_iters-1:\n logger.info('[TRPO] # %d: n_episodes = %d, returns: {mean = %.0f, std = %.0f}, '\n 'dist std = %.10f, dist mean = %.10f, vf_loss = %.3f',\n n_updates, len(returns), np.mean(returns), np.std(returns) / np.sqrt(len(returns)),\n dist_std, dist_mean, vf_loss)\n trpo_time = time.time() - trpo_time\n \n if not test and (TASK_NUM) % FLAGS.ckpt.n_save_stages == 0:\n np.save(f'{FLAGS.log_dir}/{taskname}-stage-{TASK_NUM}', saver.state_dict())\n np.save(f'{FLAGS.log_dir}/{taskname}-final', saver.state_dict())\n res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f\"./{setting}/{taskname}-task{TASK_NUM}-slbo{T}/\", force=True, video_callable=lambda episode_id: True), policy)\n if 'slbo_monitor' not in taskres.keys():\n taskres['slbo_monitor'] = []\n taskres['slbo_monitor'].append(res)\n if not test and FLAGS.ckpt.n_save_stages == 1:\n pickle.dump(recent_train_set, open(f'{FLAGS.log_dir}/stage-{TASK_NUM}.inc-buf.pkl', 'wb'))\n if test:\n returns_post_slbo_update = testeval(policy, runners['collect'])\n test_returns.append(returns_post_slbo_update)\n real_eval, virt_eval = evaluate(settings, 'iteration')\n test_summary['slbo'][TEST_TASK_NUM].append(real_eval)\n test_summary[f'slbo{T+1}'].append(returns_post_slbo_update)\n res = render(Monitor(make_env(FLAGS.env.id, task_config=task), f\"./{setting}/{taskname}-testtask{TEST_TASK_NUM}-slbo{T}/\", force=True, video_callable=lambda episode_id: True), policy)\n print ('test_summary_slbo:', test_summary['slbo'][TEST_TASK_NUM])\n\n if not test:\n np.save(f'{setting}/{taskname}.task{TASK_NUM}.saver', saver.state_dict())\n np.save(f'{setting}/{taskname}.final.saver', saver.state_dict())\n\n if init_generator and TASK_NUM==0:\n print ('finished init generator!')\n exit(0)\n\n pol_params, warm_params = tf.get_default_session().run([nn.utils.parameters_to_vector(policy.parameters()), nn.utils.parameters_to_vector(warmup_policy.parameters())])\n print (\"After SLBO, pol_params_norm:\", np.linalg.norm(pol_params), \"warm_params_norm:\", np.linalg.norm(warm_params))\n\n eval_rollout(runners['train'], policy, 'Use optimal policy to collect data from real env')\n optimal_collect_real = []\n\n t3 = 
time.time()\n slbo_time = t3 - t2\n evaluate(settings, 'post-slbo')\n logger.info(f'Warmup time = {warmup_time}, SLBO time = {slbo_time}')\n\n alltaskres.append(taskres)\n if not test:\n pickle.dump(alltaskres, open(f'{setting}/{taskname}-alltaskres.info.pkl', 'wb'))\n pickle.dump(all_task_parameter, open(f'{setting}/all_task_parameter.pkl', 'wb'))\n else:\n pickle.dump(alltaskres, open(f'{setting}/{taskname}-alltaskres.info.pkl.{testparam}', 'wb'))\n pickle.dump(all_task_parameter, open(f'{setting}/all_task_parameter.pkl.{testparam}', 'wb'))\n\n eval_rollout(runners['train'], warmup_policy, 'Use warmup policy to collect data from virtual env')\n if not test:\n #if TASK_NUM > 0:\n if TASK_NUM > -1:\n task_params_before, final_grad, advtask_info = advtask.train(runners['train_copy'], runners['collect_copy'], warmup_collect_virt, warmup_collect_real, optimal_collect_real, returns_pre_warmup, val_losses_warmup, val_losses_slbo, train_losses_warmup, train_losses_slbo, surprisal, trpo_warmup, trpo_slbo, fout, infofilename, extra_runners)\n\n # first task or maxstep, update the model\n if not test and (TASK_NUM == 0 or TASK_NUM % maxstep == 0):\n logger.info(f\"task_num={TASK_NUM}, sync_model_to_lazymodel\")\n tf.get_default_session().run(sync_model_to_lazymodel)\n\n if test:\n pickle.dump(test_summary, open(f'{setting}/test_summary.pkl.{testparam}', 'wb'))\n TEST_TASK_NUM += 1\n TASK_NUM = train_tasknum\n #task_train_sets[TASK_NUM].clear()\n #task_dev_sets[TASK_NUM].clear()\n for tt in range(TASK_NUM+1):\n task_train_sets[tt].clear()\n task_dev_sets[tt].clear()\n train_set.clear()\n load_data_during_test()\n continue\n\n task_params_after = task_params_before + final_grad * alpha\n task.set_parameters(task_params_after)\n\n if not test:\n advtask_info['alpha'].append(alpha)\n with open(infofilename, 'wb') as handle:\n pickle.dump(advtask_info, handle, protocol=pickle.HIGHEST_PROTOCOL)\n print ('>>>>>>dump')\n\n TASK_NUM += 1\n time_end = time.time()\n print (f\"Task Done! Total Time Consumed for 1 task = {time_end - time_start}s\")\n\n\n\nif __name__ == '__main__':\n with tf.Session(config=get_tf_config()):\n main()\n" ]
[ [ "numpy.load", "numpy.allclose", "tensorflow.global_variables_initializer", "tensorflow.get_default_session", "tensorflow.assign", "numpy.prod", "numpy.array", "numpy.std", "numpy.concatenate", "numpy.linalg.norm", "numpy.isnan", "numpy.mean" ] ]