repo_name: string (lengths 8-130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
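Each row below pairs a repository snapshot with parallel per-file sequences (hexsha, file_path, code, apis). Purely as an illustration, and not taken from any dataset tooling, here is a minimal Python sketch of how one such record might be represented and walked; the field values are copied from the first row, except the code entry, which is shortened to a stand-in.

record = {
    "repo_name": "kelvin95/EPOSearch",
    "hexsha": ["020f0a8890437449dd7bb37534697aa9f71e8305"],
    "file_path": ["toy_experiments/solvers/moo_mtl.py"],
    # The real entry holds the full file contents; shortened here as a stand-in.
    "code": ["import numpy as np\n# ... full source of moo_mtl.py ...\n"],
    "apis": [["numpy.stack", "numpy.dot", "numpy.random.randn", "numpy.array"]],
}

# The sequences are parallel: index i of file_path, code and apis all refer to
# the same file in the repository snapshot.
for path, source, api_calls in zip(record["file_path"], record["code"], record["apis"]):
    print(path, len(source.splitlines()), "lines,", len(api_calls), "extracted API calls")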
kelvin95/EPOSearch
[ "020f0a8890437449dd7bb37534697aa9f71e8305" ]
[ "toy_experiments/solvers/moo_mtl.py" ]
[ "# This code is from\n# Multi-Task Learning as Multi-Objective Optimization\n# Ozan Sener, Vladlen Koltun\n# Neural Information Processing Systems (NeurIPS) 2018 \n# https://github.com/intel-isl/MultiObjectiveOptimization\n\nimport numpy as np\n\nfrom .min_norm_solvers_numpy import MinNormSolver\n\n\ndef moo_mtl_search(multi_obj_fg, x=None,\n max_iters=200, n_dim=20, step_size=1):\n \"\"\"\n MOO-MTL\n \"\"\"\n # x = np.random.uniform(-0.5,0.5,n_dim)\n x = np.random.randn(n_dim) if x is None else x\n fs = []\n for t in range(max_iters):\n f, f_dx = multi_obj_fg(x)\n\n weights = get_d_moomtl(f_dx)\n\n x = x - step_size * np.dot(weights.T, f_dx).flatten()\n fs.append(f)\n\n res = {'ls': np.stack(fs)}\n return x, res\n\n\ndef get_d_moomtl(grads):\n \"\"\"\n calculate the gradient direction for MOO-MTL\n \"\"\"\n\n nobj, dim = grads.shape\n if nobj <= 1:\n return np.array([1.])\n\n# # use cvxopt to solve QP\n# P = np.dot(grads , grads.T)\n#\n# q = np.zeros(nobj)\n#\n# G = - np.eye(nobj)\n# h = np.zeros(nobj)\n#\n#\n# A = np.ones(nobj).reshape(1,2)\n# b = np.ones(1)\n#\n# cvxopt.solvers.options['show_progress'] = False\n# sol = cvxopt_solve_qp(P, q, G, h, A, b)\n # print(f'grad.shape: {grads.shape}')\n # use MinNormSolver to solve QP\n sol, nd = MinNormSolver.find_min_norm_element(grads)\n\n return sol\n" ]
[ [ "numpy.stack", "numpy.dot", "numpy.random.randn", "numpy.array" ] ]
cyankaet/bumps
[ "427d077fd95f2d9a09eeb8677d045547061cff42" ]
[ "doc/examples/peaks/plot.py" ]
[ "import sys\nimport json\n\nimport numpy as np\nimport pylab\n\ndef plot(X,Y,theory,data,err):\n #print \"theory\",theory[1:6,1:6]\n #print \"data\",data[1:6,1:6]\n #print \"delta\",(data-theory)[1:6,1:6]\n pylab.subplot(3,1,1)\n pylab.pcolormesh(X,Y, data)\n pylab.subplot(3,1,2)\n pylab.pcolormesh(X,Y, theory)\n pylab.subplot(3,1,3)\n pylab.pcolormesh(X,Y, (data-theory)/(err+1))\n\ndef load_results(filename):\n \"\"\"\n Reload results from the json file created by Peaks.save\n \"\"\"\n data = json.load(open(filename))\n # Convert array info back into numpy arrays\n data.update( (k,np.array(data[k]))\n for k in ('X', 'Y', 'data', 'err', 'theory') )\n return data\n\ndef main():\n data = load_results(sys.argv[1])\n plot(data['X'],data['Y'],data['theory'],data['data'],data['err'])\n pylab.show()\n\nif __name__ == \"__main__\": main()\n" ]
[ [ "numpy.array" ] ]
glangsto/pyspeckit
[ "346b24fb828d1d33c7891cdde7609723e51af34c" ]
[ "pyspeckit/spectrum/speclines/optical.py" ]
[ "\"\"\"\nStorage for optical spectral line information.\n\"\"\"\nfrom __future__ import print_function\n\nimport numpy as np\n\ndef hydrogen(nu,nl, vacuum=True):\n \"\"\"\n Compute the rest wavelength of Hydrogen recombination lines in angstroms\n \"\"\"\n rydberg = 10973731.6 # m^-1\n protontoelectron = 1836.15266 # ratio\n\n lvac = 1.0/rydberg * 1./(1/float(nl)**2 - 1/float(nu)**2) * 1e10 * (1.0+1.0/protontoelectron)\n\n if not vacuum:\n import ref_index\n return ref_index.vac2air(lvac/10)*10\n else:\n return lvac\n\n# Format: name, units, vacuum?, display name\nlines = {\n \"H_alpha\": [6564.614, 'Angstrom', True, r'$\\mathrm{H}\\alpha$'],\n \"H_beta\": [4862.721, 'Angstrom', True, r'$\\mathrm{H}\\beta$'],\n \"OIIIa\": [4960.295, 'Angstrom', True, r'$[\\mathrm{OIII}]\\lambda 4959\\AA$'],\n \"OIIIb\": [5008.239, 'Angstrom', True, r'$[\\mathrm{OIII}]\\lambda 5007\\AA$'],\n \"NIIa\": [6549.860, 'Angstrom', True, r'$[\\mathrm{NII}]\\lambda 6549\\AA$'],\n \"NIIb\": [6585.270, 'Angstrom', True, r'$[\\mathrm{NII}]\\lambda 6585\\AA$'],\n \"SIIa\": [6718.290, 'Angstrom', True, r'$[\\mathrm{SII}]\\lambda 6718\\AA$'],\n \"SIIb\": [6732.680, 'Angstrom', True, r'$[\\mathrm{SII}]\\lambda 6732\\AA$'],\n \"OI\": [6300.304, 'Angstrom', True, r'$[\\mathrm{OI}]\\lambda 6300\\AA$'],\n \"OII\": [3727.319, 'Angstrom', True, r'$[\\mathrm{OII}]\\lambda 3727\\AA$'],\n \"NeIII\": [3868.760, 'Angstrom', True, r'$[\\mathrm{OII}]\\lambda 3869\\AA$']\n}\n\ndef get_optical_lines():\n for i in range(3, 7):\n name = 'H_%i-2' % i\n wavelength = hydrogen(i, 2)\n lines[name] = [wavelength, 'Angstrom', True, name]\n\n xarr = []\n for key in lines.keys(): \n xarr.append(lines[key][0])\n xarr = np.array(xarr)\n\n indx = np.argsort(xarr)\n xarr = np.sort(xarr)\n\n name = []\n keys = list(lines.keys())\n for i, key in enumerate(keys): \n name.append(keys[indx[i]])\n name = np.array(name)\n\n xunits = []\n xvac = []\n dname = []\n for i, nombre in enumerate(name): \n xunits.append(lines[nombre][1])\n xvac.append(lines[nombre][2])\n dname.append(lines[nombre][3])\n\n xunits = np.array(xunits)\n xvac = np.array(xvac)\n dname = np.array(dname)\n\n optical_lines = {'name': name, 'xarr': xarr, 'xunits': xunits, 'xvac': xvac, 'dname': dname}\n\n return optical_lines\n" ]
[ [ "numpy.array", "numpy.sort", "numpy.argsort" ] ]
bartbroere/lir
[ "041f1cea40366937d56c43bb15712873eb3e8a0a" ]
[ "lir/ece.py" ]
[ "\"\"\"\nEmpirical Cross Entrpy (ECE)\n\nThe discrimination and calibration of the LRs reported by some systems can also\nbe measured separately. The empirical cross entropy (ECE) plot is a graphical\nway of doing this.\n\nThe ECE is the average of -P(Hp) * log2(P(Hp|LRi)) for all LRi when Hp is true,\nand -P(Hd) * log2(P(Hd|LRi)) for all LRi when Hd is true.\n\nSee:\n[-] D. Ramos, Forensic evidence evaluation using automatic speaker recognition\n systems. Ph.D. Thesis. Universidad Autonoma de Madrid.\n[-] Bernard Robertson, G.A. Vignaux and Charles Berger, Interpreting Evidence:\n Evaluating Forensic Science in the Courtroom, 2nd edition, 2016, pp. 96-97.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom .calibration import IsotonicCalibrator\nfrom . import util\n\n\ndef plot(lrs, y, log_prior_odds_range=None, on_screen=False, path=None, kw_figure={}):\n \"\"\"\n Generates an ECE plot for a set of LRs and corresponding ground-truth\n labels.\n\n The x-axis indicates the log prior odds of a sample being drawn from class\n 1; the y-axis shows the entropy for (1) a non-informative system (dotted\n line), (2) the set of LR values (line), and (3) the set of LR values after\n PAV-transformation (Pool Adjacent Violators, dashed line).\n\n :param lrs: an array of LRs\n :param y: an array of ground-truth labels (values 0 for Hd or 1 for Hp);\n must be of the same length as `lrs`\n :param log_prior_odds_range: the range of prior odds (tuple of two values,\n indicating both ends of the range on the x-axis)\n :param on_screen: boolean, show plot on screen interactively\n :param path: path name or None, write plot to file as PNG image (default\n None)\n :param kw_figure: dict of parameters to pass to `plt.figure()`\n \"\"\"\n if log_prior_odds_range is None:\n log_prior_odds_range = (-3, 3)\n\n log_prior_odds = np.arange(*log_prior_odds_range, .01)\n prior_odds = np.power(10, log_prior_odds)\n\n fig = plt.figure(**kw_figure)\n\n # plot reference\n plt.plot(log_prior_odds, calculate_ece(np.ones(len(lrs)), y, util.to_probability(prior_odds)), linestyle=':', label='reference')\n\n # plot LRs\n plt.plot(log_prior_odds, calculate_ece(lrs, y, util.to_probability(prior_odds)), linestyle='-', label='LRs')\n\n # plot PAV LRs\n pav_lrs = IsotonicCalibrator().fit_transform(util.to_probability(lrs), y)\n plt.plot(log_prior_odds, calculate_ece(pav_lrs, y, util.to_probability(prior_odds)), linestyle='--', label='PAV LRs')\n\n plt.xlabel(\"prior log10 odds\")\n plt.ylabel(\"empirical cross-entropy\")\n plt.ylim((0,None))\n plt.xlim(log_prior_odds_range)\n plt.legend()\n plt.grid(True, linestyle=':')\n if on_screen:\n plt.show()\n if path is not None:\n plt.savefig(path)\n\n plt.close(fig)\n\n\ndef calculate_ece(lrs, y, priors):\n \"\"\"\n Calculates the empirical cross-entropy (ECE) of a set of LRs and\n corresponding ground-truth labels.\n\n An entropy is calculated for each element of `priors`.\n\n :param lrs: an array of LRs\n :param y: an array of ground-truth labels of the LRs (values 0 for Hd or 1\n for Hp); must be of the same length as `lrs`.\n :param priors: an array of prior probabilities of the samples being drawn\n from class 1 (values in range [0..1])\n :returns: an array of entropy values of the same length as `priors`\n \"\"\"\n \n prior_odds = np.repeat(util.to_odds(priors), len(lrs)).reshape((len(priors), len(lrs)))\n posterior_odds = prior_odds * lrs\n posterior_p = util.to_probability(posterior_odds)\n\n with np.errstate(invalid='ignore'):\n ece0 = - (1 - 
priors.reshape((len(priors),1))) * np.log2(1 - posterior_p[:,y == 0])\n ece1 = - priors.reshape((len(priors),1)) * np.log2( posterior_p[:,y == 1])\n\n ece0[np.isnan(ece0)] = np.inf\n ece1[np.isnan(ece1)] = np.inf\n\n avg0 = np.average(ece0, axis=1)\n avg1 = np.average(ece1, axis=1)\n\n return avg0 + avg1\n" ]
[ [ "numpy.log2", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.grid", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlim", "numpy.arange", "numpy.errstate", "numpy.power", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylim", "matplotlib.pyplot.close", "numpy.average", "matplotlib.pyplot.xlabel", "numpy.isnan" ] ]
woffett/mmpose
[ "cf8cbf49759e745896b70ce69d412518568af33b" ]
[ "mmpose/datasets/datasets/top_down/topdown_onehand10k_dataset.py" ]
[ "import copy as cp\nimport os\nimport os.path as osp\nfrom collections import OrderedDict\n\nimport json_tricks as json\nimport numpy as np\n\nfrom mmpose.datasets.builder import DATASETS\nfrom .topdown_base_dataset import TopDownBaseDataset\n\n\[email protected]_module()\nclass TopDownOneHand10KDataset(TopDownBaseDataset):\n \"\"\"OneHand10K dataset for top-down hand pose estimation.\n\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n OneHand10K keypoint indexes::\n\n 0: 'wrist',\n 1: 'thumb1',\n 2: 'thumb2',\n 3: 'thumb3',\n 4: 'thumb4',\n 5: 'forefinger1',\n 6: 'forefinger2',\n 7: 'forefinger3',\n 8: 'forefinger4',\n 9: 'middle_finger1',\n 10: 'middle_finger2',\n 11: 'middle_finger3',\n 12: 'middle_finger4',\n 13: 'ring_finger1',\n 14: 'ring_finger2',\n 15: 'ring_finger3',\n 16: 'ring_finger4',\n 17: 'pinky_finger1',\n 18: 'pinky_finger2',\n 19: 'pinky_finger3',\n 20: 'pinky_finger4'\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n test_mode (bool): Store True when building test or\n validation dataset. Default: False.\n \"\"\"\n\n def __init__(self,\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n test_mode=False):\n\n super().__init__(\n ann_file, img_prefix, data_cfg, pipeline, test_mode=test_mode)\n\n self.ann_info['flip_pairs'] = []\n\n self.ann_info['use_different_joint_weights'] = False\n assert self.ann_info['num_joints'] == 21\n self.ann_info['joint_weights'] = \\\n np.ones((self.ann_info['num_joints'], 1), dtype=np.float32)\n\n self.db = self._get_db(ann_file)\n self.image_set = set([x['image_file'] for x in self.db])\n self.num_images = len(self.image_set)\n\n print(f'=> num_images: {self.num_images}')\n print(f'=> load {len(self.db)} samples')\n\n def _get_db(self, ann_file):\n \"\"\"Load dataset.\"\"\"\n with open(ann_file, 'r') as f:\n data = json.load(f)\n tmpl = dict(\n image_file=None,\n center=None,\n scale=None,\n rotation=0,\n joints_3d=None,\n joints_3d_visible=None,\n bbox=None,\n dataset='OneHand10K')\n\n imid2info = {x['id']: x for x in data['images']}\n\n num_joints = self.ann_info['num_joints']\n gt_db = []\n\n for anno in data['annotations']:\n newitem = cp.deepcopy(tmpl)\n image_id = anno['image_id']\n newitem['image_file'] = os.path.join(\n self.img_prefix, imid2info[image_id]['file_name'])\n\n if max(anno['keypoints']) == 0:\n continue\n\n joints_3d = np.zeros((num_joints, 3), dtype=np.float)\n joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float)\n\n for ipt in range(num_joints):\n joints_3d[ipt, 0] = anno['keypoints'][ipt * 3 + 0]\n joints_3d[ipt, 1] = anno['keypoints'][ipt * 3 + 1]\n joints_3d[ipt, 2] = 0\n t_vis = min(anno['keypoints'][ipt * 3 + 2], 1)\n joints_3d_visible[ipt, :] = (t_vis, t_vis, 0)\n\n center, scale = self._xywh2cs(*anno['bbox'][:4])\n newitem['center'] = center\n newitem['scale'] = scale\n newitem['joints_3d'] = joints_3d\n newitem['joints_3d_visible'] = joints_3d_visible\n newitem['bbox'] = anno['bbox'][:4]\n gt_db.append(newitem)\n\n return gt_db\n\n def _xywh2cs(self, x, y, w, h):\n \"\"\"This encodes bbox(x,y,w,w) into (center, scale)\n\n Args:\n x, y, w, h\n\n Returns:\n center (np.ndarray[float32](2,)): center of the bbox (x, y).\n scale (np.ndarray[float32](2,)): scale of the bbox w & h.\n \"\"\"\n aspect_ratio = self.ann_info['image_size'][0] / 
self.ann_info[\n 'image_size'][1]\n center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)\n\n if (not self.test_mode) and np.random.rand() < 0.3:\n center += 0.4 * (np.random.rand(2) - 0.5) * [w, h]\n\n if w > aspect_ratio * h:\n h = w * 1.0 / aspect_ratio\n elif w < aspect_ratio * h:\n w = h * aspect_ratio\n\n # pixel std is 200.0\n scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)\n\n scale = scale * 1.25\n\n return center, scale\n\n def _evaluate_kernel(self, pred, joints_3d, joints_3d_visible, bbox):\n \"\"\"Evaluate one example.\n\n ||pre[i] - joints_3d[i]|| < 0.2 * max(w, h)\n \"\"\"\n num_joints = self.ann_info['num_joints']\n bbox = np.array(bbox)\n threshold = np.max(bbox[2:]) * 0.2\n hit = np.zeros(num_joints, dtype=np.float32)\n exist = np.zeros(num_joints, dtype=np.float32)\n\n for i in range(num_joints):\n pred_pt = pred[i]\n gt_pt = joints_3d[i]\n vis = joints_3d_visible[i][0]\n if vis:\n exist[i] = 1\n else:\n continue\n distance = np.linalg.norm(pred_pt[:2] - gt_pt[:2])\n if distance < threshold:\n hit[i] = 1\n return hit, exist\n\n def evaluate(self, outputs, res_folder, metrics='PCK', **kwargs):\n \"\"\"Evaluate OneHand10K keypoint results.\"\"\"\n res_file = os.path.join(res_folder, 'result_keypoints.json')\n\n kpts = []\n\n for preds, boxes, image_path in outputs:\n str_image_path = ''.join(image_path)\n image_id = int(osp.basename(osp.splitext(str_image_path)[0]))\n\n kpts.append({\n 'keypoints': preds[0].tolist(),\n 'center': boxes[0][0:2].tolist(),\n 'scale': boxes[0][2:4].tolist(),\n 'area': float(boxes[0][4]),\n 'score': float(boxes[0][5]),\n 'image_id': image_id,\n })\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file)\n name_value = OrderedDict(info_str)\n\n return name_value\n\n def _write_keypoint_results(self, keypoints, res_file):\n \"\"\"Write results into a json file.\"\"\"\n\n with open(res_file, 'w') as f:\n json.dump(keypoints, f, sort_keys=True, indent=4)\n\n def _report_metric(self, res_file):\n \"\"\"Keypoint evaluation.\n\n Report Mean Acc of skeleton, contour and all joints.\n \"\"\"\n num_joints = self.ann_info['num_joints']\n hit = np.zeros(num_joints, dtype=np.float32)\n exist = np.zeros(num_joints, dtype=np.float32)\n\n with open(res_file, 'r') as fin:\n preds = json.load(fin)\n\n assert len(preds) == len(self.db)\n for pred, item in zip(preds, self.db):\n h, e = self._evaluate_kernel(pred['keypoints'], item['joints_3d'],\n item['joints_3d_visible'],\n item['bbox'])\n hit += h\n exist += e\n pck = np.sum(hit) / np.sum(exist)\n\n info_str = []\n info_str.append(('PCK', pck.item()))\n return info_str\n" ]
[ [ "numpy.ones", "numpy.sum", "numpy.zeros", "numpy.max", "numpy.random.rand", "numpy.array", "numpy.linalg.norm" ] ]
HayetBD/Text-to-image
[ "7ead7e03bb8ee42f457281bc250cd88161fb5dcd" ]
[ "Unsplash_webscrapping.py" ]
[ "import time\r\nimport pandas as pd\r\nfrom selenium import webdriver\r\n\r\n# Scrapping images and their caption from unsplash website\r\n# saving these images url and captions into a csv file\r\n\r\nWEBSITE = 'http://unsplash.com/s/photos/landscape-forest-mountain'\r\ncolumns = ['description', 'url']\r\nimageset = pd.DataFrame(columns = columns)\r\n\r\n# Define Chrome options to open the window in maximized mode\r\noptions = webdriver.ChromeOptions()\r\noptions.add_argument(\"--start-maximized\")\r\n\r\n# Initialize the Chrome webdriver and open the URL\r\ndriver = webdriver.Chrome(options=options)\r\n\r\n# get web page\r\ndriver.get(WEBSITE)\r\n#Define a pause time in between scrolls\r\npause_time = 5\r\nslow=1000\r\n# Get scroll height\r\nlast_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n\r\n# Create an emply list to hold all the urls for the images\r\nhrefs = []\r\ntitle = []\r\ndict = {}\r\nimage_to_scrap = 10000\r\n# We only want to scrap images of landscapes without people\r\ntagToAvoid = ['man', 'person', 'people', 'road', 'woman']\r\n\r\nwhile (len(dict) < image_to_scrap):\r\n # Scroll par etapes\r\n driver.execute_script(\"window.scrollTo(0, \"+str(slow)+\");\")\r\n slow = slow + 1000\r\n #on recupere la hauteur a scroller\r\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n # wait to load page\r\n time.sleep(pause_time)\r\n # Calculate new scroll height and compare with last scroll height\r\n\r\n if slow >= last_height: # which means end of page\r\n break\r\n # Extract all anchor tags\r\n link_tags = driver.find_elements_by_class_name(\"_2zEKz\");\r\n # Extract the urls and titles of only the images from each of the tag WebElements\r\n for tag in link_tags:\r\n #to avoid duplicate, use of a dictionnary\r\n if((tag.get_attribute('src') not in dict) and tag.get_attribute('alt') and len(tag.get_attribute('alt')) > 10):\r\n if((tag.get_attribute('alt').find('man') == -1)\r\n and (tag.get_attribute('alt').find('men') == -1)\r\n and (tag.get_attribute('alt').find('person') == -1)\r\n and (tag.get_attribute('alt').find('people') == -1)\r\n and (tag.get_attribute('alt').find('road') == -1)) :\r\n dict[tag.get_attribute('src')] = tag.get_attribute('alt')\r\n hrefs.append(tag.get_attribute('src'))\r\n title.append(tag.get_attribute('alt').replace(',',''))\r\n print('height scroll :',last_height, '\\tslow :',slow, '\\tlen dict:',len(dict))\r\n\r\nprint(len(hrefs), len(title))\r\nimageset.loc[:,'description'] = title\r\nimageset.loc[:,'url'] = hrefs\r\nimageset.to_csv(r'Data\\landscapeSet_v4.csv')\r\n# Select all duplicate rows based on multiple column names in list\r\nduplicateRowsDF = imageset[imageset.duplicated(['description', 'url'])]\r\n\r\nprint(\"Duplicate Rows based on 2 columns are:\", duplicateRowsDF, sep='\\n')" ]
[ [ "pandas.DataFrame" ] ]
mtrbean/pandas
[ "c0ff67a22df9c18da1172766e313732ed2ab6c30" ]
[ "pandas/core/dtypes/common.py" ]
[ "\"\"\" common type operations \"\"\"\nfrom typing import Any, Callable, Union\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import algos, lib\nfrom pandas._libs.tslibs import conversion\nfrom pandas.compat import PY36\n\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n DatetimeTZDtype,\n ExtensionDtype,\n IntervalDtype,\n PeriodDtype,\n registry,\n)\nfrom pandas.core.dtypes.generic import (\n ABCCategorical,\n ABCDateOffset,\n ABCDatetimeIndex,\n ABCIndexClass,\n ABCPeriodArray,\n ABCPeriodIndex,\n ABCSeries,\n)\nfrom pandas.core.dtypes.inference import ( # noqa:F401\n is_array_like,\n is_bool,\n is_complex,\n is_decimal,\n is_dict_like,\n is_file_like,\n is_float,\n is_hashable,\n is_integer,\n is_interval,\n is_iterator,\n is_list_like,\n is_named_tuple,\n is_nested_list_like,\n is_number,\n is_re,\n is_re_compilable,\n is_scalar,\n is_sequence,\n is_string_like,\n)\n\nfrom pandas._typing import ArrayLike\n\n_POSSIBLY_CAST_DTYPES = {\n np.dtype(t).name\n for t in [\n \"O\",\n \"int8\",\n \"uint8\",\n \"int16\",\n \"uint16\",\n \"int32\",\n \"uint32\",\n \"int64\",\n \"uint64\",\n ]\n}\n\n_NS_DTYPE = conversion.NS_DTYPE\n_TD_DTYPE = conversion.TD_DTYPE\n_INT64_DTYPE = np.dtype(np.int64)\n\n# oh the troubles to reduce import time\n_is_scipy_sparse = None\n\nensure_float64 = algos.ensure_float64\nensure_float32 = algos.ensure_float32\n\n_ensure_datetime64ns = conversion.ensure_datetime64ns\n_ensure_timedelta64ns = conversion.ensure_timedelta64ns\n\n\ndef ensure_float(arr):\n \"\"\"\n Ensure that an array object has a float dtype if possible.\n\n Parameters\n ----------\n arr : array-like\n The array whose data type we want to enforce as float.\n\n Returns\n -------\n float_arr : The original array cast to the float dtype if\n possible. Otherwise, the original array is returned.\n \"\"\"\n\n if issubclass(arr.dtype.type, (np.integer, np.bool_)):\n arr = arr.astype(float)\n return arr\n\n\nensure_uint64 = algos.ensure_uint64\nensure_int64 = algos.ensure_int64\nensure_int32 = algos.ensure_int32\nensure_int16 = algos.ensure_int16\nensure_int8 = algos.ensure_int8\nensure_platform_int = algos.ensure_platform_int\nensure_object = algos.ensure_object\n\n\ndef ensure_str(value: Union[bytes, Any]) -> str:\n \"\"\"\n Ensure that bytes and non-strings get converted into ``str`` objects.\n \"\"\"\n if isinstance(value, bytes):\n value = value.decode(\"utf-8\")\n elif not isinstance(value, str):\n value = str(value)\n return value\n\n\ndef ensure_categorical(arr):\n \"\"\"\n Ensure that an array-like object is a Categorical (if not already).\n\n Parameters\n ----------\n arr : array-like\n The array that we want to convert into a Categorical.\n\n Returns\n -------\n cat_arr : The original array cast as a Categorical. 
If it already\n is a Categorical, we return as is.\n \"\"\"\n\n if not is_categorical(arr):\n from pandas import Categorical\n\n arr = Categorical(arr)\n return arr\n\n\ndef ensure_int_or_float(arr: ArrayLike, copy: bool = False) -> np.array:\n \"\"\"\n Ensure that an dtype array of some integer dtype\n has an int64 dtype if possible.\n If it's not possible, potentially because of overflow,\n convert the array to float64 instead.\n\n Parameters\n ----------\n arr : array-like\n The array whose data type we want to enforce.\n copy: boolean\n Whether to copy the original array or reuse\n it in place, if possible.\n\n Returns\n -------\n out_arr : The input array cast as int64 if\n possible without overflow.\n Otherwise the input array cast to float64.\n\n Notes\n -----\n If the array is explicitly of type uint64 the type\n will remain unchanged.\n \"\"\"\n # TODO: GH27506 potential bug with ExtensionArrays\n try:\n return arr.astype(\"int64\", copy=copy, casting=\"safe\") # type: ignore\n except TypeError:\n pass\n try:\n return arr.astype(\"uint64\", copy=copy, casting=\"safe\") # type: ignore\n except TypeError:\n return arr.astype(\"float64\", copy=copy)\n\n\ndef ensure_python_int(value: Union[int, np.integer]) -> int:\n \"\"\"\n Ensure that a value is a python int.\n\n Parameters\n ----------\n value: int or numpy.integer\n\n Returns\n -------\n int\n\n Raises\n ------\n TypeError: if the value isn't an int or can't be converted to one.\n \"\"\"\n if not is_scalar(value):\n raise TypeError(\n \"Value needs to be a scalar value, was type {}\".format(type(value))\n )\n msg = \"Wrong type {} for value {}\"\n try:\n new_value = int(value)\n assert new_value == value\n except (TypeError, ValueError, AssertionError):\n raise TypeError(msg.format(type(value), value))\n return new_value\n\n\ndef classes(*klasses) -> Callable:\n \"\"\" evaluate if the tipo is a subclass of the klasses \"\"\"\n return lambda tipo: issubclass(tipo, klasses)\n\n\ndef classes_and_not_datetimelike(*klasses) -> Callable:\n \"\"\"\n evaluate if the tipo is a subclass of the klasses\n and not a datetimelike\n \"\"\"\n return lambda tipo: (\n issubclass(tipo, klasses)\n and not issubclass(tipo, (np.datetime64, np.timedelta64))\n )\n\n\ndef is_object_dtype(arr_or_dtype):\n \"\"\"\n Check whether an array-like or dtype is of the object dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the object dtype.\n\n Examples\n --------\n >>> is_object_dtype(object)\n True\n >>> is_object_dtype(int)\n False\n >>> is_object_dtype(np.array([], dtype=object))\n True\n >>> is_object_dtype(np.array([], dtype=int))\n False\n >>> is_object_dtype([1, 2, 3])\n False\n \"\"\"\n return _is_dtype_type(arr_or_dtype, classes(np.object_))\n\n\ndef is_sparse(arr):\n \"\"\"\n Check whether an array-like is a 1-D pandas sparse array.\n\n Check that the one-dimensional array-like is a pandas sparse array.\n Returns True if it is a pandas sparse array, not another type of\n sparse array.\n\n Parameters\n ----------\n arr : array-like\n Array-like to check.\n\n Returns\n -------\n bool\n Whether or not the array-like is a pandas sparse array.\n\n See Also\n --------\n DataFrame.to_sparse : Convert DataFrame to a SparseDataFrame.\n Series.to_sparse : Convert Series to SparseSeries.\n Series.to_dense : Return dense representation of a Series.\n\n Examples\n --------\n Returns `True` if the parameter is a 1-D pandas sparse 
array.\n\n >>> is_sparse(pd.SparseArray([0, 0, 1, 0]))\n True\n >>> is_sparse(pd.SparseSeries([0, 0, 1, 0]))\n True\n\n Returns `False` if the parameter is not sparse.\n\n >>> is_sparse(np.array([0, 0, 1, 0]))\n False\n >>> is_sparse(pd.Series([0, 1, 0, 0]))\n False\n\n Returns `False` if the parameter is not a pandas sparse array.\n\n >>> from scipy.sparse import bsr_matrix\n >>> is_sparse(bsr_matrix([0, 1, 0, 0]))\n False\n\n Returns `False` if the parameter has more than one dimension.\n\n >>> df = pd.SparseDataFrame([389., 24., 80.5, np.nan],\n columns=['max_speed'],\n index=['falcon', 'parrot', 'lion', 'monkey'])\n >>> is_sparse(df)\n False\n >>> is_sparse(df.max_speed)\n True\n \"\"\"\n from pandas.core.arrays.sparse import SparseDtype\n\n dtype = getattr(arr, \"dtype\", arr)\n return isinstance(dtype, SparseDtype)\n\n\ndef is_scipy_sparse(arr):\n \"\"\"\n Check whether an array-like is a scipy.sparse.spmatrix instance.\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is a scipy.sparse.spmatrix instance.\n\n Notes\n -----\n If scipy is not installed, this function will always return False.\n\n Examples\n --------\n >>> from scipy.sparse import bsr_matrix\n >>> is_scipy_sparse(bsr_matrix([1, 2, 3]))\n True\n >>> is_scipy_sparse(pd.SparseArray([1, 2, 3]))\n False\n >>> is_scipy_sparse(pd.SparseSeries([1, 2, 3]))\n False\n \"\"\"\n\n global _is_scipy_sparse\n\n if _is_scipy_sparse is None:\n try:\n from scipy.sparse import issparse as _is_scipy_sparse\n except ImportError:\n _is_scipy_sparse = lambda _: False\n\n return _is_scipy_sparse(arr)\n\n\ndef is_categorical(arr) -> bool:\n \"\"\"\n Check whether an array-like is a Categorical instance.\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is of a Categorical instance.\n\n Examples\n --------\n >>> is_categorical([1, 2, 3])\n False\n\n Categoricals, Series Categoricals, and CategoricalIndex will return True.\n\n >>> cat = pd.Categorical([1, 2, 3])\n >>> is_categorical(cat)\n True\n >>> is_categorical(pd.Series(cat))\n True\n >>> is_categorical(pd.CategoricalIndex([1, 2, 3]))\n True\n \"\"\"\n\n return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr)\n\n\ndef is_datetimetz(arr):\n \"\"\"\n Check whether an array-like is a datetime array-like with a timezone\n component in its dtype.\n\n .. deprecated:: 0.24.0\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is a datetime array-like with a\n timezone component in its dtype.\n\n Examples\n --------\n >>> is_datetimetz([1, 2, 3])\n False\n\n Although the following examples are both DatetimeIndex objects,\n the first one returns False because it has no timezone component\n unlike the second one, which returns True.\n\n >>> is_datetimetz(pd.DatetimeIndex([1, 2, 3]))\n False\n >>> is_datetimetz(pd.DatetimeIndex([1, 2, 3], tz=\"US/Eastern\"))\n True\n\n The object need not be a DatetimeIndex object. It just needs to have\n a dtype which has a timezone component.\n\n >>> dtype = DatetimeTZDtype(\"ns\", tz=\"US/Eastern\")\n >>> s = pd.Series([], dtype=dtype)\n >>> is_datetimetz(s)\n True\n \"\"\"\n\n warnings.warn(\n \"'is_datetimetz' is deprecated and will be removed in a \"\n \"future version. 
Use 'is_datetime64tz_dtype' instead.\",\n FutureWarning,\n stacklevel=2,\n )\n return is_datetime64tz_dtype(arr)\n\n\ndef is_offsetlike(arr_or_obj):\n \"\"\"\n Check if obj or all elements of list-like is DateOffset\n\n Parameters\n ----------\n arr_or_obj : object\n\n Returns\n -------\n boolean\n Whether the object is a DateOffset or listlike of DatetOffsets\n\n Examples\n --------\n >>> is_offsetlike(pd.DateOffset(days=1))\n True\n >>> is_offsetlike('offset')\n False\n >>> is_offsetlike([pd.offsets.Minute(4), pd.offsets.MonthEnd()])\n True\n >>> is_offsetlike(np.array([pd.DateOffset(months=3), pd.Timestamp.now()]))\n False\n \"\"\"\n if isinstance(arr_or_obj, ABCDateOffset):\n return True\n elif is_list_like(arr_or_obj) and len(arr_or_obj) and is_object_dtype(arr_or_obj):\n return all(isinstance(x, ABCDateOffset) for x in arr_or_obj)\n return False\n\n\ndef is_period(arr):\n \"\"\"\n Check whether an array-like is a periodical index.\n\n .. deprecated:: 0.24.0\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is a periodical index.\n\n Examples\n --------\n >>> is_period([1, 2, 3])\n False\n >>> is_period(pd.Index([1, 2, 3]))\n False\n >>> is_period(pd.PeriodIndex([\"2017-01-01\"], freq=\"D\"))\n True\n \"\"\"\n\n warnings.warn(\n \"'is_period' is deprecated and will be removed in a future \"\n \"version. Use 'is_period_dtype' or is_period_arraylike' \"\n \"instead.\",\n FutureWarning,\n stacklevel=2,\n )\n\n return isinstance(arr, ABCPeriodIndex) or is_period_arraylike(arr)\n\n\ndef is_datetime64_dtype(arr_or_dtype):\n \"\"\"\n Check whether an array-like or dtype is of the datetime64 dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the datetime64 dtype.\n\n Examples\n --------\n >>> is_datetime64_dtype(object)\n False\n >>> is_datetime64_dtype(np.datetime64)\n True\n >>> is_datetime64_dtype(np.array([], dtype=int))\n False\n >>> is_datetime64_dtype(np.array([], dtype=np.datetime64))\n True\n >>> is_datetime64_dtype([1, 2, 3])\n False\n \"\"\"\n\n return _is_dtype_type(arr_or_dtype, classes(np.datetime64))\n\n\ndef is_datetime64tz_dtype(arr_or_dtype):\n \"\"\"\n Check whether an array-like or dtype is of a DatetimeTZDtype dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of a DatetimeTZDtype dtype.\n\n Examples\n --------\n >>> is_datetime64tz_dtype(object)\n False\n >>> is_datetime64tz_dtype([1, 2, 3])\n False\n >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive\n False\n >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz=\"US/Eastern\"))\n True\n\n >>> dtype = DatetimeTZDtype(\"ns\", tz=\"US/Eastern\")\n >>> s = pd.Series([], dtype=dtype)\n >>> is_datetime64tz_dtype(dtype)\n True\n >>> is_datetime64tz_dtype(s)\n True\n \"\"\"\n\n if arr_or_dtype is None:\n return False\n return DatetimeTZDtype.is_dtype(arr_or_dtype)\n\n\ndef is_timedelta64_dtype(arr_or_dtype):\n \"\"\"\n Check whether an array-like or dtype is of the timedelta64 dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the timedelta64 dtype.\n\n Examples\n --------\n >>> is_timedelta64_dtype(object)\n False\n >>> 
is_timedelta64_dtype(np.timedelta64)\n True\n >>> is_timedelta64_dtype([1, 2, 3])\n False\n >>> is_timedelta64_dtype(pd.Series([], dtype=\"timedelta64[ns]\"))\n True\n >>> is_timedelta64_dtype('0 days')\n False\n \"\"\"\n\n return _is_dtype_type(arr_or_dtype, classes(np.timedelta64))\n\n\ndef is_period_dtype(arr_or_dtype):\n \"\"\"\n Check whether an array-like or dtype is of the Period dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the Period dtype.\n\n Examples\n --------\n >>> is_period_dtype(object)\n False\n >>> is_period_dtype(PeriodDtype(freq=\"D\"))\n True\n >>> is_period_dtype([1, 2, 3])\n False\n >>> is_period_dtype(pd.Period(\"2017-01-01\"))\n False\n >>> is_period_dtype(pd.PeriodIndex([], freq=\"A\"))\n True\n \"\"\"\n\n # TODO: Consider making Period an instance of PeriodDtype\n if arr_or_dtype is None:\n return False\n return PeriodDtype.is_dtype(arr_or_dtype)\n\n\ndef is_interval_dtype(arr_or_dtype):\n \"\"\"\n Check whether an array-like or dtype is of the Interval dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the Interval dtype.\n\n Examples\n --------\n >>> is_interval_dtype(object)\n False\n >>> is_interval_dtype(IntervalDtype())\n True\n >>> is_interval_dtype([1, 2, 3])\n False\n >>>\n >>> interval = pd.Interval(1, 2, closed=\"right\")\n >>> is_interval_dtype(interval)\n False\n >>> is_interval_dtype(pd.IntervalIndex([interval]))\n True\n \"\"\"\n\n # TODO: Consider making Interval an instance of IntervalDtype\n if arr_or_dtype is None:\n return False\n return IntervalDtype.is_dtype(arr_or_dtype)\n\n\ndef is_categorical_dtype(arr_or_dtype) -> bool:\n \"\"\"\n Check whether an array-like or dtype is of the Categorical dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like or dtype is of the Categorical dtype.\n\n Examples\n --------\n >>> is_categorical_dtype(object)\n False\n >>> is_categorical_dtype(CategoricalDtype())\n True\n >>> is_categorical_dtype([1, 2, 3])\n False\n >>> is_categorical_dtype(pd.Categorical([1, 2, 3]))\n True\n >>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))\n True\n \"\"\"\n\n if arr_or_dtype is None:\n return False\n return CategoricalDtype.is_dtype(arr_or_dtype)\n\n\ndef is_string_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of the string dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the string dtype.\n\n Examples\n --------\n >>> is_string_dtype(str)\n True\n >>> is_string_dtype(object)\n True\n >>> is_string_dtype(int)\n False\n >>>\n >>> is_string_dtype(np.array(['a', 'b']))\n True\n >>> is_string_dtype(pd.Series([1, 2]))\n False\n \"\"\"\n\n # TODO: gh-15585: consider making the checks stricter.\n def condition(dtype):\n return dtype.kind in (\"O\", \"S\", \"U\") and not is_period_dtype(dtype)\n\n return _is_dtype(arr_or_dtype, condition)\n\n\ndef is_period_arraylike(arr):\n \"\"\"\n Check whether an array-like is a periodical array-like or PeriodIndex.\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is a periodical array-like 
or\n PeriodIndex instance.\n\n Examples\n --------\n >>> is_period_arraylike([1, 2, 3])\n False\n >>> is_period_arraylike(pd.Index([1, 2, 3]))\n False\n >>> is_period_arraylike(pd.PeriodIndex([\"2017-01-01\"], freq=\"D\"))\n True\n \"\"\"\n\n if isinstance(arr, (ABCPeriodIndex, ABCPeriodArray)):\n return True\n elif isinstance(arr, (np.ndarray, ABCSeries)):\n return is_period_dtype(arr.dtype)\n return getattr(arr, \"inferred_type\", None) == \"period\"\n\n\ndef is_datetime_arraylike(arr):\n \"\"\"\n Check whether an array-like is a datetime array-like or DatetimeIndex.\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is a datetime array-like or\n DatetimeIndex.\n\n Examples\n --------\n >>> is_datetime_arraylike([1, 2, 3])\n False\n >>> is_datetime_arraylike(pd.Index([1, 2, 3]))\n False\n >>> is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3]))\n True\n \"\"\"\n\n if isinstance(arr, ABCDatetimeIndex):\n return True\n elif isinstance(arr, (np.ndarray, ABCSeries)):\n return (\n is_object_dtype(arr.dtype)\n and lib.infer_dtype(arr, skipna=False) == \"datetime\"\n )\n return getattr(arr, \"inferred_type\", None) == \"datetime\"\n\n\ndef is_datetimelike(arr):\n \"\"\"\n Check whether an array-like is a datetime-like array-like.\n\n Acceptable datetime-like objects are (but not limited to) datetime\n indices, periodic indices, and timedelta indices.\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is a datetime-like array-like.\n\n Examples\n --------\n >>> is_datetimelike([1, 2, 3])\n False\n >>> is_datetimelike(pd.Index([1, 2, 3]))\n False\n >>> is_datetimelike(pd.DatetimeIndex([1, 2, 3]))\n True\n >>> is_datetimelike(pd.DatetimeIndex([1, 2, 3], tz=\"US/Eastern\"))\n True\n >>> is_datetimelike(pd.PeriodIndex([], freq=\"A\"))\n True\n >>> is_datetimelike(np.array([], dtype=np.datetime64))\n True\n >>> is_datetimelike(pd.Series([], dtype=\"timedelta64[ns]\"))\n True\n >>>\n >>> dtype = DatetimeTZDtype(\"ns\", tz=\"US/Eastern\")\n >>> s = pd.Series([], dtype=dtype)\n >>> is_datetimelike(s)\n True\n \"\"\"\n\n return (\n is_datetime64_dtype(arr)\n or is_datetime64tz_dtype(arr)\n or is_timedelta64_dtype(arr)\n or isinstance(arr, ABCPeriodIndex)\n )\n\n\ndef is_dtype_equal(source, target):\n \"\"\"\n Check if two dtypes are equal.\n\n Parameters\n ----------\n source : The first dtype to compare\n target : The second dtype to compare\n\n Returns\n -------\n boolean\n Whether or not the two dtypes are equal.\n\n Examples\n --------\n >>> is_dtype_equal(int, float)\n False\n >>> is_dtype_equal(\"int\", int)\n True\n >>> is_dtype_equal(object, \"category\")\n False\n >>> is_dtype_equal(CategoricalDtype(), \"category\")\n True\n >>> is_dtype_equal(DatetimeTZDtype(), \"datetime64\")\n False\n \"\"\"\n\n try:\n source = _get_dtype(source)\n target = _get_dtype(target)\n return source == target\n except (TypeError, AttributeError):\n\n # invalid comparison\n # object == category will hit this\n return False\n\n\ndef is_any_int_dtype(arr_or_dtype) -> bool:\n \"\"\"Check whether the provided array or dtype is of an integer dtype.\n\n In this function, timedelta64 instances are also considered \"any-integer\"\n type objects and will return True.\n\n This function is internal and should not be exposed in the public API.\n\n .. versionchanged:: 0.24.0\n\n The nullable Integer dtypes (e.g. 
pandas.Int64Dtype) are also considered\n as integer by this function.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of an integer dtype.\n\n Examples\n --------\n >>> is_any_int_dtype(str)\n False\n >>> is_any_int_dtype(int)\n True\n >>> is_any_int_dtype(float)\n False\n >>> is_any_int_dtype(np.uint64)\n True\n >>> is_any_int_dtype(np.datetime64)\n False\n >>> is_any_int_dtype(np.timedelta64)\n True\n >>> is_any_int_dtype(np.array(['a', 'b']))\n False\n >>> is_any_int_dtype(pd.Series([1, 2]))\n True\n >>> is_any_int_dtype(np.array([], dtype=np.timedelta64))\n True\n >>> is_any_int_dtype(pd.Index([1, 2.])) # float\n False\n \"\"\"\n\n return _is_dtype_type(arr_or_dtype, classes(np.integer, np.timedelta64))\n\n\ndef is_integer_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of an integer dtype.\n\n Unlike in `in_any_int_dtype`, timedelta64 instances will return False.\n\n .. versionchanged:: 0.24.0\n\n The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered\n as integer by this function.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of an integer dtype and\n not an instance of timedelta64.\n\n Examples\n --------\n >>> is_integer_dtype(str)\n False\n >>> is_integer_dtype(int)\n True\n >>> is_integer_dtype(float)\n False\n >>> is_integer_dtype(np.uint64)\n True\n >>> is_integer_dtype('int8')\n True\n >>> is_integer_dtype('Int8')\n True\n >>> is_integer_dtype(pd.Int8Dtype)\n True\n >>> is_integer_dtype(np.datetime64)\n False\n >>> is_integer_dtype(np.timedelta64)\n False\n >>> is_integer_dtype(np.array(['a', 'b']))\n False\n >>> is_integer_dtype(pd.Series([1, 2]))\n True\n >>> is_integer_dtype(np.array([], dtype=np.timedelta64))\n False\n >>> is_integer_dtype(pd.Index([1, 2.])) # float\n False\n \"\"\"\n\n return _is_dtype_type(arr_or_dtype, classes_and_not_datetimelike(np.integer))\n\n\ndef is_signed_integer_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of a signed integer dtype.\n\n Unlike in `in_any_int_dtype`, timedelta64 instances will return False.\n\n .. versionchanged:: 0.24.0\n\n The nullable Integer dtypes (e.g. 
pandas.Int64Dtype) are also considered\n as integer by this function.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a signed integer dtype\n and not an instance of timedelta64.\n\n Examples\n --------\n >>> is_signed_integer_dtype(str)\n False\n >>> is_signed_integer_dtype(int)\n True\n >>> is_signed_integer_dtype(float)\n False\n >>> is_signed_integer_dtype(np.uint64) # unsigned\n False\n >>> is_signed_integer_dtype('int8')\n True\n >>> is_signed_integer_dtype('Int8')\n True\n >>> is_signed_dtype(pd.Int8Dtype)\n True\n >>> is_signed_integer_dtype(np.datetime64)\n False\n >>> is_signed_integer_dtype(np.timedelta64)\n False\n >>> is_signed_integer_dtype(np.array(['a', 'b']))\n False\n >>> is_signed_integer_dtype(pd.Series([1, 2]))\n True\n >>> is_signed_integer_dtype(np.array([], dtype=np.timedelta64))\n False\n >>> is_signed_integer_dtype(pd.Index([1, 2.])) # float\n False\n >>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned\n False\n \"\"\"\n\n return _is_dtype_type(arr_or_dtype, classes_and_not_datetimelike(np.signedinteger))\n\n\ndef is_unsigned_integer_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of an unsigned integer dtype.\n\n .. versionchanged:: 0.24.0\n\n The nullable Integer dtypes (e.g. pandas.UInt64Dtype) are also\n considered as integer by this function.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of an unsigned integer dtype.\n\n Examples\n --------\n >>> is_unsigned_integer_dtype(str)\n False\n >>> is_unsigned_integer_dtype(int) # signed\n False\n >>> is_unsigned_integer_dtype(float)\n False\n >>> is_unsigned_integer_dtype(np.uint64)\n True\n >>> is_unsigned_integer_dtype('uint8')\n True\n >>> is_unsigned_integer_dtype('UInt8')\n True\n >>> is_unsigned_integer_dtype(pd.UInt8Dtype)\n True\n >>> is_unsigned_integer_dtype(np.array(['a', 'b']))\n False\n >>> is_unsigned_integer_dtype(pd.Series([1, 2])) # signed\n False\n >>> is_unsigned_integer_dtype(pd.Index([1, 2.])) # float\n False\n >>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32))\n True\n \"\"\"\n return _is_dtype_type(\n arr_or_dtype, classes_and_not_datetimelike(np.unsignedinteger)\n )\n\n\ndef is_int64_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of the int64 dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the int64 dtype.\n\n Notes\n -----\n Depending on system architecture, the return value of `is_int64_dtype(\n int)` will be True if the OS uses 64-bit integers and False if the OS\n uses 32-bit integers.\n\n Examples\n --------\n >>> is_int64_dtype(str)\n False\n >>> is_int64_dtype(np.int32)\n False\n >>> is_int64_dtype(np.int64)\n True\n >>> is_int64_dtype('int8')\n False\n >>> is_int64_dtype('Int8')\n False\n >>> is_int64_dtype(pd.Int64Dtype)\n True\n >>> is_int64_dtype(float)\n False\n >>> is_int64_dtype(np.uint64) # unsigned\n False\n >>> is_int64_dtype(np.array(['a', 'b']))\n False\n >>> is_int64_dtype(np.array([1, 2], dtype=np.int64))\n True\n >>> is_int64_dtype(pd.Index([1, 2.])) # float\n False\n >>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned\n False\n \"\"\"\n\n return _is_dtype_type(arr_or_dtype, classes(np.int64))\n\n\ndef 
is_datetime64_any_dtype(arr_or_dtype) -> bool:\n \"\"\"\n Check whether the provided array or dtype is of the datetime64 dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the datetime64 dtype.\n\n Examples\n --------\n >>> is_datetime64_any_dtype(str)\n False\n >>> is_datetime64_any_dtype(int)\n False\n >>> is_datetime64_any_dtype(np.datetime64) # can be tz-naive\n True\n >>> is_datetime64_any_dtype(DatetimeTZDtype(\"ns\", \"US/Eastern\"))\n True\n >>> is_datetime64_any_dtype(np.array(['a', 'b']))\n False\n >>> is_datetime64_any_dtype(np.array([1, 2]))\n False\n >>> is_datetime64_any_dtype(np.array([], dtype=np.datetime64))\n True\n >>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3],\n dtype=np.datetime64))\n True\n \"\"\"\n\n if arr_or_dtype is None:\n return False\n return is_datetime64_dtype(arr_or_dtype) or is_datetime64tz_dtype(arr_or_dtype)\n\n\ndef is_datetime64_ns_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of the datetime64[ns] dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the datetime64[ns] dtype.\n\n Examples\n --------\n >>> is_datetime64_ns_dtype(str)\n False\n >>> is_datetime64_ns_dtype(int)\n False\n >>> is_datetime64_ns_dtype(np.datetime64) # no unit\n False\n >>> is_datetime64_ns_dtype(DatetimeTZDtype(\"ns\", \"US/Eastern\"))\n True\n >>> is_datetime64_ns_dtype(np.array(['a', 'b']))\n False\n >>> is_datetime64_ns_dtype(np.array([1, 2]))\n False\n >>> is_datetime64_ns_dtype(np.array([], dtype=np.datetime64)) # no unit\n False\n >>> is_datetime64_ns_dtype(np.array([],\n dtype=\"datetime64[ps]\")) # wrong unit\n False\n >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3],\n dtype=np.datetime64)) # has 'ns' unit\n True\n \"\"\"\n\n if arr_or_dtype is None:\n return False\n try:\n tipo = _get_dtype(arr_or_dtype)\n except TypeError:\n if is_datetime64tz_dtype(arr_or_dtype):\n tipo = _get_dtype(arr_or_dtype.dtype)\n else:\n return False\n return tipo == _NS_DTYPE or getattr(tipo, \"base\", None) == _NS_DTYPE\n\n\ndef is_timedelta64_ns_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of the timedelta64[ns] dtype.\n\n This is a very specific dtype, so generic ones like `np.timedelta64`\n will return False if passed into this function.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the timedelta64[ns] dtype.\n\n Examples\n --------\n >>> is_timedelta64_ns_dtype(np.dtype('m8[ns]'))\n True\n >>> is_timedelta64_ns_dtype(np.dtype('m8[ps]')) # Wrong frequency\n False\n >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]'))\n True\n >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))\n False\n \"\"\"\n return _is_dtype(arr_or_dtype, lambda dtype: dtype == _TD_DTYPE)\n\n\ndef is_datetime_or_timedelta_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of\n a timedelta64 or datetime64 dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a timedelta64,\n or datetime64 dtype.\n\n Examples\n --------\n >>> is_datetime_or_timedelta_dtype(str)\n False\n >>> is_datetime_or_timedelta_dtype(int)\n False\n >>> 
is_datetime_or_timedelta_dtype(np.datetime64)\n True\n >>> is_datetime_or_timedelta_dtype(np.timedelta64)\n True\n >>> is_datetime_or_timedelta_dtype(np.array(['a', 'b']))\n False\n >>> is_datetime_or_timedelta_dtype(pd.Series([1, 2]))\n False\n >>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.timedelta64))\n True\n >>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.datetime64))\n True\n \"\"\"\n\n return _is_dtype_type(arr_or_dtype, classes(np.datetime64, np.timedelta64))\n\n\ndef _is_unorderable_exception(e: TypeError) -> bool:\n \"\"\"\n Check if the exception raised is an unorderable exception.\n\n The error message differs for 3 <= PY <= 3.5 and PY >= 3.6, so\n we need to condition based on Python version.\n\n Parameters\n ----------\n e : Exception or sub-class\n The exception object to check.\n\n Returns\n -------\n boolean\n Whether or not the exception raised is an unorderable exception.\n \"\"\"\n\n if PY36:\n return \"'>' not supported between instances of\" in str(e)\n\n return \"unorderable\" in str(e)\n\n\ndef is_numeric_v_string_like(a, b):\n \"\"\"\n Check if we are comparing a string-like object to a numeric ndarray.\n\n NumPy doesn't like to compare such objects, especially numeric arrays\n and scalar string-likes.\n\n Parameters\n ----------\n a : array-like, scalar\n The first object to check.\n b : array-like, scalar\n The second object to check.\n\n Returns\n -------\n boolean\n Whether we return a comparing a string-like object to a numeric array.\n\n Examples\n --------\n >>> is_numeric_v_string_like(1, 1)\n False\n >>> is_numeric_v_string_like(\"foo\", \"foo\")\n False\n >>> is_numeric_v_string_like(1, \"foo\") # non-array numeric\n False\n >>> is_numeric_v_string_like(np.array([1]), \"foo\")\n True\n >>> is_numeric_v_string_like(\"foo\", np.array([1])) # symmetric check\n True\n >>> is_numeric_v_string_like(np.array([1, 2]), np.array([\"foo\"]))\n True\n >>> is_numeric_v_string_like(np.array([\"foo\"]), np.array([1, 2]))\n True\n >>> is_numeric_v_string_like(np.array([1]), np.array([2]))\n False\n >>> is_numeric_v_string_like(np.array([\"foo\"]), np.array([\"foo\"]))\n False\n \"\"\"\n\n is_a_array = isinstance(a, np.ndarray)\n is_b_array = isinstance(b, np.ndarray)\n\n is_a_numeric_array = is_a_array and is_numeric_dtype(a)\n is_b_numeric_array = is_b_array and is_numeric_dtype(b)\n is_a_string_array = is_a_array and is_string_like_dtype(a)\n is_b_string_array = is_b_array and is_string_like_dtype(b)\n\n is_a_scalar_string_like = not is_a_array and is_string_like(a)\n is_b_scalar_string_like = not is_b_array and is_string_like(b)\n\n return (\n (is_a_numeric_array and is_b_scalar_string_like)\n or (is_b_numeric_array and is_a_scalar_string_like)\n or (is_a_numeric_array and is_b_string_array)\n or (is_b_numeric_array and is_a_string_array)\n )\n\n\ndef is_datetimelike_v_numeric(a, b):\n \"\"\"\n Check if we are comparing a datetime-like object to a numeric object.\n\n By \"numeric,\" we mean an object that is either of an int or float dtype.\n\n Parameters\n ----------\n a : array-like, scalar\n The first object to check.\n b : array-like, scalar\n The second object to check.\n\n Returns\n -------\n boolean\n Whether we return a comparing a datetime-like to a numeric object.\n\n Examples\n --------\n >>> dt = np.datetime64(pd.datetime(2017, 1, 1))\n >>>\n >>> is_datetimelike_v_numeric(1, 1)\n False\n >>> is_datetimelike_v_numeric(dt, dt)\n False\n >>> is_datetimelike_v_numeric(1, dt)\n True\n >>> is_datetimelike_v_numeric(dt, 1) # symmetric check\n 
True\n >>> is_datetimelike_v_numeric(np.array([dt]), 1)\n True\n >>> is_datetimelike_v_numeric(np.array([1]), dt)\n True\n >>> is_datetimelike_v_numeric(np.array([dt]), np.array([1]))\n True\n >>> is_datetimelike_v_numeric(np.array([1]), np.array([2]))\n False\n >>> is_datetimelike_v_numeric(np.array([dt]), np.array([dt]))\n False\n \"\"\"\n\n if not hasattr(a, \"dtype\"):\n a = np.asarray(a)\n if not hasattr(b, \"dtype\"):\n b = np.asarray(b)\n\n def is_numeric(x):\n \"\"\"\n Check if an object has a numeric dtype (i.e. integer or float).\n \"\"\"\n return is_integer_dtype(x) or is_float_dtype(x)\n\n is_datetimelike = needs_i8_conversion\n return (is_datetimelike(a) and is_numeric(b)) or (\n is_datetimelike(b) and is_numeric(a)\n )\n\n\ndef needs_i8_conversion(arr_or_dtype):\n \"\"\"\n Check whether the array or dtype should be converted to int64.\n\n An array-like or dtype \"needs\" such a conversion if the array-like\n or dtype is of a datetime-like dtype\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype should be converted to int64.\n\n Examples\n --------\n >>> needs_i8_conversion(str)\n False\n >>> needs_i8_conversion(np.int64)\n False\n >>> needs_i8_conversion(np.datetime64)\n True\n >>> needs_i8_conversion(np.array(['a', 'b']))\n False\n >>> needs_i8_conversion(pd.Series([1, 2]))\n False\n >>> needs_i8_conversion(pd.Series([], dtype=\"timedelta64[ns]\"))\n True\n >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz=\"US/Eastern\"))\n True\n \"\"\"\n\n if arr_or_dtype is None:\n return False\n return (\n is_datetime_or_timedelta_dtype(arr_or_dtype)\n or is_datetime64tz_dtype(arr_or_dtype)\n or is_period_dtype(arr_or_dtype)\n )\n\n\ndef is_numeric_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of a numeric dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a numeric dtype.\n\n Examples\n --------\n >>> is_numeric_dtype(str)\n False\n >>> is_numeric_dtype(int)\n True\n >>> is_numeric_dtype(float)\n True\n >>> is_numeric_dtype(np.uint64)\n True\n >>> is_numeric_dtype(np.datetime64)\n False\n >>> is_numeric_dtype(np.timedelta64)\n False\n >>> is_numeric_dtype(np.array(['a', 'b']))\n False\n >>> is_numeric_dtype(pd.Series([1, 2]))\n True\n >>> is_numeric_dtype(pd.Index([1, 2.]))\n True\n >>> is_numeric_dtype(np.array([], dtype=np.timedelta64))\n False\n \"\"\"\n\n return _is_dtype_type(\n arr_or_dtype, classes_and_not_datetimelike(np.number, np.bool_)\n )\n\n\ndef is_string_like_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of a string-like dtype.\n\n Unlike `is_string_dtype`, the object dtype is excluded because it\n is a mixed dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of the string dtype.\n\n Examples\n --------\n >>> is_string_like_dtype(str)\n True\n >>> is_string_like_dtype(object)\n False\n >>> is_string_like_dtype(np.array(['a', 'b']))\n True\n >>> is_string_like_dtype(pd.Series([1, 2]))\n False\n \"\"\"\n\n return _is_dtype(arr_or_dtype, lambda dtype: dtype.kind in (\"S\", \"U\"))\n\n\ndef is_float_dtype(arr_or_dtype):\n \"\"\"\n Check whether the provided array or dtype is of a float dtype.\n\n This function is internal and should not be exposed in the public API.\n\n 
Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a float dtype.\n\n Examples\n --------\n >>> is_float_dtype(str)\n False\n >>> is_float_dtype(int)\n False\n >>> is_float_dtype(float)\n True\n >>> is_float_dtype(np.array(['a', 'b']))\n False\n >>> is_float_dtype(pd.Series([1, 2]))\n False\n >>> is_float_dtype(pd.Index([1, 2.]))\n True\n \"\"\"\n return _is_dtype_type(arr_or_dtype, classes(np.floating))\n\n\ndef is_bool_dtype(arr_or_dtype) -> bool:\n \"\"\"\n Check whether the provided array or dtype is of a boolean dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a boolean dtype.\n\n Notes\n -----\n An ExtensionArray is considered boolean when the ``_is_boolean``\n attribute is set to True.\n\n Examples\n --------\n >>> is_bool_dtype(str)\n False\n >>> is_bool_dtype(int)\n False\n >>> is_bool_dtype(bool)\n True\n >>> is_bool_dtype(np.bool)\n True\n >>> is_bool_dtype(np.array(['a', 'b']))\n False\n >>> is_bool_dtype(pd.Series([1, 2]))\n False\n >>> is_bool_dtype(np.array([True, False]))\n True\n >>> is_bool_dtype(pd.Categorical([True, False]))\n True\n >>> is_bool_dtype(pd.SparseArray([True, False]))\n True\n \"\"\"\n if arr_or_dtype is None:\n return False\n try:\n dtype = _get_dtype(arr_or_dtype)\n except TypeError:\n return False\n\n if isinstance(arr_or_dtype, CategoricalDtype):\n arr_or_dtype = arr_or_dtype.categories\n # now we use the special definition for Index\n\n if isinstance(arr_or_dtype, ABCIndexClass):\n\n # TODO(jreback)\n # we don't have a boolean Index class\n # so its object, we need to infer to\n # guess this\n return arr_or_dtype.is_object and arr_or_dtype.inferred_type == \"boolean\"\n elif is_extension_array_dtype(arr_or_dtype):\n dtype = getattr(arr_or_dtype, \"dtype\", arr_or_dtype)\n return dtype._is_boolean\n\n return issubclass(dtype.type, np.bool_)\n\n\ndef is_extension_type(arr):\n \"\"\"\n Check whether an array-like is of a pandas extension class instance.\n\n Extension classes include categoricals, pandas sparse objects (i.e.\n classes represented within the pandas library and not ones external\n to it like scipy sparse matrices), and datetime-like arrays.\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is of a pandas extension class instance.\n\n Examples\n --------\n >>> is_extension_type([1, 2, 3])\n False\n >>> is_extension_type(np.array([1, 2, 3]))\n False\n >>>\n >>> cat = pd.Categorical([1, 2, 3])\n >>>\n >>> is_extension_type(cat)\n True\n >>> is_extension_type(pd.Series(cat))\n True\n >>> is_extension_type(pd.SparseArray([1, 2, 3]))\n True\n >>> is_extension_type(pd.SparseSeries([1, 2, 3]))\n True\n >>>\n >>> from scipy.sparse import bsr_matrix\n >>> is_extension_type(bsr_matrix([1, 2, 3]))\n False\n >>> is_extension_type(pd.DatetimeIndex([1, 2, 3]))\n False\n >>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz=\"US/Eastern\"))\n True\n >>>\n >>> dtype = DatetimeTZDtype(\"ns\", tz=\"US/Eastern\")\n >>> s = pd.Series([], dtype=dtype)\n >>> is_extension_type(s)\n True\n \"\"\"\n\n if is_categorical(arr):\n return True\n elif is_sparse(arr):\n return True\n elif is_datetime64tz_dtype(arr):\n return True\n return False\n\n\ndef is_extension_array_dtype(arr_or_dtype):\n \"\"\"\n Check if an object is a pandas extension array type.\n\n See the 
:ref:`Use Guide <extending.extension-types>` for more.\n\n Parameters\n ----------\n arr_or_dtype : object\n For array-like input, the ``.dtype`` attribute will\n be extracted.\n\n Returns\n -------\n bool\n Whether the `arr_or_dtype` is an extension array type.\n\n Notes\n -----\n This checks whether an object implements the pandas extension\n array interface. In pandas, this includes:\n\n * Categorical\n * Sparse\n * Interval\n * Period\n * DatetimeArray\n * TimedeltaArray\n\n Third-party libraries may implement arrays or types satisfying\n this interface as well.\n\n Examples\n --------\n >>> from pandas.api.types import is_extension_array_dtype\n >>> arr = pd.Categorical(['a', 'b'])\n >>> is_extension_array_dtype(arr)\n True\n >>> is_extension_array_dtype(arr.dtype)\n True\n\n >>> arr = np.array(['a', 'b'])\n >>> is_extension_array_dtype(arr.dtype)\n False\n \"\"\"\n dtype = getattr(arr_or_dtype, \"dtype\", arr_or_dtype)\n return isinstance(dtype, ExtensionDtype) or registry.find(dtype) is not None\n\n\ndef is_complex_dtype(arr_or_dtype) -> bool:\n \"\"\"\n Check whether the provided array or dtype is of a complex dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array or dtype to check.\n\n Returns\n -------\n boolean\n Whether or not the array or dtype is of a complex dtype.\n\n Examples\n --------\n >>> is_complex_dtype(str)\n False\n >>> is_complex_dtype(int)\n False\n >>> is_complex_dtype(np.complex)\n True\n >>> is_complex_dtype(np.array(['a', 'b']))\n False\n >>> is_complex_dtype(pd.Series([1, 2]))\n False\n >>> is_complex_dtype(np.array([1 + 1j, 5]))\n True\n \"\"\"\n\n return _is_dtype_type(arr_or_dtype, classes(np.complexfloating))\n\n\ndef _is_dtype(arr_or_dtype, condition) -> bool:\n \"\"\"\n Return a boolean if the condition is satisfied for the arr_or_dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like, str, np.dtype, or ExtensionArrayType\n The array-like or dtype object whose dtype we want to extract.\n condition : callable[Union[np.dtype, ExtensionDtype]]\n\n Returns\n -------\n bool\n\n \"\"\"\n\n if arr_or_dtype is None:\n return False\n try:\n dtype = _get_dtype(arr_or_dtype)\n except (TypeError, ValueError, UnicodeEncodeError):\n return False\n return condition(dtype)\n\n\ndef _get_dtype(arr_or_dtype):\n \"\"\"\n Get the dtype instance associated with an array\n or dtype object.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype object whose dtype we want to extract.\n\n Returns\n -------\n obj_dtype : The extract dtype instance from the\n passed in array or dtype object.\n\n Raises\n ------\n TypeError : The passed in object is None.\n \"\"\"\n\n if arr_or_dtype is None:\n raise TypeError(\"Cannot deduce dtype from null object\")\n\n # fastpath\n elif isinstance(arr_or_dtype, np.dtype):\n return arr_or_dtype\n elif isinstance(arr_or_dtype, type):\n return np.dtype(arr_or_dtype)\n\n # if we have an array-like\n elif hasattr(arr_or_dtype, \"dtype\"):\n arr_or_dtype = arr_or_dtype.dtype\n\n return pandas_dtype(arr_or_dtype)\n\n\ndef _is_dtype_type(arr_or_dtype, condition) -> bool:\n \"\"\"\n Return a boolean if the condition is satisfied for the arr_or_dtype.\n\n Parameters\n ----------\n arr_or_dtype : array-like\n The array-like or dtype object whose dtype we want to extract.\n condition : callable[Union[np.dtype, ExtensionDtypeType]]\n\n Returns\n -------\n bool : if the condition is satisfied for the arr_or_dtype\n \"\"\"\n\n if arr_or_dtype is None:\n return condition(type(None))\n\n # fastpath\n if 
isinstance(arr_or_dtype, np.dtype):\n return condition(arr_or_dtype.type)\n elif isinstance(arr_or_dtype, type):\n if issubclass(arr_or_dtype, ExtensionDtype):\n arr_or_dtype = arr_or_dtype.type\n return condition(np.dtype(arr_or_dtype).type)\n\n # if we have an array-like\n if hasattr(arr_or_dtype, \"dtype\"):\n arr_or_dtype = arr_or_dtype.dtype\n\n # we are not possibly a dtype\n elif is_list_like(arr_or_dtype):\n return condition(type(None))\n\n try:\n tipo = pandas_dtype(arr_or_dtype).type\n except (TypeError, ValueError, UnicodeEncodeError):\n if is_scalar(arr_or_dtype):\n return condition(type(None))\n\n return False\n\n return condition(tipo)\n\n\ndef infer_dtype_from_object(dtype):\n \"\"\"\n Get a numpy dtype.type-style object for a dtype object.\n\n This methods also includes handling of the datetime64[ns] and\n datetime64[ns, TZ] objects.\n\n If no dtype can be found, we return ``object``.\n\n Parameters\n ----------\n dtype : dtype, type\n The dtype object whose numpy dtype.type-style\n object we want to extract.\n\n Returns\n -------\n dtype_object : The extracted numpy dtype.type-style object.\n \"\"\"\n\n if isinstance(dtype, type) and issubclass(dtype, np.generic):\n # Type object from a dtype\n return dtype\n elif isinstance(dtype, (np.dtype, ExtensionDtype)):\n # dtype object\n try:\n _validate_date_like_dtype(dtype)\n except TypeError:\n # Should still pass if we don't have a date-like\n pass\n return dtype.type\n\n try:\n dtype = pandas_dtype(dtype)\n except TypeError:\n pass\n\n if is_extension_array_dtype(dtype):\n return dtype.type\n elif isinstance(dtype, str):\n\n # TODO(jreback)\n # should deprecate these\n if dtype in [\"datetimetz\", \"datetime64tz\"]:\n return DatetimeTZDtype.type\n elif dtype in [\"period\"]:\n raise NotImplementedError\n\n if dtype == \"datetime\" or dtype == \"timedelta\":\n dtype += \"64\"\n try:\n return infer_dtype_from_object(getattr(np, dtype))\n except (AttributeError, TypeError):\n # Handles cases like _get_dtype(int) i.e.,\n # Python objects that are valid dtypes\n # (unlike user-defined types, in general)\n #\n # TypeError handles the float16 type code of 'e'\n # further handle internal types\n pass\n\n return infer_dtype_from_object(np.dtype(dtype))\n\n\ndef _validate_date_like_dtype(dtype) -> None:\n \"\"\"\n Check whether the dtype is a date-like dtype. Raises an error if invalid.\n\n Parameters\n ----------\n dtype : dtype, type\n The dtype to check.\n\n Raises\n ------\n TypeError : The dtype could not be casted to a date-like dtype.\n ValueError : The dtype is an illegal date-like dtype (e.g. 
the\n the frequency provided is too specific)\n \"\"\"\n\n try:\n typ = np.datetime_data(dtype)[0]\n except ValueError as e:\n raise TypeError(\"{error}\".format(error=e))\n if typ != \"generic\" and typ != \"ns\":\n msg = \"{name!r} is too specific of a frequency, try passing {type!r}\"\n raise ValueError(msg.format(name=dtype.name, type=dtype.type.__name__))\n\n\ndef pandas_dtype(dtype):\n \"\"\"\n Convert input into a pandas only dtype object or a numpy dtype object.\n\n Parameters\n ----------\n dtype : object to be converted\n\n Returns\n -------\n np.dtype or a pandas dtype\n\n Raises\n ------\n TypeError if not a dtype\n \"\"\"\n # short-circuit\n if isinstance(dtype, np.ndarray):\n return dtype.dtype\n elif isinstance(dtype, (np.dtype, ExtensionDtype)):\n return dtype\n\n # registered extension types\n result = registry.find(dtype)\n if result is not None:\n return result\n\n # try a numpy dtype\n # raise a consistent TypeError if failed\n try:\n npdtype = np.dtype(dtype)\n except Exception:\n # we don't want to force a repr of the non-string\n if not isinstance(dtype, str):\n raise TypeError(\"data type not understood\")\n raise TypeError(\"data type '{}' not understood\".format(dtype))\n\n # Any invalid dtype (such as pd.Timestamp) should raise an error.\n # np.dtype(invalid_type).kind = 0 for such objects. However, this will\n # also catch some valid dtypes such as object, np.object_ and 'object'\n # which we safeguard against by catching them earlier and returning\n # np.dtype(valid_dtype) before this condition is evaluated.\n if is_hashable(dtype) and dtype in [object, np.object_, \"object\", \"O\"]:\n # check hashability to avoid errors/DeprecationWarning when we get\n # here and `dtype` is an array\n return npdtype\n elif npdtype.kind == \"O\":\n raise TypeError(\"dtype '{}' not understood\".format(dtype))\n\n return npdtype\n" ]
[ [ "pandas._libs.lib.infer_dtype", "pandas.core.dtypes.dtypes.PeriodDtype.is_dtype", "scipy.sparse.issparse", "numpy.dtype", "pandas.core.dtypes.inference.is_scalar", "pandas.core.dtypes.inference.is_string_like", "pandas.core.dtypes.dtypes.DatetimeTZDtype.is_dtype", "numpy.asarray", "pandas.Categorical", "pandas.core.dtypes.inference.is_list_like", "pandas.core.dtypes.inference.is_hashable", "numpy.datetime_data", "pandas.core.dtypes.dtypes.IntervalDtype.is_dtype", "pandas.core.dtypes.dtypes.registry.find", "pandas.core.dtypes.dtypes.CategoricalDtype.is_dtype" ] ]
vijayperiasamy-eb/dd-trace-py
[ "2b0d396fc7f76582e8ffedff48933245a77ebaf2" ]
[ "tests/test_span.py" ]
[ "import mock\nimport time\n\nfrom unittest.case import SkipTest\n\nfrom ddtrace.context import Context\nfrom ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ddtrace.span import Span\nfrom ddtrace.ext import errors, priority\nfrom .base import BaseTracerTestCase\n\n\nclass SpanTestCase(BaseTracerTestCase):\n def test_ids(self):\n s = Span(tracer=None, name='span.test')\n assert s.trace_id\n assert s.span_id\n assert not s.parent_id\n\n s2 = Span(tracer=None, name='t', trace_id=1, span_id=2, parent_id=1)\n assert s2.trace_id == 1\n assert s2.span_id == 2\n assert s2.parent_id == 1\n\n def test_tags(self):\n s = Span(tracer=None, name='test.span')\n s.set_tag('a', 'a')\n s.set_tag('b', 1)\n s.set_tag('c', '1')\n d = s.to_dict()\n expected = {\n 'a': 'a',\n 'b': '1',\n 'c': '1',\n }\n assert d['meta'] == expected\n\n def test_set_valid_metrics(self):\n s = Span(tracer=None, name='test.span')\n s.set_metric('a', 0)\n s.set_metric('b', -12)\n s.set_metric('c', 12.134)\n s.set_metric('d', 1231543543265475686787869123)\n s.set_metric('e', '12.34')\n d = s.to_dict()\n expected = {\n 'a': 0,\n 'b': -12,\n 'c': 12.134,\n 'd': 1231543543265475686787869123,\n 'e': 12.34,\n }\n assert d['metrics'] == expected\n\n def test_set_invalid_metric(self):\n s = Span(tracer=None, name='test.span')\n\n invalid_metrics = [\n None,\n {},\n [],\n s,\n 'quarante-douze',\n float('nan'),\n float('inf'),\n 1j\n ]\n\n for i, m in enumerate(invalid_metrics):\n k = str(i)\n s.set_metric(k, m)\n assert s.get_metric(k) is None\n\n def test_set_numpy_metric(self):\n try:\n import numpy as np\n except ImportError:\n raise SkipTest('numpy not installed')\n s = Span(tracer=None, name='test.span')\n s.set_metric('a', np.int64(1))\n assert s.get_metric('a') == 1\n assert type(s.get_metric('a')) == float\n\n def test_tags_not_string(self):\n # ensure we can cast as strings\n class Foo(object):\n def __repr__(self):\n 1 / 0\n\n s = Span(tracer=None, name='test.span')\n s.set_tag('a', Foo())\n\n def test_finish(self):\n # ensure finish will record a span\n ctx = Context()\n s = Span(self.tracer, 'test.span', context=ctx)\n ctx.add_span(s)\n assert s.duration is None\n\n sleep = 0.05\n with s as s1:\n assert s is s1\n time.sleep(sleep)\n assert s.duration >= sleep, '%s < %s' % (s.duration, sleep)\n self.assert_span_count(1)\n\n def test_finish_no_tracer(self):\n # ensure finish works with no tracer without raising exceptions\n s = Span(tracer=None, name='test.span')\n s.finish()\n\n def test_finish_called_multiple_times(self):\n # we should only record a span the first time finish is called on it\n ctx = Context()\n s = Span(self.tracer, 'bar', context=ctx)\n ctx.add_span(s)\n s.finish()\n s.finish()\n self.assert_span_count(1)\n\n def test_finish_set_span_duration(self):\n # If set the duration on a span, the span should be recorded with this\n # duration\n s = Span(tracer=None, name='test.span')\n s.duration = 1337.0\n s.finish()\n assert s.duration == 1337.0\n\n def test_traceback_with_error(self):\n s = Span(None, 'test.span')\n try:\n 1 / 0\n except ZeroDivisionError:\n s.set_traceback()\n else:\n assert 0, 'should have failed'\n\n assert s.error\n assert 'by zero' in s.get_tag(errors.ERROR_MSG)\n assert 'ZeroDivisionError' in s.get_tag(errors.ERROR_TYPE)\n\n def test_traceback_without_error(self):\n s = Span(None, 'test.span')\n s.set_traceback()\n assert not s.error\n assert not s.get_tag(errors.ERROR_MSG)\n assert not s.get_tag(errors.ERROR_TYPE)\n assert 'in test_traceback_without_error' in 
s.get_tag(errors.ERROR_STACK)\n\n def test_ctx_mgr(self):\n s = Span(self.tracer, 'bar')\n assert not s.duration\n assert not s.error\n\n e = Exception('boo')\n try:\n with s:\n time.sleep(0.01)\n raise e\n except Exception as out:\n assert out == e\n assert s.duration > 0, s.duration\n assert s.error\n assert s.get_tag(errors.ERROR_MSG) == 'boo'\n assert 'Exception' in s.get_tag(errors.ERROR_TYPE)\n assert s.get_tag(errors.ERROR_STACK)\n\n else:\n assert 0, 'should have failed'\n\n def test_span_to_dict(self):\n s = Span(tracer=None, name='test.span', service='s', resource='r')\n s.span_type = 'foo'\n s.set_tag('a', '1')\n s.set_meta('b', '2')\n s.finish()\n\n d = s.to_dict()\n assert d\n assert d['span_id'] == s.span_id\n assert d['trace_id'] == s.trace_id\n assert d['parent_id'] == s.parent_id\n assert d['meta'] == {'a': '1', 'b': '2'}\n assert d['type'] == 'foo'\n assert d['error'] == 0\n assert type(d['error']) == int\n\n def test_span_to_dict_sub(self):\n parent = Span(tracer=None, name='test.span', service='s', resource='r')\n s = Span(tracer=None, name='test.span', service='s', resource='r')\n s._parent = parent\n s.span_type = 'foo'\n s.set_tag('a', '1')\n s.set_meta('b', '2')\n s.finish()\n\n d = s.to_dict()\n assert d\n assert d['span_id'] == s.span_id\n assert d['trace_id'] == s.trace_id\n assert d['parent_id'] == s.parent_id\n assert d['meta'] == {'a': '1', 'b': '2'}\n assert d['type'] == 'foo'\n assert d['error'] == 0\n assert type(d['error']) == int\n\n def test_span_boolean_err(self):\n s = Span(tracer=None, name='foo.bar', service='s', resource='r')\n s.error = True\n s.finish()\n\n d = s.to_dict()\n assert d\n assert d['error'] == 1\n assert type(d['error']) == int\n\n @mock.patch('ddtrace.span.log')\n def test_numeric_tags_none(self, span_log):\n s = Span(tracer=None, name='test.span')\n s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, None)\n d = s.to_dict()\n assert d\n assert 'metrics' not in d\n\n # Ensure we log a debug message\n span_log.debug.assert_called_once_with(\n 'ignoring not number metric %s:%s',\n ANALYTICS_SAMPLE_RATE_KEY,\n None,\n )\n\n def test_numeric_tags_true(self):\n s = Span(tracer=None, name='test.span')\n s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, True)\n d = s.to_dict()\n assert d\n expected = {\n ANALYTICS_SAMPLE_RATE_KEY: 1.0\n }\n assert d['metrics'] == expected\n\n def test_numeric_tags_value(self):\n s = Span(tracer=None, name='test.span')\n s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, 0.5)\n d = s.to_dict()\n assert d\n expected = {\n ANALYTICS_SAMPLE_RATE_KEY: 0.5\n }\n assert d['metrics'] == expected\n\n def test_numeric_tags_bad_value(self):\n s = Span(tracer=None, name='test.span')\n s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, 'Hello')\n d = s.to_dict()\n assert d\n assert 'metrics' not in d\n\n def test_set_tag_manual_keep(self):\n ctx = Context()\n s = Span(tracer=None, name='root.span', service='s', resource='r', context=ctx)\n\n assert s.context == ctx\n assert ctx.sampling_priority != priority.USER_KEEP\n assert s.context.sampling_priority != priority.USER_KEEP\n assert s.meta == dict()\n\n s.set_tag('manual.keep')\n assert ctx.sampling_priority == priority.USER_KEEP\n assert s.context.sampling_priority == priority.USER_KEEP\n assert s.meta == dict()\n\n ctx.sampling_priority = priority.AUTO_REJECT\n assert ctx.sampling_priority == priority.AUTO_REJECT\n assert s.context.sampling_priority == priority.AUTO_REJECT\n assert s.meta == dict()\n\n s.set_tag('manual.keep')\n assert ctx.sampling_priority == priority.USER_KEEP\n assert s.context.sampling_priority == 
priority.USER_KEEP\n assert s.meta == dict()\n\n def test_set_tag_manual_drop(self):\n ctx = Context()\n s = Span(tracer=None, name='root.span', service='s', resource='r', context=ctx)\n\n assert s.context == ctx\n assert ctx.sampling_priority != priority.USER_REJECT\n assert s.context.sampling_priority != priority.USER_REJECT\n assert s.meta == dict()\n\n s.set_tag('manual.drop')\n assert ctx.sampling_priority == priority.USER_REJECT\n assert s.context.sampling_priority == priority.USER_REJECT\n assert s.meta == dict()\n\n ctx.sampling_priority = priority.AUTO_REJECT\n assert ctx.sampling_priority == priority.AUTO_REJECT\n assert s.context.sampling_priority == priority.AUTO_REJECT\n assert s.meta == dict()\n\n s.set_tag('manual.drop')\n assert ctx.sampling_priority == priority.USER_REJECT\n assert s.context.sampling_priority == priority.USER_REJECT\n assert s.meta == dict()\n\n def test_set_tag_none(self):\n s = Span(tracer=None, name='root.span', service='s', resource='r')\n assert s.meta == dict()\n\n s.set_tag('custom.key', 100)\n\n assert s.meta == {'custom.key': '100'}\n\n s.set_tag('custom.key', None)\n\n assert s.meta == {'custom.key': 'None'}\n\n def test_duration_zero(self):\n s = Span(tracer=None, name='foo.bar', service='s', resource='r', start=123)\n s.finish(finish_time=123)\n assert s.duration_ns == 0\n assert s.duration == 0\n\n def test_start_int(self):\n s = Span(tracer=None, name='foo.bar', service='s', resource='r', start=123)\n assert s.start == 123\n assert s.start_ns == 123000000000\n\n s = Span(tracer=None, name='foo.bar', service='s', resource='r', start=123.123)\n assert s.start == 123.123\n assert s.start_ns == 123123000000\n\n s = Span(tracer=None, name='foo.bar', service='s', resource='r', start=123.123)\n s.start = 234567890.0\n assert s.start == 234567890\n assert s.start_ns == 234567890000000000\n\n def test_duration_int(self):\n s = Span(tracer=None, name='foo.bar', service='s', resource='r')\n s.finish()\n assert isinstance(s.duration_ns, int)\n assert isinstance(s.duration, float)\n\n s = Span(tracer=None, name='foo.bar', service='s', resource='r', start=123)\n s.finish(finish_time=123.2)\n assert s.duration_ns == 200000000\n assert s.duration == 0.2\n\n s = Span(tracer=None, name='foo.bar', service='s', resource='r', start=123.1)\n s.finish(finish_time=123.2)\n assert s.duration_ns == 100000000\n assert s.duration == 0.1\n\n s = Span(tracer=None, name='foo.bar', service='s', resource='r', start=122)\n s.finish(finish_time=123)\n assert s.duration_ns == 1000000000\n assert s.duration == 1\n" ]
[ [ "numpy.int64" ] ]
nifarn/PyMarlin
[ "ea1f5f927aa85112ecebc206d53b5c3ee65704fa" ]
[ "pymarlin/plugins/hf_ner/module_classes.py" ]
[ "import os\nimport dataclasses\nimport numpy as np\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import OneCycleLR\n\nfrom pymarlin.core import module_interface, data_interface\nfrom transformers import AutoModelForTokenClassification\n\nfrom pymarlin.utils.stats import global_stats\nfrom pymarlin.utils.logger.logging_utils import getlogger\nfrom .sequence_labelling_metrics import get_ner_seq_metric\nfrom pymarlin.utils.distributed import rank_zero_only\nfrom pymarlin.plugins.hf_ner.data_classes import NERDataInterface\nfrom pymarlin.plugins import PluginModuleInterface\n\nlogger = getlogger(__name__, \"DEBUG\")\n\n\[email protected]\nclass ModelArguments:\n model_name: \"bert\"\n encoder_key: \"bert\"\n hf_model: \"bert-base-uncased\"\n model_file: \"pytorch_model.bin\"\n model_config_file: \"config.json\"\n model_path: None\n model_config_path: None\n tokenizer_path: None\n\[email protected]\nclass ModuleInterfaceArguments:\n output_dir: None\n max_lr: 0.00004 # Maximum learning rate.\n warmup_prop: 0.1 # % of steps\n has_labels: True\n max_seq_len: 128\n pad_label_id: -100\n label_all_tokens: False\n model_args: ModelArguments = ModelArguments\n\nclass NERModule(PluginModuleInterface):\n \"\"\"NER Task specific ModuleInterface used with a trainer.\n The `data` and `model` are required properties and must be set.\n\n Args:\n ModuleInterfaceArguments : contains module interface arguments , i.e. max learning rate,\n warmup propotion, type of trainer , etc. Also includes modelArguments class as attribute\n which include model specific arguments such as hfmodel name , modep path , model file name , etc\n \"\"\"\n\n def __init__(self, args, data: NERDataInterface):\n super().__init__()\n self.args = args\n self.metric_func = get_ner_seq_metric\n self.data = data\n self.auto_setup(AutoModelForTokenClassification)\n \n \n def get_train_dataloader(self, sampler: torch.utils.data.Sampler, batch_size: int):\n train_ds = self.data.get_train_dataset()\n logger.info(f\"Training samples = {len(train_ds)}\")\n dl = DataLoader(\n train_ds,\n batch_size=batch_size,\n collate_fn=self.collate_func,\n sampler=sampler(train_ds),\n )\n return dl\n\n def get_val_dataloaders(self, sampler: torch.utils.data.Sampler, batch_size: int):\n val_ds = self.data.get_val_dataset()\n logger.info(f\"Validation samples = {len(val_ds)}\")\n dl = DataLoader(\n val_ds,\n batch_size=batch_size,\n collate_fn=self.collate_func,\n sampler=sampler(val_ds),\n )\n return dl\n\n def collate_func(self,batch):\n sentence, labels = zip(*batch)\n sentence, labels = list(sentence), list(labels)\n\n tokenized_inputs = self.tokenizer(\n sentence,\n padding=\"max_length\",\n return_token_type_ids=True,\n return_tensors=\"pt\",\n truncation=True,\n is_split_into_words=True,\n max_length=self.args.max_seq_len,\n )\n\n label_ids = []\n for i in range(len(sentence)): # for each sentence in input \n if self.args.has_labels:\n current_label_ids = []\n current_label = labels[i]\n word_ids = tokenized_inputs.word_ids(i)\n prev_word_idx = None # To track subwords\n for word_idx in word_ids:\n if word_idx is None: # special tokens have None\n current_label_ids.append(self.args.pad_label_id)\n elif (word_idx != prev_word_idx): # First part of a word always gets the label\n current_label_ids.append(self.data.label_map[current_label[word_idx]])\n else: # other subword tokens get the same label or ignore index, controlled by flag label_all_tokens\n current_label_ids.append(\n 
self.data.label_map[current_label[word_idx]]\n if self.args.label_all_tokens\n else self.args.pad_label_id\n )\n prev_word_idx = word_idx\n label_ids.append(current_label_ids)\n\n tokenized_inputs['labels'] = torch.tensor(label_ids)\n return tokenized_inputs\n\n def get_optimizers_schedulers(\n self, estimated_global_steps_per_epoch: int, epochs: int\n ):\n self.optimizer = Adam(self.model.parameters(), self.args.max_lr)\n self.schedulers = OneCycleLR(\n self.optimizer,\n max_lr=self.args.max_lr,\n steps_per_epoch=estimated_global_steps_per_epoch,\n epochs=epochs,\n anneal_strategy=\"linear\",\n pct_start=self.args.warmup_prop,\n div_factor=1e7, # initial lr ~0\n final_div_factor=1e10, # final lr ~0\n )\n return [self.optimizer], [self.schedulers]\n\n def _inputs_to_device(self, batch, device):\n inputs = {}\n for k, v in batch.items():\n if v is not None:\n inputs[k] = v.to(device)\n return inputs\n\n def train_step(self, global_step, batch, device):\n batch = self._inputs_to_device(batch, device)\n outputs = self.model.forward(**batch)\n loss = outputs.loss\n return loss\n\n def val_step(self, global_step, batch, device):\n batch = self._inputs_to_device(batch, device)\n outputs = self.model.forward(**batch)\n if self.args.has_labels:\n return outputs.loss, outputs.logits, batch[\"labels\"]\n else:\n return outputs.logits\n\n def on_end_train_step(self, global_step, train_loss):\n global_stats.update(\"lr\", self.optimizer.param_groups[0][\"lr\"], frequent=True)\n\n @rank_zero_only\n def on_end_val_epoch(self, global_step, *inputs, key=\"default\"):\n if self.args.has_labels and len(inputs) > 0:\n loss, logits, labels = inputs\n\n logits = logits.cpu().numpy()\n logits = logits.reshape(-1, logits.shape[-1])\n predictions = np.argmax(logits, axis=1)\n\n label_ids = labels.to(\"cpu\").numpy().reshape(-1)\n\n str_preds = [\n self.data.get_labels()[int(p)]\n for (p, l) in zip(predictions, label_ids)\n if l != self.args.pad_label_id\n ]\n str_labels = [\n self.data.get_labels()[int(l)]\n for (p, l) in zip(predictions, label_ids)\n if l != self.args.pad_label_id\n ]\n\n metrics = self.metric_func(str_labels, str_preds)\n for k in metrics:\n global_stats.update(k, metrics[k])\n else:\n logger.info(\n \"Either validation data was not provided OR no labels were provided to compute metrics.\"\n )\n" ]
[ [ "torch.tensor", "numpy.argmax", "torch.optim.lr_scheduler.OneCycleLR" ] ]
ChristopherGS/rolltec_motion
[ "45105ddd0a8eb1f4eb5075b1dd807cbbc3b49505" ]
[ "ML_Sandbox/data_prep.py" ]
[ "import pandas as pd\nimport numpy as np\n\nfrom manage_state import set_state, set_stand_state\nfrom utilities import combine_csv, concat_data, blank_filter, resolve_acc_gyro, resolve_acc_gyro_labels\nfrom rolltec_features import create_features\n\ndef combine_state_features(directory, state, window=40, stand=0):\n \"\"\"\n convenience method to combine three steps in one function:\n (1) combine multiple csv files, (2) set their movement state for training,\n (3) detect any instances of standing up, (4) add features\n \"\"\"\n \n combined_data = combine_csv(directory)\n combined_data_updated = set_state(combined_data, state)\n combined_data_updated2 = set_stand_state(combined_data_updated, stand)\n feature_training_data = create_features(combined_data_updated2, window)\n ready_training_data = set_state(feature_training_data, state)\n \n return ready_training_data\n \n\ndef prep(window=40):\n \"\"\"prepare the raw sensor data\n the argument window determines the size of the sliding selection window\n for the time series. Given that data has been collected at a frequency of \n 25Hz, a sliding window of 40 will give you combined data windows \n of 1.6 seconds.\n \"\"\"\n\n #1 Your mount\n ymount_td = combine_state_features('your_mount_raw_data', 'your_mount', window, 0)\n #2 Your side control\n ysc_td = combine_state_features('your_side_control_raw_data', 'your_side_control', window, 0)\n #3 Your closed guard\n ycg_td = combine_state_features('your_closed_guard_raw_data', 'your_closed_guard', window, 0)\n #4 Your back control\n ybc_td = combine_state_features('your_back_control_raw_data', 'your_back_control', window, 0)\n #5 Opponent mount or opponent side control\n omountsc_td = combine_state_features('opponent_mount_and_opponent_side_control_raw_data', 'opponent_mount_or_sc', window, 0)\n #6 Opponent closed guard\n ocg_td = combine_state_features('opponent_closed_guard_raw_data', 'opponent_closed_guard', window, 0)\n #7 Opponent back control\n obc_td = combine_state_features('opponent_back_control_raw_data', 'opponent_back_control', window, 0)\n #8 \"Non jiu-jitsu\" motion\n nonjj_td = combine_state_features('non_jj_raw_data', 'non_jj', window, 0)\n #9 \"stand up\" motion\n stand_up_td = combine_state_features('standing_up_raw_data', 'opponent_closed_guard', window, 1)\n\n training_data = concat_data([ymount_td, ysc_td, ycg_td, ybc_td, omountsc_td, ocg_td, obc_td, nonjj_td, stand_up_td])\n # remove NaN\n training_data = blank_filter(training_data)\n return training_data\n \ndef prep_test(test_file, real_test=False):\n \"\"\" prepares test data to check for algorithm accuracy\n so does not set the state\n \"\"\"\n \n el_file = 'data/test_cases/' + test_file\n df = pd.DataFrame()\n df = pd.read_csv(el_file, index_col=None, header=0)\n\n if real_test == True:\n df = resolve_acc_gyro_labels(df)\n else:\n df = resolve_acc_gyro(df)\n\n df = create_features(df, _window=40, test=True, label_test=real_test)\n test_data = blank_filter(df)\n\n return test_data" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
gadial/qiskit-terra
[ "0fc83f44a6e80969875c738b2cee7bc33223e45f" ]
[ "test/python/circuit/library/test_qft.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Test library of QFT circuits.\"\"\"\n\nimport unittest\nimport numpy as np\nfrom ddt import ddt, data, unpack\n\nfrom qiskit.test.base import QiskitTestCase\nfrom qiskit import transpile\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit.library import QFT\nfrom qiskit.quantum_info import Operator\n\n\n@ddt\nclass TestQFT(QiskitTestCase):\n \"\"\"Test the QFT.\"\"\"\n\n def assertQFTIsCorrect(self, qft, num_qubits=None, inverse=False, add_swaps_at_end=False):\n \"\"\"Assert that the QFT circuit produces the correct matrix.\n\n Can be provided with an explicit number of qubits, if None is provided the number\n of qubits is set to ``qft.num_qubits``.\n \"\"\"\n if add_swaps_at_end:\n circuit = QuantumCircuit(*qft.qregs)\n for i in range(circuit.num_qubits // 2):\n circuit.swap(i, circuit.num_qubits - i - 1)\n\n qft = qft + circuit\n\n simulated = Operator(qft)\n\n num_qubits = num_qubits or qft.num_qubits\n expected = np.empty((2 ** num_qubits, 2 ** num_qubits), dtype=complex)\n for i in range(2 ** num_qubits):\n i_index = int(bin(i)[2:].zfill(num_qubits), 2)\n for j in range(i, 2 ** num_qubits):\n entry = np.exp(2 * np.pi * 1j * i * j / 2 ** num_qubits) / 2 ** (num_qubits / 2)\n j_index = int(bin(j)[2:].zfill(num_qubits), 2)\n expected[i_index, j_index] = entry\n if i != j:\n expected[j_index, i_index] = entry\n\n if inverse:\n expected = np.conj(expected)\n\n expected = Operator(expected)\n\n self.assertTrue(expected.equiv(simulated))\n\n @data(True, False)\n def test_qft_matrix(self, inverse):\n \"\"\"Test the matrix representation of the QFT.\"\"\"\n num_qubits = 5\n qft = QFT(num_qubits)\n if inverse:\n qft = qft.inverse()\n self.assertQFTIsCorrect(qft, inverse=inverse)\n\n def test_qft_is_inverse(self):\n \"\"\"Test the is_inverse() method.\"\"\"\n qft = QFT(2)\n\n with self.subTest(msg='initial object is not inverse'):\n self.assertFalse(qft.is_inverse())\n\n qft = qft.inverse()\n with self.subTest(msg='inverted'):\n self.assertTrue(qft.is_inverse())\n\n qft = qft.inverse()\n with self.subTest(msg='re-inverted'):\n self.assertFalse(qft.is_inverse())\n\n def test_qft_mutability(self):\n \"\"\"Test the mutability of the QFT circuit.\"\"\"\n qft = QFT()\n\n with self.subTest(msg='empty initialization'):\n self.assertEqual(qft.num_qubits, 0)\n self.assertEqual(qft.data, [])\n\n with self.subTest(msg='changing number of qubits'):\n qft.num_qubits = 3\n self.assertQFTIsCorrect(qft, num_qubits=3)\n\n with self.subTest(msg='test diminishing the number of qubits'):\n qft.num_qubits = 1\n self.assertQFTIsCorrect(qft, num_qubits=1)\n\n with self.subTest(msg='test with swaps'):\n qft.num_qubits = 4\n qft.do_swaps = False\n self.assertQFTIsCorrect(qft, add_swaps_at_end=True)\n\n with self.subTest(msg='inverse'):\n qft = qft.inverse()\n qft.do_swaps = True\n self.assertQFTIsCorrect(qft, inverse=True)\n\n with self.subTest(msg='double inverse'):\n qft = qft.inverse()\n self.assertQFTIsCorrect(qft)\n\n with self.subTest(msg='set approximation'):\n qft.approximation_degree = 2\n qft.do_swaps = 
True\n with self.assertRaises(AssertionError):\n self.assertQFTIsCorrect(qft)\n\n @data((4, 0, False),\n (3, 0, True),\n (6, 2, False),\n (4, 5, True),\n )\n @unpack\n def test_qft_num_gates(self, num_qubits, approximation_degree, insert_barriers):\n \"\"\"Test the number of gates in the QFT and the approximated QFT.\"\"\"\n basis_gates = ['h', 'swap', 'cu1']\n\n qft = QFT(num_qubits, approximation_degree=approximation_degree,\n insert_barriers=insert_barriers)\n ops = transpile(qft, basis_gates=basis_gates).count_ops()\n\n with self.subTest(msg='assert H count'):\n self.assertEqual(ops['h'], num_qubits)\n\n with self.subTest(msg='assert swap count'):\n self.assertEqual(ops['swap'], num_qubits // 2)\n\n with self.subTest(msg='assert CU1 count'):\n expected = sum(max(0, min(num_qubits - 1 - k, num_qubits - 1 - approximation_degree))\n for k in range(num_qubits))\n self.assertEqual(ops.get('cu1', 0), expected)\n\n with self.subTest(msg='assert barrier count'):\n expected = qft.num_qubits if insert_barriers else 0\n self.assertEqual(ops.get('barrier', 0), expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.conj", "numpy.empty", "numpy.exp" ] ]
mattp256/wheele
[ "724e2df031017051085000ac49849e4bb03b69cb" ]
[ "cone_finder/scripts/cone_pose_trainer.py" ]
[ "#!/usr/bin/env python\nimport cv2\nimport numpy as np\nimport math\nimport csv\nimport rospy\nfrom sensor_msgs.msg import Image\nfrom geometry_msgs.msg import PoseStamped\nfrom std_msgs.msg import Float32\nfrom cv_bridge import CvBridge, CvBridgeError\nimport sys\nimport threading\n\nfrom dynamic_reconfigure.server import Server\nfrom cone_detector.cfg import ConeConfig\n\nrect_w = 3;\nrect_h = 6;\nnoise_se_w = 3;\nnoise_se_h = 7;\nfill_se_w = 3;\nfill_se_h = 10;\n\nclass ConeTrainer:\n def __init__(self):\n self.cone_file = open('/home/karl/temp_cone_data.csv',mode='w')\n self.writer = csv.writer(self.cone_file, delimiter=',')\n self.writer.writerow(['img_num','x','y','dist','angle_rad','pix_col_norm','pix_row_norm','pix_height_norm'])\n self.writer.writerow([0, 7.1, 3.1, 7.9, 0.1, 0.3, 0.4, 0.5])\n \n self.img_num = 0\n self.cone_truth_x = 0.0\n self.cone_truth_y = 0.0\n self.cone_truth_dist = 0.0\n self.cone_truth_angle = 0.0\n \n self.node_name = \"Cone Trainer\"\n self.thread_lock = threading.Lock()\n self.sub_image = rospy.Subscriber(\"/camera/image_raw\", Image, self.cbImage, queue_size=1)\n self.sub_cone_truth = rospy.Subscriber(\"base_cone_pose\", PoseStamped, self.cone_truth_callback, queue_size=10)\n self.pub_image = rospy.Publisher(\"cone_img\", Image, queue_size=1)\n self.pub_hsv_filt = rospy.Publisher(\"hsv_filt\", Image, queue_size=1)\n self.pub_cone_pose = rospy.Publisher(\"raw_cone_pose\", PoseStamped, queue_size = 5)\n self.bridge = CvBridge()\n \n self.config = None\n self.srv = Server(ConeConfig, self.config_callback)\n\n rospy.loginfo(\"Initialized Cone Trainer\")\n \n def __del__(self):\n self.cone_file.close()\n\n def cone_truth_callback(self, data):\n p = data.pose.position\n x = p.x\n y = p.y\n self.cone_truth_x = x\n self.cone_truth_y = y\n self.cone_truth_dist = np.sqrt(x**2 + y**2)\n self.cone_truth_angle = math.atan2(y,x)\n\n def config_callback(self, config, level):\n rospy.loginfo(\"\"\"Reconfigure Request: {hue_min}, {hue_max}, {double_param},\\ \n {str_param}, {bool_param}, {size}\"\"\".format(**config))\n self.config = config\n return config\n\n def cbImage(self,image_msg):\n thread = threading.Thread(target=self.processImage,args=(image_msg,))\n thread.setDaemon(True)\n thread.start()\n\n def processImage(self, image_msg):\n #rospy.loginfo(\"processImage\")\n blob_found = False\n if not self.thread_lock.acquire(False):\n return\n image_cv = self.bridge.imgmsg_to_cv2(image_msg)\n img_h, img_w, img_d = image_cv.shape\n #print 'image cb, img_w, img_h: ', img_w, img_h\n \n if(not self.config == None):\n \n CONE_MIN = np.array([self.config[\"hue_min\"], self.config[\"sat_min\"], self.config[\"val_min\"]],np.uint8) #75, 86\n CONE_MAX = np.array([self.config[\"hue_max\"], self.config[\"sat_max\"],self.config[\"val_max\"]],np.uint8)\n hsv = cv2.cvtColor(image_cv,cv2.COLOR_BGR2HSV)\n hsv_filt = cv2.inRange(hsv, CONE_MIN, CONE_MAX)\n \n #Open binary image\n rect_se = cv2.getStructuringElement(cv2.MORPH_RECT,(rect_w,rect_h))\n noise_se = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(noise_se_w,noise_se_h))\n fill_se = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(fill_se_w,fill_se_h))\n #erosion then dilation, removes noise in background\n opening = cv2.morphologyEx(hsv_filt,cv2.MORPH_OPEN,noise_se)\n #4.Closes the Thresholded Image\n #dilation then erosion, fills holes in foreground\n closing = cv2.morphologyEx(opening,cv2.MORPH_CLOSE, fill_se)\n open2 = cv2.morphologyEx(closing,cv2.MORPH_OPEN, rect_se)\n \n try:\n 
self.pub_hsv_filt.publish(self.bridge.cv2_to_imgmsg(open2,\"mono8\"))\n except CvBridgeError as e:\n print(e)\n \n _, contours, hierarchy = cv2.findContours(open2,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) #python 2 vs 3\n # finding contour with maximum area and store it as best_cnt\n max_area = 0\n for cnt in contours:\n area = cv2.contourArea(cnt)\n pts = cnt[:,0]\n x = pts[:,0]\n y = pts[:,1]\n cnt_height = max(y)-min(y)\n cnt_width = max(x)-min(x)\n #Longest Distance between 2 points/area\n if area > max_area and cnt_height/cnt_width > 0.5:# and cnt_height < 40 and cnt_width < 30:\n max_area = area\n best_cnt = cnt\n blob_found = True\n best_height = cnt_height\n\n # finding centroids of best_cnt and draw a circle there\n if(blob_found):\n if(best_cnt.ndim == 3):\n M = cv2.moments(best_cnt)\n cx,cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])\n cv2.circle(image_cv,(cx,cy),5,255,-1)\n #rospy.loginfo(\"Cone Found at pixel x,y: %d, %d\",int(cx),int(cy))\n try:\n self.pub_image.publish(self.bridge.cv2_to_imgmsg(image_cv,\"bgr8\"))\n except CvBridgeError as e:\n print(e)\n \n self.writer.writerow([self.img_num, self.cone_truth_x, self.cone_truth_y, self.cone_truth_dist, self.cone_truth_angle,\n (cx-img_w/2.0)/float(img_w), (cy-img_h/2.0)/float(img_h), best_height/float(img_h)])\n px_norm = (cx-img_w/2.0)/float(img_w)\n py_norm = (cy-img_h/2.0)/float(img_h)\n ph_norm = best_height/float(img_h)\n ideal_py_norm = -0.15\n \n local_x = 0.5/ph_norm\n if(local_x < 6.0 and abs(py_norm-ideal_py_norm) < 0.05):\n local_y = -0.85 * local_x * px_norm\n cone_pose = PoseStamped()\n cone_pose.header.frame_id = \"base_link\"\n cone_pose.header.stamp = rospy.Time.now()\n cone_pose.pose.orientation.w = 1.0\n cone_pose.pose.position.x = local_x\n cone_pose.pose.position.y = local_y\n self.pub_cone_pose.publish(cone_pose)\n \n #rospy.loginfo(\"Cone Search Done\")\n self.thread_lock.release()\n self.img_num += 1\n\n\nif __name__==\"__main__\": \n rospy.init_node('cone_trainer')\n cone_trainer = ConeTrainer()\n rospy.spin()\n" ]
[ [ "numpy.sqrt", "numpy.array" ] ]
jason-sa/amazon_product_trend_classification
[ "d73b94338354bfdf1d6e83942560d0f95716ecd6" ]
[ "py_files/AmazonReviews.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom datetime import datetime, timedelta\nimport os\nimport pickle\nimport re\nfrom collections import defaultdict\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix, roc_auc_score, roc_curve\nfrom nltk.stem import SnowballStemmer\n\nimport matplotlib.pyplot as plt\n\nclass AmazonReviews():\n ''' Class for reading Amazon review data and building a ML model to predict whether or not a product\n will trend based on a customer review. The review data is sourced from (https://s3.amazonaws.com/amazon-reviews-pds/readme.html).\n\n date_filter: DataFrame \n Filters the raw Amazon review data\n \n reviews_df: DataFrame\n Filtered data frame of the Amazon review data\n \n reviews_selected_df: DataFrame\n Filtered reviews_df for the time window to calculate the trend score\n \n product_trend_df: DataFrame\n Output of the trend calculation process and can analyze whether the trend score is calcualted correctly\n \n obs: DataFrame\n Entire set of observations the model will be trained and tested upon. \n \n X: np.array\n Array for sklearn interface representing the feature space. \n \n y: np.array\n Array for sklearn interface representing the target.\n \n X_train: np.array \n Array for the sklearn interface representing the training feature space. \n \n X_test: np.array\n Array for the sklearn interface representing the testing feature space.\n \n y_train: np.array\n Array for the sklearn interface representing the training target.\n \n y_test: np.array\n Array for the sklearn interface representing the testing target.\n \n results: DataFrame\n Stores the results of each model. DataFrame consists of accuracy, precision, recall, F1, and AUC.\n \n y_scores: defaultdict \n Dictionary storing the target probabilities for each model.\n '''\n data_path = '../data/'\n RANDOM_STATE = 42\n\n\n def __init__(self, date_filter=datetime(2014,1,1)):# should add a flag to force to read from file\n ''' Initalizes an AmazonReview instance\n\n date_filter: (optional) \n If None, then date_filter will be set to 2014-01-01\n '''\n self.date_filter = date_filter\n self.results = pd.DataFrame(columns=['Precision', 'Recall', 'F1', 'Accuracy','AUC'])\n self.y_scores = defaultdict(np.ndarray)\n\n def load_data(self, path):\n ''' Loads the AmazonReview data\n\n path: \n File path to the tab separated Amazon Review data (https://s3.amazonaws.com/amazon-reviews-pds/readme.html)\n '''\n # only load from file if pickle does not exist\n i = path.rfind('/')\n f = self.data_path + path[i+1:] + '.pkl'\n\n if os.path.isfile(f):\n self.reviews_df = pd.read_pickle(f)\n print('Read from pickle...')\n else: # 0.0955% of lines were not read due to errors\n self.reviews_df = pd.read_csv(path, sep='\\t', error_bad_lines=False)\n \n with open(path, 'r') as f:\n lines = f.readlines()\n\n print()\n print(f'{1-self.reviews_df.shape[0] / len(lines):.4%} lines were not read due to data errors.')\n print()\n\n self.reviews_df['review_date'] = pd.to_datetime(self.reviews_df['review_date'], format='%Y-%m-%d')\n self.reviews_df.to_pickle('../data/amazon_reviews_us_Toys_v1_00.tsv.pkl')\n print('Saved to pickle...')\n\n # save data as pickle to reload if necessary\n self.reviews_df = self.reviews_df[self.reviews_df['review_date'] >= self.date_filter]\n \n def calc_trend_score(self, rating_power=1.5, review_days = 30, trend_percent = .99):\n ''' Calcualtes the trend scored defined as tanh( (review 
proportion * mean rating) / std rating ).\n The star rating can be smoothed by using rating_power. This will result in star_rating = star_rating ** rating_power.\n\n rating_power: default 1.5 \n Smooths the star rating scale 1-5 to star_rating ** rating_power\n \n review_time: default 30\n The trend score is calculated over the last n days since the last reivew. The default 30 days means\n all reviews that occured 30 days prior to the last review for the product are included in the trend score calcualtion. \n\n trend_percent: default .99\n Expected values are (0,1). The precentile cut-off for what is trending or not. 99 meands the top 1% of the products will be identified as trending\n '''\n # smooth the star rating\n self.reviews_df['adj_star_rating'] = self.reviews_df['star_rating'] ** rating_power\n\n # create data frame for all reviews 'review_days' days from the first review\n first_review_df = self.reviews_df.groupby('product_id')['review_date'].min().reset_index()\n self.reviews_df = self.reviews_df.merge(first_review_df, how='inner', on='product_id')\n \n # drop max_review_date to avoid renaming\n self.reviews_df.drop(columns='min_review_date', errors='ignore', inplace=True)\n self.reviews_df.rename(columns={'review_date_x': 'review_date', 'review_date_y': 'min_review_date'}, inplace=True)\n self.reviews_selected_df = self.reviews_df[\n (self.reviews_df['review_date'] - self.reviews_df['min_review_date']) \n >= timedelta(days=review_days)\n ]\n \n # calculate the review count, avg, and std star rating\n self.product_trend_df = self.reviews_selected_df.groupby('product_id')['adj_star_rating'].agg(['count','median','std'])\n self.product_trend_df['orig_std'] = self.product_trend_df['std']\n\n # set std with NA as the min std of the data set\n self.product_trend_df.loc[self.product_trend_df['std'] == 0, 'std'] = np.nan\n na_std = self.product_trend_df['std'].min()\n self.product_trend_df.fillna(na_std, inplace=True)\n\n # calcualte review success\n # total_reviews = self.product_trend_df['count'].sum()\n self.product_trend_df['review_success'] = (\n ( self.product_trend_df['count'] / review_days * self.product_trend_df['median'])\n / self.product_trend_df['std']\n )\n\n # calcualte the score and set the trend or not decision variable\n self.product_trend_df['trend_score'] = np.tanh(self.product_trend_df['review_success'])\n trend_cutoff = self.product_trend_df['trend_score'].quantile(trend_percent)\n self.product_trend_df['trend'] = (self.product_trend_df['trend_score'] >= trend_cutoff).astype(int)\n\n def create_observations(self):\n ''' Creates the observation data set containing the first review and the unsupervised topic assigned.\n\n Creates obs data frame (product_id, review_date, review_id, review_body, trend, star rating). \n The obs data frame combines the first review with the product trend. 
If a review body is empty, then the product is dropped.\n '''\n\n # get all reviews which appeared in the first day of the horizon\n first_review_day = self.reviews_selected_df.groupby('product_id')['review_date'].min().reset_index()\n first_review_day = first_review_day.merge(\n self.reviews_selected_df.loc[:,['review_id', 'product_id', 'review_date', 'review_body', 'star_rating']],\n how = 'inner',\n on = ['review_date', 'product_id']\n )\n # print(first_review_day.head())\n\n # get only one reivew if many occured on the first day\n first_review = first_review_day.groupby('product_id')['review_id'].head(1).reset_index()\n # print(first_review.head())\n self.obs = first_review_day.merge(\n first_review,\n how = 'inner',\n on = ['review_id']\n )\n\n self.obs = self.obs.merge(\n self.product_trend_df.reset_index().loc[:,['product_id', 'trend']],\n how = 'inner',\n on = 'product_id'\n )\n\n self.obs.drop(columns='index', inplace=True)\n self.obs.dropna(inplace=True)\n \n def create_train_test_split(self):\n ''' Cleans the review body text by removing digits and underscores. Splits obs into X and y. Creates dictionaries to hold the train/test data sets, and performs an inital split.\n '''\n\n self.X = (self.obs.review_body\n .str.replace(r\"\"\"\\w*\\d\\w*\"\"\", ' ') # remove digits\n .str.replace('_', ' ') # remove underscores\n )\n\n self.y = reduced_obs.trend\n\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(\n self.X, \n self.y, \n test_size = 0.25, \n stratify=self.y,\n random_state = self.RANDOM_STATE)\n\n\n def log_score(self, y_true, y_score, run_name, prob_cutoff = 0.5):\n ''' Logs the related classification metrics for the training dataset.\n\n y_true: numpy.aray (nsamples,)\n Array of the actual classification.\n\n y_score: numpy.array(nsamples,)\n Probablity array from the trained model.\n\n run_name: string\n Name for the model being scored.\n\n prob_cutoff: float\n Probability cutoff for calcualting the confustion matrix related metrics \n\n '''\n # log scores\n y_score_decision = (y_score >= 0.5).astype(int)\n\n run_results = {\n 'Precision': precision_score(y_true, y_score_decision),\n 'Recall': recall_score(y_true, y_score_decision),\n 'F1': f1_score(y_true, y_score_decision),\n 'Accuracy': accuracy_score(y_true, y_score_decision),\n 'AUC': roc_auc_score(y_true, y_score)\n }\n\n self.results.drop(index=run_name, errors='ignore', inplace=True)\n self.results = self.results.append(pd.DataFrame(run_results, index=[run_name]))\n\n # save y_score for later calculations\n self.y_scores[run_name] = y_score\n\n def plot_roc_curve(self):\n ''' Creates a ROC curve plot for all models which have been logged.\n '''\n plt.figure(figsize=(6,6))\n plt.plot([0,1],[0,1])\n\n for model, y_probs in self.y_scores.items():\n fpr, tpr,_ = roc_curve(self.y_train, y_probs)\n plt.plot(fpr,tpr, label=model)\n \n plt.legend()\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n\n" ]
[ [ "pandas.read_pickle", "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.figure", "sklearn.metrics.roc_curve", "pandas.DataFrame", "matplotlib.pyplot.xlabel", "sklearn.metrics.f1_score", "sklearn.metrics.accuracy_score", "pandas.to_datetime", "sklearn.metrics.precision_score", "matplotlib.pyplot.ylabel", "sklearn.metrics.recall_score", "sklearn.metrics.roc_auc_score", "matplotlib.pyplot.plot", "sklearn.model_selection.train_test_split", "numpy.tanh" ] ]
eczy/Box-World
[ "228f06d07b3cf95e29a6f49b9abec89a612e675b" ]
[ "box_world_env.py" ]
[ "import gym\nfrom gym.spaces.discrete import Discrete\nfrom gym.spaces import Box\n\nimport matplotlib.pyplot as plt\nfrom collections import deque\n\nfrom .boxworld_gen import *\n\nclass BoxWorld(gym.Env):\n \"\"\"Boxworld representation\n Args:\n n (int): Size of the field (n x n)\n goal_length (int): Number of keys to collect to solve the level\n num_distractor (int): Number of distractor trajectories\n distractor_length (int): Number of distractor keys in each distractor trajectory\n max_steps (int): Maximum number of env step for a given level\n collect_key (bool): If true, a key is collected immediately when its corresponding lock is opened\n world: an existing level. If None, generates a new level by calling the world_gen() function\n \"\"\"\n\n def __init__(self, n, goal_length, num_distractor, distractor_length, max_steps=10**6, collect_key=True, world=None):\n self.goal_length = goal_length\n self.num_distractor = num_distractor\n self.distractor_length = distractor_length\n self.n = n\n self.num_pairs = goal_length - 1 + distractor_length * num_distractor\n self.collect_key = collect_key # if True, keys are collected immediately when available\n\n # Penalties and Rewards\n self.step_cost = 0\n self.reward_gem = 10\n self.reward_key = 1\n self.reward_distractor = -1\n\n # Other Settings\n self.viewer = None\n self.max_steps = max_steps\n self.action_space = Discrete(len(ACTION_LOOKUP))\n self.observation_space = Box(low=0, high=255, shape=(n+2, n+2, 3), dtype=np.uint8)\n\n # Game initialization\n self.owned_key = [220, 220, 220]\n\n self.np_random_seed = None\n self.reset(world)\n\n self.num_env_steps = 0\n self.episode_reward = 0\n\n self.last_frames = deque(maxlen=3)\n\n def seed(self, seed=None):\n self.np_random_seed = seed\n return [seed]\n\n def save(self):\n np.save('box_world.npy', self.world)\n\n def step(self, action):\n\n change = CHANGE_COORDINATES[action]\n new_position = self.player_position + change\n current_position = self.player_position.copy()\n\n self.num_env_steps += 1\n\n reward = -self.step_cost\n done = self.num_env_steps == self.max_steps\n solved = False\n\n # Move player if the field in the moving direction is either\n\n if np.any(new_position < 1) or np.any(new_position >= self.n + 1):\n possible_move = False\n\n elif is_empty(self.world[new_position[0], new_position[1]]):\n # No key, no lock\n possible_move = True\n\n elif new_position[1] == 1 or is_empty(self.world[new_position[0], new_position[1] - 1]):\n # It is a key\n if is_empty(self.world[new_position[0], new_position[1] + 1]):\n # Key is not locked\n possible_move = True\n self.owned_key = self.world[new_position[0], new_position[1]].copy()\n self.world[0, 0] = self.owned_key\n if np.array_equal(self.world[new_position[0], new_position[1]], goal_color):\n # Goal reached\n self.world[0, 0] = wall_color\n reward += self.reward_gem\n solved = True\n done = True\n\n else:\n reward += self.reward_key\n else:\n possible_move = False\n else:\n # It is a lock\n if np.array_equal(self.world[new_position[0], new_position[1]], self.owned_key):\n # The lock matches the key\n possible_move = True\n\n if self.collect_key:\n # goal reached\n if np.array_equal(self.world[new_position[0], new_position[1]-1], goal_color):\n # Goal reached\n self.world[new_position[0], new_position[1] - 1] = [220, 220, 220]\n self.world[0, 0] = wall_color\n reward += self.reward_gem\n solved = True\n done = True\n\n else:\n # loose old key and collect new one\n self.owned_key = np.copy(self.world[new_position[0], 
new_position[1] - 1])\n self.world[new_position[0], new_position[1] - 1] = [220, 220, 220]\n self.world[0, 0] = self.owned_key\n if self.world_dic[tuple(new_position)] == 0:\n reward += self.reward_distractor\n done = True\n else:\n reward += self.reward_key\n else:\n self.owned_key = [220, 220, 220]\n self.world[0, 0] = [0, 0, 0]\n if self.world_dic[tuple(new_position)] == 0:\n reward += self.reward_distractor\n done = True\n else:\n possible_move = False\n # print(\"lock color is {}, but owned key is {}\".format(\n # self.world[new_position[0], new_position[1]], self.owned_key))\n\n if possible_move:\n self.player_position = new_position\n update_color(self.world, previous_agent_loc=current_position, new_agent_loc=new_position)\n\n self.episode_reward += reward\n\n info = {\n \"action.name\": ACTION_LOOKUP[action],\n \"action.moved_player\": possible_move,\n \"bad_transition\": self.max_steps == self.num_env_steps,\n }\n if done:\n info[\"episode\"] = {\"r\": self.episode_reward,\n \"length\": self.num_env_steps,\n \"solved\": solved}\n self.last_frames.append(self.world)\n\n return (self.world - grid_color[0])/255 * 2, reward, done, info\n\n def reset(self, world=None):\n if world is None:\n self.world, self.player_position, self.world_dic = world_gen(n=self.n, goal_length=self.goal_length,\n num_distractor=self.num_distractor,\n distractor_length=self.distractor_length,\n seed=self.np_random_seed)\n else:\n self.world, self.player_position, self.world_dic = world\n\n self.num_env_steps = 0\n self.episode_reward = 0\n self.owned_key = [220, 220, 220]\n\n return (self.world - grid_color[0])/255 * 2\n\n def render(self, mode=\"human\"):\n img = self.world.astype(np.uint8)\n if mode == \"rgb_array\":\n return img\n\n else:\n # from gym.envs.classic_control import rendering\n # if self.viewer is None:\n # self.viewer = rendering.SimpleImageViewer()\n # self.viewer.imshow(img)\n # return self.viewer.isopen\n plt.imshow(img, vmin=0, vmax=255, interpolation='none')\n plt.show()\n\n def get_action_lookup(self):\n return ACTION_LOOKUP\n\n\nACTION_LOOKUP = {\n 0: 'move up',\n 1: 'move down',\n 2: 'move left',\n 3: 'move right',\n}\nCHANGE_COORDINATES = {\n 0: (-1, 0),\n 1: (1, 0),\n 2: (0, -1),\n 3: (0, 1)\n}\n\nif __name__ == \"__main__\":\n # import pickle\n\n # execute only if run as a script\n env = BoxWorld(6, 2, 1, 1)\n env.seed(10)\n\n # with open('/home/nathan/PycharmProjects/relational_RL_graphs/images/ex_world.pkl', 'rb') as file:\n\n env.reset()\n env.render()\n\n env.reset()\n env.render()\n # with open('/home/nathan/PycharmProjects/relational_RL_graphs/images/ex_world.pkl', 'wb') as file:\n # pickle.dump([env.world, env.player_position, env.world_dic], file)\n\n\n# TO DO : impossible lvls ? (keys stacked right made inaccessible)\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.show" ] ]
nialov/fractopo
[ "a59ca168950d07e1961f1009479ce71c2aa9c2d7" ]
[ "tests/__init__.py" ]
[ "\"\"\"\nTest parameters i.e. sample data, known past errors, etc.\n\"\"\"\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom traceback import print_tb\nfrom typing import List\n\nimport geopandas as gpd\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom click.testing import Result\nfrom hypothesis.strategies import floats, integers, tuples\nfrom shapely.geometry import (\n LineString,\n MultiLineString,\n MultiPolygon,\n Point,\n Polygon,\n box,\n)\nfrom shapely.wkt import loads\n\nfrom fractopo import general\nfrom fractopo.analysis import length_distributions, parameters\nfrom fractopo.general import (\n CC_branch,\n CI_branch,\n E_node,\n I_node,\n II_branch,\n X_node,\n Y_node,\n bounding_polygon,\n determine_azimuth,\n read_geofile,\n)\nfrom fractopo.tval import trace_validation\nfrom fractopo.tval.trace_validators import (\n GeomNullValidator,\n GeomTypeValidator,\n MultiJunctionValidator,\n MultipleCrosscutValidator,\n SharpCornerValidator,\n StackedTracesValidator,\n TargetAreaSnapValidator,\n UnderlappingSnapValidator,\n VNodeValidator,\n)\nfrom tests import trace_builder\nfrom tests.sample_data.py_samples.samples import (\n results_in_false_positive_stacked_traces_list,\n results_in_false_positive_underlapping_ls,\n results_in_multijunction_why_ls_list,\n results_in_multijunction_why_ls_list_2,\n results_in_overlapping_ls_list,\n should_result_in_multij_ls_list,\n should_result_in_some_error_ls_list,\n should_result_in_target_area_underlapping_ls,\n should_result_in_target_area_underlapping_poly,\n should_result_in_vnode_ls_list,\n v_node_network_error_ls_list,\n)\n\nGEOMETRY_COLUMN = trace_validation.Validation.GEOMETRY_COLUMN\nERROR_COLUMN = trace_validation.Validation.ERROR_COLUMN\n\nSNAP_THRESHOLD = 0.001\nSNAP_THRESHOLD_ERROR_MULTIPLIER = 1.1\nAREA_EDGE_SNAP_MULTIPLIER = 5\n\n\ndef click_error_print(result: Result):\n \"\"\"\n Print click result traceback.\n \"\"\"\n if result.exit_code == 0:\n return\n assert result.exc_info is not None\n _, _, tb = result.exc_info\n # print(err_class, err)\n print_tb(tb)\n print(result.output)\n raise Exception(result.exception)\n\n\nclass Helpers:\n\n \"\"\"\n Parameters for tests.\n \"\"\"\n\n valid_geom = LineString(((0, 0), (1, 1)))\n\n invalid_geom_empty = LineString()\n invalid_geom_none = None\n invalid_geom_multilinestring = MultiLineString(\n [((0, 0), (1, 1)), ((-1, 0), (1, 0))]\n )\n mergeable_geom_multilinestring = MultiLineString(\n [((0, 0), (1, 1)), ((1, 1), (2, 2))]\n )\n (\n valid_traces,\n invalid_traces,\n valid_areas_geoseries,\n invalid_areas_geoseries,\n ) = trace_builder.main(False, SNAP_THRESHOLD, SNAP_THRESHOLD_ERROR_MULTIPLIER)\n valid_error_srs = pd.Series([[] for _ in valid_traces.geometry.values])\n invalid_error_srs = pd.Series([[] for _ in invalid_traces.geometry.values])\n\n multilinestring_critical_err_in_validation_gdf = read_geofile(\n Path(\"tests/sample_data/validation_errror_08042021/Circle3_fractures.shp\")\n )\n multilinestring_critical_err_in_validation_gdf.error_amount = 1\n\n v_node_network_error_gdf = gpd.GeoDataFrame(geometry=v_node_network_error_ls_list)\n v_node_network_error_area_gdf = gpd.GeoDataFrame(\n geometry=[bounding_polygon(v_node_network_error_gdf)]\n )\n\n hastholmen_traces = read_geofile(\n Path(\"tests/sample_data/hastholmen_traces.geojson\")\n )\n hastholmen_area = read_geofile(Path(\"tests/sample_data/hastholmen_area.geojson\"))\n\n @staticmethod\n def random_data_column(iterable):\n \"\"\"\n Make random data column contents.\n \"\"\"\n return 
[\"aaa\" for _ in iterable]\n\n # geoms are all LineStrings and no errors\n @classmethod\n def valid_gdf_get(cls):\n \"\"\"\n Get valid gdf.\n \"\"\"\n return gpd.GeoDataFrame(\n {\n GEOMETRY_COLUMN: Helpers.valid_traces,\n ERROR_COLUMN: Helpers.valid_error_srs,\n \"random_col\": cls.random_data_column(Helpers.valid_traces),\n \"random_col2\": cls.random_data_column(Helpers.valid_traces),\n \"random_col3\": cls.random_data_column(Helpers.valid_traces),\n \"random_col4\": cls.random_data_column(Helpers.valid_traces),\n }\n )\n\n @classmethod\n def invalid_gdf_get(cls):\n \"\"\"\n Get invalid gdf.\n \"\"\"\n return gpd.GeoDataFrame(\n {\n GEOMETRY_COLUMN: Helpers.invalid_traces,\n ERROR_COLUMN: Helpers.invalid_error_srs,\n \"random_col\": cls.random_data_column(Helpers.invalid_traces),\n }\n )\n\n @classmethod\n def invalid_gdf_null_get(cls):\n \"\"\"\n Get gdf with None and empty geometries.\n \"\"\"\n return gpd.GeoDataFrame(\n {\n GEOMETRY_COLUMN: [None, LineString()],\n ERROR_COLUMN: [[], []],\n \"random_col\": cls.random_data_column(range(2)),\n }\n )\n\n @staticmethod\n def valid_area_gdf_get():\n \"\"\"\n Get a valid area gdf.\n \"\"\"\n return gpd.GeoDataFrame({GEOMETRY_COLUMN: Helpers.valid_areas_geoseries})\n\n @staticmethod\n def invalid_area_gdf_get():\n \"\"\"\n Get an invalid area gdf.\n \"\"\"\n return gpd.GeoDataFrame({GEOMETRY_COLUMN: Helpers.invalid_areas_geoseries})\n\n faulty_error_srs = pd.Series([[] for _ in valid_traces.geometry.values])\n faulty_error_srs[0] = np.nan\n faulty_error_srs[1] = \"this cannot be transformed to list?\"\n faulty_error_srs[2] = (1, 2, 3, \"hello?\")\n faulty_error_srs[5] = 5.12315235\n\n @classmethod\n def valid_gdf_with_faulty_error_col_get(cls):\n \"\"\"\n Get valid gdf with faulty error column.\n \"\"\"\n return gpd.GeoDataFrame(\n {\n GEOMETRY_COLUMN: Helpers.valid_traces,\n ERROR_COLUMN: Helpers.faulty_error_srs,\n \"random_col\": cls.random_data_column(Helpers.valid_traces),\n }\n )\n\n @staticmethod\n def iterate_validators():\n \"\"\"\n Iterate over validators.\n \"\"\"\n for validator in (\n GeomNullValidator,\n GeomTypeValidator,\n MultiJunctionValidator,\n VNodeValidator,\n MultipleCrosscutValidator,\n UnderlappingSnapValidator,\n TargetAreaSnapValidator,\n ):\n yield validator\n\n nice_integer_coordinates = integers(-10, 10)\n nice_float = floats(\n allow_nan=False, allow_infinity=False, min_value=-1e5, max_value=1e5\n )\n nice_tuple = tuples(\n nice_float,\n nice_float,\n )\n triple_tuples = tuples(\n nice_tuple,\n nice_tuple,\n nice_tuple,\n )\n\n snap_threshold = 0.001\n geosrs_identicals = gpd.GeoSeries(\n [Point(1, 1), Point(1, 1), Point(2, 1), Point(2, 1), Point(3, 1), Point(2, 3)]\n )\n\n traces_geosrs = gpd.GeoSeries(\n [\n LineString([(-1, 0), (1, 0)]),\n LineString([(0, -1), (0, 1)]),\n LineString(\n [(-1.0 - snap_threshold * 0.99, -1), (-1.0 - snap_threshold * 0.99, 1)]\n ),\n ]\n )\n areas_geosrs = gpd.GeoSeries([Polygon([(5, 5), (-5, 5), (-5, -5), (5, -5)])])\n\n nice_traces = gpd.GeoSeries(\n [\n # Horizontal\n LineString([(-10, 0), (10, 0)]),\n # Underlapping\n LineString([(-5, 2), (-5, 0 + snap_threshold * 0.01)]),\n LineString([(-4, 2), (-4, 0 + snap_threshold * 0.5)]),\n LineString([(-3, 2), (-3, 0 + snap_threshold * 0.7)]),\n LineString([(-2, 2), (-2, 0 + snap_threshold * 0.9)]),\n LineString([(-1, 2), (-1, 0 + snap_threshold * 1.1)]),\n # Overlapping\n LineString([(1, 2), (1, 0 - snap_threshold * 1.1)]),\n LineString([(2, 2), (2, 0 - snap_threshold * 0.9)]),\n LineString([(3, 2), (3, 0 - snap_threshold * 
0.7)]),\n LineString([(4, 2), (4, 0 - snap_threshold * 0.5)]),\n LineString([(5, 2), (5, 0 - snap_threshold * 0.01)]),\n ]\n )\n nice_integer_coordinates = integers(-10, 10)\n nice_float = floats(\n allow_nan=False, allow_infinity=False, min_value=-1e5, max_value=1e5\n )\n nice_tuple = tuples(\n nice_float,\n nice_float,\n )\n triple_tuples = tuples(\n nice_tuple,\n nice_tuple,\n nice_tuple,\n )\n\n @classmethod\n def get_nice_traces(cls):\n \"\"\"\n Get nice traces GeoSeries.\n \"\"\"\n return cls.nice_traces.copy()\n\n @classmethod\n def get_traces_geosrs(cls):\n \"\"\"\n Get traces GeoSeries.\n \"\"\"\n return cls.traces_geosrs.copy()\n\n @classmethod\n def get_areas_geosrs(cls):\n \"\"\"\n Get areas GeoSeries.\n \"\"\"\n return cls.areas_geosrs.copy()\n\n @classmethod\n def get_geosrs_identicals(cls):\n \"\"\"\n Get GeoSeries with identical geometries.\n \"\"\"\n return cls.geosrs_identicals.copy()\n\n line_1 = LineString([(0, 0), (0.5, 0.5)])\n line_2 = LineString([(0, 0), (0.5, -0.5)])\n line_3 = LineString([(0, 0), (1, 0)])\n line_1_sp = Point(list(line_1.coords)[0])\n line_2_sp = Point(list(line_2.coords)[0])\n line_1_ep = Point(list(line_1.coords)[-1])\n line_2_ep = Point(list(line_2.coords)[-1])\n halved_azimuths = [\n determine_azimuth(line, halved=True)\n for line in (\n line_1,\n line_2,\n line_3,\n )\n ]\n branch_frame = gpd.GeoDataFrame(\n {\n \"geometry\": [line_1, line_2, line_3],\n \"Connection\": [\"C - C\", \"C - I\", \"I - I\"],\n \"Class\": [\"X - I\", \"Y - Y\", \"I - I\"],\n \"halved\": halved_azimuths,\n \"length\": [line.length for line in [line_1, line_2, line_3]],\n }\n )\n\n trace_frame = gpd.GeoDataFrame(\n {\n \"geometry\": [line_1, line_2],\n \"length\": [line_1.length, line_2.length],\n \"startpoint\": [line_1_sp, line_2_sp],\n \"endpoint\": [line_1_ep, line_2_ep],\n }\n )\n point_1 = Point(0.5, 0.5)\n point_2 = Point(1, 1)\n point_3 = Point(10, 10)\n node_frame = gpd.GeoDataFrame(\n {\"geometry\": [point_1, point_2, point_3], \"Class\": [\"X\", \"Y\", \"I\"]}\n )\n node_frame[\"c\"] = node_frame[\"Class\"]\n area_1 = Polygon([(0, 0), (1, 1), (1, 0)])\n area_frame = gpd.GeoDataFrame({\"geometry\": [area_1]})\n\n sample_trace_data = Path(\"tests/sample_data/KB11_traces.shp\")\n sample_branch_data = Path(\"tests/sample_data/KB11_branches.shp\")\n sample_area_data = Path(\"tests/sample_data/KB11_area.shp\")\n kb11_traces = read_geofile(sample_trace_data)\n kb11_area = read_geofile(sample_area_data)\n\n kl2_2_traces = read_geofile(Path(\"tests/sample_data/kl2_2/kl2_2_traces.geojson\"))\n kl2_2_area = read_geofile(Path(\"tests/sample_data/kl2_2/kl2_2_area.geojson\"))\n multipolygon_traces = read_geofile(\n Path(\n \"tests/sample_data/multipolygon_traces_area/\"\n \"traces_within_multipolygon_target_area.geojson\"\n )\n )\n multipolygon_area = read_geofile(\n Path(\n \"tests/sample_data/multipolygon_traces_area/\"\n \"multipolygon_target_area.geojson\"\n )\n )\n manypolygon_area = read_geofile(\n Path(\n \"tests/sample_data/multipolygon_traces_area/\"\n \"many_polygon_target_area.geojson\"\n )\n )\n assert isinstance(kb11_traces, gpd.GeoDataFrame)\n assert isinstance(kb11_area, gpd.GeoDataFrame)\n\n kb7_trace_path = Path(\"tests/sample_data/KB7/KB7_tulkinta_50.shp\")\n kb7_area_path = Path(\"tests/sample_data/KB7/KB7_tulkinta_alue.shp\")\n\n kb7_traces = read_geofile(kb7_trace_path)\n kb7_area = read_geofile(kb7_area_path)\n\n test_tracevalidate_params = [\n (\n Path(\"tests/sample_data/KB7/KB7_tulkinta_50.shp\"), # cut 0-50\n 
Path(\"tests/sample_data/KB7/KB7_tulkinta_alue.shp\"),\n \"--allow-fix\",\n ),\n (\n Path(\"tests/sample_data/KB7/KB7_tulkinta_100.shp\"), # cut 50-100\n Path(\"tests/sample_data/KB7/KB7_tulkinta_alue.shp\"),\n \"--allow-fix\",\n ),\n ]\n\n test_match_crs_params = [\n (\n gpd.GeoSeries([Point(1, 1)]).set_crs(3067), # first\n gpd.GeoSeries([Point(1, 1)]), # second\n True, # same\n True, # from_first\n False, # is_none\n ),\n (\n gpd.GeoSeries([Point(1, 1)]), # first\n gpd.GeoSeries([Point(1, 1)]), # second\n True, # same\n True, # from_first\n True, # is_none\n ),\n (\n gpd.GeoSeries([Point(1, 1)]).set_crs(3067), # first\n gpd.GeoSeries([Point(1, 1)]).set_crs(3066), # second\n False, # same\n True, # from_first\n False, # is_none\n ),\n ]\n test_is_within_buffer_distance_params = [\n (nice_traces, 0.5, 25),\n (nice_traces, 1, 35),\n ]\n\n test_plot_xyi_plot_params = [\n ([{X_node: 0, Y_node: 0, I_node: 50}], [\"title\"]),\n ([{X_node: 0, Y_node: 0, I_node: 0}], [\"title\"]),\n ([{X_node: 0, Y_node: 10, I_node: 25}], [\"\"]),\n ]\n\n test_plot_branch_plot_params = [\n ([{CC_branch: 30, CI_branch: 15, II_branch: 50}], [\"title\"]),\n ([{CC_branch: 0, CI_branch: 0, II_branch: 50}], [\"title\"]),\n ([{CC_branch: 0, CI_branch: 0, II_branch: 0}], [\"title\"]),\n ]\n\n test_determine_topology_parameters_params = [\n (\n np.array([10, 10, 10, 10]), # trace_length_array\n {X_node: 3, Y_node: 5, I_node: 8, E_node: 0}, # node_counts dict\n 10.0, # area\n ),\n (\n np.array([1, 1, 1, 1]), # trace_length_array\n {X_node: 3, Y_node: 5, I_node: 8, E_node: 0}, # node_counts dict\n 1.0, # area\n ),\n ]\n\n test_plot_topology_params = [\n (\n [\n parameters.determine_topology_parameters( # topology_parameters_list\n *test_determine_topology_parameters_params[0]\n )\n ],\n [\"title\"], # labels\n [\"black\"], # colors\n )\n ]\n test_determine_nodes_intersecting_sets_params = [\n (\n (\n gpd.GeoSeries([LineString([(0, 0), (1, 1)])]),\n gpd.GeoSeries([LineString([(0, 1), (0, -1)])]),\n ), # trace_series_two_sets\n np.array([\"1\", \"2\"]), # set_array\n (\"1\", \"2\"), # set_names_two_sets\n gpd.GeoSeries(\n [Point(0, 0), Point(1, 1), Point(0, 1), Point(0, -1)]\n ), # node_series_xy\n 0.001, # buffer_value\n [True, False, False, False], # assumed_intersections\n ),\n (\n (\n gpd.GeoSeries([LineString([(0.5, 0.5), (1, 1)])]),\n gpd.GeoSeries([LineString([(0, 1), (0, -1)])]),\n ), # trace_series_two_sets\n np.array([\"1\", \"2\"]), # set_array\n (\"1\", \"2\"), # set_names_two_sets\n gpd.GeoSeries(\n [Point(0.5, 0.5), Point(1, 1), Point(0, 1), Point(0, -1)]\n ), # node_series_xy\n 0.001, # buffer_value\n [False, False, False, False], # assumed_intersections\n ),\n ]\n\n test_prepare_geometry_traces_params = [\n (\n gpd.GeoSeries(\n [LineString([(0.5, 0.5), (1, 1)]), LineString([(0, 1), (0, -1)])]\n )\n ),\n (\n gpd.GeoSeries(\n [\n LineString([(0.5, 0.5), (1, 1)]),\n LineString([(0, 1), (0, -1)]),\n LineString([(0, 100), (0, -15)]),\n LineString([(5, 100), (67, -15), (67, -150)]),\n ]\n )\n ),\n ]\n test_determine_intersects_params = [\n (\n (\n gpd.GeoSeries([LineString([(0, 0), (1, 1)])]),\n gpd.GeoSeries([LineString([(0, 1), (0, -1)])]),\n ), # trace_series_two_sets\n (\"1\", \"2\"), # set_names_two_sets\n gpd.GeoSeries([Point(0, 0)]), # node_series_xy_intersects\n np.array([\"Y\"]), # node_types_xy_intersects\n # assumed_intersections\n 0.001, # buffer_value\n ),\n (\n (\n gpd.GeoSeries([LineString([(0.5, 0.5), (1, 1)])]),\n gpd.GeoSeries([LineString([(0, 1), (0, -1)])]),\n ), # trace_series_two_sets\n (\"1\", 
\"2\"), # set_names_two_sets\n gpd.GeoSeries([]), # node_series_xy_intersects\n np.array([]), # node_types_xy_intersects\n # assumed_intersections\n 0.001, # buffer_value\n ),\n ]\n test_determine_crosscut_abutting_relationships_params = [\n (\n gpd.GeoSeries(\n [LineString([(0, 0), (1, 0)]), LineString([(0, 1), (0, -1)])]\n ), # trace_series\n gpd.GeoSeries(\n [Point(0, 0), Point(1, 0), Point(0, 1), Point(0, -1)]\n ), # node_series\n np.array([\"Y\", \"I\", \"I\", \"I\"]), # node_types\n np.array([\"1\", \"2\"]), # set_array\n (\"1\", \"2\"), # set_names\n 0.001, # buffer_value\n \"title\", # label\n ),\n ]\n\n test__validate_params = [\n (\n GeomNullValidator, # validator\n None, # geom\n [], # current_errors\n True, # allow_fix\n [None, [GeomNullValidator.ERROR], True], # assumed_result\n ),\n (\n GeomTypeValidator, # validator\n invalid_geom_multilinestring, # geom\n [], # current_errors\n True, # allow_fix\n [\n invalid_geom_multilinestring,\n [GeomTypeValidator.ERROR],\n True,\n ], # assumed_result\n ),\n (\n GeomTypeValidator, # validator\n mergeable_geom_multilinestring, # geom\n [], # current_errors\n True, # allow_fix\n [\n loads(\"LINESTRING (0 0, 1 1, 2 2)\"),\n [],\n False,\n ], # assumed_result\n ),\n ]\n intersect_nodes = [\n (Point(0, 0), Point(1, 1)),\n (Point(1, 1),),\n (Point(5, 5),),\n (Point(0, 0), Point(1, 1)),\n ]\n\n # Intersects next trace three times\n intersects_next_trace_3_times = LineString(\n [Point(-4, -3), Point(-2, -3), Point(-4, -2), Point(-2, -1)]\n )\n\n # Straight line which is intersected twice by same line\n intersected_3_times = LineString([Point(-3, -4), Point(-3, -1)])\n test_validation_params = [\n (\n kb7_traces, # traces\n kb7_area, # area\n \"kb7\", # name\n True, # auto_fix\n [SharpCornerValidator.ERROR], # assume_errors\n ),\n (\n hastholmen_traces, # traces\n hastholmen_area, # area\n \"hastholmen_traces\", # name\n True, # auto_fix\n [], # assume_errors\n ),\n # (\n # kb11_traces, # traces\n # kb11_area, # area\n # \"kb11\", # name\n # True, # auto_fix\n # None, # assume_errors\n # ),\n (\n gpd.GeoDataFrame(\n geometry=trace_builder.make_invalid_traces(\n snap_threshold=0.01, snap_threshold_error_multiplier=1.1\n )\n ), # traces\n gpd.GeoDataFrame(\n geometry=trace_builder.make_invalid_target_areas()\n ), # area\n \"invalid_traces\", # name\n True, # auto_fix\n None, # assume_errors\n ),\n (\n gpd.GeoDataFrame(geometry=[LineString([(0, 0), (0, 1)])]), # traces\n gpd.GeoDataFrame(\n geometry=[\n Polygon(\n [\n Point(-1, -1),\n Point(-1, 1.011),\n Point(1, 1.011),\n Point(1, -1),\n ]\n )\n ]\n ), # area\n \"TargetAreaSnapValidator error\", # name\n True, # auto_fix\n [TargetAreaSnapValidator.ERROR], # assume_errors\n ),\n (\n gpd.GeoDataFrame(\n geometry=[LineString([(0, 0), (0, 1)]), LineString([(5, 5), (5, 6)])]\n ), # traces\n gpd.GeoDataFrame(\n geometry=[\n Polygon(\n [\n Point(-1, -1),\n Point(-1, 1.011),\n Point(1, 1.011),\n Point(1, -1),\n ]\n ),\n Polygon(\n [\n Point(2, 2),\n Point(2, 6.011),\n Point(6, 6.011),\n Point(6, 2),\n ]\n ),\n ]\n ), # area\n \"TargetAreaSnapValidator error\", # name\n True, # auto_fix\n [TargetAreaSnapValidator.ERROR], # assume_errors\n ),\n (\n gpd.GeoDataFrame(\n geometry=[LineString([(0, 0), (0, 1)]), LineString([(5, 5), (5, 6)])]\n ), # traces\n gpd.GeoDataFrame(\n geometry=[\n MultiPolygon(\n [\n Polygon(\n [\n Point(-1, -1),\n Point(-1, 1.011),\n Point(1, 1.011),\n Point(1, -1),\n ]\n ),\n Polygon(\n [\n Point(2, 2),\n Point(2, 6.011),\n Point(6, 6.011),\n Point(6, 2),\n ]\n ),\n ]\n )\n ]\n ), # 
area\n \"TargetAreaSnapValidator error\", # name\n True, # auto_fix\n [TargetAreaSnapValidator.ERROR], # assume_errors\n ),\n ]\n\n test_determine_v_nodes_params = [\n (\n [(Point(1, 1),), (Point(1, 1),)], # endpoint_nodes\n 0.01, # snap_threshold\n 1.1, # snap_threshold_error_multiplier\n {0, 1}, # assumed_result\n ),\n (\n [(Point(1, 1),), (Point(1, 1),)], # endpoint_nodes\n 0.01, # snap_threshold\n 1.1, # snap_threshold_error_multiplier\n {0, 1}, # assumed_result\n ),\n ]\n\n test_determine_node_junctions_params = [\n (\n [\n (Point(0, 0), Point(1, 1)),\n (Point(1, 1),),\n (Point(5, 5),),\n (Point(0, 0), Point(1, 1)),\n ], # nodes\n 0.01, # snap_threshold\n 1.1, # snap_threshold_error_multiplier\n 2, # error_threshold\n )\n ]\n\n test_bounding_polygon_params = [\n (gpd.GeoSeries([line_1, line_2, line_3])),\n (gpd.GeoSeries([line_1])),\n ]\n\n test_testtargetareasnapvalidator_validation_method = [\n (\n LineString([(0.5, 0), (0.5, 0.5)]), # geom: LineString,\n gpd.GeoDataFrame(\n geometry=[\n MultiPolygon(\n [\n Polygon([(0, 0), (0, 1), (1, 1), (1, 0)]),\n Polygon([(10, 10), (10, 11), (11, 11), (11, 10)]),\n ]\n )\n ]\n ), # area:gpd.GeoDataFrame\n 0.01, # snap_threshold: float,\n 1.1, # snap_threshold_error_multiplier: float,\n 2.5, # area_edge_snap_multiplier: float,\n True, # assumed_result: bool,\n ),\n (\n LineString([(0.5, 0.01 * 1.05), (0.5, 0.5)]), # geom: LineString,\n gpd.GeoDataFrame(\n geometry=[\n MultiPolygon(\n [\n Polygon([(0, 0), (0, 1), (1, 1), (1, 0)]),\n Polygon([(10, 10), (10, 11), (11, 11), (11, 10)]),\n ]\n )\n ]\n ), # area:gpd.GeoDataFrame\n 0.01, # snap_threshold: float,\n 1.1, # snap_threshold_error_multiplier: float,\n 2.5, # area_edge_snap_multiplier: float,\n False, # assumed_result: bool,\n ),\n (\n LineString([(0.5, 0), (0.5, 0.5)]), # geom: LineString,\n gpd.GeoDataFrame(\n geometry=[\n Polygon([(0, 0), (0, 1), (1, 1), (1, 0)]),\n Polygon([(10, 10), (10, 11), (11, 11), (11, 10)]),\n ]\n ), # area:gpd.GeoDataFrame\n 0.01, # snap_threshold: float,\n 1.1, # snap_threshold_error_multiplier: float,\n 2.5, # area_edge_snap_multiplier: float,\n True, # assumed_result: bool,\n ),\n (\n LineString([(10, 0), (4.991, 0)]), # geom: LineString,\n gpd.GeoDataFrame(\n geometry=[Polygon([(5, 5), (-5, 5), (-5, -5), (5, -5)])]\n ), # area:gpd.GeoDataFrame\n 0.01, # snap_threshold: float,\n 1.1, # snap_threshold_error_multiplier: float,\n 1.5, # area_edge_snap_multiplier: float,\n True, # assumed_result: bool,\n ), # Test that traces coming from outside area are not marked as underlapping\n (\n LineString([(10, 0), (5.011, 0)]), # geom: LineString,\n gpd.GeoDataFrame(\n geometry=[Polygon([(5, 5), (-5, 5), (-5, -5), (5, -5)])]\n ), # area:gpd.GeoDataFrame\n 0.01, # snap_threshold: float,\n 1.1, # snap_threshold_error_multiplier: float,\n 1.5, # area_edge_snap_multiplier: float,\n True, # assumed_result: bool,\n ), # Test that traces coming from outside area are not marked as underlapping\n (\n should_result_in_target_area_underlapping_ls, # geom: LineString,\n gpd.GeoDataFrame(\n geometry=[should_result_in_target_area_underlapping_poly]\n ), # area:gpd.GeoDataFrame\n 0.01, # snap_threshold: float,\n 1.1, # snap_threshold_error_multiplier: float,\n 2.5, # area_edge_snap_multiplier: float,\n False, # assumed_result: bool,\n ), # Test that traces coming from outside area are not marked as underlapping\n ]\n\n test_tracevalidate_only_area_params = [\n (\n [\n \"tests/sample_data/KB7/KB7_tulkinta_50.shp\", # cut 0-50\n \"tests/sample_data/KB7/KB7_tulkinta_alue.shp\",\n 
\"--allow-fix\",\n \"--only-area-validation\",\n ] # args\n )\n ]\n\n geta_1_traces = read_geofile(\n Path(\"tests/sample_data/geta1/Getaberget_20m_1_traces.gpkg\")\n )\n geta_1_1_area = read_geofile(\n Path(\"tests/sample_data/geta1/Getaberget_20m_1_1_area.gpkg\")\n )\n\n geta_1_traces_1000_n = geta_1_traces.iloc[0:1000]\n\n test_network_params = [\n (\n geta_1_traces, # traces\n geta_1_1_area, # area\n \"Geta1_1\", # name\n True, # determine_branches_nodes\n True, # truncate_traces\n 0.001, # snap_threshold\n True, # circular_target_area\n ),\n (\n kb11_traces, # traces\n kb11_area, # area\n \"KB11\", # name\n True, # determine_branches_nodes\n True, # truncate_traces\n 0.001, # snap_threshold\n False, # circular_target_area\n ),\n (\n kb11_traces.iloc[0:100], # traces\n kb11_area, # area\n \"KB11_0_100\", # name\n True, # determine_branches_nodes\n True, # truncate_traces\n 0.001, # snap_threshold\n False, # circular_target_area\n ),\n (\n v_node_network_error_gdf, # traces\n v_node_network_error_area_gdf, # area\n \"v-node-error-network\", # name\n True, # determine_branches_nodes\n True, # truncate_traces\n 0.001, # snap_threshold\n False, # circular_target_area\n ),\n (\n multipolygon_traces, # traces\n multipolygon_area, # area\n \"MultiPolygon_target_area\", # name\n True, # determine_branches_nodes\n True, # truncate_traces\n 0.001, # snap_threshold\n False, # circular_target_area\n ),\n (\n multipolygon_traces, # traces\n multipolygon_area, # area\n \"MultiPolygon_target_area\", # name\n True, # determine_branches_nodes\n False, # truncate_traces\n 0.001, # snap_threshold\n False, # circular_target_area\n ),\n (\n multipolygon_traces, # traces\n manypolygon_area, # area\n \"MultiPolygon_target_area\", # name\n True, # determine_branches_nodes\n True, # truncate_traces\n 0.001, # snap_threshold\n False, # circular_target_area\n ),\n (\n kl2_2_traces.iloc[0:1500], # traces\n kl2_2_area, # area\n \"kl_2_2\", # name\n True, # determine_branches_nodes\n True, # truncate_traces\n 0.001, # snap_threshold\n False, # circular_target_area\n ),\n ]\n\n test_network_random_sampler_params = [\n (\n geta_1_traces, # trace_gdf\n geta_1_1_area, # area_gdf\n 10, # min_radius\n 0.001, # snap_threshold\n 1, # samples\n \"area\", # random_choice\n ),\n (\n geta_1_traces, # trace_gdf\n geta_1_1_area, # area_gdf\n 10, # min_radius\n 0.001, # snap_threshold\n 1, # samples\n \"radius\", # random_choice\n ),\n ]\n\n test_describe_powerlaw_fit_params = [\n (\n geta_1_traces.geometry.length.values, # lengths\n \"traces\", # label\n )\n ]\n\n test_determine_boundary_intersecting_lines_params = [\n (\n gpd.GeoDataFrame(\n geometry=[\n LineString([(0, 0), (0, 5)]),\n LineString([(0, 0), (0, 3)]),\n LineString([(0, 0), (0, 10)]),\n ]\n ), # line_gdf\n gpd.GeoDataFrame(\n geometry=[\n Polygon([(-5, 5), (5, 5), (5, -5), (-5, -5)]),\n ]\n ), # area_gdf\n 0.01, # snap_threshold\n np.array([True, False, True]), # assumed_result_inter\n np.array([False, False, False]), # assumed_result_cuts\n ),\n (\n gpd.GeoDataFrame(\n geometry=[\n LineString([(0, -50), (0, 5)]),\n LineString([(0, 0), (0, 3)]),\n LineString([(0, 0), (0, 10)]),\n ]\n ), # line_gdf\n gpd.GeoDataFrame(\n geometry=[\n Polygon([(-5, 5), (5, 5), (5, -5), (-5, -5)]),\n ]\n ), # area_gdf\n 0.01, # snap_threshold\n np.array([True, False, True]), # assumed_result_inter\n np.array([True, False, False]), # assumed_result_cuts\n ),\n (\n gpd.GeoDataFrame(\n geometry=[\n LineString([(0, -50), (0, -5.00001)]),\n ]\n ), # line_gdf\n gpd.GeoDataFrame(\n 
geometry=[\n Polygon([(-5, 5), (5, 5), (5, -5), (-5, -5)]),\n ]\n ), # area_gdf\n 0.01, # snap_threshold\n np.array([True]), # assumed_result_inter\n np.array([False]), # assumed_result_cuts\n ),\n (\n gpd.GeoDataFrame(\n geometry=[\n LineString([(0, -4.999), (0, 4.999)]),\n ]\n ), # line_gdf\n gpd.GeoDataFrame(\n geometry=[\n Polygon([(-5, 5), (5, 5), (5, -5), (-5, -5)]),\n ]\n ), # area_gdf\n 0.01, # snap_threshold\n np.array([True]), # assumed_result_inter\n np.array([True]), # assumed_result_cuts\n ),\n (\n gpd.GeoDataFrame(\n geometry=[\n LineString([(0, 0), (0, 1)]),\n LineString([(10, 0), (10, 1)]),\n ]\n ), # line_gdf\n gpd.GeoDataFrame(\n geometry=[\n Polygon([(-1, -1), (1, -1), (1, 1), (-1, 1)]),\n Polygon([(9, -1), (11, -1), (11, 1), (9, 1)]),\n ]\n ), # area_gdf\n 0.01, # snap_threshold\n np.array([True, True]), # assumed_result_inter\n np.array([False, False]), # assumed_result_cuts\n ),\n (\n gpd.GeoDataFrame(\n geometry=[\n LineString([(-2, 0), (2, 1)]),\n LineString([(8, 0), (12, 0)]),\n ]\n ), # line_gdf\n gpd.GeoDataFrame(\n geometry=[\n Polygon([(-1, -1), (1, -1), (1, 1), (-1, 1)]),\n Polygon([(9, -1), (11, -1), (11, 1), (9, 1)]),\n ]\n ), # area_gdf\n 0.01, # snap_threshold\n np.array([True, True]), # assumed_result_inter\n np.array([True, True]), # assumed_result_cuts\n ),\n ]\n\n test_network_circular_target_area_params = [\n (\n gpd.GeoDataFrame(\n geometry=[\n LineString([(0, 0), (0, 10)]), # one intersect\n LineString([(1, 0), (1, 3)]), # no intersect\n LineString([(4, -10), (4, 10)]), # two intersect\n ]\n ), # trace_gdf\n gpd.GeoDataFrame(\n geometry=[Polygon([(-5, -5), (-5, 5), (5, 5), (5, -5)])]\n ), # area_gdf\n \"circular_target_area_param_test\", # name\n )\n ]\n\n test_snap_trace_to_another_params = [\n (\n [Point(0, 0), Point(0, 4)], # trace_endpoints\n LineString([(-5, 5), (5, 5)]), # another\n 1.1, # snap_threshold\n LineString([(-5, 5), (0, 4), (5, 5)]), # another\n ),\n (\n [Point(0, 0), Point(0, 4)], # trace_endpoints\n LineString([(-5, 5), (5, 5)]), # another\n 0.1, # snap_threshold\n LineString([(-5, 5), (5, 5)]), # another\n ),\n ]\n\n test_insert_point_to_linestring_params = [\n (\n LineString([(0, 0), (1, 1), (2, 2)]),\n Point(0.5, 0.5),\n 0.01, # snap_threshold\n None,\n ),\n (\n LineString([(0, 0), (-1, -1), (-2, -2)]),\n Point(-0.5, -0.5),\n 0.01, # snap_threshold\n None,\n ),\n (\n LineString([(0, 0), (-1, -1), (-2, -2)]),\n Point(-1.0, -1.1),\n 0.11, # snap_threshold\n LineString([(0, 0), (-1.0, -1.1), (-2, -2)]),\n ),\n (\n LineString([(0, 0), (-1, -1), (-1.5, -1.5), (-2, -2)]),\n Point(-1.0, -1.1),\n 0.11, # snap_threshold\n LineString([(0, 0), (-1.0, -1.1), (-1.5, -1.5), (-2, -2)]),\n ),\n (\n LineString([(0, 0), (-1.5, -1.5), (-2, -2)]),\n Point(-1.5, -1.5),\n 0.11, # snap_threshold\n LineString([(0, 0), (-1.5, -1.5), (-2, -2)]),\n ),\n ]\n\n sample_traces_path = Path(\"tests/sample_data/branches_and_nodes/traces.gpkg\")\n sample_areas_path = Path(\"tests/sample_data/branches_and_nodes/areas.gpkg\")\n sample_traces = gpd.read_file(sample_traces_path)\n sample_areas = gpd.read_file(sample_areas_path)\n\n test_branches_and_nodes_regression_params = [\n (\n sample_traces, # traces\n sample_areas, # areas\n 0.001, # snap_threshold\n 10, # allowed_loops\n False, # already_clipped\n )\n ]\n\n troubling_traces_path = Path(\n \"tests/sample_data/branches_and_nodes/traces_troubling.gpkg\"\n )\n troubling_traces = gpd.read_file(troubling_traces_path)\n\n troubling_upper = troubling_traces.geometry.values[1]\n troubling_middle = 
troubling_traces.geometry.values[0]\n troubling_lower = troubling_traces.geometry.values[2]\n\n test_simple_snap_params = [\n (\n troubling_upper, # trace\n gpd.GeoSeries([troubling_middle, troubling_lower]), # trace_candidates\n 0.001, # snap_threshold\n 0, # intersects_idx\n ),\n (\n troubling_lower, # trace\n gpd.GeoSeries([troubling_middle, troubling_upper]), # trace_candidates\n 0.001, # snap_threshold\n 0, # intersects_idx\n ),\n ]\n\n test_snap_trace_simple_params = [\n (\n 0, # idx\n troubling_upper, # trace\n 0.001, # snap_threshold\n [troubling_upper, troubling_middle, troubling_upper], # traces\n 1, # intersects_idx\n )\n ]\n\n unary_err_traces_path = Path(\"tests/sample_data/unary_error_data/err_traces.shp\")\n unary_err_areas_path = Path(\"tests/sample_data/unary_error_data/err_area.shp\")\n unary_err_traces = gpd.read_file(unary_err_traces_path).iloc[5500:8000]\n unary_err_areas = gpd.read_file(unary_err_areas_path)\n assert isinstance(unary_err_traces, gpd.GeoDataFrame)\n assert isinstance(unary_err_areas, gpd.GeoDataFrame)\n\n test_safer_unary_union_params = [\n (\n unary_err_traces.geometry, # traces_geosrs\n 0.001, # snap_threshold\n 13000, # size_threshold\n ),\n (\n unary_err_traces.geometry, # traces_geosrs\n 0.001, # snap_threshold\n 50, # size_threshold\n ),\n ]\n\n test_segment_within_buffer_params = [\n (valid_geom, invalid_geom_multilinestring, 0.001, 1.1, 50, True),\n (valid_geom, mergeable_geom_multilinestring, 0.001, 1.1, 50, True),\n (\n valid_geom,\n MultiLineString([LineString([(10, 10), (50, 50)])]),\n 0.001,\n 1.1,\n 50,\n False,\n ),\n ]\n\n test_segmentize_linestring_params = [\n (LineString(((0, 0), (0, 1))), 0.1, 10),\n (LineString(((0, 0), (1, 1))), 0.1, 15),\n (LineString(((0, 0), (0, 1))), 1, 1),\n ]\n\n test_split_to_determine_triangle_errors_params = [\n (\n LineString([(-1, 0), (0, 2), (1, 0)]), # trace\n LineString([(-1, 1.99), (0, 1.99), (1, 1.99)]), # splitter_trace\n 0.001, # snap_threshold\n 50, # triangle_error_snap_multiplier\n True, # assumed_result\n ),\n (\n LineString([(-1, 0), (0, 5), (1, 0)]), # trace\n LineString([(-1, 1.99), (0, 1.99), (1, 1.99)]), # splitter_trace\n 0.001, # snap_threshold\n 50, # triangle_error_snap_multiplier\n False, # assumed_result\n ),\n (\n LineString([(-1, 0), (0, 1.98), (1, 0)]), # trace\n LineString([(-1, 1.99), (0, 1.99), (1, 1.99)]), # splitter_trace\n 0.001, # snap_threshold\n 50, # triangle_error_snap_multiplier\n False, # assumed_result\n ),\n ]\n\n test_determine_middle_in_triangle_params = [\n (\n [\n LineString([(0, 0), (0, 1)]),\n LineString([(0, 1), (0, 2)]),\n LineString([(0, 2), (0, 3)]),\n ], # segments\n 0.001, # snap_threshold\n 1.1, # snap_threshold_error_multiplier\n [\n LineString([(0, 1), (0, 2)]),\n ], # assumed_result\n ),\n (\n [\n LineString([(0, 0), (0, 1)]),\n LineString([(0, 1), (0, 2)]),\n LineString([(0, 2), (0, 3)]),\n LineString([(0, 3), (0, 4)]),\n ], # segments\n 0.001, # snap_threshold\n 1.1, # snap_threshold_error_multiplier\n [\n LineString([(0, 1), (0, 2)]),\n LineString([(0, 2), (0, 3)]),\n ], # assumed_result\n ),\n (\n [\n LineString([(0, 0), (0, 1)]),\n LineString([(0, 2), (0, 3)]),\n ], # segments\n 0.001, # snap_threshold\n 1.1, # snap_threshold_error_multiplier\n [], # assumed_result\n ),\n ]\n\n test_network_contour_grid_params = [\n (\n kb11_traces, # traces\n kb11_area, # areas\n 0.001, # snap_threshold\n ),\n ]\n\n test_report_snapping_loop_params = [\n (\n 5, # loop\n 10, # allowed_loops\n False, # will_error\n ),\n (\n 11, # loop\n 10, # allowed_loops\n 
True, # will_error\n ),\n ]\n\n\nclass ValidationHelpers:\n\n \"\"\"\n Known examples of validation.\n \"\"\"\n\n # Known Errors\n # ============\n\n known_errors = dict()\n\n known_multi_junction_gdfs = [\n gpd.GeoDataFrame(\n geometry=[\n LineString([Point(0, -3), Point(2, -3)]),\n LineString([Point(1, -4), Point(1, -2)]),\n LineString([Point(2, -4), Point(0.5, -2.50001)]),\n ]\n ),\n gpd.GeoDataFrame(\n geometry=[\n LineString([Point(-2, 0), Point(2, 0)]),\n LineString([Point(0, -2), Point(0, 4)]),\n LineString([Point(1, -1), Point(-1, 1)]),\n ]\n ),\n gpd.GeoDataFrame(\n geometry=[\n LineString([Point(-2, 4), Point(-3, 4)]),\n LineString([Point(-2.5, 3.5), Point(-3.5, 4.5)]),\n LineString([Point(-3.5, 3.5), Point(-2.5, 4.5)]),\n ]\n ),\n gpd.GeoDataFrame(\n geometry=[\n LineString([Point(-2, 2), Point(-4, 2)]),\n LineString(\n [\n Point(-3, 1),\n Point(-3, 2 + 0.01 + 0.0001),\n ]\n ),\n ]\n ),\n gpd.GeoDataFrame(geometry=should_result_in_some_error_ls_list),\n gpd.GeoDataFrame(geometry=should_result_in_multij_ls_list),\n ]\n\n known_multilinestring_gdfs = [\n gpd.GeoDataFrame(\n geometry=[\n MultiLineString(\n [\n LineString([Point(3, -4), Point(3, -1)]),\n LineString([Point(3, 0), Point(3, 4)]),\n ]\n )\n ],\n ),\n Helpers.multilinestring_critical_err_in_validation_gdf,\n ]\n known_vnode_gdfs = [\n gpd.GeoDataFrame(\n geometry=[\n LineString([Point(0, 0), Point(1.0001, 1)]),\n LineString([Point(1, 0), Point(1.0001, 0.9999)]),\n ]\n ),\n gpd.GeoDataFrame(geometry=should_result_in_vnode_ls_list),\n ]\n known_stacked_gdfs = [\n gpd.GeoDataFrame(\n geometry=[\n LineString([Point(0, -7), Point(0, -5)]),\n LineString([Point(-1, -7), Point(0 + 0.01, -6), Point(-1, -5)]),\n ]\n ),\n ]\n\n known_non_underlaping_gdfs_but_overlapping = [\n gpd.GeoDataFrame(geometry=results_in_false_positive_underlapping_ls)\n ]\n\n known_null_gdfs = [gpd.GeoDataFrame(geometry=[None, LineString()])]\n\n known_errors[MultiJunctionValidator.ERROR] = known_multi_junction_gdfs\n\n known_errors[GeomTypeValidator.ERROR] = known_multilinestring_gdfs\n known_errors[VNodeValidator.ERROR] = known_vnode_gdfs\n known_errors[StackedTracesValidator.ERROR] = known_stacked_gdfs\n known_errors[GeomNullValidator.ERROR] = known_null_gdfs\n known_errors[\n UnderlappingSnapValidator._OVERLAPPING\n ] = known_non_underlaping_gdfs_but_overlapping\n\n # False Positives\n # ===============\n\n known_false_positives = dict()\n\n known_non_stacked_gdfs = [\n gpd.GeoDataFrame(geometry=results_in_false_positive_stacked_traces_list),\n ]\n\n known_non_overlapping_gdfs = [\n gpd.GeoDataFrame(geometry=results_in_overlapping_ls_list)\n ]\n\n known_non_multijunction_gdfs = [\n gpd.GeoDataFrame(geometry=results_in_multijunction_why_ls_list),\n gpd.GeoDataFrame(geometry=results_in_multijunction_why_ls_list_2),\n ]\n\n known_false_positives[StackedTracesValidator.ERROR] = known_non_stacked_gdfs\n known_false_positives[\n UnderlappingSnapValidator._UNDERLAPPING\n ] = known_non_underlaping_gdfs_but_overlapping\n known_false_positives[\n UnderlappingSnapValidator._OVERLAPPING\n ] = known_non_overlapping_gdfs\n known_false_positives[MultiJunctionValidator.ERROR] = known_non_multijunction_gdfs\n\n # Class methods to generate pytest params for parametrization\n # ===========================================================\n\n @classmethod\n def generate_known_params(cls, error, false_positive):\n \"\"\"\n Generate pytest.params.\n \"\"\"\n knowns: List[gpd.GeoDataFrame] = (\n cls.known_errors[error]\n if not false_positive\n else 
cls.known_false_positives[error]\n )\n amounts = [\n (gdf.shape[0] if not hasattr(gdf, \"error_amount\") else gdf.error_amount)\n if error\n not in (\n UnderlappingSnapValidator._UNDERLAPPING,\n UnderlappingSnapValidator._OVERLAPPING,\n )\n else 1\n for gdf in knowns\n ]\n try:\n areas = [\n gpd.GeoDataFrame(geometry=[bounding_polygon(gdf)]) for gdf in knowns\n ]\n except ValueError:\n areas = [\n gpd.GeoDataFrame(geometry=[Polygon([(0, 0), (1, 1), (1, 0)])])\n for _ in knowns\n ]\n assert len(knowns) == len(areas) == len(amounts)\n return [\n pytest.param(\n known,\n area,\n f\"{error}, {amount}\",\n True,\n [error],\n amount,\n false_positive,\n id=f\"{error}_{amount}\".replace(\" \", \"_\"),\n )\n for known, area, amount in zip(knowns, areas, amounts)\n ]\n\n @classmethod\n def get_all_errors(cls):\n \"\"\"\n Get the defined errors.\n \"\"\"\n # TODO: UnderlappingSnapValidator doesn't follow protocol\n all_error_types = set(\n [validator.ERROR for validator in trace_validation.ALL_VALIDATORS]\n + [\n UnderlappingSnapValidator._OVERLAPPING,\n UnderlappingSnapValidator._UNDERLAPPING,\n ]\n )\n all_errs = []\n for err in all_error_types:\n try:\n all_errs.extend(cls.generate_known_params(err, false_positive=False))\n except KeyError:\n pass\n try:\n all_errs.extend(cls.generate_known_params(err, false_positive=True))\n except KeyError:\n pass\n\n assert len(all_errs) > 0\n return all_errs\n\n\n@lru_cache(maxsize=None)\ndef kb11_traces_lengths():\n \"\"\"\n Get trace lengths of KB11.\n \"\"\"\n return Helpers.kb11_traces.geometry.length\n\n\n@lru_cache(maxsize=None)\ndef kb11_area_value():\n \"\"\"\n Get area value of KB11.\n \"\"\"\n return sum(Helpers.kb11_area.geometry.area)\n\n\n@lru_cache(maxsize=None)\ndef hastholmen_traces_lengths():\n \"\"\"\n Get trace lengths of hastholmen infinity lineaments.\n \"\"\"\n return Helpers.hastholmen_traces.geometry.length\n\n\n@lru_cache(maxsize=None)\ndef hastholmen_area_value():\n \"\"\"\n Get area value of hastholmen.\n \"\"\"\n return sum(Helpers.hastholmen_area.geometry.area)\n\n\n@lru_cache(maxsize=None)\ndef test_populate_sample_cell_new_params():\n \"\"\"\n Params for test_populate_sample_cell_new.\n \"\"\"\n return [\n (\n box(0, 0, 2, 2),\n gpd.GeoDataFrame(geometry=[LineString([(-5, 1), (5, 1)])]),\n 0.001,\n )\n ]\n\n\n@lru_cache(maxsize=None)\ndef test_multinetwork_params():\n \"\"\"\n Params for test_multinetwork.\n \"\"\"\n return [\n (\n (\n dict(\n trace_gdf=Helpers.geta_1_traces_1000_n,\n area_gdf=Helpers.geta_1_1_area,\n name=\"geta1_1\",\n circular_target_area=True,\n snap_threshold=0.001,\n ),\n dict(\n trace_gdf=Helpers.geta_1_traces_1000_n,\n area_gdf=Helpers.geta_1_1_area,\n name=\"geta1_2\",\n circular_target_area=True,\n snap_threshold=0.001,\n ),\n ),\n 1,\n 5.0,\n ),\n ]\n\n\ndef test_ternary_heatmapping_params():\n \"\"\"\n Params for test_ternary_heatmapping.\n \"\"\"\n return [\n (\n np.array([0.2, 0.8, 0.1]),\n np.array([0.4, 0.1, 0.4]),\n np.array([0.4, 0.1, 0.5]),\n 15,\n ),\n (\n np.array([0.4, 0.1, 0.4]),\n np.array([0.2, 0.8, 0.1]),\n np.array([0.4, 0.1, 0.5]),\n 15,\n ),\n ]\n\n\ndef test_normalize_fit_to_area_params():\n \"\"\"\n Params for test_normalize_fit_to_area.\n \"\"\"\n return [\n length_distributions.LengthDistribution(\n name=\"kb11\",\n lengths=kb11_traces_lengths(),\n area_value=kb11_area_value(),\n ),\n length_distributions.LengthDistribution(\n name=\"kb11_50\",\n lengths=kb11_traces_lengths()[0:50],\n area_value=kb11_area_value(),\n ),\n ]\n\n\ndef test_concat_length_distributions_params():\n 
\"\"\"\n Params for test_concat_length_distributions.\n \"\"\"\n return [\n ([kb11_traces_lengths()], [kb11_area_value()], [\"kb11_full\"]),\n ([kb11_traces_lengths()[0:50]], [kb11_area_value()], [\"kb11_50\"]),\n (\n [kb11_traces_lengths(), hastholmen_traces_lengths()],\n [kb11_area_value(), hastholmen_area_value()],\n [\"kb11_full\", \"hastholmen_full\"],\n ),\n ]\n\n\ndef test_fit_to_multi_scale_lengths_params():\n \"\"\"\n Params for test_fit_to_multi_scale_lengths.\n \"\"\"\n return [\n [\n length_distributions.LengthDistribution(\n name=\"kb11\", lengths=kb11_traces_lengths(), area_value=kb11_area_value()\n ),\n length_distributions.LengthDistribution(\n name=\"hastholmen\",\n lengths=hastholmen_traces_lengths(),\n area_value=hastholmen_area_value(),\n ),\n ]\n ]\n\n\ndef test_aggregate_chosen_params():\n \"\"\"\n Params for test_aggregate_chosen.\n \"\"\"\n return [\n (\n [\n {\n general.Param.AREA.value.name: 2000.0,\n general.Param.CIRCLE_COUNT.value.name: 20.0,\n general.Param.FRACTURE_INTENSITY_P21.value.name: 2.0,\n general.Param.CONNECTIONS_PER_BRANCH.value.name: 1.2,\n },\n {\n general.Param.AREA.value.name: 4000.0,\n general.Param.CIRCLE_COUNT.value.name: 40.0,\n general.Param.FRACTURE_INTENSITY_P21.value.name: 2.0,\n general.Param.CONNECTIONS_PER_BRANCH.value.name: 1.4,\n },\n ],\n float,\n {\n general.Param.AREA.value.name: 6000.0,\n general.Param.CIRCLE_COUNT.value.name: 60.0,\n general.Param.FRACTURE_INTENSITY_P21.value.name: 2.0,\n general.Param.CONNECTIONS_PER_BRANCH.value.name: 1.3333333333333333,\n },\n ),\n (\n [\n {\n general.Param.AREA.value.name: 2000.0,\n \"random-column\": \"Some name?\",\n },\n {\n general.Param.AREA.value.name: 4000.0,\n \"random-column\": \"Some other name?\",\n },\n ],\n (float, str),\n {\n general.Param.AREA.value.name: 6000.0,\n \"random-column\": str([\"Some name?\", \"Some other name?\"]),\n },\n ),\n ]\n\n\ndef test_random_sample_of_circles_params():\n \"\"\"\n Params for test_random_sample_of_circles.\n \"\"\"\n return [\n (\n {\n \"geta7\": [\n {\n general.Param.AREA.value.name: 2000.0,\n general.Param.CIRCLE_COUNT.value.name: 20.0,\n general.Param.FRACTURE_INTENSITY_P21.value.name: 2.0,\n general.Param.CONNECTIONS_PER_BRANCH.value.name: 1.2,\n },\n {\n general.Param.AREA.value.name: 4000.0,\n general.Param.CIRCLE_COUNT.value.name: 40.0,\n general.Param.FRACTURE_INTENSITY_P21.value.name: 2.0,\n general.Param.CONNECTIONS_PER_BRANCH.value.name: 1.4,\n },\n ]\n },\n {\"geta7\": 12.5},\n 1,\n None,\n ),\n (\n {\n \"geta7\": [\n {\n general.Param.AREA.value.name: 2000.0,\n general.Param.CIRCLE_COUNT.value.name: 20.0,\n general.Param.FRACTURE_INTENSITY_P21.value.name: 2.0,\n general.Param.CONNECTIONS_PER_BRANCH.value.name: 1.2,\n },\n {\n general.Param.AREA.value.name: 4000.0,\n general.Param.CIRCLE_COUNT.value.name: 40.0,\n general.Param.FRACTURE_INTENSITY_P21.value.name: 2.0,\n general.Param.CONNECTIONS_PER_BRANCH.value.name: 1.4,\n },\n ],\n \"geta6\": [\n {\n general.Param.AREA.value.name: 2000.0,\n general.Param.CIRCLE_COUNT.value.name: 20.0,\n general.Param.FRACTURE_INTENSITY_P21.value.name: 2.0,\n general.Param.CONNECTIONS_PER_BRANCH.value.name: 1.2,\n },\n {\n general.Param.AREA.value.name: 4000.0,\n general.Param.CIRCLE_COUNT.value.name: 40.0,\n general.Param.FRACTURE_INTENSITY_P21.value.name: 2.0,\n general.Param.CONNECTIONS_PER_BRANCH.value.name: 1.4,\n },\n ],\n },\n {\"geta7\": 12.5, \"geta6\": 5.0},\n 2,\n None,\n ),\n ]\n\n\ndef test_collect_indexes_of_base_circles_params():\n \"\"\"\n Params for 
test_collect_indexes_of_base_circles.\n \"\"\"\n return [\n ([1, 2, 3], 1, [10, 10, 20]),\n ([1, 2, 4], 1, [10, 10, 30]),\n ([100, 2323, 10000], 2, [10, 10, 30]),\n ([100, 2323, 10000], 3, [10, 10, 30]),\n ]\n" ]
[ [ "numpy.array", "pandas.Series" ] ]
yxnchen/SLM-Lihang-Notes
[ "8effca5f809a1b3e563661d9a2f6774ab915e37a" ]
[ "code/perceptron.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on: 2019/4/10 13:46\n\n@author: its_cyx\n\"\"\"\n\nfrom utils import load_mldata\nfrom sklearn.linear_model import Perceptron\n\nX_train, y_train, X_test, y_test = load_mldata.fetch_mnist()\n\n# 二分类:数字5和其他\ny_train_5 = (y_train == 5)\ny_train_5 = y_train_5.ravel()\ny_test_5 = (y_test == 5)\ny_test_5 = y_test_5.ravel()\n\nclassifier = Perceptron(tol=1e-3, random_state=2019)\nclassifier.fit(X_train, y_train_5)\nclassifier.score(X_test, y_test_5)\n" ]
[ [ "sklearn.linear_model.Perceptron" ] ]
susheels/adgcl
[ "2605ef8f980934c28d545f2556af5cc6ff48ed18" ]
[ "unsupervised/convs/wgin_conv.py" ]
[ "from typing import Callable, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.typing import OptPairTensor, Adj, Size\n\nfrom unsupervised.convs.inits import reset\n\n\nclass WGINConv(MessagePassing):\n\tdef __init__(self, nn: Callable, eps: float = 0., train_eps: bool = False,\n\t\t\t\t **kwargs):\n\t\tkwargs.setdefault('aggr', 'add')\n\t\tsuper(WGINConv, self).__init__(**kwargs)\n\t\tself.nn = nn\n\t\tself.initial_eps = eps\n\t\tif train_eps:\n\t\t\tself.eps = torch.nn.Parameter(torch.Tensor([eps]))\n\t\telse:\n\t\t\tself.register_buffer('eps', torch.Tensor([eps]))\n\t\tself.reset_parameters()\n\n\tdef reset_parameters(self):\n\t\treset(self.nn)\n\t\tself.eps.data.fill_(self.initial_eps)\n\n\tdef forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, edge_weight = None,\n\t\t\t\tsize: Size = None) -> Tensor:\n\t\t\"\"\"\"\"\"\n\t\tif isinstance(x, Tensor):\n\t\t\tx: OptPairTensor = (x, x)\n\n\t\t# propagate_type: (x: OptPairTensor)\n\t\tout = self.propagate(edge_index, x=x, edge_weight=edge_weight, size=size)\n\n\t\tx_r = x[1]\n\t\tif x_r is not None:\n\t\t\tout += (1 + self.eps) * x_r\n\n\t\treturn self.nn(out)\n\n\tdef message(self, x_j: Tensor, edge_weight) -> Tensor:\n\t\treturn x_j if edge_weight is None else x_j * edge_weight.view(-1, 1)\n\n\n\tdef __repr__(self):\n\t\treturn '{}(nn={})'.format(self.__class__.__name__, self.nn)" ]
[ [ "torch.Tensor" ] ]
quangmnh/UltimaTTTBot
[ "2307a164934ac82ec318662dfe8ecb063b68e113" ]
[ "P2.py" ]
[ "# // _ooOoo_\n# // o8888888o\n# // 88\" . \"88\n# // (| -_- |)\n# // O\\ = /O\n# // ____/`---'\\____\n# // .' \\\\| |// `.\n# // / \\\\||| : |||// \\\n# // / _||||| -:- |||||- \\\n# // | | \\\\\\ - /// | |\n# // | \\_| ''\\---/'' | |\n# // \\ .-\\__ `-` ___/-. /\n# // ___`. .' /--.--\\ `. . __\n# // .\"\" '< `.___\\_<|>_/___.' >'\"\".\n# // | | : `- \\`.;`\\ _ /`;.`/ - ` : | |\n# // \\ \\ `-. \\_ __\\ /__ _/ .-` / /\n# //======`-.____`-.___\\_____/___.-`____.-'======\n# // `=---='\n# //\n# //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n# // 佛祖保佑 永无BUG\n# // God Bless Never Crash\n\nimport numpy as np\nfrom state import State, State_2, UltimateTTT_Move\nimport sys\nimport random\n\nAPPROXIMATE_WIN_SCORE = 7\nBIG_BOARD_WEIGHT = 23\nWIN_SCORE = 10**6\nPOSSIBLE_WIN_SEQUENCES = np.array ([\n [0, 1, 2], \n [3, 4, 5], \n [6, 7, 8], \n [0, 3, 6], \n [1, 4, 7], \n [2, 5, 8], \n [0, 4, 8], \n [2, 4, 6]\n ])\nALPHA_BETA_DEPTH = 3\nSCORE_PER_CELL = {\n 0: 3, \n 1: 2, \n 2: 3, \n 3: 2, \n 4: 5, \n 5: 2, \n 6: 3, \n 7: 2, \n 8: 3\n }\n\ndef _legal_cells(block):\n block_flatten = block.ravel()\n return len([i for i in range(9) if block_flatten[i] == 0.0])\n\ndef _is_block_full(block):\n return _legal_cells(block) == 0\n\ndef _is_gboard_full(blocks):\n _available_blocks = []\n for i, block in enumerate(blocks):\n if not _is_block_full(block):\n _available_blocks.append(i)\n return len(_available_blocks) == 0\n\ndef _is_terminal_state(cur_state):\n return cur_state.game_over or _is_gboard_full(cur_state.blocks)\n\ndef _terminal_test(cur_state, depth):\n return _is_terminal_state(cur_state) or depth == 0\n\ndef _assess_global(block_flatten, player):\n player_counter = 0\n opponent_counter = 0\n opponent = player * -1\n for seq in POSSIBLE_WIN_SEQUENCES:\n filtered_seq = []\n filtered_indices = []\n for index in seq:\n if block_flatten[index] != 0.0:\n filtered_seq.append(block_flatten[index])\n filtered_indices.append(index)\n if len(filtered_seq) == 0:\n continue\n if player in filtered_seq:\n if opponent in filtered_seq:\n continue\n if len(filtered_seq) > 1:\n player_counter += APPROXIMATE_WIN_SCORE\n if len(filtered_seq) == 3:\n player_counter *= APPROXIMATE_WIN_SCORE \n player_counter += 1\n elif opponent in filtered_seq:\n if len(filtered_seq) > 1:\n opponent_counter += APPROXIMATE_WIN_SCORE\n if len(filtered_seq) == 3:\n opponent_counter *= APPROXIMATE_WIN_SCORE\n opponent_counter += 1\n return player_counter - opponent_counter\n\ndef _assess_block(block_flatten, player):\n player_counter = 0\n opponent_counter = 0\n opponent = player * -1\n for seq in POSSIBLE_WIN_SEQUENCES:\n filtered_seq = []\n filtered_indices = []\n for index in seq:\n if block_flatten[index] != 0.0:\n filtered_seq.append(block_flatten[index])\n filtered_indices.append(index)\n if player in filtered_seq:\n if opponent in filtered_seq:\n continue\n if len(filtered_seq) > 1:\n player_counter += APPROXIMATE_WIN_SCORE\n if len(filtered_seq) == 3:\n player_counter += APPROXIMATE_WIN_SCORE\n player_counter += 1\n elif opponent in filtered_seq:\n if len(filtered_seq) > 1:\n opponent_counter += APPROXIMATE_WIN_SCORE\n if len(filtered_seq) == 3:\n opponent_counter += APPROXIMATE_WIN_SCORE\n opponent_counter += 1\n return player_counter - opponent_counter\n\n\ndef _eval_state(cur_state, player):\n if cur_state.game_result(cur_state.global_cells.reshape(3,3)) != None:\n winner = cur_state.game_result(cur_state.global_cells.reshape(3,3))\n free_cells = 0 \n for block in cur_state.blocks:\n free_cells += _legal_cells(block)\n return 
(WIN_SCORE + free_cells) if (winner == -player) else (-WIN_SCORE - free_cells)\n if _is_gboard_full(cur_state.blocks):\n return 0\n ret = _assess_global(cur_state.global_cells, player) * BIG_BOARD_WEIGHT\n for i,block in enumerate(cur_state.blocks):\n if not _is_block_full(block) and cur_state.game_result(block) == None:\n # if not _is_block_full(block):\n ret += _assess_block(block.ravel(), player)\n return ret\n\n\ndef _generate_succ(cur_state, move):\n new_state = State(cur_state)\n new_state.free_move = cur_state.free_move\n new_state.act_move(move)\n return new_state\n\n\ndef _min_val_ab(cur_state, depth, alpha=-sys.maxsize-1, beta=sys.maxsize):\n if _terminal_test(cur_state, depth):\n return _eval_state(cur_state, cur_state.player_to_move)\n val = sys.maxsize\n for move in cur_state.get_valid_moves:\n successor_state = _generate_succ(cur_state, move)\n val = min(val, _max_val_ab(successor_state, depth-1, alpha, beta))\n if val <= alpha:\n return val\n beta = min(beta, val)\n return val\n\n\ndef _max_val_ab(cur_state, depth, alpha=-sys.maxsize-1, beta=sys.maxsize):\n if _terminal_test(cur_state, depth):\n return _eval_state(cur_state, cur_state.player_to_move)\n val = -sys.maxsize-1\n for move in cur_state.get_valid_moves:\n successor_state = _generate_succ(cur_state, move)\n val = max(val, _min_val_ab(successor_state, depth, alpha, beta))\n if val >= beta:\n return val\n alpha = max(alpha, val)\n return val\n\ndef _run_AB(cur_state, DEPTH):\n moves_res = []\n for move in cur_state.get_valid_moves:\n successor_state = _generate_succ(cur_state, move)\n weight = _min_val_ab(successor_state, DEPTH)\n moves_res.append((move, weight))\n\n move, best_val = max(moves_res, key=lambda x: x[1])\n return random.choice([best_move for best_move, val in moves_res if val==best_val])\n\n# GOOD\ndef select_move(cur_state, remain_time):\n valid_moves = cur_state.get_valid_moves\n jump = random.random()\n if len(valid_moves) != 0:\n if cur_state.previous_move == None:\n return _run_AB(cur_state,1)\n # return _run_AB(cur_state,1)\n return _run_AB(cur_state,2)\n return None\n\n# def select_move(cur_state, remain_time):\n# valid_moves = cur_state.get_valid_moves\n# jump = random.random()\n# if len(valid_moves) != 0:\n# if cur_state.previous_move == None:\n# return _run_AB(cur_state,1)\n# return _run_AB(cur_state,1)\n# # return _run_AB(cur_state,2)\n# return None" ]
[ [ "numpy.array" ] ]
miguel-mzbi/MachineLearning-DataMining
[ "d589e89c85ccc7cba129c9a489c49f61a4298c5d" ]
[ "HW3/ratingprank.py" ]
[ "# Input: number of iterations L\n# number of labels k\n# matrix X of features, with n rows (samples), d columns (features)\n# X[i,j] is the j-th feature of the i-th sample\n# vector y of labels, with n rows (samples), 1 column\n# y[i] is the label (1 or 2 ... or k) of the i-th sample\n# Output: vector theta of d rows, 1 column\n# vector b of k-1 rows, 1 column\nimport numpy as np\n\ndef stl(yt, l):\n if yt <= l+1:\n return -1\n else:\n return 1\n\ndef run(L,k,X,y):\n n, d = X.shape\n theta = np.zeros((d, 1))\n b = np.zeros((k-1, 1))\n \n for l in range(k-1):\n b[l] = l\n \n for _ in range(1, L+1):\n for t in range(0, n):\n E = set()\n for l in range(k-1):\n condition = stl(y[t], l) * (np.dot(X[t], theta) - b[l])\n if condition <= 0:\n E.add(l)\n if E:\n summ = 0\n for l in E:\n summ += stl(y[t], l)\n \n theta += summ*np.reshape(X[t], (d, 1))\n\n for l in E:\n b[l] -= stl(y[t], l)\n return (theta, b)" ]
[ [ "numpy.dot", "numpy.reshape", "numpy.zeros" ] ]
rheinonen/hw_ml
[ "516f707ef2ec2b611333df1c1f94fcab4a9e8457" ]
[ "prepare_data.py" ]
[ "from boutdata.collect import collect\nfrom boututils import calculus as calc\nimport math\nimport numpy as np\nimport bout_field as bf\n\n\nphi=bf.Field(name='phi',dx=0.1)\nn=bf.Field(name='n',dx=0.1)\nvort=bf.Field(name='vort',dx=0.1)\n\nprint('collecting data')\nphi.collect()\nn.collect()\nvort.collect()\n\nprint('computing mean fields')\n#extract relevant mean fields\nphi_zonal=phi.zonal()\nphi_fluc=phi.fluc()\nn_zonal=n.zonal()\nn_fluc=n.fluc()\nvort_zonal=vort.zonal()\nvort_fluc=vort.fluc()\n\ntrange=n.dims[0]\n#clear some memory\ndel phi\ndel n\ndel vort\n\nq_fluc=n_fluc-vort_fluc\n\ndel n_fluc\n\nens_zonal=0.5*(q_fluc**2).zonal()\ndel q_fluc\nvx=phi_fluc.deriv(2)\ndel phi_fluc\n#ens_flux=0.5*((q_fluc**2)*vx).zonal()\n#n_flux=(n_fluc*vx).zonal()\nvort_flux=(vort_fluc*vx).zonal()\ndel vort_fluc\n#build array corresponding to x points\nx=[i for i in range(0,516)]\nx=np.divide(x,515.)\n\n#add in background density field\n\n#beta=\n#bg=(-0.5*beta*np.square(x)+0.5*beta)*515*0.1\n\nprint('adding in background density')\nk=1\nbg=k*515*0.1*x\nbg=np.repeat(bg[np.newaxis,:,np.newaxis],trange,axis=0)\nn_zonal.data=np.add(n_zonal.data,bg)\n\nxpoints=16\ntmin=25\n\nprint('coarse graining and packaging data')\n\n#prepare data for analysis\n\n#ens_flux_std=ens_flux.stddev(xpoints).clean(tmin)\n#ens_flux=ens_flux.mean_x(xpoints).clean(tmin)\n#n_flux_std=n_flux.stddev(xpoints).clean(tmin)\n#n_flux=n_flux.mean_x(xpoints).clean(tmin)\n#vort_flux_std=vort_flux.stddev(xpoints).clean(tmin)\nvort_flux=vort_flux.mean_x(xpoints).clean(tmin)\nens=ens_zonal.mean_x(xpoints).clean(tmin)\n#print(ens.shape)\n#ens_x=ens_zonal.secants(xpoints)\n#print(ens_x.dims)\n#ens_x=ens_x.clean(tmin)\n#print(ens_x.shape)\nvort=vort_zonal.mean_x(xpoints).clean(tmin)\nvort_x=vort_zonal.secants(xpoints).clean(tmin)\n#n=n_zonal.mean_x(xpoints).clean(tmin)\n#phi=phi_zonal.mean_x(xpoints).clean(tmin)\n\nvort_xx=vort_zonal.mean_d2(xpoints).clean(tmin)\nvort_xxx=vort_zonal.mean_d3(xpoints).clean(tmin)\n\n#todo: compute averages over windows\n\nn_x=n_zonal.secants(xpoints)\nn_x=n_x.clean(tmin)\nprint(n_x.size)\n\nn_xx=n_zonal.mean_d2(xpoints).clean(tmin)\n\n#phi_x=phi_zonal.secants(xpoints).clean()\n\n#save the data\nnp.savez('cleaned_data_vort.npz',vort_flux=vort_flux,n_xx=n_xx,ens=ens,vort=vort,n_x=n_x,vort_x=vort_x,vort_xx=vort_xx,vort_xxx=vort_xxx)\n" ]
[ [ "numpy.savez", "numpy.add", "numpy.divide", "numpy.repeat" ] ]
ehgh/product-rationalization
[ "42aaa8167f2981a9e2d9790ff2743310acdc3f81" ]
[ "p_q_search.py" ]
[ "import basket_completion as bc\nimport basket_generation as bg\nimport sys\nimport itertools\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport pandas\nimport warnings\nimport pickle\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nsys.path.insert(0, \"../node2vec_embeddings_modified/src\")\nimport node2vec_main as n2v\n\nsys.path.insert(0, \"../p2v-map\")\nimport p2v_main as p2v\n\nfrom timeit import default_timer as timer\nfrom multiprocessing import Pool as mpPool\n\nnp.set_printoptions(precision = 4, \n suppress = True, \n linewidth=500, \n #threshold=sys.maxsize,\n edgeitems=8\n )\n\nselection = 'top_2' #average / max / top_2\nmethod='p2v' #'node2v' / 'p2v' / 'random' \ndimensions = 128\ngenerate_baskets = False\nbuild_graph = generate_baskets or False\noverwrite_embedding = True\nparallel = True\nfilename = 'out_files/embedding_%s_%s_p%s_q%s_minlen%s.npy' % ('%s', method, '%d', '%g', '%d')\n\nif generate_baskets:\n print('basket generation')\n bg.main(I=200,\n T=100,\n C=20,\n Jc=15,\n Gamma0=0.5)\n\ndef embed_and_basket_completion(p, q, n):\n\n if generate_baskets:\n N = 20*15\n N = 300#1929\n min_basket_len = n\n #sim_v2 1500\n #sim 300\n\n if build_graph:\n embedding = n2v.main(input='data_graph/baskets_train.csv',\n input_format= 'basketlist',\n dimensions=3,\n walk_length=3,\n output='../node2vec_embeddings_modified/emb/baskets_train.emd',\n overwrite=True,\n overwrite_walks=True,\n overwrite_transitions=True,\n num_walks=2,\n window_size=10,\n iter=1,\n p=p, \n q=q,\n N=N,\n min_basket_len=min_basket_len,\n num_basket=10000)\n if method == 'node2v':\n if not overwrite_embedding:\n embedding = np.load(filename%('v', p, q, n))\n else:\n embedding = n2v.main(input='data_graph/baskets.graph',\n input_format= 'basketgraph',\n dimensions=dimensions,\n walk_length=50,\n output='../node2vec_embeddings_modified/emb/baskets_train.emd',\n overwrite=True,\n overwrite_walks=False,\n overwrite_transitions=False,\n num_walks=1000,\n window_size=n,\n iter=5,\n p=p, \n q=q,\n N=N,\n min_basket_len=min_basket_len)\n print('embedding shape', embedding.shape)\n np.save(filename%('v', p, q, n), embedding)\n \n ###for p2v comment n2v and uncomment this\n elif method == 'p2v':\n if not overwrite_embedding:\n embedding = (np.load(filename%('v', p, q, n)), \n np.load(filename%('w', p, q, n)))\n else:\n embedding = p2v.main(data_dir = 'p2v',\n output_dir = '../p2v-map/results',\n control_dir = '../p2v-map/control',\n dimensions=dimensions,\n p=p, \n q=q,\n N=N,\n min_basket_len=min_basket_len)\n np.save(filename%('v', p, q, n), embedding[0])\n np.save(filename%('w', p, q, n), embedding[1])\n ###for random\n else:\n embedding = np.array([]), np.array([])\n acc = bc.basket_completion_accuracy(embedding=embedding, \n plot_embedding=False,\n clustering=False,\n n_clusters=20,\n p=p, \n q=q,\n N=N,\n selection=selection,\n method=method, \n n_sample=15,\n bootstrap=10,\n min_basket_len=min_basket_len)\n return acc\n\ndef plot_accuracy(p_q, acc_l):\n df = pd.DataFrame([(*i,*j) for (i,j) in zip(p_q,acc_l)], \n columns=['p', 'q', 'n', 'accuracy', 'lower', 'median', 'upper']\n ).set_index(['n','q'])\n df['CI'] = df['upper'] - df['lower']\n df.to_csv(os.path.join('out_files', 'accuracy_'+method+'_'+selection+'.csv'))\n print(df)\n plt.clf()\n f = df['accuracy'].unstack(level=1).plot.bar(\n yerr=np.stack([(df['accuracy']-df['lower']).unstack(level=1).to_numpy().T,\n (df['upper']-df['accuracy']).unstack(level=1).to_numpy().T]).transpose(1,0,2),\n 
capsize=4).get_figure()\n f.savefig(os.path.join('images', 'accuracy_'+method+'_'+selection+'.pdf'))\n\n\nresult_list = []\ndef log_result(result):\n result_list.append(result)\n\ndef main():\n\n p_range = [1000000]\n q_range = [0.2, 0.5 ,1 , 2, 4]\n q_range = [1.2, 1.4, 1.6, 1.8]\n q_range = [0.2, 0.5 ,1 ,1.2, 1.4, 1.6, 1.8, 2, 4]\n n_range = [2,3,4,5]\n q_range = [0.5]\n q_range = [0.2, 0.5, 1 ,1.2, 1.4, 1.6, 1.8, 2, 4]\n n_range = [2,3,4,5,6,7]\n q_range = [1, 2]\n n_range = [4,5,6]\n p_q = list(itertools.product(p_range, q_range, n_range))\n print(p_q)\n acc_l = []\n \n if parallel:\n nCPU = min([6, len(p_q)])\n print('nCPU: {}'.format(nCPU))\n pool = mpPool(nCPU)\n acc = pool.starmap_async(embed_and_basket_completion, p_q)\n acc_l = acc.get()\n pool.close()\n pool.join()\n else:\n for (p,q,n) in p_q:\n start = timer()\n print('p = {}, q = {}, n = {}'.format(p,q,n))\n acc = embed_and_basket_completion(p, q, n)\n acc_l.append(acc)\n print('loop time {}'.format(timer()-start))\n\n plot_accuracy(p_q, acc_l)\n \nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.load", "numpy.save", "numpy.set_printoptions", "matplotlib.pyplot.clf", "numpy.array" ] ]
Matej-Chmel/KVContest-data-test-suite
[ "ff6db5a16b6653a9bb85876a88451dd8b9cc8bad" ]
[ "src/common/graph.py" ]
[ "from itertools import zip_longest\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef extract_sublist(source, idx, default=0):\n \"\"\"Return list from elements at idx for each sublist in source\n or default if such element is empty string.\n Args:\n source (list): List of sublists.\n idx (int): Element index.\n \"\"\"\n return [sublist[idx] if sublist[idx] != '' else default for sublist in source]\n\nclass Bar:\n \"\"\"Args:\n upper (list): Main data for bars.\n upper_label (str) opt: Bars label.\n bottom (list) opt: Bottom part data.\n same_loc (Bar) opt: Bars plotted on same location.\n \"\"\"\n def __init__(\n self, upper=None, upper_label='', bottom=None, bottom_label='', same_loc=None\n ): \n self.upper = upper\n self.label = upper_label\n self.bottom = bottom\n self.blabel = bottom_label\n self.same_loc = same_loc\n def plot(self, ax, loc, width):\n if self.bottom:\n ax.bar(loc, self.bottom, width, label=self.blabel)\n ax.bar(loc, self.upper, width, bottom=self.bottom, label=self.label)\n if self.same_loc:\n self.same_loc.plot(ax, loc, width)\n @staticmethod\n def chart(\n data, title='', xlabel='', ylabel='', group_labels=None,\n width=0.5\n ):\n \"\"\"\n Args:\n data (list): List of Bars or single Bar.\n --- Item (tuple | Bar): initializes Bar object.\n title (str) opt: Title graph.\n xlabel (str) opt: Label x axis.\n ylabel (str) opt: Label y axis.\n group_labels (list) opt: Label each group.\n width (float) opt: Width of a group.\n \"\"\"\n fig, ax = plt.subplots()\n try:\n if not isinstance(data, list):\n data = [data]\n loc = np.arange(\n len(data[0].upper)\n if isinstance(data[0], Bar)\n else len(data[0][0])\n )\n bars = len(data)\n swidth = width/bars\n except TypeError:\n print('Empty graph will be shown.')\n else:\n for idx, item in enumerate(data):\n if isinstance(item, Bar):\n item.plot(ax, loc + (idx*swidth - ((bars - 1)/2)*(width/bars)), swidth)\n else:\n Bar(*item).plot(ax, loc + (idx*swidth - ((bars - 1)/2)*(width/bars)), swidth)\n ax.set_xticks(loc)\n if group_labels:\n ax.set_xticklabels(group_labels)\n\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_title(title)\n ax.legend()\n\n fig.tight_layout()\n plt.show()\n @staticmethod\n def unpack(source, from_idx=0, labels=None):\n \"\"\"\n Args:\n source (list of lists): Data.\n from_idx (int): Start index of each sublist.\n labels (list | int): Labels for bars.\n If int, idx of sublist items where labels are located in source.\n Returns:\n list: List of Bar objects.\n If used inside list, \n please unpack return value of this method with *.\n \"\"\"\n if isinstance(labels, int):\n labels = [sublist[labels] for sublist in source]\n def _bar_generator():\n for upper, label in zip_longest(list(zip(*source))[from_idx:], labels):\n yield Bar(upper, label)\n return list(_bar_generator())\n\ndef pie_chart(data, labels, explode=None, title='', shadow=True, start_angle=90):\n \"\"\"Args:\n data (list): Values in order of labels.\n labels (list): Names for fractions.\n explode (list) opt: Explode fractions out of pie.\n title (str) opt.\n shadow (bool) opt: Draw shadow around pie?\n start_angle (float) opt.\n \"\"\" \n fig, ax = plt.subplots()\n ax.pie(\n data, labels=labels, explode=explode,\n autopct='%1.1f%%', shadow=shadow, startangle=start_angle\n )\n ax.axis('equal')\n ax.set_title(title)\n\n fig.tight_layout()\n plt.show()\n\n### demos ###\n\ndef _bar1():\n Bar.chart(\n [\n ([1, 2, 3], '1 A', [4, 5, 6], '1 B'),\n ([7, 8, 9], '2'),\n Bar(\n [10, 1, 0], '3 A', [4, 5, 0], '3 B',\n Bar(\n [0, 0, 8], '3 
Replacement'\n )\n )\n ],\n title='Bar chart demo',\n xlabel='X axis, groups',\n ylabel='Y axis, values',\n group_labels=['ABC', 'DEF', 'GHI'],\n width=0.33\n )\ndef _bar2():\n Bar.chart(\n [\n *Bar.unpack(\n [\n [1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]\n ], labels=['Label One']\n )\n ]\n )\ndef _bar3():\n Bar.chart(\n Bar.unpack(\n [\n ['A', 1, 7],\n ['B', 3, 5]\n ], from_idx=1, labels=0\n )\n )\ndef _single_bar():\n Bar.chart(Bar([1, 2, 3], 'One'))\n\ndef _pie1():\n pie_chart(\n [10, 20, 30, 40],\n 'ABCD',\n title='Pie chart ABCD 001'\n )\n\nif __name__ == \"__main__\":\n # _bar1()\n # _bar2()\n # _bar3()\n # _single_bar()\n _pie1()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
RunzeXU/dissecting-reinforcement-learning
[ "36b418481aed016901c2da5132d44b05074929e9" ]
[ "src/assignment_multiArmedBandit/epsilon_greedy_agent_bandit_mingap.py" ]
[ "#!/usr/bin/env python\n\n# MIT License\n# Copyright (c) 2017 Massimiliano Patacchiola\n# https://mpatacchiola.github.io/blog/\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n#Average cumulated reward: 763.802\n#Std Cumulated Reward: 20.4605179798\n#Average utility distribution: [ 0.2934227 0.49422608 0.80003897]\n#Average utility RMSE: 0.00505307354513\n\nfrom multi_armed_bandit import MultiArmedBandit\nimport numpy as np\nimport random\n\ndef return_rmse(predictions, targets):\n \"\"\"Return the Root Mean Square error between two arrays\n\n @param predictions an array of prediction values\n @param targets an array of target values\n @return the RMSE\n \"\"\"\n return np.sqrt(((predictions - targets)**2).mean())\n\ndef return_epsilon_greedy_action(epsilon, reward_counter_array):\n \"\"\"Return an action using an epsilon greedy strategy\n\n @return the action selected\n \"\"\"\n tot_actions = reward_counter_array.shape[0]\n if random.uniform(0, 1) <= epsilon:\n action = np.random.randint(low=0, high=tot_actions)\n else:\n amax = np.amax(reward_counter_array)\n #choose arms with max probability and ones are higher than 80%*max\n indices = np.where(reward_counter_array >= amax*0.65)[0]\n action = np.random.choice(indices)\n return action\n\ndef main():\n reward_distribution = [0.3, 0.5, 0.8]\n my_bandit = MultiArmedBandit(reward_probability_list=reward_distribution)\n epsilon = 0.1\n tot_arms = 3\n tot_episodes = 2000\n tot_steps = 1000\n print_every_episodes = 100\n cumulated_reward_list = list()\n average_utility_array = np.zeros(tot_arms)\n print(\"Starting epsilon-greedy agent...\")\n for episode in range(tot_episodes):\n cumulated_reward = 0\n reward_counter_array = np.zeros(tot_arms)\n action_counter_array = np.full(tot_arms, 1.0e-5)\n for step in range(tot_steps):\n action = return_epsilon_greedy_action(epsilon, np.true_divide(reward_counter_array, action_counter_array))\n reward = my_bandit.step(action)\n reward_counter_array[action] += reward \n action_counter_array[action] += 1 \n cumulated_reward += reward\n # Append the cumulated reward for this episode in a list\n cumulated_reward_list.append(cumulated_reward)\n utility_array = np.true_divide(reward_counter_array, action_counter_array)\n average_utility_array += utility_array\n if episode % print_every_episodes == 0:\n print(\"Episode: \" + str(episode))\n print(\"Cumulated Reward: \" + str(cumulated_reward))\n print(\"Reward counter: \" + str(reward_counter_array))\n print(\"Utility distribution: \" + 
str(utility_array))\n print(\"Utility RMSE: \" + str(return_rmse(utility_array, reward_distribution)))\n print(\"\")\n # Print the average cumulated reward for all the episodes\n print(\"Average cumulated reward: \" + str(np.mean(cumulated_reward_list)))\n print(\"Std Cumulated Reward: \" + str(np.std(cumulated_reward_list)))\n print(\"Average utility distribution: \" + str(average_utility_array / tot_episodes))\n print(\"Average utility RMSE: \" + str(return_rmse(average_utility_array/tot_episodes, reward_distribution)))\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.true_divide", "numpy.zeros", "numpy.random.choice", "numpy.full", "numpy.amax", "numpy.std", "numpy.where", "numpy.random.randint", "numpy.mean" ] ]
eth-sri/transformation-smoothing
[ "12a653e881a6d61c5c63a3e16d58292435486cbd" ]
[ "util.py" ]
[ "import PIL\nimport PIL.Image\nfrom functional import compose\nimport numpy as np\nimport argparse\n\n\nlmap = compose(list, map)\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef split(a, n):\n \"\"\"\n Splits a list into n parts of approx. equal length\n from https://stackoverflow.com/questions/2130016/splitting-a-list-into-n-parts-of-approximately-equal-length\n \"\"\"\n k, m = divmod(len(a), n)\n return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))\n\n\ndef str2FloatOrNone(v):\n if v.lower() == 'none':\n return None\n try:\n return float(v)\n except ValueError:\n raise argparse.ArgumentTypeError('Float or none value expected.')\n\n\ndef torch_image_to_PIL(img):\n img = img.cpu().numpy()\n if len(img.shape) == 4:\n img = img[0, ...]\n elif len(img.shape) == 3:\n pass\n else:\n assert False\n img = 255 * np.transpose(img, (1, 2, 0))\n img = np.clip(np.round(img), 0, 255).astype(np.uint8)\n return PIL.Image.fromarray(img)\n\n\nclass Logger(object):\n def __init__(self, filename, stdout):\n self.terminal = stdout\n if filename is not None:\n self.log = open(filename, \"a\")\n else:\n self.log = None\n\n def write(self, message):\n self.terminal.write(message)\n if self.log is not None:\n self.log.write(message)\n\n def flush(self):\n self.terminal.flush()\n if self.log is not None:\n self.log.flush()\n\n\ndef get_interpolation(i):\n return getattr(PIL.Image, i.upper())\n" ]
[ [ "numpy.round", "numpy.transpose" ] ]
atlan-antillia/keras-efficientdet
[ "8dd3eccd5812063927dd32ff00a6e4164904ca76" ]
[ "losses.py" ]
[ "\"\"\"\r\nCopyright 2017-2018 Fizyr (https://fizyr.com)\r\n\r\nLicensed under the Apache License, Version 2.0 (the \"License\");\r\nyou may not use this file except in compliance with the License.\r\nYou may obtain a copy of the License at\r\n\r\n http://www.apache.org/licenses/LICENSE-2.0\r\n\r\nUnless required by applicable law or agreed to in writing, software\r\ndistributed under the License is distributed on an \"AS IS\" BASIS,\r\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\nSee the License for the specific language governing permissions and\r\nlimitations under the License.\r\n\"\"\"\r\n\r\n# import keras\r\nfrom tensorflow import keras\r\nimport tensorflow as tf\r\n\r\n\r\ndef focal(alpha=0.25, gamma=1.5):\r\n \"\"\"\r\n Create a functor for computing the focal loss.\r\n\r\n Args\r\n alpha: Scale the focal weight with alpha.\r\n gamma: Take the power of the focal weight with gamma.\r\n\r\n Returns\r\n A functor that computes the focal loss using the alpha and gamma.\r\n \"\"\"\r\n\r\n def _focal(y_true, y_pred):\r\n \"\"\"\r\n Compute the focal loss given the target tensor and the predicted tensor.\r\n\r\n As defined in https://arxiv.org/abs/1708.02002\r\n\r\n Args\r\n y_true: Tensor of target data from the generator with shape (B, N, num_classes).\r\n y_pred: Tensor of predicted data from the network with shape (B, N, num_classes).\r\n\r\n Returns\r\n The focal loss of y_pred w.r.t. y_true.\r\n \"\"\"\r\n labels = y_true[:, :, :-1]\r\n # -1 for ignore, 0 for background, 1 for object\r\n anchor_state = y_true[:, :, -1]\r\n classification = y_pred\r\n\r\n # filter out \"ignore\" anchors\r\n indices = tf.where(keras.backend.not_equal(anchor_state, -1))\r\n labels = tf.gather_nd(labels, indices)\r\n classification = tf.gather_nd(classification, indices)\r\n\r\n # compute the focal loss\r\n alpha_factor = keras.backend.ones_like(labels) * alpha\r\n alpha_factor = tf.where(keras.backend.equal(labels, 1), alpha_factor, 1 - alpha_factor)\r\n # (1 - 0.99) ** 2 = 1e-4, (1 - 0.9) ** 2 = 1e-2\r\n focal_weight = tf.where(keras.backend.equal(labels, 1), 1 - classification, classification)\r\n focal_weight = alpha_factor * focal_weight ** gamma\r\n cls_loss = focal_weight * keras.backend.binary_crossentropy(labels, classification)\r\n\r\n # compute the normalizer: the number of positive anchors\r\n normalizer = tf.where(keras.backend.equal(anchor_state, 1))\r\n normalizer = keras.backend.cast(keras.backend.shape(normalizer)[0], keras.backend.floatx())\r\n normalizer = keras.backend.maximum(keras.backend.cast_to_floatx(1.0), normalizer)\r\n\r\n return keras.backend.sum(cls_loss) / normalizer\r\n\r\n return _focal\r\n\r\n\r\ndef smooth_l1(sigma=3.0):\r\n \"\"\"\r\n Create a smooth L1 loss functor.\r\n Args\r\n sigma: This argument defines the point where the loss changes from L2 to L1.\r\n Returns\r\n A functor for computing the smooth L1 loss given target data and predicted data.\r\n \"\"\"\r\n sigma_squared = sigma ** 2\r\n\r\n def _smooth_l1(y_true, y_pred):\r\n \"\"\" Compute the smooth L1 loss of y_pred w.r.t. y_true.\r\n Args\r\n y_true: Tensor from the generator of shape (B, N, 5). The last value for each box is the state of the anchor (ignore, negative, positive).\r\n y_pred: Tensor from the network of shape (B, N, 4).\r\n Returns\r\n The smooth L1 loss of y_pred w.r.t. 
y_true.\r\n \"\"\"\r\n # separate target and state\r\n regression = y_pred\r\n regression_target = y_true[:, :, :-1]\r\n anchor_state = y_true[:, :, -1]\r\n\r\n # filter out \"ignore\" anchors\r\n indices = tf.where(keras.backend.equal(anchor_state, 1))\r\n regression = tf.gather_nd(regression, indices)\r\n regression_target = tf.gather_nd(regression_target, indices)\r\n\r\n # compute smooth L1 loss\r\n # f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma\r\n # |x| - 0.5 / sigma / sigma otherwise\r\n regression_diff = regression - regression_target\r\n regression_diff = keras.backend.abs(regression_diff)\r\n regression_loss = tf.where(\r\n keras.backend.less(regression_diff, 1.0 / sigma_squared),\r\n 0.5 * sigma_squared * keras.backend.pow(regression_diff, 2),\r\n regression_diff - 0.5 / sigma_squared\r\n )\r\n\r\n # compute the normalizer: the number of positive anchors\r\n normalizer = keras.backend.maximum(1, keras.backend.shape(indices)[0])\r\n normalizer = keras.backend.cast(normalizer, dtype=keras.backend.floatx())\r\n return keras.backend.sum(regression_loss) / normalizer\r\n\r\n return _smooth_l1\r\n\r\n\r\ndef smooth_l1_quad(sigma=3.0):\r\n \"\"\"\r\n Create a smooth L1 loss functor.\r\n\r\n Args\r\n sigma: This argument defines the point where the loss changes from L2 to L1.\r\n\r\n Returns\r\n A functor for computing the smooth L1 loss given target data and predicted data.\r\n \"\"\"\r\n sigma_squared = sigma ** 2\r\n\r\n def _smooth_l1(y_true, y_pred):\r\n \"\"\" Compute the smooth L1 loss of y_pred w.r.t. y_true.\r\n\r\n Args\r\n y_true: Tensor from the generator of shape (B, N, 5). The last value for each box is the state of the anchor (ignore, negative, positive).\r\n y_pred: Tensor from the network of shape (B, N, 4).\r\n\r\n Returns\r\n The smooth L1 loss of y_pred w.r.t. 
y_true.\r\n \"\"\"\r\n # separate target and state\r\n regression = y_pred\r\n regression = tf.concat([regression[..., :4], tf.sigmoid(regression[..., 4:9])], axis=-1)\r\n regression_target = y_true[:, :, :-1]\r\n anchor_state = y_true[:, :, -1]\r\n\r\n # filter out \"ignore\" anchors\r\n indices = tf.where(keras.backend.equal(anchor_state, 1))\r\n regression = tf.gather_nd(regression, indices)\r\n regression_target = tf.gather_nd(regression_target, indices)\r\n\r\n # compute smooth L1 loss\r\n # f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma\r\n # |x| - 0.5 / sigma / sigma otherwise\r\n regression_diff = regression - regression_target\r\n regression_diff = keras.backend.abs(regression_diff)\r\n box_regression_loss = tf.where(\r\n keras.backend.less(regression_diff[..., :4], 1.0 / sigma_squared),\r\n 0.5 * sigma_squared * keras.backend.pow(regression_diff[..., :4], 2),\r\n regression_diff[..., :4] - 0.5 / sigma_squared\r\n )\r\n\r\n alpha_regression_loss = tf.where(\r\n keras.backend.less(regression_diff[..., 4:8], 1.0 / sigma_squared),\r\n 0.5 * sigma_squared * keras.backend.pow(regression_diff[..., 4:8], 2),\r\n regression_diff[..., 4:8] - 0.5 / sigma_squared\r\n )\r\n\r\n ratio_regression_loss = tf.where(\r\n keras.backend.less(regression_diff[..., 8], 1.0 / sigma_squared),\r\n 0.5 * sigma_squared * keras.backend.pow(regression_diff[..., 8], 2),\r\n regression_diff[..., 8] - 0.5 / sigma_squared\r\n )\r\n # compute the normalizer: the number of positive anchors\r\n normalizer = keras.backend.maximum(1, keras.backend.shape(indices)[0])\r\n normalizer = keras.backend.cast(normalizer, dtype=keras.backend.floatx())\r\n\r\n box_regression_loss = tf.reduce_sum(box_regression_loss) / normalizer\r\n alpha_regression_loss = tf.reduce_sum(alpha_regression_loss) / normalizer\r\n ratio_regression_loss = tf.reduce_sum(ratio_regression_loss) / normalizer\r\n\r\n return box_regression_loss + alpha_regression_loss + 16 * ratio_regression_loss\r\n\r\n return _smooth_l1\r\n" ]
[ [ "tensorflow.keras.backend.sum", "tensorflow.keras.backend.less", "tensorflow.gather_nd", "tensorflow.keras.backend.cast_to_floatx", "tensorflow.sigmoid", "tensorflow.keras.backend.abs", "tensorflow.keras.backend.binary_crossentropy", "tensorflow.keras.backend.not_equal", "tensorflow.keras.backend.floatx", "tensorflow.keras.backend.equal", "tensorflow.keras.backend.shape", "tensorflow.reduce_sum", "tensorflow.keras.backend.pow", "tensorflow.keras.backend.ones_like" ] ]
kaitumisuuringute-keskus/quantipy3
[ "4066f22d1bda38a7082fb055d8a35bef8a7cd786" ]
[ "tests/test_link.py" ]
[ "import unittest\nimport os.path\nimport pandas as pd\n# import numpy as np\nfrom quantipy import dataframe_fix_string_types\nfrom quantipy.core.link import Link\nfrom quantipy.core.stack import Stack\nfrom quantipy.core.helpers.functions import load_json\nfrom quantipy.core.view_generators.view_maps import QuantipyViews\n\nclass TestLinkObject(unittest.TestCase):\n\n# stack.add_link(x='q1', y=y, views=mean_views.subset('m1to6'), weights=weight)\n\n def setUp(self):\n self.path = './tests/'\n# self.path = ''\n project_name = 'Example Data (A)'\n\n # Load Example Data (A) data and meta into self\n name_data = '%s.csv' % (project_name)\n path_data = '%s%s' % (self.path, name_data)\n self.example_data_A_data = pd.read_csv(path_data)\n self.example_data_A_data = dataframe_fix_string_types(self.example_data_A_data)\n name_meta = '%s.json' % (project_name)\n path_meta = '%s%s' % (self.path, name_meta)\n self.example_data_A_meta = load_json(path_meta)\n\n # The minimum list of variables required to populate a stack with all single*delimited set variations\n self.minimum = ['q2b', 'Wave', 'q2', 'q3', 'q5_1']\n\n self.setup_stack_Example_Data_A()\n\n def test_link_is_a_subclassed_dict(self):\n dk = self.stack.name\n fk = 'no_filter'\n xk = self.minimum\n yk = ['@'] + self.minimum\n\n for x in xk:\n for y in yk:\n link = self.stack[dk][fk][x][y]\n self.assertIsInstance(link, dict)\n self.assertIsInstance(link, Link)\n\n def test_link_behaves_like_a_dict(self):\n\n dk = self.stack.name\n fk = 'no_filter'\n xk = self.minimum\n yk = ['@'] + self.minimum\n\n key = \"some_key_name\"\n value = \"some_value\"\n\n for x in xk:\n for y in yk:\n link = self.stack[dk][fk][x][y]\n link[key] = value\n self.assertIn(\n key,\n link.keys(),\n msg=\"Link should have key {data_key}, but has {link_keys}\".format(\n data_key=key,\n link_keys=link.keys()\n )\n )\n\n def test_get_meta(self):\n\n dk = self.stack.name\n fk = 'no_filter'\n xk = self.minimum\n yk = ['@'] + self.minimum\n\n #test returned meta against stack meta\n for x in xk:\n for y in yk:\n link = self.stack[dk][fk][x][y]\n self.assertEqual(link.get_meta(), self.stack[dk].meta)\n\n def test_get_data(self):\n\n dk = self.stack.name\n fk = 'no_filter'\n xk = self.minimum\n yk = ['@'] + self.minimum\n\n stack_data = self.stack[dk][fk].data\n\n #test returned data against stack data\n for x in xk:\n for y in yk:\n link_data = self.stack[dk][fk][x][y].get_data()\n self.assertTrue(link_data is stack_data)\n\n @classmethod\n def tearDownClass(self):\n self.stack = Stack(\"StackName\")\n filepath ='./tests/'+self.stack.name+'.stack'\n if os.path.exists(filepath):\n os.remove(filepath)\n\n def is_empty(self, any_structure):\n if any_structure:\n #print('Structure is not empty.')\n return False\n else:\n #print('Structure is empty.')\n return True\n\n def create_key_stack(self, branch_pos=\"data\"):\n \"\"\" Creates a dictionary that has the structure of the keys in the Stack\n It is used to loop through the stack without affecting it.\n \"\"\"\n key_stack = {}\n for data_key in self.stack:\n key_stack[data_key] = {}\n for the_filter in self.stack[data_key][branch_pos]:\n key_stack[data_key][the_filter] = {}\n for x in self.stack[data_key][branch_pos][the_filter]:\n key_stack[data_key][the_filter][x] = []\n for y in self.stack[data_key][branch_pos][the_filter][x]:\n link = self.stack[data_key][branch_pos][the_filter][x][y]\n if not isinstance(link, Link):\n continue\n key_stack[data_key][the_filter][x].append(y)\n return key_stack\n\n def 
setup_stack_Example_Data_A(self, fk=None, xk=None, yk=None, views=None, weights=None):\n if fk is None:\n fk = [\n 'no_filter',\n 'Wave == 1'\n ]\n if xk is None:\n xk = self.minimum\n if yk is None:\n yk = ['@'] + self.minimum\n if views is None:\n views = ['default']\n if not isinstance(weights, list):\n weights = [weights]\n\n self.stack = Stack(name=\"Example Data (A)\")\n self.stack.add_data(\n data_key=self.stack.name,\n meta=self.example_data_A_meta,\n data=self.example_data_A_data\n )\n\n for weight in weights:\n self.stack.add_link(\n data_keys=self.stack.name,\n filters=fk,\n x=xk,\n y=yk,\n views=QuantipyViews(views),\n weights=weight\n )\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "pandas.read_csv" ] ]
xuhancn/pytorch
[ "5c7d916c3d287f6c86f4d59ca1e2b8cc4cd9cd3e" ]
[ "test/test_jit_cuda_fuser.py" ]
[ "# Owner(s): [\"oncall: jit\"]\n\nimport unittest\nimport os\nimport random\nimport enum\nimport copy\nfrom functools import reduce\nimport operator\nimport warnings\n\nimport torch\nfrom torch.nn import functional\nfrom torch.profiler import profile, ProfilerActivity\n\nfrom torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed\nfrom torch.testing._internal.common_cuda import TEST_MULTIGPU\nfrom torch.testing._internal.common_device_type import instantiate_device_type_tests, ops, OpDTypes\nfrom torch.testing._internal.common_jit import JitCommonTestCase\nfrom torch.testing._internal.common_methods_invocations import op_db, SampleInput\nfrom torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, TEST_WITH_ROCM, slowTest, \\\n is_iterable_of_tensors, freeze_rng_state\nfrom torch.testing._internal.jit_utils import clone_inputs, get_traced_sample_variant_pairs, JitTestCase, RUN_CUDA\nfrom torch.testing._internal.jit_metaprogramming_utils import create_traced_fn\nfrom torch.testing import FileCheck\n\nfrom jit.test_fuser_common import TestFuserCommon # noqa: F401\n\nimport itertools\nimport numpy as np\nimport math\n\nfrom torch.autograd.gradcheck import gradcheck\n\nfrom typing import List\n\nRUN_NVFUSER = RUN_CUDA and not TEST_WITH_ROCM\nCUDA_MAJOR, CUDA_MINOR = 0, 0\n\nif RUN_NVFUSER and torch.version.cuda is not None:\n CUDA_MAJOR, CUDA_MINOR = (int(x) for x in torch.version.cuda.split('.')[:2])\n\nos.environ['PYTORCH_NVFUSER_DISABLE_FALLBACK'] = '1'\nos.environ['PYTORCH_NVFUSER_DISABLE_FMA'] = '1'\nos.environ['PYTORCH_NVFUSER_DISABLE_FASTMATH'] = '1'\nos.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'\nos.environ['PYTORCH_NVFUSER_DISABLE_RNG_UNROLL'] = '1'\n\nif GRAPH_EXECUTOR == ProfilingMode.PROFILING:\n torch._C._jit_set_texpr_fuser_enabled(False)\n torch._C._jit_set_profiling_executor(True)\n torch._C._jit_set_profiling_mode(True)\n\nFUSION_GROUP = 'prim::CudaFusionGroup'\nFUSION_GUARD = 'prim::CudaFusionGuard'\n\nimport contextlib\n\[email protected]\ndef nvfuser_singleton_fusion(flag):\n old_value = torch._C._jit_set_nvfuser_single_node_mode(flag)\n try:\n yield\n finally:\n torch._C._jit_set_nvfuser_single_node_mode(old_value)\n\[email protected]\ndef nvfuser_horizontal_fusion(flag):\n old_value = torch._C._jit_set_nvfuser_horizontal_mode(flag)\n try:\n yield\n finally:\n torch._C._jit_set_nvfuser_horizontal_mode(old_value)\n\ndef is_pre_volta():\n if not RUN_NVFUSER:\n return False\n prop = torch.cuda.get_device_properties(torch.cuda.current_device())\n return prop.major < 7\n\nTEST_BF16 = RUN_NVFUSER and torch.cuda.is_bf16_supported()\n\nclass CudaFuserTestOptions():\n def __init__(self):\n self.old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()\n self.old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()\n torch._C._jit_override_can_fuse_on_cpu(False)\n torch._C._jit_override_can_fuse_on_gpu(False)\n self.old_guard = torch._C._jit_set_nvfuser_guard_mode(False)\n torch._C._debug_set_autodiff_subgraph_inlining(False)\n self.old_value = torch._C._jit_set_autocast_mode(True)\n\n if(RUN_CUDA):\n self.old_nvfuser = torch._C._jit_set_nvfuser_enabled(True)\n\n def restore(self):\n if(RUN_CUDA):\n torch._C._jit_set_nvfuser_enabled(self.old_nvfuser)\n torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuse)\n torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuse)\n torch._C._jit_set_nvfuser_guard_mode(self.old_guard)\n torch._C._debug_set_autodiff_subgraph_inlining(True)\n torch._C._jit_set_autocast_mode(self.old_value)\n\nclass 
TestCudaFuser(JitTestCase):\n def assertEqual(self, *args, **kwargs):\n kwargs[\"exact_layout\"] = True\n super(JitTestCase, self).assertEqual(*args, **kwargs)\n\n def _getSubgraphInFusion(self, graph):\n num_node = 0\n subgraph = None\n\n def count(block, ret):\n for n in block.nodes():\n if n.kind() == FUSION_GROUP:\n ret[0] = ret[0] + 1\n self.assertTrue(n.hasAttribute('Subgraph'))\n ret[1] = n.g('Subgraph')\n for block in n.blocks():\n count(block, ret)\n ret = [num_node, subgraph]\n count(graph, ret)\n self.assertEqual(ret[0], 1)\n return ret[1]\n\n def setUp(self):\n super(TestCudaFuser, self).setUp()\n\n self.skip_node_list = []\n disabled_ops = (\"aten::batch_norm\",\n \"aten::_batch_norm_impl_index\",\n \"aten::_batch_norm_impl_index_backward\",\n \"aten::native_batch_norm_backward\")\n for op in disabled_ops:\n disabled_flag = torch._C._jit_set_nvfuser_skip_node_kind(op, False)\n if disabled_flag:\n torch._C._jit_set_nvfuser_skip_node_kind(op, True)\n self.skip_node_list.append(op)\n\n # cpu backup to avoid errors in case this is run on a CPU-only machine\n dev = 'cuda' if RUN_NVFUSER else 'cpu'\n self.special_values = torch.tensor(\n [float(\"-inf\"), -10, -math.pi,\n -1, -0.5, 0, 1, 0.5,\n math.pi, 10, float(\"inf\"),\n float(\"nan\")], dtype=torch.float, device=dev)\n\n self.int_types = [\n torch.int8,\n torch.uint8,\n torch.int16,\n torch.int32,\n torch.int64\n ]\n\n self.support_tensor_dtypes = [\n torch.int32,\n torch.int64,\n torch.float16,\n torch.float32,\n torch.float64,\n torch.bool\n ]\n if TEST_BF16:\n self.support_tensor_dtypes.append(torch.bfloat16)\n\n if(RUN_NVFUSER):\n self.cuda_fuser_options = CudaFuserTestOptions()\n\n def tearDown(self):\n # restoring skip node to the configuration before tests\n for op in self.skip_node_list:\n disabled_flag = torch._C._jit_set_nvfuser_skip_node_kind(op, False)\n if not disabled_flag:\n torch._C._jit_set_nvfuser_skip_node_kind(op, True)\n\n if(RUN_NVFUSER):\n self.cuda_fuser_options.restore()\n super(TestCudaFuser, self).tearDown()\n\n def _run_helper(self, jit_op, op, *args):\n torch.cuda.manual_seed_all(123)\n jit_o = jit_op(*args)\n torch.cuda.manual_seed_all(123)\n jit_o = jit_op(*args)\n torch.cuda.manual_seed_all(123)\n o = op(*args)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertGraphContainsExactly(jit_op.graph_for(*args), FUSION_GUARD, 1, consider_subgraphs=True)\n\n def _run_training_helper(self, jit_op, op, grads, *args):\n torch.cuda.manual_seed_all(123)\n jit_o = jit_op(*args)\n jit_g = jit_o.backward(grads)\n torch.cuda.manual_seed_all(123)\n jit_o = jit_op(*args)\n jit_g = jit_o.backward(grads)\n torch.cuda.manual_seed_all(123)\n jit_o = jit_op(*args)\n jit_g = jit_o.backward(grads)\n torch.cuda.manual_seed_all(123)\n o = op(*args)\n g = o.backward(grads)\n self.assertEqual(o, jit_o)\n self.assertEqual(g, jit_g)\n self.assertGraphContainsExactly(jit_op.graph_for(*args), FUSION_GUARD, 1, consider_subgraphs=True)\n bwd_graph = list(\n list(jit_op.get_debug_state().execution_plans.values())[\n 0].code.grad_executor_states()[0].execution_plans.values()\n )[0].graph\n self.assertGraphContainsExactly(bwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_half(self):\n def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float):\n o_16 = torch.add(x, y)\n o_32_a = torch.add(y, z, 
alpha=alpha)\n o_32_b = torch.add(o_16, z)\n return (o_16, o_32_a, o_32_b)\n\n t_jit = torch.jit.script(t)\n alpha = 0.5\n # stick to integers, this avoid the numerical difference due to our\n # promotion\n x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device=\"cuda\")\n y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device=\"cuda\")\n z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device=\"cuda\")\n jit_o = t_jit(x, y, z, alpha)\n jit_o = t_jit(x, y, z, alpha)\n o = t(x, y, z, alpha)\n for oo, jit_oo in zip(o, jit_o):\n self.assertEqual(oo.dtype, jit_oo.dtype)\n self.assertEqual(oo, jit_oo)\n self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)\n\n\n @unittest.skipIf(not TEST_BF16, \"device does not support BFloat16\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_bfloat(self):\n def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float):\n o_16 = torch.add(x, y)\n o_32_a = torch.add(y, z, alpha=alpha)\n o_32_b = torch.add(o_16, z)\n return (o_16, o_32_a, o_32_b)\n\n t_jit = torch.jit.script(t)\n alpha = 0.5\n # stick to integers, this avoid the numerical difference due to our\n # promotion\n x = torch.randint(0, 256, (4, 8)).to(dtype=torch.bfloat16, device=\"cuda\")\n y = torch.randint(0, 256, (4, 8)).to(dtype=torch.bfloat16, device=\"cuda\")\n z = torch.randint(0, 256, (4, 8)).to(dtype=torch.bfloat16, device=\"cuda\")\n jit_o = t_jit(x, y, z, alpha)\n jit_o = t_jit(x, y, z, alpha)\n o = t(x, y, z, alpha)\n for oo, jit_oo in zip(o, jit_o):\n self.assertEqual(oo.dtype, jit_oo.dtype)\n self.assertEqual(oo, jit_oo)\n self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_const(self):\n def t(x, y):\n o = x + y\n o = o + 2.0\n return o\n t_jit = torch.jit.script(t)\n x = torch.randn(4, 8, dtype=torch.float, device=\"cuda\")\n y = torch.randn(4, 8, dtype=torch.float, device=\"cuda\")\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n o = t(x, y)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_chunk(self):\n def t(x, y, z, q):\n o = x + q\n x0, x1 = torch.chunk(o, 2)\n o = x0 + x1\n o = o + y\n o = o * z\n o = torch.relu(o)\n return o\n t_jit = torch.jit.script(t)\n x = torch.randn(4, 8, dtype=torch.float, device=\"cuda\")\n y = torch.randn(2, 8, dtype=torch.float, device=\"cuda\")\n z = torch.randn(2, 8, dtype=torch.float, device=\"cuda\")\n q = torch.randn(4, 8, dtype=torch.float, device=\"cuda\")\n jit_o = t_jit(x, y, z, q)\n jit_o = t_jit(x, y, z, q)\n o = t(x, y, z, q)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, y, z, q), FUSION_GUARD)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_reduction_dtypes_axis(self):\n\n for op in [torch.sum, torch.mean, torch.amax, torch.var, torch.std]:\n for dtype in [torch.float16, 
torch.float32, torch.double]:\n for axis in [-1, 2, 0]:\n def make_func(op):\n def func(x: torch.Tensor):\n o = torch.mul(x, 2.0)\n o = op(o, dim=[axis])\n return o\n return func\n\n x = torch.randn(8, 4, 16, dtype=dtype, device=\"cuda\")\n t = make_func(op)\n t_jit = torch.jit.trace(t, x)\n jit_o = t_jit(x)\n jit_o = t_jit(x)\n o = t(x)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, 1e-4))\n self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_variance(self):\n\n for op in [torch.var, torch.std]:\n for dtype in [torch.float16, torch.float32, torch.double]:\n for axis in [-2, -1, 2, 1]:\n for unbiased in [False, True]:\n def make_func(op):\n def func(x: torch.Tensor):\n o = torch.mul(x, 2.0)\n o = op(o, dim=[axis])\n return o\n return func\n\n x = torch.randn(8, 4, 16, dtype=dtype, device=\"cuda\")\n t = make_func(op)\n t_jit = torch.jit.trace(t, x)\n jit_o = t_jit(x)\n jit_o = t_jit(x)\n o = t(x)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, 1e-4))\n self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_scalar_input(self):\n def t(x: torch.Tensor, y: torch.Tensor, z: float):\n o = x + y\n o = o + z\n return o\n t_jit = torch.jit.script(t)\n x = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n y = torch.randn(4, 8, 1, 32, dtype=torch.float, device=\"cuda\")\n y = y.expand(4, 8, 32, 32)\n jit_o = t_jit(x, y, 2.0)\n jit_o = t_jit(x, y, 2.0)\n o = t(x, y, 2.0)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_broadcasting_0(self):\n\n def t(x: torch.Tensor, y: torch.Tensor, z: float):\n o = x + y\n o = o + z\n return o\n t_jit = torch.jit.script(t)\n x = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n y = torch.randn(32, 32, dtype=torch.float, device=\"cuda\")\n jit_o = t_jit(x, y, 2.0)\n jit_o = t_jit(x, y, 2.0)\n o = t(x, y, 2.0)\n self.assertEqual(o, jit_o)\n subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))\n self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_broadcasting_1(self):\n\n def t(x: torch.Tensor, y: torch.Tensor, z: float):\n o = x + y\n o = o + z\n return o\n t_jit = torch.jit.script(t)\n x = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n y = torch.randn(1, 32, 32, dtype=torch.float, device=\"cuda\")\n jit_o = t_jit(x, y, 2.0)\n jit_o = t_jit(x, y, 2.0)\n o = t(x, y, 2.0)\n self.assertEqual(o, jit_o)\n subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))\n self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)\n\n @unittest.skipIf(not 
RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_broadcasting_2(self):\n\n def t(x: torch.Tensor, y: torch.Tensor, z: float):\n o = x + y\n o = o + z\n return o\n t_jit = torch.jit.script(t)\n x = torch.randn(4, 1, 32, 32, dtype=torch.float, device=\"cuda\")\n y = torch.randn(8, 32, 32, dtype=torch.float, device=\"cuda\")\n jit_o = t_jit(x, y, 2.0)\n jit_o = t_jit(x, y, 2.0)\n o = t(x, y, 2.0)\n self.assertEqual(o, jit_o)\n subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))\n self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_broadcasting_3(self):\n\n def t(x: torch.Tensor, y: torch.Tensor, z: float):\n o = x + y\n o = o + z\n return o\n t_jit = torch.jit.script(t)\n x = torch.randn(8, 17, 8, dtype=torch.float, device=\"cuda\")\n y = torch.randn(8, 17, 1, dtype=torch.float, device=\"cuda\")\n jit_o = t_jit(x, y, 2.0)\n jit_o = t_jit(x, y, 2.0)\n o = t(x, y, 2.0)\n self.assertEqual(o, jit_o)\n subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))\n self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)\n\n # test_broadcasting_partition_logic_X\n # Testing partition logic that is capable to avoid creating unsupported\n # broadcasting semantics in CudaFusionGroup\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_broadcasting_partition_logic_0(self):\n\n def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n x = x + 12.0\n o1 = x + y\n o2 = x + z\n o = o1 + o2\n return o\n t_jit = torch.jit.script(t)\n x = torch.randn(4, 8, 6, 8, dtype=torch.float32, device=\"cuda\")\n y = torch.randn(8, 6, 8, dtype=torch.float32, device=\"cuda\")\n z = torch.randn(6, 8, dtype=torch.float32, device=\"cuda\")\n jit_o = t_jit(x, y, z)\n jit_o = t_jit(x, y, z)\n o = t(x, y, z)\n self.assertEqual(o, jit_o)\n subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, z))\n self.assertGraphContainsExactly(subgraph, 'aten::add', 4, consider_subgraphs=False)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_broadcasting_partition_logic_1(self):\n\n def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n x = x + 12.0\n o1 = x + y\n o2 = x + z\n o = o1 + o2\n return o\n t_jit = torch.jit.script(t)\n x = torch.randn(8, 6, 8, dtype=torch.float32, device=\"cuda\")\n y = torch.randn(4, 8, 6, 8, dtype=torch.float32, device=\"cuda\")\n z = torch.randn(4, 1, 6, 8, dtype=torch.float32, device=\"cuda\")\n jit_o = t_jit(x, y, z)\n jit_o = t_jit(x, y, z)\n o = t(x, y, z)\n self.assertEqual(o, jit_o)\n subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, z))\n self.assertGraphContainsExactly(subgraph, 'aten::add', 4, consider_subgraphs=False)\n\n @unittest.skipIf(True, \"Broadcast with different output not supported yet\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_broadcasting_multiple_output_shape(self):\n def t(x: torch.Tensor, 
y: torch.Tensor, z: torch.Tensor):\n o = x + 12\n o1 = o + y\n o2 = o + z\n oo = o1.sum() + o2.sum()\n return oo\n t_jit = torch.jit.script(t)\n x = torch.randn(32, 32, dtype=torch.float, device=\"cuda\")\n y = torch.randn(2, 32, 32, dtype=torch.float, device=\"cuda\")\n z = torch.randn(4, 32, 32, dtype=torch.float, device=\"cuda\")\n jit_o = t_jit(x, y, z)\n jit_o = t_jit(x, y, z)\n o = t(x, y, z)\n self.assertEqual(o, jit_o)\n # Currently cannot fuse this\n self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)\n\n @unittest.skipIf(True, \"broadcast on branches can't be resolved yet\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_broadcasting_multiple_output(self):\n def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n o = x + 12\n o1 = o + y\n o2 = o + z\n oo = o1.sum() + o2.sum()\n return oo\n t_jit = torch.jit.script(t)\n x = torch.randn(32, 32, dtype=torch.float, device=\"cuda\")\n y = torch.randn(4, 32, 32, dtype=torch.float, device=\"cuda\")\n z = torch.randn(4, 32, 32, dtype=torch.float, device=\"cuda\")\n jit_o = t_jit(x, y, z)\n jit_o = t_jit(x, y, z)\n o = t(x, y, z)\n self.assertEqual(o, jit_o)\n # Currently cannot fuse this\n self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)\n\n def _unary_test_helper(self, operation, dtype, random_data):\n gradient_check = (dtype == torch.float64) and random_data\n shape = (8, 7)\n torch.cuda.manual_seed_all(211)\n\n # need additional def of t for boolean ops\n def t(x: torch.Tensor, y: torch.Tensor):\n o = x * y\n o = o + 5e-3\n o = operation(o)\n return o\n\n y = torch.rand(shape, dtype=torch.float32, device=\"cuda\", requires_grad=gradient_check)\n y = y.to(dtype=dtype)\n\n if random_data:\n x = torch.rand(shape, dtype=torch.float32, device=\"cuda\", requires_grad=gradient_check)\n if dtype in self.int_types:\n # prefer a larger variance for integer types\n x = x * 5\n x = x.to(dtype=dtype)\n else:\n x = self.special_values.to(dtype=dtype)\n try:\n ref = t(x, y)\n except Exception:\n # same way as TE checker, if eager mode throws, ignore this test\n return\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n if gradient_check:\n if jit_o.dtype != torch.bool:\n # bool dtype has no `-`\n gradcheck(t_jit, [x, y], nondet_tol=1e-5)\n elif dtype in self.support_tensor_dtypes:\n self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)\n o = t(x, y)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertTrue(self._compare(\"failing case {}\\n{}\\n{}\\n{}\".format(dtype, operation, x, y), o, jit_o, 1e-2))\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_unary_ops(self):\n data_types = [\n *self.int_types,\n torch.float16,\n torch.float32,\n torch.float64\n ]\n if TEST_BF16:\n data_types.append(torch.bfloat16)\n operations = [torch.neg,\n torch.abs,\n torch.log,\n torch.log10,\n torch.log1p,\n torch.log2,\n torch.lgamma,\n torch.exp,\n torch.expm1,\n torch.erf,\n torch.erfc,\n torch.cos,\n torch.acos,\n torch.cosh,\n torch.sin,\n torch.asin,\n torch.sinh,\n torch.tan,\n torch.atan,\n torch.sqrt,\n torch.rsqrt,\n torch.ceil,\n torch.floor,\n torch.round,\n torch.trunc,\n torch.frac,\n torch.reciprocal,\n torch.isfinite,\n torch.isinf,\n torch.isnan,\n torch.isneginf,\n torch.isposinf,\n 
torch.isreal,\n torch.nn.functional.softplus,\n torch.nn.functional.gelu,\n torch.relu,\n torch.sigmoid,\n torch.bitwise_not,\n torch.tan,\n torch.tanh,\n torch.nn.functional.silu]\n for op, dtype in itertools.product(operations, data_types):\n self._unary_test_helper(op, dtype, False) # test special numbers\n self._unary_test_helper(op, dtype, True) # test random data\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_category_rule(self):\n def run_tensor(x, z):\n def t(x: torch.Tensor, z: torch.Tensor):\n o = x + z\n o = torch.abs(o)\n return o\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, z)\n jit_o = t_jit(x, z)\n o = t(x, z)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, z), FUSION_GUARD)\n\n def run_scalar(x, z):\n def t(x: torch.Tensor, z: float):\n o = x + z\n o = torch.abs(o)\n return o\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, z)\n jit_o = t_jit(x, z)\n o = t(x, z)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, z), FUSION_GUARD)\n\n # n-dim with 0-dim (no type-promote)\n x = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n z = torch.tensor(2.0, dtype=torch.double, device=\"cuda\")\n run_tensor(x, z)\n\n # n-dim with 0-dim (type-promote)\n x = torch.randn(4, 8, 32, 32, device=\"cuda\").to(dtype=torch.long)\n z = torch.tensor(2.0, dtype=torch.double, device=\"cuda\")\n run_tensor(x, z)\n\n # n-dim with n-dim (type-promote)\n x = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n z = torch.randn(4, 8, 32, 32, dtype=torch.double, device=\"cuda\")\n run_tensor(x, z)\n\n # n-dim with scalar (no type-promote)\n x = torch.randn(4, 8, 32, 32, dtype=torch.float16, device=\"cuda\")\n z = torch.tensor(3., dtype=torch.double)\n run_scalar(x, z)\n if TEST_BF16:\n # n-dim with scalar (no type-promote)\n x = torch.randn(4, 8, 32, 32, dtype=torch.bfloat16, device=\"cuda\")\n z = torch.tensor(3., dtype=torch.double)\n run_scalar(x, z)\n\n # n-dim with scalar (type-promote)\n x = torch.randn(4, 8, 32, 32, device=\"cuda\").to(dtype=torch.long)\n z = torch.tensor(3., dtype=torch.double)\n run_scalar(x, z)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_unary_bitwise(self):\n def bit_not(x: torch.Tensor):\n return ~(x + 1)\n\n jitted = torch.jit.script(bit_not)\n x = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\").mul(5).to(torch.long)\n jit_o = bit_not(x)\n jit_o = bit_not(x)\n o = bit_not(x)\n self.assertEqual(o, jit_o)\n jitted.graph_for(x) # Shows up in second instance, not first\n self.assertGraphContains(jitted.graph_for(x), FUSION_GUARD)\n\n def bool_not(x: torch.Tensor, y: torch.Tensor):\n return ~(x & y)\n\n jitted = torch.jit.script(bool_not)\n x = torch.rand(4, 8, 32, 32, dtype=torch.float, device=\"cuda\").round().to(torch.bool)\n y = torch.rand(4, 8, 32, 32, dtype=torch.float, device=\"cuda\").round().to(torch.bool)\n jit_o = bool_not(x, y)\n jit_o = bool_not(x, y)\n o = bool_not(x, y)\n self.assertEqual(o, jit_o)\n jitted.graph_for(x, y) # Shows up in second instance, not first\n self.assertGraphContains(jitted.graph_for(x, y), FUSION_GUARD)\n\n def _get_scalar_binary_test_fn(self, category_and_type1, category_and_type2, 
operation):\n category1, dtype_arg1 = category_and_type1\n category2, dtype_arg2 = category_and_type2\n\n def t_intx_tensory(x: int, y: torch.Tensor):\n o = operation(x, y)\n o = 2 + o\n return o\n\n def t_doublex_tensory(x: float, y: torch.Tensor):\n o = operation(x, y)\n o = 2 + o\n return o\n # Omit both scalar cases and swap cases\n assert category1 == \"scalar\" and category2 != \"scalar\"\n if dtype_arg1.is_floating_point:\n return t_doublex_tensory\n if dtype_arg1 == torch.int64 or dtype_arg1 == torch.int32:\n return t_intx_tensory\n raise NotImplementedError\n\n def _binary_test_helper(self, operation, dtypes, random_data, categories=\"ndim\"):\n if isinstance(dtypes, tuple):\n dtype_arg1, dtype_arg2 = dtypes\n else:\n dtype_arg1 = dtype_arg2 = dtypes\n\n if isinstance(categories, tuple) and random_data:\n category1, category2 = categories\n elif not random_data:\n category1 = category2 = \"ndim\"\n else:\n category1 = category2 = categories\n\n def is_cpu_category(x):\n return x == \"0dimcpu\" or x == \"scalar\"\n\n # skip unsupported cases\n if is_cpu_category(category1) and is_cpu_category(category2):\n return\n\n # only test cases with first operand as scalar\n if category2 == \"scalar\":\n return\n\n # skip ops that doesn't support scalar inputs in eager\n if operation in [\n torch.atan2,\n torch.max,\n torch.min,\n torch.remainder, # unsupported in nvfuser\n ]:\n if category1 == \"scalar\" or category2 == \"scalar\":\n return\n\n if operation in [\n torch.fmod,\n torch.eq,\n torch.ne,\n torch.ge,\n torch.gt,\n torch.le,\n torch.lt\n ]:\n if category1 == \"scalar\":\n return\n\n # operators that does not support bfloat16\n if operation in [torch.fmod]:\n if dtype_arg1 == torch.bfloat16 or dtype_arg2 == torch.bfloat16:\n return\n\n def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n o = operation(x, y)\n o = o + z\n return o\n\n shape = (4, 32, 32)\n\n shapex = shape if category1 == \"ndim\" else ()\n shapey = shape if category2 == \"ndim\" else ()\n\n if random_data:\n x = (torch.randn(shapex, dtype=torch.float, device=\"cuda\") * 5).to(dtype_arg1)\n y = (torch.randn(shapey, dtype=torch.float, device=\"cuda\") * 5).to(dtype_arg2)\n else:\n x = self.special_values.to(dtype=dtype_arg1)\n y = (torch.rand_like(self.special_values) * 5).to(dtype_arg2)\n\n r\"\"\"\n Category conversion\n \"\"\"\n has_scalar = False\n if category1 == \"scalar\":\n has_scalar = True\n x = x.item()\n\n if category1 == \"0dimcpu\":\n x = x.to(device=\"cpu\")\n\n if category2 == \"scalar\":\n has_scalar = True\n y = y.item()\n\n if category2 == \"0dimcpu\":\n y = y.to(device=\"cpu\")\n\n z = torch.tensor([2], device=\"cuda\").to(dtype_arg1)\n is_dtype_arg1_int = dtype_arg1 == torch.int32 or dtype_arg1 == torch.int64\n is_dtype_arg2_int = dtype_arg2 == torch.int32 or dtype_arg2 == torch.int64\n\n if operation in [torch.pow]:\n if is_dtype_arg1_int and is_dtype_arg2_int:\n if category2 == \"scalar\":\n # RuntimeError: Integers to negative integer powers are not allowed\n y = abs(y)\n if category2 == \"0dimcpu\" and y == -1:\n # https://github.com/pytorch/pytorch/issues/73196\n y = y - 1\n if category2 == \"0dimcpu\" and y == -2:\n # avoid pow(0, -2), which gives inconsistent results on integer tensor\n y = y - 1\n\n # Avoid division by zero for integer tensors\n div_like = [torch.div, torch.fmod, torch.remainder]\n if operation in div_like and (dtype_arg2 == torch.int32 or dtype_arg2 == torch.int64):\n y[y == 0] = 1\n\n test_value = True\n if dtype_arg1 == torch.half or dtype_arg2 == torch.half:\n 
test_value = False\n if dtype_arg1 == torch.bfloat16 or dtype_arg2 == torch.bfloat16:\n test_value = False\n\n try:\n if not has_scalar:\n o = t(x, y, z)\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y, z)\n jit_o = t_jit(x, y, z)\n jit_o = t_jit(x, y, z)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n if test_value:\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)\n\n elif category2 != \"scalar\": # only test the case where first is scalar\n test_fn = self._get_scalar_binary_test_fn((category1, dtype_arg1), (category2, dtype_arg2), operation)\n o = test_fn(x, y)\n t_jit = torch.jit.script(test_fn)\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n if test_value:\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)\n except Exception as e:\n print(\"failing test for op: \", operation.__name__)\n print(\"with input\\n\\tx: \", x)\n print(\"\\ty: \", y)\n print(\"\\tz: \", z)\n raise e\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_binary_ops(self):\n # disabled bf16 / fp16 data types because of accuracy tolerance\n data_types = [\n torch.int32,\n torch.int64,\n torch.float16,\n torch.float32,\n torch.float64\n ]\n if TEST_BF16:\n data_types.append(torch.bfloat16)\n operations = [torch.mul,\n torch.div,\n torch.atan2,\n torch.max,\n torch.min,\n torch.pow,\n torch.remainder,\n torch.fmod,\n torch.eq,\n torch.ne,\n torch.ge,\n torch.gt,\n torch.le,\n torch.lt]\n\n category_types = [\n \"scalar\",\n \"0dim\",\n \"0dimcpu\",\n \"ndim\"\n ]\n\n binary_dtype_combinations = list(itertools.combinations(data_types, 2))\n category_combinations = list(itertools.combinations(category_types, 2))\n\n for op, dtypes, categories in itertools.product(operations, binary_dtype_combinations, category_combinations):\n self._binary_test_helper(op, dtypes, True, categories) # random data\n\n for op, dtypes in itertools.product(operations, binary_dtype_combinations):\n self._binary_test_helper(op, dtypes, False) # special numbers\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_binary_bitwise(self):\n def jit_or(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n return (x & y) | z\n\n def jit_xor(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n return (x & y) ^ z\n\n def jit_lshift(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n return (x & y) << z\n\n def jit_rshift(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n return (x & y) >> z\n\n for jit_func in [jit_or, jit_xor, jit_lshift, jit_rshift]:\n x = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\").mul(5).to(torch.long)\n y = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\").mul(5).to(torch.long)\n z = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\").mul(2).to(torch.long)\n\n jitted = torch.jit.script(jit_func)\n jit_o = jitted(x, y, z)\n jit_o = jitted(x, y, z)\n o = jit_func(x, y, z)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(jitted.graph_for(x, y, z), FUSION_GUARD)\n\n # We shouldn't need this redefinition of the function, but otherwise it won't recompile for a new type\n def jit_or(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n return (x & y) | z\n\n def jit_xor(x: 
torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n return (x & y) ^ z\n\n for jit_func in [jit_or, jit_xor]:\n x = torch.rand(4, 2, dtype=torch.float, device=\"cuda\").round().to(torch.bool)\n y = torch.rand(4, 2, dtype=torch.float, device=\"cuda\").round().to(torch.bool)\n z = torch.rand(4, 2, dtype=torch.float, device=\"cuda\").round().to(torch.bool)\n\n jitted = torch.jit.script(jit_func)\n jit_o = jitted(x, y, z)\n jit_o = jitted(x, y, z)\n o = jit_func(x, y, z)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(jitted.graph_for(x, y, z), FUSION_GUARD)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_type_as_op(self):\n def t(x: torch.Tensor, y: torch.Tensor, z: float):\n o = torch.lt(x, z)\n o = o.type_as(y)\n return o\n t_jit = torch.jit.script(t)\n x = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n y = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n jit_o = t_jit(x, y, 0.5)\n jit_o = t_jit(x, y, 0.5)\n o = t(x, y, 0.5)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, y, 0.5), FUSION_GUARD)\n\n def _ternary_integer_test_helper(self, dtype_arg1):\n shape = (4, 8, 32, 32)\n magnitude = 100\n if (dtype_arg1 in self.int_types):\n x = torch.randint(-magnitude, magnitude, shape, dtype=dtype_arg1, device=\"cuda\")\n else:\n x = torch.randn(shape, dtype=dtype_arg1, device=\"cuda\") * magnitude\n arg2 = int(0)\n arg3 = int(magnitude * 0.1)\n\n def clamp0(x: torch.Tensor, f: int):\n o = 2. * torch.clamp(x, min=f)\n return o\n clamp0_jit = torch.jit.script(clamp0)\n self._run_helper(clamp0_jit, clamp0, x, arg2)\n\n def clamp1(x: torch.Tensor, f: int, ff: int):\n o = 2. * torch.clamp(x, min=f, max=ff)\n return o\n clamp1_jit = torch.jit.script(clamp1)\n self._run_helper(clamp1_jit, clamp1, x, arg2, arg3)\n\n def clamp2(x: torch.Tensor, f: float, ff: int):\n o = 2. * torch.clamp(x, min=f, max=ff)\n return o\n clamp2_jit = torch.jit.script(clamp2)\n self._run_helper(clamp2_jit, clamp2, x, float(arg2), arg3)\n\n def clamp3(x: torch.Tensor, f: int, ff: float):\n o = 2. * torch.clamp(x, min=f, max=ff)\n return o\n clamp3_jit = torch.jit.script(clamp3)\n self._run_helper(clamp3_jit, clamp3, x, arg2, float(arg3))\n\n def threshold(x: torch.Tensor, th: int, val: int):\n o = 2. 
* torch.threshold(x, th, val)\n return o\n threshold_jit = torch.jit.script(threshold)\n self._run_helper(threshold_jit, threshold, x, arg2, arg3)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_ternary_ops_integer_compatibility(self):\n data_types = [\n torch.float16,\n torch.float32,\n torch.float64\n ]\n for dtype in data_types:\n self._ternary_integer_test_helper(dtype)\n\n def _ternary_test_helper(self, operation, dtypes, random_data):\n if isinstance(dtypes, tuple):\n dtype_arg1, dtype_arg2, dtype_arg3 = dtypes\n else:\n dtype_arg1 = dtype_arg2 = dtype_arg3 = dtypes\n\n def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: torch.Tensor):\n o = operation(x, y, z)\n o = o + alpha\n return o\n\n shape = (4, 32, 32)\n if operation is torch.where:\n dtype_arg1 = torch.bool\n if random_data:\n x = torch.randint(0, 2, shape).to(dtype=torch.bool, device=\"cuda\")\n y = (torch.randn(shape, dtype=torch.float, device=\"cuda\") * 5).to(dtype_arg2)\n z = (torch.randn(shape, dtype=torch.float, device=\"cuda\") * 5).to(dtype_arg3)\n else:\n x = torch.randint(0, 2, self.special_values.size()).to(dtype=torch.bool, device=\"cuda\")\n y = self.special_values.to(dtype=dtype_arg2)\n z = (torch.rand_like(self.special_values) * 5).to(dtype_arg3)\n elif random_data:\n x = (torch.randn(shape, dtype=torch.float, device=\"cuda\") * 5).to(dtype_arg1)\n y = (torch.randn(shape, dtype=torch.float, device=\"cuda\") * 5).to(dtype_arg2)\n z = (torch.randn(shape, dtype=torch.float, device=\"cuda\") * 5).to(dtype_arg3)\n else:\n x = self.special_values.to(dtype=dtype_arg1)\n y = (torch.rand_like(self.special_values) * 5).to(dtype_arg2)\n z = (torch.rand_like(self.special_values) * 5).to(dtype_arg3)\n alpha = torch.tensor([2], device=\"cuda\").to(dtype_arg1)\n\n o = t(x, y, z, alpha)\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y, z, alpha)\n jit_o = t_jit(x, y, z, alpha)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_ternary_ops_type_promotion(self):\n # TODO: update accuracy tolerance for bf16 / fp16 data types\n data_types = [\n # torch.float16,\n torch.float32,\n torch.float64\n ]\n '''\n if TEST_BF16:\n data_types.append(torch.bfloat16)\n '''\n # TODO: Add Tensor support for clamp\n operations = [torch.clamp]\n ternary_dtype_combinations = itertools.combinations(data_types, 3)\n for op, dtypes in itertools.product(operations, ternary_dtype_combinations):\n self._ternary_test_helper(op, dtypes, True) # random data\n self._ternary_test_helper(op, dtypes, False) # special numbers\n\n # We can't test the scalar version of rsub from python\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, \"Requires fusion optimization pass to be effective\")\n def test_rsub(self):\n x = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n y = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n\n def rsub(x: torch.Tensor, y: torch.Tensor):\n o = torch.rsub(x, y)\n o = o * 2.\n return o\n\n rsub_jit = torch.jit.script(rsub)\n self._run_helper(rsub_jit, rsub, x, y)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n # 
legacy fuser does not work for rand_like, see issue #34361\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, \"Requires fusion optimization pass to be effective\")\n def test_ternary_ops(self):\n x = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n y = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n z = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n cond = torch.randint(0, 2, (4, 8, 32, 32)).to(dtype=torch.bool, device=\"cuda\")\n\n def add(x: torch.Tensor, other: torch.Tensor, alpha: float):\n o = torch.relu(x)\n o = torch.add(o, other=other, alpha=alpha)\n return o\n add_jit = torch.jit.script(add)\n self._run_helper(add_jit, add, x, y, 2.0)\n\n def clamp0(x: torch.Tensor, f: float):\n o = 2. * torch.clamp(x, min=f)\n return o\n clamp0_jit = torch.jit.script(clamp0)\n self._run_helper(clamp0_jit, clamp0, x, 0.5)\n\n def clamp1(x: torch.Tensor, f: float, ff: float):\n o = 2. * torch.clamp(x, min=f, max=ff)\n return o\n clamp1_jit = torch.jit.script(clamp1)\n self._run_helper(clamp1_jit, clamp1, x, -0.2, 0.7)\n\n def threshold(x: torch.Tensor, th: float, val: float):\n o = 2. * torch.threshold(x, th, val)\n return o\n threshold_jit = torch.jit.script(threshold)\n self._run_helper(threshold_jit, threshold, x, 0.2, 0.9)\n\n def where(x: torch.Tensor, y: torch.Tensor, cond: torch.Tensor):\n o = 2. * torch.where(cond, x, y)\n return o\n where_jit = torch.jit.script(where)\n self._run_helper(where_jit, where, x, y, cond)\n\n def lerp(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n o = 2. * torch.lerp(x, y, z)\n return o\n lerp_jit = torch.jit.script(lerp)\n self._run_helper(lerp_jit, lerp, x, y, z)\n\n def lerp_scale(x: torch.Tensor, y: torch.Tensor, z: float):\n o = 2. * torch.lerp(x, y, z)\n return o\n lerp_scale_jit = torch.jit.script(lerp_scale)\n self._run_helper(lerp_scale_jit, lerp_scale, x, y, 0.5)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, \"Requires profiling node to run cuda fuser\")\n def test_addcmul_ops(self):\n x = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n y = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n z = torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n\n def addcmul(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, value: float):\n o = torch.add(x, 0.5)\n o = torch.addcmul(o, y, z, value=value)\n return o\n addcmul_jit = torch.jit.script(addcmul)\n self._run_helper(addcmul_jit, addcmul, x, y, z, 2.0)\n\n def addcmul_no_alpha(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n o = torch.add(x, 0.5)\n o = torch.addcmul(o, y, z)\n return o\n addcmul_no_alpha_jit = torch.jit.script(addcmul_no_alpha)\n self._run_helper(addcmul_no_alpha_jit, addcmul_no_alpha, x, y, z)\n\n def addcmul_const_alpha(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n o = torch.add(x, 0.5)\n o = torch.addcmul(o, y, z, value=0.75)\n return o\n addcmul_const_alpha_jit = torch.jit.script(addcmul_const_alpha)\n self._run_helper(addcmul_const_alpha_jit, addcmul_const_alpha, x, y, z)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_dynamic_size(self):\n old_guard = torch._C._jit_set_nvfuser_guard_mode(True)\n torch._C._jit_set_bailout_depth(20)\n\n def t(x: torch.Tensor, y: torch.Tensor, z: float):\n o = x + y\n o = o + z\n return o\n t_jit = torch.jit.script(t)\n x = 
torch.randn(4, 8, 32, 32, dtype=torch.float, device=\"cuda\")\n y = torch.randn(32, 32, dtype=torch.float, device=\"cuda\")\n jit_o = t_jit(x, y, 2.0)\n jit_o = t_jit(x, y, 2.0)\n o = t(x, y, 2.0)\n self.assertEqual(o, jit_o)\n subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))\n self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)\n\n # this test is not ideal, as we rely on the bailout to test it and we\n # don't know a way to verify the bailout graph to validate the proper\n # fusion.\n x = torch.randn(8, 32, 16, 8, dtype=torch.float, device=\"cuda\")\n y = torch.randn(16, 8, dtype=torch.float, device=\"cuda\")\n jit_o = t_jit(x, y, 2.0)\n jit_o = t_jit(x, y, 2.0)\n o = t(x, y, 2.0)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)\n x = torch.randn(8, 17, 8, dtype=torch.float, device=\"cuda\")\n y = torch.randn(8, 17, 1, dtype=torch.float, device=\"cuda\")\n jit_o = t_jit(x, y, 2.0)\n jit_o = t_jit(x, y, 2.0)\n o = t(x, y, 2.0)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)\n torch._C._jit_set_nvfuser_guard_mode(old_guard)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_random_topo(self):\n os.environ[\"PYTORCH_NVFUSER_DISABLE_FALLBACK\"] = \"1\"\n self.assertTrue(runDefaultTestWithSeed(28449))\n\n def _compare(self, desc, inp1, inp2, error):\n a = inp1.clone()\n b = inp2.clone()\n close = torch.allclose(a, b, rtol=error, atol=error)\n if not close:\n print(desc, close)\n z = a - b\n index = (torch.abs(z) >= error + error * torch.abs(b)).nonzero()\n print(\"dif : \", z[index])\n print(\"inp1 : \", a[index])\n print(\"inp2 : \", b[index])\n print(\"maximum difference\", z[index].max())\n return close\n\n # Permutation helper that applies binary operation between two tensors:\n # 1. applies separate permutation `perm0` & `perm1` to two inputs\n # 2. 
reduce dimension `broadcast_axis` of operand two to size 1\n # The purpose of this test is to ensure permutation works well in\n # complicated cases with arbitrary stride order and broadcasting dimensions\n def _permutation_helper(self, sizes, broadcast_axis, dtype, device, perm0, perm1):\n def t(x: torch.Tensor, y: torch.Tensor):\n o = torch.add(x, y)\n o = torch.relu(o)\n return o\n\n x = torch.randn([sizes[i] for i in perm0], dtype=dtype, device=device).permute(\n [perm0.index(i) for i in range(len(sizes))])\n if broadcast_axis >= 0:\n sizes[broadcast_axis] = 1\n y = torch.randn([sizes[i] for i in perm1], dtype=dtype, device=device).permute(\n [perm1.index(i) for i in range(len(sizes))])\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n o = t(x, y)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertEqual(o.stride(), jit_o.stride())\n self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)\n\n # end-2-end test of permutation & contiguity handling in integration.\n # we are testing inputs with all combination of permutation order, just to\n # ensure that integration would be able to generate functionally correct\n # kernels\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_binary_ops_permutation(self):\n # note that num_dim is exclusive from len(x), so we are not reducing\n # to single element (codegen limitation at this moment)\n x = [7, 8, 12]\n b_axes = range(-1, len(x))\n for b_axis in b_axes:\n for perm0 in itertools.permutations(range(len(x))):\n for perm1 in itertools.permutations(range(len(x))):\n x = [7, 8, 12]\n self._permutation_helper(x, b_axis, torch.float32, \"cuda\", perm0, perm1)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_binary_ops_channels_last_with_bcast(self):\n device = \"cuda\"\n x = torch.randn([4, 3, 2, 5], device=device).to(memory_format=torch.channels_last)\n w = torch.randn([2, 5], device=device)\n\n def t(x: torch.Tensor, b: torch.Tensor):\n o = x + b\n return torch.relu(o)\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, w)\n jit_o = t_jit(x, w)\n jit_o = t_jit(x, w)\n o = t(x, w)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, 1e-4))\n self.assertGraphContains(t_jit.graph_for(x, w), FUSION_GUARD)\n\n def _reduction_helper(self, sizes, reduction_axis, dtype, device, perm0, perm1, keepdim=False):\n class MyReduction(torch.nn.Module):\n __constants__ = ['reduction_axis', 'keepdim']\n\n def __init__(self):\n super(MyReduction, self).__init__()\n self.reduction_axis = reduction_axis\n self.keepdim = keepdim\n\n def forward(self, x: torch.Tensor, y: torch.Tensor):\n o = torch.add(x, y)\n o = torch.sum(o, dim=self.reduction_axis, keepdim=self.keepdim)\n return o\n\n t = MyReduction()\n\n x = torch.randn([sizes[i] for i in perm0], dtype=dtype, device=device).permute(\n [perm0.index(i) for i in range(len(sizes))])\n y = torch.randn([sizes[i] for i in perm1], dtype=dtype, device=device).permute(\n [perm1.index(i) for i in range(len(sizes))])\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n o = t(x, y)\n self.assertEqual(o.dtype, jit_o.dtype)\n # numerical issues here due to our scheduling.\n # can't use `self.assertEqual(o, jit_o)`\n 
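# NOTE (added comment): _compare (defined above) wraps torch.allclose with rtol=atol=error and\n # prints any offending elements when the check fails.\n 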
self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, 1e-4))\n self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_reduction(self):\n for x in ([7, 8, 12], [12, 8, 7, 9, 15], [128, 16, 8, 32]):\n # note that num_dim is exclusive from len(x), so we are not reducing\n # to single element (codegen limitation at this moment)\n for num_reduce_dim in range(1, len(x)):\n for axes in itertools.combinations(range(len(x)), num_reduce_dim):\n for keepdim in (True, False):\n perm0 = range(len(x))\n perm1 = range(len(x))\n self._reduction_helper(x, axes, torch.float32, \"cuda\", perm0, perm1, keepdim)\n\n def _layer_norm_autodiff_helper(self, model, grad, shapes, args):\n jit_model = torch.jit.script(model)\n\n eps = np.random.random() * 1e-4\n use_cudnn = bool(np.random.randint(0, 2))\n\n # profile/optimization runs\n for i in range(3):\n jit_o = jit_model(shapes, *args, eps, use_cudnn)\n jit_o.backward(grad)\n\n ref_args = [t.detach().clone().requires_grad_() for t in args]\n [t.grad.zero_() for t in args]\n jit_o = jit_model(shapes, *args, eps, use_cudnn)\n jit_o.backward(grad)\n\n o = model(shapes, *ref_args, eps, use_cudnn)\n o.backward(grad)\n self.assertEqual(jit_o, o)\n for arg, ref_arg in zip(args, ref_args):\n self.assertEqual(arg.grad, ref_arg.grad)\n\n # check fusion in fw & bw\n g = jit_model.graph_for(shapes, *args, eps, use_cudnn)\n for node in g.nodes():\n n = node\n dbg_state = jit_model.get_debug_state()\n for val in dbg_state.execution_plans.values():\n v = val\n state2 = v.code.grad_executor_states()\n for val in state2[0].execution_plans.values():\n v2 = val\n FileCheck().check(FUSION_GUARD).run(g)\n FileCheck().check(FUSION_GUARD).run(v2.graph)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_layer_norm_autodiff(self):\n def t_wb(shapes: List[int], x, w, b, eps: float, cudnn: bool):\n o = torch.layer_norm(x, shapes, w, b, eps, cudnn)\n o = torch.relu(o)\n return o\n\n def t_w(shapes: List[int], x, w, eps: float, cudnn: bool):\n o = torch.layer_norm(x, shapes, w, None, eps, cudnn)\n o = torch.relu(o)\n return o\n\n def t_b(shapes: List[int], x, b, eps: float, cudnn: bool):\n o = torch.layer_norm(x, shapes, None, b, eps, cudnn)\n o = torch.relu(o)\n return o\n\n def t(shapes: List[int], x, eps: float, cudnn: bool):\n o = torch.layer_norm(x, shapes, None, None, eps, cudnn)\n o = torch.relu(o)\n return o\n\n model = {3: t_wb, 2: t_w, 1: t_b, 0: t}\n\n for w, b in itertools.product([True, False], repeat=2):\n batch = [2]\n # note: awkward shape here to avoid vectorized fast kernel, which is\n # buggy in aten\n shapes = [2, 7, 3]\n m = model[w * 2 + b]\n\n grad = torch.randn(batch + shapes, dtype=torch.float32, device=\"cuda\")\n args = [torch.randn(batch + shapes, dtype=torch.float32, device=\"cuda\").requires_grad_()]\n if w:\n args.append(torch.randn(shapes, dtype=torch.float32, device=\"cuda\").requires_grad_())\n if b:\n args.append(torch.randn(shapes, dtype=torch.float32, device=\"cuda\").requires_grad_())\n self._layer_norm_autodiff_helper(m, grad, shapes, args)\n\n 
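# NOTE (added comment): the parser test below only asserts that aten::layer_norm is captured by the\n # fusion guard; numerical checks are handled by the native_layer_norm helpers that follow.\n 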
@unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_layer_norm_parser(self):\n dtype = torch.float32\n device = \"cuda\"\n x = torch.randn([4, 4, 2], dtype=dtype, device=device)\n w = torch.randn([4, 2], dtype=dtype, device=device)\n b = torch.randn([4, 2], dtype=dtype, device=device)\n\n def t(x: torch.Tensor, w: torch.Tensor, b: torch.Tensor):\n o = torch.relu(x)\n o = torch.layer_norm(o, [4, 2], w, b, 1e-5)\n return o\n\n o = t(x, w, b)\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, w, b)\n jit_o = t_jit(x, w, b)\n o = t(x, w, b)\n self.assertGraphContains(t_jit.graph_for(x, w, b), FUSION_GUARD)\n\n def _native_layer_norm_helper(self, shape, norm_shape, dtype, device, error, affine=True):\n class MyLayerNorm(torch.nn.Module):\n __constants__ = ['norm_shape']\n\n def __init__(self, elementwise_affine=True):\n super(MyLayerNorm, self).__init__()\n self.norm_shape = norm_shape\n if elementwise_affine:\n self.weight = torch.randn(norm_shape, dtype=dtype, device=device)\n self.bias = torch.randn(norm_shape, dtype=dtype, device=device)\n with torch.no_grad():\n self.weight.fill_(1)\n self.bias.fill_(0)\n else:\n self.weight = None\n self.bias = None\n\n def forward(self, x: torch.Tensor):\n o = torch.relu(x)\n o = torch.native_layer_norm(o, self.norm_shape, self.weight, self.bias, 1e-5)\n return o\n\n t = MyLayerNorm(affine)\n\n x = torch.randn(shape, dtype=dtype, device=device)\n t_jit = torch.jit.script(t)\n jit_o, jit_mean, jit_rstd = t_jit(x)\n jit_o, jit_mean, jit_rstd = t_jit(x)\n o, mean, rstd = t(x)\n self.assertEqual(o.dtype, jit_o.dtype)\n # numerical issues here due to our scheduling.\n # can't use `self.assertEqual(o, jit_o)`\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, error))\n self.assertTrue(self._compare(\"comparing mean failed\", mean, jit_mean, error))\n self.assertTrue(self._compare(\"comparing rstd failed\", rstd, jit_rstd, error))\n self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_native_layer_norm(self):\n dims = 4\n rnds = 3\n for idx in range(rnds):\n for offset in range(1, dims):\n for affine in (True, False):\n input_shape = [random.randint(10, 30) for idx in range(dims)]\n norm_shape = [input_shape[idx] for idx in range(dims - offset, dims)]\n self._native_layer_norm_helper(input_shape, norm_shape, torch.float32, \"cuda\", 1e-4, affine)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_native_layer_norm_half(self):\n dims = 4\n rnds = 3\n for idx in range(rnds):\n for offset in range(1, dims):\n input_shape = [random.randint(10, 30) for idx in range(dims)]\n norm_shape = [input_shape[idx] for idx in range(dims - offset, dims)]\n self._native_layer_norm_helper(input_shape, norm_shape, torch.float16, \"cuda\", 5e-3)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, 
\"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n @unittest.skipIf(not TEST_BF16, \"device does not support BFloat16\")\n def test_native_layer_norm_bfloat(self):\n dims = 4\n rnds = 3\n for idx in range(rnds):\n for offset in range(1, dims):\n input_shape = [random.randint(10, 30) for idx in range(dims)]\n norm_shape = [input_shape[idx] for idx in range(dims - offset, dims)]\n self._native_layer_norm_helper(input_shape, norm_shape, torch.bfloat16, \"cuda\", 1e-1)\n\n def _norm_helper(self,\n shape,\n dtype,\n device,\n error,\n is_batch_norm_else_instance_norm,\n memory_format=torch.contiguous_format,\n *,\n layer_dtype=torch.float32):\n class MyBatchNorm(torch.nn.Module):\n def __init__(self):\n super(MyBatchNorm, self).__init__()\n\n def forward(self, x: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor):\n o = torch.nn.functional.batch_norm(x, r_mean, r_var, training=True)\n o = torch.relu(o)\n return o\n\n class MyInstanceNorm(torch.nn.Module):\n def __init__(self):\n super(MyInstanceNorm, self).__init__()\n\n def forward(self, x: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor):\n o = torch.nn.functional.instance_norm(x, r_mean, r_var, use_input_stats=True)\n o = torch.relu(o)\n return o\n\n t = MyBatchNorm() if is_batch_norm_else_instance_norm else MyInstanceNorm()\n\n x = torch.randn(shape, dtype=dtype, device=device).to(memory_format=memory_format)\n running_mean = torch.zeros(shape[1], dtype=layer_dtype, device=device)\n running_var = torch.ones(shape[1], dtype=layer_dtype, device=device)\n t_jit = torch.jit.script(t)\n\n eager_running_mean = running_mean.clone()\n eager_running_var = running_var.clone()\n jit_running_mean = running_mean.clone()\n jit_running_var = running_var.clone()\n\n jit_o = t_jit(x, running_mean.clone(), running_var.clone())\n\n self.assertTrue(self._compare(\"prerun comparing running_mean failed\", eager_running_mean, jit_running_mean, error))\n self.assertTrue(self._compare(\"prerun comparing running_var failed\", eager_running_var, jit_running_var, error))\n\n jit_o = t_jit(x, jit_running_mean, jit_running_var)\n o = t(x, eager_running_mean, eager_running_var)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o.stride(), jit_o.stride())\n # numerical issues here due to our scheduling.\n # can't use `self.assertEqual(o, jit_o)`\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, error))\n self.assertTrue(self._compare(\"comparing running_mean failed\", eager_running_mean, jit_running_mean, error))\n self.assertTrue(self._compare(\"comparing running_var failed\", eager_running_var, jit_running_var, error))\n self.assertGraphContains(t_jit.graph_for(x, running_mean, running_var), FUSION_GUARD)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_layer_norm_trivial_reduce_dim(self):\n def t_wb(shapes: List[int], x, w, b, eps: float, cudnn: bool):\n o = torch.layer_norm(x, shapes, w, b, eps, cudnn)\n o = torch.relu(o)\n return o\n\n batch = [1]\n shapes = [2, 7, 3]\n\n grad = torch.randn(batch + shapes, dtype=torch.float32, device=\"cuda\")\n args = [torch.randn(batch + shapes, dtype=torch.float32, device=\"cuda\").requires_grad_()]\n args.append(torch.randn(shapes, dtype=torch.float32, 
device=\"cuda\").requires_grad_())\n args.append(torch.randn(shapes, dtype=torch.float32, device=\"cuda\").requires_grad_())\n self._layer_norm_autodiff_helper(t_wb, grad, shapes, args)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_norm_half_layer(self):\n size = [2, 4, 2, 2]\n\n for is_batch_norm_else_instance_norm in [False, True]:\n for mf in [torch.channels_last, torch.contiguous_format]:\n self._norm_helper(size, torch.float16, \"cuda\", 1e-3, is_batch_norm_else_instance_norm,\n memory_format=mf, layer_dtype=torch.float16)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_norm_channels_last(self):\n size = [3, 4, 5, 6]\n\n with torch.backends.cudnn.flags(enabled=False):\n for is_batch_norm_else_instance_norm in [False, True]:\n for mf in [torch.channels_last, torch.contiguous_format]:\n self._norm_helper(size, torch.float32, \"cuda\", 1e-4, is_batch_norm_else_instance_norm, memory_format=mf)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_norm(self):\n output_elements = 10000\n channel_sizes = [67, 457, 1024, 4096]\n\n with torch.backends.cudnn.flags(enabled=False):\n for is_batch_norm_else_instance_norm in [False, True]:\n for dims in range(3, 6):\n output_size = int(pow(output_elements, 1. / (dims - 1)))\n for C in channel_sizes:\n x = [output_size for idx in range(dims)]\n x[1] = C\n self._norm_helper(x, torch.float32, \"cuda\", 1e-4, is_batch_norm_else_instance_norm)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_norm_large(self):\n output_elements = 262144\n channel_sizes = 67, 457, 1024\n\n for is_batch_norm_else_instance_norm in [True, False]:\n for dims in range(3, 6):\n output_size = int(pow(output_elements, 1. / (dims - 1)))\n for C in channel_sizes:\n x = [output_size for idx in range(dims)]\n x[1] = C\n self._norm_helper(x, torch.float32, \"cuda\", 1e-4, is_batch_norm_else_instance_norm)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_norm_half(self):\n output_elements = 10000\n channel_sizes = [67, 457, 1024, 4096]\n\n with torch.backends.cudnn.flags(enabled=False):\n for is_batch_norm_else_instance_norm in [False, True]:\n for dims in range(3, 6):\n output_size = int(pow(output_elements, 1. 
/ (dims - 1)))\n for C in channel_sizes:\n x = [output_size for idx in range(dims)]\n x[1] = C\n self._norm_helper(x, torch.float16, \"cuda\", 5e-3, is_batch_norm_else_instance_norm)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n @unittest.skipIf(not TEST_BF16, \"device does not support BFloat16\")\n def test_norm_bfloat(self):\n output_elements = 10000\n channel_sizes = [67, 457, 1024, 4096]\n\n with torch.backends.cudnn.flags(enabled=False):\n for is_batch_norm_else_instance_norm in [False, True]:\n for dims in range(3, 6):\n output_size = int(pow(output_elements, 1. / (dims - 1)))\n for C in channel_sizes:\n x = [output_size for idx in range(dims)]\n x[1] = C\n self._norm_helper(x, torch.bfloat16, \"cuda\", 1e-1, is_batch_norm_else_instance_norm)\n\n def _softmax_helper(self, shape, reduction_axis, is_log_softmax, dtype, device, error):\n class MySoftmax(torch.nn.Module):\n __constants__ = ['reduction_axis']\n\n def __init__(self):\n super(MySoftmax, self).__init__()\n self.reduction_axis = reduction_axis\n\n def forward(self, x: torch.Tensor, y: torch.Tensor):\n o = torch.add(x, y)\n o = torch.nn.functional.softmax(o, dim=self.reduction_axis)\n return o\n\n class MyLogSoftmax(torch.nn.Module):\n __constants__ = ['reduction_axis']\n\n def __init__(self):\n super(MyLogSoftmax, self).__init__()\n self.reduction_axis = reduction_axis\n\n def forward(self, x: torch.Tensor, y: torch.Tensor):\n o = torch.add(x, y)\n o = torch.nn.functional.log_softmax(o, dim=self.reduction_axis)\n return o\n\n gradient_check = (dtype == torch.float64)\n t = MyLogSoftmax() if is_log_softmax else MySoftmax()\n\n x = torch.randn(shape, dtype=dtype, device=device, requires_grad=gradient_check)\n y = torch.randn(shape, dtype=dtype, device=device, requires_grad=gradient_check)\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n\n if gradient_check:\n gradcheck(t_jit.forward, [x, y], nondet_tol=1e-5)\n else:\n o = t(x, y)\n self.assertEqual(o.dtype, jit_o.dtype)\n # numerical issues here due to our scheduling.\n # can't use `self.assertEqual(o, jit_o)`\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, error))\n self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_softmax_dtype(self):\n def t(x: torch.Tensor, y: torch.Tensor):\n o = torch.mul(x, y)\n o = torch.nn.functional.softmax(o, dim=0, dtype=torch.float32)\n return o\n\n x = torch.randn([4, 4], dtype=torch.float16, device=\"cuda\").requires_grad_()\n y = torch.randn_like(x).requires_grad_()\n grad = torch.randn_like(x).float()\n\n ref_x = x.detach().requires_grad_()\n ref_y = y.detach().requires_grad_()\n o = t(ref_x, ref_y)\n o.backward(grad)\n\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y)\n jit_o.backward(grad)\n jit_o = t_jit(x, y)\n jit_o.backward(grad)\n jit_o = t_jit(x, y)\n jit_o.backward(grad)\n x.grad.zero_()\n y.grad.zero_()\n jit_o = t_jit(x, y)\n jit_o.backward(grad)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(ref_x.grad, x.grad)\n self.assertEqual(ref_y.grad, y.grad)\n 
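# NOTE (added comment): the softmax here is computed with dtype=torch.float32, so the half inputs are\n # upcast; the forward output is checked against eager mode with a 1e-3 tolerance.\n 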
self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, 1e-3))\n self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True)\n bwd_graph = list(\n list(t_jit.get_debug_state().execution_plans.values())[\n 0].code.grad_executor_states()[0].execution_plans.values()\n )[0].graph\n FileCheck().check(FUSION_GUARD).run(bwd_graph)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test__softmax_function(self):\n def t(x: torch.Tensor, y: torch.Tensor):\n o = torch.mul(x, y)\n o = torch._softmax(o, dim=-1, half_to_float=False)\n return o\n\n x = torch.randn([4, 4], dtype=torch.float16, device=\"cuda\")\n y = torch.randn_like(x)\n\n o = t(x, y)\n\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, 1e-3))\n self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test__softmax_function_half_to_float(self):\n def t(x: torch.Tensor, y: torch.Tensor):\n o = torch.mul(x, y)\n o = torch._softmax(o, dim=-1, half_to_float=True)\n return o\n\n x = torch.randn([4, 4], dtype=torch.float16, device=\"cuda\")\n y = torch.randn_like(x)\n\n o = t(x, y)\n\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, 1e-3))\n self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_softmax(self):\n output_size = 10000\n dims = 4\n output_size = int(pow(output_size, 1. / dims))\n reduction_sizes = [67, 256, 1024, 4096]\n\n # gradient check\n for reduction_dim in range(dims):\n for is_log_softmax in [False, True]:\n shape = [output_size for idx in range(dims)]\n self._softmax_helper(shape, reduction_dim, is_log_softmax, torch.float64, \"cuda\", 1e-4)\n\n for reduction_dim in range(dims):\n for reduction_size in reduction_sizes:\n x = [output_size for idx in range(dims)]\n x[reduction_dim] = reduction_size\n for is_log_softmax in [False, True]:\n self._softmax_helper(x, reduction_dim, is_log_softmax, torch.float32, \"cuda\", 1e-4)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_softmax_half(self):\n output_size = 10000\n dims = 4\n output_size = int(pow(output_size, 1. 
/ dims))\n reduction_sizes = [67, 256, 1024, 4096]\n\n for reduction_dim in range(dims):\n for reduction_size in reduction_sizes:\n x = [output_size for idx in range(dims)]\n x[reduction_dim] = reduction_size\n for is_log_softmax in [False, True]:\n self._softmax_helper(x, reduction_dim, is_log_softmax, torch.float16, \"cuda\", 5e-3)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n @unittest.skipIf(not TEST_BF16, \"device does not support BFloat16\")\n def test_softmax_bfloat(self):\n output_size = 10000\n dims = 4\n output_size = int(pow(output_size, 1. / dims))\n reduction_sizes = [67, 256, 1024, 4096]\n\n for reduction_dim in range(dims):\n for reduction_size in reduction_sizes:\n x = [output_size for idx in range(dims)]\n x[reduction_dim] = reduction_size\n for is_log_softmax in [False, True]:\n self._softmax_helper(x, reduction_dim, is_log_softmax, torch.bfloat16, \"cuda\", 1e-1)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_reduction_permutation(self):\n x = [7, 8, 12]\n # note that num_dim is exclusive from len(x), so we are not reducing\n # to single element (codegen limitation at this moment)\n for num_reduce_dim in range(1, len(x)):\n for axes in itertools.combinations(range(len(x)), num_reduce_dim):\n for perm0 in itertools.permutations(range(len(x))):\n for perm1 in itertools.permutations(range(len(x))):\n self._reduction_helper(x, axes, torch.float32, \"cuda\", perm0, perm1)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_reduction_multiple_output(self):\n old_guard = torch._C._jit_set_nvfuser_guard_mode(True)\n torch._C._jit_set_bailout_depth(20)\n\n def t(x: torch.Tensor, y: torch.Tensor, scale: float, z: torch.Tensor):\n o = torch.mul(x, y)\n o = torch.mul(o, scale)\n out1 = torch.mul(o, z)\n out2 = torch.sum(out1, dim=[2])\n return out1, out2\n\n t_jit = torch.jit.script(t)\n x = torch.randn(8, 4, 10, 16, dtype=torch.float, device=\"cuda\")\n y = torch.randn(8, 4, 10, 16, dtype=torch.float, device=\"cuda\")\n z = torch.randn(8, 4, 10, 16, dtype=torch.float, device=\"cuda\")\n scale = 0.5\n jit_o = t_jit(x, y, scale, z)\n jit_o = t_jit(x, y, scale, z)\n o = t(x, y, scale, z)\n for oo, jit_oo in zip(o, jit_o):\n self.assertEqual(oo.dtype, jit_oo.dtype)\n self.assertEqual(oo, jit_oo)\n self.assertGraphContains(t_jit.graph_for(x, y, scale, z), FUSION_GUARD)\n\n x = x.to(memory_format=torch.channels_last)\n y = y.to(memory_format=torch.channels_last)\n z = z.to(memory_format=torch.channels_last)\n jit_o = t_jit(x, y, scale, z)\n jit_o = t_jit(x, y, scale, z)\n o = t(x, y, scale, z)\n for oo, jit_oo in zip(o, jit_o):\n self.assertEqual(oo.dtype, jit_oo.dtype)\n self.assertEqual(oo, jit_oo)\n self.assertGraphContains(t_jit.graph_for(x, y, scale, z), FUSION_GUARD)\n torch._C._jit_set_nvfuser_guard_mode(old_guard)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n 
\"Requires fusion optimization pass to be effective\")\n def test_channels_last_with_broadcast(self):\n # setting this true forces a new graph to be generated with a new\n # input a different broadcast shape\n torch._C._jit_set_nvfuser_guard_mode(True)\n\n def t(x: torch.Tensor, y: torch.Tensor):\n o = torch.mul(x, y)\n o = o + 2.0\n return o\n t_jit = torch.jit.script(t)\n\n # Single Channel broadcasts\n # Test 1\n x = torch.randn(8, 4, 10, 16, dtype=torch.float, device=\"cuda\")\n x = x.to(memory_format=torch.channels_last)\n\n y = torch.randn(8, 4, 10, 1, dtype=torch.float, device=\"cuda\")\n y = y.to(memory_format=torch.channels_last)\n\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n o = t(x, y)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),\n jit_o.is_contiguous(memory_format=torch.channels_last))\n self.assertEqual(o, jit_o)\n\n # Test 2\n y = torch.randn(8, 4, 1, 16, dtype=torch.float, device=\"cuda\")\n y = y.to(memory_format=torch.channels_last)\n\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n o = t(x, y)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),\n jit_o.is_contiguous(memory_format=torch.channels_last))\n self.assertEqual(o, jit_o)\n\n # Test 3\n y = torch.randn(8, 1, 10, 16, dtype=torch.float, device=\"cuda\")\n y = y.to(memory_format=torch.channels_last)\n\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n o = t(x, y)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),\n jit_o.is_contiguous(memory_format=torch.channels_last))\n self.assertEqual(o, jit_o)\n\n # Test 3\n y = torch.randn(1, 4, 10, 16, dtype=torch.float, device=\"cuda\")\n y = y.to(memory_format=torch.channels_last)\n\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n o = t(x, y)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),\n jit_o.is_contiguous(memory_format=torch.channels_last))\n self.assertEqual(o, jit_o)\n\n '''\n Currently, the JIT doesn't have tensor merge logic to handle adding\n a broadcast tensor with more than one broadcast into a non-broadcast\n tensor. Therefore, either of these tests can fail depending on the\n sort implementation. 
The second test is known to fail.\n\n # Two Channel broadcasts\n # Test 1\n y = torch.randn(8, 4, 1, 1, dtype=torch.float, device=\"cuda\")\n y = y.to(memory_format=torch.channels_last)\n\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n o = t(x, y)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),\n jit_o.is_contiguous(memory_format=torch.channels_last))\n self.assertEqual(o, jit_o)\n\n # Test 2\n y = torch.randn(8, 4, 1, 1, dtype=torch.float, device=\"cuda\")\n y = y.to(memory_format=torch.channels_last).transpose(2,3)\n x = x.transpose(2,3)\n\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n o = t(x, y)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),\n jit_o.is_contiguous(memory_format=torch.channels_last))\n self.assertEqual(o, jit_o)\n '''\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_pw_single_reduction_partition(self):\n sizes = [2, 2, 2]\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn(sizes, dtype=dtype, device=device)\n y = torch.randn(sizes, dtype=dtype, device=device)\n z = torch.randn(sizes, dtype=dtype, device=device)\n\n def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n o = torch.add(x, y)\n o = torch.sum(o, dim=[0])\n o = torch.add(o, z)\n return o\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y, z)\n jit_o = t_jit(x, y, z)\n o = t(x, y, z)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_permutation_preservation(self):\n sizes = [2, 3, 4, 5]\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)\n\n def t(x: torch.Tensor):\n o = torch.relu(x)\n o = torch.sum(o, dim=[0])\n return o\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x)\n jit_o = t_jit(x)\n o = t(x)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)\n # TODO: we could preserve permutation to inputs\n self.assertEqual(o.stride(), jit_o.stride())\n\n def t(x: torch.Tensor):\n o = torch.relu(x)\n o = torch.add(o, 1.0)\n return o\n\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x)\n jit_o = t_jit(x)\n o = t(x)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)\n self.assertTrue(jit_o.is_contiguous(memory_format=torch.channels_last))\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_permutation_preservation_edge_case_0(self):\n sizes = [2, 3, 4, 5]\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)\n # mismatch rank with *note* different permutation recognized by PE\n bias = torch.randn(3, dtype=dtype, device=device).unsqueeze(-1).unsqueeze(-1)\n\n def 
t(x, y):\n return x + y\n\n t_jit = torch.jit.script(t)\n with nvfuser_singleton_fusion(True):\n for _ in range(5):\n jit_o = t_jit(x, bias)\n\n o = t(x, bias)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertEqual(o.stride(), jit_o.stride())\n self.assertGraphContains(t_jit.graph_for(x, bias), FUSION_GUARD)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_permutation_preservation_edge_case_1_broken(self):\n sizes = [2, 3, 4, 5]\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)\n # incompatible permutation, this will cause format propagation to break\n bias = torch.randn(4, 5, dtype=dtype, device=device)\n\n def t(x, y):\n return x + y\n\n t_jit = torch.jit.script(t)\n with nvfuser_singleton_fusion(True):\n for _ in range(5):\n jit_o = t_jit(x, bias)\n\n o = t(x, bias)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n try:\n # nvfuser does not support incompatible permutation, this will throw\n self.assertEqual(o.stride(), jit_o.stride())\n except Exception as e:\n warnings.warn(\n \"permutation propagation is broken, proper support should come after nvfuser permutation scheduler update\")\n self.assertGraphContains(t_jit.graph_for(x, bias), FUSION_GUARD)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_permutation_preservation_edge_case_2(self):\n sizes = [2, 3, 4, 5]\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)\n y = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)\n z = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)\n\n def t(x, y, w):\n tmp = torch.lerp(x, y, w)\n tmp = torch.clamp(tmp, -1.0, 0.5)\n tmp = torch.nn.functional.softplus(tmp)\n return torch.threshold(tmp, -2.0, 0.5)\n\n t_jit = torch.jit.script(t)\n with nvfuser_singleton_fusion(True):\n for _ in range(5):\n jit_o = t_jit(x, y, z)\n\n o = t(x, y, z)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertEqual(o.stride(), jit_o.stride())\n self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_normalization_partition(self):\n sizes = [3, 8, 5]\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn(sizes, dtype=dtype, device=device)\n y = torch.randn(sizes, dtype=dtype, device=device)\n z = torch.randn(sizes, dtype=dtype, device=device)\n r_m = torch.randn(8, dtype=dtype, device=device)\n r_v = torch.randn(8, dtype=dtype, device=device)\n\n def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor):\n o = torch.add(x, y)\n o = torch.nn.functional.softmax(o, dim=0)\n o = torch.add(o, z)\n o = torch.nn.functional.batch_norm(o, r_mean, r_var, training=True)\n return o\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y, z, r_m, r_v)\n jit_o = t_jit(x, y, z, r_m, r_v)\n o = t(x, y, z, r_m, r_v)\n 
self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, y, z, r_m, r_v), FUSION_GUARD)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_sum_to_one(self):\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn([4, 5, 6], dtype=dtype, device=device)\n\n def t(x: torch.Tensor):\n o = torch.add(x, 1)\n o = torch.sum(o, dim=[0, 1, 2])\n return o\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x)\n jit_o = t_jit(x)\n o = t(x)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_single_reduction_broadcast(self):\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn([7, 4, 8], dtype=dtype, device=device)\n y = torch.randn([4, 8], dtype=dtype, device=device)\n z = torch.randn([1, 4, 8], dtype=dtype, device=device)\n\n def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):\n o = torch.add(x, y)\n o = torch.add(o, z)\n o = torch.sum(o, dim=[0])\n return o\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y, z)\n jit_o = t_jit(x, y, z)\n o = t(x, y, z)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_trivial_reduction(self):\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn([1, 4, 8], dtype=dtype, device=device)\n\n def t(x: torch.Tensor):\n o = torch.add(x, 1)\n o = torch.sum(o, dim=[0])\n o = torch.sum(o, dim=[0])\n return o\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x)\n jit_o = t_jit(x)\n o = t(x)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_profiling_node(self):\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn(4, 8, 8, 8, dtype=dtype, device=device)\n\n def repro(x: torch.Tensor, alpha: float):\n o = torch.rand_like(x)\n o = torch.add(o, alpha)\n return o\n repro_jit = torch.jit.script(repro)\n self._run_helper(repro_jit, repro, x, 0.6)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_reduction_sizes_op(self):\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn(2, 3, 4, 5, dtype=dtype, device=device)\n y = torch.randn(2, 3, 4, 5, dtype=dtype, device=device)\n\n def t(x: torch.Tensor, y: torch.Tensor):\n o = x + y\n o = torch.relu(o)\n o = o.sum((1, 3))\n return o.size()\n t_jit = 
torch.jit.script(t)\n jit_o = t_jit(x, y)\n jit_o = t_jit(x, y)\n o = t(x, y)\n self.assertEqual(o, jit_o)\n # since the output value is not used at all, the fusion operator should\n # have been optimized away\n self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 0)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_profile_ivalue(self):\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn([7, 4, 7], dtype=dtype, device=device)\n y = torch.randn([7, 4, 7], dtype=dtype, device=device)\n\n def t(x: torch.Tensor, y: torch.Tensor, dim: List[int], keepdim: bool):\n o = torch.add(x, y)\n o = o.sum(dim, keepdim=keepdim)\n return o\n\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y, (0, 1), False)\n jit_o = t_jit(x, y, (0, 1), False)\n o = t(x, y, (0, 1), False)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, y, (0, 1), False), FUSION_GUARD)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_sum_to_size(self):\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn([2, 4, 4], dtype=dtype, device=device)\n y = torch.randn([2, 4, 4], dtype=dtype, device=device)\n\n def t(x: torch.Tensor, y: torch.Tensor, new_size: List[int]):\n o = torch.add(x, y)\n o = o.sum_to_size(new_size)\n return o\n\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y, (4, 1))\n jit_o = t_jit(x, y, (4, 1))\n o = t(x, y, (4, 1))\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertGraphContains(t_jit.graph_for(x, y, (4, 1)), FUSION_GUARD)\n\n # update shape: old kernel should handle dynamic shape well without\n # recompilation\n x = torch.randn([2, 5, 8], dtype=dtype, device=device)\n y = torch.randn([2, 5, 8], dtype=dtype, device=device)\n # (TODO) check executed kernel, should extend autograd.profiler to fused\n # kernels\n jit_o = t_jit(x, y, (5, 1))\n o = t(x, y, (5, 1))\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_grad_sum_to_size(self):\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn([2, 4, 4], dtype=dtype, device=device).requires_grad_()\n y = torch.randn([4], dtype=dtype, device=device).requires_grad_()\n grad = torch.randn([2, 4, 4], dtype=dtype, device=device)\n\n ref_x = x.detach().clone().requires_grad_()\n ref_y = y.detach().clone().requires_grad_()\n\n def t(x: torch.Tensor, y: torch.Tensor):\n o = torch.add(x, y)\n o = torch.relu(o)\n return o\n\n # profiling runs for forward & backward\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, y)\n jit_o.backward(grad)\n jit_o = t_jit(x, y)\n jit_o.backward(grad)\n\n x.grad = None\n y.grad = None\n jit_o = t_jit(x, y)\n jit_o.backward(grad)\n o = t(ref_x, ref_y)\n o.backward(grad)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertEqual(x.grad, ref_x.grad)\n self.assertEqual(y.grad, 
ref_y.grad)\n bwd_graph = list(\n list(t_jit.get_debug_state().execution_plans.values())[\n 0].code.grad_executor_states()[0].execution_plans.values()\n )[0].graph\n FileCheck().check(FUSION_GUARD).run(bwd_graph)\n\n # update shape: old kernel should handle dynamic shape well without\n # recompilation\n x = torch.randn([2, 5, 8], dtype=dtype, device=device).requires_grad_()\n y = torch.randn([8], dtype=dtype, device=device).requires_grad_()\n ref_x = x.detach().clone().requires_grad_()\n ref_y = y.detach().clone().requires_grad_()\n grad = torch.randn([2, 5, 8], dtype=dtype, device=device)\n jit_o = t_jit(x, y)\n # (TODO) check executed kernel, should extend autograd.profiler to fused\n # kernels\n jit_o.backward(grad)\n o = t(ref_x, ref_y)\n o.backward(grad)\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertEqual(o, jit_o)\n self.assertEqual(x.grad, ref_x.grad)\n self.assertEqual(y.grad, ref_y.grad)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_dropout_inference_fusion(self):\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn([10, 4, 8], dtype=dtype, device=device)\n\n def t(x: torch.Tensor, p: float, train: bool):\n o = torch.nn.functional.dropout(x, p, training=train)\n o = o + 1.0\n return o\n\n t_jit = torch.jit.script(t)\n\n self._run_helper(t_jit, t, x, 0.15, False)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_dropout_train_nograd_fusion(self):\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn([10, 4, 8], dtype=dtype, device=device)\n\n def t(x: torch.Tensor, p: float, train: bool):\n o = torch.nn.functional.dropout(x, p, training=train)\n o = o + 1.0\n return o\n\n t_jit = torch.jit.script(t)\n\n self._run_helper(t_jit, t, x, 0.0, True)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_dropout_train_nograd_prob_check(self):\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn([1024, 1024], dtype=dtype, device=device)\n\n def t(x: torch.Tensor, p: float, train: bool):\n o = torch.nn.functional.dropout(x, p, training=train)\n o = o * 2.0\n return o\n\n t_jit = torch.jit.script(t)\n\n for prob in [0.0, 0.15, 0.5, 0.85, 1.]:\n torch.cuda.manual_seed_all(123)\n jit_o = t_jit(x, prob, True)\n torch.cuda.manual_seed_all(123)\n jit_o = t_jit(x, prob, True)\n\n self.assertTrue(jit_o.detach().isfinite().all().item())\n\n num_elems = x.numel()\n num_zeros = num_elems - jit_o.detach().count_nonzero().item()\n percent_zeros = num_zeros / num_elems\n\n self.assertTrue((percent_zeros >= (prob - 0.01)) and (percent_zeros <= (prob + 0.01)))\n self.assertGraphContainsExactly(t_jit.graph_for(x, prob, True), FUSION_GUARD, 1, consider_subgraphs=True)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_dropout_training_fusion(self):\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn([10, 4, 8], dtype=dtype, device=device, requires_grad=True)\n grads = torch.randn([10, 4, 8], dtype=dtype, device=device)\n\n def t(x: torch.Tensor, p: float, 
train: bool):\n o = torch.nn.functional.dropout(x, p, training=train)\n o = o * 2.0\n return o\n\n t_jit = torch.jit.script(t)\n\n # The drop probability needs to be set to zero given that the order of picking random\n # numbers between eager mode and the jit is different\n self._run_training_helper(t_jit, t, grads, x, 0.0, True)\n\n def t2(x: torch.Tensor, p: float, train: bool):\n o = torch.nn.functional.softmax(x, dim=-1)\n o = torch.nn.functional.dropout(o, p, training=train)\n return o\n\n t2_jit = torch.jit.script(t2)\n\n # The drop probability needs to be set to zero given that the order of picking random\n # numbers between eager mode and the jit is different\n self._run_training_helper(t2_jit, t2, grads, x, 0.0, True)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_gelu(self):\n old_guard = torch._C._jit_set_nvfuser_guard_mode(True)\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn([1024, 1024], dtype=dtype, device=device, requires_grad=True)\n grads = torch.randn([1024, 1024], dtype=dtype, device=device, requires_grad=False)\n\n def t(x: torch.Tensor, mode : str):\n o = torch.nn.functional.gelu(x, approximate=mode)\n o = o * 2.0\n return o\n\n t_jit = torch.jit.script(t)\n self._run_training_helper(t_jit, t, grads, x, 'none')\n self._run_training_helper(t_jit, t, grads, x, 'tanh')\n torch._C._jit_set_nvfuser_guard_mode(old_guard)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_dropout_training_prob_check(self):\n dtype = torch.float\n device = \"cuda\"\n x = torch.randn([1024, 1024], dtype=dtype, device=device, requires_grad=True)\n x_nograd = torch.randn([1024, 1024], dtype=dtype, device=device)\n\n def t(x: torch.Tensor, p: float, train: bool):\n o = torch.nn.functional.dropout(x, p, training=train)\n o = o * 2.0\n return o\n\n t_jit = torch.jit.script(t)\n\n for prob in [0.0, 0.15, 0.5, 0.85, 1.]:\n torch.cuda.manual_seed_all(123)\n jit_o = t_jit(x, prob, True)\n torch.cuda.manual_seed_all(123)\n jit_o = t_jit(x, prob, True)\n torch.cuda.manual_seed_all(123)\n jit_o = t_jit(x, prob, True)\n\n self.assertTrue(jit_o.detach().isfinite().all().item())\n\n num_elems = x.numel()\n num_zeros = num_elems - jit_o.detach().count_nonzero().item()\n percent_zeros = num_zeros / num_elems\n\n self.assertTrue((percent_zeros >= (prob - 0.01)) and (percent_zeros <= (prob + 0.01)))\n self.assertGraphContainsExactly(t_jit.graph_for(x, prob, True), FUSION_GUARD, 1, consider_subgraphs=True)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_linear(self):\n in_feature = 2\n out_feature = 8\n # Changing the input dims to be 3-D to avoid eager mode bias fusion\n # The bias fusion causes some precision issues with TF-32\n x = torch.randn(2, 4, in_feature, dtype=torch.float32, device='cuda')\n weight = torch.randn(out_feature, in_feature, dtype=torch.float32, device='cuda')\n bias = torch.randn(out_feature, dtype=torch.float32, device='cuda')\n\n def t(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor):\n o = torch.nn.functional.linear(x, weight, bias)\n o = torch.relu(o)\n return o\n\n # bias set to true.\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, weight, bias)\n jit_o = 
t_jit(x, weight, bias)\n o = t(x, weight, bias)\n self.assertEqual(o, jit_o)\n # since the output value is not used at all, the fusion operator should\n # have been optimized away\n self.assertGraphContainsExactly(t_jit.graph_for(x, weight, bias), FUSION_GUARD, 1)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_linear_symbolic_shapes(self):\n def fn(x: int):\n y = torch.zeros((x, x + 2)).cuda()\n for i in range(2):\n inp = torch.rand((x, x + i)).cuda()\n weight = torch.rand((x + 2, x + i)).cuda()\n bias = torch.rand((x, x + 2)).cuda()\n y += torch.sin(torch.nn.functional.linear(inp, weight, bias))\n return y\n\n fn_s = torch.jit.script(fn)\n fn_s(5)\n fn_s(5)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_backward_type(self):\n # not super useful to check gradient of integer/bool, so skipping here\n type_pairs = [\n (torch.float, torch.half),\n (torch.double, torch.half),\n (torch.float, torch.double),\n ]\n if TEST_BF16:\n type_pairs += [\n (torch.float, torch.bfloat16),\n (torch.double, torch.bfloat16),\n ]\n for x_type, y_type in type_pairs:\n x = torch.randn(4, 2, dtype=x_type, device='cuda', requires_grad=True)\n y = torch.randn(4, 2, dtype=y_type, device='cuda', requires_grad=True)\n grad = torch.randn(4, 2, dtype=torch.float, device='cuda')\n\n def test1(x: torch.Tensor, y: torch.Tensor):\n o = torch.add(x, y)\n o = torch.add(o, y)\n o = torch.add(o, y)\n o = torch.add(o, y)\n o = o + 1.0\n return o\n\n test1_jit = torch.jit.script(test1)\n for i in range(3):\n jit_o = test1_jit(x, y)\n jit_o.backward(grad)\n\n bwd_graph = list(\n list(test1_jit.get_debug_state().execution_plans.values())[\n 0].code.grad_executor_states()[0].execution_plans.values()\n )[0].graph\n\n FileCheck().check(FUSION_GROUP).run(bwd_graph)\n self.assertEqual(x.grad.dtype, x.dtype)\n self.assertEqual(y.grad.dtype, y.dtype)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_autocast_1(self):\n def t(x: torch.Tensor, y: torch.Tensor):\n o = x * 2.0\n o = torch.softmax(o, dim=-1)\n o = o * 3.0\n o = torch._C._nn.linear(o, y)\n return o\n\n x = torch.randn(8, 4, dtype=torch.half, device='cuda', requires_grad=True)\n y = torch.randn(4, 4, dtype=torch.float, device='cuda', requires_grad=True)\n grad = torch.randn(8, 4, dtype=torch.half, device='cuda', requires_grad=False)\n t_jit = torch.jit.script(t)\n\n for i in range(3):\n with torch.cuda.amp.autocast():\n jit_o = t_jit(x, y)\n if i == 2 :\n fwd_graph = t_jit.graph_for(x, y)\n jit_o.backward(grad)\n\n self.assertGraphContainsExactly(fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)\n\n with torch.cuda.amp.autocast():\n bwd_graph = list(\n list(t_jit.get_debug_state().execution_plans.values())[\n 0].code.grad_executor_states()[0].execution_plans.values()\n )[0].graph\n FileCheck().check(FUSION_GROUP).run(bwd_graph)\n\n self.assertEqual(jit_o.dtype, torch.half)\n self.assertEqual(x.grad.dtype, x.dtype)\n self.assertEqual(y.grad.dtype, y.dtype)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires 
CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_autocast_2(self):\n def t(x: torch.Tensor):\n o = x * 2.0\n o = torch.softmax(o, dim=-1)\n o = o * 3.0\n o = torch.softmax(o, dim=-1)\n o = o * 4.0\n return o\n\n x = torch.randn(8, 4, dtype=torch.half, device='cuda', requires_grad=True)\n grad = torch.randn(8, 4, dtype=torch.float, device='cuda', requires_grad=False)\n t_jit = torch.jit.script(t)\n\n for i in range(3):\n with torch.cuda.amp.autocast() :\n jit_o = t_jit(x)\n if i == 2 :\n fwd_graph = t_jit.graph_for(x)\n jit_o.backward(grad)\n\n self.assertGraphContainsExactly(fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)\n\n with torch.cuda.amp.autocast():\n bwd_graph = list(\n list(t_jit.get_debug_state().execution_plans.values())[\n 0].code.grad_executor_states()[0].execution_plans.values()\n )[0].graph\n FileCheck().check(FUSION_GROUP).run(bwd_graph)\n\n self.assertEqual(jit_o.dtype, torch.float)\n self.assertEqual(x.grad.dtype, x.dtype)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n @unittest.skipIf(not TEST_BF16, \"device does not support BFloat16\")\n def test_autocast_1_bfloat(self):\n def t(x: torch.Tensor, y: torch.Tensor):\n o = x * 2.0\n o = torch.softmax(o, dim=-1)\n o = o * 3.0\n o = torch._C._nn.linear(o, y)\n return o\n\n x = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda', requires_grad=True)\n y = torch.randn(4, 4, dtype=torch.float, device='cuda', requires_grad=True)\n grad = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda', requires_grad=False)\n t_jit = torch.jit.script(t)\n\n for i in range(3):\n with torch.cuda.amp.autocast(dtype=torch.bfloat16):\n jit_o = t_jit(x, y)\n if i == 2 :\n fwd_graph = t_jit.graph_for(x, y)\n jit_o.backward(grad)\n\n self.assertGraphContainsExactly(fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)\n\n with torch.cuda.amp.autocast(dtype=torch.bfloat16):\n bwd_graph = list(\n list(t_jit.get_debug_state().execution_plans.values())[\n 0].code.grad_executor_states()[0].execution_plans.values()\n )[0].graph\n FileCheck().check(FUSION_GROUP).run(bwd_graph)\n\n self.assertEqual(jit_o.dtype, torch.bfloat16)\n self.assertEqual(x.grad.dtype, x.dtype)\n self.assertEqual(y.grad.dtype, y.dtype)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n @unittest.skipIf(not TEST_BF16, \"device does not support BFloat16\")\n def test_autocast_2_bfloat(self):\n def t(x: torch.Tensor):\n o = x * 2.0\n o = torch.softmax(o, dim=-1)\n o = o * 3.0\n o = torch.softmax(o, dim=-1)\n o = o * 4.0\n return o\n\n x = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda', requires_grad=True)\n grad = torch.randn(8, 4, dtype=torch.float, device='cuda', requires_grad=False)\n t_jit = torch.jit.script(t)\n\n for i in range(3):\n with torch.cuda.amp.autocast(dtype=torch.bfloat16) :\n jit_o = t_jit(x)\n if i == 2 :\n fwd_graph = t_jit.graph_for(x)\n jit_o.backward(grad)\n\n self.assertGraphContainsExactly(fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)\n\n with torch.cuda.amp.autocast(dtype=torch.bfloat16):\n bwd_graph = list(\n 
list(t_jit.get_debug_state().execution_plans.values())[\n 0].code.grad_executor_states()[0].execution_plans.values()\n )[0].graph\n FileCheck().check(FUSION_GROUP).run(bwd_graph)\n\n self.assertEqual(jit_o.dtype, torch.float)\n self.assertEqual(x.grad.dtype, x.dtype)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_to_dtype_fp32_to_fp16(self):\n def t(x: torch.Tensor):\n o = x * 2.0\n o = o.to(dtype=torch.half)\n o = o * 3.0\n return o\n\n x = torch.randn(8, 4, dtype=torch.float, device='cuda')\n t_jit = torch.jit.script(t)\n\n for i in range(3):\n jit_o = t_jit(x)\n\n self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)\n self.assertEqual(jit_o.dtype, torch.half)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_to_dtype_fp16_to_fp32(self):\n def t(x: torch.Tensor):\n o = x * 2.0\n o = o.to(dtype=torch.float)\n o = o * 3.0\n return o\n\n x = torch.randn(8, 4, dtype=torch.half, device='cuda')\n t_jit = torch.jit.script(t)\n\n for i in range(3):\n jit_o = t_jit(x)\n\n self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)\n self.assertEqual(jit_o.dtype, torch.float)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_to_dtype_fp16_to_fp16(self):\n def t(x: torch.Tensor):\n o = x * 2.0\n o = o.to(dtype=torch.half)\n o = o * 3.0\n return o\n\n x = torch.randn(8, 4, dtype=torch.half, device='cuda')\n t_jit = torch.jit.script(t)\n\n for i in range(3):\n jit_o = t_jit(x)\n\n self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)\n self.assertEqual(jit_o.dtype, torch.half)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n @unittest.skipIf(not TEST_BF16, \"device does not support BFloat16\")\n def test_to_dtype_fp32_to_bf16(self):\n def t(x: torch.Tensor):\n o = x * 2.0\n o = o.to(dtype=torch.bfloat16)\n o = o * 3.0\n return o\n\n x = torch.randn(8, 4, dtype=torch.float, device='cuda')\n t_jit = torch.jit.script(t)\n\n for i in range(3):\n jit_o = t_jit(x)\n\n self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)\n self.assertEqual(jit_o.dtype, torch.bfloat16)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n @unittest.skipIf(not TEST_BF16, \"device does not support BFloat16\")\n def test_to_dtype_bf16_to_fp32(self):\n def t(x: torch.Tensor):\n o = x * 2.0\n o = o.to(dtype=torch.float)\n o = o * 3.0\n return o\n\n x = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda')\n t_jit = torch.jit.script(t)\n\n for i in range(3):\n jit_o = t_jit(x)\n\n self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)\n self.assertEqual(jit_o.dtype, torch.float)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n @unittest.skipIf(not TEST_BF16, \"device does not support BFloat16\")\n def test_to_dtype_bf16_to_bf16(self):\n def t(x: torch.Tensor):\n o = x * 2.0\n o = 
o.to(dtype=torch.bfloat16)\n o = o * 3.0\n return o\n\n x = torch.randn(8, 4, dtype=torch.bfloat16, device='cuda')\n t_jit = torch.jit.script(t)\n\n for i in range(3):\n jit_o = t_jit(x)\n\n self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)\n self.assertEqual(jit_o.dtype, torch.bfloat16)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(not TEST_MULTIGPU, \"requires multiple CUDA device\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_multiple_device_pw(self):\n\n def t(x):\n o = x + 1.0\n o = torch.relu(o)\n return o\n\n x = torch.randn(2, dtype=torch.float32, device=\"cuda\")\n t_jit = torch.jit.script(t)\n\n for i in range(3):\n jit_o = t_jit(x)\n\n self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)\n torch.cuda.device(1)\n x = x.to(\"cuda:1\")\n jit_o = t_jit(x)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_graph_for_with_missing_optimized_engine(self):\n x = torch.randn(8, 4, 2, dtype=torch.float, device=\"cuda\").requires_grad_()\n\n def t(x: torch.Tensor, flag: bool):\n x = x + 1.0\n x = torch.relu(x)\n if flag:\n o = x + 1.0\n o = torch.relu(o)\n else:\n o = x + 2.0\n o = torch.relu(o)\n return o\n\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, False)\n jit_o = t_jit(x, False)\n jit_o = t_jit(x, True)\n o = t(x, True)\n self.assertEqual(o, jit_o)\n # since the output value is not used at all, the fusion operator should\n # have been optimized away\n self.assertGraphContainsExactly(t_jit.graph_for(x, True), FUSION_GUARD, 1, True)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_branches(self):\n in_feature = 2\n out_feature = 4\n x = torch.randn(4, in_feature, dtype=torch.float32, device='cuda')\n weight = torch.randn(out_feature, in_feature, dtype=torch.float32, device='cuda')\n bias = torch.randn(out_feature, dtype=torch.float32, device='cuda')\n\n def t(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, flag: bool):\n if flag:\n o = torch.nn.functional.linear(x, weight, bias)\n o = o + 1.0\n o = torch.relu(o)\n else:\n o = x.sum()\n o = o + 2.0\n o = torch.relu(o)\n return o\n\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x, weight, bias, True)\n jit_o = t_jit(x, weight, bias, True)\n o = t(x, weight, bias, True)\n self.assertEqual(o, jit_o)\n # since the output value is not used at all, the fusion operator should\n # have been optimized away\n self.assertGraphContainsExactly(t_jit.graph_for(x, weight, bias, True), FUSION_GUARD, 1)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_scalar_tensor(self):\n x = torch.empty([], device=\"cuda\", dtype=torch.float32)\n\n def t(x: torch.Tensor):\n o = x + 1.0\n o = torch.nn.functional.relu(o)\n return o\n\n # bias set to true.\n t_jit = torch.jit.script(t)\n jit_o = t_jit(x)\n jit_o = t_jit(x)\n o = t(x)\n self.assertEqual(o, jit_o)\n # since the output value is not used at all, the fusion operator should\n # have been optimized away\n self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)\n\n @unittest.skipIf(os.environ.get('PYTORCH_NO_CUDA_MEMORY_CACHING') is 
not None,\n \"skipping graph_rng when caching allocator is disabled\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(CUDA_MAJOR < 11, \"requires CUDA11 or above\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_graph_rng(self):\n self.assertTrue(torch._C._jit_nvfuser_enabled())\n size = 10000\n a = torch.randn((size,), device=\"cuda\", dtype=torch.float)\n\n def t(x):\n o = x + 1.0\n o = torch.nn.functional.dropout(o, p=0.1)\n o = o + 1.0\n o = torch.nn.functional.dropout(o, p=0.1)\n return o\n\n t_jit = torch.jit.script(t)\n\n for _ in range(3):\n t_jit(a)\n\n self.assertGraphContainsExactly(t_jit.graph_for(a), FUSION_GUARD, 1)\n\n # Control (jitted, ungraphed)\n torch.cuda.manual_seed(5)\n eager_out = a.clone()\n for _ in range(3):\n eager_out = t_jit(eager_out)\n\n graph_in = a.clone()\n g = torch.cuda.CUDAGraph()\n s = torch.cuda.Stream()\n s.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(s):\n torch.cuda.manual_seed(5)\n g.capture_begin()\n graph_out = t_jit(graph_in)\n g.capture_end()\n torch.cuda.current_stream().wait_stream(s)\n # g is now a jitted, graphed version of t.\n\n # Runs a (jitted, graphed) -> (jitted, ungraphed) -> (jitted, graphed) sequence.\n # The ops in the overall sequence should be the same as Control.\n g.replay()\n # graph_out is now filled with g's result. Use it as ungraphed input.\n out = t_jit(graph_out)\n graph_in.copy_(out)\n g.replay()\n\n # If replay() updated RNG state correctly, graph_out should now equal eager_out\n self.assertEqual(graph_out, eager_out)\n\n def _test_batch_norm_impl_index_helper(self, batch, c, hw, affine=True,\n track_running_stats=True, train=True,\n dtype=torch.float32):\n # enabling inlining to avoid counter increment in BN forward\n torch._C._debug_set_autodiff_subgraph_inlining(True)\n\n class MyModule(torch.nn.Module):\n def __init__(self, num_features=10, affine=True, track_running_stats=True):\n super(MyModule, self).__init__()\n self.bn = torch.nn.BatchNorm2d(num_features,\n 1e-5,\n affine=affine,\n track_running_stats=track_running_stats).to(dtype=dtype)\n\n def forward(self, x):\n o = self.bn(x)\n o = o * 2.0\n return o\n\n x = torch.randn(batch, c, hw, hw, dtype=torch.float, device=\"cuda\").to(dtype=dtype).requires_grad_()\n grad = torch.randint(-20, 20, (batch, c, hw, hw), device=\"cuda\").to(dtype=dtype).div(-10)\n\n my_module = MyModule(c, affine, track_running_stats).cuda()\n ref_module = MyModule(c, affine, track_running_stats).cuda()\n\n if not train:\n my_module.eval()\n ref_module.eval()\n\n t_jit = torch.jit.script(my_module)\n ref_module.load_state_dict(my_module.state_dict())\n\n ref_x = x.detach().requires_grad_()\n\n for i in range(0, 3):\n jit_o = t_jit(x)\n jit_o.backward(grad)\n\n # TODO: remove this run?\n o = ref_module(ref_x)\n o.backward(grad)\n\n has_affine = ref_module.bn.weight is not None\n has_running_stats = ref_module.bn.running_mean is not None\n\n if has_running_stats:\n my_module.bn.running_mean.zero_()\n my_module.bn.running_var.fill_(1.0)\n ref_module.bn.running_mean.zero_()\n ref_module.bn.running_var.fill_(1.0)\n\n # Verify that when train is False, we don't have grad for weight/bias.\n if has_affine and train:\n my_module.bn.weight.grad.zero_()\n my_module.bn.bias.grad.zero_()\n ref_module.bn.weight.grad.zero_()\n ref_module.bn.bias.grad.zero_()\n\n x.grad.zero_()\n ref_x.grad.zero_()\n\n # real runs\n jit_o = t_jit(x)\n jit_o.backward(grad)\n\n o = 
ref_module(ref_x)\n o.backward(grad)\n\n # assert forward graph fusion\n self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1, consider_subgraphs=True)\n # assert backward graph fusion\n bwd_graph = list(\n list(t_jit.get_debug_state().execution_plans.values())[0].code.grad_executor_states()[0]\n .execution_plans.values())[0].graph\n self.assertGraphContainsExactly(bwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)\n\n e0 = 1e-5 if dtype is not torch.half else 1e-3\n e1 = 1e-4 if dtype is not torch.half else 1e-3\n e2 = 1e-3 if dtype is not torch.half else 1e-2\n\n self.assertTrue(self._compare(\"comparing output failed\", jit_o, o, e0))\n self.assertTrue(self._compare(\"comparing input grad failed\", x.grad, ref_x.grad, e1))\n # TODO: switch to welford and reduce this to 1e-5\n # The 1e-3 looks bad, but we don't have welford in codegen, so numeric\n # is very different between reference and codegen.\n if has_affine and train:\n self.assertTrue(self._compare(\"comparing weight grad failed\",\n my_module.bn.weight.grad,\n ref_module.bn.weight.grad,\n e2))\n self.assertTrue(self._compare(\"comparing bias grad failed\",\n my_module.bn.bias.grad,\n ref_module.bn.bias.grad,\n e1))\n if has_running_stats:\n self.assertTrue(self._compare(\"comparing running_mean failed\",\n my_module.bn.running_mean,\n ref_module.bn.running_mean,\n e0))\n self.assertTrue(self._compare(\"comparing running_var failed\",\n my_module.bn.running_var,\n ref_module.bn.running_var,\n e0))\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_batch_norm_half(self):\n with torch.backends.cudnn.flags(enabled=True):\n setups = [\n [True, True],\n [False, False],\n [True, False],\n [False, True]]\n for training_and_track, affine in itertools.product(setups, [True, False]):\n training, track_running_stats = training_and_track\n self._test_batch_norm_impl_index_helper(4, 8, 5, affine, track_running_stats, training, torch.half)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_batch_norm_impl_index_inner_bcast(self):\n # the repro\n self._test_batch_norm_impl_index_helper(2, 1, 1, False, True, True)\n\n # running the full set\n setups = [\n [True, True],\n [False, False],\n [True, False],\n [False, True]]\n for training_and_track, affine in itertools.product(setups, [True, False]):\n training, track_running_stats = training_and_track\n self._test_batch_norm_impl_index_helper(2, 1, 1, affine, track_running_stats, training)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_batch_norm_impl_index_correctness(self):\n with torch.backends.cudnn.flags(enabled=True):\n batch = [2, 7, 16]\n channels = [4, 89, 19, 32]\n hw = [1, 8, 17, 32]\n\n # avoid tolerance failure in CI\n torch.cuda.manual_seed_all(211)\n\n # failing sizes (2, 1, 1, 1)\n # failing sizes (2, 89, 8, 8) training False, track True, affine: False\n for b, c, hw in itertools.product(batch, channels, hw):\n 
setups = [\n [True, True],\n [False, False],\n [True, False],\n [False, True]]\n for training_and_track, affine in itertools.product(setups, [True, False]):\n training, track_running_stats = training_and_track\n self._test_batch_norm_impl_index_helper(b, c, hw, affine, track_running_stats, training)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_softplus_fuser(self):\n def shifted_softplus(x: torch.Tensor, shift: float):\n return functional.softplus(x) - shift\n\n jitted = torch.jit.script(shifted_softplus)\n inp = torch.randn(4, 2, dtype=torch.float32, device=\"cuda\").requires_grad_()\n inp_ref = inp.detach().clone().requires_grad_()\n grad = torch.randn(4, 2, dtype=torch.float32, device=\"cuda\")\n\n aten_o = shifted_softplus(inp_ref, 0.693147)\n aten_o.backward(grad)\n aten_grad = inp_ref.grad\n\n for i in range(3):\n jit_o = jitted(inp, 0.693147)\n inp.grad = None # avoid accumulation on grad\n jit_o.backward(grad)\n jit_grad = inp.grad\n\n assert torch.allclose(jit_o, aten_o)\n assert torch.allclose(jit_grad, aten_grad)\n self.assertGraphContains(jitted.graph_for(inp, 0.693147), FUSION_GROUP, True)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_inplace_removal(self):\n def t(x: torch.Tensor):\n o = torch.nn.functional.softmax(x, dim=0)\n o += x\n return o.relu_()\n\n jitted = torch.jit.script(t)\n inp = torch.randn(4, 2, dtype=torch.float32, device=\"cuda\")\n\n for i in range(3):\n jit_o = jitted(inp)\n\n graph = jitted.graph_for(inp)\n self.assertGraphContains(graph, FUSION_GROUP, True)\n self.assertGraphContains(graph, 'aten::add', True)\n self.assertGraphContains(graph, 'aten::relu', True)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_conv2d_bias(self):\n def t(x: torch.Tensor, w: torch.Tensor, bias: torch.Tensor):\n o = torch.nn.functional.conv2d(x, w, bias)\n return o.relu()\n\n jitted = torch.jit.script(t)\n inp = torch.randn(4, 5, 3, 3, dtype=torch.float32, device=\"cuda\")\n weight = torch.randn(2, 5, 2, 2, dtype=torch.float32, device=\"cuda\")\n bias = torch.randn(2, dtype=torch.float32, device=\"cuda\")\n\n for i in range(3):\n jit_o = jitted(inp, weight, bias)\n\n graph = jitted.graph_for(inp)\n self.assertGraphContains(graph, FUSION_GROUP, True)\n\n def t_not_fused(x: torch.Tensor, w: torch.Tensor):\n o = torch.nn.functional.conv2d(x, w)\n return o.relu()\n\n jitted_not_fused = torch.jit.script(t_not_fused)\n\n for i in range(3):\n jit_o = jitted_not_fused(inp, weight)\n\n graph = jitted_not_fused.graph_for(inp)\n self.assertGraphContainsExactly(graph, FUSION_GROUP, 0)\n self.assertGraphContains(graph, 'aten::relu', True)\n\n def t_bias(x: torch.Tensor, w: torch.Tensor, bias: torch.Tensor):\n o = torch.nn.functional.conv2d(x, w, bias)\n return o.relu()\n\n jitted_bias = torch.jit.script(t_bias)\n\n for i in range(3):\n jit_o = jitted_bias(inp, weight, bias)\n\n graph = jitted_bias.graph_for(inp)\n self.assertGraphContains(graph, FUSION_GROUP, True)\n self.assertGraphContains(graph, 'prim::add_optional', True)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n 
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_remove_output_used_only_in_dtype(self):\n class MyModule(torch.nn.Module):\n def __init__(self, num_features=4):\n super(MyModule, self).__init__()\n self.bn0 = torch.nn.BatchNorm2d(num_features)\n self.bn1 = torch.nn.BatchNorm2d(num_features)\n\n def forward(self, x, y):\n o1 = self.bn0(x)\n o2 = self.bn1(y)\n return torch.relu(o1 + o2)\n\n t = MyModule(4).float().cuda()\n\n jitted = torch.jit.script(t)\n x = torch.randn(3, 4, 2, 5, dtype=torch.float32, device=\"cuda\")\n y = torch.randn(3, 4, 2, 5, dtype=torch.float32, device=\"cuda\")\n\n with torch.cuda.amp.autocast(True):\n for i in range(5):\n jit_o = jitted(x, y)\n\n jit_o = jitted(x, y)\n o = t(x, y)\n\n self.assertTrue(torch.allclose(jit_o, o))\n graph = jitted.graph_for(x, y)\n self.assertGraphContains(graph, FUSION_GROUP, True)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_fix_shape_expression_bn(self):\n class MyModule(torch.nn.Module):\n def __init__(self, num_features=4):\n super(MyModule, self).__init__()\n self.bn = torch.nn.BatchNorm2d(num_features)\n\n def forward(self, x, y):\n out1 = self.bn(x)\n out2 = out1 + y\n out3 = torch.relu(out2)\n return out3\n\n t = MyModule(4).float().cuda()\n\n jitted = torch.jit.script(t)\n x = torch.randn(3, 4, 2, 5, dtype=torch.float32, device=\"cuda\")\n y = torch.randn(3, 4, 2, 5, dtype=torch.float32, device=\"cuda\")\n\n with torch.cuda.amp.autocast(True):\n for i in range(5):\n jit_o = jitted(x, y)\n\n jit_o = jitted(x, y)\n o = t(x, y)\n\n self.assertTrue(torch.allclose(jit_o, o))\n graph = jitted.graph_for(x, y)\n self.assertGraphContains(graph, FUSION_GROUP, True)\n\n def _run_fwd_helper(self, func, ops, *args):\n jitted = torch.jit.script(func)\n for i in range(3):\n jit_o = jitted(*args)\n jit_o = jitted(*args)\n o = func(*args)\n for oo, jit_oo in zip(o, jit_o):\n self.assertEqual(oo.dtype, jit_oo.dtype)\n self.assertEqual(oo, jit_oo)\n graph = jitted.graph_for(*args)\n self.assertGraphContains(graph, FUSION_GROUP, True)\n for op in ops:\n self.assertGraphContainsExactly(graph, op, 0)\n\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_sibling_fusion(self):\n device = \"cuda\"\n dtype = torch.float\n x = torch.randn(2, 5, dtype=dtype, device=device)\n y = torch.randn(2, 5, dtype=dtype, device=device)\n\n def t(x: torch.Tensor):\n o1 = x + 1.0\n o2 = x * 0.5\n return o1, o2\n self._run_fwd_helper(t, ['aten::add', 'aten::mul'], x)\n\n def t2(x: torch.Tensor, y: torch.Tensor):\n o1 = x.sum(0)\n o2 = (x * y).sum(0)\n return o1, o2\n self._run_fwd_helper(t2, ['aten::sum', 'aten::mul'], x, y)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_clean_profile_ivalue(self):\n device = \"cuda\"\n dtype = torch.float\n x = torch.randn(2, 5, dtype=dtype, device=device, requires_grad=True)\n # turn on autodiff subgraph inlining\n # this is to verify that we clean up profile_ivalue node out side 
of\n # fusion code path.\n torch._C._debug_set_autodiff_subgraph_inlining(True)\n\n def t(x: torch.Tensor, flag: bool):\n return torch.dropout(x, 0.5, flag)\n\n jit_t = torch.jit.script(t)\n for idx in range(5) :\n out = jit_t(x, True)\n\n graph = jit_t.graph_for(x, True)\n out = jit_t(x, False)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_sibling_fusion_no_scalar_inputs(self):\n device = \"cuda\"\n dtype = torch.float\n x = torch.randn(2, 5, dtype=dtype, device=device)\n y = torch.randn(3, dtype=dtype, device=device)\n\n # no tensor dependency between o1/o2, we shouldn't be fusing them\n def t(x: torch.Tensor, y: torch.Tensor):\n o1 = x + 1\n o2 = y - 1\n return o1, o2\n\n jitted = torch.jit.script(t)\n for i in range(3):\n jit_o = jitted(x, y)\n graph = jitted.graph_for(x, y)\n self.assertGraphContainsExactly(graph, FUSION_GROUP, 0)\n\n def _bias_view_relu_helper(self, shape, output_shape, dtype, device, error):\n class BiasViewRelu(torch.nn.Module):\n def __init__(self):\n super(BiasViewRelu, self).__init__()\n self.bias = torch.nn.Parameter(torch.randn(shape, dtype=dtype, device=device), requires_grad=False)\n with torch.no_grad():\n self.bias.fill_(10)\n\n def forward(self, inputs : torch.Tensor, view_shape : List[int]):\n o = inputs + self.bias\n o = o.view(view_shape)\n return torch.relu(o)\n\n t = BiasViewRelu()\n x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)\n t_jit = torch.jit.script(t)\n\n # profiling\n jit_o = t_jit(x, output_shape)\n # optimization\n jit_o = t_jit(x, output_shape)\n # final\n jit_o = t_jit(x, output_shape)\n # eager - baseline\n o = t(x, output_shape)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, error))\n graph = t_jit.graph_for(x, output_shape)\n\n # TODO: revert disabled aten::view\n # has_inferred_dimension = any([dim == -1 for dim in output_shape])\n has_inferred_dimension = True\n if has_inferred_dimension:\n # prohibit fusing when view_shape contains an inferred dimension\n self.assertGraphContainsExactly(graph, FUSION_GROUP, 0)\n self.assertGraphContainsExactly(graph, 'prim::view_copy', 0)\n else:\n self.assertGraphContains(graph, FUSION_GUARD)\n self.assertGraphContains(graph, 'prim::view_copy', True)\n\n def _alias_bias_view_relu_helper(self, shape, output_shape, dtype, device, error):\n class BiasViewRelu(torch.nn.Module):\n def __init__(self):\n super(BiasViewRelu, self).__init__()\n self.bias = torch.nn.Parameter(torch.randn(shape, dtype=dtype, device=device), requires_grad=False)\n with torch.no_grad():\n self.bias.fill_(10)\n\n def forward(self, inputs : torch.Tensor, bias : torch.Tensor, view_shape : List[int]):\n o = inputs.view(view_shape)\n inputs.add_(bias)\n return torch.relu(o)\n\n t = BiasViewRelu()\n x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)\n bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)\n t_jit = torch.jit.script(t)\n\n # profiling\n jit_o = t_jit(x.clone(), bias, output_shape)\n # optimization\n jit_o = t_jit(x.clone(), bias, output_shape)\n # final\n jit_o = t_jit(x.clone(), bias, output_shape)\n # eager - baseline\n o = t(x.clone(), bias, output_shape)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, error))\n graph = t_jit.graph_for(x, bias, output_shape)\n 
self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)\n self.assertGraphContainsExactly(graph, 'prim::view_copy', 0)\n\n # generate random view given original view\n def _random_view(self, original_view, max_len=8, max_views=10000):\n class Moves(enum.Enum):\n Merge = 0\n Split = 1\n Broadcast = 2\n ImplicitBroadcast = 3\n Keep = 4\n\n def valid(old_view, new_view):\n old_view_size = reduce(operator.mul, old_view)\n new_view_size = reduce(operator.mul, new_view)\n return old_view_size == new_view_size\n\n # given a random starting number, find the nearest divisor\n def find_nearest_divisor(N):\n if 2 >= (N - 1):\n return -1\n result = random.randint(2, N - 1)\n while (N % result) != 0:\n result += 1\n return result\n\n complete_views = set([tuple(original_view)])\n\n to_visit = []\n # empty new view, current original view, start pos=0, move count = 0, last_move\n to_visit.append(([], original_view, 0, [], Moves.Keep))\n\n # depth-first search of view shapes, starting from the original view\n while len(to_visit) > 0 and len(complete_views) < max_views:\n new_view, old_view, odx, move_list, last_move = to_visit[-1]\n to_visit.pop()\n\n # iterate over each move type\n for idx in range(len(Moves)):\n state = Moves(idx)\n new_view_clone = copy.deepcopy(new_view)\n old_view_clone = copy.deepcopy(old_view)\n new_move_list = move_list + [state]\n new_odx = odx\n\n # Update state using Move state\n if state == Moves.Keep:\n new_size = old_view_clone[odx]\n new_view_clone.append(new_size)\n new_odx += 1\n\n elif state == Moves.Merge:\n if odx + 1 < len(old_view_clone):\n new_size = old_view_clone[odx] * old_view_clone[odx + 1]\n new_view_clone.append(new_size)\n new_odx += 2\n else:\n continue\n\n elif state == Moves.Broadcast and last_move != Moves.Broadcast:\n new_view_clone.append(1)\n\n elif state == Moves.Split:\n new_size = find_nearest_divisor(old_view_clone[odx])\n if new_size == -1:\n continue\n new_view_clone.append(new_size)\n old_view_clone[odx] = int(old_view[odx] / new_size)\n\n if old_view_clone[odx] == 1:\n new_odx += 1\n\n elif state == Moves.ImplicitBroadcast:\n old_view_clone.insert(odx + 1, 1)\n new_size = old_view[odx] * 1\n new_view_clone.append(new_size)\n new_odx += 2\n\n if new_odx < len(old_view_clone) and len(new_move_list) < max_len:\n to_visit.append((new_view_clone, old_view_clone, new_odx, new_move_list, state))\n elif (valid(original_view, new_view_clone)):\n final_new_view = tuple(new_view_clone)\n complete_views.add(final_new_view)\n return list(complete_views)\n\n # ndims - number of dimensions\n # test_fn - view test function\n def _view_test_generator(self, ndims, test_fn):\n # create random tensor\n # max value for each dimension\n max_size = 10e7\n max_value = max(int(pow(max_size, 1. 
/ ndims)), 1)\n sizes = [random.randint(1, max_value) for idx in range(ndims)]\n x = torch.randn(sizes)\n\n original_sizes = list(x.size())\n all_views = self._random_view(original_sizes)\n random.shuffle(all_views)\n\n max_samples = 20\n max_views = min(len(all_views), max_samples)\n total = 0\n correct = 0\n # test random combinations of compatible views\n for idx in range(max_views):\n for jdx in range(idx + 1, max_views):\n total += 1\n test_fn(all_views[idx], all_views[jdx], torch.float, 'cuda', 1e-6)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_view(self):\n torch._C._jit_set_nvfuser_guard_mode(True)\n self._bias_view_relu_helper([2, 3, 4, 5], [-1, 4, 5], torch.float, 'cuda', 1e-6)\n for ndims in range(1, 5):\n self._view_test_generator(ndims, self._bias_view_relu_helper)\n self._alias_bias_view_relu_helper([2, 3, 4, 5], [1, 6, 1, 2, 2, 5, 1], torch.float, 'cuda', 1e-6)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_strict_fusion(self):\n def success(x):\n with torch.jit.strict_fusion():\n return x + x + x\n\n scripted = self.checkScript(success, (torch.rand([4], device='cuda'),))\n g = torch.jit.last_executed_optimized_graph()\n FileCheck().check_not(\"aten::add\").check(\"prim::CudaFusionGroup\").run(g)\n\n def failure(x):\n with torch.jit.strict_fusion():\n return x + torch.mm(x, x) + x\n\n with self.assertRaises(Exception) as error_out:\n foo_s = torch.jit.script(failure)\n foo_s(torch.rand([4, 4]))\n foo_s(torch.rand([4, 4]))\n\n fc = FileCheck().check(\"Found unfused operators\")\n fc.check(\"aten::mm\").run(str(error_out.exception))\n\n def _ltc_helper(self, shape, dtype, device, error, approximate=True):\n # modeled after LTC linear layer\n class LTC(torch.nn.Module):\n def __init__(self):\n super(LTC, self).__init__()\n self.weight = torch.nn.Parameter(torch.randn([1024, 1024], dtype=dtype, device=device), requires_grad=False)\n self.bias = torch.nn.Parameter(torch.randn([1, 1024], dtype=dtype, device=device), requires_grad=False)\n\n def forward(self, inputs : torch.Tensor):\n o = inputs.view([32768, 1024])\n o = torch.mm(o, self.weight)\n o = o.view([256, 128, 1024])\n o = o + self.bias\n o = o.view([32768, 1024])\n o = o.view([256, 128, 1024])\n return torch.nn.functional.gelu(o)\n\n t = LTC()\n x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)\n t_jit = torch.jit.script(t)\n\n # profile/optimization runs\n for i in range(3):\n jit_o = t_jit(x)\n o = t(x)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, error))\n graph = t_jit.graph_for(x)\n # TODO: revert disabled aten::view\n # self.assertGraphContains(graph, FUSION_GUARD)\n # self.assertGraphContains(graph, 'prim::view_copy', True)\n self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)\n self.assertGraphContainsExactly(graph, 'prim::view_copy', 0, True)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_nested_view(self):\n self._ltc_helper([256, 128, 1024], torch.float, 'cuda', 1e-6)\n\n def _bias_squeeze_relu_helper(self, shape, dtype, device, error):\n class BiasSqueezeRelu(torch.nn.Module):\n def __init__(self):\n 
super(BiasSqueezeRelu, self).__init__()\n\n def forward(self, inputs : torch.Tensor, bias : torch.Tensor):\n o = inputs + bias\n o = torch.squeeze(o)\n return torch.relu(o)\n\n t = BiasSqueezeRelu()\n x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)\n bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)\n t_jit = torch.jit.script(t)\n\n jit_o = t_jit(x, bias)\n jit_o = t_jit(x, bias)\n jit_o = t_jit(x, bias)\n o = t(x, bias)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, error))\n graph = t_jit.graph_for(x, bias)\n self.assertGraphContains(graph, FUSION_GUARD)\n self.assertGraphContains(graph, 'prim::squeeze_copy', True)\n\n def _alias_bias_squeeze_relu_helper(self, shape, dtype, device, error):\n class BiasSqueezeRelu(torch.nn.Module):\n def __init__(self):\n super(BiasSqueezeRelu, self).__init__()\n\n def forward(self, inputs : torch.Tensor, bias : torch.Tensor):\n o = torch.squeeze(inputs)\n inputs.add_(bias)\n return torch.relu(o)\n\n t = BiasSqueezeRelu()\n x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)\n bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)\n t_jit = torch.jit.script(t)\n\n jit_o = t_jit(x.clone(), bias)\n jit_o = t_jit(x.clone(), bias)\n jit_o = t_jit(x.clone(), bias)\n o = t(x.clone(), bias)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, error))\n graph = t_jit.graph_for(x, bias)\n self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)\n self.assertGraphContainsExactly(graph, 'prim::squeeze_copy', 0)\n\n # TODO: revert disabled alias ops\n @unittest.skipIf(True, \"skipping this test since squeeze/unsqueeze is disabled now\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_squeeze(self):\n self._bias_squeeze_relu_helper([1, 6, 1, 2, 2, 5, 1], torch.float, 'cuda', 1e-6)\n self._alias_bias_squeeze_relu_helper([1, 6, 1, 2, 2, 5, 1], torch.float, 'cuda', 1e-6)\n\n # TODO: revert disabled alias ops\n @unittest.skipIf(True, \"skipping this test since squeeze/unsqueeze is disabled now\")\n # remove this after opinfo tests are enabled\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_squeeze_zero(self):\n x = torch.tensor(1.0, dtype=torch.float, device=\"cuda\")\n\n def squeeze_0(x: torch.Tensor):\n o = x + 1.\n o = torch.squeeze(o, 0)\n o = o * 2.\n return o\n\n def squeeze_1(x: torch.Tensor):\n o = x + 1.\n o = torch.squeeze(o, -1)\n o = o + .5\n return o\n\n squeeze_0_jit = torch.jit.script(squeeze_0)\n self._run_helper(squeeze_0_jit, squeeze_0, x)\n squeeze_1_jit = torch.jit.script(squeeze_1)\n self._run_helper(squeeze_1_jit, squeeze_1, x)\n\n def _bias_unsqueeze_relu_helper(self, shape, dtype, device, error):\n class BiasUnsqueezeRelu(torch.nn.Module):\n def __init__(self):\n super(BiasUnsqueezeRelu, self).__init__()\n\n def forward(self, inputs : torch.Tensor, bias : torch.Tensor):\n o = inputs + bias\n o = torch.unsqueeze(o, 0)\n return torch.relu(o)\n\n t = BiasUnsqueezeRelu()\n x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)\n bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)\n t_jit = torch.jit.script(t)\n\n jit_o = t_jit(x, 
bias)\n jit_o = t_jit(x, bias)\n jit_o = t_jit(x, bias)\n o = t(x, bias)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, error))\n graph = t_jit.graph_for(x, bias)\n self.assertGraphContains(graph, FUSION_GUARD)\n self.assertGraphContains(graph, 'prim::unsqueeze_copy', True)\n\n def _alias_bias_unsqueeze_relu_helper(self, shape, dtype, device, error):\n class BiasUnsqueezeRelu(torch.nn.Module):\n def __init__(self):\n super(BiasUnsqueezeRelu, self).__init__()\n\n def forward(self, inputs : torch.Tensor, bias : torch.Tensor):\n o = torch.unsqueeze(inputs, 0)\n inputs.add_(bias)\n return torch.relu(o)\n\n t = BiasUnsqueezeRelu()\n x = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)\n bias = torch.randn(shape, dtype=dtype, device=device, requires_grad=False)\n t_jit = torch.jit.script(t)\n\n jit_o = t_jit(x.clone(), bias)\n jit_o = t_jit(x.clone(), bias)\n jit_o = t_jit(x.clone(), bias)\n o = t(x.clone(), bias)\n\n self.assertEqual(o.dtype, jit_o.dtype)\n self.assertTrue(self._compare(\"comparing output failed\", o, jit_o, error))\n graph = t_jit.graph_for(x, bias)\n self.assertGraphContainsExactly(graph, FUSION_GUARD, 0)\n self.assertGraphContainsExactly(graph, 'prim::unsqueeze_copy', 0)\n\n # TODO: revert disabled alias ops\n @unittest.skipIf(True, \"skipping this test since squeeze/unsqueeze is disabled now\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_unsqueeze(self):\n self._bias_unsqueeze_relu_helper([2, 3, 4, 5], torch.float, 'cuda', 1e-6)\n self._alias_bias_unsqueeze_relu_helper([2, 3, 4, 5], torch.float, 'cuda', 1e-6)\n\n # TODO: revert disabled alias ops\n @unittest.skipIf(True, \"skipping this test since unsqueeze is disabled now\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_alias_pass_fix(self):\n x = torch.randn(4, 24, 2, 2, dtype=torch.float, device=\"cuda\")\n w = torch.randn(24, 24, 1, 1, dtype=torch.float, device=\"cuda\")\n b = torch.randn(24, dtype=torch.float, device=\"cuda\")\n\n def t(x, w, b):\n b2 = b + 1.0\n o = torch.conv2d(x, w, b2)\n return o\n\n t_jit = torch.jit.script(t)\n self._run_helper(t_jit, t, x, w, b)\n\n # TODO: revert disabled alias ops\n @unittest.skipIf(True, \"skipping this test since squeeze/unsqueeze is disabled now\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_squeeze_negative_dim(self):\n x = torch.randn(4, 24, 1, 2, dtype=torch.float, device=\"cuda\")\n\n def t(x):\n o = x + 1.0\n o = o.squeeze(-2)\n o = o * 2.0\n return o\n\n t_jit = torch.jit.script(t)\n self._run_helper(t_jit, t, x)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_singleton_fusion(self):\n x = torch.randn(4, 2, device=\"cuda\")\n\n with nvfuser_singleton_fusion(True):\n def t(x):\n return x.relu()\n\n t_jit = torch.jit.script(t)\n self._run_helper(t_jit, t, x)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def 
test_issue1445_fusion(self):\n def f(t0, t1, t2, t3):\n masked_input = torch.where(t1, t2, t3)\n total = masked_input.sum([0, 1, 2, 3])\n sizes : List[int] = []\n t10 = torch.reshape(t0, sizes)\n t7 = total / t10\n t4 = t7.to(dtype=torch.float)\n return t4\n\n x = torch.randn(1, 1, 1, 1, device='cuda').to(dtype=torch.long)\n y = torch.randn(3, 2, 1, 1, device='cuda').to(dtype=torch.bool).expand([3, 2, 1, 2])\n z = torch.randn(3, 2, 1, 2, device='cuda')\n w = torch.tensor(1.5, device='cuda')\n\n f_jit = torch.jit.script(f)\n for i in range(5):\n out_jit = f_jit(x, y, z, w)\n out = f(x, y, z, w)\n self.assertEqual(out, out_jit)\n self.assertGraphContainsExactly(f_jit.graph_for(x, y, z, w), FUSION_GROUP, 1)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_disable_sibling_fuse(self):\n x = torch.randn(4, 2, device=\"cuda\")\n y = torch.randn(8, device=\"cuda\")\n s = torch.tensor(1.5, device=\"cuda\")\n\n with nvfuser_horizontal_fusion(False):\n def t(x, y, s):\n o1 = x + s\n o2 = y + s\n return o1, o2\n\n t_jit = torch.jit.script(t)\n for i in range(5):\n t_jit(x, y, s)\n\n # sibling fusion should be disabled with the flag\n self.assertGraphContainsExactly(t_jit.graph_for(x, y, s), FUSION_GUARD, 0)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_build_shape_expression_native_dropout(self):\n x = torch.randn(4, 2, device=\"cuda\")\n\n def t(x):\n o, mask = torch.native_dropout(x, 0.0, True)\n o1 = o.sigmoid()\n o2 = mask.float().sigmoid()\n return (o1, o2)\n\n t_jit = torch.jit.script(t)\n\n jit_o = t_jit(x)\n jit_o = t_jit(x)\n o = t(x)\n for oo, jit_oo in zip(o, jit_o):\n self.assertEqual(oo.dtype, jit_oo.dtype)\n self.assertEqual(oo, jit_oo)\n self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_scalar_tensor_permuted(self):\n x = torch.randn(4, 2, 3, device=\"cuda\").permute([1, 2, 0])\n y = torch.tensor(1.0, device=\"cuda\")\n\n with nvfuser_singleton_fusion(True):\n def t(x, y):\n return x + y\n\n t_jit = torch.jit.script(t)\n self._run_helper(t_jit, t, x, y)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_cpu_scalar(self):\n x = torch.randn(4, 2, 3, device=\"cuda\")\n y = torch.tensor(1.0, device=\"cpu\")\n z = torch.tensor(2.0, device=\"cpu\")\n\n with nvfuser_singleton_fusion(True):\n # testing cpu scalar tensor promotion\n def t(x, y):\n return x + y\n\n t_jit = torch.jit.script(t)\n self._run_helper(t_jit, t, x, y)\n\n # scalar cpu tensor add should NOT be fused\n @torch.jit.script\n def t1(y, z):\n return y * z\n for _ in range(5):\n t1(y, z)\n self.assertGraphContainsExactly(t1.graph_for(y, z), FUSION_GUARD, 0)\n\n # everything, including scalar cpu tensor add should be fused\n @torch.jit.script\n def t2(x, y, z):\n tmp = y + z\n return tmp + x\n for _ in range(5):\n t2(x, y, z)\n self.assertGraphContainsExactly(t2.graph_for(x, y, z), 'aten::add', 0)\n self.assertGraphContainsExactly(t2.graph_for(x, y, z), FUSION_GUARD, 1)\n\n # 'cpu_tmp = y + z' shouldn't be fused.\n 
@torch.jit.script\n def t3(x, y, z):\n cpu_tmp = y + z\n out = x + y\n return cpu_tmp, out\n for _ in range(5):\n t3(x, y, z)\n self.assertGraphContainsExactly(t3.graph_for(x, y, z), FUSION_GUARD, 1)\n self.assertGraphContainsExactly(t3.graph_for(x, y, z), 'aten::add', 1)\n\n # TODO: revert disabled alias ops\n @unittest.skipIf(True, \"skipping this test since squeeze/unsqueeze is disabled now\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_shape_expression(self):\n x = torch.randn(4, 2, 1, 3, device=\"cuda\")\n\n def t_unsqueeze(x):\n t0 = x.relu()\n t1 = t0.unsqueeze(1)\n t2 = t1 + 1.0\n t3 = t1.size()\n return t2, t3\n\n def t_squeeze(x):\n t0 = x.relu()\n t1 = t0.squeeze()\n t2 = t1 + 1.0\n t3 = t1.size()\n return t2, t3\n\n def t_squeeze_dim(x):\n t0 = x.relu()\n t1 = t0.squeeze(-2)\n t2 = t1 + 1.0\n t3 = t1.size()\n return t2, t3\n\n # squeezing a non-size 1 dimension should be a no op\n def t_squeeze_dim_no_op(x):\n t0 = x.relu()\n t1 = t0.squeeze(1)\n t2 = t1 + 1.0\n t3 = t1.size()\n return t2, t3\n\n def run(fn):\n jit_fn = torch.jit.script(fn)\n jit_o = jit_fn(x)\n jit_o = jit_fn(x)\n jit_o = jit_fn(x)\n o = fn(x)\n # output 0 is a tensor, so we check dtype and value\n self.assertEqual(o[0].dtype, jit_o[0].dtype)\n self.assertEqual(o[0], jit_o[0])\n # output 1 is shape\n self.assertEqual(o[1], jit_o[1])\n self.assertGraphContainsExactly(jit_fn.graph_for(x), FUSION_GUARD, 1)\n\n for t in [t_unsqueeze, t_squeeze, t_squeeze_dim, t_squeeze_dim_no_op]:\n run(t)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_scalar_cuda_tensor(self):\n x = torch.tensor(2.0, device=\"cuda\")\n\n with nvfuser_singleton_fusion(True):\n def t(x):\n return x + 1.0\n\n t_jit = torch.jit.script(t)\n self._run_helper(t_jit, t, x)\n\n @torch.jit.script\n def t_jitted(x):\n return x.sum(0)\n\n for i in range(5):\n t_jitted(x)\n self.assertGraphContainsExactly(t_jitted.graph_for(x), FUSION_GUARD, 0)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_overlapped_input(self):\n x = torch.randn(8, device=\"cuda\").as_strided((2, 4), (1, 1))\n\n with nvfuser_singleton_fusion(True):\n def t(x):\n return x + 1.0\n\n t_jit = torch.jit.script(t)\n self._run_helper(t_jit, t, x)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n def test_reduction_empty_axes(self):\n x = torch.randn(4, 2, 3, device=\"cuda\").permute([1, 2, 0])\n\n with nvfuser_singleton_fusion(True):\n def t(x):\n sizes : List[int] = []\n return x.sum(sizes)\n\n t_jit = torch.jit.script(t)\n self._run_helper(t_jit, t, x)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n def test_int_tensor_input(self):\n x = torch.randn(4, 2, device=\"cuda\").to(dtype=torch.int)\n\n with nvfuser_singleton_fusion(True):\n def t(x):\n return 
x.amax(dim=0)\n\n t_jit = torch.jit.script(t)\n self._run_helper(t_jit, t, x)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_to_boolean(self):\n x = torch.randn(4, 2, device=\"cuda\")\n\n with nvfuser_singleton_fusion(True):\n def t(x):\n return x.to(dtype=torch.bool)\n\n t_jit = torch.jit.script(t)\n self._run_helper(t_jit, t, x)\n\n # TODO: revert disabled alias ops\n @unittest.skipIf(True, \"skipping this test since reshape is disabled now\")\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_view_copy_graph_guard(self):\n x = torch.randn(4, 2, 3, device=\"cuda\").permute([1, 2, 0])\n y = [4, 6]\n\n with nvfuser_singleton_fusion(True):\n def t(x, y : List[int]):\n t1 = x + 1.0\n t2 = t1 * 1.0\n out = t2.reshape(y)\n return out.relu()\n\n t_jit = torch.jit.script(t)\n self._run_helper(t_jit, t, x, y)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_view_copy_graph_guard_double_fusion(self):\n x = torch.randn(2, 2, 5, device=\"cuda\")\n w = torch.randn(5, 5, device=\"cuda\")\n\n with nvfuser_singleton_fusion(True):\n def t(x, w):\n o = x.view([4, x.size()[-1]])\n o = torch.matmul(o, w)\n o = o.view([2, 2, o.size()[1]])\n return o\n\n t_jit = torch.jit.script(t)\n for i in range(3):\n jit_o = t_jit(x, w)\n o = t(x, w)\n self.assertEqual(jit_o, o)\n # TODO: revert disabled aten::view\n # self.assertGraphContainsExactly(t_jit.graph_for(x, w), FUSION_GUARD, 2, consider_subgraphs=True)\n self.assertGraphContainsExactly(t_jit.graph_for(x, w), FUSION_GUARD, 0, consider_subgraphs=True)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_input_output_passthrough(self):\n def t(t0, t1, t2):\n mask = t1.to(dtype=torch.bool)\n masked_input = torch.where(t0, mask, t2)\n return masked_input, mask\n\n t_jit = torch.jit.script(t)\n # stick to integers, this avoid the numerical difference due to our\n # promotion\n x = torch.randn(4, 4, device='cuda').to(dtype=torch.bool)\n y = torch.randn(4, 4, device='cuda').to(dtype=torch.bool)\n z = torch.tensor(1.0, device='cuda').to(dtype=torch.bool)\n jit_o = t_jit(x, y, z)\n jit_o = t_jit(x, y, z)\n o = t(x, y, z)\n for oo, jit_oo in zip(o, jit_o):\n self.assertEqual(oo.dtype, jit_oo.dtype)\n self.assertEqual(oo, jit_oo)\n self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_pointwise_reference_tensor(self):\n def t(input1, input2, scalar):\n _unsafe_view = torch.ops.aten._unsafe_view(input1, [2, 4, 16])\n add_ = torch.ops.aten.add_(_unsafe_view, input2)\n gelu_ = torch.ops.aten.gelu(add_)\n view_ = torch.ops.aten.view(gelu_, [8, 16])\n mul_ = torch.ops.aten.mul(add_, scalar)\n return [view_, mul_]\n\n x = torch.randn(8, 16, device=\"cuda\")\n bias = torch.randn(16, device=\"cuda\")\n scalar = torch.ones(torch.Size([]), device=\"cuda\")\n\n t_jit = torch.jit.script(t)\n for i in range(3):\n jit_o = t_jit(x, bias, scalar)\n o = t(x, 
bias, scalar)\n self.assertEqual(jit_o, o)\n self.assertGraphContains(t_jit.graph_for(x, bias, scalar), FUSION_GUARD)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n def test_native_batch_norm_backward(self):\n grad_output = torch.randn(4, 2, 3, device=\"cuda\")\n input = torch.randn(4, 2, 3, device=\"cuda\")\n weight = torch.randn(2, device=\"cuda\")\n\n r_m = torch.randn(2, device=\"cuda\")\n r_v = torch.randn(2, device=\"cuda\").abs()\n\n save_mean = torch.randn(2, device=\"cuda\")\n save_invstd = torch.randn(2, device=\"cuda\").abs()\n\n with nvfuser_singleton_fusion(True):\n def t(grad_out, input, weight, r_m, r_v, save_mean, save_invstd, train: bool, eps: float, mask: List[bool]):\n return torch.ops.aten.native_batch_norm_backward(grad_out, input, weight, r_m, r_v, save_mean,\n save_invstd, train, eps, mask)\n\n t_jit = torch.jit.script(t)\n for i in range(4):\n jit_o = t_jit(grad_output, input, weight, r_m.clone(), r_v.clone(),\n save_mean, save_invstd, True, 1e-5, [True, True, True])\n\n ref_m = r_m.clone()\n ref_v = r_v.clone()\n jit_o = t_jit(grad_output, input, weight, r_m, r_v, save_mean, save_invstd, True, 1e-5, [True, True, True])\n o = t(grad_output, input, weight, ref_m, ref_v, save_mean, save_invstd, True, 1e-5, [True, True, True])\n for oo, jit_oo in zip(o, jit_o):\n self.assertEqual(oo.dtype, jit_oo.dtype)\n self.assertEqual(oo, jit_oo)\n self.assertEqual(ref_m.dtype, r_m.dtype)\n self.assertEqual(ref_m, r_m)\n self.assertEqual(ref_v.dtype, r_v.dtype)\n self.assertEqual(ref_v, r_v)\n self.assertGraphContains(t_jit.graph_for(grad_output, input, weight, r_m.clone(), r_v.clone(), save_mean,\n save_invstd, True, 1e-5, [True, True, True]), FUSION_GUARD)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_contiguous_on_broadcasted(self):\n x = torch.randn(4, 1, device=\"cuda\")\n y = torch.randn(4, 128, device=\"cuda\")\n\n with nvfuser_singleton_fusion(True):\n def t(x, y):\n t1 = x.expand([4, 128])\n t2 = t1 * y\n return t2\n\n t_jit = torch.jit.script(t)\n self._run_helper(t_jit, t, x, y)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_skip_parser(self):\n x = torch.randn(4, 12, device=\"cuda\")\n\n with nvfuser_singleton_fusion(True):\n def fn(x):\n t1 = x + 1.0\n return t1.relu()\n\n fn_jit = torch.jit.script(fn)\n self._run_helper(fn_jit, fn, x)\n\n # add node should have been merged into fusion\n self.assertGraphContains(fn_jit.graph_for(x), FUSION_GUARD)\n self.assertGraphContainsExactly(fn_jit.graph_for(x), 'aten::add', 0)\n\n # flips skip parse for `aten::add`, following fusion should skip the\n # add node\n self.assertFalse(torch._C._jit_set_nvfuser_skip_node_kind(\"aten::add\", True))\n\n def fn_1(x):\n t1 = x + 2.0 # change const value so we'll not reuse plan\n return t1.relu()\n\n fn_1_jit = torch.jit.script(fn_1)\n self._run_helper(fn_1_jit, fn_1, x)\n\n # add node should NOT have been merged into fusion, since parsing of `aten::add` is skipped\n self.assertGraphContains(fn_1_jit.graph_for(x), FUSION_GUARD)\n self.assertGraphContainsExactly(fn_1_jit.graph_for(x), 'aten::add', 1)\n\n # flips skip parse for `aten::add`, next fusion 
should fuse add node\n self.assertTrue(torch._C._jit_set_nvfuser_skip_node_kind(\"aten::add\", True))\n\n def fn_2(x):\n t1 = x + 2.0 # change const value so we'll not reuse plan\n return t1.relu()\n\n fn_2_jit = torch.jit.script(fn_2)\n self._run_helper(fn_2_jit, fn_2, x)\n\n # add node should have been merged into fusion\n self.assertGraphContains(fn_2_jit.graph_for(x), FUSION_GUARD)\n self.assertGraphContainsExactly(fn_2_jit.graph_for(x), 'aten::add', 0)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_cuda_fusion_guard(self):\n old_guard = torch._C._jit_set_nvfuser_guard_mode(True)\n\n class ConvModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n return x.sin().sigmoid()\n\n mod = ConvModule().to(device=\"cuda\")\n\n inputs = [torch.randn(20, 16, 50, 100, device=\"cuda\", requires_grad=True)]\n\n def reduce_scalar(temp):\n return temp.sum()\n\n scripted = torch.jit.script(mod)\n with torch.no_grad():\n scripted(*inputs)\n res = scripted(*inputs)\n reduce_scalar(res).backward()\n torch._C._jit_set_nvfuser_guard_mode(old_guard)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_nvfuser_comparison_callbacks_with_fallback(self):\n try:\n fused_result = None\n unfused_result = None\n graph_ir = None\n\n def callback(fused_outputs, unfused_outputs, graph_str):\n nonlocal unfused_result\n nonlocal fused_result\n nonlocal graph_ir\n unfused_result = unfused_outputs[-1]\n fused_result = fused_outputs[-1]\n graph_ir = graph_str\n torch._C._jit_nvfuser_set_comparison_callback(True, callback)\n\n def fn(x, y):\n z = torch.add(x, y)\n return torch.relu(z)\n\n x = torch.rand((4, 4)).cuda() - 0.5\n y = torch.rand((4, 4)).cuda() - 0.5\n\n fn_s = torch.jit.script(fn)\n fn_s(x, y)\n fn_s(x, y)\n fn_s(x, y)\n\n expected = fn(x, y)\n\n self.assertEqual(expected, fused_result)\n self.assertEqual(expected, unfused_result)\n FileCheck().check(\"aten::add\").run(graph_ir)\n finally:\n torch._C._jit_nvfuser_clear_comparison_callback()\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_nvfuser_comparison_callbacks_without_fallback(self):\n try:\n fused_result = None\n unfused_result = None\n graph_ir = None\n\n def callback(fused_outputs, unfused_outputs, graph_str):\n nonlocal unfused_result\n nonlocal fused_result\n nonlocal graph_ir\n if len(unfused_outputs) > 0:\n unfused_result = unfused_outputs[-1]\n fused_result = fused_outputs[-1]\n graph_ir = graph_str\n torch._C._jit_nvfuser_set_comparison_callback(False, callback)\n\n def fn(x, y):\n z = torch.add(x, y)\n return torch.relu(z)\n\n x = torch.rand((4, 4)).cuda() - 0.5\n y = torch.rand((4, 4)).cuda() - 0.5\n\n fn_s = torch.jit.script(fn)\n fn_s(x, y)\n fn_s(x, y)\n fn_s(x, y)\n\n expected = fn(x, y)\n\n self.assertEqual(expected, fused_result)\n self.assertEqual(None, unfused_result)\n FileCheck().check(\"aten::add\").run(graph_ir)\n finally:\n torch._C._jit_nvfuser_clear_comparison_callback()\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires NVFuser\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def 
test_cuda_fusion_guard_backward(self):\n old_guard = torch._C._jit_set_nvfuser_guard_mode(True)\n\n inp = torch.randn(10, device=\"cuda\", requires_grad=True)\n grad = torch.randn(10, device=\"cuda\")\n\n def f(x):\n a = x.cos().cos()\n return a\n scripted = torch.jit.script(f)\n\n with profile(activities=[ProfilerActivity.CPU]) as prof:\n for _ in range(5):\n inp.grad = None\n out = scripted(inp)\n out.backward(grad)\n\n # check that we do not have fallback triggered\n self.assertEqual(prof.events().table().find(\"fallback\"), -1)\n torch._C._jit_set_nvfuser_guard_mode(old_guard)\n\n # TODO: generalize this\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n @unittest.skipIf(is_pre_volta(), \"reduction not supported in pre volta device\")\n def test_inf_quick_patch(self):\n inputs = [torch.tensor([-float('inf'), float('inf'), 4.0], device=\"cuda\"),\n torch.tensor([1.0, float('inf'), 4.0], device=\"cuda\"),\n torch.tensor([-float('inf'), -1.5, 4.0], device=\"cuda\"),\n torch.tensor([1.0, -3.0, float('nan')], device=\"cuda\"),\n torch.tensor([-float('inf'), -float('inf'), -float('inf')], device=\"cuda\"),\n torch.tensor([float('inf'), float('inf'), float('inf')], device=\"cuda\"),\n torch.tensor([float('nan'), float('nan'), float('nan')], device=\"cuda\")]\n\n def fn_amax(x):\n return x.amax(dim=0)\n\n def fn_amin(x):\n return x.amin(dim=0)\n\n def fn_add_nan(x):\n return x.relu() + float('nan')\n\n def fn_add(x):\n return x + 1.0\n\n with nvfuser_singleton_fusion(True):\n for t in [fn_amax, fn_amin, fn_add, fn_add_nan]:\n for x in inputs:\n t_jit = torch.jit.script(t)\n self._run_helper(t_jit, t, x)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_clamp(self):\n x = torch.tensor([1., float('inf'), 2., float('nan'), float('-inf')], device=\"cuda\")\n\n def clamp_max(x):\n return x.clamp(max=1.5)\n\n def clamp_min_max(x):\n return x.clamp(min=1.5)\n\n def clamp_min(x):\n return x.clamp(min=1., max=3.)\n\n with nvfuser_singleton_fusion(True):\n for t in [clamp_max, clamp_min, clamp_min_max]:\n t_jit = torch.jit.script(t)\n self._run_helper(t_jit, t, x)\n\n\n\nclass TestPassManagerCudaFuser(JitTestCase):\n def setUp(self):\n super().setUp()\n if RUN_NVFUSER:\n self.is_enabled = torch._C._jit_set_nvfuser_enabled(False)\n\n def tearDown(self):\n if RUN_NVFUSER:\n torch._C._jit_set_nvfuser_enabled(self.is_enabled)\n super().tearDown()\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n def test_context_manager_test(self):\n x = torch.randn(4, 8, dtype=torch.float, device=\"cuda\")\n y = torch.randn(4, 8, dtype=torch.float, device=\"cuda\")\n with torch.jit.fuser('fuser2'):\n with torch.jit.fuser('fuser2'):\n\n def t1(x, y):\n o = x + y\n o = o + 2.0\n return o\n t_jit = torch.jit.script(t1)\n t_jit(x, y)\n t_jit(x, y)\n self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)\n\n def t2(x, y):\n o = x + y\n o = o + 3.0\n return o\n t_jit_2 = torch.jit.script(t2)\n t_jit_2(x, y)\n t_jit_2(x, y)\n self.assertGraphContains(t_jit_2.graph_for(x, y), FUSION_GUARD)\n\n def t3(x, y):\n o = x + y\n o = o + 4.0\n return o\n t_jit_3 = torch.jit.script(t3)\n t_jit_3(x, y)\n t_jit_3(x, y)\n 
self.assertGraphContainsExactly(t_jit_3.graph_for(x, y), FUSION_GUARD, 0)\n\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n def test_register_fuser(self):\n self.assertFalse(torch._C._jit_set_nvfuser_enabled(True))\n self.assertTrue(torch._C._jit_nvfuser_enabled())\n self.assertTrue(torch._C._jit_set_nvfuser_enabled(True))\n self.assertTrue(torch._C._jit_nvfuser_enabled())\n self.assertTrue(torch._C._jit_set_nvfuser_enabled(False))\n self.assertFalse(torch._C._jit_nvfuser_enabled())\n\n @unittest.skipIf(RUN_CUDA, \"Testing on CPU only\")\n def test_register_fuser_cpu(self):\n with self.assertRaises(RuntimeError):\n torch._C._jit_set_nvfuser_enabled(True)\n torch._C._jit_set_nvfuser_enabled(False)\n\n @unittest.skipIf(not RUN_CUDA, \"requires CUDA\")\n @unittest.skipIf(not TEST_WITH_ROCM, \"ROCM test only\")\n def test_register_fuser_rocm(self):\n with self.assertRaises(RuntimeError):\n torch._C._jit_set_nvfuser_enabled(True)\n torch._C._jit_set_nvfuser_enabled(False)\n\n# See TestNNCOpInfoParent\nclass TestCudaFuserOpInfoParent(JitCommonTestCase):\n pass\n\nclass TestCudaFuserOpInfo(TestCudaFuserOpInfoParent):\n def setUp(self):\n super(TestCudaFuserOpInfoParent, self).setUp()\n if RUN_NVFUSER:\n self.cuda_fuser_options = CudaFuserTestOptions()\n # enables guard mode since tracing could change graph to violate guard.\n torch._C._jit_set_nvfuser_guard_mode(True)\n self.nvfuser_single_node_mode = torch._C._jit_set_nvfuser_single_node_mode(True)\n\n def tearDown(self):\n if RUN_NVFUSER:\n self.cuda_fuser_options.restore()\n\n torch._C._jit_set_nvfuser_single_node_mode(self.nvfuser_single_node_mode)\n\n super(TestCudaFuserOpInfoParent, self).tearDown()\n\n @slowTest\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @ops(op_db, dtypes=OpDTypes.supported)\n def test_nvfuser_correctness(self, device, dtype, op):\n variant_sample_pairs = get_traced_sample_variant_pairs(device, dtype, op)\n\n for variant, sample in variant_sample_pairs:\n trace = create_traced_fn(self, variant, cache_traced_fn=True)\n ref = variant(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)\n\n trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)\n\n val = trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)\n\n self.assertEqual(ref, val, exact_layout=True)\n\n # Note: Clearing CU after NVFuser tests\n # https://github.com/pytorch/pytorch/issues/35600\n # each torch.jit.trace adds state to the _python_cu compilation unit\n # since this test traces a lot of functions, out-of-memory can occur\n # if the CU is not cleared.\n torch.jit._state._python_cu.drop_all_functions()\n\n @slowTest\n @unittest.skipIf(not RUN_NVFUSER, \"requires CUDA\")\n @unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,\n \"Requires fusion optimization pass to be effective\")\n @ops(op_db, allowed_dtypes=(torch.float16, torch.bfloat16, torch.float32,\n torch.float64, torch.complex64, torch.complex128))\n def test_nvfuser_extremal_values(self, device, dtype, op):\n variant_sample_pairs = get_traced_sample_variant_pairs(device, dtype, op)\n\n def _get_extremal_tensor(x, val, dtype):\n if x.dtype != dtype:\n return x\n return torch.full_like(x, val)\n\n def _get_extremal_input(x, val, dtype):\n if isinstance(x, torch.Tensor):\n return _get_extremal_tensor(x, val, dtype)\n elif is_iterable_of_tensors(x):\n return [_get_extremal_tensor(y, val, dtype) for y in x]\n return x\n\n def _get_extremal_sample(sample: SampleInput, val, dtype):\n extremal_sample = SampleInput(\n 
input=_get_extremal_input(sample.input, val, dtype),\n args=[_get_extremal_input(x, val, dtype) for x in sample.args],\n kwargs={k: _get_extremal_input(v, val, dtype) for k, v in sample.kwargs.items()},\n )\n return extremal_sample\n\n def _get_extremal_samples(sample: SampleInput, dtype):\n vals = [float('inf'), float('-inf'), float('nan')]\n if dtype.is_complex:\n complex_vals = itertools.product(vals, vals)\n vals = list(map(lambda x: complex(*x), complex_vals))\n for val in vals:\n yield _get_extremal_sample(sample, val, dtype)\n\n variant_sample_pairs = get_traced_sample_variant_pairs(device, dtype, op)\n\n for variant, sample in variant_sample_pairs:\n\n trace = create_traced_fn(self, variant, cache_traced_fn=True)\n trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)\n trace(*clone_inputs((sample.input, *sample.args)), **sample.kwargs)\n\n for extremal_sample in _get_extremal_samples(sample, dtype):\n try:\n with freeze_rng_state():\n ref = variant(*clone_inputs((extremal_sample.input, *extremal_sample.args)),\n **extremal_sample.kwargs)\n except (torch._C._LinAlgError, RuntimeError, ValueError):\n # if eager errors out, then don't expect NVFuser to pass\n continue\n\n with freeze_rng_state():\n val = trace(*clone_inputs((extremal_sample.input, *extremal_sample.args)),\n **extremal_sample.kwargs)\n\n self.assertEqual(val, ref, equal_nan=True, exact_device=True)\n\n # See [Note: Clearing CU after NVFuser tests]\n torch.jit._state._python_cu.drop_all_functions()\n\ninstantiate_device_type_tests(TestCudaFuserOpInfo, globals(), only_for=(\"cuda\"))\n\n\nif __name__ == '__main__':\n run_tests()\n" ]
[ [ "torch.cuda.manual_seed_all", "torch.cuda.manual_seed", "torch.testing._internal.common_utils.run_tests", "torch.rand", "torch.nn.functional.softmax", "torch.ops.aten.add_", "torch.softmax", "torch.jit.trace", "torch.native_layer_norm", "torch.jit._state._python_cu.drop_all_functions", "torch.testing._internal.jit_utils.clone_inputs", "torch.randn", "torch.layer_norm", "torch.nn.functional.batch_norm", "torch.nn.functional.gelu", "torch.unsqueeze", "torch._C._jit_set_nvfuser_skip_node_kind", "torch.lerp", "torch.profiler.profile", "torch._C._nn.linear", "torch.reshape", "torch.testing.FileCheck", "torch.jit.fuser", "torch.cuda.device", "torch.allclose", "torch.ops.aten.mul", "torch.native_dropout", "torch.nn.functional.conv2d", "torch._C._jit_override_can_fuse_on_cpu", "torch.nn.functional.relu", "torch.zeros", "torch.empty", "torch.jit.script", "torch.testing._internal.jit_utils.get_traced_sample_variant_pairs", "torch.no_grad", "torch._C._jit_set_nvfuser_guard_mode", "torch._C._jit_nvfuser_set_comparison_callback", "torch.testing._internal.common_device_type.ops", "torch._C._jit_set_nvfuser_enabled", "torch.nn.functional.softplus", "torch._C._jit_override_can_fuse_on_gpu", "torch._C._jit_nvfuser_clear_comparison_callback", "torch.nn.functional.dropout", "torch.backends.cudnn.flags", "torch.testing._internal.common_utils.is_iterable_of_tensors", "torch.threshold", "torch.conv2d", "torch.version.cuda.split", "torch._C._jit_set_autocast_mode", "torch.cuda.current_device", "torch._softmax", "torch.Size", "torch.dropout", "torch.relu", "torch.mul", "torch.cuda.amp.autocast", "numpy.random.randint", "torch.cuda.Stream", "torch.squeeze", "torch.clamp", "torch.rand_like", "torch.chunk", "torch.autograd.gradcheck.gradcheck", "torch.nn.BatchNorm2d", "torch.add", "torch._C._jit_set_bailout_depth", "torch._C._jit_set_nvfuser_single_node_mode", "torch._C._jit_set_nvfuser_horizontal_mode", "torch.testing._internal.jit_metaprogramming_utils.create_traced_fn", "torch.cuda.is_bf16_supported", "torch.randn_like", "torch.cuda.CUDAGraph", "torch.where", "torch.full_like", "torch._C._jit_can_fuse_on_gpu", "torch.matmul", "torch.testing._internal.common_utils.freeze_rng_state", "torch.randint", "torch.addcmul", "torch.jit.strict_fusion", "torch.mm", "torch._C._jit_set_profiling_executor", "torch.jit.last_executed_optimized_graph", "torch.cuda.current_stream", "torch._C._jit_nvfuser_enabled", "torch.ops.aten._unsafe_view", "torch.testing._internal.codegen.random_topo_test.runDefaultTestWithSeed", "torch._C._debug_set_autodiff_subgraph_inlining", "torch.ones", "torch.ops.aten.gelu", "torch.lt", "torch.rsub", "torch.tensor", "torch._C._jit_can_fuse_on_cpu", "torch.sum", "torch.nn.functional.log_softmax", "torch.ops.aten.view", "torch.nn.functional.linear", "torch.ops.aten.native_batch_norm_backward", "torch.nn.functional.instance_norm", "numpy.random.random", "torch.abs", "torch._C._jit_set_texpr_fuser_enabled", "torch.cuda.stream", "torch._C._jit_set_profiling_mode" ] ]
vincent841/cameracalib
[ "94356af0bc14c61551710acbc287fba010b87e76" ]
[ "ArucoTrackerRS.py" ]
[ "import pyrealsense2 as rs\nimport numpy as np\nimport cv2\nimport cv2.aruco as aruco\nimport glob\n\ncalibFile = cv2.FileStorage(\"calibData.xml\", cv2.FILE_STORAGE_READ)\ncmnode = calibFile.getNode(\"cameraMatrix\")\nmtx = cmnode.mat()\ndcnode = calibFile.getNode(\"distCoeff\")\ndist = dcnode.mat()\n\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n#Configure depth and color streams\npipeline = rs.pipeline()\nconfig = rs.config()\nconfig.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\nconfig.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\n\n# Start streaming\npipeline.start(config)\n\nwhile (True):\n # Wait for a coherent pair of frames: depth and color\n frames = pipeline.wait_for_frames()\n color_frame = frames.get_color_frame()\n if not color_frame:\n continue\n\n # Convert images to numpy arrays\n color_image = np.asanyarray(color_frame.get_data())\n\n # operations on the frame\n gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)\n\n # set dictionary size depending on the aruco marker selected\n aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_1000)\n\n # detector parameters can be set here (List of detection parameters[3])\n parameters = aruco.DetectorParameters_create()\n parameters.adaptiveThreshConstant = 10\n\n # lists of ids and the corners belonging to each id\n corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)\n print(ids)\n\n # font for displaying text (below)\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n # check if the ids list is not empty\n # if no check is added the code will crash\n if np.all(ids != None):\n\n # estimate pose of each marker and return the values\n # rvet and tvec-different from camera coefficients\n rvec, tvec ,_ = aruco.estimatePoseSingleMarkers(corners, 0.05, mtx, dist)\n #(rvec-tvec).any() # get rid of that nasty numpy value array error\n\n for i in range(0, ids.size):\n # draw axis for the aruco markers\n aruco.drawAxis(color_image, mtx, dist, rvec[i], tvec[i], 0.1)\n\n # draw a square around the markers\n aruco.drawDetectedMarkers(color_image, corners)\n\n # code to show ids of the marker found\n strg = ''\n for i in range(0, ids.size):\n strg += str(ids[i][0])+', '\n\n cv2.putText(color_image, \"Id: \" + strg, (0,64), font, 1, (0,255,0),2,cv2.LINE_AA)\n\n\n else:\n # code to show 'No Ids' when no markers are found\n cv2.putText(color_image, \"No Ids\", (0,64), font, 1, (0,255,0),2,cv2.LINE_AA)\n\n # display the resulting frame\n cv2.imshow('Image Show',color_image)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\n# Stop streaming\npipeline.stop()\ncv2.destroyAllWindows() \n\n" ]
[ [ "numpy.all" ] ]
DraganaMana/mne-python
[ "83d48ec9e93bc176ae7fb8d000521ba3bd6b4c3c" ]
[ "mne/source_estimate.py" ]
[ "# Authors: Alexandre Gramfort <[email protected]>\n# Matti Hamalainen <[email protected]>\n# Martin Luessi <[email protected]>\n# Mads Jensen <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport copy\nimport os.path as op\nimport numpy as np\nfrom scipy import linalg, sparse\nfrom scipy.sparse import coo_matrix, block_diag as sparse_block_diag\n\nfrom .filter import resample\nfrom .fixes import einsum\nfrom .evoked import _get_peak\nfrom .surface import read_surface, _get_ico_surface, mesh_edges\nfrom .source_space import (_ensure_src, _get_morph_src_reordering,\n _ensure_src_subject, SourceSpaces)\nfrom .utils import (get_subjects_dir, _check_subject, logger, verbose,\n _time_mask, warn as warn_, copy_function_doc_to_method_doc)\nfrom .viz import (plot_source_estimates, plot_vector_source_estimates,\n plot_volume_source_estimates)\nfrom .io.base import ToDataFrameMixin, TimeMixin\nfrom .externals.h5io import read_hdf5, write_hdf5\n\n\ndef _read_stc(filename):\n \"\"\"Aux Function.\"\"\"\n with open(filename, 'rb') as fid:\n buf = fid.read()\n\n stc = dict()\n offset = 0\n num_bytes = 4\n\n # read tmin in ms\n stc['tmin'] = float(np.frombuffer(buf, dtype=\">f4\", count=1,\n offset=offset))\n stc['tmin'] /= 1000.0\n offset += num_bytes\n\n # read sampling rate in ms\n stc['tstep'] = float(np.frombuffer(buf, dtype=\">f4\", count=1,\n offset=offset))\n stc['tstep'] /= 1000.0\n offset += num_bytes\n\n # read number of vertices/sources\n vertices_n = int(np.frombuffer(buf, dtype=\">u4\", count=1, offset=offset))\n offset += num_bytes\n\n # read the source vector\n stc['vertices'] = np.frombuffer(buf, dtype=\">u4\", count=vertices_n,\n offset=offset)\n offset += num_bytes * vertices_n\n\n # read the number of timepts\n data_n = int(np.frombuffer(buf, dtype=\">u4\", count=1, offset=offset))\n offset += num_bytes\n\n if (vertices_n and # vertices_n can be 0 (empty stc)\n ((len(buf) // 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):\n raise ValueError('incorrect stc file size')\n\n # read the data matrix\n stc['data'] = np.frombuffer(buf, dtype=\">f4\", count=vertices_n * data_n,\n offset=offset)\n stc['data'] = stc['data'].reshape([data_n, vertices_n]).T\n\n return stc\n\n\ndef _write_stc(filename, tmin, tstep, vertices, data):\n \"\"\"Write an STC file.\n\n Parameters\n ----------\n filename : string\n The name of the STC file.\n tmin : float\n The first time point of the data in seconds.\n tstep : float\n Time between frames in seconds.\n vertices : array of integers\n Vertex indices (0 based).\n data : 2D array\n The data matrix (nvert * ntime).\n \"\"\"\n fid = open(filename, 'wb')\n\n # write start time in ms\n fid.write(np.array(1000 * tmin, dtype='>f4').tostring())\n # write sampling rate in ms\n fid.write(np.array(1000 * tstep, dtype='>f4').tostring())\n # write number of vertices\n fid.write(np.array(vertices.shape[0], dtype='>u4').tostring())\n # write the vertex indices\n fid.write(np.array(vertices, dtype='>u4').tostring())\n\n # write the number of timepts\n fid.write(np.array(data.shape[1], dtype='>u4').tostring())\n #\n # write the data\n #\n fid.write(np.array(data.T, dtype='>f4').tostring())\n\n # close the file\n fid.close()\n\n\ndef _read_3(fid):\n \"\"\"Read 3 byte integer from file.\"\"\"\n data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)\n\n out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2]\n\n return out\n\n\ndef _read_w(filename):\n \"\"\"Read a w file.\n\n w files contain activations or source reconstructions for a single 
time\n point.\n\n Parameters\n ----------\n filename : string\n The name of the w file.\n\n Returns\n -------\n data: dict\n The w structure. It has the following keys:\n vertices vertex indices (0 based)\n data The data matrix (nvert long)\n \"\"\"\n with open(filename, 'rb', buffering=0) as fid: # buffering=0 for np bug\n # skip first 2 bytes\n fid.read(2)\n\n # read number of vertices/sources (3 byte integer)\n vertices_n = int(_read_3(fid))\n\n vertices = np.zeros((vertices_n), dtype=np.int32)\n data = np.zeros((vertices_n), dtype=np.float32)\n\n # read the vertices and data\n for i in range(vertices_n):\n vertices[i] = _read_3(fid)\n data[i] = np.fromfile(fid, dtype='>f4', count=1)[0]\n\n w = dict()\n w['vertices'] = vertices\n w['data'] = data\n\n return w\n\n\ndef _write_3(fid, val):\n \"\"\"Write 3 byte integer to file.\"\"\"\n f_bytes = np.zeros((3), dtype=np.uint8)\n f_bytes[0] = (val >> 16) & 255\n f_bytes[1] = (val >> 8) & 255\n f_bytes[2] = val & 255\n fid.write(f_bytes.tostring())\n\n\ndef _write_w(filename, vertices, data):\n \"\"\"Write a w file.\n\n w files contain activations or source reconstructions for a single time\n point.\n\n Parameters\n ----------\n filename: string\n The name of the w file.\n vertices: array of int\n Vertex indices (0 based).\n data: 1D array\n The data array (nvert).\n \"\"\"\n assert (len(vertices) == len(data))\n\n fid = open(filename, 'wb')\n\n # write 2 zero bytes\n fid.write(np.zeros((2), dtype=np.uint8).tostring())\n\n # write number of vertices/sources (3 byte integer)\n vertices_n = len(vertices)\n _write_3(fid, vertices_n)\n\n # write the vertices and data\n for i in range(vertices_n):\n _write_3(fid, vertices[i])\n # XXX: without float() endianness is wrong, not sure why\n fid.write(np.array(float(data[i]), dtype='>f4').tostring())\n\n # close the file\n fid.close()\n\n\ndef read_source_estimate(fname, subject=None):\n \"\"\"Read a source estimate object.\n\n Parameters\n ----------\n fname : str\n Path to (a) source-estimate file(s).\n subject : str | None\n Name of the subject the source estimate(s) is (are) from.\n It is good practice to set this attribute to avoid combining\n incompatible labels and SourceEstimates (e.g., ones from other\n subjects). Note that due to file specification limitations, the\n subject name isn't saved to or loaded from files written to disk.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate | VolSourceEstimate | MixedSourceEstimate\n The source estimate object loaded from file.\n\n Notes\n -----\n - for volume source estimates, ``fname`` should provide the path to a\n single file named '*-vl.stc` or '*-vol.stc'\n - for surface source estimates, ``fname`` should either provide the\n path to the file corresponding to a single hemisphere ('*-lh.stc',\n '*-rh.stc') or only specify the asterisk part in these patterns. 
In any\n case, the function expects files for both hemisphere with names\n following this pattern.\n - for vector surface source estimates, only HDF5 files are supported.\n - for mixed source estimates, only HDF5 files are supported.\n - for single time point .w files, ``fname`` should follow the same\n pattern as for surface estimates, except that files are named\n '*-lh.w' and '*-rh.w'.\n \"\"\" # noqa: E501\n fname_arg = fname\n\n # make sure corresponding file(s) can be found\n ftype = None\n if op.exists(fname):\n if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \\\n fname.endswith('-vl.w') or fname.endswith('-vol.w'):\n ftype = 'volume'\n elif fname.endswith('.stc'):\n ftype = 'surface'\n if fname.endswith(('-lh.stc', '-rh.stc')):\n fname = fname[:-7]\n else:\n err = (\"Invalid .stc filename: %r; needs to end with \"\n \"hemisphere tag ('...-lh.stc' or '...-rh.stc')\"\n % fname)\n raise IOError(err)\n elif fname.endswith('.w'):\n ftype = 'w'\n if fname.endswith(('-lh.w', '-rh.w')):\n fname = fname[:-5]\n else:\n err = (\"Invalid .w filename: %r; needs to end with \"\n \"hemisphere tag ('...-lh.w' or '...-rh.w')\"\n % fname)\n raise IOError(err)\n elif fname.endswith('.h5'):\n ftype = 'h5'\n fname = fname[:-3]\n else:\n raise RuntimeError('Unknown extension for file %s' % fname_arg)\n\n if ftype is not 'volume':\n stc_exist = [op.exists(f)\n for f in [fname + '-rh.stc', fname + '-lh.stc']]\n w_exist = [op.exists(f)\n for f in [fname + '-rh.w', fname + '-lh.w']]\n if all(stc_exist) and (ftype is not 'w'):\n ftype = 'surface'\n elif all(w_exist):\n ftype = 'w'\n elif op.exists(fname + '.h5'):\n ftype = 'h5'\n elif op.exists(fname + '-stc.h5'):\n ftype = 'h5'\n fname += '-stc'\n elif any(stc_exist) or any(w_exist):\n raise IOError(\"Hemisphere missing for %r\" % fname_arg)\n else:\n raise IOError(\"SourceEstimate File(s) not found for: %r\"\n % fname_arg)\n\n # read the files\n if ftype == 'volume': # volume source space\n if fname.endswith('.stc'):\n kwargs = _read_stc(fname)\n elif fname.endswith('.w'):\n kwargs = _read_w(fname)\n kwargs['data'] = kwargs['data'][:, np.newaxis]\n kwargs['tmin'] = 0.0\n kwargs['tstep'] = 0.0\n else:\n raise IOError('Volume source estimate must end with .stc or .w')\n elif ftype == 'surface': # stc file with surface source spaces\n lh = _read_stc(fname + '-lh.stc')\n rh = _read_stc(fname + '-rh.stc')\n assert lh['tmin'] == rh['tmin']\n assert lh['tstep'] == rh['tstep']\n kwargs = lh.copy()\n kwargs['data'] = np.r_[lh['data'], rh['data']]\n kwargs['vertices'] = [lh['vertices'], rh['vertices']]\n elif ftype == 'w': # w file with surface source spaces\n lh = _read_w(fname + '-lh.w')\n rh = _read_w(fname + '-rh.w')\n kwargs = lh.copy()\n kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T\n kwargs['vertices'] = [lh['vertices'], rh['vertices']]\n # w files only have a single time point\n kwargs['tmin'] = 0.0\n kwargs['tstep'] = 1.0\n elif ftype == 'h5':\n kwargs = read_hdf5(fname + '.h5', title='mnepython')\n if \"src_type\" in kwargs:\n ftype = kwargs['src_type']\n del kwargs['src_type']\n\n if ftype != 'volume':\n # Make sure the vertices are ordered\n vertices = kwargs['vertices']\n if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):\n sidx = [np.argsort(verts) for verts in vertices]\n vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]\n data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]\n kwargs['vertices'] = vertices\n kwargs['data'] = data\n\n if 'subject' not in kwargs:\n kwargs['subject'] = 
subject\n if subject is not None and subject != kwargs['subject']:\n raise RuntimeError('provided subject name \"%s\" does not match '\n 'subject name from the file \"%s'\n % (subject, kwargs['subject']))\n\n if ftype in ('volume', 'discrete'):\n stc = VolSourceEstimate(**kwargs)\n elif ftype == 'mixed':\n stc = MixedSourceEstimate(**kwargs)\n elif ftype == 'h5' and kwargs['data'].ndim == 3:\n stc = VectorSourceEstimate(**kwargs)\n else:\n stc = SourceEstimate(**kwargs)\n\n return stc\n\n\ndef _get_src_type(src, vertices, warn_text=None):\n src_type = None\n if src is None:\n if warn_text is None:\n warn_(\"src should not be None for a robust guess of stc type.\")\n else:\n warn_(warn_text)\n if isinstance(vertices, list) and len(vertices) == 2:\n src_type = 'surface'\n elif isinstance(vertices, np.ndarray) or isinstance(vertices, list) \\\n and len(vertices) == 1:\n src_type = 'volume'\n elif isinstance(vertices, list) and len(vertices) > 2:\n src_type = 'mixed'\n else:\n src_type = src.kind\n assert src_type in ('surface', 'volume', 'mixed', 'discrete')\n return src_type\n\n\ndef _make_stc(data, vertices, src_type=None, tmin=None, tstep=None,\n subject=None, vector=False, source_nn=None, warn_text=None):\n \"\"\"Generate a surface, vector-surface, volume or mixed source estimate.\"\"\"\n if src_type is None:\n # attempt to guess from vertices\n src_type = _get_src_type(src=None, vertices=vertices,\n warn_text=warn_text)\n\n if src_type == 'surface':\n # make a surface source estimate\n n_vertices = len(vertices[0]) + len(vertices[1])\n if vector:\n if source_nn is None:\n raise RuntimeError('No source vectors supplied.')\n\n # Rotate data to absolute XYZ coordinates\n data_rot = np.zeros((n_vertices, 3, data.shape[1]))\n if data.shape[0] == 3 * n_vertices:\n source_nn = source_nn.reshape(n_vertices, 3, 3)\n data = data.reshape(n_vertices, 3, -1)\n else:\n raise RuntimeError('Shape of data array does not match the '\n 'number of vertices.')\n for i, d, n in zip(range(data.shape[0]), data, source_nn):\n data_rot[i] = np.dot(n.T, d)\n data = data_rot\n stc = VectorSourceEstimate(data, vertices=vertices, tmin=tmin,\n tstep=tstep, subject=subject)\n else:\n stc = SourceEstimate(data, vertices=vertices, tmin=tmin,\n tstep=tstep, subject=subject)\n elif src_type in ('volume', 'discrete'):\n if vector:\n data = data.reshape((-1, 3, data.shape[-1]))\n stc = VolSourceEstimate(data, vertices=vertices, tmin=tmin,\n tstep=tstep, subject=subject)\n elif src_type == 'mixed':\n # make a mixed source estimate\n stc = MixedSourceEstimate(data, vertices=vertices, tmin=tmin,\n tstep=tstep, subject=subject)\n else:\n raise ValueError('vertices has to be either a list with one or more '\n 'arrays or an array')\n return stc\n\n\ndef _verify_source_estimate_compat(a, b):\n \"\"\"Make sure two SourceEstimates are compatible for arith. operations.\"\"\"\n compat = False\n if type(a) != type(b):\n raise ValueError('Cannot combine %s and %s.' % (type(a), type(b)))\n if len(a.vertices) == len(b.vertices):\n if all(np.array_equal(av, vv)\n for av, vv in zip(a.vertices, b.vertices)):\n compat = True\n if not compat:\n raise ValueError('Cannot combine source estimates that do not have '\n 'the same vertices. 
Consider using stc.expand().')\n if a.subject != b.subject:\n raise ValueError('source estimates do not have the same subject '\n 'names, %r and %r' % (a.subject, b.subject))\n\n\nclass _BaseSourceEstimate(ToDataFrameMixin, TimeMixin):\n \"\"\"Abstract base class for source estimates.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)\n The data in source space. The data can either be a single array or\n a tuple with two arrays: \"kernel\" shape (n_vertices, n_sensors) and\n \"sens_data\" shape (n_sensors, n_times). In this case, the source\n space data corresponds to \"numpy.dot(kernel, sens_data)\".\n vertices : array | list of two arrays\n Vertex numbers corresponding to the data.\n tmin : float\n Time point of the first sample in data.\n tstep : float\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : array or list of arrays of shape (n_dipoles,)\n The indices of the dipoles in the different source spaces. Can\n be an array if there is only one source space (e.g., for volumes).\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n \"\"\"\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n kernel, sens_data = None, None\n if isinstance(data, tuple):\n if len(data) != 2:\n raise ValueError('If data is a tuple it has to be length 2')\n kernel, sens_data = data\n data = None\n if kernel.shape[1] != sens_data.shape[0]:\n raise ValueError('kernel and sens_data have invalid '\n 'dimensions')\n\n if isinstance(vertices, list):\n vertices = [np.asarray(v, int) for v in vertices]\n if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):\n raise ValueError('Vertices must be ordered in increasing '\n 'order.')\n\n n_src = sum([len(v) for v in vertices])\n\n if len(vertices) == 1:\n vertices = vertices[0]\n elif isinstance(vertices, np.ndarray):\n n_src = len(vertices)\n else:\n raise ValueError('Vertices must be a list or numpy array')\n\n # safeguard the user against doing something silly\n if data is not None and data.shape[0] != n_src:\n raise ValueError('Number of vertices (%i) and stc.shape[0] (%i) '\n 'must match' % (n_src, data.shape[0]))\n\n self._data = data\n self._tmin = tmin\n self._tstep = tstep\n self.vertices = vertices\n self.verbose = verbose\n self._kernel = kernel\n self._sens_data = sens_data\n self._kernel_removed = False\n self._times = None\n self._update_times()\n self.subject = _check_subject(None, subject, False)\n\n @property\n def sfreq(self):\n \"\"\"Sample rate of the data.\"\"\"\n return 1. 
/ self.tstep\n\n def _remove_kernel_sens_data_(self):\n \"\"\"Remove kernel and sensor space data and compute self._data.\"\"\"\n if self._kernel is not None or self._sens_data is not None:\n self._kernel_removed = True\n self._data = np.dot(self._kernel, self._sens_data)\n self._kernel = None\n self._sens_data = None\n\n def crop(self, tmin=None, tmax=None):\n \"\"\"Restrict SourceEstimate to a time interval.\n\n Parameters\n ----------\n tmin : float | None\n The first time point in seconds. If None the first present is used.\n tmax : float | None\n The last time point in seconds. If None the last present is used.\n \"\"\"\n mask = _time_mask(self.times, tmin, tmax, sfreq=self.sfreq)\n self.tmin = self.times[np.where(mask)[0][0]]\n if self._kernel is not None and self._sens_data is not None:\n self._sens_data = self._sens_data[..., mask]\n else:\n self.data = self.data[..., mask]\n\n return self # return self for chaining methods\n\n @verbose\n def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1,\n verbose=None):\n \"\"\"Resample data.\n\n Parameters\n ----------\n sfreq : float\n New sample rate to use.\n npad : int | str\n Amount to pad the start and end of the data.\n Can also be \"auto\" to use a padding that will result in\n a power-of-two size (can be much faster).\n window : string or tuple\n Window to use in resampling. See scipy.signal.resample.\n n_jobs : int\n Number of jobs to run in parallel.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see\n :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`\n for more). Defaults to self.verbose.\n\n Notes\n -----\n For some data, it may be more accurate to use npad=0 to reduce\n artifacts. This is dataset dependent -- check your data!\n\n Note that the sample rate of the original data is inferred from tstep.\n \"\"\"\n # resampling in sensor instead of source space gives a somewhat\n # different result, so we don't allow it\n self._remove_kernel_sens_data_()\n\n o_sfreq = 1.0 / self.tstep\n self.data = resample(self.data, sfreq, o_sfreq, npad, n_jobs=n_jobs)\n\n # adjust indirectly affected variables\n self.tstep = 1.0 / sfreq\n return self\n\n @property\n def data(self):\n \"\"\"Numpy array of source estimate data.\"\"\"\n if self._data is None:\n # compute the solution the first time the data is accessed and\n # remove the kernel and sensor data\n self._remove_kernel_sens_data_()\n return self._data\n\n @data.setter\n def data(self, value):\n value = np.asarray(value)\n if self._data is not None and value.ndim != self._data.ndim:\n raise ValueError('Data array should have %d dimensions.' 
%\n self._data.ndim)\n\n # vertices can be a single number, so cast to ndarray\n if isinstance(self.vertices, list):\n n_verts = sum([len(v) for v in self.vertices])\n elif isinstance(self.vertices, np.ndarray):\n n_verts = len(self.vertices)\n else:\n raise ValueError('Vertices must be a list or numpy array')\n\n if value.shape[0] != n_verts:\n raise ValueError('The first dimension of the data array must '\n 'match the number of vertices (%d != %d)' %\n (value.shape[0], n_verts))\n\n self._data = value\n self._update_times()\n\n @property\n def shape(self):\n \"\"\"Shape of the data.\"\"\"\n if self._data is not None:\n return self._data.shape\n return (self._kernel.shape[0], self._sens_data.shape[1])\n\n @property\n def tmin(self):\n \"\"\"The first timestamp.\"\"\"\n return self._tmin\n\n @tmin.setter\n def tmin(self, value):\n self._tmin = float(value)\n self._update_times()\n\n @property\n def tstep(self):\n \"\"\"The change in time between two consecutive samples (1 / sfreq).\"\"\"\n return self._tstep\n\n @tstep.setter\n def tstep(self, value):\n if value <= 0:\n raise ValueError('.tstep must be greater than 0.')\n self._tstep = float(value)\n self._update_times()\n\n @property\n def times(self):\n \"\"\"A timestamp for each sample.\"\"\"\n return self._times\n\n @times.setter\n def times(self, value):\n raise ValueError('You cannot write to the .times attribute directly. '\n 'This property automatically updates whenever '\n '.tmin, .tstep or .data changes.')\n\n def _update_times(self):\n \"\"\"Update the times attribute after changing tmin, tmax, or tstep.\"\"\"\n self._times = self.tmin + (self.tstep * np.arange(self.shape[-1]))\n self._times.flags.writeable = False\n\n def __add__(self, a):\n \"\"\"Add source estimates.\"\"\"\n stc = self.copy()\n stc += a\n return stc\n\n def __iadd__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, a)\n self.data += a.data\n else:\n self.data += a\n return self\n\n def mean(self):\n \"\"\"Make a summary stc file with mean over time points.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The modified stc.\n \"\"\"\n out = self.sum()\n out /= len(self.times)\n return out\n\n def sum(self):\n \"\"\"Make a summary stc file with sum over time points.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The modified stc.\n \"\"\"\n data = self.data\n tmax = self.tmin + self.tstep * data.shape[-1]\n tmin = (self.tmin + tmax) / 2.\n tstep = tmax - self.tmin\n sum_stc = self.__class__(self.data.sum(axis=-1, keepdims=True),\n vertices=self.vertices, tmin=tmin,\n tstep=tstep, subject=self.subject)\n return sum_stc\n\n def __sub__(self, a):\n \"\"\"Subtract source estimates.\"\"\"\n stc = self.copy()\n stc -= a\n return stc\n\n def __isub__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, a)\n self.data -= a.data\n else:\n self.data -= a\n return self\n\n def __truediv__(self, a): # noqa: D105\n return self.__div__(a)\n\n def __div__(self, a): # noqa: D105\n \"\"\"Divide source estimates.\"\"\"\n stc = self.copy()\n stc /= a\n return stc\n\n def __itruediv__(self, a): # noqa: D105\n return self.__idiv__(a)\n\n def __idiv__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, a)\n self.data /= a.data\n else:\n self.data /= a\n return self\n\n def __mul__(self, 
a):\n \"\"\"Multiply source estimates.\"\"\"\n stc = self.copy()\n stc *= a\n return stc\n\n def __imul__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, a)\n self.data *= a.data\n else:\n self.data *= a\n return self\n\n def __pow__(self, a): # noqa: D105\n stc = self.copy()\n stc **= a\n return stc\n\n def __ipow__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n self.data **= a\n return self\n\n def __radd__(self, a): # noqa: D105\n return self + a\n\n def __rsub__(self, a): # noqa: D105\n return self - a\n\n def __rmul__(self, a): # noqa: D105\n return self * a\n\n def __rdiv__(self, a): # noqa: D105\n return self / a\n\n def __neg__(self): # noqa: D105\n \"\"\"Negate the source estimate.\"\"\"\n stc = self.copy()\n stc._remove_kernel_sens_data_()\n stc.data *= -1\n return stc\n\n def __pos__(self): # noqa: D105\n return self\n\n def __abs__(self):\n \"\"\"Compute the absolute value of the data.\n\n Returns\n -------\n stc : instance of _BaseSourceEstimate\n A version of the source estimate, where the data attribute is set\n to abs(self.data).\n \"\"\"\n stc = self.copy()\n stc._remove_kernel_sens_data_()\n stc._data = abs(stc._data)\n return stc\n\n def sqrt(self):\n \"\"\"Take the square root.\n\n Returns\n -------\n stc : instance of SourceEstimate\n A copy of the SourceEstimate with sqrt(data).\n \"\"\"\n return self ** (0.5)\n\n def copy(self):\n \"\"\"Return copy of source estimate instance.\"\"\"\n return copy.deepcopy(self)\n\n def bin(self, width, tstart=None, tstop=None, func=np.mean):\n \"\"\"Return a source estimate object with data summarized over time bins.\n\n Time bins of ``width`` seconds. This method is intended for\n visualization only. No filter is applied to the data before binning,\n making the method inappropriate as a tool for downsampling data.\n\n Parameters\n ----------\n width : scalar\n Width of the individual bins in seconds.\n tstart : scalar | None\n Time point where the first bin starts. The default is the first\n time point of the stc.\n tstop : scalar | None\n Last possible time point contained in a bin (if the last bin would\n be shorter than width it is dropped). The default is the last time\n point of the stc.\n func : callable\n Function that is applied to summarize the data. Needs to accept a\n numpy.array as first input and an ``axis`` keyword argument.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The binned source estimate.\n \"\"\"\n if tstart is None:\n tstart = self.tmin\n if tstop is None:\n tstop = self.times[-1]\n\n times = np.arange(tstart, tstop + self.tstep, width)\n nt = len(times) - 1\n data = np.empty(self.shape[:-1] + (nt,), dtype=self.data.dtype)\n for i in range(nt):\n idx = (self.times >= times[i]) & (self.times < times[i + 1])\n data[..., i] = func(self.data[..., idx], axis=-1)\n\n tmin = times[0] + width / 2.\n stc = self.copy()\n stc._data = data\n stc.tmin = tmin\n stc.tstep = width\n return stc\n\n def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):\n \"\"\"Get data after a linear (time) transform has been applied.\n\n The transform is applied to each source time course independently.\n\n\n Parameters\n ----------\n func : callable\n The transform to be applied, including parameters (see, e.g.,\n :func:`functools.partial`). The first parameter of the function is\n the input data. The first return value is the transformed data,\n remaining outputs are ignored. 
The first dimension of the\n transformed data has to be the same as the first dimension of the\n input data.\n idx : array | None\n Indicices of source time courses for which to compute transform.\n If None, all time courses are used.\n tmin_idx : int | None\n Index of first time point to include. If None, the index of the\n first time point is used.\n tmax_idx : int | None\n Index of the first time point not to include. If None, time points\n up to (and including) the last time point are included.\n\n Returns\n -------\n data_t : ndarray\n The transformed data.\n\n Notes\n -----\n Applying transforms can be significantly faster if the\n SourceEstimate object was created using \"(kernel, sens_data)\", for\n the \"data\" parameter as the transform is applied in sensor space.\n Inverse methods, e.g., \"apply_inverse_epochs\", or \"apply_lcmv_epochs\"\n do this automatically (if possible).\n \"\"\"\n if idx is None:\n # use all time courses by default\n idx = slice(None, None)\n\n if self._kernel is None and self._sens_data is None:\n if self._kernel_removed:\n warn_('Performance can be improved by not accessing the data '\n 'attribute before calling this method.')\n\n # transform source space data directly\n data_t = func(self.data[idx, ..., tmin_idx:tmax_idx])\n\n if isinstance(data_t, tuple):\n # use only first return value\n data_t = data_t[0]\n else:\n # apply transform in sensor space\n sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])\n\n if isinstance(sens_data_t, tuple):\n # use only first return value\n sens_data_t = sens_data_t[0]\n\n # apply inverse\n data_shape = sens_data_t.shape\n if len(data_shape) > 2:\n # flatten the last dimensions\n sens_data_t = sens_data_t.reshape(data_shape[0],\n np.prod(data_shape[1:]))\n\n data_t = np.dot(self._kernel[idx, :], sens_data_t)\n\n # restore original shape if necessary\n if len(data_shape) > 2:\n data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])\n\n return data_t\n\n def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):\n \"\"\"Apply linear transform.\n\n The transform is applied to each source time course independently.\n\n Parameters\n ----------\n func : callable\n The transform to be applied, including parameters (see, e.g.,\n :func:`functools.partial`). The first parameter of the function is\n the input data. The first two dimensions of the transformed data\n should be (i) vertices and (ii) time. Transforms which yield 3D\n output (e.g. time-frequency transforms) are valid, so long as the\n first two dimensions are vertices and time. In this case, the\n copy parameter (see below) must be True and a list of\n SourceEstimates, rather than a single instance of SourceEstimate,\n will be returned, one for each index of the 3rd dimension of the\n transformed data. In the case of transforms yielding 2D output\n (e.g. filtering), the user has the option of modifying the input\n inplace (copy = False) or returning a new instance of\n SourceEstimate (copy = True) with the transformed data.\n idx : array | None\n Indices of source time courses for which to compute transform.\n If None, all time courses are used.\n tmin : float | int | None\n First time point to include (ms). If None, self.tmin is used.\n tmax : float | int | None\n Last time point to include (ms). 
If None, self.tmax is used.\n copy : bool\n If True, return a new instance of SourceEstimate instead of\n modifying the input inplace.\n\n Returns\n -------\n stcs : SourceEstimate | VectorSourceEstimate | list\n The transformed stc or, in the case of transforms which yield\n N-dimensional output (where N > 2), a list of stcs. For a list,\n copy must be True.\n\n Notes\n -----\n Applying transforms can be significantly faster if the\n SourceEstimate object was created using \"(kernel, sens_data)\", for\n the \"data\" parameter as the transform is applied in sensor space.\n Inverse methods, e.g., \"apply_inverse_epochs\", or \"apply_lcmv_epochs\"\n do this automatically (if possible).\n \"\"\"\n # min and max data indices to include\n times = 1000. * self.times\n t_idx = np.where(_time_mask(times, tmin, tmax, sfreq=self.sfreq))[0]\n if tmin is None:\n tmin_idx = None\n else:\n tmin_idx = t_idx[0]\n\n if tmax is None:\n tmax_idx = None\n else:\n # +1, because upper boundary needs to include the last sample\n tmax_idx = t_idx[-1] + 1\n\n data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,\n tmax_idx=tmax_idx)\n\n # account for change in n_vertices\n if idx is not None:\n idx_lh = idx[idx < len(self.lh_vertno)]\n idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)\n verts_lh = self.lh_vertno[idx_lh]\n verts_rh = self.rh_vertno[idx_rh]\n else:\n verts_lh = self.lh_vertno\n verts_rh = self.rh_vertno\n verts = [verts_lh, verts_rh]\n\n tmin_idx = 0 if tmin_idx is None else tmin_idx\n tmin = self.times[tmin_idx]\n\n if data_t.ndim > 2:\n # return list of stcs if transformed data has dimensionality > 2\n if copy:\n stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,\n self.tstep, self.subject)\n for a in range(data_t.shape[-1])]\n else:\n raise ValueError('copy must be True if transformed data has '\n 'more than 2 dimensions')\n else:\n # return new or overwritten stc\n stcs = self if not copy else self.copy()\n stcs.vertices = verts\n stcs.data = data_t\n stcs.tmin = tmin\n\n return stcs\n\n\ndef _center_of_mass(vertices, values, hemi, surf, subject, subjects_dir,\n restrict_vertices):\n \"\"\"Find the center of mass on a surface.\"\"\"\n if (values == 0).all() or (values < 0).any():\n raise ValueError('All values must be non-negative and at least one '\n 'must be non-zero, cannot compute COM')\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n surf = read_surface(op.join(subjects_dir, subject, 'surf',\n hemi + '.' 
+ surf))\n if restrict_vertices is True:\n restrict_vertices = vertices\n elif restrict_vertices is False:\n restrict_vertices = np.arange(surf[0].shape[0])\n elif isinstance(restrict_vertices, SourceSpaces):\n idx = 1 if restrict_vertices.kind == 'surface' and hemi == 'rh' else 0\n restrict_vertices = restrict_vertices[idx]['vertno']\n else:\n restrict_vertices = np.array(restrict_vertices, int)\n pos = surf[0][vertices, :].T\n c_o_m = np.sum(pos * values, axis=1) / np.sum(values)\n vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] -\n c_o_m) ** 2, axis=1)))\n vertex = restrict_vertices[vertex]\n return vertex\n\n\nclass _BaseSurfaceSourceEstimate(_BaseSourceEstimate):\n \"\"\"Abstract base class for surface source estimates.\n\n Parameters\n ----------\n data : array\n The data in source space.\n vertices : list of two arrays\n Vertex numbers corresponding to the data.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : list of two arrays of shape (n_dipoles,)\n The indices of the dipoles in the left and right source space.\n data : array\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n \"\"\"\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n\n if not (isinstance(vertices, list) and len(vertices) == 2):\n raise ValueError('Vertices must be a list containing two '\n 'numpy arrays, got type %s (%s)'\n % (type(vertices), vertices))\n\n _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,\n tstep=tstep, subject=subject,\n verbose=verbose)\n\n def __repr__(self): # noqa: D105\n if isinstance(self.vertices, list):\n nv = sum([len(v) for v in self.vertices])\n else:\n nv = self.vertices.size\n s = \"%d vertices\" % nv\n if self.subject is not None:\n s += \", subject : %s\" % self.subject\n s += \", tmin : %s (ms)\" % (1e3 * self.tmin)\n s += \", tmax : %s (ms)\" % (1e3 * self.times[-1])\n s += \", tstep : %s (ms)\" % (1e3 * self.tstep)\n s += \", data shape : %s\" % (self.shape,)\n return \"<%s | %s>\" % (type(self).__name__, s)\n\n @property\n def lh_data(self):\n \"\"\"Left hemisphere data.\"\"\"\n return self.data[:len(self.lh_vertno)]\n\n @property\n def rh_data(self):\n \"\"\"Right hemisphere data.\"\"\"\n return self.data[len(self.lh_vertno):]\n\n @property\n def lh_vertno(self):\n \"\"\"Left hemisphere vertno.\"\"\"\n return self.vertices[0]\n\n @property\n def rh_vertno(self):\n \"\"\"Right hemisphere vertno.\"\"\"\n return self.vertices[1]\n\n def _hemilabel_stc(self, label):\n if label.hemi == 'lh':\n stc_vertices = self.vertices[0]\n else:\n stc_vertices = self.vertices[1]\n\n # find index of the Label's vertices\n idx = np.nonzero(np.in1d(stc_vertices, label.vertices))[0]\n\n # find output vertices\n vertices = stc_vertices[idx]\n\n # find data\n if label.hemi == 'rh':\n values = self.data[idx + len(self.vertices[0])]\n else:\n values = self.data[idx]\n\n return vertices, values\n\n def in_label(self, label):\n 
\"\"\"Get a source estimate object restricted to a label.\n\n SourceEstimate contains the time course of\n activation of all sources inside the label.\n\n Parameters\n ----------\n label : Label | BiHemiLabel\n The label (as created for example by mne.read_label). If the label\n does not match any sources in the SourceEstimate, a ValueError is\n raised.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The source estimate restricted to the given label.\n \"\"\"\n # make sure label and stc are compatible\n if label.subject is not None and self.subject is not None \\\n and label.subject != self.subject:\n raise RuntimeError('label and stc must have same subject names, '\n 'currently \"%s\" and \"%s\"' % (label.subject,\n self.subject))\n\n if label.hemi == 'both':\n lh_vert, lh_val = self._hemilabel_stc(label.lh)\n rh_vert, rh_val = self._hemilabel_stc(label.rh)\n vertices = [lh_vert, rh_vert]\n values = np.vstack((lh_val, rh_val))\n elif label.hemi == 'lh':\n lh_vert, values = self._hemilabel_stc(label)\n vertices = [lh_vert, np.array([], int)]\n elif label.hemi == 'rh':\n rh_vert, values = self._hemilabel_stc(label)\n vertices = [np.array([], int), rh_vert]\n else:\n raise TypeError(\"Expected Label or BiHemiLabel; got %r\" % label)\n\n if sum([len(v) for v in vertices]) == 0:\n raise ValueError('No vertices match the label in the stc file')\n\n label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,\n tstep=self.tstep, subject=self.subject)\n return label_stc\n\n def expand(self, vertices):\n \"\"\"Expand SourceEstimate to include more vertices.\n\n This will add rows to stc.data (zero-filled) and modify stc.vertices\n to include all vertices in stc.vertices and the input vertices.\n\n Parameters\n ----------\n vertices : list of array\n New vertices to add. Can also contain old values.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The modified stc (note: method operates inplace).\n \"\"\"\n if not isinstance(vertices, list):\n raise TypeError('vertices must be a list')\n if not len(self.vertices) == len(vertices):\n raise ValueError('vertices must have the same length as '\n 'stc.vertices')\n\n # can no longer use kernel and sensor data\n self._remove_kernel_sens_data_()\n\n inserters = list()\n offsets = [0]\n for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):\n v_new = np.setdiff1d(v_new, v_old)\n inds = np.searchsorted(v_old, v_new)\n # newer numpy might overwrite inds after np.insert, copy here\n inserters += [inds.copy()]\n offsets += [len(v_old)]\n self.vertices[vi] = np.insert(v_old, inds, v_new)\n inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]\n inds = np.concatenate(inds)\n new_data = np.zeros((len(inds),) + self.data.shape[1:])\n self.data = np.insert(self.data, inds, new_data, axis=0)\n return self\n\n @verbose\n def to_original_src(self, src_orig, subject_orig=None,\n subjects_dir=None, verbose=None):\n \"\"\"Get a source estimate from morphed source to the original subject.\n\n Parameters\n ----------\n src_orig : instance of SourceSpaces\n The original source spaces that were morphed to the current\n subject.\n subject_orig : str | None\n The original subject. 
For most source spaces this shouldn't need\n to be provided, since it is stored in the source space itself.\n subjects_dir : string, or None\n Path to SUBJECTS_DIR if it is not set in the environment.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see\n :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`\n for more).\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The transformed source estimate.\n\n See Also\n --------\n morph_source_spaces\n\n Notes\n -----\n .. versionadded:: 0.10.0\n \"\"\"\n if self.subject is None:\n raise ValueError('stc.subject must be set')\n src_orig = _ensure_src(src_orig, kind='surf')\n subject_orig = _ensure_src_subject(src_orig, subject_orig)\n data_idx, vertices = _get_morph_src_reordering(\n self.vertices, src_orig, subject_orig, self.subject, subjects_dir)\n return self.__class__(self._data[data_idx], vertices,\n self.tmin, self.tstep, subject_orig)\n\n\nclass SourceEstimate(_BaseSurfaceSourceEstimate):\n \"\"\"Container for surface source estimates.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)\n The data in source space. The data can either be a single array or\n a tuple with two arrays: \"kernel\" shape (n_vertices, n_sensors) and\n \"sens_data\" shape (n_sensors, n_times). In this case, the source\n space data corresponds to \"numpy.dot(kernel, sens_data)\".\n vertices : list of two arrays\n Vertex numbers corresponding to the data.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : list of two arrays of shape (n_dipoles,)\n The indices of the dipoles in the left and right source space.\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n\n See Also\n --------\n VectorSourceEstimate : A container for vector source estimates.\n VolSourceEstimate : A container for volume source estimates.\n MixedSourceEstimate : A container for mixed surface + volume source\n estimates.\n \"\"\"\n\n @verbose\n def save(self, fname, ftype='stc', verbose=None):\n \"\"\"Save the source estimates to a file.\n\n Parameters\n ----------\n fname : string\n The stem of the file name. The file names used for surface source\n spaces are obtained by adding \"-lh.stc\" and \"-rh.stc\" (or \"-lh.w\"\n and \"-rh.w\") to the stem provided, for the left and the right\n hemisphere, respectively.\n ftype : string\n File format to use. Allowed values are \"stc\" (default), \"w\",\n and \"h5\". The \"w\" format only supports a single time point.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see\n :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`\n for more). 
Defaults to self.verbose.\n \"\"\"\n if ftype not in ('stc', 'w', 'h5'):\n raise ValueError('ftype must be \"stc\", \"w\", or \"h5\", not \"%s\"'\n % ftype)\n\n lh_data = self.data[:len(self.lh_vertno)]\n rh_data = self.data[-len(self.rh_vertno):]\n\n if ftype == 'stc':\n logger.info('Writing STC to disk...')\n _write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,\n vertices=self.lh_vertno, data=lh_data)\n _write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,\n vertices=self.rh_vertno, data=rh_data)\n\n elif ftype == 'w':\n if self.shape[1] != 1:\n raise ValueError('w files can only contain a single time '\n 'point')\n logger.info('Writing STC to disk (w format)...')\n _write_w(fname + '-lh.w', vertices=self.lh_vertno,\n data=lh_data[:, 0])\n _write_w(fname + '-rh.w', vertices=self.rh_vertno,\n data=rh_data[:, 0])\n\n elif ftype == 'h5':\n if not fname.endswith('.h5'):\n fname += '-stc.h5'\n write_hdf5(fname,\n dict(vertices=self.vertices, data=self.data,\n tmin=self.tmin, tstep=self.tstep,\n subject=self.subject), title='mnepython',\n overwrite=True)\n logger.info('[done]')\n\n @copy_function_doc_to_method_doc(plot_source_estimates)\n def plot(self, subject=None, surface='inflated', hemi='lh',\n colormap='auto', time_label='auto', smoothing_steps=10,\n transparent=True, alpha=1.0, time_viewer=False, subjects_dir=None,\n figure=None, views='lat', colorbar=True, clim='auto',\n cortex=\"classic\", size=800, background=\"black\",\n foreground=\"white\", initial_time=None, time_unit='s',\n backend='auto', spacing='oct6', title=None, verbose=None):\n brain = plot_source_estimates(\n self, subject, surface=surface, hemi=hemi, colormap=colormap,\n time_label=time_label, smoothing_steps=smoothing_steps,\n transparent=transparent, alpha=alpha, time_viewer=time_viewer,\n subjects_dir=subjects_dir, figure=figure, views=views,\n colorbar=colorbar, clim=clim, cortex=cortex, size=size,\n background=background, foreground=foreground,\n initial_time=initial_time, time_unit=time_unit, backend=backend,\n spacing=spacing, title=title, verbose=verbose)\n return brain\n\n @verbose\n def extract_label_time_course(self, labels, src, mode='mean_flip',\n allow_empty=False, verbose=None):\n \"\"\"Extract label time courses for lists of labels.\n\n This function will extract one time course for each label. The way the\n time courses are extracted depends on the mode parameter.\n\n Parameters\n ----------\n labels : Label | BiHemiLabel | list of Label or BiHemiLabel\n The labels for which to extract the time courses.\n src : list\n Source spaces for left and right hemisphere.\n mode : str\n Extraction mode, see explanation below.\n allow_empty : bool\n Instead of emitting an error, return all-zero time course for\n labels that do not have any vertices in the source estimate.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see\n :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`\n for more).\n\n Returns\n -------\n label_tc : array, shape=(len(labels), n_times)\n Extracted time course for each label.\n\n See Also\n --------\n extract_label_time_course : extract time courses for multiple STCs\n\n Notes\n -----\n Valid values for mode are:\n\n - 'mean'\n Average within each label.\n - 'mean_flip'\n Average within each label with sign flip depending\n on source orientation.\n - 'pca_flip'\n Apply an SVD to the time courses within each label\n and use the scaled and sign-flipped first right-singular vector\n as the label time course. 
The scaling is performed such that the\n power of the label time course is the same as the average\n per-vertex time course power within the label. The sign of the\n resulting time course is adjusted by multiplying it with\n \"sign(dot(u, flip))\" where u is the first left-singular vector,\n and flip is a sing-flip vector based on the vertex normals. This\n procedure assures that the phase does not randomly change by 180\n degrees from one stc to the next.\n - 'max'\n Max value within each label.\n \"\"\"\n label_tc = extract_label_time_course(\n self, labels, src, mode=mode, return_generator=False,\n allow_empty=allow_empty, verbose=verbose)\n\n return label_tc\n\n def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',\n vert_as_index=False, time_as_index=False):\n \"\"\"Get location and latency of peak amplitude.\n\n Parameters\n ----------\n hemi : {'lh', 'rh', None}\n The hemi to be considered. If None, the entire source space is\n considered.\n tmin : float | None\n The minimum point in time to be considered for peak getting.\n tmax : float | None\n The maximum point in time to be considered for peak getting.\n mode : {'pos', 'neg', 'abs'}\n How to deal with the sign of the data. If 'pos' only positive\n values will be considered. If 'neg' only negative values will\n be considered. If 'abs' absolute values will be considered.\n Defaults to 'abs'.\n vert_as_index : bool\n whether to return the vertex index instead of of its ID.\n Defaults to False.\n time_as_index : bool\n Whether to return the time index instead of the latency.\n Defaults to False.\n\n Returns\n -------\n pos : int\n The vertex exhibiting the maximum response, either ID or index.\n latency : float | int\n The time point of the maximum response, either latency in seconds\n or index.\n \"\"\"\n data = {'lh': self.lh_data, 'rh': self.rh_data, None: self.data}[hemi]\n vertno = {'lh': self.lh_vertno, 'rh': self.rh_vertno,\n None: np.concatenate(self.vertices)}[hemi]\n\n vert_idx, time_idx, _ = _get_peak(data, self.times, tmin, tmax, mode)\n\n return (vert_idx if vert_as_index else vertno[vert_idx],\n time_idx if time_as_index else self.times[time_idx])\n\n def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,\n subjects_dir=None, surf='sphere'):\n \"\"\"Compute the center of mass of activity.\n\n This function computes the spatial center of mass on the surface\n as well as the temporal center of mass as in [1]_.\n\n .. note:: All activity must occur in a single hemisphere, otherwise\n an error is raised. The \"mass\" of each point in space for\n computing the spatial center of mass is computed by summing\n across time, and vice-versa for each point in time in\n computing the temporal center of mass. This is useful for\n quantifying spatio-temporal cluster locations, especially\n when combined with :func:`mne.vertex_to_mni`.\n\n Parameters\n ----------\n subject : string | None\n The subject the stc is defined for.\n hemi : int, or None\n Calculate the center of mass for the left (0) or right (1)\n hemisphere. If None, one of the hemispheres must be all zeroes,\n and the center of mass will be calculated for the other\n hemisphere (useful for getting COM for clusters).\n restrict_vertices : bool | array of int | instance of SourceSpaces\n If True, returned vertex will be one from stc. Otherwise, it could\n be any vertex from surf. If an array of int, the returned vertex\n will come from that array. 
If instance of SourceSpaces (as of\n 0.13), the returned vertex will be from the given source space.\n For most accuruate estimates, do not restrict vertices.\n subjects_dir : str, or None\n Path to the SUBJECTS_DIR. If None, the path is obtained by using\n the environment variable SUBJECTS_DIR.\n surf : str\n The surface to use for Euclidean distance center of mass\n finding. The default here is \"sphere\", which finds the center\n of mass on the spherical surface to help avoid potential issues\n with cortical folding.\n\n See Also\n --------\n mne.Label.center_of_mass\n mne.vertex_to_mni\n\n Returns\n -------\n vertex : int\n Vertex of the spatial center of mass for the inferred hemisphere,\n with each vertex weighted by the sum of the stc across time. For a\n boolean stc, then, this would be weighted purely by the duration\n each vertex was active.\n hemi : int\n Hemisphere the vertex was taken from.\n t : float\n Time of the temporal center of mass (weighted by the sum across\n source vertices).\n\n References\n ----------\n .. [1] Larson and Lee, \"The cortical dynamics underlying effective\n switching of auditory spatial attention\", NeuroImage 2012.\n \"\"\"\n if not isinstance(surf, str):\n raise TypeError('surf must be a string, got %s' % (type(surf),))\n subject = _check_subject(self.subject, subject)\n if np.any(self.data < 0):\n raise ValueError('Cannot compute COM with negative values')\n values = np.sum(self.data, axis=1) # sum across time\n vert_inds = [np.arange(len(self.vertices[0])),\n np.arange(len(self.vertices[1])) + len(self.vertices[0])]\n if hemi is None:\n hemi = np.where(np.array([np.sum(values[vi])\n for vi in vert_inds]))[0]\n if not len(hemi) == 1:\n raise ValueError('Could not infer hemisphere')\n hemi = hemi[0]\n if hemi not in [0, 1]:\n raise ValueError('hemi must be 0 or 1')\n vertices = self.vertices[hemi]\n values = values[vert_inds[hemi]] # left or right\n del vert_inds\n vertex = _center_of_mass(\n vertices, values, hemi=['lh', 'rh'][hemi], surf=surf,\n subject=subject, subjects_dir=subjects_dir,\n restrict_vertices=restrict_vertices)\n # do time center of mass by using the values across space\n masses = np.sum(self.data, axis=0).astype(float)\n t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)\n t = self.tmin + self.tstep * t_ind\n return vertex, hemi, t\n\n\nclass VolSourceEstimate(_BaseSourceEstimate):\n \"\"\"Container for volume source estimates.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)\n The data in source space. The data can either be a single array or\n a tuple with two arrays: \"kernel\" shape (n_vertices, n_sensors) and\n \"sens_data\" shape (n_sensors, n_times). In this case, the source\n space data corresponds to \"numpy.dot(kernel, sens_data)\".\n vertices : array\n Vertex numbers corresponding to the data.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. 
While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : array of shape (n_dipoles,)\n The indices of the dipoles in the source space.\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n\n Notes\n -----\n .. versionadded:: 0.9.0\n\n See Also\n --------\n SourceEstimate : A container for surface source estimates.\n VectorSourceEstimate : A container for vector source estimates.\n MixedSourceEstimate : A container for mixed surface + volume source\n estimates.\n \"\"\"\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n if not (isinstance(vertices, np.ndarray) or\n isinstance(vertices, list)):\n raise ValueError('Vertices must be a numpy array or a list of '\n 'arrays')\n\n _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,\n tstep=tstep, subject=subject,\n verbose=verbose)\n\n @copy_function_doc_to_method_doc(plot_volume_source_estimates)\n def plot(self, src, subject=None, subjects_dir=None, mode='stat_map',\n bg_img=None, colorbar=True, colormap='auto', clim='auto',\n transparent='auto', show=True, verbose=None):\n return plot_volume_source_estimates(\n self, src=src, subject=subject, subjects_dir=subjects_dir,\n mode=mode, bg_img=bg_img, colorbar=colorbar, colormap=colormap,\n clim=clim, transparent=transparent, show=show, verbose=verbose)\n\n @verbose\n def save(self, fname, ftype='stc', verbose=None):\n \"\"\"Save the source estimates to a file.\n\n Parameters\n ----------\n fname : string\n The stem of the file name. The stem is extended with \"-vl.stc\"\n or \"-vl.w\".\n ftype : string\n File format to use. Allowed values are \"stc\" (default), \"w\",\n and \"h5\". The \"w\" format only supports a single time point.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see\n :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`\n for more). 
Defaults to self.verbose.\n \"\"\"\n if ftype not in ['stc', 'w', 'h5']:\n raise ValueError('ftype must be \"stc\", \"w\" or \"h5\", not \"%s\"' %\n ftype)\n\n if ftype == 'stc':\n logger.info('Writing STC to disk...')\n if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')):\n fname += '-vl.stc'\n _write_stc(fname, tmin=self.tmin, tstep=self.tstep,\n vertices=self.vertices, data=self.data)\n elif ftype == 'w':\n logger.info('Writing STC to disk (w format)...')\n if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')):\n fname += '-vl.w'\n _write_w(fname, vertices=self.vertices, data=self.data)\n elif ftype == 'h5':\n if not fname.endswith('.h5'):\n fname += '-stc.h5'\n write_hdf5(fname,\n dict(vertices=self.vertices, data=self.data,\n tmin=self.tmin, tstep=self.tstep,\n subject=self.subject, src_type='volume'),\n title='mnepython',\n overwrite=True)\n\n logger.info('[done]')\n\n def save_as_volume(self, fname, src, dest='mri', mri_resolution=False,\n format='nifti1'):\n \"\"\"Save a volume source estimate in a NIfTI file.\n\n Parameters\n ----------\n fname : string\n The name of the generated nifti file.\n src : list\n The list of source spaces (should all be of type volume).\n dest : 'mri' | 'surf'\n If 'mri' the volume is defined in the coordinate system of\n the original T1 image. If 'surf' the coordinate system\n of the FreeSurfer surface is used (Surface RAS).\n mri_resolution: bool\n It True the image is saved in MRI resolution.\n WARNING: if you have many time points the file produced can be\n huge.\n format : str\n Either 'nifti1' (default) or 'nifti2'.\n\n .. versionadded:: 0.17\n\n\n Returns\n -------\n img : instance Nifti1Image\n The image object.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n import nibabel as nib\n img = self.as_volume(src, dest=dest, mri_resolution=mri_resolution,\n format=format)\n nib.save(img, fname)\n\n def as_volume(self, src, dest='mri', mri_resolution=False,\n format='nifti1'):\n \"\"\"Export volume source estimate as a nifti object.\n\n Parameters\n ----------\n src : list\n The list of source spaces (should all be of type volume).\n dest : 'mri' | 'surf'\n If 'mri' the volume is defined in the coordinate system of\n the original T1 image. If 'surf' the coordinate system\n of the FreeSurfer surface is used (Surface RAS).\n mri_resolution: bool\n It True the image is saved in MRI resolution.\n WARNING: if you have many time points the file produced can be\n huge.\n format : str\n Either 'nifti1' (default) or 'nifti2'.\n\n Returns\n -------\n img : instance Nifti1Image\n The image object.\n\n Notes\n -----\n .. 
versionadded:: 0.9.0\n \"\"\"\n from .morph import _interpolate_data\n return _interpolate_data(self, src, mri_resolution=mri_resolution,\n mri_space=True, output=format)\n\n def __repr__(self): # noqa: D105\n if isinstance(self.vertices, list):\n nv = sum([len(v) for v in self.vertices])\n else:\n nv = self.vertices.size\n s = \"%d vertices\" % nv\n if self.subject is not None:\n s += \", subject : %s\" % self.subject\n s += \", tmin : %s (ms)\" % (1e3 * self.tmin)\n s += \", tmax : %s (ms)\" % (1e3 * self.times[-1])\n s += \", tstep : %s (ms)\" % (1e3 * self.tstep)\n s += \", data size : %s\" % ' x '.join(map(str, self.shape))\n return \"<VolSourceEstimate | %s>\" % s\n\n def get_peak(self, tmin=None, tmax=None, mode='abs',\n vert_as_index=False, time_as_index=False):\n \"\"\"Get location and latency of peak amplitude.\n\n Parameters\n ----------\n tmin : float | None\n The minimum point in time to be considered for peak getting.\n tmax : float | None\n The maximum point in time to be considered for peak getting.\n mode : {'pos', 'neg', 'abs'}\n How to deal with the sign of the data. If 'pos' only positive\n values will be considered. If 'neg' only negative values will\n be considered. If 'abs' absolute values will be considered.\n Defaults to 'abs'.\n vert_as_index : bool\n whether to return the vertex index instead of of its ID.\n Defaults to False.\n time_as_index : bool\n Whether to return the time index instead of the latency.\n Defaults to False.\n\n Returns\n -------\n pos : int\n The vertex exhibiting the maximum response, either ID or index.\n latency : float\n The latency in seconds.\n \"\"\"\n vert_idx, time_idx, _ = _get_peak(self.data, self.times, tmin, tmax,\n mode)\n\n return (vert_idx if vert_as_index else self.vertices[vert_idx],\n time_idx if time_as_index else self.times[time_idx])\n\n\nclass VectorSourceEstimate(_BaseSurfaceSourceEstimate):\n \"\"\"Container for vector surface source estimates.\n\n For each vertex, the magnitude of the current is defined in the X, Y and Z\n directions.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, 3, n_times)\n The data in source space. Each dipole contains three vectors that\n denote the dipole strength in X, Y and Z directions over time.\n vertices : array | list of two arrays\n Vertex numbers corresponding to the data.\n tmin : float\n Time point of the first sample in data.\n tstep : float\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n\n Notes\n -----\n .. versionadded:: 0.15\n\n See Also\n --------\n SourceEstimate : A container for surface source estimates.\n VolSourceEstimate : A container for volume source estimates.\n MixedSourceEstimate : A container for mixed surface + volume source\n estimates.\n \"\"\"\n\n @verbose\n def save(self, fname, ftype='h5', verbose=None):\n \"\"\"Save the full source estimate to an HDF5 file.\n\n Parameters\n ----------\n fname : string\n The file name to write the source estimate to, should end in\n '-stc.h5'.\n ftype : string\n File format to use. 
Currently, the only allowed values is \"h5\".\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n Defaults to self.verbose.\n \"\"\"\n if ftype != 'h5':\n raise ValueError('VectorSourceEstimate objects can only be '\n 'written as HDF5 files.')\n\n if not fname.endswith('.h5'):\n fname += '-stc.h5'\n\n write_hdf5(fname,\n dict(vertices=self.vertices, data=self.data, tmin=self.tmin,\n tstep=self.tstep, subject=self.subject),\n title='mnepython', overwrite=True)\n\n def magnitude(self):\n \"\"\"Compute magnitude of activity without directionality.\n\n Returns\n -------\n stc : instance of SourceEstimate\n The source estimate without directionality information.\n \"\"\"\n data_mag = np.linalg.norm(self.data, axis=1)\n return SourceEstimate(data_mag, self.vertices, self.tmin, self.tstep,\n self.subject, self.verbose)\n\n def normal(self, src):\n \"\"\"Compute activity orthogonal to the cortex.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space for which this source estimate is specified.\n\n Returns\n -------\n stc : instance of SourceEstimate\n The source estimate only retaining the activity orthogonal to the\n cortex.\n \"\"\"\n normals = np.vstack([s['nn'][v] for s, v in zip(src, self.vertices)])\n data_norm = einsum('ijk,ij->ik', self.data, normals)\n return SourceEstimate(data_norm, self.vertices, self.tmin, self.tstep,\n self.subject, self.verbose)\n\n @copy_function_doc_to_method_doc(plot_vector_source_estimates)\n def plot(self, subject=None, hemi='lh', colormap='hot', time_label='auto',\n smoothing_steps=10, transparent=True, brain_alpha=0.4,\n overlay_alpha=None, vector_alpha=1.0, scale_factor=None,\n time_viewer=False, subjects_dir=None, figure=None, views='lat',\n colorbar=True, clim='auto', cortex='classic', size=800,\n background='black', foreground='white', initial_time=None,\n time_unit='s'):\n\n return plot_vector_source_estimates(\n self, subject=subject, hemi=hemi, colormap=colormap,\n time_label=time_label, smoothing_steps=smoothing_steps,\n transparent=transparent, brain_alpha=brain_alpha,\n overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,\n scale_factor=scale_factor, time_viewer=time_viewer,\n subjects_dir=subjects_dir, figure=figure, views=views,\n colorbar=colorbar, clim=clim, cortex=cortex, size=size,\n background=background, foreground=foreground,\n initial_time=initial_time, time_unit=time_unit\n )\n\n def __abs__(self):\n \"\"\"Compute the absolute value of each component.\n\n Returns\n -------\n stc_abs : VectorSourceEstimate\n A vector source estimate where the data attribute is set to\n abs(self.data).\n\n See Also\n --------\n VectorSourceEstimate.magnitude\n \"\"\"\n return super(VectorSourceEstimate, self).__abs__()\n\n\nclass MixedSourceEstimate(_BaseSourceEstimate):\n \"\"\"Container for mixed surface and volume source estimates.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)\n The data in source space. The data can either be a single array or\n a tuple with two arrays: \"kernel\" shape (n_vertices, n_sensors) and\n \"sens_data\" shape (n_sensors, n_times). In this case, the source\n space data corresponds to \"numpy.dot(kernel, sens_data)\".\n vertices : list of arrays\n Vertex numbers corresponding to the data.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. 
While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : list of arrays of shape (n_dipoles,)\n The indices of the dipoles in each source space.\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n\n Notes\n -----\n .. versionadded:: 0.9.0\n\n See Also\n --------\n SourceEstimate : A container for surface source estimates.\n VectorSourceEstimate : A container for vector source estimates.\n VolSourceEstimate : A container for volume source estimates.\n \"\"\"\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n if not isinstance(vertices, list) or len(vertices) < 2:\n raise ValueError('Vertices must be a list of numpy arrays with '\n 'one array per source space.')\n\n _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,\n tstep=tstep, subject=subject,\n verbose=verbose)\n\n def plot_surface(self, src, subject=None, surface='inflated', hemi='lh',\n colormap='auto', time_label='time=%02.f ms',\n smoothing_steps=10,\n transparent=None, alpha=1.0, time_viewer=False,\n config_opts=None, subjects_dir=None, figure=None,\n views='lat', colorbar=True, clim='auto'):\n \"\"\"Plot surface source estimates with PySurfer.\n\n Note: PySurfer currently needs the SUBJECTS_DIR environment variable,\n which will automatically be set by this function. Plotting multiple\n SourceEstimates with different values for subjects_dir will cause\n PySurfer to use the wrong FreeSurfer surfaces when using methods of\n the returned Brain object. It is therefore recommended to set the\n SUBJECTS_DIR environment variable or always use the same value for\n subjects_dir (within the same Python session).\n\n Parameters\n ----------\n src : SourceSpaces\n The source spaces to plot.\n subject : str | None\n The subject name corresponding to FreeSurfer environment\n variable SUBJECT. If None stc.subject will be used. If that\n is None, the environment will be used.\n surface : str\n The type of surface (inflated, white etc.).\n hemi : str, 'lh' | 'rh' | 'split' | 'both'\n The hemisphere to display. Using 'both' or 'split' requires\n PySurfer version 0.4 or above.\n colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)\n Name of colormap to use. See `plot_source_estimates`.\n time_label : str\n How to print info about the time instant visualized.\n smoothing_steps : int\n The amount of smoothing.\n transparent : bool | None\n If True, use a linear transparency between fmin and fmid.\n None will choose automatically based on colormap type.\n alpha : float\n Alpha value to apply globally to the overlay.\n time_viewer : bool\n Display time viewer GUI.\n config_opts : dict\n Keyword arguments for Brain initialization.\n See pysurfer.viz.Brain.\n subjects_dir : str\n The path to the FreeSurfer subjects reconstructions.\n It corresponds to FreeSurfer environment variable SUBJECTS_DIR.\n figure : instance of mayavi.core.scene.Scene | None\n If None, the last figure will be cleaned and a new figure will\n be created.\n views : str | list\n View to use. 
See surfer.Brain().\n colorbar : bool\n If True, display colorbar on scene.\n clim : str | dict\n Colorbar properties specification. See `plot_source_estimates`.\n\n Returns\n -------\n brain : Brain\n A instance of surfer.viz.Brain from PySurfer.\n \"\"\"\n # extract surface source spaces\n surf = _ensure_src(src, kind='surf')\n\n # extract surface source estimate\n data = self.data[:surf[0]['nuse'] + surf[1]['nuse']]\n vertices = [s['vertno'] for s in surf]\n\n stc = SourceEstimate(data, vertices, self.tmin, self.tstep,\n self.subject, self.verbose)\n\n return plot_source_estimates(stc, subject, surface=surface, hemi=hemi,\n colormap=colormap, time_label=time_label,\n smoothing_steps=smoothing_steps,\n transparent=transparent, alpha=alpha,\n time_viewer=time_viewer,\n config_opts=config_opts,\n subjects_dir=subjects_dir, figure=figure,\n views=views, colorbar=colorbar, clim=clim)\n\n @verbose\n def save(self, fname, ftype='h5', verbose=None):\n \"\"\"Save the source estimates to a file.\n\n Parameters\n ----------\n fname : string\n The stem of the file name. The file names used for surface source\n spaces are obtained by adding \"-lh.stc\" and \"-rh.stc\" (or \"-lh.w\"\n and \"-rh.w\") to the stem provided, for the left and the right\n hemisphere, respectively.\n ftype : string\n File format to use. Allowed values are \"stc\" (default), \"w\",\n and \"h5\". The \"w\" format only supports a single time point.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see\n :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`\n for more). Defaults to self.verbose.\n \"\"\"\n if ftype != 'h5':\n raise ValueError('MixedSourceEstimate objects can only be '\n 'written as HDF5 files.')\n\n if not fname.endswith('.h5'):\n fname += '-stc.h5'\n\n write_hdf5(fname,\n dict(vertices=self.vertices, data=self.data,\n tmin=self.tmin, tstep=self.tstep,\n subject=self.subject, src_type='mixed'),\n title='mnepython',\n overwrite=True)\n logger.info('[done]')\n\n\n###############################################################################\n# Morphing\n\n\ndef _get_vol_mask(src):\n \"\"\"Get the volume source space mask.\"\"\"\n assert len(src) == 1 # not a mixed source space\n shape = src[0]['shape'][::-1]\n mask = np.zeros(shape, bool)\n mask.flat[src[0]['vertno']] = True\n return mask\n\n\ndef _spatio_temporal_src_connectivity_vol(src, n_times):\n from sklearn.feature_extraction import grid_to_graph\n mask = _get_vol_mask(src)\n edges = grid_to_graph(*mask.shape, mask=mask)\n connectivity = _get_connectivity_from_edges(edges, n_times)\n return connectivity\n\n\ndef _spatio_temporal_src_connectivity_surf(src, n_times):\n if src[0]['use_tris'] is None:\n # XXX It would be nice to support non oct source spaces too...\n raise RuntimeError(\"The source space does not appear to be an ico \"\n \"surface. 
Connectivity cannot be extracted from\"\n \" non-ico source spaces.\")\n used_verts = [np.unique(s['use_tris']) for s in src]\n offs = np.cumsum([0] + [len(u_v) for u_v in used_verts])[:-1]\n tris = np.concatenate([np.searchsorted(u_v, s['use_tris']) + off\n for u_v, s, off in zip(used_verts, src, offs)])\n connectivity = spatio_temporal_tris_connectivity(tris, n_times)\n\n # deal with source space only using a subset of vertices\n masks = [np.in1d(u, s['vertno']) for s, u in zip(src, used_verts)]\n if sum(u.size for u in used_verts) != connectivity.shape[0] / n_times:\n raise ValueError('Used vertices do not match connectivity shape')\n if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:\n raise ValueError('Vertex mask does not match number of vertices')\n masks = np.concatenate(masks)\n missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)\n if missing:\n warn_('%0.1f%% of original source space vertices have been'\n ' omitted, tri-based connectivity will have holes.\\n'\n 'Consider using distance-based connectivity or '\n 'morphing data to all source space vertices.' % missing)\n masks = np.tile(masks, n_times)\n masks = np.where(masks)[0]\n connectivity = connectivity.tocsr()\n connectivity = connectivity[masks]\n connectivity = connectivity[:, masks]\n # return to original format\n connectivity = connectivity.tocoo()\n return connectivity\n\n\n@verbose\ndef spatio_temporal_src_connectivity(src, n_times, dist=None, verbose=None):\n \"\"\"Compute connectivity for a source space activation over time.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space. It can be a surface source space or a\n volume source space.\n n_times : int\n Number of time instants.\n dist : float, or None\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors. If None, immediate neighbors\n are extracted from an ico surface.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n connectivity : sparse COO matrix\n The connectivity matrix describing the spatio-temporal\n graph structure. If N is the number of vertices in the\n source space, the N first nodes in the graph are the\n vertices are time 1, the nodes from 2 to 2N are the vertices\n during time 2, etc.\n \"\"\"\n # XXX we should compute connectivity for each source space and then\n # use scipy.sparse.block_diag to concatenate them\n if src[0]['type'] == 'vol':\n if dist is not None:\n raise ValueError('dist must be None for a volume '\n 'source space. Got %s.' 
% dist)\n\n connectivity = _spatio_temporal_src_connectivity_vol(src, n_times)\n elif dist is not None:\n # use distances computed and saved in the source space file\n connectivity = spatio_temporal_dist_connectivity(src, n_times, dist)\n else:\n connectivity = _spatio_temporal_src_connectivity_surf(src, n_times)\n return connectivity\n\n\n@verbose\ndef grade_to_tris(grade, verbose=None):\n \"\"\"Get tris defined for a certain grade.\n\n Parameters\n ----------\n grade : int\n Grade of an icosahedral mesh.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n tris : list\n 2-element list containing Nx3 arrays of tris, suitable for use in\n spatio_temporal_tris_connectivity.\n \"\"\"\n a = _get_ico_tris(grade, None, False)\n tris = np.concatenate((a, a + (np.max(a) + 1)))\n return tris\n\n\n@verbose\ndef spatio_temporal_tris_connectivity(tris, n_times, remap_vertices=False,\n verbose=None):\n \"\"\"Compute connectivity from triangles and time instants.\n\n Parameters\n ----------\n tris : array\n N x 3 array defining triangles.\n n_times : int\n Number of time points\n remap_vertices : bool\n Reassign vertex indices based on unique values. Useful\n to process a subset of triangles. Defaults to False.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n connectivity : sparse COO matrix\n The connectivity matrix describing the spatio-temporal\n graph structure. If N is the number of vertices in the\n source space, the N first nodes in the graph are the\n vertices are time 1, the nodes from 2 to 2N are the vertices\n during time 2, etc.\n \"\"\"\n if remap_vertices:\n logger.info('Reassigning vertex indices.')\n tris = np.searchsorted(np.unique(tris), tris)\n\n edges = mesh_edges(tris).tocoo()\n return _get_connectivity_from_edges(edges, n_times)\n\n\n@verbose\ndef spatio_temporal_dist_connectivity(src, n_times, dist, verbose=None):\n \"\"\"Compute connectivity from distances in a source space and time instants.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space must have distances between vertices computed, such\n that src['dist'] exists and is useful. This can be obtained using MNE\n with a call to mne_add_patch_info with the --dist option.\n n_times : int\n Number of time points\n dist : float\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n connectivity : sparse COO matrix\n The connectivity matrix describing the spatio-temporal\n graph structure. 
If N is the number of vertices in the\n source space, the N first nodes in the graph are the\n vertices are time 1, the nodes from 2 to 2N are the vertices\n during time 2, etc.\n \"\"\"\n if src[0]['dist'] is None:\n raise RuntimeError('src must have distances included, consider using\\n'\n 'mne_add_patch_info with --dist argument')\n edges = sparse_block_diag([s['dist'][s['vertno'], :][:, s['vertno']]\n for s in src])\n edges.data[:] = np.less_equal(edges.data, dist)\n # clean it up and put it in coo format\n edges = edges.tocsr()\n edges.eliminate_zeros()\n edges = edges.tocoo()\n return _get_connectivity_from_edges(edges, n_times)\n\n\n@verbose\ndef spatial_src_connectivity(src, dist=None, verbose=None):\n \"\"\"Compute connectivity for a source space activation.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space. It can be a surface source space or a\n volume source space.\n dist : float, or None\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors. If None, immediate neighbors\n are extracted from an ico surface.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n connectivity : sparse COO matrix\n The connectivity matrix describing the spatial graph structure.\n \"\"\"\n return spatio_temporal_src_connectivity(src, 1, dist)\n\n\n@verbose\ndef spatial_tris_connectivity(tris, remap_vertices=False, verbose=None):\n \"\"\"Compute connectivity from triangles.\n\n Parameters\n ----------\n tris : array\n N x 3 array defining triangles.\n remap_vertices : bool\n Reassign vertex indices based on unique values. Useful\n to process a subset of triangles. Defaults to False.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n connectivity : sparse COO matrix\n The connectivity matrix describing the spatial graph structure.\n \"\"\"\n return spatio_temporal_tris_connectivity(tris, 1, remap_vertices)\n\n\ndef spatial_dist_connectivity(src, dist, verbose=None):\n \"\"\"Compute connectivity from distances in a source space.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space must have distances between vertices computed, such\n that src['dist'] exists and is useful. This can be obtained using MNE\n with a call to mne_add_patch_info with the --dist option.\n dist : float\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n connectivity : sparse COO matrix\n The connectivity matrix describing the spatial graph structure.\n \"\"\"\n return spatio_temporal_dist_connectivity(src, 1, dist)\n\n\ndef spatial_inter_hemi_connectivity(src, dist, verbose=None):\n \"\"\"Get vertices on each hemisphere that are close to the other hemisphere.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space. 
Must be surface type.\n dist : float\n Maximal Euclidean distance (in m) between vertices in one hemisphere\n compared to the other to consider neighbors.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n connectivity : sparse COO matrix\n The connectivity matrix describing the spatial graph structure.\n Typically this should be combined (addititively) with another\n existing intra-hemispheric connectivity matrix, e.g. computed\n using geodesic distances.\n \"\"\"\n from scipy.spatial.distance import cdist\n src = _ensure_src(src, kind='surf')\n conn = cdist(src[0]['rr'][src[0]['vertno']],\n src[1]['rr'][src[1]['vertno']])\n conn = sparse.csr_matrix(conn <= dist, dtype=int)\n empties = [sparse.csr_matrix((nv, nv), dtype=int) for nv in conn.shape]\n conn = sparse.vstack([sparse.hstack([empties[0], conn]),\n sparse.hstack([conn.T, empties[1]])])\n return conn\n\n\n@verbose\ndef _get_connectivity_from_edges(edges, n_times, verbose=None):\n \"\"\"Given edges sparse matrix, create connectivity matrix.\"\"\"\n n_vertices = edges.shape[0]\n logger.info(\"-- number of connected vertices : %d\" % n_vertices)\n nnz = edges.col.size\n aux = n_vertices * np.arange(n_times)[:, None] * np.ones((1, nnz), np.int)\n col = (edges.col[None, :] + aux).ravel()\n row = (edges.row[None, :] + aux).ravel()\n if n_times > 1: # add temporal edges\n o = (n_vertices * np.arange(n_times - 1)[:, None] +\n np.arange(n_vertices)[None, :]).ravel()\n d = (n_vertices * np.arange(1, n_times)[:, None] +\n np.arange(n_vertices)[None, :]).ravel()\n row = np.concatenate((row, o, d))\n col = np.concatenate((col, d, o))\n data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),\n dtype=np.int)\n connectivity = coo_matrix((data, (row, col)),\n shape=(n_times * n_vertices,) * 2)\n return connectivity\n\n\n@verbose\ndef _get_ico_tris(grade, verbose=None, return_surf=False):\n \"\"\"Get triangles for ico surface.\"\"\"\n ico = _get_ico_surface(grade)\n if not return_surf:\n return ico['tris']\n else:\n return ico\n\n\ndef _get_label_flip(labels, label_vertidx, src):\n \"\"\"Get sign-flip for labels.\"\"\"\n # do the import here to avoid circular dependency\n from .label import label_sign_flip\n # get the sign-flip vector for every label\n label_flip = list()\n for label, vertidx in zip(labels, label_vertidx):\n if vertidx is not None:\n flip = label_sign_flip(label, src)[:, None]\n else:\n flip = None\n label_flip.append(flip)\n\n return label_flip\n\n\ndef _pca_flip(flip, data):\n U, s, V = linalg.svd(data, full_matrices=False)\n # determine sign-flip\n sign = np.sign(np.dot(U[:, 0], flip))\n # use average power in label for scaling\n scale = linalg.norm(s) / np.sqrt(len(data))\n return sign * scale * V[0]\n\n\n_label_funcs = {\n 'mean': lambda flip, data: np.mean(data, axis=0),\n 'mean_flip': lambda flip, data: np.mean(flip * data, axis=0),\n 'max': lambda flip, data: np.max(np.abs(data), axis=0),\n 'pca_flip': _pca_flip,\n}\n\n\n@verbose\ndef _gen_extract_label_time_course(stcs, labels, src, mode='mean',\n allow_empty=False, verbose=None):\n \"\"\"Generate extract_label_time_course.\"\"\"\n # if src is a mixed src space, the first 2 src spaces are surf type and\n # the other ones are vol type. 
For mixed source space n_labels will be the\n # given by the number of ROIs of the cortical parcellation plus the number\n # of vol src space\n\n if mode not in _label_funcs:\n raise ValueError('%s is an invalid mode' % mode)\n func = _label_funcs[mode]\n\n if len(src) > 2:\n if src[0]['type'] != 'surf' or src[1]['type'] != 'surf':\n raise ValueError('The first 2 source spaces have to be surf type')\n if any(np.any(s['type'] != 'vol') for s in src[2:]):\n raise ValueError('source spaces have to be of vol type')\n\n n_aparc = len(labels)\n n_aseg = len(src[2:])\n n_labels = n_aparc + n_aseg\n else:\n n_labels = len(labels)\n\n # get vertices from source space, they have to be the same as in the stcs\n vertno = [s['vertno'] for s in src]\n nvert = [len(vn) for vn in vertno]\n\n # do the initialization\n label_vertidx = list()\n for label in labels:\n if label.hemi == 'both':\n # handle BiHemiLabel\n sub_labels = [label.lh, label.rh]\n else:\n sub_labels = [label]\n this_vertidx = list()\n for slabel in sub_labels:\n if slabel.hemi == 'lh':\n this_vertno = np.intersect1d(vertno[0], slabel.vertices)\n vertidx = np.searchsorted(vertno[0], this_vertno)\n elif slabel.hemi == 'rh':\n this_vertno = np.intersect1d(vertno[1], slabel.vertices)\n vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertno)\n else:\n raise ValueError('label %s has invalid hemi' % label.name)\n this_vertidx.append(vertidx)\n\n # convert it to an array\n this_vertidx = np.concatenate(this_vertidx)\n if len(this_vertidx) == 0:\n msg = ('source space does not contain any vertices for label %s'\n % label.name)\n if not allow_empty:\n raise ValueError(msg)\n else:\n warn_(msg + '. Assigning all-zero time series to label.')\n this_vertidx = None # to later check if label is empty\n\n label_vertidx.append(this_vertidx)\n\n # mode-dependent initialization\n if mode not in ('mean', 'max'):\n # get the sign-flip vector for every label\n src_flip = _get_label_flip(labels, label_vertidx, src[:2])\n else:\n src_flip = [None] * len(labels)\n\n # loop through source estimates and extract time series\n for stc in stcs:\n # make sure the stc is compatible with the source space\n for i in range(len(src)):\n if len(stc.vertices[i]) != nvert[i]:\n raise ValueError('stc not compatible with source space. '\n 'stc has %s time series but there are %s '\n 'vertices in source space'\n % (len(stc.vertices[i]), nvert[i]))\n\n if any(np.any(svn != vn) for svn, vn in zip(stc.vertices, vertno)):\n raise ValueError('stc not compatible with source space')\n if sum(nvert) != stc.shape[0]:\n raise ValueError('stc not compatible with source space. 
'\n 'stc has %s vertices but the source space '\n 'has %s vertices'\n % (stc.shape[0], sum(nvert)))\n\n logger.info('Extracting time courses for %d labels (mode: %s)'\n % (n_labels, mode))\n\n # do the extraction\n label_tc = np.zeros((n_labels, stc.data.shape[1]),\n dtype=stc.data.dtype)\n for i, (vertidx, flip) in enumerate(zip(label_vertidx, src_flip)):\n if vertidx is not None:\n label_tc[i] = func(flip, stc.data[vertidx, :])\n\n # extract label time series for the vol src space\n if len(src) > 2:\n v1 = nvert[0] + nvert[1]\n for i, nv in enumerate(nvert[2:]):\n\n v2 = v1 + nv\n v = range(v1, v2)\n if nv != 0:\n label_tc[n_aparc + i] = np.mean(stc.data[v, :], axis=0)\n\n v1 = v2\n\n # this is a generator!\n yield label_tc\n\n\n@verbose\ndef extract_label_time_course(stcs, labels, src, mode='mean_flip',\n allow_empty=False, return_generator=False,\n verbose=None):\n \"\"\"Extract label time course for lists of labels and source estimates.\n\n This function will extract one time course for each label and source\n estimate. The way the time courses are extracted depends on the mode\n parameter.\n\n Valid values for mode are:\n\n - 'mean': Average within each label.\n - 'mean_flip': Average within each label with sign flip depending\n on source orientation.\n - 'pca_flip': Apply an SVD to the time courses within each label\n and use the scaled and sign-flipped first right-singular vector\n as the label time course. The scaling is performed such that the\n power of the label time course is the same as the average\n per-vertex time course power within the label. The sign of the\n resulting time course is adjusted by multiplying it with\n \"sign(dot(u, flip))\" where u is the first left-singular vector,\n and flip is a sing-flip vector based on the vertex normals. This\n procedure assures that the phase does not randomly change by 180\n degrees from one stc to the next.\n - 'max': Max value within each label.\n\n\n Parameters\n ----------\n stcs : SourceEstimate | list (or generator) of SourceEstimate\n The source estimates from which to extract the time course.\n labels : Label | BiHemiLabel | list of Label or BiHemiLabel\n The labels for which to extract the time course.\n src : list\n Source spaces for left and right hemisphere.\n mode : str\n Extraction mode, see explanation above.\n allow_empty : bool\n Instead of emitting an error, return all-zero time courses for labels\n that do not have any vertices in the source estimate.\n return_generator : bool\n If True, a generator instead of a list is returned.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n label_tc : array | list (or generator) of array, shape=(len(labels), n_times)\n Extracted time course for each label and source estimate.\n \"\"\" # noqa: E501\n # convert inputs to lists\n if isinstance(stcs, SourceEstimate):\n stcs = [stcs]\n return_several = False\n return_generator = False\n else:\n return_several = True\n\n if not isinstance(labels, list):\n labels = [labels]\n\n label_tc = _gen_extract_label_time_course(stcs, labels, src, mode=mode,\n allow_empty=allow_empty)\n\n if not return_generator:\n # do the extraction and return a list\n label_tc = list(label_tc)\n\n if not return_several:\n # input was a single SoureEstimate, return single array\n label_tc = label_tc[0]\n\n return label_tc\n" ]
[ [ "numpy.ones", "numpy.sum", "scipy.sparse.block_diag", "scipy.spatial.distance.cdist", "numpy.intersect1d", "numpy.any", "numpy.argsort", "numpy.asarray", "numpy.insert", "numpy.vstack", "scipy.linalg.norm", "numpy.in1d", "numpy.abs", "scipy.sparse.hstack", "numpy.where", "numpy.unique", "numpy.mean", "numpy.tile", "numpy.fromfile", "numpy.atleast_2d", "numpy.zeros", "numpy.dot", "numpy.searchsorted", "numpy.setdiff1d", "numpy.arange", "scipy.sparse.coo_matrix", "numpy.max", "numpy.prod", "numpy.linalg.norm", "numpy.empty", "numpy.left_shift", "scipy.sparse.csr_matrix", "sklearn.feature_extraction.grid_to_graph", "numpy.less_equal", "numpy.array_equal", "numpy.array", "numpy.concatenate", "numpy.frombuffer", "scipy.linalg.svd" ] ]
jpk2f2/CMP_SC_4650_3
[ "2b1a84557ac280c70d7e19e4fdc6677ce10745dc" ]
[ "python/filters.py" ]
[ "import numpy as np\n\n# prewitt x and y filters\nPREWITTX = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]], np.float32)\nPREWITTY = np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]], np.float32)\n\n# Sobel x and y filters\nSOBELX = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.float32)\nSOBELY = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], np.float32)\n\n" ]
[ [ "numpy.array" ] ]
mhearne-usgs/earthquake-sequence
[ "3b642a6c202894b0ea421635f0f258fa045fa271" ]
[ "sequence/seqdb.py" ]
[ "import sqlite3\nfrom collections import OrderedDict\nfrom datetime import datetime, timedelta\nimport logging\n\nimport pandas as pd\nfrom shapely.geometry import Polygon\n\nfrom impactutils.extern.openquake.geodetic import geodetic_distance\n\nEQTABLE = OrderedDict([('id', 'integer primary key'),\n ('sid', 'int'),\n ('code', 'text'),\n ('time', 'datetime'),\n ('latitude', 'float'),\n ('longitude', 'float'),\n ('depth', 'float'),\n ('magnitude', 'float')])\nSQTABLE = OrderedDict([('id', 'integer primary key'),\n ('name', 'text'),\n ('start_time', 'datetime'),\n ('end_time', 'datetime'),\n ('center_lat', 'float'),\n ('center_lon', 'float'),\n ('nearest_city', 'text'),\n ('dist_nearest_city', 'float'),\n ('sequence_ended', 'boolean DEFAULT 0'),\n ('n_earthquakes', 'integer'),\n ('projstr', 'text'),\n ('radius', 'float'),\n ('xmin', 'float'),\n ('xmax', 'float'),\n ('ymin', 'float'),\n ('ymax', 'float')])\n\nTABLES={'earthquake':EQTABLE,\n 'sequence': SQTABLE}\n\nTIMEFMT='%Y-%m-%dT%H:%M:%S'\nTIMEFMT2 = '%Y-%m-%d %H:%M:%S.%f'\n\n\nclass SequenceDatabase(object):\n def __init__(self, dbfile, config, create=False):\n self._db, self._cursor=get_connection_objects(dbfile)\n if create:\n self._createTables()\n self._config=config\n\n def _createTables(self):\n \"\"\"\n Build the database tables.\n \"\"\"\n for table in TABLES.keys():\n sql='CREATE TABLE %s (' % table\n nuggets=[]\n for column, ctype in TABLES[table].items():\n nuggets.append('%s %s' % (column, ctype))\n sql += ','.join(nuggets) + ')'\n self._cursor.execute(sql)\n\n self._db.commit()\n return\n\n def mergeSequences(self):\n query = 'SELECT id, xmin, xmax, ymin, ymax FROM sequence WHERE sequence_ended=0'\n df = pd.read_sql_query(query, self._db)\n \n id1_array = []\n id2_array = []\n\n for i in range(0, len(df)):\n xmin1 = df['xmin'].iloc[i]\n xmax1 = df['xmax'].iloc[i]\n ymin1 = df['ymin'].iloc[i]\n ymax1 = df['ymax'].iloc[i]\n id1 = df['id'].iloc[i]\n \n for j in range(0, len(df)):\n if i==j:\n continue\n xmin2 = df['xmin'].iloc[j]\n xmax2 = df['xmax'].iloc[j]\n ymin2 = df['ymin'].iloc[j]\n ymax2 = df['ymax'].iloc[j]\n id2 = df['id'].iloc[j]\n\n bounds1 = (xmin1, xmax1, ymin1, ymax1)\n bounds2 = (xmin2, xmax2, ymin2, ymax2)\n if boxes_intersect(bounds1, bounds2):\n if id1 not in id2_array and id2 not in id1_array:\n id1_array.append(id1)\n id2_array.append(id2)\n sequence_sets = merge_sequences(id1_array, id2_array)\n return sequence_sets\n\n\n def getElapsedDataframe(self):\n query='SELECT id, start_time, end_time, n_earthquakes FROM sequence'\n dataframe=pd.read_sql_query(query, self._db, parse_dates=[\n 'start_time', 'end_time'])\n dataframe['elapsed']=dataframe['end_time'] - dataframe['start_time']\n return dataframe\n\n def getStoppedDataframe(self):\n dataframe=self.getElapsedDataframe()\n\n # get the most recent update time by looking at most EQ loaded\n query2='SELECT max(time) FROM earthquake'\n self._cursor.execute(query2)\n maxtime=datetime.strptime(self._cursor.fetchone()[0], TIMEFMT2)\n\n # mark sequences that have stopped\n dataframe['gap_days']=maxtime - dataframe['end_time']\n daygap=timedelta(days=self._config['DAYGAP'])\n ended=dataframe['gap_days'] > daygap\n dataframe['ended']=ended\n dataframe = dataframe[dataframe['ended'] == True].copy()\n\n return dataframe\n\n def getNonSequences(self):\n dataframe=self.getStoppedDataframe()\n\n # if we don't have any stopped sequences, bail out\n if not len(dataframe):\n return dataframe\n\n # delete small/short sequences\n c1=dataframe['n_earthquakes'] < 
self._config['NUMEQ']\n c2=dataframe['elapsed'] < timedelta(days=self._config['NUMDAYS'])\n deletes=dataframe[c1 | c2]\n\n return deletes\n\n def deleteNonSequences(self, deletes):\n # clean out non sequences\n for _, row in deletes.iterrows():\n edelete='DELETE FROM earthquake WHERE sid=%i' % row['id']\n self._cursor.execute(edelete)\n self._db.commit()\n sdelete='DELETE FROM sequence WHERE id=%i' % row['id']\n self._cursor.execute(sdelete)\n self._db.commit()\n\n def markStoppedSequences(self, keeps):\n # mark dead sequences\n for _, row in keeps.iterrows():\n smark='UPDATE sequence SET sequence_ended=1 WHERE id=%i' % row['id']\n self._cursor.execute(smark)\n self._db.commit()\n\n def getNumSequences(self):\n query='SELECT count(*) FROM sequence'\n self._cursor.execute(query)\n nseq=self._cursor.fetchone()[0]\n return nseq\n\n def checkSequence(self, sqstats):\n clat=sqstats['center_lat']\n clon=sqstats['center_lon']\n radius=sqstats['radius']\n seq_query='SELECT id, name, center_lat, center_lon, radius FROM sequence WHERE sequence_ended = 0'\n self._cursor.execute(seq_query)\n seq_rows=self._cursor.fetchall()\n if not len(seq_rows):\n return None\n for row in seq_rows:\n sid=row['id']\n center_lat=row['center_lat']\n center_lon=row['center_lon']\n sradius=row['radius']\n # what is the distance in km from one center to another?\n dist=geodetic_distance(clat, clon, center_lat, center_lon)\n # cmp_dist=max(radius + sradius, self._config['EQDIST2'])\n cmp_dist = radius + sradius\n if dist < cmp_dist:\n return sid\n\n return None\n\n def updateSequence(self, dataframe, sqstats, seqid):\n # dataframe is the list of new events...\n # sqstats is the stats for the combined events\n nuggets = ['name = \"%s\"' % sqstats['name'],\n 'start_time = \"%s\"' % sqstats['start_time'],\n 'end_time = \"%s\"' % sqstats['end_time'],\n 'center_lat = %.4f' % sqstats['center_lat'],\n 'center_lon = %.4f' % sqstats['center_lon'],\n 'nearest_city = \"%s\"' % sqstats['nearest_city'],\n 'dist_nearest_city = %.2f' % sqstats['dist_nearest_city'],\n 'n_earthquakes = %i' % sqstats['n_earthquakes'],\n 'radius = %.2f' % sqstats['radius'],\n 'xmin = %.4f' % sqstats['xmin'],\n 'xmax = %.4f' % sqstats['xmax'],\n 'ymin = %.4f' % sqstats['ymin'],\n 'ymax = %.4f' % sqstats['ymax'],\n ]\n colstr = ','.join(nuggets)\n query = 'UPDATE sequence SET %s WHERE id=%i' % (colstr, seqid)\n self._cursor.execute(query)\n self._db.commit()\n self.insertEvents(dataframe, seqid)\n\n def deleteSequence(self, seqid):\n query = 'DELETE FROM sequence WHERE id=%i' % seqid\n self._cursor.execute(query)\n self._db.commit()\n\n def getSequenceEvents(self, seqid):\n cols = list(EQTABLE.keys())\n colstr = ','.join(cols)\n query = 'SELECT %s FROM earthquake WHERE sid=%i' % (colstr, seqid)\n dataframe = pd.read_sql_query(query, self._db, parse_dates=['time'])\n dataframe = dataframe.drop(['id','sid'],axis=1)\n dataframe = dataframe.rename({'code':'id'}, axis=1)\n return dataframe\n\n def insertEvents(self, dataframe, seqid):\n for _, row in dataframe.iterrows():\n colstr='(sid, code, time, latitude, longitude, depth, magnitude)'\n valstr='(%i, \"%s\", \"%s\", %.4f, %.4f, %.1f, %.1f)'\n tpl=(seqid, row['id'], row['time'],\n row['latitude'], row['longitude'],\n row['depth'], row['magnitude'])\n query3='INSERT INTO earthquake %s VALUES %s' % (\n colstr, valstr % tpl)\n self._cursor.execute(query3)\n self._db.commit()\n\n def insertSequence(self, dataframe, sqstats):\n cols=['name', 'start_time', 'end_time',\n 'center_lat', 'center_lon', 'nearest_city',\n 
'dist_nearest_city',\n 'n_earthquakes', 'radius', 'xmin', 'xmax',\n 'ymin', 'ymax'\n ]\n colstr=','.join(cols)\n fmt='\"%s\", \"%s\", \"%s\", %.4f, %.4f, \"%s\", %.2f, %i, %.1f, %.4f, %.4f, %.4f, %.4f'\n tpl=(sqstats['name'],\n sqstats['start_time'].strftime(TIMEFMT),\n sqstats['end_time'].strftime(TIMEFMT),\n sqstats['center_lat'],\n sqstats['center_lon'],\n sqstats['nearest_city'],\n sqstats['dist_nearest_city'],\n sqstats['n_earthquakes'],\n sqstats['radius'],\n sqstats['xmin'],\n sqstats['xmax'],\n sqstats['ymin'],\n sqstats['ymax'],\n )\n valstr=fmt % tpl\n query='INSERT INTO sequence (%s) VALUES (%s)' % (colstr, valstr)\n self._cursor.execute(query)\n self._db.commit()\n query2='SELECT last_insert_rowid()'\n seqid=self._cursor.execute(query2).fetchone()[0]\n\n self.insertEvents(dataframe, seqid)\n\n return seqid\n\n def getSequenceNames(self, confirmed=True):\n query = 'SELECT name FROM sequence'\n if confirmed:\n query += ' WHERE sequence_ended = 1'\n self._cursor.execute(query)\n rows = self._cursor.fetchall()\n names = [row[0] for row in rows]\n return names\n\n def getSequence(self, seqname):\n # 'id', 'integer primary key'),\n # ('name', 'text'),\n # ('start_time', 'datetime'),\n # ('end_time', 'datetime'),\n # ('center_lat', 'float'),\n # ('center_lon', 'float'),\n # ('nearest_city', 'text'),\n # ('dist_nearest_city', 'float'),\n # ('sequence_ended', 'boolean DEFAULT 0'),\n # ('n_earthquakes', 'integer'),\n # ('projstr', 'text'),\n # ('radius', 'float'),\n # ('xmin', 'float'),\n # ('xmax', 'float'),\n # ('ymin', 'float'),\n # ('ymax', 'float')\n cols = list(SQTABLE.keys())\n colstr = ','.join(cols)\n query = 'SELECT %s FROM sequence WHERE name=\"%s\"' % (colstr, seqname)\n df = pd.read_sql_query(query, self._db)\n sequence = df.iloc[0]\n sid = sequence['id']\n sequence = sequence.drop('id').to_dict()\n seqframe = self.getSequenceEvents(sid)\n return (sequence, seqframe)\n\n\ndef merge_sequences(id1, id2):\n pairs = list(zip(id1, id2))\n sets = []\n while len(pairs):\n npairs = len(pairs)\n deletes = [0]\n set1 = set(pairs[0])\n for j in range(1, npairs):\n set2 = set(pairs[j])\n if len(set1.intersection(set2)):\n set1 = set1.union(set2)\n deletes.append(j)\n newpairs = []\n for k in range(npairs):\n if k in deletes:\n continue\n newpairs.append(pairs[k])\n npairs = len(newpairs)\n pairs = newpairs.copy()\n sets.append(set1)\n return sets\n \n\n # for i in range(0, len(pairs)):\n # tset = set()\n # set1 = set(pairs[i])\n # for j in range(0, len(pairs)):\n # if i==j:\n # continue\n # set2 = set(pairs[j])\n # if len(set1.intersection(set2)):\n # if len(tset):\n # tset = tset.union(set2)\n # else:\n # tset = tset.union(set1)\n # tset = tset.union(set2)\n # if len(tset):\n # sets.append(tset)\n # return sets\n\ndef boxes_intersect(bounds1, bounds2):\n xmin1, xmax1, ymin1, ymax1 = bounds1\n xmin2, xmax2, ymin2, ymax2 = bounds2\n\n coords1 = [(xmin1, ymax1),\n (xmax1, ymax1),\n (xmax1, ymin1),\n (xmin1, ymin1)]\n coords2 = [(xmin2, ymax2),\n (xmax2, ymax2),\n (xmax2, ymin2),\n (xmin2, ymin2)]\n rect1 = Polygon(coords1)\n rect2 = Polygon(coords2)\n if rect1.intersects(rect2):\n return True\n \n return False\n\n\ndef get_connection_objects(dbfile):\n db=sqlite3.connect(dbfile)\n db.row_factory=sqlite3.Row\n cursor=db.cursor()\n return (db, cursor)\n" ]
[ [ "pandas.read_sql_query" ] ]
NickLalo/beginners-pytorch-deep-learning
[ "491e4ec7e1faa6c274082f548e8ea5b5bd2e687c" ]
[ "chapter2/download.py" ]
[ "# download.py\n\nimport os\nimport sys\nimport urllib3\nfrom urllib.parse import urlparse\nimport pandas as pd\nimport itertools\nimport shutil\n\nfrom urllib3.util import Retry\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nclasses = [\"cat\", \"fish\"]\nset_types = [\"train\", \"test\", \"val\"]\n\ndef download_image(url, klass, data_type):\n basename = os.path.basename(urlparse(url).path)\n filename = \"{}/{}/{}\".format(data_type, klass, basename)\n if not os.path.exists(filename):\n try: \n http = urllib3.PoolManager(retries=Retry(connect=1, read=1, redirect=2))\n with http.request(\"GET\", url, preload_content=False) as resp, open(\n filename, \"wb\"\n ) as out_file:\n if resp.status == 200:\n shutil.copyfileobj(resp, out_file)\n else:\n print(\"Error downloading {}\".format(url))\n resp.release_conn()\n except:\n print(\"Error downloading {}\".format(url))\n\n\nif __name__ == \"__main__\":\n if not os.path.exists(\"images.csv\"):\n print(\"Error: can't find images.csv!\")\n sys.exit(0)\n\n # get args and create output directory\n imagesDF = pd.read_csv(\"images.csv\")\n\n for set_type, klass in list(itertools.product(set_types, classes)):\n path = \"./{}/{}\".format(set_type, klass)\n if not os.path.exists(path):\n print(\"Creating directory {}\".format(path))\n os.makedirs(path)\n\n print(\"Downloading {} images\".format(len(imagesDF)))\n\n result = [\n download_image(url, klass, data_type)\n for url, klass, data_type in zip(\n imagesDF[\"url\"], imagesDF[\"class\"], imagesDF[\"type\"]\n )\n ]\n sys.exit(0)\n" ]
[ [ "pandas.read_csv" ] ]
dejanzelic/frigate
[ "3b04169c8b53b5653ad9b26d5bbe6313cbeff08d" ]
[ "process_clip.py" ]
[ "import sys\nimport click\nimport os\nimport datetime\nfrom unittest import TestCase, main\nfrom frigate.video import process_frames, start_or_restart_ffmpeg, capture_frames, get_frame_shape\nfrom frigate.util import DictFrameManager, EventsPerSecond, draw_box_with_label\nfrom frigate.motion import MotionDetector\nfrom frigate.edgetpu import LocalObjectDetector\nfrom frigate.objects import ObjectTracker\nimport multiprocessing as mp\nimport numpy as np\nimport cv2\nfrom frigate.object_processing import COLOR_MAP, CameraState\n\nclass ProcessClip():\n def __init__(self, clip_path, frame_shape, config):\n self.clip_path = clip_path\n self.frame_shape = frame_shape\n self.camera_name = 'camera'\n self.frame_manager = DictFrameManager()\n self.frame_queue = mp.Queue()\n self.detected_objects_queue = mp.Queue()\n self.camera_state = CameraState(self.camera_name, config, self.frame_manager)\n\n def load_frames(self):\n fps = EventsPerSecond()\n skipped_fps = EventsPerSecond()\n stop_event = mp.Event()\n detection_frame = mp.Value('d', datetime.datetime.now().timestamp()+100000)\n current_frame = mp.Value('d', 0.0)\n ffmpeg_cmd = f\"ffmpeg -hide_banner -loglevel panic -i {self.clip_path} -f rawvideo -pix_fmt rgb24 pipe:\".split(\" \")\n ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, self.frame_shape[0]*self.frame_shape[1]*self.frame_shape[2])\n capture_frames(ffmpeg_process, self.camera_name, self.frame_shape, self.frame_manager, self.frame_queue, 1, fps, skipped_fps, stop_event, detection_frame, current_frame)\n ffmpeg_process.wait()\n ffmpeg_process.communicate()\n \n def process_frames(self, objects_to_track=['person'], object_filters={}):\n mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)\n mask[:] = 255\n motion_detector = MotionDetector(self.frame_shape, mask)\n\n object_detector = LocalObjectDetector(labels='/labelmap.txt')\n object_tracker = ObjectTracker(10)\n process_fps = mp.Value('d', 0.0)\n detection_fps = mp.Value('d', 0.0)\n current_frame = mp.Value('d', 0.0)\n stop_event = mp.Event()\n\n process_frames(self.camera_name, self.frame_queue, self.frame_shape, self.frame_manager, motion_detector, object_detector, object_tracker, self.detected_objects_queue, \n process_fps, detection_fps, current_frame, objects_to_track, object_filters, mask, stop_event, exit_on_empty=True)\n \n def objects_found(self, debug_path=None):\n obj_detected = False\n top_computed_score = 0.0\n def handle_event(name, obj):\n nonlocal obj_detected\n nonlocal top_computed_score\n if obj['computed_score'] > top_computed_score:\n top_computed_score = obj['computed_score']\n if not obj['false_positive']:\n obj_detected = True\n self.camera_state.on('new', handle_event)\n self.camera_state.on('update', handle_event)\n\n while(not self.detected_objects_queue.empty()):\n camera_name, frame_time, current_tracked_objects = self.detected_objects_queue.get()\n if not debug_path is None:\n self.save_debug_frame(debug_path, frame_time, current_tracked_objects.values())\n\n self.camera_state.update(frame_time, current_tracked_objects)\n for obj in self.camera_state.tracked_objects.values():\n print(f\"{frame_time}: {obj['id']} - {obj['computed_score']} - {obj['score_history']}\")\n \n return {\n 'object_detected': obj_detected,\n 'top_score': top_computed_score\n }\n \n def save_debug_frame(self, debug_path, frame_time, tracked_objects):\n current_frame = self.frame_manager.get(f\"{self.camera_name}{frame_time}\")\n # draw the bounding boxes on the frame\n for obj in tracked_objects:\n 
thickness = 2\n color = (0,0,175)\n\n if obj['frame_time'] != frame_time:\n thickness = 1\n color = (255,0,0)\n else:\n color = (255,255,0)\n\n # draw the bounding boxes on the frame\n box = obj['box']\n draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['label'], f\"{int(obj['score']*100)}% {int(obj['area'])}\", thickness=thickness, color=color)\n # draw the regions on the frame\n region = obj['region']\n draw_box_with_label(current_frame, region[0], region[1], region[2], region[3], 'region', \"\", thickness=1, color=(0,255,0))\n \n cv2.imwrite(f\"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg\", cv2.cvtColor(current_frame, cv2.COLOR_RGB2BGR))\n\[email protected]()\[email protected](\"-p\", \"--path\", required=True, help=\"Path to clip or directory to test.\")\[email protected](\"-l\", \"--label\", default='person', help=\"Label name to detect.\")\[email protected](\"-t\", \"--threshold\", default=0.85, help=\"Threshold value for objects.\")\[email protected](\"--debug-path\", default=None, help=\"Path to output frames for debugging.\")\ndef process(path, label, threshold, debug_path):\n clips = []\n if os.path.isdir(path):\n files = os.listdir(path)\n files.sort()\n clips = [os.path.join(path, file) for file in files]\n elif os.path.isfile(path): \n clips.append(path)\n\n config = {\n 'snapshots': {\n 'show_timestamp': False, \n 'draw_zones': False\n },\n 'zones': {},\n 'objects': {\n 'track': [label],\n 'filters': {\n 'person': {\n 'threshold': threshold\n }\n }\n }\n }\n\n results = []\n for c in clips:\n frame_shape = get_frame_shape(c)\n process_clip = ProcessClip(c, frame_shape, config)\n process_clip.load_frames()\n process_clip.process_frames(objects_to_track=config['objects']['track'])\n\n results.append((c, process_clip.objects_found(debug_path)))\n\n for result in results:\n print(f\"{result[0]}: {result[1]}\")\n \n positive_count = sum(1 for result in results if result[1]['object_detected'])\n print(f\"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s).\")\n\nif __name__ == '__main__':\n process()" ]
[ [ "numpy.zeros" ] ]
SkBlaz/supertest
[ "5d99034af820cc10c8f70271b55cc90c42328709" ]
[ "examples/example_visualization.py" ]
[ "# simple plot of a larger file\nfrom py3plex.visualization.multilayer import hairball_plot, plt\nfrom py3plex.visualization.colors import colors_default\nfrom py3plex.core import multinet\nfrom py3plex.wrappers import train_node2vec_embedding\nfrom py3plex.visualization.embedding_visualization import embedding_tools\nfrom py3plex.algorithms.community_detection import community_wrapper as cw\nfrom collections import Counter\n\n\ndef plot_intact_embedding(num_it):\n\n # string layout for larger network -----------------------------------\n multilayer_network = multinet.multi_layer_network().load_network(\n \"../datasets/intact02.gpickle\", input_type=\"gpickle\",\n directed=False).add_dummy_layers()\n multilayer_network.basic_stats()\n\n # use embedding to first initialize the nodes..\n\n # call a specific n2v compiled binary\n train_node2vec_embedding.call_node2vec_binary(\n \"../datasets/IntactEdgelistedges.txt\",\n \"../datasets/test_embedding.emb\",\n binary=\"../bin/node2vec\",\n weighted=False)\n\n # preprocess and check embedding -- for speed, install parallel tsne from https://github.com/DmitryUlyanov/Multicore-TSNE, py3plex knows how to use it.\n\n multilayer_network.load_embedding(\"../datasets/test_embedding.emb\")\n\n # load the positions and select the projection algorithm\n output_positions = embedding_tools.get_2d_coordinates_tsne(\n multilayer_network, output_format=\"pos_dict\")\n\n # custom layouts are part of the custom coordinate option\n layout_parameters = {\"iterations\": num_it}\n layout_parameters['pos'] = output_positions # assign parameters\n network_colors, graph = multilayer_network.get_layers(style=\"hairball\")\n partition = cw.louvain_communities(multilayer_network)\n\n # select top n communities by size\n top_n = 10\n partition_counts = dict(Counter(partition.values()))\n top_n_communities = list(partition_counts.keys())[0:top_n]\n\n # assign node colors\n color_mappings = dict(\n zip(top_n_communities,\n [x for x in colors_default if x != \"black\"][0:top_n]))\n\n network_colors = [\n color_mappings[partition[x]]\n if partition[x] in top_n_communities else \"black\"\n for x in multilayer_network.get_nodes()\n ]\n\n f = plt.figure()\n # gravity=0.2,strongGravityMode=False,barnesHutTheta=1.2,edgeWeightInfluence=1,scalingRatio=2.0\n hairball_plot(graph,\n network_colors,\n layout_algorithm=\"custom_coordinates_initial_force\",\n layout_parameters=layout_parameters,\n nodesize=0.02,\n alpha_channel=0.30,\n edge_width=0.001,\n scale_by_size=False)\n\n f.savefig(\"../datasets/\" + str(num_it) + \"intact.png\",\n bbox_inches='tight',\n dpi=300)\n\n\ndef plot_intact_basic(num_it=10):\n\n print(\"Plotting intact\")\n multilayer_network = multinet.multi_layer_network().load_network(\n \"../datasets/intact02.gpickle\", input_type=\"gpickle\",\n directed=False).add_dummy_layers()\n network_colors, graph = multilayer_network.get_layers(style=\"hairball\")\n partition = cw.louvain_communities(multilayer_network)\n\n # select top n communities by size\n top_n = 3\n partition_counts = dict(Counter(partition.values()))\n top_n_communities = list(partition_counts.keys())[0:top_n]\n\n # assign node colors\n color_mappings = dict(\n zip(top_n_communities,\n [x for x in colors_default if x != \"black\"][0:top_n]))\n\n network_colors = [\n color_mappings[partition[x]]\n if partition[x] in top_n_communities else \"black\"\n for x in multilayer_network.get_nodes()\n ]\n\n layout_parameters = {\"iterations\": num_it, \"forceImport\": True}\n f = plt.figure()\n 
hairball_plot(graph,\n network_colors,\n legend=False,\n layout_parameters=layout_parameters)\n f.savefig(\"../example_images/intact_\" + str(num_it) + \"_BH_basic.png\",\n bbox_inches='tight',\n dpi=300)\n\n\ndef plot_intact_BH(num_it=10):\n\n print(\"Plotting intact\")\n multilayer_network = multinet.multi_layer_network().load_network(\n \"../datasets/intact02.gpickle\", input_type=\"gpickle\",\n directed=False).add_dummy_layers()\n network_colors, graph = multilayer_network.get_layers(style=\"hairball\")\n partition = cw.louvain_communities(multilayer_network)\n\n # select top n communities by size\n top_n = 3\n partition_counts = dict(Counter(partition.values()))\n top_n_communities = list(partition_counts.keys())[0:top_n]\n\n # assign node colors\n color_mappings = dict(\n zip(top_n_communities,\n [x for x in colors_default if x != \"black\"][0:top_n]))\n\n network_colors = [\n color_mappings[partition[x]]\n if partition[x] in top_n_communities else \"black\"\n for x in multilayer_network.get_nodes()\n ]\n\n layout_parameters = {\"iterations\": num_it}\n f = plt.figure()\n hairball_plot(graph,\n network_colors,\n legend=False,\n layout_parameters=layout_parameters)\n f.savefig(\"../example_images/intact_\" + str(num_it) + \"_BH.png\",\n bbox_inches='tight',\n dpi=300)\n\n\nif __name__ == \"__main__\":\n import time\n import numpy as np\n iteration_range = [0, 10, 100]\n for iterations in iteration_range:\n mean_times = []\n for j in range(2):\n start = time.time()\n # plot_intact_BH(iterations)\n plot_intact_embedding(iterations)\n end = (time.time() - start) / 60\n mean_times.append(end)\n print(\"Mean time for BK {}, iterations: {}\".format(\n np.mean(mean_times), iterations))\n\n # mean_times = []\n # for j in range(iterations):\n # start = time.time()\n # plot_intact_embedding()\n # end = (time.time() - start)/60\n # mean_times.append(end)\n # print(\"Mean time for Py3 {}\".format(np.mean(mean_times)))\n" ]
[ [ "numpy.mean" ] ]
liangyy/haplotype-po
[ "2a6830095bcfa4298ad04ce0790888dbccd4a426" ]
[ "scripts/haplotype_imputation/impute_otf_multi_chr.py" ]
[ "##\n# Implement idea 2: multi-chromosome version\n##\n\nimport argparse\nparser = argparse.ArgumentParser(prog='impute_otf_multi_chr.py', description='''\n Impute parental origin of haplotypes and observed phenotypes.\n It takes preloaded phenotypes, covariates, and genotypes \n (generated by impute_otf_preload.py)\n''')\n\nparser.add_argument('--genotype-prefix-pattern', help='''\n Prefix of preloaded NPY for genotype and position matrix. Should contain {chr_num} \n as placeholder for chromosome number.\n''')\nparser.add_argument('--chromosomes', default=None, help='''\n List of chromosomes to work with, separated by ,.\n For instance: 1,2,3.\n If not set, it will include 1 .. 22.\n''')\nparser.add_argument('--npy-prefix', type=str, help='''\n Prefix of preloaded NPY for phenotypes and covariates.\n And individual list.\n''')\nparser.add_argument('--output-prefix', help='''\n Prefix of output in TSV.GZ format.\n''')\nparser.add_argument('--nthread', default=None, type=int, help='''\n Number of threads to use.\n''')\nparser.add_argument('--imputer-output', type=str, help='''\n Pickle GZ imputer output\n''')\n\nargs = parser.parse_args()\n\n\nimport logging, sys\nimport torch\nimport numpy as np\nsys.path.insert(0, '../logistic_gpu')\nimport table_reader\nimport geno_hdf5_reader\nimport snp_list_reader\nimport haplotype_imputer\nimport gzip, pickle\n\n# configing util\nlogging.basicConfig(\n level = logging.INFO, \n stream = sys.stderr, \n format = '%(asctime)s %(message)s',\n datefmt = '%Y-%m-%d %I:%M:%S %p'\n)\n\nif args.nthread is not None:\n torch.set_num_threads(args.nthread)\n\nlogging.info('Loading preloaded phenotypes and covariates')\nfmat = np.load(args.npy_prefix + '.fmat.npy')\nmmat = np.load(args.npy_prefix + '.mmat.npy')\ncmat = np.load(args.npy_prefix + '.cmat.npy')\n\nlogging.info('Loading posmat')\n# load all posmat into memory since it does not take too much\nif args.chromosomes is None:\n chroms = [ str(i) for i in range(1, 23) ]\nelse:\n chroms = args.chromosomes.split(',')\nposmat_dic = {}\nfor chrom in chroms:\n posmat_dic[chrom] = np.load(args.genotype_prefix_pattern.format(chr_num=chrom) + '.posmat.npy')\n\n\nlogging.info('Run imputation: mode = multi-chromosome OTF')\nimputer = haplotype_imputer.HaploImputer()\nbeta, sigma2, out, lld = imputer.impute_otf_multi_chr(\n fmat, mmat, \n posmat_dic, chroms, \n args.genotype_prefix_pattern + '.hh1.npy', args.genotype_prefix_pattern + '.hh2.npy',\n cmat=cmat\n)\n\nlogging.info('Save imputer output')\nwith gzip.open(args.imputer_output, 'w') as f:\n pickle.dump((beta, sigma2, lld), f)\n\n\nlogging.info('Output')\nindiv_list = np.load(args.npy_prefix + '.individual_id.npy', allow_pickle=True)\nfor chrom in chroms:\n out[chrom]['individual_id'] = indiv_list\n out[chrom].to_csv(args.output_prefix + chrom + '.tsv.gz', compression='gzip', sep='\\t', index=False)\n\n" ]
[ [ "torch.set_num_threads", "numpy.load" ] ]
twmht/mmcv
[ "44e7eee835c3bc138ee0f667228777eca3db1a17" ]
[ "mmcv/runner/base_module.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\nfrom abc import ABCMeta\nfrom collections import defaultdict\nfrom logging import FileHandler\n\nimport torch.nn as nn\n\nfrom mmcv.runner.dist_utils import master_only\nfrom mmcv.utils.logging import get_logger, logger_initialized, print_log\n\n\nclass BaseModule(nn.Module, metaclass=ABCMeta):\n \"\"\"Base module for all modules in openmmlab.\n\n ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional\n functionality of parameter initialization. Compared with\n ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.\n\n - ``init_cfg``: the config to control the initialization.\n - ``init_weights``: The function of parameter\n initialization and recording initialization\n information.\n - ``_params_init_info``: Used to track the parameter\n initialization information. This attribute only\n exists during executing the ``init_weights``.\n\n Args:\n init_cfg (dict, optional): Initialization config dict.\n \"\"\"\n\n def __init__(self, init_cfg=None):\n \"\"\"Initialize BaseModule, inherited from `torch.nn.Module`\"\"\"\n\n # NOTE init_cfg can be defined in different levels, but init_cfg\n # in low levels has a higher priority.\n\n super(BaseModule, self).__init__()\n # define default value of init_cfg instead of hard code\n # in init_weights() function\n self._is_init = False\n\n self.init_cfg = copy.deepcopy(init_cfg)\n\n # Backward compatibility in derived classes\n # if pretrained is not None:\n # warnings.warn('DeprecationWarning: pretrained is a deprecated \\\n # key, please consider using init_cfg')\n # self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n\n @property\n def is_init(self):\n return self._is_init\n\n def init_weights(self):\n \"\"\"Initialize the weights.\"\"\"\n\n is_top_level_module = False\n # check if it is top-level module\n if not hasattr(self, '_params_init_info'):\n # The `_params_init_info` is used to record the initialization\n # information of the parameters\n # the key should be the obj:`nn.Parameter` of model and the value\n # should be a dict containing\n # - init_info (str): The string that describes the initialization.\n # - tmp_mean_value (FloatTensor): The mean of the parameter,\n # which indicates whether the parameter has been modified.\n # this attribute would be deleted after all parameters\n # is initialized.\n self._params_init_info = defaultdict(dict)\n is_top_level_module = True\n\n # Initialize the `_params_init_info`,\n # When detecting the `tmp_mean_value` of\n # the corresponding parameter is changed, update related\n # initialization information\n for name, param in self.named_parameters():\n self._params_init_info[param][\n 'init_info'] = f'The value is the same before and ' \\\n f'after calling `init_weights` ' \\\n f'of {self.__class__.__name__} '\n self._params_init_info[param][\n 'tmp_mean_value'] = param.data.mean()\n\n # pass `params_init_info` to all submodules\n # All submodules share the same `params_init_info`,\n # so it will be updated when parameters are\n # modified at any level of the model.\n for sub_module in self.modules():\n sub_module._params_init_info = self._params_init_info\n\n # Get the initialized logger, if not exist,\n # create a logger named `mmcv`\n logger_names = list(logger_initialized.keys())\n logger_name = logger_names[0] if logger_names else 'mmcv'\n\n from ..cnn import initialize\n from ..cnn.utils.weight_init import update_init_info\n module_name = self.__class__.__name__\n if not 
self._is_init:\n if self.init_cfg:\n print_log(\n f'initialize {module_name} with init_cfg {self.init_cfg}',\n logger=logger_name)\n initialize(self, self.init_cfg)\n if isinstance(self.init_cfg, dict):\n # prevent the parameters of\n # the pre-trained model\n # from being overwritten by\n # the `init_weights`\n if self.init_cfg['type'] == 'Pretrained':\n return\n\n for m in self.children():\n if hasattr(m, 'init_weights'):\n m.init_weights()\n # users may overload the `init_weights`\n update_init_info(\n m,\n init_info=f'Initialized by '\n f'user-defined `init_weights`'\n f' in {m.__class__.__name__} ')\n\n self._is_init = True\n else:\n warnings.warn(f'init_weights of {self.__class__.__name__} has '\n f'been called more than once.')\n\n if is_top_level_module:\n self._dump_init_info(logger_name)\n\n for sub_module in self.modules():\n del sub_module._params_init_info\n\n @master_only\n def _dump_init_info(self, logger_name):\n \"\"\"Dump the initialization information to a file named\n `initialization.log.json` in workdir.\n\n Args:\n logger_name (str): The name of logger.\n \"\"\"\n\n logger = get_logger(logger_name)\n\n with_file_handler = False\n # dump the information to the logger file if there is a `FileHandler`\n for handler in logger.handlers:\n if isinstance(handler, FileHandler):\n handler.stream.write(\n 'Name of parameter - Initialization information\\n')\n for name, param in self.named_parameters():\n handler.stream.write(\n f'\\n{name} - {param.shape}: '\n f\"\\n{self._params_init_info[param]['init_info']} \\n\")\n handler.stream.flush()\n with_file_handler = True\n if not with_file_handler:\n for name, param in self.named_parameters():\n print_log(\n f'\\n{name} - {param.shape}: '\n f\"\\n{self._params_init_info[param]['init_info']} \\n \",\n logger=logger_name)\n\n def __repr__(self):\n s = super().__repr__()\n if self.init_cfg:\n s += f'\\ninit_cfg={self.init_cfg}'\n return s\n\n\nclass Sequential(BaseModule, nn.Sequential):\n \"\"\"Sequential module in openmmlab.\n\n Args:\n init_cfg (dict, optional): Initialization config dict.\n \"\"\"\n\n def __init__(self, *args, init_cfg=None):\n BaseModule.__init__(self, init_cfg)\n nn.Sequential.__init__(self, *args)\n\n\nclass ModuleList(BaseModule, nn.ModuleList):\n \"\"\"ModuleList in openmmlab.\n\n Args:\n modules (iterable, optional): an iterable of modules to add.\n init_cfg (dict, optional): Initialization config dict.\n \"\"\"\n\n def __init__(self, modules=None, init_cfg=None):\n BaseModule.__init__(self, init_cfg)\n nn.ModuleList.__init__(self, modules)\n\n\nclass ModuleDict(BaseModule, nn.ModuleDict):\n \"\"\"ModuleDict in openmmlab.\n\n Args:\n modules (dict, optional): a mapping (dictionary) of (string: module)\n or an iterable of key-value pairs of type (string, module).\n init_cfg (dict, optional): Initialization config dict.\n \"\"\"\n\n def __init__(self, modules=None, init_cfg=None):\n BaseModule.__init__(self, init_cfg)\n nn.ModuleDict.__init__(self, modules)\n" ]
[ [ "torch.nn.ModuleDict.__init__", "torch.nn.Sequential.__init__", "torch.nn.ModuleList.__init__" ] ]
Jungyhuk/plotcoder
[ "4c5fe923dc69227c58d93f55b8a89fd8bb960703" ]
[ "run.py" ]
[ "import argparse\nimport math\nimport random\nimport sys\nimport os\nimport json\nimport numpy as np\nimport time\n\nimport torch\n\nimport arguments\nimport models\nimport models.data_utils.data_utils as data_utils\nimport models.model_utils as model_utils\nfrom models.model import PlotCodeGenerator\n\ndef create_model(args, word_vocab, code_vocab):\n\tmodel = PlotCodeGenerator(args, word_vocab, code_vocab)\n\tif model.cuda_flag:\n\t\tmodel = model.cuda()\n\tmodel_supervisor = model_utils.Supervisor(model, args)\n\tif args.load_model:\n\t\tmodel_supervisor.load_pretrained(args.load_model)\n\telse:\n\t\tprint('Created model with fresh parameters.')\n\t\tmodel_supervisor.model.init_weights(args.param_init)\n\treturn model_supervisor\n\n\ndef train(args):\n\tprint('Training:')\n\n\tdata_processor = data_utils.DataProcessor(args)\n\ttrain_data = data_processor.load_data(args.train_dataset)\n\ttrain_data, train_indices = data_processor.preprocess(train_data)\n\tdev_data = data_processor.load_data(args.dev_dataset)\n\tdev_data, dev_indices = data_processor.preprocess(dev_data)\n\n\ttrain_data_size = len(train_data)\n\targs.word_vocab_size = data_processor.word_vocab_size\n\targs.code_vocab_size = data_processor.code_vocab_size\n\tmodel_supervisor = create_model(args, data_processor.word_vocab, data_processor.code_vocab)\n\n\tlogger = model_utils.Logger(args)\n\n\tfor epoch in range(args.num_epochs):\n\t\trandom.shuffle(train_data)\n\t\tfor batch_idx in range(0, train_data_size, args.batch_size):\n\t\t\tprint(epoch, batch_idx)\n\t\t\tbatch_input, batch_labels = data_processor.get_batch(train_data, args.batch_size, batch_idx)\n\t\t\ttrain_loss, train_acc = model_supervisor.train(batch_input, batch_labels)\n\t\t\tprint('train loss: %.4f train acc: %.4f' % (train_loss, train_acc))\n\n\t\t\tif model_supervisor.global_step % args.eval_every_n == 0:\n\t\t\t\tmodel_supervisor.save_model()\n\t\t\t\teval_loss, eval_label_acc, eval_data_acc, eval_acc, pred_labels = model_supervisor.eval(dev_data, args.data_order_invariant, args.max_eval_size)\n\t\t\t\tval_summary = {'train_loss': train_loss, 'train_acc': train_acc, 'eval_loss': eval_loss,\n\t\t\t\t'eval_label_acc': eval_label_acc, 'eval_data_acc': eval_data_acc, 'eval_acc': eval_acc}\n\t\t\t\tval_summary['global_step'] = model_supervisor.global_step\n\t\t\t\tlogger.write_summary(val_summary)\n\n\t\t\tif args.lr_decay_steps is not None and model_supervisor.global_step % args.lr_decay_steps == 0:\n\t\t\t\tmodel_supervisor.model.lr_decay(args.lr_decay_rate)\n\n\ndef evaluate(args):\n\tprint('Evaluation')\n\tdata_processor = data_utils.DataProcessor(args)\n\tinit_test_data = data_processor.load_data(args.test_dataset)\n\ttest_data, test_indices = data_processor.preprocess(init_test_data)\n\n\targs.word_vocab_size = data_processor.word_vocab_size\n\targs.code_vocab_size = data_processor.code_vocab_size\n\tmodel_supervisor = create_model(args, data_processor.word_vocab, data_processor.code_vocab)\n\ttest_loss, test_label_acc, test_data_acc, test_acc, predictions = model_supervisor.eval(test_data, args.data_order_invariant)\n\n\tlabel_acc_per_category = [0] * args.num_plot_types\n\tdata_acc_per_category = [0] * args.num_plot_types\n\tacc_per_category = [0] * args.num_plot_types\n\tcnt_per_category = [0] * args.num_plot_types\n\n\tcnt_unpredictable = 0\n\tfor i, item in enumerate(test_data):\n\t\tgt_label = item['label']\n\t\tif args.joint_plot_types:\n\t\t\tgt_label = data_processor.get_joint_plot_type(gt_label)\n\t\tcnt_per_category[gt_label] += 
1\n\n\t\tgt_prog = data_processor.ids_to_prog(item, item['output_gt'])\n\n\t\tif data_utils._PAD in gt_prog:\n\t\t\tcnt_unpredictable += 1\n\n\t\tpred_prog = data_processor.ids_to_prog(item, predictions[i])\n\n\t\tpred_label = data_processor.label_extraction(pred_prog)\n\t\tif args.joint_plot_types:\n\t\t\tpred_label = data_processor.get_joint_plot_type(pred_label)\n\t\tif pred_label == gt_label:\n\t\t\tlabel_acc_per_category[gt_label] += 1\n\n\t\ttarget_dfs, target_strs, target_vars = item['target_dfs'], item['target_strs'], item['target_vars']\n\t\tpred_dfs, pred_strs, pred_vars, _ = data_processor.data_extraction(pred_prog,\n\t\t\titem['reserved_dfs'], item['reserved_strs'], item['reserved_vars'])\n\n\t\tif args.data_order_invariant:\n\t\t\tif (set(target_dfs + target_strs + target_vars) == set(pred_dfs + pred_strs + pred_vars) and\n\t\t\t\tlen(target_dfs + target_strs + target_vars) == len(pred_dfs + pred_strs + pred_vars)):\n\t\t\t\tcur_data_acc = 1\n\t\t\telse:\n\t\t\t\tcur_data_acc = 0\n\t\telse:\n\t\t\tif target_dfs + target_strs + target_vars == pred_dfs + pred_strs + pred_vars:\n\t\t\t\tcur_data_acc = 1\n\t\t\telse:\n\t\t\t\tcur_data_acc = 0\n\t\tif cur_data_acc == 1:\n\t\t\tdata_acc_per_category[gt_label] += 1\n\t\t\tif pred_label == gt_label:\n\t\t\t\tacc_per_category[gt_label] += 1\n\n\tprint('test label acc: %.4f test data acc: %.4f test acc: %.4f ' % (test_label_acc, test_data_acc, test_acc))\n\tprint('Unpredictable samples: %d %.4f' % (cnt_unpredictable, cnt_unpredictable * 1.0 / len(test_data)))\n\tprint('Upper bound: %.4f' % (1 - cnt_unpredictable * 1.0 / len(test_data)))\n\tfor i in range(args.num_plot_types):\n\t\tprint('cnt per category: ', i, cnt_per_category[i])\n\t\tif cnt_per_category[i] == 0:\n\t\t\tcontinue\n\t\tprint('label acc per category: ', i, label_acc_per_category[i], label_acc_per_category[i] * 1.0 / cnt_per_category[i])\n\t\tprint('data acc per category: ', i, data_acc_per_category[i], data_acc_per_category[i] * 1.0 / cnt_per_category[i])\n\t\tprint('acc per category: ', i, acc_per_category[i], acc_per_category[i] * 1.0 / cnt_per_category[i])\n\n\nif __name__ == \"__main__\":\n\targ_parser = arguments.get_arg_parser('juice')\n\targs = arg_parser.parse_args()\n\targs.cuda = not args.cpu and torch.cuda.is_available()\n\trandom.seed(args.seed)\n\tnp.random.seed(args.seed)\n\tif args.eval:\n\t\tevaluate(args)\n\telse:\n\t\ttrain(args)\t\n" ]
[ [ "torch.cuda.is_available", "numpy.random.seed" ] ]
nusc2016/lambdata-DS15
[ "b98d13d2155c741bb4fdda7f0ad74cbb12be3bb1" ]
[ "my_script.py" ]
[ "import pandas\n\ndef enlarge(n):\n return n * 100\n\nprint(\"HELLO WORLD\")\n\ndf = pandas.DataFrame({\"state\": [\"CT\", \"CO\", \"CA\", \"TX\"]})\nprint(df.head())\n\nprint(\"-----------------\")\nx = 5\nprint(\"NUMBER\", x)\nprint(\"ENLARGED NUMBER\", enlarge(x)) # invoking our function!!" ]
[ [ "pandas.DataFrame" ] ]
peipeiwang6/Genomic_prediction_in_Switchgrass
[ "1fba3508c0d81d16e0629e3cf94ff4d174a85b13" ]
[ "Other_data_processing_scripts/35_randomize.py" ]
[ "import os,sys\nimport pandas as pd\nimport numpy\nfrom numpy import random\nfrom numpy.random import shuffle\nfile = sys.argv[1]\ndf = pd.read_csv(file, sep=',', index_col = None, header = 0)\n# rowname = df.index.tolist()\n# row = shuffle(rowname)\n# df.index = rowname\nshuffle(df.ID)\ndf.to_csv(file, index=False, header=True,sep=\",\")\n" ]
[ [ "pandas.read_csv", "numpy.random.shuffle" ] ]
naternguyen/NAS_FinalExam
[ "da838b7df9615160d67092fade919e2251cf753f" ]
[ "feeders/feederBoth.py" ]
[ "import numpy as np\nimport pickle\nimport torch\nfrom torch.utils.data import Dataset\nimport sys\n\nsys.path.extend(['../'])\nfrom feeders import tools\n\n\nclass Feeder(Dataset):\n def __init__(self, data_path1, data_path2, label_path,\n random_choose=False, random_shift=False, random_move=False,\n window_size=-1, normalization=False, debug=False, use_mmap=True):\n \"\"\"\n \n :param data_path: \n :param label_path: \n :param random_choose: If true, randomly choose a portion of the input sequence\n :param random_shift: If true, randomly pad zeros at the begining or end of sequence\n :param random_move: \n :param window_size: The length of the output sequence\n :param normalization: If true, normalize input sequence\n :param debug: If true, only use the first 100 samples\n :param use_mmap: If true, use mmap mode to load data, which can save the running memory\n \"\"\"\n\n self.debug = debug\n self.data_path1 = data_path1\n self.data_path2 = data_path2\n \n self.label_path = label_path\n self.random_choose = random_choose\n self.random_shift = random_shift\n self.random_move = random_move\n self.window_size = window_size\n self.normalization = normalization\n self.use_mmap = use_mmap\n self.load_data()\n if normalization:\n self.get_mean_map()\n\n def load_data(self):\n # data: N C V T M\n\n try:\n with open(self.label_path) as f:\n self.sample_name, self.label = pickle.load(f)\n except:\n # for pickle file from python2\n with open(self.label_path, 'rb') as f:\n self.sample_name, self.label = pickle.load(f, encoding='latin1')\n\n # load data\n if self.use_mmap:\n self.data1 = np.load(self.data_path1, mmap_mode='r')\n self.data2 = np.load(self.data_path2, mmap_mode='r')\n self.data = np.concatenate((self.data1, self.data2), axis=1)\n else:\n self.data1 = np.load(self.data_path)\n self.data2 = np.load(self.data_path)\n self.data = np.concatenate((self.data1, self.data2), axis=1)\n if self.debug:\n self.label = self.label[0:100]\n self.data = self.data[0:100]\n self.sample_name = self.sample_name[0:100]\n\n def get_mean_map(self):\n data = self.data\n N, C, T, V, M = data.shape\n self.mean_map = data.mean(axis=2, keepdims=True).mean(axis=4, keepdims=True).mean(axis=0)\n self.std_map = data.transpose((0, 2, 4, 1, 3)).reshape((N * T * M, C * V)).std(axis=0).reshape((C, 1, V, 1))\n\n def __len__(self):\n return len(self.label)\n\n def __iter__(self):\n return self\n\n def __getitem__(self, index):\n data_numpy = self.data[index]\n label = self.label[index]\n data_numpy = np.array(data_numpy)\n\n if self.normalization:\n data_numpy = (data_numpy - self.mean_map) / self.std_map\n if self.random_shift:\n data_numpy = tools.random_shift(data_numpy)\n if self.random_choose:\n data_numpy = tools.random_choose(data_numpy, self.window_size)\n elif self.window_size > 0:\n data_numpy = tools.auto_pading(data_numpy, self.window_size)\n if self.random_move:\n data_numpy = tools.random_move(data_numpy)\n\n return data_numpy, label, index\n\n def top_k(self, score, top_k):\n rank = score.argsort()\n hit_top_k = [l in rank[i, -top_k:] for i, l in enumerate(self.label)]\n return sum(hit_top_k) * 1.0 / len(hit_top_k)\n\n\ndef import_class(name):\n components = name.split('.')\n mod = __import__(components[0])\n for comp in components[1:]:\n mod = getattr(mod, comp)\n return mod\n\n\ndef test(data_path, label_path, vid=None, graph=None, is_3d=False):\n '''\n vis the samples using matplotlib\n :param data_path: \n :param label_path: \n :param vid: the id of sample\n :param graph: \n :param is_3d: when vis NTU, 
set it True\n :return: \n '''\n import matplotlib.pyplot as plt\n loader = torch.utils.data.DataLoader(\n dataset=Feeder(data_path, label_path),\n batch_size=64,\n shuffle=False,\n num_workers=2)\n\n if vid is not None:\n sample_name = loader.dataset.sample_name\n sample_id = [name.split('.')[0] for name in sample_name]\n index = sample_id.index(vid)\n data, label, index = loader.dataset[index]\n data = data.reshape((1,) + data.shape)\n\n # for batch_idx, (data, label) in enumerate(loader):\n N, C, T, V, M = data.shape\n\n plt.ion()\n fig = plt.figure()\n if is_3d:\n from mpl_toolkits.mplot3d import Axes3D\n ax = fig.add_subplot(111, projection='3d')\n else:\n ax = fig.add_subplot(111)\n\n if graph is None:\n p_type = ['b.', 'g.', 'r.', 'c.', 'm.', 'y.', 'k.', 'k.', 'k.', 'k.']\n pose = [\n ax.plot(np.zeros(V), np.zeros(V), p_type[m])[0] for m in range(M)\n ]\n ax.axis([-1, 1, -1, 1])\n for t in range(T):\n for m in range(M):\n pose[m].set_xdata(data[0, 0, t, :, m])\n pose[m].set_ydata(data[0, 1, t, :, m])\n fig.canvas.draw()\n plt.pause(0.001)\n else:\n p_type = ['b-', 'g-', 'r-', 'c-', 'm-', 'y-', 'k-', 'k-', 'k-', 'k-']\n import sys\n from os import path\n sys.path.append(\n path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n G = import_class(graph)()\n edge = G.inward\n pose = []\n for m in range(M):\n a = []\n for i in range(len(edge)):\n if is_3d:\n a.append(ax.plot(np.zeros(3), np.zeros(3), p_type[m])[0])\n else:\n a.append(ax.plot(np.zeros(2), np.zeros(2), p_type[m])[0])\n pose.append(a)\n ax.axis([-1, 1, -1, 1])\n if is_3d:\n ax.set_zlim3d(-1, 1)\n for t in range(T):\n for m in range(M):\n for i, (v1, v2) in enumerate(edge):\n x1 = data[0, :2, t, v1, m]\n x2 = data[0, :2, t, v2, m]\n if (x1.sum() != 0 and x2.sum() != 0) or v1 == 1 or v2 == 1:\n pose[m][i].set_xdata(data[0, 0, t, [v1, v2], m])\n pose[m][i].set_ydata(data[0, 1, t, [v1, v2], m])\n if is_3d:\n pose[m][i].set_3d_properties(data[0, 2, t, [v1, v2], m])\n fig.canvas.draw()\n # plt.savefig('/home/lshi/Desktop/skeleton_sequence/' + str(t) + '.jpg')\n plt.pause(0.01)\n\n\nif __name__ == '__main__':\n import os\n\n os.environ['DISPLAY'] = 'localhost:10.0'\n data_path = \"../data/ntu/xview/val_data_joint.npy\"\n label_path = \"../data/ntu/xview/val_label.pkl\"\n graph = 'graph.ntu_rgb_d.Graph'\n test(data_path, label_path, vid='S004C001P003R001A032', graph=graph, is_3d=True)\n # data_path = \"../data/kinetics/val_data.npy\"\n # label_path = \"../data/kinetics/val_label.pkl\"\n # graph = 'graph.Kinetics'\n # test(data_path, label_path, vid='UOD7oll3Kqo', graph=graph)\n" ]
[ [ "numpy.load", "matplotlib.pyplot.pause", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.array", "matplotlib.pyplot.ion", "numpy.concatenate" ] ]
rohandhanraj/Auto-AI-Pipeline
[ "d5f39715c802db45afae0d5978d228bf0bcd2f0a" ]
[ "controller/project_controller/projects/WaferFaultDetection_new/best_model_finder/tuner.py" ]
[ "import uuid\n\nimport numpy\n\nimport pandas\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC, SVR\nfrom sklearn.tree import DecisionTreeRegressor\nfrom xgboost import XGBClassifier, XGBRegressor\nfrom sklearn.linear_model import SGDRegressor\nfrom sklearn.metrics import roc_auc_score, accuracy_score, r2_score, roc_curve\nimport sys\nfrom exception_layer.generic_exception.generic_exception import GenericException as RandomForestClassificationException\nfrom exception_layer.generic_exception.generic_exception import GenericException as XGBoostClassificationException\nfrom exception_layer.generic_exception.generic_exception import GenericException as ModelFinderException\nfrom plotly_dash.accuracy_graph.accuracy_graph import AccurayGraph\nfrom sklearn.naive_bayes import GaussianNB\nfrom project_library_layer.initializer.initializer import Initializer\nfrom sklearn.linear_model import Ridge, Lasso, RidgeCV, LassoCV, ElasticNet, ElasticNetCV\n\n\nclass ModelFinder:\n \"\"\"\n This class shall be used to find the model with best accuracy and AUC score.\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n\n def __init__(self, project_id, file_object, logger_object):\n try:\n self.project_id = project_id\n self.file_object = file_object\n self.logger_object = logger_object\n self.clf = RandomForestClassifier()\n self.knn = KNeighborsClassifier()\n self.xgb = XGBClassifier(objective='binary:logistic')\n self.sv_classifier = SVC()\n self.gnb = GaussianNB()\n self.linearReg = LinearRegression()\n self.RandomForestReg = RandomForestRegressor()\n self.DecisionTreeReg = DecisionTreeRegressor()\n self.sv_regressor = SVR()\n self.sgd_regression = SGDRegressor()\n self.initailizer = Initializer()\n self.model_name = []\n self.model = []\n self.score = []\n\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed during object instantiation in module [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.__init__.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_ridge_regression(self, train_x, train_y):\n try:\n self.logger_object.log(\"Entered the get best params for Ridge Repressor\")\n alphas = numpy.random.uniform(low=0, high=10, size=(50,))\n ridge_cv = RidgeCV(alphas=alphas, cv=5, normalize=True)\n ridge_cv.fit(train_x, train_y)\n alpha = ridge_cv.alpha_\n ridge_model = Ridge(alpha=alpha)\n ridge_model.fit(train_x, train_y)\n self.logger_object.log(\n 'Ridge Regressor best params <alpha value: ' + str(ridge_cv.alpha_) + '>. 
Exited the '\n 'get_best_params_for_ridge_regression method of the Model_Finder class')\n return ridge_model\n except Exception as e:\n model_finder = ModelFinderException(\n \"Model Selection Failed in module [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_ridge_regression.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_support_vector_regressor(self, train_x, train_y):\n try:\n self.logger_object.log(\"Entered the get best params for Support Vector Repressor\")\n\n param_grid = {'C': [0.1, 1, 10, 50, 100, 500], 'gamma': [1, 0.5, 0.1, 0.01, 0.001]}\n\n grid = GridSearchCV(SVR(), param_grid, verbose=3, cv=5)\n\n grid.fit(train_x, train_y)\n\n C = grid.best_params_['C']\n gamma = grid.best_params_['gamma']\n svr_reg = SVR(C=C, gamma=gamma)\n svr_reg.fit(train_x, train_y)\n\n self.logger_object.log('Support Vector Regressor best params: ' + str(grid.best_params_) + '. Exited the '\n 'get_best_params_for_support_vector_regressor method of the Model_Finder class')\n return svr_reg\n except Exception as e:\n model_finder = ModelFinderException(\n \"Model Selection Failed in module [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_support_vector_regressor.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_random_forest(self, train_x, train_y):\n \"\"\"\n Method Name: get_best_params_for_random_forest\n Description: get the parameters for Random Forest Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n try:\n self.logger_object.log('Entered the get_best_params_for_random_forest method of the Model_Finder class')\n # initializing with different combination of parameters\n param_grid = {\"n_estimators\": [10, 130], \"criterion\": ['gini', 'entropy'],\n \"max_depth\": range(2, 4, 1), \"max_features\": ['auto', 'log2']}\n # Creating an object of the Grid Search class\n grid = GridSearchCV(estimator=self.clf, param_grid=param_grid, cv=5, verbose=3)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n criterion = grid.best_params_['criterion']\n max_depth = grid.best_params_['max_depth']\n max_features = grid.best_params_['max_features']\n n_estimators = grid.best_params_['n_estimators']\n\n # creating a new model with the best parameters\n self.clf = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion,\n max_depth=max_depth, max_features=max_features)\n # training the mew model\n self.clf.fit(train_x, train_y)\n self.logger_object.log('Random Forest best params: ' + str(grid.best_params_) + '. 
Exited the '\n 'get_best_params_for_random_forest method of the Model_Finder class')\n\n return self.clf\n except Exception as e:\n random_clf_exception = RandomForestClassificationException(\n \"Random Forest Parameter tuning failed in module [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_random_forest.__name__))\n raise Exception(random_clf_exception.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_xgboost(self, train_x, train_y):\n\n \"\"\"\n Method Name: get_best_params_for_xgboost\n Description: get the parameters for XGBoost Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n\n try:\n self.logger_object.log('Entered the get_best_params_for_xgboost method of the Model_Finder class')\n # initializing with different combination of parameters\n param_grid_xgboost = {\n\n 'learning_rate': [0.5, 0.001],\n 'max_depth': [20],\n\n 'n_estimators': [10, 200]\n\n }\n # Creating an object of the Grid Search class\n grid = GridSearchCV(XGBClassifier(objective='binary:logistic'), param_grid_xgboost, verbose=3, cv=5)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n learning_rate = grid.best_params_['learning_rate']\n max_depth = grid.best_params_['max_depth']\n n_estimators = grid.best_params_['n_estimators']\n\n # creating a new model with the best parameters\n self.xgb = XGBClassifier(learning_rate=learning_rate, max_depth=max_depth, n_estimators=n_estimators)\n # training the mew model\n self.xgb.fit(train_x, train_y)\n self.logger_object.log('XGBoost best params: ' + str(grid.best_params_) + '. Exited the '\n 'get_best_params_for_xgboost method of the Model_Finder class')\n return self.xgb\n except Exception as e:\n xg_boost_clf_exception = XGBoostClassificationException(\n \"XGBoost Parameter tuning failed in module [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_xgboost.__name__))\n raise Exception(xg_boost_clf_exception.error_message_detail(str(e), sys)) from e\n\n def get_best_model(self, train_x, train_y, test_x, test_y, cluster_no=None):\n \"\"\"\n Method Name: get_best_model\n Description: Find out the Model which has the best AUC score.\n Output: The best model name and the model object\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n\n # create best model for XGBoost\n try:\n if cluster_no is not None:\n title_generator = \" Cluster \" + cluster_no + \" model {}\"\n else:\n title_generator = \"Model {}\"\n\n # XG Boost model\n\n self.model_name.append('XG_BOOST')\n title = title_generator.format('XG_BOOST')\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n xgboost = self.get_best_params_for_xgboost(train_x, train_y)\n prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model\n\n if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. 
We\n # will use accuracy in that case\n xgboost_score = accuracy_score(test_y, prediction_xgboost)\n self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score)) # Log AUC\n else:\n xgboost_score = roc_auc_score(test_y, prediction_xgboost) # AUC for XGBoost\n self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC\n y_score = xgboost.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.model.append(xgboost)\n self.score.append(xgboost_score)\n\n # create best model for naive bayes\n self.model_name.append('NAIVE_BAYES')\n title = title_generator.format('NAIVE_BAYES')\n naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)\n prediction_naive_bayes = naive_bayes.predict(test_x) # prediction using the Random Forest Algorithm\n self.model.append(naive_bayes)\n if len(test_y.unique()) == 1: # if there is only one label in y,\n # then roc_auc_score returns error. We will use accuracy in that case\n naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)\n self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))\n else:\n naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes) # AUC for Random Forest\n self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))\n y_score = naive_bayes.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[0])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.score.append(naive_bayes_score)\n # create best model for Random forest\n self.model_name.append('Random_Forest')\n title = title_generator.format('Random_Forest')\n random_forest = self.get_best_params_for_random_forest(train_x, train_y)\n prediction_random_forest = random_forest.predict(test_x)\n self.model.append(random_forest)\n if len(test_y.unique()) == 1:\n random_forest_score = accuracy_score(test_y, prediction_random_forest)\n self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))\n else:\n random_forest_score = roc_auc_score(test_y, prediction_random_forest) # AUC for Random Forest\n self.logger_object.log('AUC for Random Forest' + str(random_forest_score))\n y_score = random_forest.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.score.append(random_forest_score)\n\n # create best model for KNN\n self.model_name.append('KNN')\n title = title_generator.format('KNN')\n knn_clf = self.get_best_params_for_KNN(train_x, train_y)\n prediction_knn = knn_clf.predict(test_x)\n self.model.append(knn_clf)\n if len(test_y.unique()) == 1:\n knn_score = accuracy_score(test_y, prediction_knn)\n self.logger_object.log('Accuracy for KNN clf' + str(knn_score))\n else:\n knn_score = roc_auc_score(test_y, prediction_knn) # AUC for Random Forest\n self.logger_object.log('AUC for KNN' + str(knn_score))\n y_score = knn_clf.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, 
pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n self.score.append(knn_score)\n\n \"\"\" 5. SVC \"\"\"\n if len(test_y.unique()) != 1:\n self.model_name.append(\"SVC\")\n title = title_generator.format(\"SVC\")\n svc_clf = self.get_best_params_for_svm_fraud_detection_and_scania(train_x, train_y)\n prediction_svc = svc_clf.predict(test_x)\n self.model.append(svc_clf)\n if len(test_y.unique()) == 1:\n svc_score = accuracy_score(test_y, prediction_svc)\n self.logger_object.log('Accuracy for svc clf' + str(svc_score))\n else:\n svc_score = roc_auc_score(test_y, prediction_svc) # AUC for Random Forest\n self.logger_object.log('AUC for svc' + str(svc_score))\n y_score = svc_clf.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n self.score.append(svc_score)\n\n AccurayGraph().save_accuracy_bar_graph(\n model_name_list=self.model_name,\n accuracy_score_list=self.score,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"Model List\",\n y_label=\"Accuracy score comparison {}\".format(self.model_name),\n title=\"Accuracy Score \"\n )\n execution_model_comparison_id = str(uuid.uuid4())\n for data in zip(self.model_name, self.score):\n self.save_accuracy_data(model_name=data[0], score=data[1],\n execution_model_comparision_id=execution_model_comparison_id)\n # comparing the two models\n return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)\n\n except Exception as e:\n model_finder = ModelFinderException(\n \"Model Selection Failed in module [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_model.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_random_forest_thyroid(self, train_x, train_y):\n \"\"\"\n Method Name: get_best_params_for_random_forest\n Description: get the parameters for Random Forest Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n self.logger_object.log('Entered the get_best_params_for_random_forest method of the Model_Finder class')\n try:\n # initializing with different combination of parameters\n param_grid = {\"n_estimators\": [10, 50, 100, 130], \"criterion\": ['gini', 'entropy'],\n \"max_depth\": range(2, 4, 1), \"max_features\": ['auto', 'log2']}\n\n # Creating an object of the Grid Search class\n grid = GridSearchCV(estimator=RandomForestClassifier(), param_grid=param_grid, cv=5, verbose=3)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n criterion = grid.best_params_['criterion']\n max_depth = grid.best_params_['max_depth']\n max_features = grid.best_params_['max_features']\n n_estimators = grid.best_params_['n_estimators']\n\n # creating a new model with the best parameters\n clf = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion,\n max_depth=max_depth, max_features=max_features)\n # training the mew model\n 
clf.fit(train_x, train_y)\n self.logger_object.log('Random Forest best params: ' + str(\n grid.best_params_) + '. Exited the get_best_params_for_random_forest method of the Model_Finder class')\n\n return clf\n except Exception as e:\n model_finder = ModelFinderException(\n \"Model Selection Failed in module [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_random_forest_thyroid.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_KNN_fraud_detection(self, train_x, train_y):\n \"\"\"\n Method Name: get_best_params_for_KNN\n Description: get the parameters for KNN Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n self.logger_object.log('Entered the get_best_params_for_Ensembled_KNN method of the Model_Finder class')\n try:\n # initializing with different combination of parameters\n param_grid_knn = {\n 'algorithm': ['ball_tree', 'kd_tree', 'brute'],\n 'leaf_size': [10, 17, 24, 28, 30, 35],\n 'n_neighbors': [4, 5],\n 'p': [1, 2]\n }\n\n # Creating an object of the Grid Search class\n grid = GridSearchCV(KNeighborsClassifier(), param_grid_knn, verbose=3,\n cv=5)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n algorithm = grid.best_params_['algorithm']\n leaf_size = grid.best_params_['leaf_size']\n n_neighbors = grid.best_params_['n_neighbors']\n p = grid.best_params_['p']\n\n # creating a new model with the best parameters\n knn = KNeighborsClassifier(algorithm=algorithm, leaf_size=leaf_size,\n n_neighbors=n_neighbors, p=p, n_jobs=-1)\n # training the mew model\n knn.fit(train_x, train_y)\n self.logger_object.log('KNN best params: ' + str(\n grid.best_params_) + '. 
Exited the KNN method of the Model_Finder class')\n return knn\n except Exception as e:\n model_finder = ModelFinderException(\n \"Model Selection Failed in module [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_KNN_fraud_detection.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_KNN(self, train_x, train_y):\n \"\"\"\n Method Name: get_best_params_for_KNN\n Description: get the parameters for KNN Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n self.logger_object.log('Entered the get_best_params_for_Ensembled_KNN method of the Model_Finder class')\n try:\n # initializing with different combination of parameters\n param_grid_knn = {\n 'algorithm': ['ball_tree', 'kd_tree', 'brute'],\n 'leaf_size': [10, 17, 24, 28, 30, 35],\n 'n_neighbors': [4, 5, 8, 10, 11],\n 'p': [1, 2]\n }\n\n # Creating an object of the Grid Search class\n grid = GridSearchCV(KNeighborsClassifier(), param_grid_knn, verbose=3,\n cv=5)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n algorithm = grid.best_params_['algorithm']\n leaf_size = grid.best_params_['leaf_size']\n n_neighbors = grid.best_params_['n_neighbors']\n p = grid.best_params_['p']\n\n # creating a new model with the best parameters\n knn = KNeighborsClassifier(algorithm=algorithm, leaf_size=leaf_size,\n n_neighbors=n_neighbors, p=p, n_jobs=-1)\n # training the mew model\n knn.fit(train_x, train_y)\n self.logger_object.log('KNN best params: ' + str(\n grid.best_params_) + '. Exited the KNN method of the Model_Finder class')\n return knn\n except Exception as e:\n model_finder = ModelFinderException(\n \"Model Selection Failed in module [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_KNN.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_model_thyroid(self, train_x, train_y, test_x, test_y, cluster_no=None):\n \"\"\"\n Method Name: get_best_model\n Description: Find out the Model which has the best AUC score.\n Output: The best model name and the model object\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n\n # create best model for KNN\n try:\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n if cluster_no is not None:\n title_generator = \" Cluster \" + cluster_no + \" model {}\"\n else:\n title_generator = \"Model {}\"\n # XG Boost model\n self.model_name.append('XG_BOOST')\n title = title_generator.format('XG_BOOST')\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n xgboost = self.get_best_params_for_xgboost(train_x, train_y)\n prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model\n\n if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. 
We\n # will use accuracy in that case\n xgboost_score = accuracy_score(test_y, prediction_xgboost)\n self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score)) # Log AUC\n else:\n y_scores = xgboost.predict_proba(test_x)\n AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, xgboost,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title\n )\n xgboost_score = roc_auc_score(test_y, y_scores, multi_class='ovr') # AUC for XGBoost\n self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC\n\n self.model.append(xgboost)\n self.score.append(xgboost_score)\n\n # create best model for naive bayes\n self.model_name.append('NAIVE_BAYES')\n title = title_generator.format('NAIVE_BAYES')\n naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)\n prediction_naive_bayes = naive_bayes.predict(test_x) # prediction using the Random Forest Algorithm\n self.model.append(naive_bayes)\n if len(test_y.unique()) == 1: # if there is only one label in y,\n # then roc_auc_score returns error. We will use accuracy in that case\n naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)\n self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))\n else:\n y_scores = naive_bayes.predict_proba(test_x)\n AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, naive_bayes,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title\n )\n naive_bayes_score = roc_auc_score(test_y, y_scores,\n multi_class='ovr') # AUC for Random Forest\n self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))\n\n self.score.append(naive_bayes_score)\n # create best model for Random forest\n self.model_name.append('Random_Forest')\n title = title_generator.format('Random_Forest')\n random_forest = self.get_best_params_for_random_forest_thyroid(train_x, train_y)\n prediction_random_forest = random_forest.predict(test_x)\n self.model.append(random_forest)\n if len(test_y.unique()) == 1:\n random_forest_score = accuracy_score(test_y, prediction_random_forest)\n self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))\n else:\n y_scores = random_forest.predict_proba(test_x)\n AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, random_forest,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title\n )\n random_forest_score = roc_auc_score(test_y, y_scores,\n multi_class='ovr') # AUC for Random Forest\n self.logger_object.log('AUC for Random Forest' + str(random_forest_score))\n\n self.score.append(random_forest_score)\n\n # create best model for KNN\n self.model_name.append('KNN')\n title = title_generator.format('KNN')\n knn_clf = self.get_best_params_for_KNN(train_x, train_y)\n prediction_knn = knn_clf.predict(test_x)\n self.model.append(knn_clf)\n if len(test_y.unique()) == 1:\n knn_score = accuracy_score(test_y, prediction_knn)\n self.logger_object.log('Accuracy for KNN clf' + str(knn_score))\n else:\n y_scores = knn_clf.predict_proba(test_x)\n AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, knn_clf,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title\n )\n knn_score = roc_auc_score(test_y, y_scores, multi_class='ovr') # AUC for Random Forest\n self.logger_object.log('AUC for KNN' + str(knn_score))\n\n 
self.score.append(knn_score)\n\n \"\"\" 5. SVC \"\"\"\n if len(test_y.unique()) != 1:\n self.model_name.append(\"SVC\")\n title = title_generator.format(\"SVC\")\n svc_clf = self.get_best_params_for_svm_fraud_detection_and_scania(train_x, train_y)\n prediction_svc = svc_clf.predict(test_x)\n self.model.append(svc_clf)\n if len(test_y.unique()) == 1:\n svc_score = accuracy_score(test_y, prediction_svc)\n self.logger_object.log('Accuracy for svc clf' + str(svc_score))\n else:\n y_scores = svc_clf.predict_proba(test_x)\n AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, svc_clf,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title\n )\n svc_score = roc_auc_score(test_y, y_scores, multi_class='ovr') # AUC for Random Forest\n self.logger_object.log('AUC for svc' + str(svc_score))\n\n self.score.append(svc_score)\n\n AccurayGraph().save_accuracy_bar_graph(\n model_name_list=self.model_name,\n accuracy_score_list=self.score,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"Model List\",\n y_label=\"Accuracy score comparison {}\".format(self.model_name),\n title=\"Accuracy Score \"\n )\n\n execution_model_comparison_id = str(uuid.uuid4())\n\n for data in zip(self.model_name, self.score):\n self.save_accuracy_data(model_name=data[0], score=data[1],\n execution_model_comparision_id=execution_model_comparison_id)\n return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)\n\n except Exception as e:\n model_finder = ModelFinderException(\n \"Model Selection Failed in module [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_model_thyroid.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_random_forest_mushroom(self, train_x, train_y):\n \"\"\"\n Method Name: get_best_params_for_random_forest\n Description: get the parameters for Random Forest Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n\n try:\n self.logger_object.log('Entered the get_best_params_for_random_forest method of the Model_Finder class')\n # initializing with different combination of parameters\n param_grid = {\"n_estimators\": [10, 50, 100, 130], \"criterion\": ['gini', 'entropy'],\n \"max_depth\": range(2, 4, 1), \"max_features\": ['auto', 'log2']}\n\n # Creating an object of the Grid Search class\n grid = GridSearchCV(estimator=RandomForestClassifier(), param_grid=param_grid, cv=5, verbose=3)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n criterion = grid.best_params_['criterion']\n max_depth = grid.best_params_['max_depth']\n max_features = grid.best_params_['max_features']\n n_estimators = grid.best_params_['n_estimators']\n\n # creating a new model with the best parameters\n clf = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion,\n max_depth=max_depth, max_features=max_features)\n # training the mew model\n clf.fit(train_x, train_y)\n self.logger_object.log('Random Forest best params: ' + str(\n grid.best_params_) + '.Exited the get_best_params_for_random_forest method of the Model_Finder class')\n\n return clf\n except Exception as e:\n model_finder = ModelFinderException(\n 
\"Model Selection Failed in module [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_random_forest_mushroom.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_KNN_mushroom(self, train_x, train_y):\n \"\"\"\n Method Name: get_best_params_for_KNN\n Description: get the parameters for KNN Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n\n try:\n self.logger_object.log('Entered the get_best_params_for_KNN method of the Model_Finder class')\n # initializing with different combination of parameters\n param_grid_knn = {\n 'algorithm': ['ball_tree', 'kd_tree', 'brute'],\n 'leaf_size': [10, 17, 24, 28, 30, 35],\n 'n_neighbors': [4, 5, 8, 10, 11],\n 'p': [1, 2]\n }\n\n # Creating an object of the Grid Search class\n grid = GridSearchCV(KNeighborsClassifier(), param_grid_knn, verbose=3,\n cv=5)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n algorithm = grid.best_params_['algorithm']\n leaf_size = grid.best_params_['leaf_size']\n n_neighbors = grid.best_params_['n_neighbors']\n p = grid.best_params_['p']\n\n # creating a new model with the best parameters\n knn = KNeighborsClassifier(algorithm=algorithm, leaf_size=leaf_size,\n n_neighbors=n_neighbors, p=p, n_jobs=-1)\n # training the mew model\n knn.fit(train_x, train_y)\n self.logger_object.log('KNN best params: ' + str(\n grid.best_params_) + '. Exited the KNN method of the Model_Finder class')\n return knn\n except Exception as e:\n model_finder = ModelFinderException(\n \"Model Selection Failed in module [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_KNN_mushroom.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_binary_format_target_value(self, target_column):\n try:\n column_value = target_column.unique()\n target_column = target_column.replace(column_value[0], 0)\n target_column = target_column.replace(column_value[1], 1)\n return target_column\n except Exception as e:\n model_finder = ModelFinderException(\n \"Model Selection Failed in module [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_KNN_mushroom.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_model_mushroom(self, train_x, train_y, test_x, test_y, cluster_no=None):\n \"\"\"\n Method Name: get_best_model\n Description: Find out the Model which has the best AUC score.\n Output: The best model name and the model object\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n\n # create best model for KNN\n try:\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n title_generator = \" Cluster \" + cluster_no + \" model {}\"\n\n # XG Boost model\n\n self.model_name.append('XG_BOOST')\n title = title_generator.format('XG_BOOST')\n\n xgboost = self.get_best_params_for_xgboost_income_prediction(train_x, train_y)\n prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model\n\n if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. 
We\n # will use accuracy in that case\n xgboost_score = accuracy_score(test_y, prediction_xgboost)\n self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score)) # Log AUC\n else:\n xgboost_score = roc_auc_score(test_y, prediction_xgboost) # AUC for XGBoost\n self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC\n y_score = xgboost.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.model.append(xgboost)\n self.score.append(xgboost_score)\n\n # create best model for naive bayes\n self.model_name.append('NAIVE_BAYES')\n title = title_generator.format('NAIVE_BAYES')\n naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)\n prediction_naive_bayes = naive_bayes.predict(test_x) # prediction using the Random Forest Algorithm\n self.model.append(naive_bayes)\n if len(test_y.unique()) == 1: # if there is only one label in y,\n # then roc_auc_score returns error. We will use accuracy in that case\n naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)\n self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))\n else:\n naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes) # AUC for Random Forest\n self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))\n y_score = naive_bayes.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[0])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.score.append(naive_bayes_score)\n # create best model for Random forest\n self.model_name.append('Random_Forest')\n title = title_generator.format('Random_Forest')\n random_forest = self.get_best_params_for_random_forest_mushroom(train_x, train_y)\n prediction_random_forest = random_forest.predict(test_x)\n self.model.append(random_forest)\n if len(test_y.unique()) == 1:\n random_forest_score = accuracy_score(test_y, prediction_random_forest)\n self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))\n else:\n random_forest_score = roc_auc_score(test_y, prediction_random_forest) # AUC for Random Forest\n self.logger_object.log('AUC for Random Forest' + str(random_forest_score))\n y_score = random_forest.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.score.append(random_forest_score)\n\n # create best model for KNN\n self.model_name.append('KNN')\n title = title_generator.format('KNN')\n knn_clf = self.get_best_params_for_KNN_mushroom(train_x, train_y)\n prediction_knn = knn_clf.predict(test_x)\n self.model.append(knn_clf)\n if len(test_y.unique()) == 1:\n knn_score = accuracy_score(test_y, prediction_knn)\n self.logger_object.log('Accuracy for KNN clf' + str(knn_score))\n else:\n knn_score = roc_auc_score(test_y, prediction_knn) # AUC for Random Forest\n self.logger_object.log('AUC for KNN' + str(knn_score))\n y_score = knn_clf.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, 
pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n self.score.append(knn_score)\n\n if len(test_y.unique()) != 1:\n \"\"\" 5. SVC \"\"\"\n self.model_name.append(\"SVC\")\n title = title_generator.format(\"SVC\")\n svc_clf = self.get_best_params_for_svm_fraud_detection_and_scania(train_x, train_y)\n prediction_svc = svc_clf.predict(test_x)\n self.model.append(svc_clf)\n if len(test_y.unique()) == 1:\n svc_score = accuracy_score(test_y, prediction_svc)\n self.logger_object.log('Accuracy for svc clf' + str(svc_score))\n else:\n svc_score = roc_auc_score(test_y, prediction_svc) # AUC for Random Forest\n self.logger_object.log('AUC for svc' + str(svc_score))\n y_score = svc_clf.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n self.score.append(svc_score)\n\n AccurayGraph().save_accuracy_bar_graph(\n model_name_list=self.model_name,\n accuracy_score_list=self.score,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"Model List\",\n y_label=\"Accuracy score comparison {}\".format(self.model_name),\n title=\"Cluster \" + str(cluster_no) + \"Accuracy Score \"\n )\n execution_model_comparison_id = str(uuid.uuid4())\n\n for data in zip(self.model_name, self.score):\n self.save_accuracy_data(model_name=data[0], score=data[1],\n execution_model_comparision_id=execution_model_comparison_id)\n return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)\n except Exception as e:\n model_finder = ModelFinderException(\n \"Model Selection Failed in module [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_model_mushroom.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def save_accuracy_data(self, model_name, score, execution_model_comparision_id):\n try:\n accuracy_graph_data = AccurayGraph(project_id=self.project_id,\n model_accuracy_dict={'model_name': model_name,\n 'score': score,\n 'execution_model_comparision': execution_model_comparision_id,\n 'training_execution_id': self.logger_object.execution_id}\n )\n accuracy_graph_data.save_accuracy()\n except Exception as e:\n model_finder = ModelFinderException(\n \"save model accuracy [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_model_mushroom.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_svm_fraud_detection_and_scania(self, train_x, train_y):\n \"\"\"\n Method Name: get_best_params_for_naive_bayes\n Description: get the parameters for the SVM Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n try:\n self.logger_object.log('Entered the get_best_params_for_svm method of the Model_Finder class')\n\n # initializing with different combination of parameters\n param_grid = {\"kernel\": ['rbf', 'sigmoid'],\n \"C\": [0.1, 0.5, 1.0],\n \"random_state\": [0, 100, 200, 300]}\n\n # 
Creating an object of the Grid Search class\n            grid = GridSearchCV(estimator=SVC(), param_grid=param_grid, cv=5, verbose=3)\n            # finding the best parameters\n            grid.fit(train_x, train_y)\n\n            # extracting the best parameters\n            kernel = grid.best_params_['kernel']\n            C = grid.best_params_['C']\n            random_state = grid.best_params_['random_state']\n\n            # creating a new model with the best parameters\n            sv_classifier = SVC(kernel=kernel, C=C, random_state=random_state, probability=True)\n            # training the new model\n            sv_classifier.fit(train_x, train_y)\n            self.logger_object.log('SVM best params: ' + str(\n                grid.best_params_) + '. Exited the get_best_params_for_svm method of the Model_Finder class')\n            return sv_classifier\n        except Exception as e:\n            model_finder = ModelFinderException(\n                \"Failed in [{0}] class [{1}] method [{2}]\"\n                    .format(self.__module__, ModelFinder.__name__,\n                            self.get_best_params_for_svm_fraud_detection_and_scania.__name__))\n            raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n    def get_best_params_for_xgboost_fraud_detection(self, train_x, train_y):\n        \"\"\"\n        Method Name: get_best_params_for_xgboost_fraud_detection\n        Description: get the parameters for XGBoost Algorithm which give the best accuracy.\n        Use Hyper Parameter Tuning.\n        Output: The model with the best parameters\n        On Failure: Raise Exception\n\n        Written By: iNeuron Intelligence\n        Version: 1.0\n        Revisions: None\n\n        \"\"\"\n        try:\n            # initializing with different combination of parameters\n            self.logger_object.log('Entered the get_best_params_for_xgboost method of the Model_Finder class')\n            param_grid_xgboost = {\n                \"n_estimators\": [100, 130], \"criterion\": ['gini', 'entropy'],\n\n                \"max_depth\": range(8, 10, 1)\n\n            }\n            # Creating an object of the Grid Search class\n            grid = GridSearchCV(XGBClassifier(objective='binary:logistic'), param_grid_xgboost, verbose=3,\n                                cv=5)\n            # finding the best parameters\n            grid.fit(train_x, train_y)\n\n            # extracting the best parameters\n            criterion = grid.best_params_['criterion']\n            max_depth = grid.best_params_['max_depth']\n            n_estimators = grid.best_params_['n_estimators']\n\n            # creating a new model with the best parameters\n            xgb = XGBClassifier(criterion=criterion, max_depth=max_depth, n_estimators=n_estimators,\n                                n_jobs=-1)\n            # training the new model\n            xgb.fit(train_x, train_y)\n            self.logger_object.log('XGBoost best params: ' + str(\n                grid.best_params_) + '. 
Exited the get_best_params_for_xgboost method of the Model_Finder class')\n return xgb\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_xgboost_fraud_detection.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_model_fraud_detection(self, train_x, train_y, test_x, test_y, cluster_no=None):\n \"\"\"\n Method Name: get_best_model\n Description: Find out the Model which has the best AUC score.\n Output: The best model name and the model object\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n\n # create best model for XGBoost\n try:\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n\n title_generator = \" Cluster \" + cluster_no + \" model {}\"\n\n # XG Boost model\n\n self.model_name.append('XG_BOOST')\n title = title_generator.format('XG_BOOST')\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n xgboost = self.get_best_params_for_xgboost_fraud_detection(train_x, train_y)\n prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model\n\n if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. We\n # will use accuracy in that case\n xgboost_score = accuracy_score(test_y, prediction_xgboost)\n self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score)) # Log AUC\n else:\n xgboost_score = roc_auc_score(test_y, prediction_xgboost) # AUC for XGBoost\n self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC\n y_score = xgboost.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.model.append(xgboost)\n self.score.append(xgboost_score)\n\n # create best model for naive bayes\n self.model_name.append('NAIVE_BAYES')\n title = title_generator.format('NAIVE_BAYES')\n naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)\n prediction_naive_bayes = naive_bayes.predict(test_x) # prediction using the Random Forest Algorithm\n self.model.append(naive_bayes)\n if len(test_y.unique()) == 1: # if there is only one label in y,\n # then roc_auc_score returns error. 
We will use accuracy in that case\n naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)\n self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))\n else:\n naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes) # AUC for Random Forest\n self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))\n y_score = naive_bayes.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[0])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.score.append(naive_bayes_score)\n # create best model for Random forest\n self.model_name.append('Random_Forest')\n title = title_generator.format('Random_Forest')\n random_forest = self.get_best_params_for_svm_fraud_detection_and_scania(train_x, train_y)\n prediction_random_forest = random_forest.predict(test_x)\n self.model.append(random_forest)\n if len(test_y.unique()) == 1:\n random_forest_score = accuracy_score(test_y, prediction_random_forest)\n self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))\n else:\n random_forest_score = roc_auc_score(test_y, prediction_random_forest) # AUC for Random Forest\n self.logger_object.log('AUC for Random Forest' + str(random_forest_score))\n y_score = random_forest.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.score.append(random_forest_score)\n\n # create best model for KNN\n self.model_name.append('KNN')\n title = title_generator.format('KNN')\n knn_clf = self.get_best_params_for_KNN_fraud_detection(train_x, train_y)\n prediction_knn = knn_clf.predict(test_x)\n self.model.append(knn_clf)\n if len(test_y.unique()) == 1:\n knn_score = accuracy_score(test_y, prediction_knn)\n self.logger_object.log('Accuracy for KNN clf' + str(knn_score))\n else:\n knn_score = roc_auc_score(test_y, prediction_knn) # AUC for Random Forest\n self.logger_object.log('AUC for KNN' + str(knn_score))\n y_score = knn_clf.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n self.score.append(knn_score)\n\n if len(test_y.unique()) != 1:\n \"\"\" 5. 
SVC \"\"\"\n self.model_name.append(\"SVC\")\n title = title_generator.format(\"SVC\")\n svc_clf = self.get_best_params_for_svm_phising_classifier(train_x, train_y)\n prediction_svc = svc_clf.predict(test_x)\n self.model.append(svc_clf)\n if len(test_y.unique()) == 1:\n svc_score = accuracy_score(test_y, prediction_svc)\n self.logger_object.log('Accuracy for svc clf' + str(svc_score))\n else:\n svc_score = roc_auc_score(test_y, prediction_svc) # AUC for Random Forest\n self.logger_object.log('AUC for svc' + str(svc_score))\n y_score = svc_clf.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n self.score.append(svc_score)\n\n AccurayGraph().save_accuracy_bar_graph(\n model_name_list=self.model_name,\n accuracy_score_list=self.score,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"Model List\",\n y_label=\"Accuracy score comparison {}\".format(self.model_name),\n title=\"Cluster \" + str(cluster_no) + \"Accuracy Score \"\n )\n execution_model_comparison_id = str(uuid.uuid4())\n\n for data in zip(self.model_name, self.score):\n self.save_accuracy_data(model_name=data[0], score=data[1],\n execution_model_comparision_id=execution_model_comparison_id)\n\n return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)\n\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_model_fraud_detection.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_naive_bayes_credit_defaulter(self, train_x, train_y):\n \"\"\"\n Method Name: get_best_params_for_naive_bayes\n Description: get the parameters for the Naive Bayes's Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n try:\n self.logger_object.log('Entered the get_best_params_for_naive_bayes method of the Model_Finder class')\n\n # initializing with different combination of parameters\n param_grid = {\"var_smoothing\": [1e-9, 0.1, 0.001, 0.5, 0.05, 0.01, 1e-8, 1e-7, 1e-6, 1e-10, 1e-11]}\n\n # Creating an object of the Grid Search class\n grid = GridSearchCV(estimator=GaussianNB(), param_grid=param_grid, cv=3, verbose=3)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n var_smoothing = grid.best_params_['var_smoothing']\n\n # creating a new model with the best parameters\n gnb = GaussianNB(var_smoothing=var_smoothing)\n # training the mew model\n gnb.fit(train_x, train_y)\n self.logger_object.log('Naive Bayes best params: ' + str(\n grid.best_params_) + '. 
Exited the get_best_params_for_naive_bayes method of the Model_Finder class')\n return gnb\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_naive_bayes_credit_defaulter.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_xgboost_credit_defaulter(self, train_x, train_y):\n\n \"\"\"\n Method Name: get_best_params_for_xgboost\n Description: get the parameters for XGBoost Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n try:\n self.logger_object.log('Entered the get_best_params_for_xgboost method of the Model_Finder class')\n # initializing with different combination of parameters\n param_grid_xgboost = {\n\n \"n_estimators\": [50, 100, 130],\n \"max_depth\": range(3, 11, 1),\n \"random_state\": [0, 50, 100]\n\n }\n # Creating an object of the Grid Search class\n grid = GridSearchCV(XGBClassifier(objective='binary:logistic'), param_grid_xgboost, verbose=3,\n cv=2, n_jobs=-1)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n random_state = grid.best_params_['random_state']\n max_depth = grid.best_params_['max_depth']\n n_estimators = grid.best_params_['n_estimators']\n\n # creating a new model with the best parameters\n xgb = XGBClassifier(random_state=random_state, max_depth=max_depth,\n n_estimators=n_estimators, n_jobs=-1)\n # training the mew model\n xgb.fit(train_x, train_y)\n self.logger_object.log('XGBoost best params: ' + str(\n grid.best_params_) + '. Exited the get_best_params_for_xgboost method of the Model_Finder class')\n return xgb\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_xgboost_credit_defaulter.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_model_credit_deaulter(self, train_x, train_y, test_x, test_y, cluster_no):\n \"\"\"\n Method Name: get_best_model\n Description: Find out the Model which has the best AUC score.\n Output: The best model name and the model object\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n # create best model for XGBoost\n try:\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n\n title_generator = \" Cluster \" + cluster_no + \" model {}\"\n\n # XG Boost model\n\n self.model_name.append('XG_BOOST')\n title = title_generator.format('XG_BOOST')\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n xgboost = self.get_best_params_for_xgboost_credit_defaulter(train_x, train_y)\n prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model\n\n if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. 
We\n # will use accuracy in that case\n xgboost_score = accuracy_score(test_y, prediction_xgboost)\n self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score)) # Log AUC\n else:\n xgboost_score = roc_auc_score(test_y, prediction_xgboost) # AUC for XGBoost\n self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC\n y_score = xgboost.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.model.append(xgboost)\n self.score.append(xgboost_score)\n\n # create best model for naive bayes\n self.model_name.append('NAIVE_BAYES')\n title = title_generator.format('NAIVE_BAYES')\n naive_bayes = self.get_best_params_for_naive_bayes_credit_defaulter(train_x, train_y)\n prediction_naive_bayes = naive_bayes.predict(test_x) # prediction using the Random Forest Algorithm\n self.model.append(naive_bayes)\n if len(test_y.unique()) == 1: # if there is only one label in y,\n # then roc_auc_score returns error. We will use accuracy in that case\n naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)\n self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))\n else:\n naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes) # AUC for Random Forest\n self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))\n y_score = naive_bayes.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[0])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.score.append(naive_bayes_score)\n # create best model for Random forest\n self.model_name.append('Random_Forest')\n title = title_generator.format('Random_Forest')\n random_forest = self.get_best_params_for_random_forest(train_x, train_y)\n prediction_random_forest = random_forest.predict(test_x)\n self.model.append(random_forest)\n if len(test_y.unique()) == 1:\n random_forest_score = accuracy_score(test_y, prediction_random_forest)\n self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))\n else:\n random_forest_score = roc_auc_score(test_y, prediction_random_forest) # AUC for Random Forest\n self.logger_object.log('AUC for Random Forest' + str(random_forest_score))\n y_score = random_forest.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.score.append(random_forest_score)\n\n # create best model for KNN\n self.model_name.append('KNN')\n title = title_generator.format('KNN')\n knn_clf = self.get_best_params_for_KNN(train_x, train_y)\n prediction_knn = knn_clf.predict(test_x)\n self.model.append(knn_clf)\n if len(test_y.unique()) == 1:\n knn_score = accuracy_score(test_y, prediction_knn)\n self.logger_object.log('Accuracy for KNN clf' + str(knn_score))\n else:\n knn_score = roc_auc_score(test_y, prediction_knn) # AUC for Random Forest\n self.logger_object.log('AUC for KNN' + str(knn_score))\n y_score = knn_clf.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, 
pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n self.score.append(knn_score)\n\n if len(test_y.unique()) != 1:\n \"\"\" 5. SVC \"\"\"\n self.model_name.append(\"SVC\")\n title = title_generator.format(\"SVC\")\n svc_clf = self.get_best_params_for_svm_phising_classifier(train_x, train_y)\n prediction_svc = svc_clf.predict(test_x)\n self.model.append(svc_clf)\n if len(test_y.unique()) == 1:\n svc_score = accuracy_score(test_y, prediction_svc)\n self.logger_object.log('Accuracy for svc clf' + str(svc_score))\n else:\n svc_score = roc_auc_score(test_y, prediction_svc) # AUC for Random Forest\n self.logger_object.log('AUC for svc' + str(svc_score))\n y_score = svc_clf.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n self.score.append(svc_score)\n\n AccurayGraph().save_accuracy_bar_graph(\n model_name_list=self.model_name,\n accuracy_score_list=self.score,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"Model List\",\n y_label=\"Accuracy score comparison {}\".format(self.model_name),\n title=\"Cluster \" + str(cluster_no) + \"Accuracy Score \"\n )\n execution_model_comparison_id = str(uuid.uuid4())\n\n for data in zip(self.model_name, self.score):\n self.save_accuracy_data(model_name=data[0], score=data[1],\n execution_model_comparision_id=execution_model_comparison_id)\n\n return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)\n\n\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_model_credit_deaulter.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n \"\"\"phishing classifier\"\"\"\n\n def get_best_params_for_svm_phising_classifier(self, train_x, train_y):\n \"\"\"\n Method Name: get_best_params_for_naive_bayes\n Description: get the parameters for the SVM Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n try:\n self.logger_object.log('Entered the get_best_params_for_svm method of the Model_Finder class')\n\n # initializing with different combination of parameters\n param_grid = {\"kernel\": ['rbf', 'sigmoid'],\n \"C\": [0.1, 0.5, 1.0],\n \"random_state\": [0, 100, 200, 300]}\n\n # Creating an object of the Grid Search class\n grid = GridSearchCV(SVC(), param_grid=param_grid, cv=5, verbose=3)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n kernel = grid.best_params_['kernel']\n c = grid.best_params_['C']\n random_state = grid.best_params_['random_state']\n\n # creating a new model with the best parameters\n sv_classifier = SVC(kernel=kernel, C=c, random_state=random_state, probability=True)\n # training the mew model\n sv_classifier.fit(train_x, train_y)\n self.logger_object.log('SVM best params: ' + str(\n grid.best_params_) + '. 
Exited the get_best_params_for_svm method of the Model_Finder class')\n\n return sv_classifier\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_svm_phising_classifier.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_xgboost_phising_classifier(self, train_x, train_y):\n\n \"\"\"\n Method Name: get_best_params_for_xgboost\n Description: get the parameters for XGBoost Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n try:\n self.logger_object.log('Entered the get_best_params_for_xgboost method of the Model_Finder class')\n # initializing with different combination of parameters\n param_grid_xgboost = {\n\n \"n_estimators\": [100, 130], \"criterion\": ['gini', 'entropy'],\n \"max_depth\": range(8, 10, 1)\n\n }\n # Creating an object of the Grid Search class\n grid = GridSearchCV(XGBClassifier(objective='binary:logistic'), param_grid_xgboost, verbose=3,\n cv=5)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n criterion = grid.best_params_['criterion']\n max_depth = grid.best_params_['max_depth']\n n_estimators = grid.best_params_['n_estimators']\n\n # creating a new model with the best parameters\n xgb = XGBClassifier(criterion=criterion, max_depth=max_depth, n_estimators=n_estimators,\n n_jobs=-1)\n # training the mew model\n xgb.fit(train_x, train_y)\n self.logger_object.log('XGBoost best params: ' + str(\n grid.best_params_) + '. Exited the get_best_params_for_xgboost method of the Model_Finder class')\n return xgb\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_xgboost_phising_classifier.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_model_phising_classifier(self, train_x, train_y, test_x, test_y, cluster_no):\n \"\"\"\n Method Name: get_best_model\n Description: Find out the Model which has the best AUC score.\n Output: The best model name and the model object\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n # create best model for XGBoost\n try:\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n\n title_generator = \" Cluster \" + cluster_no + \" model {}\"\n\n # XG Boost model\n\n self.model_name.append('XG_BOOST')\n title = title_generator.format('XG_BOOST')\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n xgboost = self.get_best_params_for_xgboost_phising_classifier(train_x, train_y)\n prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model\n\n if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. 
We\n # will use accuracy in that case\n xgboost_score = accuracy_score(test_y, prediction_xgboost)\n self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score)) # Log AUC\n else:\n xgboost_score = roc_auc_score(test_y, prediction_xgboost) # AUC for XGBoost\n self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC\n y_score = xgboost.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.model.append(xgboost)\n self.score.append(xgboost_score)\n\n # create best model for naive bayes\n self.model_name.append('NAIVE_BAYES')\n title = title_generator.format('NAIVE_BAYES')\n naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)\n prediction_naive_bayes = naive_bayes.predict(test_x) # prediction using the Random Forest Algorithm\n self.model.append(naive_bayes)\n if len(test_y.unique()) == 1: # if there is only one label in y,\n # then roc_auc_score returns error. We will use accuracy in that case\n naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)\n self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))\n else:\n naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes) # AUC for Random Forest\n self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))\n y_score = naive_bayes.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[0])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.score.append(naive_bayes_score)\n # create best model for Random forest\n self.model_name.append('Random_Forest')\n title = title_generator.format('Random_Forest')\n random_forest = self.get_best_params_for_random_forest(train_x, train_y)\n prediction_random_forest = random_forest.predict(test_x)\n self.model.append(random_forest)\n if len(test_y.unique()) == 1:\n random_forest_score = accuracy_score(test_y, prediction_random_forest)\n self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))\n else:\n random_forest_score = roc_auc_score(test_y, prediction_random_forest) # AUC for Random Forest\n self.logger_object.log('AUC for Random Forest' + str(random_forest_score))\n y_score = random_forest.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.score.append(random_forest_score)\n\n # create best model for KNN\n self.model_name.append('KNN')\n title = title_generator.format('KNN')\n knn_clf = self.get_best_params_for_KNN(train_x, train_y)\n prediction_knn = knn_clf.predict(test_x)\n self.model.append(knn_clf)\n if len(test_y.unique()) == 1:\n knn_score = accuracy_score(test_y, prediction_knn)\n self.logger_object.log('Accuracy for KNN clf' + str(knn_score))\n else:\n knn_score = roc_auc_score(test_y, prediction_knn) # AUC for Random Forest\n self.logger_object.log('AUC for KNN' + str(knn_score))\n y_score = knn_clf.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, 
pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n self.score.append(knn_score)\n\n if len(test_y.unique()) != 1:\n \"\"\" 5. SVC \"\"\"\n self.model_name.append(\"SVC\")\n title = title_generator.format(\"SVC\")\n svc_clf = self.get_best_params_for_svm_phising_classifier(train_x, train_y)\n prediction_svc = svc_clf.predict(test_x)\n self.model.append(svc_clf)\n if len(test_y.unique()) == 1:\n svc_score = accuracy_score(test_y, prediction_svc)\n self.logger_object.log('Accuracy for svc clf' + str(svc_score))\n else:\n svc_score = roc_auc_score(test_y, prediction_svc) # AUC for Random Forest\n self.logger_object.log('AUC for svc' + str(svc_score))\n y_score = svc_clf.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n self.score.append(svc_score)\n\n AccurayGraph().save_accuracy_bar_graph(\n model_name_list=self.model_name,\n accuracy_score_list=self.score,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"Model List\",\n y_label=\"Accuracy score comparison {}\".format(self.model_name),\n title=\"Cluster \" + str(cluster_no) + \"Accuracy Score \"\n )\n execution_model_comparison_id = str(uuid.uuid4())\n\n for data in zip(self.model_name, self.score):\n self.save_accuracy_data(model_name=data[0], score=data[1],\n execution_model_comparision_id=execution_model_comparison_id)\n\n return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)\n\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_model_phising_classifier.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n \"\"\"Forest cover classifier \"\"\"\n\n def get_best_params_for_random_forest_forest_cover_clf(self, train_x, train_y):\n \"\"\"\n Method Name: get_best_params_for_random_forest\n Description: get the parameters for Random Forest Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n\n try:\n self.logger_object.log('Entered the get_best_params_for_random_forest method of the Model_Finder class')\n # initializing with different combination of parameters\n param_grid = {\"n_estimators\": [10, 50, 100, 130], \"criterion\": ['gini', 'entropy'],\n \"max_depth\": range(2, 4, 1), \"max_features\": ['auto', 'log2']}\n\n # Creating an object of the Grid Search class\n grid = GridSearchCV(estimator=RandomForestClassifier(), param_grid=param_grid, cv=5, verbose=3, n_jobs=-1)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n criterion = grid.best_params_['criterion']\n max_depth = grid.best_params_['max_depth']\n max_features = grid.best_params_['max_features']\n n_estimators = grid.best_params_['n_estimators']\n\n # creating a new model with the best parameters\n clf = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion,\n 
max_depth=max_depth, max_features=max_features)\n # training the mew model\n clf.fit(train_x, train_y)\n self.logger_object.log('Random Forest best params: ' + str(\n grid.best_params_) + '. Exited the get_best_params_for_random_forest method of the Model_Finder class')\n\n return clf\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_random_forest_forest_cover_clf.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_xgboost_forest_cover_clf(self, train_x, train_y):\n\n \"\"\"\n Method Name: get_best_params_for_xgboost\n Description: get the parameters for XGBoost Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n\n try:\n self.logger_object.log('Entered the get_best_params_for_xgboost method of the Model_Finder class')\n # initializing with different combination of parameters\n param_grid_xgboost = {\n\n 'learning_rate': [0.5, 0.1, 0.01, 0.001],\n 'max_depth': [3, 5, 10, 20],\n 'n_estimators': [10, 50, 100, 200]\n\n }\n # Creating an object of the Grid Search class\n grid = GridSearchCV(XGBClassifier(objective='multi:softprob'), param_grid_xgboost, verbose=3, cv=5,\n n_jobs=-1)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n learning_rate = grid.best_params_['learning_rate']\n max_depth = grid.best_params_['max_depth']\n n_estimators = grid.best_params_['n_estimators']\n\n # creating a new model with the best parameters\n xgb = XGBClassifier(learning_rate=learning_rate, max_depth=max_depth, n_estimators=n_estimators)\n # training the mew model\n xgb.fit(train_x, train_y)\n self.logger_object.log('XGBoost best params: ' + str(\n grid.best_params_) + '. Exited the get_best_params_for_xgboost method of the Model_Finder class')\n return xgb\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_xgboost_forest_cover_clf.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_model_forest_cover(self, train_x, train_y, test_x, test_y, cluster_no=None):\n \"\"\"\n Method Name: get_best_model\n Description: Find out the Model which has the best AUC score.\n Output: The best model name and the model object\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n\n # create best model for XGBoost\n try:\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n if cluster_no is not None:\n title_generator = \" Cluster \" + cluster_no + \" model {}\"\n else:\n title_generator = \"Model {}\"\n # XG Boost model\n self.model_name.append('XG_BOOST')\n title = title_generator.format('XG_BOOST')\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n xgboost = self.get_best_params_for_xgboost(train_x, train_y)\n prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model\n\n if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. 
We\n # will use accuracy in that case\n xgboost_score = accuracy_score(test_y, prediction_xgboost)\n self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score)) # Log AUC\n else:\n y_scores = xgboost.predict_proba(test_x)\n AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, xgboost,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=\"XGBoost ROC curve\"\n )\n xgboost_score = roc_auc_score(test_y, y_scores, multi_class='ovr') # AUC for XGBoost\n self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC\n\n self.model.append(xgboost)\n self.score.append(xgboost_score)\n\n # create best model for naive bayes\n self.model_name.append('NAIVE_BAYES')\n title = title_generator.format('NAIVE_BAYES')\n naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)\n prediction_naive_bayes = naive_bayes.predict(test_x) # prediction using the Random Forest Algorithm\n self.model.append(naive_bayes)\n if len(test_y.unique()) == 1: # if there is only one label in y,\n # then roc_auc_score returns error. We will use accuracy in that case\n naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)\n self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))\n else:\n y_scores = naive_bayes.predict_proba(test_x)\n AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, naive_bayes,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title + self.model_name[-1]\n )\n naive_bayes_score = roc_auc_score(test_y, y_scores,\n multi_class='ovr') # AUC for Random Forest\n self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))\n\n self.score.append(naive_bayes_score)\n # create best model for Random forest\n self.model_name.append('Random_Forest')\n title = title_generator.format('Random_Forest')\n random_forest = self.get_best_params_for_random_forest(train_x, train_y)\n prediction_random_forest = random_forest.predict(test_x)\n self.model.append(random_forest)\n if len(test_y.unique()) == 1:\n random_forest_score = accuracy_score(test_y, prediction_random_forest)\n self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))\n else:\n y_scores = random_forest.predict_proba(test_x)\n AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, random_forest,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title + self.model_name[-1]\n )\n random_forest_score = roc_auc_score(test_y, y_scores,\n multi_class='ovr') # AUC for Random Forest\n self.logger_object.log('AUC for Random Forest' + str(random_forest_score))\n\n self.score.append(random_forest_score)\n\n # create best model for KNN\n self.model_name.append('KNN')\n title = title_generator.format('KNN')\n knn_clf = self.get_best_params_for_KNN(train_x, train_y)\n prediction_knn = knn_clf.predict(test_x)\n self.model.append(knn_clf)\n if len(test_y.unique()) == 1:\n knn_score = accuracy_score(test_y, prediction_knn)\n self.logger_object.log('Accuracy for KNN clf' + str(knn_score))\n else:\n y_scores = knn_clf.predict_proba(test_x)\n AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, knn_clf,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title + self.model_name[-1]\n )\n knn_score = roc_auc_score(test_y, y_scores, multi_class='ovr') # AUC for Random 
Forest\n self.logger_object.log('AUC for KNN' + str(knn_score))\n\n self.score.append(knn_score)\n\n \"\"\" 5. SVC \"\"\"\n if len(test_y.unique()) != 1:\n self.model_name.append(\"SVC\")\n title = title_generator.format(\"SVC\")\n svc_clf = self.get_best_params_for_svm_fraud_detection_and_scania(train_x, train_y)\n prediction_svc = svc_clf.predict(test_x)\n self.model.append(svc_clf)\n if len(test_y.unique()) == 1:\n svc_score = accuracy_score(test_y, prediction_svc)\n self.logger_object.log('Accuracy for svc clf' + str(svc_score))\n else:\n y_scores = svc_clf.predict_proba(test_x)\n AccurayGraph().save_plot_multiclass_roc_curve(test_y, y_scores, svc_clf,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title + self.model_name[-1]\n )\n svc_score = roc_auc_score(test_y, y_scores, multi_class='ovr') # AUC for Random Forest\n self.logger_object.log('AUC for svc' + str(svc_score))\n\n self.score.append(svc_score)\n\n AccurayGraph().save_accuracy_bar_graph(\n model_name_list=self.model_name,\n accuracy_score_list=self.score,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"Model List\",\n y_label=\"Accuracy score comparison {}\".format(self.model_name),\n title=\"Accuracy Score \"\n )\n execution_model_comparison_id = str(uuid.uuid4())\n\n for data in zip(self.model_name, self.score):\n self.save_accuracy_data(model_name=data[0], score=data[1],\n execution_model_comparision_id=execution_model_comparison_id)\n\n # comparing the two models\n return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)\n\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_model_forest_cover.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_model_scania_truck(self, train_x, train_y, test_x, test_y, cluster_no=None):\n \"\"\"\n Method Name: get_best_model\n Description: Find out the Model which has the best AUC score.\n Output: The best model name and the model object\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n # create best model for XGBoost\n try:\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n if cluster_no is not None:\n title_generator = \" Cluster \" + cluster_no + \" model {}\"\n else:\n title_generator = \"Model {}\"\n # XG Boost model\n self.model_name.append('XG_BOOST')\n title = title_generator.format('XG_BOOST')\n\n xgboost = self.get_best_params_for_xgboost(train_x, train_y)\n prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model\n\n if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. 
We\n # will use accuracy in that case\n xgboost_score = accuracy_score(test_y, prediction_xgboost)\n self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score)) # Log AUC\n else:\n xgboost_score = roc_auc_score(test_y, prediction_xgboost) # AUC for XGBoost\n self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC\n y_score = xgboost.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.model.append(xgboost)\n self.score.append(xgboost_score)\n \"\"\"\n # create best model for naive bayes\n self.model_name.append('NAIVE_BAYES')\n title = title_generator.format('NAIVE_BAYES')\n naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)\n prediction_naive_bayes = naive_bayes.predict(test_x) # prediction using the Random Forest Algorithm\n self.model.append(naive_bayes)\n if len(test_y.unique()) == 1: # if there is only one label in y,\n # then roc_auc_score returns error. We will use accuracy in that case\n naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)\n self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))\n else:\n naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes) # AUC for Random Forest\n self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))\n y_score = naive_bayes.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[0])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.score.append(naive_bayes_score)\n \"\"\"\n # create best model for Random forest\n self.model_name.append('Random_Forest')\n title = title_generator.format('Random_Forest')\n random_forest = self.get_best_params_for_random_forest(train_x, train_y)\n prediction_random_forest = random_forest.predict(test_x)\n self.model.append(random_forest)\n if len(test_y.unique()) == 1:\n random_forest_score = accuracy_score(test_y, prediction_random_forest)\n self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))\n else:\n random_forest_score = roc_auc_score(test_y, prediction_random_forest) # AUC for Random Forest\n self.logger_object.log('AUC for Random Forest' + str(random_forest_score))\n y_score = random_forest.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.score.append(random_forest_score)\n \"\"\"\n # create best model for KNN\n self.model_name.append('KNN')\n title = title_generator.format('KNN')\n knn_clf = self.get_best_params_for_KNN(train_x, train_y)\n prediction_knn = knn_clf.predict(test_x)\n self.model.append(knn_clf)\n if len(test_y.unique()) == 1:\n knn_score = accuracy_score(test_y, prediction_knn)\n self.logger_object.log('Accuracy for KNN clf' + str(knn_score))\n else:\n knn_score = roc_auc_score(test_y, prediction_knn) # AUC for Random Forest\n self.logger_object.log('AUC for KNN' + str(knn_score))\n y_score = knn_clf.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, 
y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n self.score.append(knn_score)\n \n 5. SVC \n if len(test_y.unique()) != 1:\n self.model_name.append(\"SVC\")\n title = title_generator.format(\"SVC\")\n svc_clf = self.get_best_params_for_svm_fraud_detection_and_scania(train_x, train_y)\n prediction_svc = svc_clf.predict(test_x)\n self.model.append(svc_clf)\n if len(test_y.unique()) == 1:\n svc_score = accuracy_score(test_y, prediction_svc)\n self.logger_object.log('Accuracy for svc clf' + str(svc_score))\n else:\n svc_score = roc_auc_score(test_y, prediction_svc) # AUC for Random Forest\n self.logger_object.log('AUC for svc' + str(svc_score))\n y_score = svc_clf.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n self.score.append(svc_score)\n \"\"\"\n AccurayGraph().save_accuracy_bar_graph(\n model_name_list=self.model_name,\n accuracy_score_list=self.score,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"Model List\",\n y_label=\"Accuracy score comparison {}\".format(self.model_name),\n title=\"Accuracy Score \"\n )\n execution_model_comparison_id = str(uuid.uuid4())\n for data in zip(self.model_name, self.score):\n self.save_accuracy_data(model_name=data[0], score=data[1],\n execution_model_comparision_id=execution_model_comparison_id)\n # comparing the two models\n return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_model_forest_cover.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_Random_Forest_Regressor(self, train_x, train_y):\n \"\"\"\n Method Name: get_best_params_for_Random_Forest_Regressor\n Description: get the parameters for Random_Forest_Regressor Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n try:\n self.logger_object.log('Entered the RandomForestReg method of the Model_Finder class')\n # initializing with different combination of parameters\n param_grid_random_forest_tree = {\n \"n_estimators\": [10, 20, 30],\n \"max_features\": [\"auto\", \"sqrt\", \"log2\"],\n \"min_samples_split\": [2, 4, 8],\n \"bootstrap\": [True, False]\n }\n\n # Creating an object of the Grid Search class\n grid = GridSearchCV(RandomForestRegressor(), param_grid_random_forest_tree, verbose=3, cv=5)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n n_estimators = grid.best_params_['n_estimators']\n max_features = grid.best_params_['max_features']\n min_samples_split = grid.best_params_['min_samples_split']\n bootstrap = grid.best_params_['bootstrap']\n\n # creating a new model with the best parameters\n random_forest_reg = RandomForestRegressor(n_estimators=n_estimators,\n max_features=max_features,\n 
min_samples_split=min_samples_split,\n bootstrap=bootstrap)\n # training the mew models\n random_forest_reg.fit(train_x, train_y)\n self.logger_object.log('RandomForestReg best params: ' + str(\n grid.best_params_) + '. Exited the RandomForestReg method of the Model_Finder class')\n return random_forest_reg\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_Random_Forest_Regressor.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_linearReg(self, train_x, train_y):\n\n \"\"\"\n Method Name: get_best_params_for_linearReg\n Description: get the parameters for LinearReg Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n try:\n self.logger_object.log('Entered the get_best_params_for_linearReg method of the Model_Finder class')\n # initializing with different combination of parameters\n param_grid_linear_reg = {\n 'fit_intercept': [True, False], 'normalize': [True, False], 'copy_X': [True, False]\n\n }\n # Creating an object of the Grid Search class\n grid = GridSearchCV(LinearRegression(), param_grid_linear_reg, verbose=3, cv=5)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n fit_intercept = grid.best_params_['fit_intercept']\n normalize = grid.best_params_['normalize']\n copy_x = grid.best_params_['copy_X']\n\n # creating a new model with the best parameters\n lin_reg = LinearRegression(fit_intercept=fit_intercept, normalize=normalize,\n copy_X=copy_x)\n # training the mew model\n lin_reg.fit(train_x, train_y)\n self.logger_object.log('LinearRegression best params: ' + str(\n grid.best_params_) + '. 
Exited the get_best_params_for_linearReg method of the Model_Finder class')\n return lin_reg\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_linearReg.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_model_for_reg(self, train_x, train_y, test_x, test_y, cluster_no=None):\n \"\"\"\n Method Name: get_best_model\n Description: Find out the Model which has the best AUC score.\n Output: The best model name and the model object\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n try:\n\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n title = \"Cluster {} \".format(cluster_no) if cluster_no is not None else ''\n\n # Linear Regression Training\n\n self.model_name.append(\"Linear_Regression\")\n linear_reg = self.get_best_params_for_linearReg(train_x, train_y)\n prediction_linear_reg = linear_reg.predict(test_x) # Predictions using the LinearReg Model\n linear_reg_error = r2_score(test_y, prediction_linear_reg)\n self.model.append(linear_reg)\n self.score.append(linear_reg_error)\n\n # Decision Tree training\n self.model_name.append('Decision_Tree')\n decision_tree_reg = self.get_best_params_for_decision_tree_regressor(train_x, train_y)\n\n self.model.append(decision_tree_reg)\n prediction_decision_tree_reg = decision_tree_reg.predict(\n test_x) # Predictions using the decisionTreeReg Model\n decision_tree_reg_error = r2_score(test_y, prediction_decision_tree_reg)\n\n self.score.append(decision_tree_reg_error)\n self.logger_object.log(\"Decision tree regression r2 score {}\".format(decision_tree_reg_error))\n\n # create best model for XGBoost\n self.model_name.append('XG_BOOST')\n xgboost = self.get_best_params_for_xgboost_regressor(train_x, train_y)\n prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model\n\n self.model.append(xgboost)\n prediction_xgboost_error = r2_score(test_y, prediction_xgboost)\n self.logger_object.log(\"XGBoost regression r2 score {}\".format(prediction_xgboost_error))\n self.score.append(prediction_xgboost_error)\n\n self.model_name.append(\"Random_Forest\")\n random_forest_reg = self.get_best_params_for_Random_Forest_Regressor(train_x, train_y)\n self.model.append(random_forest_reg)\n prediction_random_forest_reg = random_forest_reg.predict(test_x)\n prediction_random_forest_error = r2_score(test_y, prediction_random_forest_reg)\n self.score.append(prediction_random_forest_error)\n self.logger_object.log(\"Random Forest regression r2 score {}\".format(prediction_random_forest_error))\n\n self.model_name.append(\"SVR\")\n sv_reg = self.get_best_params_for_support_vector_regressor(train_x, train_y)\n self.model.append(sv_reg)\n prediction_sv_reg = sv_reg.predict(test_x)\n prediction_sv_reg_error = r2_score(test_y, prediction_sv_reg)\n self.score.append(prediction_sv_reg_error)\n self.logger_object.log(\"Support vector regression r2 score {}\".format(prediction_sv_reg_error))\n\n \"\"\"\n Visualization begin based on above model\n \"\"\"\n prediction_value = [prediction_linear_reg,\n prediction_decision_tree_reg,\n prediction_xgboost,\n prediction_random_forest_reg,\n prediction_sv_reg]\n\n for data in zip(self.model_name, prediction_value):\n AccurayGraph().save_scatter_plot(x_axis_data=test_y, y_axis_data=data[1],\n project_id=self.project_id,\n 
execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"True Target values\", y_label=\"Predicted Target value\",\n title=title + \"Predicted vs True \" + data[0])\n\n AccurayGraph().save_distribution_plot(data=numpy.abs(test_y - data[1]),\n label=\"Residual distribution plot\",\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"Error \",\n y_label=\"frequency or occurance\",\n title=title + \"{} residual distribution plot\".format(data[0])\n )\n\n mean_abs_error = []\n for data in prediction_value:\n mean_abs_error.append(numpy.mean(numpy.abs(test_y - data)))\n\n AccurayGraph().save_accuracy_bar_graph(\n model_name_list=self.model_name,\n accuracy_score_list=mean_abs_error,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"Model List\",\n y_label=\"MAE comparison between {}\".format(self.model_name),\n title=title + \"Mean Absolute error \"\n )\n # saving accuracy data based on model on mongo db\n execution_model_comparison_id = str(uuid.uuid4())\n for data in zip(self.model_name, self.score):\n self.save_accuracy_data(model_name=data[0], score=data[1],\n execution_model_comparision_id=execution_model_comparison_id)\n return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)\n\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_model_for_reg.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_decision_tree_regressor(self, train_x, train_y):\n \"\"\"\n Method Name: get_best_params_for_DecisionTreeRegressor\n Description: get the parameters for DecisionTreeRegressor Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n try:\n self.logger_object.log(\n 'Entered the get_best_params_for_DecisionTreeRegressor method of the Model_Finder class')\n # initializing with different combination of parameters\n param_grid_decision_tree = {\"criterion\": [\"mse\", \"friedman_mse\", \"mae\"],\n \"splitter\": [\"best\", \"random\"],\n \"max_features\": [\"auto\", \"sqrt\", \"log2\"],\n 'max_depth': range(2, 16, 2),\n 'min_samples_split': range(2, 16, 2)\n }\n\n # Creating an object of the Grid Search class\n grid = GridSearchCV(DecisionTreeRegressor(), param_grid_decision_tree, verbose=3, cv=5)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n criterion = grid.best_params_['criterion']\n splitter = grid.best_params_['splitter']\n max_features = grid.best_params_['max_features']\n max_depth = grid.best_params_['max_depth']\n min_samples_split = grid.best_params_['min_samples_split']\n\n # creating a new model with the best parameters\n decision_tree_reg = DecisionTreeRegressor(criterion=criterion, splitter=splitter,\n max_features=max_features, max_depth=max_depth,\n min_samples_split=min_samples_split)\n # training the mew models\n decision_tree_reg.fit(train_x, train_y)\n self.logger_object.log('Decision Tree repressor ' + str(\n grid.best_params_) + '. 
exited decision tree the Model_Finder class')\n return decision_tree_reg\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_decision_tree_regressor.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_xgboost_regressor(self, train_x, train_y):\n\n \"\"\"\n Method Name: get_best_params_for_xgboost\n Description: get the parameters for XGBoost Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n try:\n self.logger_object.log('Entered the get_best_params_for_xgboost method of the Model_Finder class')\n # initializing with different combination of parameters\n param_grid_xgboost = {\n\n 'learning_rate': [0.5, 0.1, 0.01, 0.001],\n 'max_depth': [3, 5, 10, 20],\n 'n_estimators': [10, 50, 100, 200]\n\n }\n # Creating an object of the Grid Search class\n grid = GridSearchCV(XGBRegressor(objective='reg:squarederror'), param_grid_xgboost, verbose=3,\n cv=5)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n learning_rate = grid.best_params_['learning_rate']\n max_depth = grid.best_params_['max_depth']\n n_estimators = grid.best_params_['n_estimators']\n\n # creating a new model with the best parameters objective='reg:linear'\n xgb = XGBRegressor(objective='reg:squarederror', learning_rate=learning_rate,\n max_depth=max_depth,\n n_estimators=n_estimators)\n # training the mew model\n xgb.fit(train_x, train_y)\n self.logger_object.log('XGBoost best params: ' + str(\n grid.best_params_) + '. 
Exited the get_best_params_for_xgboost method of the Model_Finder class')\n return xgb\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_xgboost_regressor.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_model_zomato_or_fitbit_or_climate_visibility(self, train_x, train_y, test_x, test_y, cluster_no=None):\n \"\"\"\n Method Name: get_best_model\n Description: Find out the Model which has the best AUC score.\n Output: The best model name and the model object\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n # create best model for KNN\n try:\n\n title = \"Cluster {} \".format(cluster_no) if cluster_no is not None else ''\n self.model_name.append('Decision_Tree')\n\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n decision_tree_reg = self.get_best_params_for_decision_tree_regressor(train_x, train_y)\n\n self.model.append(decision_tree_reg)\n prediction_decision_tree_reg = decision_tree_reg.predict(\n test_x) # Predictions using the decisionTreeReg Model\n decision_tree_reg_error = r2_score(test_y, prediction_decision_tree_reg)\n\n self.score.append(decision_tree_reg_error)\n self.logger_object.log(\"Decision tree regression r2 score {}\".format(decision_tree_reg_error))\n\n # create best model for XGBoost\n self.model_name.append('XG_BOOST')\n xgboost = self.get_best_params_for_xgboost_regressor(train_x, train_y)\n prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model\n\n self.model.append(xgboost)\n prediction_xgboost_error = r2_score(test_y, prediction_xgboost)\n self.logger_object.log(\"XGBoost regression r2 score {}\".format(prediction_xgboost_error))\n self.score.append(prediction_xgboost_error)\n\n self.model_name.append('RIDGE_REG')\n ridge_regression = self.get_best_params_for_ridge_regression(train_x, train_y)\n self.model.append(ridge_regression)\n prediction_ridge_regression = ridge_regression.predict(test_x)\n prediction_ridge_error = r2_score(test_y, prediction_ridge_regression)\n self.score.append(prediction_ridge_error)\n self.logger_object.log(\"RIDGE_REG regression r2 score {}\".format(prediction_ridge_error))\n\n self.model_name.append(\"Random_Forest\")\n random_forest_reg = self.get_best_params_for_Random_Forest_Regressor(train_x, train_y)\n self.model.append(random_forest_reg)\n prediction_random_forest_reg = random_forest_reg.predict(test_x)\n prediction_random_forest_error = r2_score(test_y, prediction_random_forest_reg)\n self.score.append(prediction_random_forest_error)\n self.logger_object.log(\"Random Forest regression r2 score {}\".format(prediction_ridge_error))\n\n self.model_name.append(\"SVR\")\n sv_reg = self.get_best_params_for_support_vector_regressor(train_x, train_y)\n self.model.append(sv_reg)\n prediction_sv_reg = sv_reg.predict(test_x)\n prediction_sv_reg_error = r2_score(test_y, prediction_sv_reg)\n self.score.append(prediction_sv_reg_error)\n self.logger_object.log(\"Support vector regression r2 score {}\".format(prediction_ridge_error))\n\n \"\"\"\n Visualization begin based on above model\n \"\"\"\n prediction_value = [prediction_decision_tree_reg,\n prediction_xgboost,\n prediction_ridge_regression,\n prediction_random_forest_reg,\n prediction_sv_reg]\n\n for data in zip(self.model_name, prediction_value):\n\n 
AccurayGraph().save_scatter_plot(x_axis_data=test_y, y_axis_data=data[1],\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"True Target values\", y_label=\"Predicted Target value\",\n title=title + \"Predicted vs True \" + data[0])\n\n AccurayGraph().save_distribution_plot(data=numpy.abs(test_y - data[1]),\n label=\"Residual distribution plot\",\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"Error \",\n y_label=\"frequency or occurrence\",\n title=title + \"{} residual distribution plot\".format(data[0])\n )\n\n mean_abs_error = []\n for data in prediction_value:\n mean_abs_error.append(numpy.mean(numpy.abs(test_y - data)))\n\n AccurayGraph().save_accuracy_bar_graph(\n model_name_list=self.model_name,\n accuracy_score_list=mean_abs_error,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"Model List\",\n y_label=\"MAE comparison between {}\".format(self.model_name),\n title=title + \"Mean Absolute error \"\n )\n execution_model_comparison_id = str(uuid.uuid4())\n for data in zip(self.model_name, self.score):\n self.save_accuracy_data(model_name=data[0], score=data[1],\n execution_model_comparision_id=execution_model_comparison_id)\n\n return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_model_zomato_or_fitbit_or_climate_visibility.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_naive_bayes(self, train_x, train_y):\n \"\"\"\n Method Name: get_best_params_for_naive_bayes\n Description: get the parameters for the Naive Bayes's Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n try:\n self.logger_object.log('Entered the get_best_params_for_naive_bayes method of the Model_Finder class')\n\n # initializing with different combination of parameters\n param_grid = {\"var_smoothing\": [1e-9, 0.1, 0.001, 0.5, 0.05, 0.01, 1e-8, 1e-7, 1e-6, 1e-10, 1e-11]}\n\n # Creating an object of the Grid Search class\n grid = GridSearchCV(estimator=self.gnb, param_grid=param_grid, cv=5, verbose=3)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n var_smoothing = grid.best_params_['var_smoothing']\n\n # creating a new model with the best parameters\n gnb = GaussianNB(var_smoothing=var_smoothing)\n # training the mew model\n gnb.fit(train_x, train_y)\n self.logger_object.log('Naive Bayes best params: ' + str(\n grid.best_params_) + '. 
Exited the get_best_params_for_naive_bayes method of the Model_Finder class')\n\n return gnb\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\".format(self.__module__, ModelFinder.__name__,\n self.get_best_params_for_naive_bayes.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_params_for_xgboost_income_prediction(self, train_x, train_y):\n \"\"\"\n Method Name: get_best_params_for_xgboost\n Description: get the parameters for XGBoost Algorithm which give the best accuracy.\n Use Hyper Parameter Tuning.\n Output: The model with the best parameters\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n try:\n self.logger_object.log('Entered the get_best_params_for_xgboost method of the Model_Finder class')\n # initializing with different combination of parameters\n param_grid_xgboost = {\n\n \"n_estimators\": [100, 130], \"criterion\": ['gini', 'entropy'],\n \"max_depth\": range(8, 10, 1)\n\n }\n # Creating an object of the Grid Search class\n grid = GridSearchCV(XGBClassifier(objective='binary:logistic'), param_grid_xgboost, verbose=3,\n cv=5)\n # finding the best parameters\n grid.fit(train_x, train_y)\n\n # extracting the best parameters\n criterion = grid.best_params_['criterion']\n max_depth = grid.best_params_['max_depth']\n n_estimators = grid.best_params_['n_estimators']\n\n # creating a new model with the best parameters\n xgb = XGBClassifier(criterion=criterion, max_depth=max_depth, n_estimators=n_estimators,\n n_jobs=-1)\n # training the mew model\n xgb.fit(train_x, train_y)\n self.logger_object.log('XGBoost best params: ' + str(\n grid.best_params_) + '. Exited the get_best_params_for_xgboost method of the Model_Finder class')\n return xgb\n except Exception as e:\n model_finder = ModelFinderException(\"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__,\n ModelFinder.__name__,\n self.get_best_params_for_xgboost_income_prediction.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_model_income_prediction(self, train_x, train_y, test_x, test_y, cluster_number):\n \"\"\"\n Method Name: get_best_model\n Description: Find out the Model which has the best AUC score.\n Output: The best model name and the model object\n On Failure: Raise Exception\n\n Written By: iNeuron Intelligence\n Version: 1.0\n Revisions: None\n\n \"\"\"\n # create best model for XGBoost\n try:\n title_generator = \" Cluster \" + cluster_number + \" model {}\"\n\n # XG Boost model\n\n self.model_name.append('XG_BOOST')\n title = title_generator.format('XG_BOOST')\n self.logger_object.log('Entered the get_best_model method of the Model_Finder class')\n xgboost = self.get_best_params_for_xgboost_income_prediction(train_x, train_y)\n prediction_xgboost = xgboost.predict(test_x) # Predictions using the XGBoost Model\n\n if len(test_y.unique()) == 1: # if there is only one label in y, then roc_auc_score returns error. 
We\n # will use accuracy in that case\n xgboost_score = accuracy_score(test_y, prediction_xgboost)\n self.logger_object.log('Accuracy for XGBoost:' + str(xgboost_score)) # Log AUC\n else:\n xgboost_score = roc_auc_score(test_y, prediction_xgboost) # AUC for XGBoost\n self.logger_object.log('AUC for XGBoost:' + str(xgboost_score)) # Log AUC\n y_score = xgboost.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.model.append(xgboost)\n self.score.append(xgboost_score)\n\n # create best model for naive bayes\n self.model_name.append('NAIVE_BAYES')\n title = title_generator.format('NAIVE_BAYES')\n naive_bayes = self.get_best_params_for_naive_bayes(train_x, train_y)\n prediction_naive_bayes = naive_bayes.predict(test_x) # prediction using the Random Forest Algorithm\n self.model.append(naive_bayes)\n if len(test_y.unique()) == 1: # if there is only one label in y,\n # then roc_auc_score returns error. We will use accuracy in that case\n naive_bayes_score = accuracy_score(test_y, prediction_naive_bayes)\n self.logger_object.log('Accuracy for naive bayes score' + str(naive_bayes_score))\n else:\n naive_bayes_score = roc_auc_score(test_y, prediction_naive_bayes) # AUC for Random Forest\n self.logger_object.log('AUC for naive bayes score:' + str(naive_bayes_score))\n y_score = naive_bayes.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[0])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.score.append(naive_bayes_score)\n # create best model for Random forest\n self.model_name.append('Random_Forest')\n title = title_generator.format('Random_Forest')\n random_forest = self.get_best_params_for_random_forest(train_x, train_y)\n prediction_random_forest = random_forest.predict(test_x)\n self.model.append(random_forest)\n if len(test_y.unique()) == 1:\n random_forest_score = accuracy_score(test_y, prediction_random_forest)\n self.logger_object.log('Accuracy for Random Forest' + str(random_forest_score))\n else:\n random_forest_score = roc_auc_score(test_y, prediction_random_forest) # AUC for Random Forest\n self.logger_object.log('AUC for Random Forest' + str(random_forest_score))\n y_score = random_forest.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n\n self.score.append(random_forest_score)\n\n # create best model for KNN\n self.model_name.append('KNN')\n title = title_generator.format('KNN')\n knn_clf = self.get_best_params_for_KNN(train_x, train_y)\n prediction_knn = knn_clf.predict(test_x)\n self.model.append(knn_clf)\n if len(test_y.unique()) == 1:\n knn_score = accuracy_score(test_y, prediction_knn)\n self.logger_object.log('Accuracy for KNN clf' + str(knn_score))\n else:\n knn_score = roc_auc_score(test_y, prediction_knn) # AUC for Random Forest\n self.logger_object.log('AUC for KNN' + str(knn_score))\n y_score = knn_clf.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, 
pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n self.score.append(knn_score)\n\n if len(test_y.unique()) != 1:\n \"\"\" 5. SVC \"\"\"\n self.model_name.append(\"SVC\")\n title = title_generator.format(\"SVC\")\n svc_clf = self.get_best_params_for_svm_fraud_detection_and_scania(train_x, train_y)\n prediction_svc = svc_clf.predict(test_x)\n self.model.append(svc_clf)\n if len(test_y.unique()) == 1:\n svc_score = accuracy_score(test_y, prediction_svc)\n self.logger_object.log('Accuracy for svc clf' + str(svc_score))\n else:\n svc_score = roc_auc_score(test_y, prediction_svc) # AUC for Random Forest\n self.logger_object.log('AUC for svc' + str(svc_score))\n y_score = svc_clf.predict_proba(test_x)[:, 1]\n fpr, tpr, thresholds = roc_curve(test_y, y_score, pos_label=test_y.unique()[1])\n AccurayGraph().save_roc_curve_plot_binary_classification(fpr, tpr, self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n title=title)\n self.score.append(svc_score)\n\n AccurayGraph().save_accuracy_bar_graph(\n model_name_list=self.model_name,\n accuracy_score_list=self.score,\n project_id=self.project_id,\n execution_id=self.logger_object.execution_id,\n file_object=self.file_object,\n x_label=\"Model List\",\n y_label=\"Accuracy score comparison {}\".format(self.model_name),\n title=\"Cluster \" + str(cluster_number) + \"Accuracy Score \"\n )\n\n execution_model_comparison_id = str(uuid.uuid4())\n for data in zip(self.model_name, self.score):\n self.save_accuracy_data(model_name=data[0], score=data[1],\n execution_model_comparision_id=execution_model_comparison_id)\n\n # comparing the two models\n return self.get_best_model_on_score(model_name=self.model_name, model=self.model, score=self.score)\n\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\"\n .format(self.__module__, ModelFinder.__name__,\n self.get_best_model_income_prediction.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n\n def get_best_model_on_score(self, model_name: list, model: list, score: list):\n \"\"\"\n\n :param model: models in list\n :param model_name: Model name list\n :param score: score list\n :return: best model name and model\n \"\"\"\n try:\n record = {'model_name': model_name, 'model': model, 'score': score}\n df = pandas.DataFrame(record)\n df.index = df.model_name\n model_name = df.max()['model_name']\n model = df.loc[model_name]['model']\n return model_name, model\n\n except Exception as e:\n model_finder = ModelFinderException(\n \"Failed in [{0}] class [{1}] method [{2}]\".format(self.__module__, ModelFinder.__name__,\n self.get_best_model_on_score.__name__))\n raise Exception(model_finder.error_message_detail(str(e), sys)) from e\n" ]
[ [ "numpy.random.uniform", "sklearn.svm.SVC", "sklearn.linear_model.Ridge", "sklearn.svm.SVR", "sklearn.linear_model.LinearRegression", "sklearn.tree.DecisionTreeRegressor", "sklearn.linear_model.SGDRegressor", "sklearn.linear_model.RidgeCV", "pandas.DataFrame", "sklearn.metrics.roc_auc_score", "sklearn.metrics.accuracy_score", "sklearn.ensemble.RandomForestRegressor", "numpy.abs", "sklearn.model_selection.GridSearchCV", "sklearn.ensemble.RandomForestClassifier", "sklearn.neighbors.KNeighborsClassifier", "sklearn.metrics.r2_score", "sklearn.naive_bayes.GaussianNB" ] ]
chance-alvarado/dementia-classifier
[ "8c0d1735e072665c65c8d0bc4de32d0fa25fde87" ]
[ "resources/dementia_analysis.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Defining functions and classes for classification of dementia features.\n\nThe contents of this module define functions and classes for cleaning,\nvisualizing, and making predictions based on clinical datasets collected by\nthe Open Access Series of Imaging Studies (OASIS) project.\n\nExplore this repository at:\n https://github.com/chance-alvarado/dementia-classifier\n\nAuthor:\n Chance Alvarado\n LinkedIn: https://www.linkedin.com/in/chance-alvarado/\n GitHub: https://github.com/chance-alvarado/\n\"\"\"\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import confusion_matrix, plot_roc_curve\n\n\nclass DataCleaning():\n \"\"\"Class for DataFrame construction and cleaning.\"\"\"\n\n def create_dataframes(self, cross_sectional_path, longitudinal_path):\n \"\"\"Construct DataFrames for both datasets.\"\"\"\n cs_df = pd.read_csv(cross_sectional_path)\n l_df = pd.read_csv(longitudinal_path)\n\n return [cs_df, l_df]\n\n def create_master_dataframe(self, cs_df, l_df):\n \"\"\"Combine DataFrames into one master DataFrame.\"\"\"\n # Grab only the first visit from l_df\n l_df = l_df[l_df.Visit == 1].copy(deep=True)\n\n # Drop different columns\n l_df.drop(columns=['Subject ID', 'MRI ID', 'Group',\n 'Visit', 'MR Delay', 'Hand'\n ],\n inplace=True\n )\n cs_df.drop(columns=['ID', 'Delay', 'Hand'], inplace=True)\n\n # Rename 'EDUC' column for a proper merge\n l_df.rename(columns={'EDUC': 'Educ'}, inplace=True)\n\n # Construct master DataFrame\n df = pd.concat([cs_df, l_df], ignore_index=True, sort=False)\n\n # Reset index\n df.reset_index(drop=True, inplace=True)\n\n # Rename columns for ease of use\n df.rename(columns={'M/F': 'sex',\n 'Age': 'age',\n 'Educ': 'education',\n 'SES': 'economic_status',\n 'MMSE': 'mental_state_eval',\n 'CDR': 'dementia_rating',\n 'eTIV': 'intracranial_vol',\n 'nWBV': 'brain_vol',\n 'ASF': 'scaling_factor',\n },\n inplace=True\n )\n\n return df\n\n def clean_master_dataframe(self, df):\n \"\"\"Clean master DataFrame for visualization and prediction.\"\"\"\n # Remove rows where no target information exists\n df = df[~df.dementia_rating.isnull()].copy(deep=True)\n\n # Convert sex info to binary: M=1, F=0\n df.sex.replace('F', 0, inplace=True)\n df.sex.replace('M', 1, inplace=True)\n\n # Fill na with column median\n df.fillna(df.median(), inplace=True)\n\n # Reset index\n df.reset_index(drop=True, inplace=True)\n\n # Add target column\n df['target'] = 1\n for i, val in enumerate(df.dementia_rating):\n if val == 0:\n df.at[i, 'target'] = 0\n\n return df\n\n\nclass DataVisualization():\n \"\"\"Class for data exploration through visualization.\"\"\"\n\n def __init__(self):\n \"\"\"Itialize with proper themes.\"\"\"\n self._blue = '#0834CB'\n self._red = '#B22222'\n self._main = '#ebeaf3'\n self._blue_opaque = '#b7bde5'\n self._red_opaque = '#ddbac2'\n\n sns.set_style(\"darkgrid\")\n sns.set_palette(sns.color_palette([self._blue, self._red]), desat=.75)\n\n def donut_plot(self, df):\n \"\"\"Create donut plot of sex breakdown.\"\"\"\n # Manipulate data\n total_male = df[df.sex == 1].shape[0]\n dementia_male = df[df.sex == 1].target.sum()\n\n total_female = df[df.sex == 0].shape[0]\n dementia_female = df[df.sex == 0].target.sum()\n\n # Create 
plot\n fig, ax = plt.subplots(figsize=(7, 7))\n\n ax.pie([total_male, total_female], colors=['#C97276', '#90A1E3'],\n labels=['Male', 'Female'], shadow=True, radius=1,\n textprops={'fontsize': 14},\n wedgeprops=dict(width=.4, edgecolor=self._red),\n )\n\n wedges, texts = ax.pie([total_male - dementia_male, dementia_male,\n total_female - dementia_female,\n dementia_female\n ],\n colors=[self._blue_opaque, self._red_opaque,\n self._blue_opaque, self._red_opaque\n ],\n shadow=True, radius=.7,\n textprops={'fontsize': 14},\n wedgeprops=dict(width=.15,\n edgecolor=self._blue\n ),\n )\n\n # Plot attributes\n ax.legend(wedges[0:2], ['No Dementia', 'Dementia'],\n loc=\"center left\",\n fontsize=14,\n bbox_to_anchor=(.8, .9))\n\n fig.suptitle('Breakdown of Sexes', fontsize=18, x=.52, y=.9)\n\n # Show plot\n plt.show()\n\n def age_sex_kde_plot(self, df):\n \"\"\"Create KDE plots of sex and age features.\"\"\"\n # Create plot\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 4))\n\n sns.kdeplot(df[(df.sex == 1) & (df.target == 0)].age, shade=True,\n ax=ax[0], legend=False)\n sns.kdeplot(df[(df.sex == 1) & (df.target == 1)].age, shade=True,\n ax=ax[0], legend=False)\n\n sns.kdeplot(df[(df.sex == 0) & (df.target == 0)].age, shade=True,\n ax=ax[1], legend=False)\n sns.kdeplot(df[(df.sex == 0) & (df.target == 1)].age, shade=True,\n ax=ax[1], legend=False)\n\n # Plot attributes\n fig.suptitle('Kernal Density Estimates of Varying Populations',\n fontsize=14)\n ax[0].set_xlabel('Male', fontsize=14)\n ax[1].set_xlabel('Female', fontsize=14)\n ax[1].legend(['No Dementia', 'Dementia'], fontsize=14,\n bbox_to_anchor=(1, 1))\n\n # Show plot\n plt.show()\n\n def kde_plot(self, df):\n \"\"\"Create KDE plot of relevant features.\"\"\"\n # Create figure and specify subplots\n fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(14, 8))\n\n # Attributes for all KDE plots\n plot_attrs = {'shade': True, 'legend': False}\n\n # Create KDE plots\n sns.kdeplot(df[df.target == 0].age, ax=ax[0, 0], **plot_attrs)\n sns.kdeplot(df[df.target == 1].age, ax=ax[0, 0], **plot_attrs)\n\n sns.kdeplot(df[df.target == 0].education, ax=ax[0, 1], **plot_attrs)\n sns.kdeplot(df[df.target == 1].education, ax=ax[0, 1], **plot_attrs)\n\n sns.kdeplot(df[df.target == 0].economic_status, ax=ax[0, 2],\n **plot_attrs\n )\n sns.kdeplot(df[df.target == 1].economic_status, ax=ax[0, 2],\n **plot_attrs\n )\n\n sns.kdeplot(df[df.target == 0].intracranial_vol, ax=ax[1, 0],\n **plot_attrs\n )\n sns.kdeplot(df[df.target == 1].intracranial_vol, ax=ax[1, 0],\n **plot_attrs\n )\n\n sns.kdeplot(df[df.target == 0].brain_vol, ax=ax[1, 1], **plot_attrs)\n sns.kdeplot(df[df.target == 1].brain_vol, ax=ax[1, 1], **plot_attrs)\n\n sns.kdeplot(df[df.target == 0].mental_state_eval, ax=ax[1, 2],\n **plot_attrs\n )\n sns.kdeplot(df[df.target == 1].mental_state_eval, ax=ax[1, 2],\n **plot_attrs\n )\n\n # Update plot attributes\n fig.suptitle('Kernal Density Estimates', fontsize=18)\n\n ax[0, 2].legend(['No Dementia', 'Dementia'], fontsize=14,\n bbox_to_anchor=(1, 1))\n\n ax[0, 0].set_title('Age', fontsize=14)\n ax[0, 1].set_title('Years of Education', fontsize=14)\n ax[0, 2].set_title('Socioeconomic Status', fontsize=14)\n ax[1, 0].set_title('Estimated Intracranial Volume', fontsize=14)\n ax[1, 1].set_title('Normalized Whole Brain Volume', fontsize=14)\n ax[1, 2].set_title('Mini Mental State Evaluation', fontsize=14)\n\n # Show plot\n plt.show()\n\n def scatter_plot(self, df):\n \"\"\"Create scatter plot of scaling factor and intracranial volume.\"\"\"\n # Create 
plot\n sns.scatterplot(x='scaling_factor', y='intracranial_vol',\n hue='target', data=df, alpha=0.4,\n s=50, legend=False\n )\n\n # Update plot attributes\n plt.title('Atlas Scaling Factor vs. Estimated Intracranial Volume',\n fontsize=14)\n plt.xlabel('Atlas Scaling Factor', fontsize=12)\n plt.ylabel('Estimated Intracranial Volume', fontsize=12)\n\n # Show plot\n plt.show()\n plt.show\n\n def pair_plot(self, df):\n \"\"\"Create pair plot to examine clustering of multiple features.\"\"\"\n # Create grid and populate with plots\n g = sns.PairGrid(df[['age', 'intracranial_vol',\n 'brain_vol', 'mental_state_eval',\n 'target']],\n hue='target', height=3.5, aspect=1.5,\n )\n\n g = g.map_diag(plt.hist, alpha=0.5)\n g = g.map_offdiag(plt.scatter, alpha=0.5, s=80)\n\n # Update plot attributes\n labels = {'age': 'Age',\n 'intracranial_vol': 'Intracranial Volume',\n 'brain_vol': 'Whole Brain Volume',\n 'mental_state_eval': 'Mini Mental State Evaluation'\n }\n\n for i in range(len(labels)):\n for j in range(len(labels)):\n xlabel = g.axes[i][j].get_xlabel()\n ylabel = g.axes[i][j].get_ylabel()\n if xlabel in labels.keys():\n g.axes[i][j].set_xlabel(labels[xlabel], fontsize=20)\n if ylabel in labels.keys():\n g.axes[i][j].set_ylabel(labels[ylabel], fontsize=20)\n\n g.fig.suptitle('Clustering between Features', fontsize=28, y=1.04)\n\n # Show plot\n plt.show()\n\n\nclass PredictiveModel():\n \"\"\"Create multiple predictive models and find the best fit for the data.\"\"\"\n\n def feature_target_split(self, df):\n \"\"\"Seperate feature matrix and target vector from DataFrame.\"\"\"\n # Labels\n labels = ['sex', 'age', 'education', 'economic_status',\n 'mental_state_eval', 'intracranial_vol', 'brain_vol'\n ]\n\n # Feature matrix\n X = df[labels].to_numpy()\n\n # Target Vector\n y = df['target'].to_numpy()\n\n # Normalize the feature matrix\n scaler = MinMaxScaler(feature_range=(0, 1))\n X = scaler.fit_transform(X)\n\n return [X, y]\n\n def split(self, X, y):\n \"\"\"Split data into training and testing sets with an 70/30 split.\"\"\"\n X_train, X_test, y_train, y_test = train_test_split(X, y,\n test_size=.3,\n random_state=1\n )\n\n return [X_train, X_test, y_train, y_test]\n\n def logistic_regression(self, X_train, X_test, y_train, y_test):\n \"\"\"Fit the data to a logistic regression classifier and test.\"\"\"\n # Create and fit the model\n classifier = LogisticRegression(random_state=2)\n classifier.fit(X_train, y_train)\n\n # Print score on test data\n print('Logistic Regression Accuracy: ',\n classifier.score(X_test, y_test)\n )\n\n return classifier\n\n def random_forest(self, X_train, X_test, y_train, y_test):\n \"\"\"Fit the data to a random forest classifier and test.\"\"\"\n # Create and fit the model\n classifier = RandomForestClassifier(random_state=3)\n classifier.fit(X_train, y_train)\n\n # Print score on test data\n print('Random Forest Accuracy: ',\n classifier.score(X_test, y_test)\n )\n\n return classifier\n\n def svc(self, X_train, X_test, y_train, y_test):\n \"\"\"Fit the data to an SVC classifier and test.\"\"\"\n # Create and fit the model\n classifier = SVC(kernel='poly', C=0.5, random_state=4)\n classifier.fit(X_train, y_train)\n\n # Print score on test data\n print('SVC Accuracy: ',\n classifier.score(X_test, y_test)\n )\n\n return classifier\n\n\nclass ResultsVisualization():\n \"\"\"Class for visualizing results from classifications models.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize class with proper themes.\"\"\"\n self._blue = '#0834CB'\n self._red = '#B22222'\n\n 
self._labels = ['Sex', 'Age', 'Education', 'Economic Status',\n 'Mental State Evaluation', 'Intracranial Volume',\n 'Brain Volume'\n ]\n\n sns.set_style(\"darkgrid\")\n sns.set_palette(sns.color_palette([self._blue, self._red]), desat=.75)\n\n def roc_plot(self, classifier, X_test, y_test):\n \"\"\"Create ROC plot for given classifier.\"\"\"\n # Create plot\n fig, ax = plt.subplots(1, 1, figsize=(7, 7))\n plot_roc_curve(classifier, X_test, y_test, alpha=0.6, ax=ax,\n linewidth=3, c=self._blue\n )\n ax.plot([0, 1], [0, 1], linestyle='dashed', linewidth=3,\n c=self._red, alpha=0.6\n )\n\n # Plot attributes\n ax.set_title('Receiver Operating Charecteristic Curve',\n fontsize=18)\n\n ax.set_xlabel('False Positive Rate', fontsize=14)\n ax.set_ylabel('True Postive Rate', fontsize=14)\n\n ax.get_legend().remove()\n\n # Show plot\n plt.show\n\n def feature_importance_plot(self, classifier):\n \"\"\"Create bar plot of feature importances for given classifier.\"\"\"\n # Manipulate data\n feature_importances = classifier.feature_importances_\n sort = np.argsort(feature_importances)\n\n # Create plot\n fig, ax = plt.subplots(1, 1, figsize=(7, 7))\n ax.barh(np.array(self._labels)[sort], feature_importances[sort],\n color=self._blue, alpha=0.6\n )\n\n # Plot attributes\n ax.set_title('Feature Importances', fontsize=18)\n ax.tick_params(labelsize=14)\n\n # Show plot\n plt.show()\n\n def confusion_matrix(self, classifier, X_test, y_test):\n \"\"\"Create confusion matrix for given classifier.\"\"\"\n # Make predicitons from test features\n y_prediction = classifier.predict(X_test)\n\n # Create plot\n ax = plt.subplot()\n sns.heatmap(confusion_matrix(y_test, y_prediction),\n annot=True, cmap=plt.get_cmap('Blues'),\n alpha=0.8, ax=ax, fmt=\"g\", cbar=False,\n annot_kws={\"size\": 16}\n )\n\n # Plot attributes\n ax.set_title('Confusion Matrix', fontsize=18)\n\n ax.set_xlabel('Predicted labels', fontsize=14)\n ax.set_ylabel('True labels', fontsize=14)\n\n ax.tick_params(axis='both', which='major', labelsize=12)\n ax.xaxis.set_ticklabels(['No Dementia', 'Dementia'])\n ax.yaxis.set_ticklabels(['No Dementia', 'Dementia'])\n\n # Show plot\n plt.show()\n" ]
[ [ "sklearn.svm.SVC", "sklearn.metrics.plot_roc_curve", "pandas.read_csv", "sklearn.preprocessing.MinMaxScaler", "numpy.argsort", "matplotlib.pyplot.subplots", "matplotlib.pyplot.title", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "pandas.concat", "sklearn.model_selection.train_test_split", "sklearn.linear_model.LogisticRegression", "sklearn.ensemble.RandomForestClassifier", "matplotlib.pyplot.get_cmap", "numpy.array", "matplotlib.pyplot.xlabel" ] ]
andrewtarzia/PoreMapper
[ "fc98324275e0e4fb3735b9a9cc4a79a34567eca2" ]
[ "pore_mapper/inflater.py" ]
[ "\"\"\"\nInflater\n========\n\n#. :class:`.Inflater`\n\nGenerator of blob guests using nonbonded interactions and growth.\n\n\"\"\"\n\nfrom __future__ import annotations\nfrom collections import abc\nimport typing\n\nimport numpy as np\nfrom copy import deepcopy\nfrom scipy.spatial.distance import cdist\n\nfrom .host import Host\nfrom .blob import Blob\nfrom .bead import Bead\nfrom .pore import Pore\nfrom .result import InflationResult, InflationStepResult\n\n\nclass Inflater:\n \"\"\"\n Grow guest blob.\n\n \"\"\"\n\n def __init__(self, bead_sigma: float):\n \"\"\"\n Initialize a :class:`Inflater` instance.\n\n Parameters:\n\n bead_sigma:\n Bead sigma to use in Blob.\n\n \"\"\"\n\n self._bead_sigma = bead_sigma\n\n def _check_steric(\n self,\n host: Host,\n blob: Blob,\n bead: Bead,\n ) -> np.ndarray:\n\n coord = np.array([blob.get_position_matrix()[bead.get_id()]])\n host_coords = host.get_position_matrix()\n host_radii = np.array([\n i.get_radii() for i in host.get_atoms()\n ]).reshape(host.get_num_atoms(), 1)\n host_bead_distances = cdist(host_coords, coord)\n host_bead_distances += -host_radii\n min_host_guest_distance = np.min(host_bead_distances.flatten())\n if min_host_guest_distance < bead.get_sigma():\n return True\n return False\n\n def _translate_beads_along_vector(\n self,\n blob: Blob,\n vector: np.ndarray,\n bead_id: typing.Optional[int] = None,\n ) -> Blob:\n\n if bead_id is None:\n return blob.with_displacement(vector)\n else:\n new_position_matrix = deepcopy(blob.get_position_matrix())\n for bead in blob.get_beads():\n if bead.get_id() != bead_id:\n continue\n pos = blob.get_position_matrix()[bead.get_id()]\n new_position_matrix[bead.get_id()] = pos - vector\n\n return blob.with_position_matrix(new_position_matrix)\n\n def inflate_blob(\n self,\n host: Host,\n ) -> abc.Iterable[InflationStepResult]:\n \"\"\"\n Mould blob from beads inside host.\n\n Parameters:\n\n host:\n The host to analyse.\n\n Yields:\n\n The result of this step.\n\n \"\"\"\n\n starting_radius = 0.1\n num_steps = 100\n\n # Move host to origin.\n host = host.with_centroid([0., 0., 0.])\n host_pos_mat = host.get_position_matrix()\n host_maximum_diameter = host.get_maximum_diameter()\n host_radii_arr = np.array([\n i.get_radii() for i in host.get_atoms()\n ]).reshape(1, host.get_num_atoms())\n\n # Get num beads and step size based on maximum diameter of\n # host. Using pyWindow code.\n host_radius = host_maximum_diameter / 2\n host_surface_area = 4 * np.pi * host_radius**2\n num_beads = int(np.log10(host_surface_area) * 250)\n step_size = (host_radius-starting_radius)/num_steps\n\n # Define an idealised blob based on num_beads.\n blob = Blob.init_from_idealised_geometry(\n bead_sigma=self._bead_sigma,\n num_beads=num_beads,\n sphere_radius=starting_radius,\n )\n blob = blob.with_centroid(host.get_centroid())\n\n blob_maximum_diameter = blob.get_maximum_diameter()\n movable_bead_ids = set([i.get_id() for i in blob.get_beads()])\n for step in range(num_steps):\n # If the distance is further than the maximum diameter.\n # Stop.\n blob_maximum_diameter = blob.get_maximum_diameter()\n if blob_maximum_diameter > host_maximum_diameter:\n print(\n f'Pop! 
breaking at step: {step} with blob larger '\n 'than host'\n )\n break\n if len(movable_bead_ids) == 0:\n print(\n f'breaking at step: {step} with no more moveable '\n 'beads'\n )\n break\n\n pos_mat = blob.get_position_matrix()\n\n # Check for steric clashes.\n # Get host-blob distances.\n pair_dists = cdist(pos_mat, host_pos_mat)\n # Include host atom radii.\n pair_dists += -host_radii_arr\n min_pair_dists = np.min(pair_dists, axis=1)\n # Update movable array.\n movable_bead_arr = np.where(\n min_pair_dists < self._bead_sigma, 0, 1\n ).reshape(num_beads, 1)\n\n # And ids.\n movable_bead_ids = set(\n np.argwhere(movable_bead_arr==1)[:, 0]\n )\n # Update blob.\n blob = blob.with_movable_bead_ids(\n movable_bead_ids=movable_bead_ids,\n )\n\n # Define step array based on collisions.\n step_arr = movable_bead_arr * step_size\n # Get translations.\n translation_mat = step_arr * (\n pos_mat / np.linalg.norm(\n x=pos_mat,\n axis=1,\n ).reshape(num_beads, 1)\n )\n new_pos_mat = pos_mat + translation_mat\n\n # Do move.\n blob = blob.with_position_matrix(new_pos_mat)\n\n num_movable_beads = len(movable_bead_ids)\n if num_movable_beads < 0.6*blob.get_num_beads():\n nonmovable_bead_ids = [\n i.get_id() for i in blob.get_beads()\n if i.get_id() not in movable_bead_ids\n ]\n else:\n nonmovable_bead_ids = [\n i.get_id() for i in blob.get_beads()\n # if i.get_id() not in movable_bead_ids\n ]\n pore = Pore(\n blob=blob,\n nonmovable_bead_ids=nonmovable_bead_ids,\n )\n step_result = InflationStepResult(\n step=step,\n num_movable_beads=num_movable_beads,\n blob=blob,\n pore=pore,\n )\n yield step_result\n\n step_result = InflationStepResult(\n step=step,\n num_movable_beads=num_movable_beads,\n blob=blob,\n pore=pore,\n )\n yield step_result\n\n def get_inflated_blob(\n self,\n host: Host,\n ) -> InflationResult:\n \"\"\"\n Mould blob from beads inside host.\n\n Parameters:\n\n host:\n The host to analyse.\n\n Returns:\n\n The final result of inflation.\n\n \"\"\"\n\n for step_result in self.inflate_blob(host):\n continue\n\n return InflationResult(\n step=step_result.step,\n num_movable_beads=step_result.num_movable_beads,\n blob=step_result.blob,\n pore=step_result.pore,\n )\n" ]
[ [ "scipy.spatial.distance.cdist", "numpy.argwhere", "numpy.log10", "numpy.min", "numpy.where", "numpy.linalg.norm" ] ]
DMGREENHOUSE/inference-tools
[ "4b007cdcb6ae31dad6a5edf6cb50b6a9120c27e7" ]
[ "inference/mcmc.py" ]
[ "\"\"\"\n.. moduleauthor:: Chris Bowman <[email protected]>\n\"\"\"\n\nimport sys\nfrom warnings import warn\nfrom copy import copy, deepcopy\nfrom multiprocessing import Process, Pipe, Event, Pool\nfrom time import time\nfrom random import choice\n\nimport matplotlib.pyplot as plt\nfrom numpy import array, arange, float64, identity, linspace, zeros\nfrom numpy import exp, log, mean, sqrt, argmax, diff, dot, cov, var, percentile\nfrom numpy import isfinite, sort, argsort, savez, savez_compressed, load\nfrom numpy.fft import rfft, irfft\nfrom numpy.random import normal, random, shuffle, seed, randint\nfrom scipy.linalg import eigh\n\nfrom inference.pdf import UnimodalPdf, GaussianKDE\nfrom inference.plotting import matrix_plot, trace_plot, transition_matrix_plot\n\n\nclass Parameter(object):\n \"\"\"\n This class is used by the markov-chain samplers in this module\n to manage data specific to each model parameter which is being\n sampled.\n\n The class also adjusts the parameter's proposal distribution\n width automatically as the chain advances in order to ensure\n efficient sampling.\n \"\"\"\n\n def __init__(self, value=None, sigma=None):\n self.samples = [] # list to store all samples for the parameter\n self.samples.append(value) # add starting location as first sample\n self.sigma = sigma # the width parameter for the proposal distribution\n\n # storage for proposal width adjustment algorithm\n self.avg = 0\n self.var = 0\n self.num = 0\n self.sigma_values = [copy(self.sigma)] # sigma values after each assessment\n self.sigma_checks = [0.0] # chain locations at which sigma was assessed\n self.try_count = 0 # counter variable tracking number of proposals\n self.last_update = 0 # chain location where sigma was last updated\n\n # settings for proposal width adjustment algorithm\n self.target_rate = 0.25 # default of 0.25 is optimal for MH sampling\n self.max_tries = 50 # maximum allowed tries before width is cut in half\n self.chk_int = 100 # interval of steps at which proposal widths are adjusted\n self.growth_factor = 1.75 # factor chk_int grows when width is adjusted\n self.adjust_rate = 0.25\n\n # properties\n self._non_negative = False\n self.bounded = False\n self.proposal = self.standard_proposal\n self.upper = 0.0\n self.lower = 0.0\n self.width = 0.0\n\n def set_boundaries(self, lower, upper):\n if lower < upper:\n self.upper = upper\n self.lower = lower\n self.width = upper - lower\n self.proposal = self.boundary_proposal\n self.bounded = True\n else:\n warn(\"Upper limit must be greater than lower limit\")\n\n def remove_boundaries(self):\n self.proposal = self.standard_proposal\n self.bounded = False\n self.upper = 0.0\n self.lower = 0.0\n self.width = 0.0\n\n @property\n def non_negative(self):\n return self._non_negative\n\n @non_negative.setter\n def non_negative(self, value):\n if type(value) is bool:\n self._non_negative = value\n if self._non_negative is True:\n self.proposal = self.abs_proposal\n else:\n self.proposal = self.standard_proposal\n else:\n warn(\"non_negative must have a boolean value\")\n\n def standard_proposal(self):\n # increment the try count\n self.try_count += 1\n # if tries climb too high, then cut sigma in half\n if self.try_count > self.max_tries:\n self.adjust_sigma(0.25)\n # return the proposed value\n return self.samples[-1] + self.sigma * normal()\n\n def abs_proposal(self):\n # increment the try count\n self.try_count += 1\n # if tries climb too high, then cut sigma in half\n if self.try_count > self.max_tries:\n self.adjust_sigma(0.25)\n 
# return the proposed value\n return abs(self.samples[-1] + self.sigma * normal())\n\n def boundary_proposal(self):\n # increment the try count\n self.try_count += 1\n # if tries climb too high, then cut sigma in half\n if self.try_count > self.max_tries:\n self.adjust_sigma(0.25)\n # generate the proposed value\n prop = self.samples[-1] + self.sigma * normal()\n\n # we now pass the proposal through a 'reflecting' function where\n # proposals falling outside the boundary are reflected inside\n d = prop - self.lower\n n = (d // self.width) % 2\n if n == 0:\n return self.lower + d % self.width\n else:\n return self.upper - d % self.width\n\n def submit_accept_prob(self, p):\n self.num += 1\n self.avg += p\n self.var += p * (1 - p)\n\n if self.num >= self.chk_int:\n self.update_epsilon()\n\n def update_epsilon(self):\n \"\"\"\n looks at average tries over recent steps, and adjusts proposal\n widths self.sigma to bring the average towards self.target_tries.\n \"\"\"\n # normal approximation of poisson binomial distribution\n mu = self.avg / self.num\n std = sqrt(self.var) / self.num\n\n # now check if the desired success rate is within 2-sigma\n if ~(mu - 2 * std < self.target_rate < mu + 2 * std):\n adj = (log(self.target_rate) / log(mu)) ** self.adjust_rate\n adj = min(adj, 3.0)\n adj = max(adj, 0.1)\n self.adjust_sigma(adj)\n else: # increase the check interval\n self.chk_int = int((self.growth_factor * self.chk_int) * 0.1) * 10\n\n def adjust_sigma(self, ratio):\n self.sigma *= ratio\n self.sigma_values.append(copy(self.sigma))\n self.sigma_checks.append(len(self.samples))\n self.avg = 0\n self.var = 0\n self.num = 0\n\n def add_sample(self, s):\n self.samples.append(s)\n self.try_count = 0\n\n def get_items(self, param_id):\n i = f\"param_{param_id}\"\n items = {\n f\"{i}samples\": self.samples,\n f\"{i}sigma\": self.sigma,\n f\"{i}avg\": self.avg,\n f\"{i}var\": self.var,\n f\"{i}num\": self.num,\n f\"{i}sigma_values\": self.sigma_values,\n f\"{i}sigma_checks\": self.sigma_checks,\n f\"{i}try_count\": self.try_count,\n f\"{i}last_update\": self.last_update,\n f\"{i}target_rate\": self.target_rate,\n f\"{i}max_tries\": self.max_tries,\n f\"{i}chk_int\": self.chk_int,\n f\"{i}growth_factor\": self.growth_factor,\n f\"{i}adjust_rate\": self.adjust_rate,\n f\"{i}_non_negative\": self._non_negative,\n f\"{i}bounded\": self.bounded,\n f\"{i}upper\": self.upper,\n f\"{i}lower\": self.lower,\n f\"{i}width\": self.width,\n }\n return items\n\n def load_items(self, dictionary, param_id):\n i = \"param_\" + str(param_id)\n self.samples = list(dictionary[i + \"samples\"])\n self.sigma = float(dictionary[i + \"sigma\"])\n self.avg = float(dictionary[i + \"avg\"])\n self.var = float(dictionary[i + \"var\"])\n self.num = float(dictionary[i + \"num\"])\n self.sigma_values = list(dictionary[i + \"sigma_values\"])\n self.sigma_checks = list(dictionary[i + \"sigma_checks\"])\n self.try_count = int(dictionary[i + \"try_count\"])\n self.last_update = int(dictionary[i + \"last_update\"])\n self.target_rate = float(dictionary[i + \"target_rate\"])\n self.max_tries = int(dictionary[i + \"max_tries\"])\n self.chk_int = int(dictionary[i + \"chk_int\"])\n self.growth_factor = float(dictionary[i + \"growth_factor\"])\n self.adjust_rate = float(dictionary[i + \"adjust_rate\"])\n self._non_negative = bool(dictionary[i + \"_non_negative\"])\n self.bounded = bool(dictionary[i + \"bounded\"])\n self.upper = float(dictionary[i + \"upper\"])\n self.lower = float(dictionary[i + \"lower\"])\n self.width = float(dictionary[i 
+ \"width\"])\n\n if self.bounded:\n self.proposal = self.boundary_proposal\n elif self._non_negative:\n self.proposal = self.abs_proposal\n else:\n self.proposal = self.standard_proposal\n\n\nclass MarkovChain(object):\n \"\"\"\n Implementation of the metropolis-hastings algorithm using a multivariate-normal\n proposal distribution.\n\n :param func posterior: \\\n A function which takes the vector of model parameters as a ``numpy.ndarray``,\n and returns the posterior log-probability.\n\n :param start: \\\n Vector of model parameters which correspond to the parameter-space coordinates\n at which the chain will start.\n\n :param widths: \\\n Vector of standard deviations which serve as initial guesses for the widths of\n the proposal distribution for each model parameter. If not specified, the\n starting widths will be approximated as 5% of the values in 'start'.\n \"\"\"\n\n def __init__(self, posterior=None, start=None, widths=None, temperature=1.0):\n\n if start is None:\n start = []\n\n self.inv_temp = 1.0 / temperature\n\n if posterior is not None:\n self.posterior = posterior\n\n # if widths are not specified, take 5% of the starting values (unless they're zero)\n if widths is None:\n widths = [v * 0.05 if v != 0 else 1.0 for v in start]\n\n # create a list of parameter objects\n self.params = [Parameter(value=v, sigma=s) for v, s in zip(start, widths)]\n\n # create storage\n self.n = 1 # tracks total length of the chain\n self.L = len(start) # number of posterior parameters\n self.probs = [] # list of probabilities for all steps\n\n # add starting point as first step in chain\n if len(self.params) != 0:\n self.probs.append(self.posterior(self.get_last()) * self.inv_temp)\n\n # check posterior value of chain starting point is finite\n if not isfinite(self.probs[0]):\n ValueError(\n \"posterior returns a non-finite value for provided starting position\"\n )\n\n # add default burn and thin values\n self.burn = 1 # remove the starting position by default\n self.thin = 1 # no thinning by default\n\n # flag for displaying completion of the advance() method\n self.print_status = True\n\n def take_step(self):\n \"\"\"\n Draws samples from the proposal distribution until one is\n found which satisfies the metropolis-hastings criteria.\n \"\"\"\n while True:\n proposal = array([p.proposal() for p in self.params])\n pval = self.posterior(proposal) * self.inv_temp\n\n if pval > self.probs[-1]:\n break\n else:\n test = random()\n acceptance_prob = exp(pval - self.probs[-1])\n if test < acceptance_prob:\n break\n\n for p, v in zip(self.params, proposal):\n p.add_sample(v)\n\n self.n += 1\n\n def advance(self, m):\n \"\"\"\n Advances the chain by taking *m* new steps.\n\n :param int m: number of steps the chain will advance.\n \"\"\"\n k = 100 # divide chain steps into k groups to track progress\n t_start = time()\n for j in range(k):\n for i in range(m // k):\n self.take_step()\n dt = time() - t_start\n\n # display the progress status message\n if self.print_status:\n pct = int(100 * (j + 1) / k)\n eta = int(dt * (k / (j + 1) - 1))\n sys.stdout.write(\n f\"\\r advancing chain: [ {pct}% complete ETA: {eta} sec ] \"\n )\n sys.stdout.flush()\n\n # cleanup\n if m % k != 0:\n for i in range(m % k):\n self.take_step()\n\n if self.print_status:\n # this is a little ugly...\n t_elapsed = time() - t_start\n mins, secs = divmod(t_elapsed, 60)\n hrs, mins = divmod(mins, 60)\n time_taken = \"%d:%02d:%02d\" % (hrs, mins, secs)\n sys.stdout.write(\n f\"\\r advancing chain: [ complete - {m} steps taken in 
{time_taken} ] \"\n )\n sys.stdout.flush()\n sys.stdout.write(\"\\n\")\n\n def run_for(self, minutes=0, hours=0, days=0):\n \"\"\"\n Advances the chain for a chosen amount of computation time\n\n :param int minutes: number of minutes for which to run the chain.\n :param int hours: number of hours for which to run the chain.\n :param int days: number of days for which to run the chain.\n \"\"\"\n # first find the runtime in seconds:\n run_time = ((days * 24.0 + hours) * 60.0 + minutes) * 60.0\n start_time = time()\n end_time = start_time + run_time\n\n # get a rough estimate of the time per step\n step_time = time()\n self.posterior(self.get_last())\n step_time = time() - step_time\n step_time *= 2 * self.L\n if step_time <= 0.0:\n step_time = 0.005\n\n # choose an update interval that should take ~2 seconds\n update_interval = max(int(2.0 // step_time), 1)\n\n # store the starting length of the chain\n start_length = copy(self.n)\n\n while time() < end_time:\n for i in range(update_interval):\n self.take_step()\n\n # display the progress status message\n seconds_remaining = end_time - time()\n m, s = divmod(seconds_remaining, 60)\n h, m = divmod(m, 60)\n time_left = \"%d:%02d:%02d\" % (h, m, s)\n steps_taken = self.n - start_length\n sys.stdout.write(\n f\"\\r advancing chain: [ {steps_taken} steps taken, time remaining: {time_left} ] \"\n )\n sys.stdout.flush()\n\n # this is a little ugly...\n mins, secs = divmod(run_time, 60)\n hrs, mins = divmod(mins, 60)\n time_taken = \"%d:%02d:%02d\" % (hrs, mins, secs)\n sys.stdout.write(\n f\"\\r advancing chain: [ complete - {self.n - start_length} steps taken in {time_taken} ] \"\n )\n sys.stdout.flush()\n sys.stdout.write(\"\\n\")\n\n def get_last(self):\n return array([p.samples[-1] for p in self.params], dtype=float64)\n\n def replace_last(self, theta):\n for p, t in zip(self.params, theta):\n p.samples[-1] = t\n\n def get_parameter(self, n, burn=None, thin=None):\n \"\"\"\n Return sample values for a chosen parameter.\n\n :param int n: Index of the parameter for which samples are to be returned.\n\n :param int burn: \\\n Number of samples to discard from the start of the chain. If not specified,\n the value of self.burn is used instead.\n\n :param int thin: \\\n Instead of returning every sample which is not discarded as part of the burn-in,\n every *m*'th sample is returned for a specified integer *m*. If not specified,\n the value of self.thin is used instead.\n\n :return: List of samples for parameter *n*'th parameter.\n \"\"\"\n burn = burn if burn is not None else self.burn\n thin = thin if thin is not None else self.thin\n return self.params[n].samples[burn::thin]\n\n def get_probabilities(self, burn=None, thin=None):\n \"\"\"\n Return log-probability values for each step in the chain\n\n :param int burn: \\\n Number of steps to discard from the start of the chain. If not specified, the\n value of self.burn is used instead.\n\n :param int thin: \\\n Instead of returning every step which is not discarded as part of the burn-in,\n every *m*'th step is returned for a specified integer *m*. 
If not specified,\n the value of self.thin is used instead.\n\n :return: List of log-probability values for each step in the chain.\n \"\"\"\n burn = burn if burn is not None else self.burn\n thin = thin if thin is not None else self.thin\n return self.probs[burn::thin]\n\n def get_sample(self, burn=None, thin=None):\n \"\"\"\n Return the sample generated by the chain as a list of tuples\n\n :param int burn: \\\n Number of samples to discard from the start of the chain. If not specified,\n the value of self.burn is used instead.\n\n :param int thin: \\\n Instead of returning every sample which is not discarded as part of the burn-in,\n every *m*'th sample is returned for a specified integer *m*. If not specified,\n the value of self.thin is used instead.\n\n :return: List containing sample points stored as tuples.\n \"\"\"\n burn = burn if burn is not None else self.burn\n thin = thin if thin is not None else self.thin\n return list(zip(*[p.samples[burn::thin] for p in self.params]))\n\n def get_interval(self, interval=0.95, burn=None, thin=None, samples=None):\n \"\"\"\n Return the samples from the chain which lie inside a chosen highest-density interval.\n\n :param float interval: \\\n Total probability of the desired interval. For example, if interval = 0.95, then\n the samples corresponding to the top 95% of posterior probability values are returned.\n\n :param int burn: \\\n Number of samples to discard from the start of the chain. If not specified, the\n value of self.burn is used instead.\n\n :param int thin: \\\n Instead of returning every sample which is not discarded as part of the burn-in,\n every *m*'th sample is returned for a specified integer *m*. If not specified,\n the value of self.thin is used instead.\n\n :param int samples: \\\n The number of samples that should be returned from the requested interval. 
Note\n that specifying *samples* overrides the value of *thin*.\n\n :return: List containing sample points stored as tuples, and a corresponding list of\n log-probability values\n \"\"\"\n burn = burn if burn is not None else self.burn\n\n # get the sorting indices for the probabilities\n probs = array(self.probs[burn:])\n inds = probs.argsort()\n # sort the sample by probability\n arrays = [array(p.samples[burn:])[inds] for p in self.params]\n probs = probs[inds]\n # trim lowest-probability samples\n cutoff = int(len(probs) * (1 - interval))\n arrays = [a[cutoff:] for a in arrays]\n probs = probs[cutoff:]\n # if a specific number of samples is requested we override the thin value\n if samples is not None:\n thin = max(len(probs) // samples, 1)\n elif thin is None:\n thin = self.thin\n\n # thin the sample\n arrays = [a[::thin] for a in arrays]\n probs = probs[::thin]\n\n if samples is not None:\n # we may need to trim some extra samples to meet the requested number,\n # but as they arranged in order of increasing probability, we must remove\n # elements at random in order not to introduce bias.\n n_trim = len(probs) - samples\n if n_trim > 0:\n trim = sort(argsort(random(size=len(probs)))[n_trim:])\n arrays = [a[trim] for a in arrays]\n probs = probs[trim]\n\n return list(zip(*arrays)), probs\n\n def mode(self):\n \"\"\"\n Return the sample with the current highest posterior probability.\n\n :return: List containing parameter values.\n \"\"\"\n ind = argmax(self.probs)\n return [p.samples[ind] for p in self.params]\n\n def set_non_negative(self, parameter, flag=True):\n \"\"\"\n Constrain a particular parameter to have non-negative values.\n\n :param int parameter: Index of the parameter which is to be set \\\n as non-negative.\n \"\"\"\n self.params[parameter].non_negative = flag\n\n def set_boundaries(self, parameter, boundaries, remove=False):\n \"\"\"\n Constrain the value of a particular parameter to specified boundaries.\n\n :param int parameter: Index of the parameter for which boundaries \\\n are to be set.\n\n :param boundaries: Tuple of boundaries in the format (lower_limit, upper_limit)\n \"\"\"\n if remove:\n self.params[parameter].remove_boundaries()\n else:\n self.params[parameter].set_boundaries(*boundaries)\n\n def get_marginal(self, n, thin=None, burn=None, unimodal=False):\n \"\"\"\n Estimate the 1D marginal distribution of a chosen parameter.\n\n :param int n: \\\n Index of the parameter for which the marginal distribution is to be estimated.\n\n :param int burn: \\\n Number of samples to discard from the start of the chain. If not specified,\n the value of self.burn is used instead.\n\n :param int thin: \\\n Rather than using every sample which is not discarded as part of the burn-in,\n every *m*'th sample is used for a specified integer *m*. If not specified, the\n value of self.thin is used instead, which has a default value of 1.\n\n :param bool unimodal: \\\n Selects the type of density estimation to be used. The default value is False,\n which causes a GaussianKDE object to be returned. 
If however the marginal\n distribution being estimated is known to be unimodal, setting `unimodal = True`\n will result in the UnimodalPdf class being used to estimate the density.\n\n Returns one of two 'density estimator' objects which can be\n called as functions to return the estimated PDF at any point.\n \"\"\"\n burn = burn if burn is not None else self.burn\n thin = thin if thin is not None else self.thin\n\n if unimodal:\n return UnimodalPdf(self.get_parameter(n, burn=burn, thin=thin))\n else:\n return GaussianKDE(self.get_parameter(n, burn=burn, thin=thin))\n\n def plot_diagnostics(self, show=True, filename=None):\n \"\"\"\n Plot diagnostic traces that give information on how the chain is progressing.\n\n Currently this method plots:\n\n - The posterior log-probability as a function of step number, which is useful\n for checking if the chain has reached a maximum. Any early parts of the chain\n where the probability is rising rapidly should be removed as burn-in.\n\n - The history of changes to the proposal widths for each parameter. Ideally, the\n proposal widths should converge, and the point in the chain where this occurs\n is often a good choice for the end of the burn-in. For highly-correlated pdfs,\n the proposal widths may never fully converge, but in these cases small fluctuations\n in the width values are acceptable.\n\n :param bool show: If set to True, the plot is displayed.\n\n :param str filename: \\\n File path to which the diagnostics plot will be saved. If left unspecified the\n plot won't be saved.\n \"\"\"\n burn = self.estimate_burn_in()\n param_ESS = [\n ESS(array(self.get_parameter(i, burn=burn))) for i in range(self.L)\n ]\n\n fig = plt.figure(figsize=(12, 9))\n\n # probability history plot\n ax1 = fig.add_subplot(221)\n step_ax = [i * 1e-3 for i in range(len(self.probs))]\n ax1.plot(step_ax, self.probs, marker=\".\", ls=\"none\", markersize=3)\n ax1.set_xlabel(\"chain step number ($10^3$)\", fontsize=12)\n ax1.set_ylabel(\"log posterior probability\", fontsize=12)\n ax1.set_title(\"Chain log-probability history\")\n ylims = [\n min(self.probs[self.n // 2 :]),\n max(self.probs) * 1.1 - 0.1 * min(self.probs[self.n // 2 :]),\n ]\n plt.plot([burn * 1e-3, burn * 1e-3], ylims, c=\"red\", ls=\"dashed\", lw=2)\n ax1.set_ylim(ylims)\n ax1.grid()\n\n # proposal widths plot\n ax2 = fig.add_subplot(222)\n for p in self.params:\n y = array(p.sigma_values)\n x = array(p.sigma_checks[1:]) * 1e-3\n ax2.plot(x, 1e2 * diff(y) / y[:-1], marker=\"D\", markersize=3)\n ax2.plot([0, self.n * 1e-3], [5, 5], ls=\"dashed\", lw=2, color=\"black\")\n ax2.plot([0, self.n * 1e-3], [-5, -5], ls=\"dashed\", lw=2, color=\"black\")\n ax2.set_xlabel(\"chain step number ($10^3$)\", fontsize=12)\n ax2.set_ylabel(\"% change in proposal widths\", fontsize=12)\n ax2.set_title(\"Parameter proposal widths adjustment summary\")\n ax2.set_ylim([-50, 50])\n ax2.grid()\n\n # parameter ESS plot\n ax3 = fig.add_subplot(223)\n ax3.bar(range(self.L), param_ESS, color=[\"C0\", \"C1\", \"C2\", \"C3\", \"C4\"])\n ax3.set_xlabel(\"parameter\", fontsize=12)\n ax3.set_ylabel(\"effective sample size\", fontsize=12)\n ax3.set_title(\"Parameter effective sample size estimate\")\n ax3.set_xticks(range(self.L))\n\n # summary stats text plot\n ax4 = fig.add_subplot(224)\n gap = 0.1\n h = 0.85\n x1 = 0.5\n x2 = 0.55\n fntsiz = 14\n\n ax4.text(\n x1, h, \"Estimated burn-in:\", horizontalalignment=\"right\", fontsize=fntsiz\n )\n ax4.text(\n x2, h, \"{:.5G}\".format(burn), horizontalalignment=\"left\", fontsize=fntsiz\n 
)\n h -= gap\n ax4.text(x1, h, \"Average ESS:\", horizontalalignment=\"right\", fontsize=fntsiz)\n ax4.text(\n x2,\n h,\n \"{:.5G}\".format(int(mean(param_ESS))),\n horizontalalignment=\"left\",\n fontsize=fntsiz,\n )\n h -= gap\n ax4.text(x1, h, \"Lowest ESS:\", horizontalalignment=\"right\", fontsize=fntsiz)\n ax4.text(\n x2,\n h,\n \"{:.5G}\".format(int(min(param_ESS))),\n horizontalalignment=\"left\",\n fontsize=fntsiz,\n )\n ax4.axis(\"off\")\n\n plt.tight_layout()\n if filename is not None:\n plt.savefig(filename)\n if show:\n plt.show()\n else:\n fig.clear()\n plt.close(fig)\n\n def matrix_plot(self, params=None, thin=None, burn=None, **kwargs):\n \"\"\"\n Construct a 'matrix plot' of the parameters (or a subset) which displays\n all 1D and 2D marginal distributions. See the documentation of\n inference.plotting.matrix_plot for a description of other allowed\n keyword arguments.\n\n :param params: \\\n A list of integers specifying the indices of parameters which are to\n be plotted.\n\n :param int burn: \\\n Number of samples to discard from the start of the chain. If not\n specified, the value of self.burn is used instead.\n\n :param int thin: \\\n Rather than using every sample which is not discarded as part of the\n burn-in, every *m*'th sample is used for a specified integer *m*. If\n not specified, the value of self.thin is used instead, which has\n a default value of 1.\n \"\"\"\n burn = burn if burn is not None else self.burn\n thin = thin if thin is not None else self.thin\n params = params if params is not None else range(self.L)\n samples = [self.get_parameter(i, burn=burn, thin=thin) for i in params]\n matrix_plot(samples, **kwargs)\n\n def trace_plot(self, params=None, thin=1, burn=0, **kwargs):\n \"\"\"\n Construct a 'trace plot' of the parameters (or a subset) which displays\n the value of the parameters as a function of step number in the chain.\n See the documentation of inference.plotting.trace_plot for a description\n of other allowed keyword arguments.\n\n :param params: \\\n A list of integers specifying the indices of parameters which are to\n be plotted.\n\n :param int burn: \\\n Number of samples to discard from the start of the chain.\n\n :param int thin: \\\n Rather than using every sample which is not discarded as part of the\n burn-in, every *m*'th sample is used for a specified integer *m*.\n \"\"\"\n params = params if params is not None else range(self.L)\n samples = [self.get_parameter(i, burn=burn, thin=thin) for i in params]\n trace_plot(samples, **kwargs)\n\n def save(self, filename):\n \"\"\"\n Save the entire state of the chain object as an .npz file.\n\n :param str filename: file path to which the chain will be saved.\n \"\"\"\n # get the chain attributes\n items = {\n \"n\": self.n,\n \"L\": self.L,\n \"probs\": self.probs,\n \"burn\": self.burn,\n \"thin\": self.thin,\n \"inv_temp\": self.inv_temp,\n \"print_status\": self.print_status,\n }\n\n # get the parameter attributes\n for i, p in enumerate(self.params):\n items.update(p.get_items(param_id=i))\n\n # save as npz\n savez(filename, **items)\n\n @classmethod\n def load(cls, filename, posterior=None):\n \"\"\"\n Load a chain object which has been previously saved using the save() method.\n\n :param str filename: \\\n file path of the .npz file containing the chain object data.\n\n :param posterior: \\\n The posterior which was sampled by the chain. 
This argument need only be\n specified if new samples are to be added to the chain.\n \"\"\"\n # load the data and create a chain instance\n D = load(filename)\n chain = cls(posterior=posterior)\n\n # re-build the chain's attributes\n chain.n = int(D[\"n\"])\n chain.L = int(D[\"L\"])\n chain.probs = list(D[\"probs\"])\n chain.inv_temp = float(D[\"inv_temp\"])\n chain.burn = int(D[\"burn\"])\n chain.thin = int(D[\"thin\"])\n chain.print_status = bool(D[\"print_status\"])\n\n # re-build all the parameter objects\n chain.params = []\n for i in range(chain.L):\n p = Parameter()\n p.load_items(dictionary=D, param_id=i)\n chain.params.append(p)\n\n return chain\n\n def estimate_burn_in(self):\n # first get an estimate based on when the chain first reaches\n # the top 1% of log-probabilities\n prob_estimate = argmax(self.probs > percentile(self.probs, 99))\n\n # now we find the point at which the proposal width for each parameter\n # starts to deviate significantly from the current value\n width_estimates = []\n for p in self.params:\n vals = abs((array(p.sigma_values)[::-1] / p.sigma) - 1.0)\n chks = array(p.sigma_checks)[::-1]\n first_true = chks[argmax(vals > 0.15)]\n width_estimates.append(first_true)\n\n width_estimate = mean(width_estimates)\n return int(max(prob_estimate, width_estimate))\n\n def autoselect_burn(self):\n self.burn = self.estimate_burn_in()\n msg = \"[ burn-in set to {} | {:.1%} of total samples ]\".format(\n self.burn, self.burn / self.n\n )\n print(msg)\n\n def autoselect_thin(self):\n param_ESS = [ESS(array(self.get_parameter(i, thin=1))) for i in range(self.L)]\n self.thin = int((self.n - self.burn) / min(param_ESS))\n if self.thin < 1:\n self.thin = 1\n elif (self.n - self.burn) / self.thin < 1:\n self.thin = 1\n warn(\"Thinning not performed as lowest ESS is below 1\")\n elif (self.n - self.burn) / self.thin < 100:\n warn(\"Sample size after thinning is less than 100\")\n\n thin_size = len(self.probs[self.burn :: self.thin])\n print(\n f\"[ thinning factor set to {self.thin} | thinned sample size is {thin_size} ]\"\n )\n\n def autoselect_burn_and_thin(self):\n self.autoselect_burn()\n self.autoselect_thin()\n\n\nclass GibbsChain(MarkovChain):\n \"\"\"\n A class for sampling from distributions using Gibbs-sampling.\n\n In Gibbs sampling, each \"step\" in the chain consists of a series of 1D Metropolis-Hastings\n updates, one for each parameter, such that after each step all parameters have been adjusted.\n\n This allows Metropolis-Hastings update acceptance rate data to be collected independently for\n each parameter, thereby allowing the proposal width of each parameter to be tuned individually.\n\n :param func posterior: \\\n A function which takes the vector of model parameters as a ``numpy.ndarray``,\n and returns the posterior log-probability.\n\n :param start: \\\n Vector of model parameters which correspond to the parameter-space coordinates at which\n the chain will start.\n\n :param widths: \\\n Vector of standard deviations which serve as initial guesses for the widths of the proposal\n distribution for each model parameter. 
If not specified, the starting widths will be\n approximated as 5% of the values in 'start'.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(GibbsChain, self).__init__(*args, **kwargs)\n # we need to adjust the target acceptance rate to 50%\n # which is optimal for gibbs sampling:\n if hasattr(self, \"params\"):\n for p in self.params:\n p.target_rate = 0.5\n\n def take_step(self):\n \"\"\"\n Take a 1D metropolis-hastings step for each parameter\n \"\"\"\n p_old = self.probs[-1]\n prop = self.get_last()\n\n for i, p in enumerate(self.params):\n while True:\n prop[i] = p.proposal()\n p_new = self.posterior(prop) * self.inv_temp\n\n if p_new > p_old:\n # automatically accept step if the probability goes up\n p.submit_accept_prob(1.0)\n break\n else:\n # else calculate the acceptance probability and perform the test\n acceptance_prob = exp(p_new - p_old)\n p.submit_accept_prob(acceptance_prob)\n if random() < acceptance_prob:\n break\n\n p_old = deepcopy(p_new) # NOTE - is deepcopy needed?\n\n for v, p in zip(prop, self.params):\n p.add_sample(v)\n\n self.probs.append(p_new)\n self.n += 1\n\n\nclass PcaChain(MarkovChain):\n \"\"\"\n A class which performs Gibbs sampling over the eigenvectors of the covariance matrix.\n\n The PcaChain sampler uses 'principal component analysis' (PCA) to improve\n the performance of Gibbs sampling in cases where strong linear correlation\n exists between two or more variables in a problem.\n\n For an N-parameter problem, PcaChain produces a new sample by making N\n sequential 1D Metropolis-Hastings steps in the direction of each of the\n N eigenvectors of the NxN covariance matrix.\n\n As an initial guess the covariance matrix is taken to be diagonal, which results\n in standard gibbs sampling for the first samples in the chain. As the chain advances,\n the covariance matrix is periodically updated with an estimate derived from the sample\n itself, and the eigenvectors are re-calculated.\n\n :param func posterior: \\\n A function which takes the vector of model parameters as a ``numpy.ndarray``,\n and returns the posterior log-probability.\n\n :param start: \\\n Vector of model parameters which correspond to the parameter-space coordinates\n at which the chain will start.\n\n :param widths: \\\n Vector of standard deviations which serve as initial guesses for the widths of\n the proposal distribution for each model parameter. 
If not specified, the starting\n widths will be approximated as 5% of the values in 'start'.\n\n :param parameter_boundaries: \\\n A list of length-2 tuples specifying the lower and upper bounds to be set on each\n parameter, in the form (lower, upper).\n \"\"\"\n\n def __init__(self, *args, parameter_boundaries=None, **kwargs):\n super(PcaChain, self).__init__(*args, **kwargs)\n # we need to adjust the target acceptance rate to 50%\n # which is optimal for gibbs sampling:\n if hasattr(self, \"params\"):\n for p in self.params:\n p.target_rate = 0.5\n\n self.directions = []\n if hasattr(self, \"L\"):\n for i in range(self.L):\n v = zeros(self.L)\n v[i] = 1.0\n self.directions.append(v)\n\n # PCA update settings\n self.dir_update_interval = 100\n self.dir_growth_factor = 1.5\n self.last_update = 0\n self.next_update = copy(self.dir_update_interval)\n\n # PCA convergence tracking\n self.angles_history = []\n self.update_history = []\n\n # Set-up for imposing boundaries if specified\n if parameter_boundaries is not None:\n if len(parameter_boundaries) == self.L:\n self.lower = array([k[0] for k in parameter_boundaries])\n self.upper = array([k[1] for k in parameter_boundaries])\n self.width = self.upper - self.lower\n self.process_proposal = self.impose_boundaries\n else:\n warn(\n \"\"\"\n # parameter_boundaries keyword error #\n The number of given lower/upper bounds pairs does not match\n the number of model parameters - bounds were not imposed.\n \"\"\"\n )\n else:\n self.process_proposal = self.pass_through\n\n def update_directions(self):\n # re-estimate the covariance and find its eigenvectors\n data = array([self.get_parameter(i)[self.last_update :] for i in range(self.L)])\n if hasattr(self, \"covar\"):\n nu = min(2 * self.dir_update_interval / self.last_update, 0.5)\n self.covar = self.covar * (1 - nu) + nu * cov(data)\n else:\n self.covar = cov(data)\n\n w, V = eigh(self.covar)\n\n # find the sine of the angle between the old and new eigenvectors to track convergence\n angles = [\n sqrt(1.0 - dot(V[:, i], self.directions[i]) ** 2) for i in range(self.L)\n ]\n self.angles_history.append(angles)\n self.update_history.append(copy(self.n))\n\n # store the new directions and plan the next update\n self.directions = [V[:, i] for i in range(self.L)]\n self.last_update = copy(self.n)\n self.dir_update_interval = int(\n self.dir_update_interval * self.dir_growth_factor\n )\n self.next_update = self.last_update + self.dir_update_interval\n\n def directions_diagnostics(self):\n for i in range(self.L):\n prods = [v[i] for v in self.angles_history]\n plt.plot(self.update_history, prods, \".-\")\n plt.plot(\n [self.update_history[0], self.update_history[-1]],\n [1e-2, 1e-2],\n ls=\"dashed\",\n c=\"black\",\n lw=2,\n )\n plt.yscale(\"log\")\n plt.ylim([1e-4, 1.0])\n plt.xlim([0, self.update_history[-1]])\n\n plt.ylabel(r\"$|\\sin{(\\Delta \\theta)}|$\", fontsize=13)\n plt.xlabel(r\"update step number\", fontsize=13)\n\n plt.grid()\n plt.tight_layout()\n plt.show()\n\n def take_step(self):\n \"\"\"\n Take a Metropolis-Hastings step along each principal component\n \"\"\"\n p_old = self.probs[-1]\n theta0 = self.get_last()\n # loop over each eigenvector and take a step along each\n for v, p in zip(self.directions, self.params):\n while True:\n prop = theta0 + v * p.sigma * normal()\n prop = self.process_proposal(prop)\n p_new = self.posterior(prop) * self.inv_temp\n\n if p_new > p_old:\n p.submit_accept_prob(1.0)\n break\n else:\n test = random()\n acceptance_prob = exp(p_new - p_old)\n 
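# (added note, descriptive only): the acceptance probability is reported to the\n # Parameter object below so that the step size along this eigenvector is tuned\n # towards the 50% target rate set in __init__, after which the usual\n # Metropolis-Hastings accept/reject test is applied\n 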
p.submit_accept_prob(acceptance_prob)\n if test < acceptance_prob:\n break\n\n theta0 = copy(prop)\n p_old = copy(p_new)\n\n # add the new value for each parameter\n for v, p in zip(theta0, self.params):\n p.add_sample(v)\n\n self.probs.append(p_new)\n self.n += 1\n\n if self.n == self.next_update:\n self.update_directions()\n\n def save(self, filename):\n \"\"\"\n Save the entire state of the chain object as an .npz file.\n\n :param str filename: file path to which the chain will be saved.\n \"\"\"\n # get the chain attributes\n items = {\n \"n\": self.n,\n \"L\": self.L,\n \"probs\": self.probs,\n \"burn\": self.burn,\n \"thin\": self.thin,\n \"inv_temp\": self.inv_temp,\n \"print_status\": self.print_status,\n \"dir_update_interval\": self.dir_update_interval,\n \"dir_growth_factor\": self.dir_growth_factor,\n \"last_update\": self.last_update,\n \"next_update\": self.next_update,\n \"angles_history\": array(self.angles_history),\n \"update_history\": array(self.update_history),\n \"directions\": array(self.directions),\n \"covar\": self.covar,\n }\n\n # get the parameter attributes\n for i, p in enumerate(self.params):\n items.update(p.get_items(param_id=i))\n\n # save as npz\n savez(filename, **items)\n\n @classmethod\n def load(cls, filename, posterior=None):\n \"\"\"\n Load a chain object which has been previously saved using the save() method.\n\n :param str filename: file path of the .npz file containing the chain object data.\n :param posterior: The posterior which was sampled by the chain. This argument need \\\n only be specified if new samples are to be added to the chain.\n \"\"\"\n # load the data and create a chain instance\n D = load(filename)\n chain = cls(posterior=posterior)\n\n # re-build the chain's attributes\n chain.n = int(D[\"n\"])\n chain.L = int(D[\"L\"])\n chain.probs = list(D[\"probs\"])\n chain.burn = int(D[\"burn\"])\n chain.thin = int(D[\"thin\"])\n chain.inv_temp = float(D[\"inv_temp\"])\n chain.print_status = bool(D[\"print_status\"])\n chain.dir_update_interval = int(D[\"dir_update_interval\"])\n chain.dir_growth_factor = float(D[\"dir_growth_factor\"])\n chain.last_update = int(D[\"last_update\"])\n chain.next_update = int(D[\"next_update\"])\n chain.angles_history = [\n D[\"angles_history\"][i, :] for i in range(D[\"angles_history\"].shape[0])\n ]\n chain.update_history = list(D[\"update_history\"])\n chain.directions = [\n D[\"directions\"][i, :] for i in range(D[\"directions\"].shape[0])\n ]\n chain.covar = D[\"covar\"]\n\n # re-build all the parameter objects\n chain.params = []\n for i in range(chain.L):\n p = Parameter()\n p.load_items(dictionary=D, param_id=i)\n chain.params.append(p)\n return chain\n\n def set_non_negative(self, *args, **kwargs):\n warn(\n \"\"\"\n The set_non_negative method is not available for PcaChain:\n Limits on parameters should instead be set using\n the parameter_boundaries keyword argument.\n \"\"\"\n )\n\n def set_boundaries(self, *args, **kwargs):\n warn(\n \"\"\"\n The set_boundaries method is not available for PcaChain:\n Limits on parameters should instead be set using\n the parameter_boundaries keyword argument.\n \"\"\"\n )\n\n def impose_boundaries(self, prop):\n d = prop - self.lower\n n = (d // self.width) % 2\n return self.lower + (1 - 2 * n) * (d % self.width) + n * self.width\n\n def pass_through(self, prop):\n return prop\n\n\nclass HamiltonianChain(MarkovChain):\n \"\"\"\n Class for performing Hamiltonian Monte-Carlo sampling.\n\n Hamiltonian Monte-Carlo (HMC) is an MCMC algorithm where proposed steps 
are generated\n by integrating Hamilton’s equations, treating the negative posterior log-probability\n as a scalar potential. In order to do this, the algorithm requires the gradient of\n the log-posterior with respect to the model parameters. Assuming this gradient can be\n calculated efficiently, HMC deals well with strongly correlated variables and scales\n favourably to higher-dimensionality problems.\n\n This implementation automatically selects an appropriate time-step for the Hamiltonian\n dynamics simulation, but currently does not dynamically select the number of time-steps\n per proposal, or appropriate inverse-mass values.\n\n :param func posterior: \\\n A function which takes the vector of model parameters as a ``numpy.ndarray``,\n and returns the posterior log-probability.\n\n :param func grad: \\\n A function which returns the gradient of the log-posterior probability density\n for a given set of model parameters theta. If this function is not given, the\n gradient will instead be estimated by finite difference.\n\n :param start: \\\n Vector of model parameters which correspond to the parameter-space coordinates\n at which the chain will start.\n\n :param float epsilon: \\\n Initial guess for the time-step of the Hamiltonian dynamics simulation.\n\n :param float temperature: \\\n The temperature of the markov chain. This parameter is used for parallel\n tempering and should be otherwise left unspecified.\n\n :param bounds: \\\n A list or tuple containing two numpy arrays which specify the upper and lower\n bounds for the parameters in the form (lower_bounds, upper_bounds).\n\n :param inv_mass: \\\n A vector specifying the inverse-mass value to be used for each parameter. The\n inverse-mass is used to transform the momentum distribution in order to make\n the problem more isotropic. 
Ideally, the inverse-mass for each parameter should\n be set to the variance of the marginal distribution of that parameter.\n \"\"\"\n\n def __init__(\n self,\n posterior=None,\n grad=None,\n start=None,\n epsilon=0.1,\n temperature=1,\n bounds=None,\n inv_mass=None,\n ):\n\n self.posterior = posterior\n # if no gradient function is supplied, default to finite difference\n if grad is None:\n self.grad = self.finite_diff\n else:\n self.grad = grad\n\n # set either the bounded or unbounded leapfrog update\n if bounds is None:\n self.leapfrog = self.standard_leapfrog\n self.bounded = False\n self.lwr_bounds = None\n self.upr_bounds = None\n self.widths = None\n else:\n self.leapfrog = self.bounded_leapfrog\n self.bounded = True\n self.lwr_bounds = array(bounds[0])\n self.upr_bounds = array(bounds[1])\n if any((self.lwr_bounds > array(start)) | (self.upr_bounds < array(start))):\n raise ValueError(\n \"starting location for the chain is outside specified bounds\"\n )\n self.widths = self.upr_bounds - self.lwr_bounds\n if not all(self.widths > 0):\n raise ValueError(\n \"specified upper bounds must be greater than lower bounds\"\n )\n\n self.temperature = temperature\n self.inv_temp = 1.0 / temperature\n\n if start is not None:\n self.theta = [start]\n self.probs = [self.posterior(start) * self.inv_temp]\n self.leapfrog_steps = [0]\n self.L = len(start)\n self.n = 1\n\n # set the variance to 1 if none supplied\n if inv_mass is None:\n self.variance = 1.0\n else:\n self.variance = inv_mass\n\n self.ES = EpsilonSelector(epsilon)\n self.steps = 50\n self.burn = 1\n self.thin = 1\n\n self.print_status = True\n\n def take_step(self):\n \"\"\"\n Takes the next step in the HMC-chain\n \"\"\"\n accept = False\n steps_taken = 0\n while not accept:\n r0 = normal(size=self.L) / sqrt(self.variance)\n t0 = self.theta[-1]\n H0 = 0.5 * dot(r0, r0 / self.variance) - self.probs[-1]\n\n r = copy(r0)\n t = copy(t0)\n g = self.grad(t) * self.inv_temp\n n_steps = int(self.steps * (1 + (random() - 0.5) * 0.2))\n\n t, r, g = self.run_leapfrog(t, r, g, n_steps)\n\n steps_taken += n_steps\n p = self.posterior(t) * self.inv_temp\n H = 0.5 * dot(r, r / self.variance) - p\n test = exp(H0 - H)\n\n if isfinite(test):\n self.ES.add_probability(min(test, 1))\n else:\n self.ES.add_probability(0.0)\n\n if test >= 1:\n accept = True\n else:\n q = random()\n if q <= test:\n accept = True\n\n self.theta.append(t)\n self.probs.append(p)\n self.leapfrog_steps.append(steps_taken)\n self.n += 1\n\n def run_leapfrog(self, t, r, g, L):\n for i in range(L):\n t, r, g = self.leapfrog(t, r, g)\n return t, r, g\n\n def hamiltonian(self, t, r):\n return 0.5 * dot(r, r / self.variance) - self.posterior(t) * self.inv_temp\n\n def estimate_mass(self, burn=1, thin=1):\n self.variance = var(array(self.theta[burn::thin]), axis=0)\n\n def finite_diff(self, t):\n p = self.posterior(t) * self.inv_temp\n G = zeros(self.L)\n for i in range(self.L):\n delta = zeros(self.L) + 1\n delta[i] += 1e-5\n G[i] = (self.posterior(t * delta) * self.inv_temp - p) / (t[i] * 1e-5)\n return G\n\n def standard_leapfrog(self, t, r, g):\n r2 = r + (0.5 * self.ES.epsilon) * g\n t2 = t + self.ES.epsilon * r2 * self.variance\n\n g = self.grad(t2) * self.inv_temp\n r2 = r2 + (0.5 * self.ES.epsilon) * g\n return t2, r2, g\n\n def bounded_leapfrog(self, t, r, g):\n r2 = r + (0.5 * self.ES.epsilon) * g\n t2 = t + self.ES.epsilon * r2 * self.variance\n # check for values outside bounds\n lwr_diff = self.lwr_bounds - t2\n upr_diff = t2 - self.upr_bounds\n lwr_bools = lwr_diff > 
0\n upr_bools = upr_diff > 0\n # calculate necessary adjustment\n lwr_adjust = lwr_bools * (lwr_diff + lwr_diff % (0.1 * self.widths))\n upr_adjust = upr_bools * (upr_diff + upr_diff % (0.1 * self.widths))\n t2 += lwr_adjust\n t2 -= upr_adjust\n\n # reverse momenta where necessary\n reflect = 1 - 2 * (lwr_bools | upr_bools)\n r2 *= reflect\n\n g = self.grad(t2) * self.inv_temp\n r2 = r2 + (0.5 * self.ES.epsilon) * g\n return t2, r2, g\n\n def get_last(self):\n return self.theta[-1]\n\n def replace_last(self, theta):\n self.theta[-1] = theta\n\n def get_parameter(self, n, burn=None, thin=None):\n \"\"\"\n Return sample values for a chosen parameter.\n\n :param int n: \\\n Index of the parameter for which samples are to be returned.\n\n :param int burn: \\\n Number of samples to discard from the start of the chain. If not specified, the\n value of self.burn is used instead.\n\n :param int thin: \\\n Instead of returning every sample which is not discarded as part of the burn-in,\n every *m*'th sample is returned for a specified integer *m*. If not specified,\n the value of self.thin is used instead.\n\n :return: \\\n List of samples for parameter *n*'th parameter.\n \"\"\"\n if burn is None:\n burn = self.burn\n if thin is None:\n thin = self.thin\n return [v[n] for v in self.theta[burn::thin]]\n\n def plot_diagnostics(self, show=True, filename=None, burn=None):\n \"\"\"\n Plot diagnostic traces that give information on how the chain is progressing.\n\n Currently this method plots:\n\n - The posterior log-probability as a function of step number, which is useful\n for checking if the chain has reached a maximum. Any early parts of the chain\n where the probability is rising rapidly should be removed as burn-in.\n\n - The history of the simulation step-size epsilon as a function of number of\n total proposed jumps.\n\n - The estimated sample size (ESS) for every parameter, or in cases where the\n number of parameters is very large, a histogram of the ESS values.\n\n :param bool show: \\\n If set to True, the plot is displayed.\n\n :param str filename: \\\n File path to which the diagnostics plot will be saved. 
If left unspecified\n the plot won't be saved.\n \"\"\"\n if burn is None:\n burn = self.estimate_burn_in()\n param_ESS = [\n ESS(array(self.get_parameter(i, burn=burn, thin=1))) for i in range(self.L)\n ]\n\n fig = plt.figure(figsize=(12, 9))\n\n # probability history plot\n ax1 = fig.add_subplot(221)\n step_ax = [\n i * 1e-3 for i in range(len(self.probs))\n ] # TODO - avoid making this axis but preserve figure form\n ax1.plot(step_ax, self.probs, marker=\".\", ls=\"none\", markersize=3)\n ax1.set_xlabel(\"chain step number ($10^3$)\", fontsize=12)\n ax1.set_ylabel(\"log posterior probability\", fontsize=12)\n ax1.set_title(\"Chain log-probability history\")\n ylims = [\n min(self.probs[self.n // 2 :]),\n max(self.probs) * 1.1 - 0.1 * min(self.probs[self.n // 2 :]),\n ]\n plt.plot([burn * 1e-3, burn * 1e-3], ylims, c=\"red\", ls=\"dashed\", lw=2)\n ax1.set_ylim(ylims)\n ax1.grid()\n\n # epsilon plot\n ax2 = fig.add_subplot(222)\n ax2.plot(array(self.ES.epsilon_checks) * 1e-3, self.ES.epsilon_values, \".-\")\n ax2.set_xlabel(\"chain step number ($10^3$)\", fontsize=12)\n ax2.set_ylabel(\"Leapfrog step-size\", fontsize=12)\n ax2.set_title(\"Simulation time-step adjustment summary\")\n ax2.grid()\n\n ax3 = fig.add_subplot(223)\n if self.L < 50:\n ax3.bar(range(self.L), param_ESS, color=[\"C0\", \"C1\", \"C2\", \"C3\", \"C4\"])\n ax3.set_xlabel(\"parameter\", fontsize=12)\n ax3.set_ylabel(\"effective sample size\", fontsize=12)\n ax3.set_title(\"Parameter effective sample size estimate\")\n ax3.set_xticks(range(self.L))\n else:\n ax3.hist(param_ESS, bins=20)\n ax3.set_xlabel(\"effective sample size\", fontsize=12)\n ax3.set_ylabel(\"frequency\", fontsize=12)\n ax3.set_title(\"Parameter effective sample size estimates\")\n\n ax4 = fig.add_subplot(224)\n gap = 0.1\n h = 0.85\n x1 = 0.5\n x2 = 0.55\n fntsiz = 14\n\n ax4.text(\n x1, h, \"Estimated burn-in:\", horizontalalignment=\"right\", fontsize=fntsiz\n )\n ax4.text(\n x2, h, \"{:.5G}\".format(burn), horizontalalignment=\"left\", fontsize=fntsiz\n )\n h -= gap\n ax4.text(x1, h, \"Average ESS:\", horizontalalignment=\"right\", fontsize=fntsiz)\n ax4.text(\n x2,\n h,\n \"{:.5G}\".format(int(mean(param_ESS))),\n horizontalalignment=\"left\",\n fontsize=fntsiz,\n )\n h -= gap\n ax4.text(x1, h, \"Lowest ESS:\", horizontalalignment=\"right\", fontsize=fntsiz)\n ax4.text(\n x2,\n h,\n \"{:.5G}\".format(int(min(param_ESS))),\n horizontalalignment=\"left\",\n fontsize=fntsiz,\n )\n ax4.axis(\"off\")\n\n plt.tight_layout()\n if filename is not None:\n plt.savefig(filename)\n if show:\n plt.show()\n else:\n fig.clear()\n plt.close(fig)\n\n def get_sample(self, burn=None, thin=None):\n raise ValueError(\"This method is not available for HamiltonianChain\")\n\n def get_interval(self, interval=None, burn=None, thin=None, samples=None):\n raise ValueError(\"This method is not available for HamiltonianChain\")\n\n def mode(self):\n return self.theta[argmax(self.probs)]\n\n def estimate_burn_in(self):\n # first get an estimate based on when the chain first reaches\n # the top 1% of log-probabilities\n prob_estimate = argmax(self.probs > percentile(self.probs, 99))\n # now we find the point at which the proposal width for each parameter\n # starts to deviate significantly from the current value\n epsl = abs((array(self.ES.epsilon_values)[::-1] / self.ES.epsilon) - 1.0)\n chks = array(self.ES.epsilon_checks)[::-1]\n epsl_estimate = chks[argmax(epsl > 0.15)] * self.ES.accept_rate\n return int(min(max(prob_estimate, epsl_estimate), 0.9 * self.n))\n\n def 
save(self, filename, compressed=False):\n items = {\n \"bounded\": self.bounded,\n \"lwr_bounds\": self.lwr_bounds,\n \"upr_bounds\": self.upr_bounds,\n \"widths\": self.widths,\n \"inv_mass\": self.variance,\n \"inv_temp\": self.inv_temp,\n \"theta\": self.theta,\n \"probs\": self.probs,\n \"leapfrog_steps\": self.leapfrog_steps,\n \"L\": self.L,\n \"n\": self.n,\n \"steps\": self.steps,\n \"burn\": self.burn,\n \"thin\": self.thin,\n \"print_status\": self.print_status,\n }\n\n items.update(self.ES.get_items())\n\n # save as npz\n if compressed:\n savez_compressed(filename, **items)\n else:\n savez(filename, **items)\n\n @classmethod\n def load(cls, filename, posterior=None, grad=None):\n D = load(filename)\n chain = cls(posterior=posterior, grad=grad)\n\n chain.bounded = bool(D[\"bounded\"])\n chain.variance = array(D[\"inv_mass\"])\n chain.inv_temp = float(D[\"inv_temp\"])\n chain.temperature = 1.0 / chain.inv_temp\n chain.probs = list(D[\"probs\"])\n chain.leapfrog_steps = list(D[\"leapfrog_steps\"])\n chain.L = int(D[\"L\"])\n chain.n = int(D[\"n\"])\n chain.steps = int(D[\"steps\"])\n chain.burn = int(D[\"burn\"])\n chain.thin = int(D[\"thin\"])\n chain.print_status = bool(D[\"print_status\"])\n chain.n = int(D[\"n\"])\n\n t = D[\"theta\"]\n chain.theta = [t[i, :] for i in range(t.shape[0])]\n\n if chain.bounded:\n chain.lwr_bounds = array(D[\"lwr_bounds\"])\n chain.upr_bounds = array(D[\"upr_bounds\"])\n chain.widths = array(D[\"widths\"])\n\n # build the epsilon selector\n chain.ES.load_items(D)\n\n return chain\n\n\nclass EpsilonSelector(object):\n def __init__(self, epsilon):\n\n # storage\n self.epsilon = epsilon\n self.epsilon_values = [copy(epsilon)] # sigma values after each assessment\n self.epsilon_checks = [0.0] # chain locations at which sigma was assessed\n\n # tracking variables\n self.avg = 0\n self.var = 0\n self.num = 0\n\n # settings for epsilon adjustment algorithm\n self.accept_rate = 0.65\n self.chk_int = 15 # interval of steps at which proposal widths are adjusted\n self.growth_factor = (\n 1.4 # factor by which self.chk_int grows when sigma is modified\n )\n\n def add_probability(self, p):\n self.num += 1\n self.avg += p\n self.var += max(p * (1 - p), 0.03)\n\n if self.num >= self.chk_int:\n self.update_epsilon()\n\n def update_epsilon(self):\n \"\"\"\n looks at average tries over recent steps, and adjusts proposal\n widths self.sigma to bring the average towards self.target_tries.\n \"\"\"\n # normal approximation of poisson binomial distribution\n mu = self.avg / self.num\n std = sqrt(self.var) / self.num\n\n # now check if the desired success rate is within 2-sigma\n if ~(mu - 2 * std < self.accept_rate < mu + 2 * std):\n adj = (log(self.accept_rate) / log(mu)) ** 0.15\n adj = min(adj, 2.0)\n adj = max(adj, 0.5)\n self.adjust_epsilon(adj)\n else: # increase the check interval\n self.chk_int = int((self.growth_factor * self.chk_int) * 0.1) * 10\n\n def adjust_epsilon(self, ratio):\n self.epsilon *= ratio\n self.epsilon_values.append(copy(self.epsilon))\n self.epsilon_checks.append(self.epsilon_checks[-1] + self.num)\n self.avg = 0\n self.var = 0\n self.num = 0\n\n def get_items(self):\n return self.__dict__\n\n def load_items(self, dictionary):\n self.epsilon = float(dictionary[\"epsilon\"])\n self.epsilon_values = list(dictionary[\"epsilon_values\"])\n self.epsilon_checks = list(dictionary[\"epsilon_checks\"])\n self.avg = float(dictionary[\"avg\"])\n self.var = float(dictionary[\"var\"])\n self.num = float(dictionary[\"num\"])\n self.accept_rate = 
float(dictionary[\"accept_rate\"])\n self.chk_int = int(dictionary[\"chk_int\"])\n self.growth_factor = float(dictionary[\"growth_factor\"])\n\n\nclass ChainPool(object):\n def __init__(self, objects):\n self.chains = objects\n self.pool_size = len(self.chains)\n self.pool = Pool(self.pool_size)\n\n def advance(self, n):\n self.chains = self.pool.map(\n self.adv_func, [(n, chain) for chain in self.chains]\n )\n\n @staticmethod\n def adv_func(arg):\n n, chain = arg\n for _ in range(n):\n chain.take_step()\n return chain\n\n\ndef tempering_process(chain, connection, end, proc_seed):\n # used to ensure each process has a different random seed\n seed(proc_seed)\n # main loop\n while not end.is_set():\n # poll the pipe until there is something to read\n while not end.is_set():\n if connection.poll(timeout=0.05):\n D = connection.recv()\n break\n\n # if read loop was broken because of shutdown event\n # then break the main loop as well\n if end.is_set():\n break\n\n task = D[\"task\"]\n\n # advance the chain\n if task == \"advance\":\n for _ in range(D[\"advance_count\"]):\n chain.take_step()\n connection.send(\"advance_complete\") # send signal to confirm completion\n\n # return the current position of the chain\n elif task == \"send_position\":\n connection.send((chain.get_last(), chain.probs[-1]))\n\n # update the position of the chain\n elif task == \"update_position\":\n chain.replace_last(D[\"position\"])\n chain.probs[-1] = D[\"probability\"] * chain.inv_temp\n\n # return the local chain object\n elif task == \"send_chain\":\n connection.send(chain)\n\n\nclass ParallelTempering(object):\n \"\"\"\n A class which enables 'parallel tempering', a sampling algorithm which\n advances multiple Markov-chains in parallel, each with a different\n 'temperature', with a probability that the chains will exchange their\n positions during the advancement.\n\n The 'temperature' concept introduces a transformation to the distribution\n being sampled, such that a chain with temperature 'T' instead samples from\n the provided posterior distribution raised to the power 1/T.\n\n When T = 1, the original distribution is recovered, but choosing T > 1 has\n the effect of 'compressing' the distribution, such that any two points having\n different probability densities will have the difference between those densities\n reduced as the temperature is increased. This allows chains with higher\n temperatures to take much larger steps, and explore the distribution more\n quickly.\n\n Parallel tempering exploits this by advancing a collection of markov-chains at\n different temperatures, with at least one chain at T = 1 (i.e. sampling from\n the actual posterior distribution). At regular intervals, pairs of chains are\n selected at random and a metropolis-hastings test is performed to decide if\n the pair exchange their positions.\n\n The ability for the T = 1 chain to exchange positions with chains of higher\n temperatures allows it to make large jumps to other areas of the distribution\n which it may take a large number of steps to reach otherwise.\n\n This is particularly useful when sampling from highly-complex distributions\n which may have many separate maxima and/or strong correlations.\n\n :param chains: \\\n A list of Markov-Chain objects (such as GibbsChain, PcaChain, HamiltonianChain)\n covering a range of different temperature levels. 
The list of chains should be\n sorted in order of increasing chain temperature.\n \"\"\"\n\n def __init__(self, chains):\n self.shutdown_evt = Event()\n self.connections = []\n self.processes = []\n self.temperatures = [1.0 / chain.inv_temp for chain in chains]\n self.inv_temps = [chain.inv_temp for chain in chains]\n self.N_chains = len(chains)\n\n self.attempted_swaps = identity(self.N_chains)\n self.successful_swaps = zeros([self.N_chains, self.N_chains])\n\n if sorted(self.temperatures) != self.temperatures:\n warn(\n \"\"\"\n The list of Markov-chain objects passed to ParallelTempering\n should be sorted in order of increasing chain temperature.\n \"\"\"\n )\n\n # Spawn a separate process for each chain object\n for chn in chains:\n parent_ctn, child_ctn = Pipe()\n self.connections.append(parent_ctn)\n p = Process(\n target=tempering_process,\n args=(chn, child_ctn, self.shutdown_evt, randint(30000)),\n )\n self.processes.append(p)\n\n [p.start() for p in self.processes]\n\n def take_steps(self, n):\n \"\"\"\n Advance all the chains *n* steps without performing any swaps.\n\n :param int n: The number of steps by which every chain is advanced.\n \"\"\"\n # order the chains to advance n steps\n D = {\"task\": \"advance\", \"advance_count\": n}\n for pipe in self.connections:\n pipe.send(D)\n\n # block until all chains report successful advancement\n responses = [pipe.recv() == \"advance_complete\" for pipe in self.connections]\n if not all(responses):\n raise ValueError(\"Unexpected data received from pipe\")\n\n def uniform_pairs(self):\n \"\"\"\n Randomly pair up each chain, with uniform sampling across all possible pairings\n \"\"\"\n proposed_swaps = arange(self.N_chains)\n shuffle(proposed_swaps)\n return [p for p in zip(proposed_swaps[::2], proposed_swaps[1::2])]\n\n def tight_pairs(self):\n \"\"\"\n Randomly pair up each chain, with almost all paired chains being separated\n by either 1 or 2 temperature levels.\n \"\"\"\n # first generate all possible pairings with a gap of 2 or less\n pairs = [(i, i + j) for i in range(self.N_chains - 1) for j in [1, 2]][:-1]\n sample = []\n # randomly sample from these pairings until no valid pairs remain\n while len(pairs) > 0:\n p = choice(pairs)\n pairs = [k for k in pairs if not any(j in k for j in p)]\n sample.append(p)\n # if there are still some pairs which haven't been paired, randomly pair the remaining ones\n remaining = len(sample) - self.N_chains // 2\n if remaining != 0:\n leftovers = [\n i for i in range(self.N_chains) if not any(i in p for p in sample)\n ]\n shuffle(leftovers)\n sample.extend(\n [\n p if p[0] < p[1] else (p[1], p[0])\n for p in zip(leftovers[::2], leftovers[1::2])\n ]\n )\n return sample\n\n def swap(self):\n \"\"\"\n Randomly group all chains into pairs and propose a position swap between each pair.\n \"\"\"\n # ask each process to report the current position of its chain\n D = {\"task\": \"send_position\"}\n [pipe.send(D) for pipe in self.connections]\n\n # receive the positions and probabilities\n data = [pipe.recv() for pipe in self.connections]\n positions = [k[0] for k in data]\n probabilities = [k[1] for k in data]\n\n # randomly pair up indices for all the processes\n proposed_swaps = self.tight_pairs()\n\n # perform MH tests to see if the swaps occur or not\n for pair in proposed_swaps:\n self.attempted_swaps[pair] += 1\n\n for i, j in proposed_swaps:\n dt = self.inv_temps[i] - self.inv_temps[j]\n pi = probabilities[i] / self.inv_temps[i]\n pj = probabilities[j] / self.inv_temps[j]\n dp = pi - pj\n\n if 
random() <= exp(-dt * dp): # check if the swap is successful\n Di = {\n \"task\": \"update_position\",\n \"position\": positions[i],\n \"probability\": pi,\n }\n\n Dj = {\n \"task\": \"update_position\",\n \"position\": positions[j],\n \"probability\": pj,\n }\n\n self.connections[i].send(Dj)\n self.connections[j].send(Di)\n self.successful_swaps[i, j] += 1\n\n def advance(self, n, swap_interval=10):\n \"\"\"\n Advances each chain by a total of *n* steps, performing swap attempts\n at intervals set by the *swap_interval* keyword.\n\n :param int n: The number of steps each chain will advance.\n :param int swap_interval: \\\n The number of steps that are taken in each chain between swap attempts.\n \"\"\"\n k = 50 # divide chain steps into k groups to track progress\n total_cycles = n // swap_interval\n if k < total_cycles:\n k = total_cycles\n cycles = 1\n else:\n cycles = total_cycles // k\n\n t_start = time()\n for j in range(k):\n for i in range(cycles):\n self.take_steps(swap_interval)\n self.swap()\n\n dt = time() - t_start\n\n # display the progress status message\n pct = str(int(100 * (j + 1) / k))\n eta = str(int(dt * (k / (j + 1) - 1)))\n sys.stdout.write(\n f\"\\r [ Running ParallelTempering - {pct}% complete ETA: {eta} sec ] \"\n )\n sys.stdout.flush()\n\n # run the remaining cycles\n if total_cycles % k != 0:\n for i in range(total_cycles % k):\n self.take_steps(swap_interval)\n self.swap()\n\n # run remaining steps\n if n % swap_interval != 0:\n self.take_steps(n % swap_interval)\n\n # print the completion message\n sys.stdout.write(\n \"\\r [ Running ParallelTempering - complete! ] \"\n )\n sys.stdout.flush()\n sys.stdout.write(\"\\n\")\n\n def run_for(self, minutes=0, hours=0, swap_interval=10):\n \"\"\"\n Advances all chains for a chosen amount of computation time.\n\n :param float minutes: Number of minutes for which to advance the chains.\n :param float hours: Number of hours for which to advance the chains.\n :param int swap_interval: \\\n The number of steps that are taken in each chain between swap attempts.\n \"\"\"\n # first find the runtime in seconds:\n run_time = (hours * 60.0 + minutes) * 60.0\n start_time = time()\n end_time = start_time + run_time\n\n # estimate how long it takes to do one swap cycle\n t1 = time()\n self.take_steps(swap_interval)\n self.swap()\n t2 = time()\n\n # number of cycles chosen to give a print-out roughly every 2 seconds\n N = max(1, int(2.0 / (t2 - t1)))\n\n while time() < end_time:\n for i in range(N):\n self.take_steps(swap_interval)\n self.swap()\n\n # display the progress status message\n seconds_remaining = end_time - time()\n m, s = divmod(seconds_remaining, 60)\n h, m = divmod(m, 60)\n time_left = \"%d:%02d:%02d\" % (h, m, s)\n sys.stdout.write(\n f\"\\r [ Running ParallelTempering - time remaining: {time_left} ] \"\n )\n sys.stdout.flush()\n\n # this is a little ugly...\n sys.stdout.write(\n \"\\r [ Running ParallelTempering - complete! ] \"\n )\n sys.stdout.flush()\n sys.stdout.write(\"\\n\")\n\n def swap_diagnostics(self):\n \"\"\"\n Plot the acceptance rates of proposed position swaps between the\n different chains. 
This can be useful in selecting appropriate temperatures\n for the chains.\n \"\"\"\n rate_matrix = self.successful_swaps / self.attempted_swaps.clip(min=1)\n\n pairs = [\n (i, i + j)\n for j in range(1, self.N_chains)\n for i in range(self.N_chains - j)\n ]\n total_swaps = zeros(self.N_chains)\n for i, j in pairs:\n total_swaps[i] += self.successful_swaps[i, j]\n total_swaps[j] += self.successful_swaps[i, j]\n\n fig = plt.figure(figsize=(10, 5))\n ax1 = fig.add_subplot(121)\n transition_matrix_plot(\n ax=ax1, matrix=rate_matrix, exclude_diagonal=True, upper_triangular=True\n )\n ax1.set_xlabel(\"chain number\")\n ax1.set_ylabel(\"chain number\")\n ax1.set_title(\"acceptance rate of chain position swaps\")\n\n ax2 = fig.add_subplot(122)\n ax2.bar([i for i in range(1, self.N_chains + 1)], total_swaps)\n ax2.set_ylim([0, None])\n ax2.set_xlabel(\"chain number\")\n ax2.set_ylabel(\"total successful position swaps\")\n\n plt.tight_layout()\n plt.show()\n\n def return_chains(self):\n \"\"\"\n Recover the chain held by each process and return them in a list.\n\n :return: A list containing the chain objects.\n \"\"\"\n # order each process to return its locally stored chain object\n request = {\"task\": \"send_chain\"}\n for pipe in self.connections:\n pipe.send(request)\n\n # receive the chains and return them\n return [pipe.recv() for pipe in self.connections]\n\n def shutdown(self):\n \"\"\"\n Trigger a shutdown event which tells the processes holding each of\n the chains to terminate.\n \"\"\"\n self.shutdown_evt.set()\n [p.join() for p in self.processes]\n\n\nclass EnsembleSampler(object):\n def __init__(self, posterior=None, starting_positions=None, alpha=2.0, bounds=None):\n self.posterior = posterior\n\n if starting_positions is not None:\n # store core data\n self.N_params = len(starting_positions[0])\n self.N_walkers = len(starting_positions)\n self.theta = zeros([self.N_walkers, self.N_params])\n for i, v in enumerate(starting_positions):\n self.theta[i, :] = array(v)\n self.probs = array([self.posterior(t) for t in self.theta])\n\n # storage for diagnostic information\n self.L = 1 # total number of steps taken\n self.total_proposals = [[1.0] for i in range(self.N_walkers)]\n self.means = []\n self.std_devs = []\n self.prob_means = []\n self.prob_devs = []\n self.update_summary_stats()\n\n if bounds is not None:\n if len(bounds) == self.N_params:\n self.bounded = True\n self.lower = array([k[0] for k in bounds])\n self.upper = array([k[1] for k in bounds])\n self.width = self.upper - self.lower\n self.process_proposal = self.impose_boundaries\n else:\n warn(\n \"\"\"\n # 'bounds' keyword error #\n The number of given lower/upper bounds pairs does not match\n the number of model parameters - bounds were not imposed.\n \"\"\"\n )\n else:\n self.process_proposal = self.pass_through\n self.bounded = False\n\n # proposal settings\n self.a = alpha\n self.z_lwr = 1.0 / self.a\n self.z_upr = self.a - self.z_lwr\n\n self.max_attempts = 100\n\n def update_summary_stats(self):\n mu = mean(self.theta, axis=0)\n devs = sqrt(mean(self.theta**2, axis=0) - mu**2)\n\n self.means.append(mu)\n self.std_devs.append(devs)\n p_mu = mean(self.probs)\n self.prob_means.append(p_mu)\n self.prob_devs.append(sqrt(mean(self.probs**2) - p_mu**2))\n\n def proposal(self, i):\n j = i # randomly select walker\n while i == j:\n j = randint(self.N_walkers)\n z = self.z_lwr + self.z_upr * random() # sample the stretch distance\n prop = self.process_proposal(\n self.theta[i, :] + z * (self.theta[j, :] - self.theta[i, :])\n 
)\n return prop, z\n\n def advance_walker(self, i):\n for attempts in range(1, self.max_attempts + 1):\n Y, z = self.proposal(i)\n p = self.posterior(Y)\n q = exp((self.N_params - 1) * log(z) + p - self.probs[i])\n if random() <= q:\n self.theta[i, :] = Y\n self.probs[i] = p\n self.total_proposals[i].append(attempts)\n break\n\n if attempts == self.max_attempts:\n self.total_proposals[i].append(attempts)\n warn(\n f\"Walker #{i} failed to advance within the maximum allowed attempts\"\n )\n\n def advance_all(self):\n for i in range(self.N_walkers):\n self.advance_walker(i)\n self.L += 1\n self.update_summary_stats()\n\n def advance(self, n):\n t_start = time()\n sys.stdout.write(\"\\n\")\n sys.stdout.write(f\"\\r EnsembleSampler: [ 0 / {n} iterations completed ]\")\n sys.stdout.flush()\n\n for k in range(n):\n self.advance_all()\n\n # display the progress status message\n dt = time() - t_start\n eta = int(dt * (n / (k + 1) - 1))\n sys.stdout.write(\n f\"\\r EnsembleSampler: [ {k + 1} / {n} iterations completed | ETA: {eta} sec ]\"\n )\n sys.stdout.flush()\n\n # display completion message\n sys.stdout.write(\n f\"\\r EnsembleSampler: [ {n} / {n} iterations completed ] \"\n )\n sys.stdout.flush()\n sys.stdout.write(\"\\n\")\n\n def impose_boundaries(self, prop):\n d = prop - self.lower\n n = (d // self.width) % 2\n return self.lower + (1 - 2 * n) * (d % self.width) + n * self.width\n\n def pass_through(self, prop):\n return prop\n\n def mode(self):\n return self.theta[self.probs.argmax(), :]\n\n def plot_diagnostics(self):\n x = linspace(1, self.L, self.L)\n\n rates = x / array(self.total_proposals).cumsum(axis=1)\n\n avg_rate = rates.mean(axis=0)\n\n fig = plt.figure(figsize=(12, 7))\n\n ax1 = fig.add_subplot(221)\n alpha = max(0.01, min(1, 20.0 / float(self.N_walkers)))\n for i in range(self.N_walkers):\n ax1.plot(x, rates[i, :], lw=0.5, c=\"C0\", alpha=alpha)\n ax1.plot(x, avg_rate, lw=2, c=\"red\", label=\"mean rate of all walkers\")\n ax1.set_ylim([0, 1])\n ax1.grid()\n ax1.legend()\n ax1.set_title(\"walker acceptance rates\")\n ax1.set_xlabel(\"iteration\")\n ax1.set_ylabel(\"average acceptance rate per walker\")\n\n del rates, avg_rate\n\n p_mu = array(self.prob_means)\n ax2 = fig.add_subplot(222)\n ax2.plot(x, (p_mu - p_mu[-1]) / self.prob_devs[-1], lw=2, c=\"C0\")\n ax2.set_ylim([-0.6, 0.6])\n ax2.grid()\n ax2.set_title(\"log-probabilities normalised mean difference\")\n ax2.set_xlabel(\"iteration\")\n ax2.set_ylabel(\"normalised mean difference\")\n\n devs = array(self.std_devs).T\n ax3 = fig.add_subplot(223)\n alpha = max(0.02, min(1, 20.0 / float(self.N_params)))\n for i in range(self.N_params):\n ax3.plot(x, (devs[i, :] / devs[i, -1]) - 1.0, lw=0.5, c=\"C0\", alpha=alpha)\n ax3.set_ylim([-0.6, 0.6])\n ax3.grid()\n ax3.set_title(\"parameter standard-dev difference\")\n ax3.set_xlabel(\"iteration\")\n ax3.set_ylabel(\"standard-dev difference\")\n\n means = array(self.means).T\n ax4 = fig.add_subplot(224)\n alpha = max(0.02, min(1, 20.0 / float(self.N_params)))\n for i in range(self.N_params):\n ax4.plot(\n x,\n (means[i, :] - means[i, -1]) / devs[i, -1],\n lw=0.5,\n c=\"C0\",\n alpha=alpha,\n )\n ax4.set_ylim([-0.6, 0.6])\n ax4.grid()\n ax4.set_title(\"parameters normalised mean difference\")\n ax4.set_xlabel(\"iteration\")\n ax4.set_ylabel(\"normalised mean difference\")\n\n plt.tight_layout()\n plt.show()\n\n def matrix_plot(self, **kwargs):\n params = [k for k in self.theta.T]\n matrix_plot(samples=params, **kwargs)\n\n def trace_plot(self, **kwargs):\n params = [k for k in 
self.theta.T]\n trace_plot(samples=params, **kwargs)\n\n def save(self, filename):\n D = {\n \"theta\": self.theta,\n \"N_params\": self.N_params,\n \"N_walkers\": self.N_walkers,\n \"probs\": self.probs,\n \"L\": self.L,\n \"total_proposals\": array(self.total_proposals),\n \"means\": array(self.means),\n \"std_devs\": array(self.std_devs),\n \"prob_means\": array(self.prob_means),\n \"prob_devs\": array(self.prob_devs),\n \"bounded\": self.bounded,\n \"a\": self.a,\n \"z_lwr\": self.z_lwr,\n \"z_upr\": self.z_upr,\n \"max_attempts\": self.max_attempts,\n }\n\n if self.bounded:\n D[\"lower\"] = self.lower\n D[\"upper\"] = self.upper\n D[\"width\"] = self.width\n\n savez(filename, **D)\n\n @classmethod\n def load(cls, filename, posterior=None):\n sampler = cls(posterior=posterior)\n D = load(filename)\n\n sampler.theta = D[\"theta\"]\n sampler.N_params = int(D[\"N_params\"])\n sampler.N_walkers = int(D[\"N_walkers\"])\n sampler.probs = D[\"probs\"]\n sampler.L = int(D[\"L\"])\n sampler.total_proposals = [list(v) for v in D[\"total_proposals\"]]\n sampler.means = [v for v in D[\"means\"]]\n sampler.std_devs = [v for v in D[\"std_devs\"]]\n sampler.prob_means = list(D[\"prob_means\"])\n sampler.prob_devs = list(D[\"prob_devs\"])\n sampler.bounded = D[\"bounded\"]\n sampler.a = D[\"a\"]\n sampler.z_lwr = D[\"z_lwr\"]\n sampler.z_upr = D[\"z_upr\"]\n sampler.max_attempts = int(D[\"max_attempts\"])\n\n if sampler.bounded:\n sampler.lower = D[\"lower\"]\n sampler.upper = D[\"upper\"]\n sampler.width = D[\"width\"]\n sampler.process_proposal = sampler.impose_boundaries\n\n return sampler\n\n\ndef ESS(x):\n # get the autocorrelation\n f = irfft(abs(rfft(x - mean(x))) ** 2)\n # remove reflected 2nd half\n f = f[: len(f) // 2]\n # check that the first value is not negative\n if f[0] < 0.0:\n raise ValueError(\"First element of the autocorrelation is negative\")\n # cut to first negative value\n f = f[: argmax(f < 0.0)]\n # sum and normalise\n thin_factor = f.sum() / f[0]\n return int(len(x) / thin_factor)\n" ]
[ [ "numpy.diff", "matplotlib.pyplot.tight_layout", "numpy.random.seed", "matplotlib.pyplot.yscale", "numpy.log", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "numpy.cov", "numpy.isfinite", "matplotlib.pyplot.figure", "numpy.savez", "matplotlib.pyplot.savefig", "scipy.linalg.eigh", "matplotlib.pyplot.xlim", "numpy.identity", "numpy.linspace", "numpy.mean", "numpy.sqrt", "numpy.load", "numpy.zeros", "numpy.random.normal", "numpy.argmax", "numpy.arange", "matplotlib.pyplot.ylim", "matplotlib.pyplot.close", "numpy.percentile", "numpy.savez_compressed", "numpy.random.shuffle", "matplotlib.pyplot.grid", "numpy.exp", "numpy.random.random", "matplotlib.pyplot.show", "numpy.array", "numpy.dot", "numpy.random.randint", "matplotlib.pyplot.xlabel" ] ]
acninetyfive/blokus
[ "509fc993097b9fd527005c281fb61ea97e4a55af" ]
[ "board.py" ]
[ "import numpy as np\nfrom piece import Piece\nimport time\nfrom scipy.ndimage import convolve\n\nclass Board:\n\n\tdef __init__(self, size = 20, player_colors = [1,2,3,4]):\n\t\tself.size = size\n\t\tself.board = np.zeros((size,size), dtype = int)\n\t\tself.start_squares = [[0,0], [0, size-1], [size-1, 0], [size-1, size-1]]\n\t\tself.player_colors = player_colors\n\t\tself.c = [[1,0,1],[0,0,0],[1,0,1]]\n\t\tself.a = [[0,1,0],[1,1,1],[0,1,0]]\t\n\n\tdef add_piece(self, piece, x, y):\n\t\tif not self.valid_move(piece, x, y):\n\t\t\treturn False\n\t\tp_shape = piece.get_shape()\n\t\tpx, py = p_shape.shape\n\t\tself.board[x:x + px, y:y+py] += p_shape\n\t\treturn True\n\n\tdef valid_move(self, piece, x, y):\n\t\tp_shape = piece.get_shape()\n\t\tp_color = piece.get_color()\n\t\tpx, py = p_shape.shape\n\t\tshape_coords = np.argwhere(p_shape != 0) + [x,y]\n\n\t\tif x + px > self.size or y + py > self.size: #Piece off the edge of the board\n\t\t\t#print(\"Piece off the edge of the board\")\n\t\t\treturn False\n\t\tif len(np.nonzero(self.board[x:x+px,y:y+py] * piece.get_shape())[0]) > 0: #Piece on top of another piece\n\t\t\t#print(\"Piece on top of another\")\n\t\t\treturn False\n\t\tfor i in self.generate_adjacents(shape_coords): #Piece adjacent to same color\n\t\t\tif i[0] < self.size and i[0] >= 0 and i[1] < self.size and i[1] >= 0 and self.board[i] == p_color:\n\t\t\t\t#print(\"piece adjacent to the same color\")\n\t\t\t\treturn False\n\t\tfor i in self.generate_corners(shape_coords): #Piece is touching a corner\n\t\t\tif i[0] < self.size and i[0] >= 0 and i[1] < self.size and i[1] >= 0 and self.board[i] == p_color:\n\t\t\t\treturn True\n\t\tfor x in shape_coords:\n\t\t\tif list(x) in self.start_squares:\n\t\t\t\treturn True\n\t\t#print(\"else\")\n\t\treturn False\n\n\tdef generate_adjacents(self, shape_coords):\n\t\tadj = set()\n\t\tfor i in shape_coords:\n\t\t\tadj.add((i[0] + 1, i[1]))\n\t\t\tadj.add((i[0], i[1] + 1))\n\t\t\tadj.add((i[0] - 1, i[1]))\n\t\t\tadj.add((i[0], i[1] - 1))\n\t\treturn adj\n\n\tdef generate_corners(self, shape_coords):\n\t\tcorners = set()\n\t\tfor i in shape_coords:\n\t\t\tcorners.add((i[0] + 1, i[1] + 1))\n\t\t\tcorners.add((i[0] - 1, i[1] + 1))\n\t\t\tcorners.add((i[0] + 1, i[1] - 1))\n\t\t\tcorners.add((i[0] - 1, i[1] - 1))\n\n\t\t#print(corners - self.generate_adjacents(shape_coords)) #true corners\n\t\treturn corners\n\t\n\tdef get_color_corners(self, color): \n\t\tone_color_board = np.array(self.board == color, dtype=\"int\") * color\n\t\tcorner_board = convolve(one_color_board, self.c, mode='constant') - 20 * convolve(one_color_board, self.a, mode='constant') - 20 * self.board\n\t\treturn np.array(np.where(corner_board >= 1))\n\n\tdef get_moves_list(self, player, corners):\n\t\tplayable_moves = []\n\t\tpcs = player.get_pieces()\n\t\tif len(pcs) == 21: \n\t\t\tstart_squares = np.array([[0,0,19,19],[0,19,0,19]])\n\t\t\tcorners = np.hstack((corners, start_squares))\n\t\tfor p in pcs:\n\t\t\tmoves = pcs[p].get_legal_moves()\n\t\t\tpcs[p].reset()\n\t\t\tfor m in moves:\n\t\t\t\tfor c in m:\n\t\t\t\t\tif c == 'r':\n\t\t\t\t\t\tpcs[p].rotate()\n\t\t\t\t\telif c == 'f':\n\t\t\t\t\t\tpcs[p].flip()\n\t\t\t\tfor i in moves[m]:\n\t\t\t\t\tshp = pcs[p].get_shape()\n\t\t\t\t\tfor j in range(len(corners[0])):\n\t\t\t\t\t\tx = corners[0,j]+i[0]\n\t\t\t\t\t\ty = corners[1,j]+i[1]\n\t\t\t\t\t\tif x < 0 or x > self.size - 1:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\telif y < 0 or y > self.size - 1:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\telif 
self.valid_move(pcs[p],x,y):\n\t\t\t\t\t\t\tplayable_moves.append((p, m, x, y))\n\n\t\t\t\tpcs[p].reset()\n\t\treturn playable_moves\n\n\tdef get_board(self):\n\t\treturn self.board\n\n" ]
[ [ "numpy.argwhere", "numpy.zeros", "scipy.ndimage.convolve", "numpy.hstack", "numpy.array", "numpy.where" ] ]
aerometu/rbfopt
[ "4aba6186aa7d49c10551601d77e2484f88ffee39" ]
[ "tests/test_rbfopt_utils.py" ]
[ "\"\"\"Test the module rbfopt_utils in RBFOpt.\n\nThis module contains unit tests for the module rbfopt_utils.\n\nLicensed under Revised BSD license, see LICENSE.\n(C) Copyright International Business Machines Corporation 2016.\n\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport unittest\nimport math\nimport numpy as np\nimport rbfopt\nimport rbfopt.rbfopt_utils as ru\nfrom rbfopt.rbfopt_settings import RbfoptSettings\n\nclass TestUtils(unittest.TestCase):\n \"\"\"Test the rbfopt_utils module.\"\"\"\n\n def setUp(self):\n \"\"\"Initialize data used by several functions.\"\"\"\n np.random.seed(71294123)\n self.rbf_types = [rbf_type for rbf_type\n in RbfoptSettings._allowed_rbf\n if rbf_type != 'auto']\n # -- end function\n\n def test_get_rbf_function(self):\n \"\"\"Check that all RBFs are properly computed at 0 and at 1.\"\"\"\n settings = RbfoptSettings()\n # Set up values of the RBF at 0 and at 1\n rbf_values = dict()\n rbf_values['linear'] = (0.0, 1.0)\n rbf_values['multiquadric'] = (\n np.sqrt(settings.rbf_shape_parameter**2),\n np.sqrt(1+settings.rbf_shape_parameter**2))\n rbf_values['cubic'] = (0.0, 1.0)\n rbf_values['thin_plate_spline'] = (0.0, 0.0)\n rbf_values['gaussian'] = (1.0, np.exp(-settings.rbf_shape_parameter))\n for rbf_type in self.rbf_types:\n settings.rbf = rbf_type\n rbf = ru.get_rbf_function(settings)\n rbf_at_0, rbf_at_1 = rbf_values[rbf_type]\n msg='RBF {:s} is not {:f} at 0'.format(rbf_type, rbf_at_0)\n self.assertEqual(rbf_at_0, rbf(0.0), msg=msg)\n msg='RBF {:s} is not {:f} at 1'.format(rbf_type, rbf_at_1)\n self.assertEqual(rbf_at_1, rbf(1.0), msg=msg)\n # -- end function\n\n def test_get_degree_polynomial(self):\n \"\"\"Verify that the degree is always between 0 and 1.\"\"\"\n settings = RbfoptSettings()\n for rbf_type in self.rbf_types:\n settings.rbf = rbf_type\n degree = ru.get_degree_polynomial(settings)\n self.assertTrue(-1 <= degree <= 1)\n # -- end function\n\n def test_get_size_P_matrix(self):\n \"\"\"Verify that the size is always between 0 and n+1.\"\"\"\n settings = RbfoptSettings()\n for rbf_type in self.rbf_types:\n settings.rbf = rbf_type\n for n in range(20):\n size = ru.get_size_P_matrix(settings, n)\n self.assertTrue(0 <= size <= n + 1)\n # -- end function \n\n def test_get_all_corners(self):\n \"\"\"Check that all corners of a box are properly returned.\"\"\"\n var_lower = np.array([-1, 0, 1])\n var_upper = np.array([1, 2, 3])\n corners = ru.get_all_corners(var_lower, var_upper)\n self.assertTrue(sorted([[-1, 0, 1], [-1, 0, 3], [-1, 2, 1], \n [-1, 2, 3], [1, 0, 1], [1, 0, 3], \n [1, 2, 1], [1, 2, 3]]) ==\n sorted(corners.tolist()))\n # -- end function\n\n def test_get_lower_corners(self):\n \"\"\"Check that the lower corners of a box are properly returned.\"\"\"\n var_lower = np.array([-1, 0, 1])\n var_upper = np.array([1, 2, 3])\n corners = ru.get_lower_corners(var_lower, var_upper)\n self.assertTrue(sorted([[-1, 0, 1], [-1, 0, 3], \n [-1, 2, 1], [1, 0, 1]]) == \n sorted(corners.tolist()))\n # -- end function\n\n def test_get_random_corners(self):\n \"\"\"Check that random corners of a box are properly returned.\"\"\"\n var_lower = np.array([-1, 0, 1])\n var_upper = np.array([1, 2, 3])\n all_corners = [[-1, 0, 1], [-1, 0, 3], [-1, 2, 1], [-1, 2, 3],\n [1, 0, 1], [1, 0, 3], [1, 2, 1], [1, 2, 3]]\n for i in range(10):\n corners = ru.get_random_corners(var_lower, var_upper)\n for corner in corners:\n self.assertIn(corner.tolist(), all_corners)\n 
self.assertEqual(len(corners), 4)\n # -- end function\n\n def test_get_lhd_points(self):\n \"\"\"Check that latin hypercube designs have the correct size.\"\"\"\n var_lower = np.array([-1, 0, 1])\n var_upper = np.array([1, 2, 3])\n corners = ru.get_lhd_maximin_points(var_lower, var_upper, 4)\n self.assertEqual(len(corners), 4)\n corners = ru.get_lhd_corr_points(var_lower, var_upper, 5)\n self.assertEqual(len(corners), 5)\n # -- end function\n\n def test_initialize_nodes(self):\n \"\"\"Test initialization methods for the sample points.\n\n This method verifies that returned sets of points have at\n least n+1 points, and integer variables are integer.\n \"\"\"\n var_lower = np.array([-1, 0, 1])\n var_upper = np.array([1, 2, 3])\n integer_vars = np.array([1, 2])\n for method in RbfoptSettings._allowed_init_strategy:\n settings = RbfoptSettings(init_strategy=method,\n init_sample_fraction=1.0)\n points = ru.initialize_nodes(settings, var_lower, var_upper,\n integer_vars)\n msg=('Number of points returned by {:s}'.format(method) +\n ' is insufficient')\n self.assertGreaterEqual(len(points), 4, msg=msg)\n for point in points:\n for index in integer_vars:\n self.assertEqual(point[index] - round(point[index]), 0)\n # -- end function\n\n def test_initialize_nodes_midpoint(self):\n \"\"\"Test initialization methods for the sample points.\n\n This method verifies that returned sets of points have at\n least n+1 points, and integer variables are integer.\n \"\"\"\n var_lower = np.array([-1, 0, 1])\n var_upper = np.array([1, 2, 3])\n integer_vars = np.array([1, 2])\n midpoint = np.array([0, 1, 2])\n for method in RbfoptSettings._allowed_init_strategy:\n settings = RbfoptSettings(init_strategy=method,\n init_include_midpoint=True,\n init_sample_fraction=1.0)\n points = ru.initialize_nodes(settings, var_lower, var_upper,\n integer_vars)\n msg=('Number of points returned by {:s}'.format(method) +\n ' is insufficient')\n self.assertGreaterEqual(len(points), 4, msg=msg)\n dist = np.linalg.norm(points - midpoint, axis=1)\n self.assertEqual(np.min(dist), 0.0,\n msg='Did not find midpoint')\n # -- end function\n\n def test_round_integer_vars(self):\n \"\"\"Verify that some fractional points are properly rounded.\"\"\"\n point = np.array([0.1, 2.3, -3.5, 4.6])\n ru.round_integer_vars(point, np.array([0, 2]))\n self.assertListEqual(point.tolist(), [0.0, 2.3, -4.0, 4.6],\n msg='Failed when integer_vars is subset')\n point = np.array([0.1, 2.3, -3.5, 4.6])\n ru.round_integer_vars(point, np.array([]))\n self.assertListEqual(point.tolist(), [0.1, 2.3, -3.5, 4.6],\n msg='Failed when integer_vars is empty')\n point = np.array([0.1, 2.3, -3.5, 4.6])\n ru.round_integer_vars(point, np.array([0, 1, 2, 3]))\n self.assertListEqual(point.tolist(), [0.0, 2.0, -4.0, 5.0],\n msg='Failed when integer_vars is everything')\n # -- end function\n\n def test_round_integer_bounds(self):\n \"\"\"Verify that some fractional bounds are properly rounded.\"\"\"\n var_lower = np.array([-0.1, 2.3, -3.5, 4.6])\n var_upper = np.array([2.5, 3.0, -1.2, 4.6])\n ru.round_integer_bounds(var_lower, var_upper, np.array([0, 2]))\n self.assertListEqual(var_lower.tolist(), [-1.0, 2.3, -4.0, 4.6],\n msg='Failed when integer_vars is subset')\n self.assertListEqual(var_upper.tolist(), [3.0, 3.0, -1.0, 4.6],\n msg='Failed when integer_vars is subset')\n var_lower = np.array([-0.1, 2.3, -3.5, 4.6])\n var_upper = np.array([2.5, 3.0, -1.2, 4.6])\n ru.round_integer_bounds(var_lower, var_upper, np.array([]))\n self.assertListEqual(var_lower.tolist(), [-0.1, 2.3, 
-3.5, 4.6],\n msg='Failed when integer_vars is empty')\n self.assertListEqual(var_upper.tolist(), [2.5, 3.0, -1.2, 4.6],\n msg='Failed when integer_vars is empty')\n var_lower = np.array([-0.1, 2.3, -3.5, 4.6])\n var_upper = np.array([2.5, 3.0, -1.2, 4.6])\n ru.round_integer_bounds(var_lower, var_upper, np.array([0, 1, 2, 3]))\n self.assertListEqual(var_lower.tolist(), [-1.0, 2.0, -4.0, 4.0],\n msg='Failed when integer_vars is everything')\n self.assertListEqual(var_upper.tolist(), [3.0, 3.0, -1.0, 5.0],\n msg='Failed when integer_vars is everything')\n # -- end function\n\n def test_norm(self):\n \"\"\"Verify that norm is 0 at 0 and correct for some other vectors.\"\"\"\n self.assertEqual(ru.norm(np.array([0 for i in range(10)])), 0.0,\n msg='Norm is not zero at zero')\n self.assertEqual(ru.norm(np.array([-1 for i in range(9)])), 3.0,\n msg='Norm is not 3.0 at {-1}^9')\n self.assertEqual(ru.norm(np.array([-2 + i for i in range(5)])), math.sqrt(10),\n msg='Norm is not sqrt{10} at [-2, -1, 0, 1, 2]')\n # -- end function\n\n def test_distance(self):\n \"\"\"Verify that distance is 0 iff two points are the same.\"\"\"\n self.assertEqual(ru.distance(np.array([i*5 for i in range(15)]),\n np.array([i*5 for i in range(15)])), 0.0,\n msg='Distance is not zero at equal points')\n self.assertNotEqual(ru.distance(np.array([i*5 for i in range(15)]),\n np.array([i*5 + 0.001 for i in range(15)])),\n 0.0, msg='Distance is nonzero at diff points')\n self.assertNotEqual(ru.distance(np.array([-i*5 for i in range(15)]),\n np.array([-i*5 + 0.001 for i in range(15)])),\n 0.0, msg='Distance is nonzero at diff points')\n # -- end function\n\n def test_get_min_distance(self):\n \"\"\"Test some extreme cases for get_min_distance.\"\"\"\n self.assertEqual(ru.get_min_distance(np.array([i for i in range(5)]),\n np.array([[i+j for i in range(5)]\n for j in range(10)])), 0.0)\n # -- end function\n def test_get_min_distance_and_index(self):\n \"\"\"Test some extreme cases for get_min_distance_index.\"\"\"\n d, i = ru.get_min_distance_and_index(np.array([i for i in range(5)]),\n np.array([[i+j for i in range(5)]\n for j in range(-2, 3)]))\n self.assertEqual(i, 2)\n d, i = ru.get_min_distance_and_index(np.array([i+0.01 for \n i in range(5)]),\n np.array([[i+j for i in range(5)]\n for j in range(-3, 2)]))\n self.assertEqual(i, 3)\n # -- end function\n\n def test_bulk_get_min_distance(self):\n \"\"\"Verify that bulk version returns the same as regular version.\n \n This function checks that the bulk version of get_min_distance\n on a number of randomly generated points returns the same\n result as the regular version.\n \"\"\"\n for i in range(50):\n dim = np.random.randint(1, 20)\n num_points_1 = np.random.randint(10, 50)\n num_points_2 = np.random.randint(10, 50)\n points = np.random.uniform(-100, 100, size=(num_points_1,dim))\n other_points = np.random.uniform(-100, 100,\n size=(num_points_2,dim))\n dist1 = [ru.get_min_distance(point, other_points)\n for point in points]\n dist2 = ru.bulk_get_min_distance(points, other_points)\n for j in range(num_points_1):\n msg='Failed random test {:d} point {:d}'.format(i, j)\n self.assertAlmostEqual(dist1[j], dist2[j], 12, msg=msg)\n # -- end function\n\n def test_get_rbf_matrix(self):\n \"\"\"Test basic properties of the RBF matrix (e.g. 
symmetry, size).\n\n Verify that the RBF matrix is symmetric and it has the correct\n size for all types of RBF.\n \"\"\"\n settings = RbfoptSettings()\n for i in range(50):\n dim = np.random.randint(1, 20)\n num_points = np.random.randint(10, 50)\n node_pos = np.random.uniform(-100, 100, size=(num_points,dim))\n # Possible shapes of the matrix\n for rbf_type in self.rbf_types:\n settings.rbf = rbf_type\n mat = ru.get_rbf_matrix(settings, dim, num_points, node_pos)\n self.assertIsInstance(mat, np.matrix)\n self.assertAlmostEqual(np.max(mat - mat.transpose()), 0.0,\n msg='RBF matrix is not symmetric')\n size = num_points\n if (ru.get_degree_polynomial(settings) >= 0):\n size += 1\n if (ru.get_degree_polynomial(settings) > 0):\n size += dim ** ru.get_degree_polynomial(settings)\n self.assertEqual(mat.shape, (size, size))\n # Check that exception is raised for unknown RBF types\n settings.rbf = 'unknown'\n self.assertRaises(ValueError, ru.get_rbf_matrix, settings, \n dim, num_points, node_pos)\n # -- end function\n\n def test_rbf_interpolation(self):\n \"\"\"Test interpolation conditions.\n\n Verify that the RBF interpolates at points.\n \"\"\"\n settings = RbfoptSettings()\n for i in range(20):\n dim = np.random.randint(1, 20)\n num_points = np.random.randint(10, 50)\n node_pos = np.random.uniform(-100, 100, size=(num_points,dim))\n node_val = np.random.uniform(0, 100, num_points)\n # Possible shapes of the matrix\n for rbf_type in self.rbf_types:\n settings.rbf = rbf_type\n mat = ru.get_rbf_matrix(settings, dim, num_points, node_pos)\n rbf_l, rbf_h = ru.get_rbf_coefficients(\n settings, dim, num_points, mat, node_val)\n for i in range(num_points):\n value = ru.evaluate_rbf(settings, node_pos[i], dim,\n num_points, node_pos, rbf_l, rbf_h)\n self.assertAlmostEqual(value, node_val[i], places=4,\n msg='Interpolation failed' +\n 'with rbf ' + rbf_type)\n # -- end function\n\n def test_transform_function_values(self):\n \"\"\"Test all codomain transformation strategies.\n\n This will verify that the transformation strategies always\n produce valid results and can handle extreme cases.\n \"\"\"\n settings = RbfoptSettings()\n list_scaling = [val for val in RbfoptSettings._allowed_function_scaling\n if val != 'auto']\n list_clipping = [val for val in \n RbfoptSettings._allowed_dynamism_clipping\n if val != 'auto']\n transf = ru.transform_function_values\n # Create list of values to test: node_val and corresponding\n # node_err_bound\n to_test = [(np.array([0, -100, settings.dynamism_threshold * 10]), \n np.array([[0,0], [0,0], [0,0]])),\n (np.array([0.0]), np.array([[0, 0]])),\n (np.array([0.0 for i in range(10)]),\n np.array([[-i, i] for i in range(10)])),\n (np.array([100.0 for i in range(10)]), \n np.array([[-1,1] for i in range(10)])),\n (np.array([10.0**i for i in range(-20, 20)]),\n np.array([[0,0] for i in range(-20, 20)])),\n (np.append(np.array([-10.0**i for i in range(-20, 20)]),\n np.array([10.0**i for i in range(-20, 20)])),\n np.array([[-2**i,2**i] for i in range(-40, 40)]))]\n for scaling in list_scaling:\n for clipping in list_clipping:\n header = '({:s}, {:s}):'.format(scaling, clipping)\n for (node_val, node_err_bounds) in to_test:\n settings.function_scaling = scaling\n settings.dynamism_clipping = clipping\n (scaled, minval, maxval, errbounds,\n rescale_func) = transf(settings, node_val, min(node_val),\n max(node_val), node_err_bounds)\n # Check that the number of scaled values is the\n # same as the number of input values\n msg='Number of output values is different from input'\n 
self.assertEqual(len(scaled), len(node_val),\n msg=header + msg)\n msg='Dynamism threshold was not enforced'\n v1 = abs(min(scaled))\n v2 = abs(max(scaled))\n c1 = v1 > 1.0e-10 and v2/v1 <= settings.dynamism_threshold\n c2 = v1 <= 1.0e-10 and v2 <= settings.dynamism_threshold\n self.assertTrue(clipping == 'off' or c1 or c2,\n msg=header + msg)\n for i in range(len(node_val)):\n msg='Fast_node_index have wrong sign'\n self.assertLessEqual(errbounds[i][0], 0, msg=msg)\n self.assertGreaterEqual(errbounds[i][1], 0, msg=msg)\n msg=('Min/Max of scaled values inconsistent with ' +\n 'returned scaled_min and scaled_max')\n self.assertEqual(min(scaled), minval, msg=header + msg)\n self.assertEqual(max(scaled), maxval, msg=header + msg)\n # -- end for\n # -- end function\n\n def test_transform_domain(self):\n \"\"\"Check that affine transformation does not hang on limit case.\n\n Further check that 'off' transformation returns the point as\n is, and unimplemented strategies raise a ValueError.\n \"\"\"\n settings = RbfoptSettings()\n settings.domain_scaling = 'affine'\n var_lower = np.array([i for i in range(5)] + [i for i in range(5)])\n var_upper = np.array([i for i in range(5)] + [i + 10 for i in range(5)])\n point = np.array([i for i in range(5)] + [i + 2*i for i in range(5)])\n # Test what happend when lower and upper bounds coincide\n transf_point = ru.transform_domain(settings, var_lower, \n var_upper, point)\n orig_point = ru.transform_domain(settings, var_lower, var_upper,\n transf_point, True)\n for i in range(10):\n msg='Exceeding lower bound on affine domain scaling'\n self.assertLessEqual(0.0, transf_point[i], msg=msg)\n msg='Exceeding upper bound on affine domain scaling'\n self.assertLessEqual(transf_point[i], 1.0, msg=msg)\n msg='Doubly transformed point does not match original'\n self.assertAlmostEqual(point[i], orig_point[i], 12, msg=msg)\n # Check that 'off' scaling does not do anything\n settings.domain_scaling = 'off'\n transf_point = ru.transform_domain(settings, var_lower, \n var_upper, point)\n for i in range(10):\n msg='Transformed point with \\'off\\' does not match original'\n self.assertEqual(point[i], transf_point[i], msg=msg)\n # Check that unimplemented strategies are rejected\n settings.domain_scaling = 'test'\n self.assertRaises(ValueError, ru.transform_domain, settings, \n var_lower, var_upper, point)\n # -- end function\n\n def test_transform_domain_bounds(self):\n \"\"\"Check that domain bounds are consistent.\"\"\"\n list_scaling = [val for val in RbfoptSettings._allowed_domain_scaling \n if val != 'auto']\n for scaling in list_scaling:\n settings = RbfoptSettings(domain_scaling = scaling)\n # Test limit case with empty bounds\n vl, vu = ru.transform_domain_bounds(settings, np.array([]), np.array([]))\n msg='Failed transform_domain_bounds on empty bounds'\n self.assertEqual(len(vl), 0, msg=msg)\n self.assertEqual(len(vu), 0, msg=msg)\n msg='Bounds inconsistent with random bounds'\n for i in range(10):\n dim = np.random.randint(0, 20)\n var_lower = np.random.uniform(-100, 100, dim) \n var_upper = var_lower + np.random.uniform(0, 100, dim)\n vl, vu = ru.transform_domain_bounds(settings, var_lower,\n var_upper)\n self.assertEqual(len(vl), len(var_lower), msg=msg)\n self.assertEqual(len(vu), len(var_upper), msg=msg)\n for j in range(dim):\n self.assertLessEqual(vl[j], vu[j], msg=msg)\n # -- end function\n\n def test_get_sigma_n(self):\n \"\"\"Check that sigma_n is always within the bounds [0, k-1].\"\"\"\n for k in range(0, 1000, 50):\n for num_global_searches in 
range(0, 10, 2):\n for current_step in range(1, num_global_searches):\n for num_initial_points in range(0, k):\n i = ru.get_sigma_n(k, current_step, \n num_global_searches,\n num_initial_points)\n self.assertTrue(0 <= i < k, \n msg='sigma_n out of bounds')\n # -- end function\n\n def test_get_fmax_current_iter(self):\n \"\"\"Verify get_fmax_current_iter is resilient to limit cases.\n\n This function tests whether correct values are returned when\n there is a single-element list of node values, and when the\n list of node values is exactly the minimum required k + 1.\n \"\"\"\n settings = RbfoptSettings(init_sample_fraction=0.75)\n fun = ru.get_fmax_current_iter\n self.assertEqual(fun(settings, 0, 1, 1, np.array([1])), 1,\n msg='Failed on single-element list')\n self.assertEqual(fun(settings, 10, 11, 5, \n np.array([i for i in range(11)])),\n 10, msg='Failed on n == k + 1')\n # -- end function\n\n def test_init_points_cleanup(self):\n \"\"\"Verify that init_points_cleanup removes points too close.\n\n Test that only points with distance larger than min_dist are\n returned.\n \"\"\"\n settings = RbfoptSettings(min_dist=1.0e-5)\n points = np.array([[0, 0], [0, 0], [0, 0]])\n ret = ru.init_points_cleanup(settings, points)\n self.assertListEqual(ret.tolist(), [0],\n msg='Returned coinciding points')\n points = np.array([[0, 0, 0], [0, 1, 1], [0, 1.0e-5, 0]])\n ret = ru.init_points_cleanup(settings, points)\n self.assertListEqual(ret.tolist(), [0, 1],\n msg='Returned coinciding points')\n# -- end class\n\nclass TestModelSelection(unittest.TestCase):\n \"\"\"Test the model selection functions.\"\"\"\n\n def setUp(self):\n \"\"\"Determine which model selection solvers should be tested.\"\"\"\n self.n = 3\n self.k = 10\n self.var_lower = np.array([i for i in range(self.n)])\n self.var_upper = np.array([i + 10 for i in range(self.n)])\n self.node_pos = np.array([self.var_lower, self.var_upper, \n [1, 2, 3], [9, 5, 8.8], [5.5, 7, 12],\n [3.2, 10.2, 4], [2.1, 1.1, 7.4],\n [6.6, 9.1, 2.0], [10, 8.8, 11.1],\n [7, 7, 7]])\n self.node_val = np.array([2*i*i for i in range(self.k)])\n\n # -- end function \n\n def test_get_best_rbf_model(self):\n \"\"\"Test the get_best_rbf_model function.\n \"\"\"\n settings = RbfoptSettings()\n rbf, gamma = ru.get_best_rbf_model(settings, self.n, self.k, \n self.node_pos, self.node_val,\n self.k)\n self.assertTrue(rbf == 'linear' or\n (rbf == 'multiquadric' and gamma == 0.1),\n msg='Did not obtain expected model')\n # -- end function\n\n def test_get_model_quality_estimate(self):\n \"\"\"Test the get_model_quality_estimate function.\n \"\"\"\n for rbf in ['cubic', 'thin_plate_spline', 'multiquadric',\n 'linear', 'gaussian']:\n settings = RbfoptSettings(rbf=rbf)\n error = ru.get_model_quality_estimate(\n settings, self.n, self.k, self.node_pos, \n self.node_val, self.k)\n # Create a copy of the interpolation nodes and values\n sorted_idx = self.node_val.argsort()\n sorted_node_val = self.node_val[sorted_idx]\n # Initialize the arrays used for the cross-validation\n cv_node_pos = self.node_pos[sorted_idx[1:]]\n cv_node_val = self.node_val[sorted_idx[1:]] \n # The node that was left out\n rm_node_pos = self.node_pos[sorted_idx[0]]\n rm_node_val = self.node_val[sorted_idx[0]]\n # Estimate of the model error\n loo_error = 0.0 \n for i in range(self.k):\n # Compute the RBF interpolant with one node left out\n Amat = ru.get_rbf_matrix(settings, self.n, self.k-1, \n cv_node_pos)\n rbf_l, rbf_h = ru.get_rbf_coefficients(\n settings, self.n, self.k-1, Amat, cv_node_val)\n # Compute 
value of the interpolant at the removed node\n predicted_val = ru.evaluate_rbf(settings, rm_node_pos, \n self.n, self.k-1, \n cv_node_pos, rbf_l, rbf_h)\n # Update leave-one-out error\n loc = np.searchsorted(sorted_node_val, predicted_val)\n loo_error += abs(loc - i)\n # Update the node left out\n if (i < self.k - 1):\n tmp = cv_node_pos[i].copy()\n cv_node_pos[i] = rm_node_pos\n rm_node_pos = tmp\n cv_node_val[i], rm_node_val = rm_node_val, cv_node_val[i]\n self.assertAlmostEqual(loo_error, error, \n msg='Model selection procedure ' +\n 'miscomputed the error')\n # -- end for\n # -- end for\n # -- end function\n# -- end class\n" ]
[ [ "numpy.sqrt", "numpy.random.uniform", "numpy.searchsorted", "numpy.random.seed", "numpy.exp", "numpy.min", "numpy.array", "numpy.random.randint", "numpy.linalg.norm" ] ]
bgshin/rn
[ "5a0649533b5aba05556cc6f9607e28c95e3b9e55" ]
[ "keras_not_working/train.py" ]
[ "import tensorflow as tf\nimport keras.backend.tensorflow_backend as ktf\nfrom keras.callbacks import ModelCheckpoint\nfrom soclevr import load_all, Timer\nimport os\nimport argparse\nimport numpy as np\nfrom model import RN, RN2\n\n\ndef run(attempt, gpunum):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpunum\n def get_session(gpu_fraction=1):\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction,\n allow_growth=True)\n return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n ktf.set_session(get_session())\n\n\n with Timer(\"load_all...\"):\n rel_train, rel_test, norel_train, norel_test = load_all(source='shm')\n # rel_train, rel_test, norel_train, norel_test = load_all(source='file')\n\n # model = RN()\n model = RN2()\n\n model.fit([rel_train[0], rel_train[1]], rel_train[2], validation_data=[[rel_test[0], rel_test[1]], rel_test[2]],\n epochs=100, batch_size=64)\n\n\n # with Timer(\"Build model...\"):\n # input_shape = (maxlen,)\n # model_input = Input(shape=input_shape)\n # model = CNNv1(model_input, max_features, embedding)\n # # model = CNNv2(model_input, max_features)\n\n # # checkpoint\n # filepath='./model/best-%d-%d' % (w2vdim, attempt)\n #\n # checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n # callbacks_list = [checkpoint]\n #\n # model.fit(x_trn, y_trn,\n # batch_size=batch_size,\n # shuffle=True,\n # callbacks=callbacks_list,\n # epochs=epochs,\n # validation_data=(x_dev, y_dev))\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-t', default=0, choices=range(10), type=int)\n parser.add_argument('-g', default=\"0\", choices=[\"0\", \"1\", \"2\", \"3\"], type=str)\n args = parser.parse_args()\n\n run(args.t, args.g)" ]
[ [ "tensorflow.GPUOptions", "tensorflow.ConfigProto" ] ]
diabolical-ninja/AllTheNames
[ "cdf8a181b80ee3250b76f30cd0b875368d60570c" ]
[ "src/data_collection/MatthiasWinkelmann_firstname_database.py" ]
[ "\"\"\"Firstnames Database from Github User MatthiasWinkelmann.\n\nSource:\n - https://github.com/MatthiasWinkelmann/firstname-database\n\"\"\"\nimport sys\nfrom pathlib import Path\n\nimport pandas as pd\n\nsys.path.append(str(Path(__file__).parent.parent))\n\nimport utils as ut # noqa\n\nnames_url = \"https://raw.githubusercontent.com/MatthiasWinkelmann/firstname-database/master/firstnames.csv\" # noqa\nnames_df = pd.read_csv(names_url, sep=\";\")\n\n# Original format is wide, with a column for each country. Normalise\nnames_df = pd.melt(names_df, id_vars=[\"name\", \"gender\"])\n\n# Remap column names\ncolnames_dict = {\n \"name\": \"first_name\",\n \"gender\": \"gender\",\n \"variable\": \"origin\",\n}\n\nnames_df = names_df[list(colnames_dict.keys())]\nnames_df.rename(columns=colnames_dict, inplace=True)\n\nnames_df[\"gender\"] = names_df[\"gender\"].apply(ut.remap_gender)\nnames_df[\"definition\"] = pd.NA\n\n# Save\nnames_df.to_csv(\n \"data/MatthiasWinkelmann_firstname_database.csv\",\n sep=\"|\",\n index=False,\n encoding=\"utf-8\",\n)\n" ]
[ [ "pandas.read_csv", "pandas.melt" ] ]
bitan1998/DSDA-PROJECT
[ "55c94f130bde487128e3b5c02d6f2c2622192766" ]
[ "KNN MODEL/knn.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\nnames=['AGE','TB','DB','TP','Albumin','A/G','sgpt','sgot','ALKPHOS','GENDER']\ndataset=pd.read_csv(\"Indian Liver Patient Dataset.csv\")\n##||REMOVING NAN FILES AS COLLEGE GAVE BAD DATASET||##\ndataset1=dataset.dropna(subset = ['AGE','TB','DB','TP','Albumin','A/G','sgpt','sgot','ALKPHOS','GENDER'])\n\n\nX=dataset1.iloc[:,:-1].values # REJECTING THE LAST COLUMN\ny=dataset1.iloc[:,8].values\ny=y.astype('int')## REMOVING CONTIGUOS FILES\nfrom sklearn.model_selection import train_test_split \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\nfrom sklearn.preprocessing import StandardScaler ## BEST FOR CLASSIFICATION TYPE MODEL\nscaler = StandardScaler() \nscaler.fit(X_train)\n\nX_train = scaler.transform(X_train) \nX_test = scaler.transform(X_test) \n\n##BUILDING THE MODEL\nfrom sklearn.neighbors import KNeighborsClassifier \nclassifier = KNeighborsClassifier(n_neighbors=5) \nclassifier.fit(X_train, y_train) \n\ny_pred = classifier.predict(X_test)\n\nfrom sklearn.metrics import classification_report, confusion_matrix \nprint('||CONFUSION_MATRIX||')\nprint(confusion_matrix(y_test, y_pred))\nprint('\\n') \nprint('||CLASSIFICATION_REPORT||') \nprint(classification_report(y_test, y_pred))\n\nerror = []\n\n# Calculating error for K values between 1 and 100\nfor i in range(1, 100): \n knn = KNeighborsClassifier(n_neighbors=i)\n knn.fit(X_train, y_train)\n pred_i = knn.predict(X_test)\n error.append(np.mean(pred_i != y_test))\n\n#PLOT\nimport matplotlib.pyplot as plt\nplt.figure(figsize=(12,6)) \nplt.plot(range(1, 100), error, color='red', linestyle='dashed', marker='o',markerfacecolor='blue', markersize=10)\nplt.title('Error Rate K Value') \nplt.xlabel('K Value') \nplt.ylabel('Mean Error') \nplt.show()\n" ]
[ [ "sklearn.metrics.classification_report", "pandas.read_csv", "matplotlib.pyplot.figure", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "sklearn.preprocessing.StandardScaler", "sklearn.neighbors.KNeighborsClassifier", "sklearn.model_selection.train_test_split", "numpy.mean" ] ]
UKPLab/emnlp2021-hypercoref-cdcr
[ "205ebfc79d022c5db096fe218fd158a769a71415" ]
[ "hypercoref/python/common_components/commoncrawl.py" ]
[ "import pprint\nfrom datetime import datetime\n\nimport cdx_toolkit\nimport pandas as pd\nimport tqdm\nfrom typing import Optional\n\nfrom python import *\nfrom python.pipeline import GLOBAL, ComponentBase, DEVELOPMENT_MODE\n\n\nclass CommonCrawl(ComponentBase):\n TIMESTAMP_FORMAT = \"%Y%m%d%H%M%S\" # applies to wayback machine and common crawl\n DEFAULT_SIZE_ESTIMATE = 1500\n DEBUG_MAX_COMMONCRAWL_HITS = 200\n\n def __init__(self, config, config_global, logger):\n super(CommonCrawl, self).__init__(config, config_global, logger)\n\n self.cache = self._provide_cache(\"commoncrawl_cdx\", scope=GLOBAL, size_limit=100*1024*1024*1024)\n self.cdx = cdx_toolkit.CDXFetcher(source=\"cc\")\n\n self.debug = config_global.get(DEVELOPMENT_MODE, False)\n\n def cdx_query(self,\n url: str,\n wildcard_query: bool = False,\n from_: datetime = None,\n to: datetime = None,\n limit: Optional[int] = None):\n \"\"\"\n Query the Common Crawl CDX API for which pages were captured at which time.\n :param url: URL to query for\n :param wildcard_query: if True, this method will query for all pages which have the given url as their prefix\n :param from_: If set, only retrieves pages which were captured at least once after this datetime. If None,\n retrieves only the past 12 months by default!\n :param to: if set, only retrieves pages which were captured at least once until this datetime\n :param limit: if set, return only n results\n :return: pandas DataFrame with columns url, timestamp, digest\n \"\"\"\n if self.debug:\n limit = CommonCrawl.DEBUG_MAX_COMMONCRAWL_HITS\n self.logger.info(f\"Running in debug mode, number of results is limited to {limit}\")\n\n query = {\"url\": url, \"wildcard_query\": wildcard_query, \"from\": from_, \"to\": to, \"limit\": limit}\n query_serialized = pprint.pformat(query)\n\n # obtain CDX result\n if query_serialized in self.cache:\n df = self.cache[query_serialized]\n else:\n try:\n df = self._do_cdx_query(url, wildcard_query, from_, to, limit)\n self.cache[query_serialized] = df\n except ValueError as e:\n # do not cache in case of errors\n self.logger.error(e)\n df = None\n\n # post-process\n if df is not None and not df.empty:\n # convert all timestamps to datetimes\n df.loc[:, TIMESTAMP] = pd.to_datetime(df[TIMESTAMP], format=self.TIMESTAMP_FORMAT, errors=\"coerce\")\n # append warc prefix to obtain full URLs for WARC files\n df[FILENAME] = self.cdx.warc_url_prefix + \"/\" + df[FILENAME]\n return df\n\n def _do_cdx_query(self, url: str, wildcard_query: bool = False, from_: datetime = None, to: datetime = None, limit=None):\n self.logger.info(f\"Querying Common Crawl CDX for {url}...\")\n\n # set up the query URL\n query_parts = [url]\n if wildcard_query:\n if not url.endswith(\"/\"):\n query_parts.append(\"/\")\n query_parts.append(\"*\")\n query = \"\".join(query_parts)\n\n kwargs = {\n \"url\": query,\n \"filter\": \"status:200\",\n \"mime-detected\": \"text/html\",\n \"languages\": \"eng\",\n }\n if from_:\n kwargs[\"from_ts\"] = from_.strftime(self.TIMESTAMP_FORMAT)\n if to:\n kwargs[\"to\"] = to.strftime(self.TIMESTAMP_FORMAT)\n\n if wildcard_query:\n size_estimate = self.cdx.get_size_estimate(**kwargs)\n self.logger.info(f\"{size_estimate} estimated hits.\")\n else:\n size_estimate = self.DEFAULT_SIZE_ESTIMATE\n if limit:\n kwargs[\"limit\"] = size_estimate = limit\n\n captures = []\n with tqdm.tqdm(total=size_estimate, desc=\"CDX hits\", mininterval=10) as p:\n it = self.cdx.iter(**kwargs)\n while True:\n try:\n obj = next(it)\n except StopIteration:\n break\n except 
Exception as e:\n self.logger.warning(f\"CDX iteration failed with '{str(e)}', continuing...\")\n\n # sometimes there are crawled robots.txt files which we want to ignore\n if \"robotstxt\" in obj[\"filename\"]:\n continue\n\n # in theory, there is a parameter fl= with one can select the fields returned, but fl=url returns\n # nothing for an unknown reason, so we have to kludge it\n captures.append({URL: obj[\"url\"],\n TIMESTAMP: obj[\"timestamp\"],\n FILENAME: obj[\"filename\"],\n OFFSET: obj[\"offset\"],\n LENGTH: obj[\"length\"]})\n if len(captures) % 1000 == 0:\n p.update(len(captures))\n\n df = pd.DataFrame(captures)\n return df\n\n\ncomponent = CommonCrawl\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame" ] ]
ciceklab/targeted_brain_tumor_margin_assessment
[ "2cf729019dfc1785992208a69c353a659c9b6448" ]
[ "train_with_your_data/scripts/cpmg/pathologic_classification/control_tumor/load_fully_quantified_cpmg_data.py" ]
[ "import pdb\r\nimport pickle\r\nimport pandas as pd\r\nimport os \r\nimport numpy as np\r\nimport sys\r\nsys.path.insert(1,\"../\")\r\nsys.path.insert(1,\"../../\")\r\nsys.path.insert(1,\"../../../\")\r\nfrom config_u import base\r\nproject_base_path = base\r\ncurrent_path = \"scripts/cpmg/pathologic_classification/\"\r\n\r\nsys.path.insert(1, os.path.join(project_base_path, current_path))\r\nfrom data_utils import split_to_kfold, spectrum2ppm, spectrum_peak_unit_quantification\r\n\r\n\r\nSEED = int(input(\"(CPMG) Enter Data and Weight Initialization Seed: \"))\r\n\r\n# load fully quantified samples\r\ndatapath_base = os.path.join(project_base_path, \"data/raw_data_cpmg/\") \r\nwith open(os.path.join(datapath_base, \"fully_quantified_samples_spectra\"), \"rb\") as f:\r\n c_spectra = pickle.load(f)\r\nwith open(os.path.join(datapath_base, \"fully_quantified_samples_quantification\"), \"rb\") as f:\r\n c_quantification = pickle.load(f)\r\nwith open(os.path.join(project_base_path, \"data/raw_data_cpmg/metabolite_names\"), \"rb\") as f:\r\n metabolite_names = pickle.load(f)\r\nc_statistics = pd.read_pickle(os.path.join(datapath_base, \"fully_quantified_samples_statistics\"))\r\n\r\n# find samples with valid pathologic classification (i.e. \"*\")\r\nindex = c_statistics.index\r\ncondition = c_statistics[\"Pathologic Classification\"] != \"*\"\r\nvalid_sample_indices = index[condition].tolist()\r\nvalid_statistics = c_statistics.iloc[valid_sample_indices, :].reset_index(drop=True)\r\nvalid_spectra = c_spectra[valid_sample_indices, :]\r\nvalid_quant = c_quantification[valid_sample_indices, :]\r\n\r\n\r\nvalid_pathologic_class = [\"Agressive-GLIOMA\", \"Benign-GLIOMA\", \"Control\"]\r\nindex = valid_statistics.index\r\ncondition = valid_statistics[\"Pathologic Classification\"].isin(valid_pathologic_class)\r\ntask_based_sample_indices = index[condition].tolist()\r\nstatistics = valid_statistics.iloc[task_based_sample_indices, :].reset_index(drop=True)\r\nspectra = valid_spectra[task_based_sample_indices, :]\r\nquant = valid_quant[task_based_sample_indices, :]\r\n\r\n# split dataset to 5 folds with no patient and sample overlap \r\nfold_dct, class_labels = split_to_kfold(spectra, statistics, \"benign_aggressive\", k=5, seed=SEED)\r\nclass_labels = np.array(class_labels).reshape(-1,1)\r\n\r\n# discard control\r\nnon_control_indices = list(np.where(class_labels == 1)[0]) + list(np.where(class_labels == 0)[0])\r\ncontrol_indices = list(np.where(class_labels == 2)[0])\r\nstatistics = statistics.iloc[non_control_indices, :].reset_index(drop=True)\r\nspectra = spectra[non_control_indices, :]\r\nquant = quant[non_control_indices, :]\r\nclass_labels = class_labels[non_control_indices]\r\n\r\n# remove control samples from folds\r\nfor key in fold_dct.keys():\r\n samples = set(fold_dct[key])\r\n samples = samples.difference(control_indices)\r\n fold_dct[key] = list(samples)\r\n\r\n# map indices to old position\r\nnew_fold_dct = {\"0\":[], \"1\":[], \"2\":[], \"3\":[], \"4\":[]}\r\nfor new_idx, old_idx in enumerate(non_control_indices):\r\n for key in fold_dct.keys():\r\n if old_idx in fold_dct[key]:\r\n new_fold_dct[key].append(new_idx)\r\n break\r\nold_fold_dct = fold_dct\r\nfold_dct = new_fold_dct\r\n\r\n# scale CPMG spectra with respect to reference Acetate and sample mass\r\nmass = np.array(statistics[\"Mass\"].tolist()).astype(float)\r\nmass_factor = np.repeat(mass.reshape(-1,1), spectra.shape[1], axis=1)\r\nnormalized_spectra = np.divide(spectra, mass_factor)\r\nscaled_spectra = normalized_spectra * 
spectrum_peak_unit_quantification\r\n\r\n# calculate ppm spectra\r\nppm_spectra = spectrum2ppm(scaled_spectra)\r\n\r\n# rename variables to be accessed from other scripts\r\nfq_v_ppm_spectra = ppm_spectra\r\nfq_v_spectra = scaled_spectra\r\nfq_v_statistics = statistics\r\nfq_v_quant = quant\r\nfq_v_class_labels = class_labels\r\nfq_v_metabolite_names = metabolite_names\r\nfq_v_fold_dct = fold_dct" ]
[ [ "numpy.array", "numpy.where", "numpy.divide" ] ]
awesome-archive/nball4tree
[ "62621d01671136771c6d720d19c01ea7eeef9a3f" ]
[ "nball4tree/main_training_process.py" ]
[ "import os\nimport copy\nimport time\nimport decimal\nimport operator\nimport numpy as np\nfrom distutils.dir_util import copy_tree\nfrom nball4tree.config import cgap, L0, R0, DIM, DECIMAL_PRECISION\nfrom nball4tree.util_train import get_children\nfrom nball4tree.util_vec import vec_norm, qsr_DC, qsr_DC_degree, qsr_P, qsr_P_degree, dis_between_ball_centers\nfrom nball4tree.util_file import create_ball_file, load_balls, get_ball_from_file, merge_balls_into_file, \\\n initialize_dictionaries\nfrom nball4tree.geo_transformation import ratio_homothetic_DC_transform, homothetic_recursive_transform_of_decendents,\\\n shift_whole_tree_of\n\ndecimal.getcontext().prec = DECIMAL_PRECISION\n\n\ndef get_word2vector(wordsense, word2vecDic = dict()):\n \"\"\"\n :param wordsense:\n :param word2vecDic:\n :return:\n \"\"\"\n wd = wordsense.split('.')[0]\n if wd in word2vecDic:\n return word2vecDic[wd]\n elif wordsense.split('.')[0] in word2vecDic:\n return word2vecDic[wordsense.split('.')[0]]\n\n\ndef initialize_ball(root, addDim=[], L0=0.1, R0=0.1,\n word2vecDic=dict(), wscatCodeDic=dict(), word2ballDic=dict(), outputPath=None):\n \"\"\"\n :param root:\n :param addDim:\n :param L0:\n :param R0:\n :param word2vecDic:\n :param wscatCodeDic:\n :param word2ballDic:\n :param outputPath:\n :return:\n \"\"\"\n w2v = [decimal.Decimal(ele*100) for ele in get_word2vector(root, word2vecDic=word2vecDic)]\n cpoint = w2v + [ele+10 for ele in wscatCodeDic[root]]+ addDim\n word2ballDic[root] = vec_norm(cpoint) + [L0, R0]\n if outputPath:\n create_ball_file(root, outputPath=outputPath,word2ballDic=word2ballDic)\n return word2ballDic[root], word2ballDic\n\n\ndef training_P_by_name(childName, atreeName, addDim=[], wsChildrenDic=dict(),word2vecDic=dict(), wscatCodeDic=dict(),\n word2ballDic=dict(), sep='.', outputPath=\"\", logFile=None):\n \"\"\"\n :param childName:\n :param atreeName:\n :param addDim:\n :param wsChildrenDic:\n :param word2vecDic:\n :param wscatCodeDic:\n :param word2ballDic:\n :param sep:\n :param outputPath:\n :param logFile:\n :return:\n \"\"\"\n\n if childName.split(sep)[0] == atreeName.split(sep)[0]:\n BallLeaf = word2ballDic[childName]\n BallParent, word2ballDic = initialize_ball(atreeName, addDim=addDim, L0=L0, R0=R0, word2vecDic=word2vecDic,\n wscatCodeDic=wscatCodeDic, word2ballDic=word2ballDic,\n outputPath=outputPath)\n LeafO, ParentO = BallLeaf[:-2], BallParent[:-2]\n LeafL, LeafR = BallLeaf[-2],BallLeaf[-1]\n ParentL, ParentR = LeafL + LeafR + cgap, LeafR + LeafR + cgap + cgap\n BallParent = ParentO + [ParentL, ParentR]\n word2ballDic.update({atreeName: BallParent})\n else:\n targetsin0 = 0.6\n while True:\n BallLeaf = word2ballDic[childName]\n BallParent, word2ballDic = initialize_ball(atreeName, addDim=addDim, L0=L0, R0=R0, word2vecDic=word2vecDic,\n wscatCodeDic=wscatCodeDic, word2ballDic=word2ballDic,\n outputPath=outputPath)\n LeafO, ParentO = [decimal.Decimal(ele) for ele in BallLeaf[:-2]], \\\n [decimal.Decimal(ele) for ele in BallParent[:-2]]\n LeafL, LeafR = BallLeaf[-2],BallLeaf[-1]\n sin_beta = BallLeaf[-1] / BallLeaf[-2]\n\n delta = 1 - sin_beta * sin_beta\n if delta < 0:\n delta = 0\n cos_beta = np.sqrt(delta)\n cos_alpha = np.dot(LeafO, ParentO) / np.linalg.norm(LeafO)/ np.linalg.norm(ParentO)\n\n delta = 1 - cos_alpha * cos_alpha\n if delta < 0:\n delta = 0\n sin_alpha = np.sqrt(delta)\n\n # begin alpha --> xalpha\n xalpha = sin_alpha/25\n yalpha = np.sqrt(1 - xalpha*xalpha)\n sin_xalpha = xalpha*cos_alpha + yalpha*sin_alpha\n delta = 1 - sin_xalpha * sin_xalpha\n if delta < 
0: delta = 0\n cos_xalpha = np.sqrt(delta)\n\n sin_alpha = sin_xalpha\n cos_alpha = cos_xalpha\n # end\n\n dOO = LeafL * decimal.Decimal(cos_beta)\n\n cos_alpha_beta = (decimal.Decimal(cos_beta) * decimal.Decimal(cos_alpha)\n - decimal.Decimal(sin_beta) * decimal.Decimal(sin_alpha))\n if cos_alpha_beta <=0:\n # shift_one_family(root=childName, targetsin = targetsin0, outputPath=outputPath)\n L, R = word2ballDic[childName][-2:]\n print('Shifting...', childName)\n LNew = R / decimal.Decimal(targetsin0)\n with open(logFile, 'a+') as wlog:\n wlog.write(\" \".join([\"shifting\",str(childName)]+\n [str(ele) for ele in word2ballDic[childName][:-2]] + [str(LNew - L)]))\n wlog.write(\"\\n\")\n word2ballDic=shift_whole_tree_of(childName, word2ballDic[childName][:-2], LNew - L,\n wsChildrenDic=wsChildrenDic, word2ballDic=word2ballDic,\n outputPath=outputPath)\n # check_P_for_child_parent_in_one_family(childName, ballPath=outputPath)\n checkResult = check_DC_for_sibilings_in_one_family(childName)\n if checkResult:\n print(\"check_DC_for_sibilings_in_one_family\", childName, checkResult)\n targetsin0 *= 0.9\n else:\n break\n\n ParentL = dOO / cos_alpha_beta\n assert ParentL > 0 and ParentL != np.inf\n\n ParentR = ParentL * (decimal.Decimal(sin_alpha) * decimal.Decimal(cos_beta)\n + decimal.Decimal(cos_alpha) * decimal.Decimal(sin_beta)) + decimal.Decimal(0.1)\n BallParent = ParentO + [ParentL, ParentR]\n word2ballDic.update({atreeName: BallParent})\n\n count = 0\n while qsr_P_degree(word2ballDic[childName], word2ballDic[atreeName]) < 0:\n oldParentR, delta = ParentR, 10\n ParentR += decimal.Decimal(2) - qsr_P_degree(word2ballDic[childName], word2ballDic[atreeName])\n while oldParentR == ParentR:\n ParentR += delta\n delta *= 10\n BallParent = ParentO + [ParentL, ParentR]\n word2ballDic.update({atreeName: BallParent})\n # print('*', qsr_P_degree_by_name(childName, atreeName))\n # print(\"**\", qsr_P_by_name(childName, atreeName))\n count += 1\n # print('count', count)\n\n # assert qsr_P_by_name(childName, atreeName), childName+\" - \"+atreeName+\": \"+str(qsr_P_degree_by_name(childName, atreeName))\n if outputPath:\n create_ball_file(atreeName, outputPath=outputPath,word2ballDic=word2ballDic)\n return BallParent, word2ballDic\n\n\ndef making_ball_contains(root, children, addDim=[], word2vecDic=dict(),\n wsChildrenDic=dict(), wscatCodeDic=dict(), word2ballDic=dict(),\n outputPath=None, logFile=None):\n \"\"\"\n :param root:\n :param children:\n :param addDim:\n :param wsChildrenDic:\n :param wscatCodeDic:\n :param word2ballDic:\n :param outputPath:\n :param logFile:\n :return:\n \"\"\"\n maxL = -1\n flag = False\n while not flag:\n flag = True\n for childName in children:\n pBall, word2ballDic = training_P_by_name(childName, root, addDim=addDim,\n wsChildrenDic=wsChildrenDic, word2vecDic=word2vecDic, wscatCodeDic=wscatCodeDic,\n word2ballDic=word2ballDic,\n outputPath=outputPath, logFile=logFile)\n assert pBall != -1\n if maxL == -1: # initialize maxL, minL_R\n maxL, minL_R = pBall[-2], decimal.Decimal(pBall[-2]) - decimal.Decimal(pBall[-1])\n if maxL < pBall[-2]:\n maxL = pBall[-2]\n delta = decimal.Decimal(pBall[-2]) - decimal.Decimal(pBall[-1])\n if delta <=0:\n print('Shifting...mbc', root)\n with open(logFile, 'a+') as wlog:\n wlog.write(\" \".join([\"shifting\",str(root)]+\n [str(ele) for ele in word2ballDic[root][:-2]] + [str(-delta)]))\n wlog.write(\"\\n\")\n word2ballDic = shift_whole_tree_of(root, word2ballDic[root][:-2], -delta,\n wsChildrenDic=wsChildrenDic, word2ballDic=word2ballDic,\n 
outputPath=outputPath)\n flag = False\n break\n elif decimal.Decimal(pBall[-2]) - decimal.Decimal(pBall[-1]) < minL_R:\n minL_R = decimal.Decimal(pBall[-2]) - decimal.Decimal(pBall[-1])\n\n word2ballDic[root] = word2ballDic[root][:-2] + [maxL, maxL - minL_R + cgap]\n if outputPath:\n create_ball_file(root, outputPath=outputPath,word2ballDic=word2ballDic)\n return word2ballDic\n\n\ndef training_DC_by_name(childrenNames, wsChildrenDic=dict(), word2ballDic=dict(),\n outputPath=None, ordered = False, logFile=None):\n \"\"\"\n :param childrenNames:\n :param wsChildrenDic:\n :param word2ballDic:\n :param outputPath:\n :param maxsize:\n :param mindim:\n :param logFile:\n :return:\n \"\"\"\n dic = dict()\n for tree in childrenNames:\n dic[tree] = word2ballDic[tree][-2]\n dic0 = copy.deepcopy(dic)\n\n if ordered:\n lst = [(node, word2ballDic[node]) for node in childrenNames]\n else:\n lst = [(item[0], word2ballDic[item[0]]) for item in sorted(dic.items(), key=operator.itemgetter(1))]\n\n i = 0\n if \"herd.n.02\" in childrenNames and \"gathering.n.01\" in childrenNames:\n print('break')\n while i < len(lst) - 1:\n # print('i:', i, ' in', len(lst))\n j = i + 1\n refTreeName = lst[i][0]\n while j < len(lst):\n curTreeName = lst[j][0]\n # print(curTreeName, refTreeName)\n targetsin0 = 0.6\n while not qsr_DC(word2ballDic[curTreeName], word2ballDic[refTreeName]):\n ball1 = word2ballDic[curTreeName]\n l1, r1 = decimal.Decimal(ball1[-2]), decimal.Decimal(ball1[-1])\n k = r1 / l1\n if k == 1:\n L, R = word2ballDic[curTreeName][-2:]\n print('Shifting...', curTreeName)\n LNew = R / decimal.Decimal(targetsin0)\n with open(logFile, 'a+') as wlog:\n wlog.write(\" \".join([\"shifting\", str(tree)] +\n [str(ele) for ele in word2ballDic[tree][:-2]] + [str(LNew - L)]))\n wlog.write(\"\\n\")\n word2ballDic= shift_whole_tree_of(tree, word2ballDic[curTreeName][:-2], LNew - L,\n wsChildrenDic=wsChildrenDic, word2ballDic=word2ballDic,\n outputPath=outputPath)\n # check_P_for_child_parent_in_one_family(tree, ballPath=outputPath)\n checkResult=check_DC_for_sibilings_in_one_family(tree)\n if checkResult:\n print(\"check_DC_for_sibilings_in_one_family\", tree, checkResult)\n targetsin0 *= 0.9\n\n ratio0, word2ballDic = ratio_homothetic_DC_transform(curTreeName, refTreeName,\n wsChildrenDic=wsChildrenDic,\n word2ballDic=word2ballDic,\n outputPath=outputPath,\n logFile=logFile)\n assert ratio0 != -1\n\n # assert qsr_DC_by_name(curTreeName, refTreeName, outputPath=outputPath)\n if outputPath:\n create_ball_file(curTreeName, outputPath=outputPath, word2ballDic=word2ballDic)\n j += 1\n for tree in childrenNames:\n dic[tree] = word2ballDic[tree][-2]\n lst = [(item[0], word2ballDic[item[0]]) for item in sorted(dic.items(), key=operator.itemgetter(1))]\n i += 1\n\n if \"herd.n.02\" in childrenNames and \"gathering.n.01\" in childrenNames:\n print('break')\n\n #####\n # homothetic transformation\n #####\n for child in childrenNames:\n ratio = word2ballDic[child][-2]/decimal.Decimal(dic0[child])\n word2ballDic = homothetic_recursive_transform_of_decendents(child, root=child, rate=ratio,\n wsChildrenDic=wsChildrenDic,\n word2ballDic=word2ballDic, outputPath=outputPath)\n return word2ballDic\n\n\ndef training_one_family(treeStruc=None,root=None, addDim=[], wsChildrenDic = dict(), word2vecDic=dict(),\n wscatCodeDic=dict(),\n word2ballDic = dict(), outputPath=None, logFile=None):\n \"\"\"\n :param treeStruc:\n :param root:\n :param addDim:\n :param wsChildrenDic:\n :param word2vecDic:\n :param wscatCodeDic:\n :param word2ballDic:\n :param 
outputPath:\n :param logFile:\n :return:\n \"\"\"\n if treeStruc:\n children = treeStruc[root]\n else:\n children = get_children(root, wsChildrenDic=wsChildrenDic)\n if len(children) > 0:\n for child in children:\n word2ballDic = training_one_family(treeStruc=treeStruc, root=child, addDim=addDim,\n wsChildrenDic=wsChildrenDic,\n word2vecDic=word2vecDic, wscatCodeDic=wscatCodeDic,\n word2ballDic=word2ballDic,\n outputPath=outputPath, logFile=logFile)\n # children shall be separated\n if len(children) > 1:\n # print('training dc of root', root)\n word2ballDic = training_DC_by_name(children, wsChildrenDic=wsChildrenDic, word2ballDic=word2ballDic,\n outputPath=outputPath, logFile=logFile)\n # root ball shal contain all children balls\n word2ballDic = making_ball_contains(root, children, addDim=addDim, word2vecDic=word2vecDic,\n wsChildrenDic=wsChildrenDic, wscatCodeDic=wscatCodeDic,\n word2ballDic =word2ballDic, outputPath=outputPath, logFile=logFile)\n return word2ballDic\n\n else:\n ball, word2ballDic = initialize_ball(root, addDim=addDim, L0=L0, R0=R0,\n word2vecDic=word2vecDic, word2ballDic=word2ballDic,\n wscatCodeDic=wscatCodeDic,\n outputPath=outputPath)\n return word2ballDic\n\n\ndef check_P_for_child_parent_in_one_family(root=None, wsChildrenDic=dict(), word2ballDic=dict(), ballPath=\"\"):\n \"\"\"\n :param root:\n :param wsChildrenDic:\n :param word2ballDic:\n :param ballPath:\n :return:\n \"\"\"\n lst = [root]\n while lst:\n parent = lst.pop()\n pBall = get_ball_from_file(parent, ballPath = ballPath) #word2ballDic[parent]\n children = get_children(parent, wsChildrenDic=wsChildrenDic)\n lst += children\n for child in children:\n chBall = get_ball_from_file(child, ballPath = ballPath) #word2ballDic[child]\n if not qsr_P(word2ballDic[child], word2ballDic[parent]):\n print(child, parent, 'violates condition 3')\n dis = dis_between_ball_centers(chBall, pBall)\n print('dis:', dis)\n print('r1', chBall[-1])\n print('R', pBall[-1])\n print('shall >=0', pBall[-1]- dis - chBall[-1])\n # assert qsr_P_by_name(child, parent), str(qsr_P_degree_by_name(child, parent))\n return [root]\n # print(\"passed checking \", root, ' for part of')\n return []\n\n\ndef check_DC_for_sibilings_in_one_family(root=\"*root*\", wsChildrenDic=dict(), word2ballDic=dict()):\n \"\"\"\n :param root:\n :param wsChildrenDic:\n :param word2ballDic:\n :return:\n \"\"\"\n lst = [root]\n checkResult = []\n while lst:\n parent = lst.pop()\n children = get_children(parent, wsChildrenDic=wsChildrenDic)\n lst += children\n if len(children) <2:\n continue\n i,j = 0, 0\n while i < len(children):\n j = i + 1\n while j < len(children):\n if not qsr_DC(word2ballDic[children[i]], word2ballDic[children[j]]):\n print(children[i], children[j], 'violates condition 4')\n # print('shall >=0', str(qsr_DC_degree(word2ballDic[children[i]], word2ballDic[children[j]])))\n # return [root]\n checkResult.append((children[i], children[j]))\n j += 1\n i += 1\n return checkResult\n\n\ndef training_all_families(root=\"*root*\", wsChildrenDic=dict(), word2vecDic=dict(), wscatCodeDic=dict(),\n word2ballDic=dict(),\n outputPath=None, logFile=None, checking = False):\n \"\"\"\n :param root:\n :param wsChildrenDic:\n :param word2vecDic:\n :param wscatCodeDic:\n :param word2ballDic:\n :param outputPath:\n :param logFile:\n :param checking:\n :return:\n \"\"\"\n global L0, DIM\n children = get_children(root, wsChildrenDic=wsChildrenDic)\n child0= 'entity.n.01'\n children = sorted(children, key=lambda ele: np.dot(get_word2vector(child0, 
word2vecDic=word2vecDic),\n get_word2vector(ele, word2vecDic=word2vecDic)))\n print(children)\n N = int(np.ceil(np.log(len(children))))\n open(logFile, 'w+')\n while children:\n child = children.pop()\n k = 512\n addDim0 = list(bin(N))[2:][:DIM]\n if len(addDim0) < DIM:\n addDim0 += [0] * (DIM - len(addDim0))\n addDim = [int(ele) * 2 - 1 for ele in addDim0]\n addDim = [ele * k for ele in addDim]\n print(\"***\", child)\n with open(logFile, 'a+') as wlog:\n wlog.write(\" \".join([str(ele) for ele in [child]\n +addDim\n +[time.strftime(\"%Y-%m-%d %H:%M:%S\",time.gmtime())]]))\n wlog.write(\"\\n\")\n word2ballDic = training_one_family(root=child, addDim=addDim, wsChildrenDic=wsChildrenDic,\n word2vecDic=word2vecDic, wscatCodeDic=wscatCodeDic,\n word2ballDic=word2ballDic,\n outputPath=outputPath, logFile=logFile)\n children = sorted(children, key=lambda ele: np.dot(get_word2vector(child, word2vecDic=word2vecDic),\n get_word2vector(ele, word2vecDic=word2vecDic)))\n print(\"finished training of all families\\n\")\n\n if checking:\n print(\"checking each family\\n\")\n maxsize, mindim, word2ballDic = load_balls(ipath=outputPath, word2ballDic=word2ballDic)\n\n failed_P, failed_DC = [], []\n\n for child in get_children(root):\n failed_P += check_P_for_child_parent_in_one_family(child, word2ballDic =word2ballDic,\n wsChildrenDic=wsChildrenDic, ballPath=outputPath)\n failed_DC += check_DC_for_sibilings_in_one_family(root=child, word2ballDic =word2ballDic,\n wsChildrenDic=wsChildrenDic)\n print(\"failed families with P\", failed_P)\n print(\"failed families with DC\", failed_DC)\n return word2ballDic\n\n\ndef testing_whole_family(outputPath=None, wsChildrenDic=dict(), word2ballDic=dict(), outputBallFile=None):\n \"\"\"\n :param outputPath:\n :param wsChildrenDic:\n :param word2ballDic:\n :param outputBallFile:\n :return:\n \"\"\"\n print(\"checking whether the tree structure is perfectly encoded in nball embeddings...\\n\")\n failed_P, failed_DC = [], []\n maxsize, mindim, word2ballDic = load_balls(ipath = outputPath, word2ballDic=word2ballDic)\n\n for froot in get_children('*root*', wsChildrenDic=wsChildrenDic):\n failed_P += check_P_for_child_parent_in_one_family(froot,\n wsChildrenDic=wsChildrenDic,\n word2ballDic=word2ballDic,\n ballPath=outputPath)\n\n failed_DC += check_DC_for_sibilings_in_one_family(root='*root*', wsChildrenDic=wsChildrenDic,\n word2ballDic=word2ballDic)\n print(\"failed families with P\", failed_P)\n print(\"failed families with DC\", failed_DC)\n if failed_P == [] and failed_DC == []:\n print(\"the tree structure is perfectly encoded in nball embeddings.\\n\")\n print(\"generating nball embedding file...\\n\")\n merge_balls_into_file(ipath= outputPath, outfile=outputBallFile)\n else:\n print(\"the tree structure is NOT perfectly encoded in nball embeddings.\\n\")\n print(\"try again, or contact the author\")\n\n\ndef fix_dim(maxsize, mindim, word2ballDic=dict(), bPath = '/Users/tdong/data/glove/glove.6B/glove.6B.50Xball'):\n \"\"\"\n :param maxsize:\n :param mindim:\n :param word2ballDic:\n :param bPath:\n :return:\n \"\"\"\n for bf in os.listdir(bPath):\n with open(os.path.join(bPath, bf), 'r') as ifh:\n wlst = ifh.readline().strip().split()\n ballv = [decimal.Decimal(ele) for ele in wlst]\n delta = maxsize - len(ballv)\n if delta > 0:\n assert len(wlst) < maxsize\n print(bf, len(wlst), ballv[-1])\n vec = vec_norm(ballv[:-2] + [decimal.Decimal(mindim)] * delta) + ballv[-2:]\n word2ballDic[bf] = vec\n if outputPath:\n create_ball_file(bf, 
outputPath=bPath,word2ballDic=word2ballDic)\n return word2ballDic\n\n\ndef make_DC_for_first_level_children(root=\"*root*\", firstChild = 'entity.n.01', wsChildrenDic=dict(),\n outputPath='', maxsize=0, mindim=0, word2ballDic = dict(),\n logFile=None):\n \"\"\"\n :param root:\n :param firstChild:\n :param wsChildrenDic:\n :param outputPath:\n :param maxsize:\n :param mindim:\n :param word2ballDic:\n :param logFile:\n :param checking:\n :return:\n \"\"\"\n children = get_children(root, wsChildrenDic=wsChildrenDic)\n children.remove(firstChild)\n children.insert(0, firstChild)\n print('updating first level children...')\n word2ballDic = training_DC_by_name(children, outputPath=outputPath, wsChildrenDic=wsChildrenDic,\n word2ballDic =word2ballDic, ordered = True,\n logFile=logFile)\n return word2ballDic\n\n\ndef train_word2ball(root=\"\", outputPath = '', logFile='', wsChildrenDic=dict(),\n word2ballDic=dict(), word2vecDic=dict(), outputPathBack = None,\n wscatCodeDic=dict(), outputBallFile=None):\n \"\"\"\n :param root:\n :param outputPath:\n :param logFile:\n :param wsChildrenDic:\n :param word2ballDic:\n :param word2vecDic:\n :param wscatCodeDic:\n :param outputBallFile:\n :param outputBallForestFile:\n :return:\n \"\"\"\n training_all_families(root=root, wsChildrenDic=wsChildrenDic, word2vecDic=word2vecDic,\n wscatCodeDic=wscatCodeDic, word2ballDic=word2ballDic,\n outputPath=outputPath, logFile=logFile)\n if outputPathBack:\n copy_tree(outputPath, outputPathBack)\n maxsize, mindim , word2ballDic = load_balls(ipath=outputPath, word2ballDic=word2ballDic)\n fix_dim(maxsize, mindim, bPath=outputPath)\n make_DC_for_first_level_children(root=root, firstChild = 'entity.n.01', wsChildrenDic=wsChildrenDic,\n word2ballDic=word2ballDic, outputPath=outputPath,\n maxsize=maxsize, mindim=mindim, logFile=logFile)\n\n testing_whole_family(outputPath=outputPath, wsChildrenDic=wsChildrenDic, outputBallFile=outputBallFile)\n\n" ]
[ [ "numpy.sqrt", "numpy.dot", "numpy.linalg.norm" ] ]
sarrouti/VQG
[ "eb9cbe3ba4f75d85fc55f5f1e746b1f2190f0b2b" ]
[ "models/encoder_cnn.py" ]
[ "\n\"\"\"\nCreated on Tue Jun 23 20:15:11 2020\n\n@author: sarroutim2\n\"\"\"\n\n\"\"\"Genearates a representation for an image input.\n\"\"\"\n\nimport torch.nn as nn\nimport torch\nimport torchvision.models as models\n\n\nclass EncoderCNN(nn.Module):\n \"\"\"Generates a representation for an image input.\n \"\"\"\n\n def __init__(self, output_size):\n \"\"\"Load the pretrained ResNet-152 and replace top fc layer.\n \"\"\"\n super(EncoderCNN, self).__init__()\n self.cnn = models.resnet50(pretrained=True)#resnet18\n for param in self.cnn.parameters():\n param.requires_grad = False\n self.cnn.fc = nn.Linear(self.cnn.fc.in_features, output_size)\n self.bn = nn.BatchNorm1d(output_size, momentum=0.01)\n self.init_weights()\n \"\"\"\n super(EncoderCNN, self).__init__()\n self.cnn = models.googlenet(pretrained=True)#resnet18\n for param in self.cnn.parameters():\n param.requires_grad = False\n num_features = self.cnn.classifier[6].in_features\n features = list(self.cnn.classifier.children())[:-1]\n features.extend([nn.Linear(num_features, 512)])\n self.cnn.classifier=nn.Sequential(*features)\n #self.cnn.fc=nn.Sequential(*features)\n\n self.cnn.fc = nn.Linear(512, output_size)\n #self.cnn.classifier = nn.Sequential(*features)\n self.bn = nn.BatchNorm1d(output_size, momentum=0.01)\n self.init_weights()\"\"\"\n def init_weights(self):\n \"\"\"Initialize the weights.\n\t\"\"\"\n self.cnn.fc.weight.data.normal_(0.0, 0.02)\n self.cnn.fc.bias.data.fill_(0)\n\n def forward(self, images):\n \"\"\"Extract the image feature vectors.\n\t\"\"\"\n features = self.cnn(images)\n output = self.bn(features)\n return output\n" ]
[ [ "torch.nn.Linear", "torch.nn.BatchNorm1d" ] ]
zaltoprofen/chainer
[ "3b03f9afc80fd67f65d5e0395ef199e9506b6ee1" ]
[ "chainermn/communicators/_memory_utility.py" ]
[ "import ctypes\n\nimport mpi4py.MPI\nimport numpy as np\n\nimport chainer.backends\ntry:\n import cupy as cp\n _cupy_avail = True\nexcept Exception:\n _cupy_avail = False\n\n\nclass HostPinnedMemory(object):\n\n def __init__(self):\n if not _cupy_avail:\n raise RuntimeError('HostPinnedMemory cannot be used: ' +\n 'Cupy is not available.')\n self.size = 0\n self.memory = None\n\n def assign(self, size):\n if size > self.size:\n self.size = size\n self.memory = cp.cuda.alloc_pinned_memory(size)\n\n def ptr(self, offset=0):\n return ctypes.c_void_p(self.memory.ptr + offset)\n\n def buffer(self, size):\n return ctypes.cast(\n self.memory.ptr,\n ctypes.POINTER(ctypes.c_ubyte * size)\n ).contents\n\n def array(self, count, offset=0, dtype=np.float32):\n if dtype is None:\n raise TypeError('dtype must be an instance of numpy.dtype class')\n return np.frombuffer(\n self.memory, count=count, offset=offset, dtype=dtype)\n\n\nclass DeviceMemory(object):\n\n def __init__(self):\n if not _cupy_avail:\n raise RuntimeError('DeviceMemory cannot be used: ' +\n 'Cupy is not available.')\n self.size = 0\n self.memory = None\n\n def assign(self, size):\n if size > self.size:\n self.size = size\n self.memory = cp.cuda.alloc(size)\n\n def from_device(self, src, size, offset=0, stream=None):\n dst = self.memory + offset\n if stream is None:\n dst.copy_from_device(src.data, size)\n else:\n dst.copy_from_device_async(src.data, size, stream)\n\n def to_device(self, dst, size, offset=0, stream=None):\n src = self.memory + offset\n if stream is None:\n dst.data.copy_from_device(src, size)\n else:\n dst.data.copy_from_device_async(src, size, stream)\n\n def ptr(self):\n return self.memory.ptr\n\n def buffer(self, size):\n return ctypes.cast(\n self.memory.ptr,\n ctypes.POINTER(ctypes.c_ubyte * size)\n ).contents\n\n def array(self, shape, offset=0, dtype=np.float32):\n if dtype is None:\n raise TypeError('dtype must be an instance of numpy.dtype class')\n return cp.ndarray(shape, memptr=self.memory + offset, dtype=dtype)\n\n\ndef extract_params_set_data(model):\n return [param for _, param in sorted(model.namedparams())\n if param.data is not None]\n\n\ndef extract_params_set_grad(model, zero_fill):\n if zero_fill:\n return [param for _, param in sorted(model.namedparams())\n if param.data is not None]\n else:\n return [param for _, param in sorted(model.namedparams())\n if param.data is not None and param.grad is not None]\n\n\ndef count_grad_elements(params, zero_fill):\n if zero_fill:\n return sum(param.data.size for param in params)\n else:\n return sum(param.grad.size for param in params)\n\n\ndef pack_params(params, attr_name, buffer,\n transfer_dtype, zero_fill, stream=None):\n if len(params) == 0:\n return\n\n # NOTE: dtypes of params might be mixed, in particular f16 & f32.\n offset = 0\n for param in params:\n v = getattr(param, attr_name)\n if attr_name == 'grad' and v is None and zero_fill:\n v = param.xp.zeros_like(param.data)\n size = v.size * np.dtype(transfer_dtype).itemsize\n if v.dtype != transfer_dtype:\n tmp = v.astype(transfer_dtype)\n buffer.from_device(tmp, size, offset, stream)\n else:\n buffer.from_device(v, size, offset, stream)\n\n offset += size\n\n\ndef unpack_params(params, attr_name, buffer,\n transfer_dtype, zero_fill, stream=None):\n \"\"\"Pack parameters into a single CuPy array for efficient communication.\"\"\"\n if len(params) == 0:\n return\n xp = chainer.backend.get_array_module(getattr(params[0], attr_name))\n offset = 0\n for param in params:\n v = getattr(param, attr_name)\n 
if attr_name == 'grad' and v is None and zero_fill:\n v = param.xp.empty_like(param.data)\n setattr(param, attr_name, v)\n size = v.size * np.dtype(transfer_dtype).itemsize\n grad_dtype = v.dtype\n if grad_dtype != transfer_dtype:\n v = xp.array(v, copy=False, dtype=transfer_dtype)\n buffer.to_device(v, size, offset, stream)\n offset += size\n if grad_dtype != transfer_dtype:\n setattr(param, attr_name, v.astype(grad_dtype))\n\n\ndef array_to_buffer_object(array, mpi_dtype=mpi4py.MPI.FLOAT):\n xp = chainer.backend.get_array_module(array)\n\n if xp is np:\n return get_device_memory_pointer(array)\n else:\n return (get_device_memory_pointer(array), mpi_dtype)\n\n\ndef get_device_memory_pointer(array):\n xp = chainer.backend.get_array_module(array)\n array = xp.ascontiguousarray(array)\n\n if xp is np:\n return array\n else:\n return ctypes.cast(\n array.data.ptr,\n ctypes.POINTER(ctypes.c_ubyte * array.nbytes)\n ).contents\n" ]
[ [ "numpy.dtype", "numpy.frombuffer" ] ]
sjawabidgely/tensorflow
[ "f5de234d7f601214443f371e90fbadc8f128bb9a" ]
[ "tensorflow/python/eager/function.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=unidiomatic-typecheck\n\"\"\"Defun decorator for defining graph-mode functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport contextlib\nimport threading\n\nimport numpy as np\n\nfrom tensorflow.core.framework import function_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import execute\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.eager.graph_only_ops import graph_placeholder\nfrom tensorflow.python.framework import c_api_util\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes as dtypes_module\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_decorator\n\n# Thread-local storage for tfe Tensors which are referenced while evaluating a\n# graph-mode function.\n_scoped_captures = threading.local()\n# _scoped_captures.tensors is either None or a map from Tensor id to a pair\n# of a tfe tensor and its corresponding placeholder to pass as a function\n# argument. 
The value should be None unless we're in function definition\n# context.\n_scoped_captures.tensors = None\n\n\[email protected]\ndef capture_tensors(captures):\n old = _scoped_captures.__dict__.get(\"tensors\", None)\n try:\n _scoped_captures.tensors = captures\n yield\n finally:\n _scoped_captures.tensors = old\n\n\ndef capture_value(tensor_map, value, dtype, name):\n \"\"\"Capture a value from outside the function, to pass in as an extra arg.\"\"\"\n captured_value = tensor_map.get(ops.tensor_id(value), None)\n if captured_value is None:\n captured_value = graph_placeholder(\n dtype=dtype or value.dtype, shape=value.shape, name=name)\n if captured_value.dtype == dtypes_module.resource:\n handle_data = value._handle_data # pylint: disable=protected-access\n captured_value._handle_data = handle_data # pylint: disable=protected-access\n if handle_data is not None and handle_data.is_set:\n # Ensure that shapes and dtypes are propagated.\n shapes, types = zip(*[(pair.shape, pair.dtype)\n for pair in handle_data.shape_and_type])\n ranks = [len(s.dim) if not s.unknown_rank else -1 for s in shapes]\n shapes = [[d.size for d in s.dim]\n if not s.unknown_rank else None for s in shapes]\n with errors.raise_exception_on_not_ok_status() as status:\n pywrap_tensorflow.TF_GraphSetOutputHandleShapesAndTypes_wrapper(\n captured_value._op._graph._c_graph, # pylint: disable=protected-access\n captured_value._as_tf_output(), # pylint: disable=protected-access\n shapes,\n ranks,\n types,\n status)\n\n tensor_map[ops.tensor_id(value)] = (value, captured_value)\n else:\n captured_value = captured_value[1]\n tape.record_operation(\"captured_value\", [captured_value], [value],\n lambda x: [x])\n return captured_value\n\n\ndef _convert_to_graph_tensor(value, dtype=None, name=None, as_ref=False):\n \"\"\"Captures a Tensor while building a graph mode function.\n\n Arguments:\n value: A Tensor object.\n dtype: The datatype of the value produced by the node in the graph.\n name: str, Name of the node in the graph.\n as_ref: Ignored (required by register_tensor_conversion_function).\n\n Returns:\n Returns a constant (the current value of the tensor) if capturing\n is not enabled. A placeholder which will have the value of the\n tensor at runtime otherwise.\n \"\"\"\n del as_ref # Unused.\n\n if context.in_eager_mode():\n return value\n\n default_graph = ops.get_default_graph()\n if not default_graph.building_function:\n return value\n\n tensor_map = _scoped_captures.tensors\n if tensor_map is None:\n # Capturing is not enabled.\n return constant_op.constant(value.numpy())\n if type(value) == ops.Tensor and value.graph is default_graph:\n # The tensor has already been converted and captured. The type check\n # is intentional: we are checking that value is a Tensor and not an\n # EagerTensor.\n return value\n return capture_value(tensor_map, value, dtype, name)\n\n\nclass CapturingGraph(ops.Graph):\n \"\"\"Graph used when constructing eager functions.\"\"\"\n\n def __init__(self, captures):\n super(CapturingGraph, self).__init__()\n self._building_function = True\n self.captures = captures\n # Map from resource tensor name to last op (in program order) which uses\n # this tensor. 
Used to enforce that execution order matches program order\n # for resource tensors.\n self._last_op_using_resource_tensor = {}\n\n # TODO(apassos) remove once the C API is used by default.\n def _use_c_api_hack(self):\n return True\n\n def clear_resource_control_flow_state(self):\n self._last_op_using_resource_tensor = {}\n\n def create_op(\n self,\n op_type,\n inputs,\n dtypes, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_shapes=True,\n compute_device=True):\n # TODO(apassos) probably control flow has to be handled delicately here as\n # in if a resource is accessed inside a control flow context we need the\n # control dependency to point to something outside the context which is\n # guaranteed to happen after the access.\n #\n # TODO(apassos) this should do some form of alias analysis as ops which\n # forward the resources such as Identity and Switch can cause serialization\n # to fail.\n resource_inputs = set()\n control_inputs = set()\n for i, inp in enumerate(inputs):\n if inp.graph is not self:\n inputs[i] = capture_value(self.captures, inp, inp.dtype, inp.op.name)\n inp = inputs[i]\n if inp.dtype == dtypes_module.resource:\n if inp.name in self._last_op_using_resource_tensor:\n control_inputs.add(self._last_op_using_resource_tensor[inp.name])\n resource_inputs.add(inp.name)\n with self.control_dependencies(list(control_inputs)):\n op = super(CapturingGraph, self).create_op(\n op_type, inputs, dtypes, input_types, name, attrs, op_def,\n compute_shapes, compute_device)\n for name in resource_inputs:\n self._last_op_using_resource_tensor[name] = op\n return op\n\n\n# TODO(apassos): it'd be really nice if we could scope this registration.\n# Note that we register this at a higher priority than ops.Tensor since we want\n# to handle subclass specific conversion before a superclass conversion.\nops.register_tensor_conversion_function(\n ops.EagerTensor, _convert_to_graph_tensor, priority=-1)\n\n\nclass _CapturingContext(object):\n \"\"\"Tracks references to Tensors outside this context while it is active.\"\"\"\n\n def __init__(self):\n # known_ops are ops which are created while this context is active\n self.known_ops = set()\n\n # captured_tensors are all tensors referenced to by ops in this context but\n # not produced in it\n self.captured_tensors = set()\n\n def AddOp(self, op): # pylint: disable=invalid-name\n if op.type in [\"Variable\", \"VariableV2\", \"VarHandleOp\"]:\n raise ValueError(\"tfe.defun cannot capture variables created without \"\n \"using tf.get_variable. 
Op: %s\" % op)\n self.known_ops.add(op)\n for i in op.inputs:\n if i.op not in self.known_ops:\n self.captured_tensors.add(i)\n\n def __enter__(self):\n self._g = ops.get_default_graph()\n self._old = self._g._get_control_flow_context() # pylint: disable=protected-access\n self._g._set_control_flow_context(self) # pylint: disable=protected-access\n\n def __exit__(self, _, __, ___): # pylint: disable=invalid-name\n self._g._set_control_flow_context(self._old) # pylint: disable=protected-access\n\n\ndef _forward_name(n):\n \"\"\"The name of a generated forward defun named n.\"\"\"\n return \"__forward_%s_%s\" % (n, ops.uid())\n\n\ndef _backward_name(n):\n \"\"\"The name of a generated backward defun named n.\"\"\"\n return \"__backward_%s_%s\" % (n, ops.uid())\n\n\ndef _inference_name(n):\n \"\"\"The name of a forward-but-no-gradient defun named n.\"\"\"\n return \"__inference_%s_%s\" % (n, ops.uid())\n\n\n# TODO(apassos) get rid of this by splitting framework.function._DefinedFunction\n# so it doesn't have the definition-generating logic and is just a container for\n# an already-defined function.\nclass _EagerDefinedFunction(object):\n \"\"\"Function object with the interface of tf _DefinedFunction.\"\"\"\n\n def __init__(self, name, graph, operations, inputs, outputs):\n \"\"\"Initializes an eager defined function.\n\n Args:\n name: str, the name for the created function.\n graph: Graph, the graph containing the operations in the function\n operations: list of Operation; the subset of operations in the graph\n which will be in the function\n inputs: the tensors in the graph to be used as inputs to the function\n outputs: the tensors in the graph which will be outputs to the function\n \"\"\"\n with errors.raise_exception_on_not_ok_status() as status:\n fn = pywrap_tensorflow.TF_GraphToFunction_wrapper(\n graph._c_graph, # pylint: disable=protected-access\n compat.as_str(name),\n False,\n [o._c_op for o in operations], # pylint: disable=protected-access\n [t._as_tf_output() for t in inputs], # pylint: disable=protected-access\n [t._as_tf_output() for t in outputs], # pylint: disable=protected-access\n [],\n None,\n compat.as_str(\"\"),\n status)\n # TODO(apassos) avoid creating a FunctionDef (specially to grab the\n # signature, but also in general it's nice not to depend on it.\n with c_api_util.tf_buffer() as buffer_:\n with errors.raise_exception_on_not_ok_status() as status:\n pywrap_tensorflow.TF_FunctionToFunctionDef(fn, buffer_, status)\n proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_)\n function_def = function_pb2.FunctionDef()\n function_def.ParseFromString(compat.as_bytes(proto_data))\n if context.in_eager_mode():\n _register(fn)\n self.definition = function_def\n self.name = function_def.signature.name\n self.signature = function_def.signature\n self.grad_func_name = None\n self.python_grad_func = None\n self._c_func = fn\n self._grad_func = None\n\n\ndef _map_sequence_obj_to_idx(sequence):\n \"\"\"Maps objs in the sequence from id(obj) to sequence index.\"\"\"\n return {id(x): i for i, x in enumerate(sequence)}\n\n\nclass GraphModeFunction(object):\n \"\"\"Callable object representing a graph-mode function.\n\n Args:\n name: str the name of the created function\n input_placeholders: list of placeholder values (tensors) to feed when\n calling the wrapped function.\n extra_inputs: Tensor inputs this function definition closed over which\n are passed as arguments. Need to track so gradients are supported\n correctly.\n graph: the Graph from which the operations will be pulled. 
Used as\n a context when computing gradients.\n operations: the subset of Operations in the graph used in the function\n definition.\n outputs: a flat list of the Tensors in the graph used as outputs to the\n function\n func_outputs: a possibly nested python object which will be returned by\n this function. The Tensors in this structure will be replaced by their\n corresponding values in outputs.\n output_shapes: List of shapes of all tensors in outputs\n variables: (optional) List of variables to watch during function execution.\n \"\"\"\n\n def __init__(self,\n name,\n input_placeholders,\n extra_inputs,\n graph,\n operations,\n outputs,\n func_outputs,\n output_shapes,\n variables=None):\n defined_function = _EagerDefinedFunction(\n name, graph, operations, input_placeholders, outputs)\n if len(input_placeholders) != len(defined_function.signature.input_arg):\n raise ValueError(\"Internal error: invalid lengths. %s %s\" % (\n len(input_placeholders), len(defined_function.signature.input_arg)))\n self._input_placeholders = input_placeholders\n self._extra_inputs = list(extra_inputs)\n self._graph = graph\n self._has_backprop = False\n self._func_name = name\n self._function_def = defined_function\n self._num_outputs = len(defined_function.signature.output_arg)\n self._ops = operations\n self._func_outputs = func_outputs\n self._returns = [func_outputs] if isinstance(\n func_outputs, (ops.Tensor, type(None))) else list(func_outputs)\n self._output_shapes = output_shapes\n self._variables = variables if variables is not None else []\n\n @property\n def variables(self):\n return self._variables\n\n def _compute_backprop(self):\n \"\"\"Computes the backprop function object for this function.\"\"\"\n self._has_backprop = True\n with self._graph.as_default(), context.graph_mode():\n c = _CapturingContext()\n with c:\n filtered_outputs = [x for x in self._returns if x is not None]\n self._out_grad_placeholders = [\n graph_placeholder(x.dtype, x.shape) for x in filtered_outputs]\n in_gradients = gradients_impl.gradients(\n filtered_outputs,\n self._input_placeholders,\n grad_ys=self._out_grad_placeholders)\n shapes = tuple(x.shape for x in in_gradients if x is not None)\n captures = list(sorted(c.captured_tensors, key=lambda x: x.name))\n forward_name = _forward_name(self._func_name)\n self._forward_fdef = _EagerDefinedFunction(\n forward_name, self._graph, self._ops, self._input_placeholders,\n filtered_outputs + captures)\n backward_outputs = tuple(x for x in in_gradients if x is not None)\n all_inputs = self._out_grad_placeholders + captures\n # Excluding input ops from the body as we do not intend to execute these\n # operations when the function is executed.\n all_ignored_ops = frozenset(x.op for x in all_inputs)\n # Enforce a deterministic order of operations in the generated graph. 
This\n # means rerunning the function-defining code will always define the same\n # function, which is useful if we serialize this etc.\n function_def_ops = tuple(x\n for x in sorted(c.known_ops, key=lambda x: x.name)\n if x not in all_ignored_ops)\n bname = _backward_name(self._func_name)\n self._backward_function = GraphModeFunction(\n bname, all_inputs, [], self._graph, function_def_ops,\n backward_outputs, in_gradients, shapes)\n\n def _backprop_call(self, args):\n \"\"\"Calls the wrapped function and records the result on a tape.\"\"\"\n all_args = args + self._extra_inputs\n signature = self._forward_fdef.signature\n ctx = context.context()\n if ctx.in_graph_mode():\n g = ops.get_default_graph()\n g._add_function(self._forward_fdef) # pylint: disable=protected-access\n op = g.create_op(\n signature.name,\n [ops.internal_convert_to_tensor(x, ctx=ctx) for x in all_args],\n tuple(dtypes_module.DType(x.type) for x in signature.output_arg),\n op_def=signature,\n name=\"FunctionCall\",\n compute_shapes=False)\n outputs = op.outputs\n outputs = [outputs] if isinstance(\n outputs, (ops.Tensor, type(None))) else list(outputs)\n for i, s in enumerate(self._output_shapes):\n outputs[i].set_shape(s)\n else:\n outputs = execute.execute(\n str(signature.name),\n num_outputs=len(signature.output_arg),\n inputs=all_args,\n attrs=None,\n ctx=ctx)\n real_outputs = outputs[:len(self._returns)]\n side_outputs = outputs[len(self._returns):]\n\n def backward_function(*args):\n return self._backward_function(*(list(args) + side_outputs)) # pylint: disable=not-callable\n\n tape.record_operation(\n signature.name,\n real_outputs,\n (args + self._extra_inputs),\n backward_function)\n\n return self._build_call_outputs(real_outputs)\n\n @property\n def output_shapes(self):\n # TODO(ebrevdo): Should we only keep the output shapes associated\n # with len(self._returns) outputs?\n return nest.pack_sequence_as(self._func_outputs, self._output_shapes)\n\n @property\n def output_dtypes(self):\n return nest.map_structure(\n lambda x: x.dtype if x is not None else None, self._func_outputs)\n\n @property\n def captured_inputs(self):\n return self._extra_inputs\n\n @property\n def name(self):\n \"\"\"Returns the name of the function in Eager-compatible format.\"\"\"\n return self._function_def.name.encode(\"utf-8\")\n\n def add_to_graph(self, g):\n if self._function_def.name not in g._functions: # pylint: disable=protected-access\n g._add_function(self._function_def) # pylint: disable=protected-access\n for f in self._graph._functions.values(): # pylint: disable=protected-access\n if f.name not in g._functions: # pylint: disable=protected-access\n g._add_function(f) # pylint: disable=protected-access\n\n def __call__(self, *args):\n \"\"\"Executes the passed function in eager mode.\"\"\"\n for v in self._variables:\n if v._trainable: # pylint: disable=protected-access\n tape.watch_variable(v)\n\n tensor_inputs = [x for x in nest.flatten(args)\n if isinstance(x, ops.Tensor)]\n if tape.should_record(tensor_inputs) or tape.should_record(\n self._extra_inputs):\n if not self._has_backprop:\n self._compute_backprop()\n return self._backprop_call(tensor_inputs)\n\n ctx = context.context()\n if ctx.in_graph_mode():\n g = ops.get_default_graph()\n self.add_to_graph(g)\n signature = self._function_def.definition.signature\n args = list(tensor_inputs) + self._extra_inputs\n op = g.create_op(\n signature.name,\n [ops.internal_convert_to_tensor(x, ctx=ctx) for x in args],\n tuple(dtypes_module.DType(x.type) for x in 
signature.output_arg),\n op_def=signature,\n name=\"FunctionCall\",\n compute_shapes=False)\n result = op.outputs\n if not result:\n return op\n for i, s in enumerate(self._output_shapes):\n result[i].set_shape(s)\n else:\n result = execute.execute(\n str(self._func_name),\n num_outputs=self._num_outputs,\n inputs=tensor_inputs + self._extra_inputs,\n attrs=None,\n ctx=ctx)\n\n return self._build_call_outputs(result)\n\n def _build_call_outputs(self, result):\n \"\"\"Maps the fdef output list to actual output structure.\n\n Args:\n result: Output lists defined by FunctionDef.\n Returns:\n The actual call output.\n \"\"\"\n if self._func_outputs is None:\n return None\n outputs_list = nest.flatten(self._func_outputs)\n j = 0\n for i, o in enumerate(outputs_list):\n if o is not None:\n outputs_list[i] = result[j]\n j += 1\n return nest.pack_sequence_as(self._func_outputs, outputs_list)\n\n\ndef _get_defun_inputs(args):\n \"\"\"Maps the inputs args to graph inputs.\"\"\"\n ret = []\n flat_args = nest.flatten(args)\n for a in flat_args:\n if isinstance(a, ops.Tensor):\n ret.append(graph_placeholder(a.dtype, a.shape))\n else:\n ret.append(a)\n return nest.pack_sequence_as(args, ret)\n\n\ndef _defun_internal(name, func, args, kwds):\n \"\"\"Defines and returns graph-mode version of func.\"\"\"\n container_prefix = ops.get_default_graph()._container_prefix # pylint: disable=protected-access\n with context.graph_mode():\n captures = {}\n tmp_graph = CapturingGraph(captures)\n # Inherit the container prefix, since this is used for error checking when\n # isolating eager execution (the container prefix at creation must match the\n # container prefix when used, and variables accessed in the defun will be\n # used in the outside context).\n tmp_graph._container_prefix = container_prefix # pylint: disable=protected-access\n # Copy the graph collections to ensure summaries and other things work. 
This\n # lets the function access (but not mutate) collections of the containing\n # graph, such as the global step and the summary writer collections.\n curr_graph = ops.get_default_graph()\n for collection in curr_graph.collections:\n tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(\n collection)\n with tmp_graph.as_default():\n func_inputs = _get_defun_inputs(args)\n\n with capture_tensors(captures):\n this_tape = tape.push_new_tape()\n try:\n func_outputs = func(*func_inputs, **kwds)\n finally:\n tape.pop_tape(this_tape)\n variables = this_tape.watched_variables()\n\n # Returning a closed-over tensor as an output does not trigger a\n # call to convert_to_tensor, so we manually capture all such tensors.\n outputs_list = nest.flatten(func_outputs)\n func_def_outputs = [\n _convert_to_graph_tensor(x) for x in outputs_list if x is not None\n ]\n\n ids = list(sorted(captures.keys()))\n if ids:\n extra_inputs, extra_placeholders = zip(* [captures[x] for x in ids])\n else:\n extra_inputs = []\n extra_placeholders = []\n output_shapes = tuple(\n x.shape if isinstance(x, ops.Tensor) else None\n for x in outputs_list)\n\n flat_inputs = [x for x in nest.flatten(func_inputs)\n if isinstance(x, ops.Tensor)]\n all_inputs = flat_inputs + list(extra_placeholders)\n all_ignored_ops = frozenset(x.op for x in all_inputs)\n fname = _inference_name(name)\n operations = tuple(x for x in tmp_graph.get_operations()\n if x not in all_ignored_ops)\n # Register any other functions defined in the graph\n # TODO(ashankar): Oh lord, forgive me for this lint travesty.\n if context.in_eager_mode():\n for f in tmp_graph._functions.values(): # pylint: disable=protected-access\n # TODO(ashankar): What about the gradient registry?\n _register(f._c_func) # pylint: disable=protected-access\n return GraphModeFunction(\n fname, all_inputs, extra_inputs, tmp_graph, operations, func_def_outputs,\n func_outputs, output_shapes, variables)\n\n\n# Defun uses this instead of Tensor as a cache key. 
Using dtype because\n# TensorFlow graphs are not parametric wrt dtypes, and using shapes for\n# performance reasons, as much TensorFlow code specializes on known shapes to\n# produce slimmer graphs.\n_TensorDtype = collections.namedtuple(\"_TensorDtype\", [\"dtype\", \"shape\"])\n_ZeroDtype = collections.namedtuple(\"_ZeroDtype\", [\"dtype\", \"shape\"])\n\n\ndef _cache_key(x):\n \"\"\"Cache key for tfe functions.\"\"\"\n if isinstance(x, ops.Tensor):\n return _TensorDtype(x.dtype, x._shape_tuple()) # pylint: disable=protected-access\n if isinstance(x, np.ndarray):\n return (\"array\", x.shape, tuple(x.reshape(-1)))\n if isinstance(x, (list, tuple)):\n return tuple([_cache_key(a) for a in x])\n if isinstance(x, dict):\n return tuple(tuple([_cache_key(k), _cache_key(v)]) for k, v in x.items())\n return x\n\n\ndef _register(fn):\n \"\"\"Registers the function `fn`.\"\"\"\n context.context().add_function(fn)\n\n\n# TODO(apassos): better error messages for non-hashable arguments.\ndef named_defun(func, name):\n \"\"\"Defines a function with a given name.\n\n See the documentation for `defun` for more information on the semantics of the\n function.\n\n Args:\n func: the function to be wrapped.\n name: the name given to it.\n\n Returns:\n the wrapped function.\n \"\"\"\n arguments_to_functions = {}\n\n def decorated(*args, **kwds):\n \"\"\"Decorated version of func.\"\"\"\n # Macroexpand on non-Tensor arguments\n cache_key = tuple(_cache_key(x) for x in args)\n if any(isinstance(x, ops.EagerTensor) for x in kwds.values()):\n raise ValueError(\"Tensor keyword arguments are not supported.\")\n cache_key = (cache_key, tuple(kwds.items()))\n\n if cache_key not in arguments_to_functions:\n arguments_to_functions[cache_key] = _defun_internal(\n name, func, args, kwds)\n return arguments_to_functions[cache_key](*args)\n\n return decorated\n\n\ndef defun(func):\n \"\"\"Decorator to compile func into graph_mode.\n\n `defun` converts a function that constructs a TensorFlow graph into a function\n that executes the graph. TensorFlow graphs typically execute faster and with a\n lower memory-footprint than executing each of the operations that make up the\n function individually as the TensorFlow runtime can optimize the graph and\n execute sub-operations in parallel.\n\n func must be a Python function that constructs a TensorFlow graph,\n typically using functions in the tensorflow module.\n\n Arguments to func can be either Tensor objects or Python\n objects. 
Non-Tensor python objects are treated as constants, and new function\n definitions are created internally based on their values.\n\n func must return a tf.Tensor (NOT a Tensor) or a list of tf.Tensor (NOT a\n Tensor).\n\n Control flow constructs (e.g., `if`, `while`) are not yet compatible with\n `defun`.\n\n Example:\n ```python\n def f(x, y):\n return tf.reduce_mean(tf.multiply(x ** 2, 3) + y)\n\n @tfe.defun\n def g(x, y):\n return tf.reduce_mean(tf.multiply(x ** 2, 3) + y)\n\n x = tf.constant([[2.0, 3.0]])\n y = tf.constant([[3.0, -2.0]])\n # The plain function and defun-compiled function should return the same value.\n assert f(x, y).numpy() == g(x, y).numpy()\n\n # After the first invocation, the defun-compiled (graph) function runs faster\n # than the plain function because the defun-compiled function does not involve\n # Python interpreter overhead during the execution.\n %time print(f(x, y))\n %time print(g(x, y))\n ```\n\n Args:\n func: function to be compiled.\n\n Returns:\n A callable that will execute the compiled function (and return zero\n or more Tensor objects).\n \"\"\"\n # TODO(apassos): deal with captured global state. Deal with control flow.\n return tf_decorator.make_decorator(func, named_defun(func, func.__name__))\n\n\ndef make_defun_op(func, *args, **kwds):\n \"\"\"Compile func into graph_mode, assuming func arguments are *args, **kwargs.\n\n `make_defun_op` converts a function that constructs a TensorFlow graph into\n a function object and attaches it to the graph. The resulting function\n object can be queried for its properties, and called directly with different\n inputs to execute.\n\n More details on use cases and limitations are available in the\n documentation for `defun`.\n\n Example:\n ```python\n def f(x, y):\n return tf.reduce_mean(tf.multiply(x ** 2, 3) + y)\n\n def g(x, y):\n return tf.reduce_mean(tf.multiply(x ** 2, 3) + y)\n\n z = tf.constant([[0.0, 0.0]])\n g_op = make_defun_op(g, z, z)\n\n assert g_op.output_shapes == tf.TensorShape([])\n assert g_op.output_types == tf.float32\n\n x = tf.constant([[2.0, 3.0]])\n y = tf.constant([[3.0, -2.0]])\n\n # The plain function and defun-compiled function should return the same value.\n assert f(x, y).numpy() == g_op(x, y).numpy()\n ```\n\n Args:\n func: function to be compiled.\n *args: List arguments to pass to `func` when attaching to the graph.\n **kwds: Keyword arguments to pass to `func` when attaching to the graph.\n\n Returns:\n A wrapper object which can be queried for its output properties,\n and which can be called directly the way a `@defun` wrapped function\n can.\n\n Raises:\n ValueError: if any of the keyword arguments to `func` are `EagerTensor`\n objects (not yet supported).\n \"\"\"\n name = func.__name__\n if any(isinstance(x, ops.EagerTensor) for x in kwds.values()):\n raise ValueError(\"Tensor keyword arguments are not supported.\")\n return _defun_internal(name, func, args, kwds)\n" ]
[ [ "tensorflow.python.eager.tape.push_new_tape", "tensorflow.python.framework.ops.register_tensor_conversion_function", "tensorflow.python.eager.tape.pop_tape", "tensorflow.python.framework.errors.raise_exception_on_not_ok_status", "tensorflow.python.util.nest.flatten", "tensorflow.python.eager.graph_only_ops.graph_placeholder", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.python.framework.ops.uid", "tensorflow.core.framework.function_pb2.FunctionDef", "tensorflow.python.pywrap_tensorflow.TF_FunctionToFunctionDef", "tensorflow.python.framework.c_api_util.tf_buffer", "tensorflow.python.framework.ops.internal_convert_to_tensor", "tensorflow.python.eager.tape.watch_variable", "tensorflow.python.pywrap_tensorflow.TF_GetBuffer", "tensorflow.python.eager.context.in_eager_mode", "tensorflow.python.eager.tape.record_operation", "tensorflow.python.framework.dtypes.DType", "tensorflow.python.eager.context.context", "tensorflow.python.util.compat.as_str", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.eager.context.graph_mode", "tensorflow.python.eager.tape.should_record", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.util.nest.map_structure", "tensorflow.python.framework.ops.tensor_id", "tensorflow.python.util.nest.pack_sequence_as" ] ]
nikwitt/cdmft
[ "ebca66c760e0f6618a0b475eeeb5ace3cd229a2c" ]
[ "cdmft/operators/hubbard.py" ]
[ "import numpy as np, itertools as itt\nfrom scipy.linalg import expm, inv\nfrom pytriqs.operators import c as C, c_dag as CDag, n as N, dagger\n\nfrom cdmft.gfoperations import sum\nfrom cdmft.transformation import GfStructTransformationIndex\n\n\nclass Hubbard:\n \"\"\"\n meant as abstract class, realization needs self._c(s, i), self.sites, self.up, self.dn,\n self.spins, self.u\n \"\"\"\n def _c(self, spin, site, *args, **kwargs):\n return C(spin, site)\n\n def _c_dag(self, spin, site, *args, **kwargs):\n return dagger(self._c(spin, site, *args, **kwargs))\n\n def get_h_int(self):\n \"\"\"for (C)DMFT calculations\"\"\"\n return np.sum([self.u * self._c_dag(self.up, i) * self._c(self.up, i) * self._c_dag(self.dn, i) * self._c(self.dn, i) for i in self.sites], axis = 0)\n\n def h_int_cluster(self, t, mu):\n spins = [self.up, self.dn]\n return self.get_h_int() + self.kinetic_energy(t) - np.sum([self._c_dag(s, i) * mu * self._c(s, i) for s, i in itt.product(spins, self.sites)], axis = 0)\n\n def kinetic_energy(self, t):\n spins = [self.up, self.dn]\n return np.sum([self._c_dag(s, i) * t[s][i, j] * self._c(s, j) for s, i, j in itt.product(spins, self.sites, self.sites)], axis = 0)\n\n def get_gf_struct(self):\n return [[self.up, self.sites], [self.dn, self.sites]]\n\n def n(self, s, i):\n return self._c_dag(s, i) * self._c(s, i)\n\n def n_tot(self):\n spins = [self.up, self.dn]\n return np.sum([self._c_dag(s, i) * self._c(s, i) for s, i in itt.product(spins, self.sites)])\n\n def nn(self, i, j):\n return self.n_per_site(i) * self.n_per_site(j)\n\n def ss(self, i, j):\n up = self.up\n dn = self.dn\n c = lambda b, i: self._c(b, i)\n cdag = lambda b, i: self._c_dag(b, i)\n op = 0\n op += .5 * cdag(up, i) * c(dn, i) * cdag(dn, j) * c(up, j)\n op += .5 * cdag(dn, i) * c(up, i) * cdag(up, j) * c(dn, j)\n op += self.szsz(i, j)\n return op\n\n def s_plus(self, i, j):\n return self._c_dag(self.up, i) * self._c(self.dn, j)\n\n def s_minus(self, i, j):\n return self._c_dag(self.dn, i) * self._c(self.up, j)\n \n def ss_pm_loc(self, site):\n return self.s_plus(site, site) * self.s_minus(site, site)\n\n def ss_mp_loc(self, site):\n return self.s_minus(site, site) * self.s_plus(site, site)\n\n def ss_tot(self):\n return np.sum([self.ss(i, j) for i, j in itt.product(*[self.sites]*2)])\n\n def nn_tot(self):\n return np.sum([self.nn(i, j) for i, j in itt.product(*[self.sites]*2)])\n\n def sz(self, i):\n return .5 * (self.n(self.up, i) - self.n(self.dn, i))\n\n def sz_tot(self):\n return np.sum([self.sz(i) for i in self.sites])\n\n def szsz(self, i, j):\n return self.sz(i) * self.sz(j)\n\n def get_n_per_spin(self, s):\n return np.sum([self._c_dag(s, i) * self._c(s, i) for i in self.sites], axis = 0)\n\n def n_per_spin(self, s):\n return np.sum([self._c_dag(s, i) * self._c(s, i) for i in self.sites], axis = 0)\n\n def n_per_site(self, i):\n return np.sum([self._c_dag(s, i) * self._c(s, i) for s in self.spins], axis = 0)\n\n def get_n_tot(self):\n return np.sum([self.get_n_per_spin(s) for s in self.spins], axis = 0)\n\n\nclass Site(Hubbard):\n\n def __init__(self, u = None, spins = [\"up\", \"dn\"]):\n self.u = u\n self.spins = spins\n self.up = spins[0]\n self.dn = spins[1]\n self.sites = range(1)\n\n\nclass Dimer(Hubbard):\n \"\"\"\n support unitary transformation on site-space U: c -> U.c, implying e.g. 
G -> U.G.U^dag\n but no reblocking and relabeling so far # TODO\n \"\"\"\n def __init__(self, u = None, spins = [\"up\", \"dn\"], transf = None):\n self.u = u\n self.spins = spins\n self.up = spins[0]\n self.dn = spins[1]\n self.sites = range(2)\n self.transf = transf\n \n def _c(self, s, i):\n if self.transf is None:\n c = Hubbard._c(self, s, i)\n else:\n c = np.sum([self.transf[s][j, i].conjugate() * C(s, j) for j in self.sites])\n return c\n\n\nclass Triangle(Hubbard):\n\n def __init__(self, u = None, spins = [\"up\", \"dn\"], transf = None):\n self.u = u\n self.spins = spins\n self.up = spins[0]\n self.dn = spins[1]\n self.sites = range(3)\n self.transf = transf\n\n def _c(self, s, i):\n if self.transf is None:\n c = Hubbard._c(self, s, i)\n else:\n c = np.sum([self.transf[s][j, i].conjugate() * C(s, j) for j in self.sites])\n return c\n\n\nclass TriangleSpinOrbitCoupling(Triangle):\n \"\"\"\n 1 block, spin-site blockstructure\n transformation is not a dict, but an array acting on the site-space\n the aiao-field: the rotation MUST be applied first since it depends non-linearly on the \n site-space, the site-transformation comes second\n \"\"\"\n def __init__(self, blocklabel, *args, **kwargs):\n Triangle.__init__(self, *args, **kwargs)\n self.blocklabel = blocklabel\n self.blocksize = len(self.sites) * len(self.spins)\n\n def _c(self, s, i, theta = 0, phi = 0):\n if self.transf is None:\n c = C(self.blocklabel, self.superindex(s, i))\n else:\n c = np.sum([self.transf[s][j, i].conjugate() * C(self.blocklabel, self.superindex(s, j)) for j in self.sites], axis = 0)\n return c\n\n def spin_index(self, s):\n return {self.spins[0]: 0, self.spins[1]: 1}[s]\n\n def _c_rot(self, s, i, theta, phi = 0):\n spin_transf_mat = self.spin_transf_mat(theta, phi)\n c = np.sum([spin_transf_mat[self.spin_index(s), self.spin_index(t)] * self._c(t, i) for t in self.spins], axis = 0)\n return c\n\n def _c_rot_dag(self, s, i, theta, phi = 0):\n return dagger(self._c_rot(s,i,theta,phi))\n \n def spin_transf_mat(self, theta, phi = 0, force_real = True):\n py = np.matrix([[0,complex(0,-1)],[complex(0,1),0]])\n pz = np.matrix([[1,0],[0,-1]])\n m = expm(complex(0,-1)*theta*py*.5)#.dot(expm(complex(0,1)*phi*pz*.5))\n if force_real:\n m = m.real\n return m\n\n def aiao_op(self, chirality = [0, 1, 2]):\n \"\"\"\n chiralities are either 0,1,2 or 0,2,1\n \"\"\"\n operator = 0\n phi = 0\n for i in self.sites:\n theta = chirality[i] * 2 * np.pi / 3.\n for s, sign in zip(self.spins, [+1, -1]):\n operator += sign * self._c_rot_dag(s, i, theta, phi) * self._c_rot(s, i, theta, phi)\n return operator\n\n def superindex(self, s, i):\n if s in self.spins:\n s = self.spin_index(s)\n return s * 3 + i\n\n\nclass TriangleAIAO(Triangle):\n \"\"\"\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.theta = kwargs.pop('theta') if 'theta' in kwargs.keys() else 0\n self.phi = kwargs.pop('phi') if 'phi' in kwargs.keys() else 0\n self.force_real = kwargs.pop('force_real') if 'force_real' in kwargs.keys() else False\n self.site_transf = kwargs.pop('site_transf') if 'site_transf' in kwargs.keys() else False\n Triangle.__init__(self, *args, **kwargs)\n\n def _c(self, s, i):\n c = 0\n if self.site_transf:\n for j in range(3):\n c += self.site_transf[j, i].conjugate() * self._c_rot(s, j)\n else:\n c = self._c_rot(s, j)\n return c\n\n def _c_rot(self, s, i):\n s = self.spin_index(s)\n c = 0\n for t in range(2):\n a = self.superindex(t, i)\n c += inv(self.spin_transf_mat(self.theta, self.phi))[s, t] * C('spin-site', a)\n return c\n\n def 
superindex(self, s, i):\n if s in self.spins:\n s = self.spin_index(s)\n return s * 3 + i\n\n def spin_index(self, s):\n return {'up':0, 'dn':1}[s]\n\n def spin_transf_mat(self, theta, phi = 0):\n py = np.matrix([[0,complex(0,-1)],[complex(0,1),0]])\n pz = np.matrix([[1,0],[0,-1]])\n m = expm(complex(0,-1)*theta*py*.5)#.dot(expm(complex(0,1)*phi*pz*.5))\n if self.force_real:\n m = m.real\n return m\n\n\nclass Plaquette(Hubbard):\n\n def __init__(self, u = None, spins = [\"up\", \"dn\"]):\n self.u = u\n self.spins = spins\n self.up = spins[0]\n self.dn = spins[1]\n self.sites = range(4)\n\n\nclass DimerMomentum(Hubbard):\n \"\"\"\n transformation is a 2by2 matrix applied to the site-space\n \"\"\"\n def __init__(self, u = None, spins = [\"up\", \"dn\"], momenta = [\"+\", \"-\"], transformation = {\"up\": np.sqrt(.5)*np.array([[1,1],[1,-1]]), \"dn\": np.sqrt(.5)*np.array([[1,1],[1,-1]])}):\n self.u = u\n self.up = spins[0]\n self.dn = spins[1]\n self.spins = spins\n self.transformation = transformation\n self.sites = range(2)\n self.block_labels = [spin+\"-\"+k for spin in spins for k in momenta]\n self.gf_struct = [[l, range(1)] for l in self.block_labels]\n self._to_mom = GfStructTransformationIndex(self.gf_struct, [[self.up, self.sites], [self.dn, self.sites]])\n\n def _c(self, spin, site):\n return sum([self.transformation[spin][k_index, site].conjugate() * C(*self._to_mom(spin, k_index)) for k_index in range(len(self.sites))])\n\n\n\nclass TriangleMomentum(Hubbard):\n \"\"\"\n transformation is a 3by3 matrix applied to the site-space\n \"\"\"\n def __init__(self, u = None, spins = [\"up\", \"dn\"], momenta = [\"E\", \"A1\", \"A2\"], transformation = {\"up\": np.array([[1/np.sqrt(3),1/np.sqrt(3),1/np.sqrt(3)],[0,-1/np.sqrt(2),1/np.sqrt(2)],[-np.sqrt(2./3.),1/np.sqrt(6),1/np.sqrt(6)]]), \"dn\": np.array([[1/np.sqrt(3),1/np.sqrt(3),1/np.sqrt(3)],[0,-1/np.sqrt(2),1/np.sqrt(2)],[-np.sqrt(2./3.),1/np.sqrt(6),1/np.sqrt(6)]])}):\n self.u = u\n self.up = spins[0]\n self.dn = spins[1]\n self.spins = spins\n self.transformation = transformation\n self.sites = range(3)\n self.block_labels = [spin+\"-\"+k for spin in spins for k in momenta]\n self.gf_struct = [[l, range(1)] for l in self.block_labels]\n self._to_mom = GfStructTransformationIndex(self.gf_struct, [[self.up, self.sites], [self.dn, self.sites]])\n\n def _c(self, spin, site):\n return sum([self.transformation[spin][k_index, site].conjugate() * C(*self._to_mom(spin, k_index)) for k_index in range(len(self.sites))])\n\n def doublet_state(self, i, j, sz, pm = -1):\n for site in self.sites:\n if not (site in [i, j]):\n k = site\n return (self._c(self.up, i) * self._c(self.dn, j) +pm* self._c(self.dn, i) * self._c(self.up, j)) * self._c(sz, k) / np.sqrt(2)\n\n def nn_singlet_n2_state(self, i, j, pm = -1):\n return (self._c(self.up, i) * self._c(self.dn, j) +pm* self._c(self.dn, i) * self._c(self.up, j)) / np.sqrt(2)\n\n def nn_singlet_n4_state(self, i, j, pm = -1):\n for site in self.sites:\n if not (site in [i, j]):\n k = site\n return (self._c(self.up, i) * self._c(self.dn, j) +pm* self._c(self.dn, i) * self._c(self.up, j)) * self._c(self.up, k) * self._c(self.dn, k)/ np.sqrt(2)\n\n def rvb_projector(self, particle_numbers = [2,3,4], pm = -1):\n inds = [(i, j) for i in self.sites for j in range(i)]#itt.product(self.sites, self.sites)]\n terms = []\n for i in inds:\n if 2 in particle_numbers:\n terms.append(self.nn_singlet_n2_state(*i, pm = pm))\n if 3 in particle_numbers:\n terms.append(self.doublet_state(i[0], i[1], self.up, pm = 
pm)+self.doublet_state(i[0], i[1], self.dn, pm = pm))\n if 4 in particle_numbers:\n terms.append(self.nn_singlet_n4_state(*i, pm = pm))\n state = np.sum(terms, axis = 0)\n return dagger(state) * state\n\n\n\nclass TriangleMomentum2(TriangleMomentum):\n def _c(self, spin, site):\n return sum([self.transformation[spin][k_index, site] * C(*self._to_mom(spin, k_index)) for k_index in range(len(self.sites))])\n \n\nclass PlaquetteMomentum(Hubbard):\n \"\"\"\n transformation is a 4by4 matrix applied to the site-space\n \"\"\"\n def __init__(self, u = None, spins = [\"up\", \"dn\"], momenta = [\"G\", \"X\", \"Y\", \"M\"], transformation = {\"up\": .5*np.array([[1,1,1,1],[1,-1,1,-1],[1,1,-1,-1],[1,-1,-1,1]]), \"dn\": .5*np.array([[1,1,1,1],[1,-1,1,-1],[1,1,-1,-1],[1,-1,-1,1]])}):\n self.u = u\n self.up = spins[0]\n self.dn = spins[1]\n self.sites = range(4)\n self.spins = spins\n self.momenta = momenta\n self.transformation = transformation\n self.block_labels = [spin+\"-\"+k for spin in spins for k in momenta]\n self.gf_struct = [[l, range(1)] for l in self.block_labels]\n self._to_mom = GfStructTransformationIndex(self.gf_struct, [[self.up, self.sites], [self.dn, self.sites]])\n\n def _c(self, spin, site):\n return sum([self.transformation[spin][site, k_index] * C(*self._to_mom(spin, k_index)) for k_index in range(4)])\n\n def cdup_cup_cddn_cdn(self, i, j, k, l):\n \"\"\"i,j,k,l being momenta\"\"\"\n return CDag(self.up+'-'+i,0) * C(self.up+'-'+j,0) * CDag(self.dn+'-'+k,0) * C(self.dn+'-'+l,0)\n\n\nclass PlaquetteMomentumNambu(Hubbard):\n \"\"\"\n extends PlaquetteMomentum space by anomalous parts, using particle-hole transformation on \n spin-down\n \"\"\"\n def __init__(self, u, spins, momenta, transformation):\n self.u = u\n self.sites = range(4)\n self.up, self.dn = up, dn = spins[0], spins[1]\n self.site_to_mom = dict([(i, momenta[i]) for i in range(4)])\n self.transformation = transformation\n self.block_labels = [k for k in momenta]\n self.gf_struct = [[l, range(2)] for l in self.block_labels]\n\n def _c(self, spin, site):\n if spin == self.up:\n return sum([self.transformation[spin][k_index, site].conjugate() * C(self.site_to_mom[k_index], 0) for k_index in range(4)])\n elif spin == self.dn:\n return sum([self.transformation[spin][k_index, site].conjugate() * CDag(self.site_to_mom[k_index], 1) for k_index in range(4)]) # TODO what's first, momentum or nambu transf?\n assert False, \"spin \"+spin+\" not recognized\"\n\n\nclass PlaquetteMomentumAFMNambu(Hubbard):\n \"\"\"\n adds afm\n \"\"\"\n def __init__(self, u, spins, momenta, transformation):\n self.u = u\n self.sites = range(4)\n self.up, self.dn = up, dn = spins[0], spins[1]\n self.site_to_mom_up = {0: (\"GM\", 0), 1: (\"GM\", 2), 2: (\"XY\", 0), 3: (\"XY\", 2)}\n self.site_to_mom_dn = {0: (\"GM\", 1), 1: (\"GM\", 3), 2: (\"XY\", 1), 3: (\"XY\", 3)}\n self.transformation = transformation\n self.block_labels = [k for k in momenta]\n self.gf_struct = [[l, range(2)] for l in self.block_labels]\n\n def _c(self, spin, site):\n if spin == self.up:\n return sum([self.transformation[spin][k_index, site].conjugate() * C(*self.site_to_mom_up[k_index]) for k_index in range(4)])\n elif spin == self.dn:\n return sum([self.transformation[spin][k_index, site].conjugate() * CDag(*self.site_to_mom_dn[k_index]) for k_index in range(4)])\n assert False, \"spin \"+spin+\" not recognized\"\n\n def cdup_cup_cddn_cdn(self, i, j, k, l):\n \"\"\"i,j,k,l being momenta\"\"\"\n return CDag(*self.site_to_mom_up[i]) * C(*self.site_to_mom_up[j]) * 
C(*self.site_to_mom_dn[k]) * CDag(*self.site_to_mom_dn[l])\n" ]
[ [ "numpy.sqrt", "numpy.matrix", "numpy.array", "numpy.sum" ] ]
yul69-cell/HELAO
[ "a39372eb385ee93b711443d9cbd56c5ec737ff70" ]
[ "orchestrator/orchestrator_edep.py" ]
[ "import os\nimport sys\nimport time\nfrom copy import copy\nimport matplotlib.pyplot as plt\nimport numpy as np\n# from impedance.circuits import Randles, CustomCircuit\n\n\nif __package__:\n # can import directly in package mode\n print(\"importing actions from package path\")\nelse:\n # interactive kernel mode requires path manipulation\n cwd = os.getcwd()\n pwd = os.path.dirname(cwd)\n if os.path.basename(pwd) == \"HELAO\":\n sys.path.insert(0, pwd)\n if pwd in sys.path or os.path.basename(cwd) == \"HELAO\":\n print(\"importing actions from sys.path\")\n else:\n raise ModuleNotFoundError(\"unable to find actions, current working directory is {}\".format(cwd))\n\nfrom actions import actions_edep as actions\n\nblockd = {}\nblockd[\"motion\"] = True\nblockd[\"potentiostat\"] = True\nblockd[\"io\"] = True\n\n# Define all the motion stuff\n# move_rel_op = {'x':1.1,'axis':'x','blockd':blockd}\n# x,y = np.meshgrid([7.35*i for i in range(5)],[7.35*i for i in range(5)])\nx, y = np.meshgrid([10 * i for i in range(5)], [10 * i for i in range(5)])\nx, y = x.flatten(), y.flatten()\nret_homing = actions.setup_xyz_grid(blockd)\nret_middle = actions.move_middle(blockd)\n# home_z = actions.move_altern(0,'z','homing')\n# home_z = actions.move_altern(25,'z','absolute')\n\n\ndef offset(x, y):\n pos = actions.get_positions()[\"data\"]\n return np.array(x - pos[\"x\"]), np.array(y - pos[\"y\"])\n\n\nx, y = offset(x, y)\n# Define all the echem stuff\n\neis_op = {\"start_freq\": 2, \"end_freq\": 50000, \"points\": 20, \"blockd\": blockd}\n\n\ndef make_pulse(centers, pots, widths, offset=0, l=1000):\n t = np.linspace(0, 1, 1000)\n y = np.ones(1000) * offset\n for c, p, w in zip(np.array(centers), np.array(pots), np.array(widths)):\n y[np.where((c - w / 2 < t) & (t < c + w / 2))] += p\n return y\n\n\n# Do the experiment in a loop\n\ncenters = [0.5]\npots = [0.1]\nwidths = [0.01]\nstart_freq = 1000\nend_freq = 200000\npoints = 40\n\nexp_results = {}\nana_results = {}\n\n\nfor sno, dx, dy in zip([i for i in range(len(x))], x, y):\n # make it safe and pump the cell empty before moving\n actions.pump_on()\n actions.pump_backward()\n time.sleep(5)\n actions.pump_off()\n\n print(\"Doing yacos run {}.\".format(sno))\n actions.safe_movexy(dx, dy, blockd)\n\n # refill the cell\n actions.pump_on()\n actions.pump_forward()\n time.sleep(5)\n\n pulse_exp = actions.pulse(\n 20, 10 ** -5, make_pulse(centers, pots, widths), blockd=blockd\n )\n\n # while measuring the EIS we do not want flow\n actions.pump_off()\n time.sleep(2)\n\n eis_exp = actions.eis(start_freq, end_freq, points, blockd=blockd)\n\n Zreal, Zimag, Zfreq = eis_exp[\"data\"]\n Z = np.array(Zreal) + 1j * np.array(Zimag)\n frequencies = np.array(Zfreq)\n\n # do both a randles and custom fit and check which one works better\n randles = Randles(initial_guess=[0.01, 0.005, 0.1, 0.001, 200])\n RRC = CustomCircuit(\n circuit=\"R0-p(R1,C1)\",\n initial_guess=[np.percentile(Z.real, 5), np.percentile(Z.real, 95), 10 ** -5],\n )\n\n # fit them\n res_randles = randles.fit(frequencies, Z)\n res_rrc = RRC.fit(frequencies, Z)\n\n exp_results[sno] = {\n \"pulse_params\": {\"centers\": centers, \"pots\": pots, \"widths\": widths},\n \"eis_params\": {\n \"start_freq\": start_freq,\n \"end_freq\": end_freq,\n \"points\": points,\n },\n \"eis_results\": eis_exp,\n \"pulse_results\": pulse_exp,\n }\n\n ana_results[sno] = {\"randles\": copy(res_randles), \"rrc\": copy(res_rrc)}\n\n\nimport matplotlib.pyplot as plt\nfrom impedance.plotting import plot_nyquist\n\nfig, ax = 
plt.subplots(figsize=(5, 5))\nplot_nyquist(ax, frequencies, Z)\nplot_nyquist(ax, frequencies, RRC.predict(frequencies))\nplot_nyquist(ax, frequencies, randles.predict(frequencies))\nplt.show()\n\n\nfig, ax = plt.subplots(3, 3)\nax = ax.flatten()\nfor i in range(9):\n ax[i].plot(np.array(pulse_exp[\"data\"])[:, i])\nplt.show()\n" ]
[ [ "numpy.ones", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "numpy.array", "numpy.where", "numpy.linspace", "numpy.percentile" ] ]
cclauss/CMasher
[ "8ecaadd26e8a71bf7cf3ade493aef763612ff21b" ]
[ "cmasher/colormaps/redshift/redshift.py" ]
[ "# %% IMPORTS\n# Package imports\nfrom matplotlib.cm import register_cmap\nfrom matplotlib.colors import ListedColormap\n\n# All declaration\n__all__ = ['cmap']\n\n# Author declaration\n__author__ = \"Ellert van der Velden (@1313e)\"\n\n# Package declaration\n__package__ = 'cmasher'\n\n\n# %% GLOBALS AND DEFINITIONS\n# Type of this colormap\ncm_type = 'diverging'\n\n# RGB-values of this colormap\ncm_data = [[0.57869284, 0.94700453, 0.95383509],\n [0.57330096, 0.94244813, 0.95218584],\n [0.56790414, 0.93790619, 0.95055025],\n [0.56250183, 0.93337844, 0.94892950],\n [0.55709400, 0.92886470, 0.94732325],\n [0.55168026, 0.92436472, 0.94573223],\n [0.54626041, 0.91987829, 0.94415663],\n [0.54083428, 0.91540519, 0.94259649],\n [0.53540142, 0.91094515, 0.94105277],\n [0.52996183, 0.90649797, 0.93952504],\n [0.52451517, 0.90206341, 0.93801392],\n [0.51906115, 0.89764122, 0.93651980],\n [0.51359971, 0.89323118, 0.93504255],\n [0.50813051, 0.88883304, 0.93358271],\n [0.50265329, 0.88444654, 0.93214066],\n [0.49716797, 0.88007146, 0.93071625],\n [0.49167433, 0.87570754, 0.92930975],\n [0.48617195, 0.87135452, 0.92792190],\n [0.48066080, 0.86701216, 0.92655247],\n [0.47514069, 0.86268020, 0.92520162],\n [0.46961139, 0.85835838, 0.92386959],\n [0.46407266, 0.85404645, 0.92255672],\n [0.45852424, 0.84974413, 0.92126329],\n [0.45296603, 0.84545117, 0.91998924],\n [0.44739784, 0.84116729, 0.91873474],\n [0.44181946, 0.83689223, 0.91749997],\n [0.43623070, 0.83262572, 0.91628509],\n [0.43063137, 0.82836747, 0.91509023],\n [0.42502129, 0.82411721, 0.91391555],\n [0.41940026, 0.81987466, 0.91276117],\n [0.41376812, 0.81563953, 0.91162721],\n [0.40812469, 0.81141153, 0.91051377],\n [0.40246980, 0.80719037, 0.90942095],\n [0.39680330, 0.80297576, 0.90834883],\n [0.39112505, 0.79876739, 0.90729746],\n [0.38543488, 0.79456497, 0.90626692],\n [0.37973269, 0.79036819, 0.90525724],\n [0.37401834, 0.78617674, 0.90426845],\n [0.36829174, 0.78199032, 0.90330056],\n [0.36255279, 0.77780859, 0.90235356],\n [0.35680142, 0.77363124, 0.90142744],\n [0.35103745, 0.76945793, 0.90052248],\n [0.34526089, 0.76528834, 0.89963852],\n [0.33947179, 0.76112213, 0.89877532],\n [0.33367017, 0.75695898, 0.89793279],\n [0.32785588, 0.75279849, 0.89711137],\n [0.32202912, 0.74864036, 0.89631060],\n [0.31619008, 0.74448422, 0.89553017],\n [0.31033866, 0.74032967, 0.89477063],\n [0.30447528, 0.73617639, 0.89403125],\n [0.29860011, 0.73202399, 0.89331209],\n [0.29271343, 0.72787206, 0.89261309],\n [0.28681572, 0.72372025, 0.89193378],\n [0.28090735, 0.71956814, 0.89127427],\n [0.27498896, 0.71541533, 0.89063408],\n [0.26906124, 0.71126143, 0.89001292],\n [0.26312486, 0.70710597, 0.88941086],\n [0.25718092, 0.70294859, 0.88882703],\n [0.25123043, 0.69878883, 0.88826140],\n [0.24527462, 0.69462621, 0.88771373],\n [0.23931508, 0.69046034, 0.88718322],\n [0.23335351, 0.68629075, 0.88666946],\n [0.22739188, 0.68211696, 0.88617198],\n [0.22143248, 0.67793848, 0.88569042],\n [0.21547800, 0.67375483, 0.88522406],\n [0.20953152, 0.66956553, 0.88477214],\n [0.20359659, 0.66537008, 0.88433394],\n [0.19767730, 0.66116797, 0.88390868],\n [0.19177835, 0.65695868, 0.88349548],\n [0.18590517, 0.65274164, 0.88309369],\n [0.18006401, 0.64851634, 0.88270206],\n [0.17426203, 0.64428224, 0.88231942],\n [0.16850747, 0.64003882, 0.88194450],\n [0.16280986, 0.63578543, 0.88157637],\n [0.15718007, 0.63152155, 0.88121346],\n [0.15163047, 0.62724665, 0.88085394],\n [0.14617558, 0.62296002, 0.88049685],\n [0.14083138, 0.61866121, 0.88013964],\n [0.13561664, 
0.61434953, 0.87978090],\n [0.13055227, 0.61002443, 0.87941833],\n [0.12566179, 0.60568534, 0.87904945],\n [0.12097171, 0.60133167, 0.87867186],\n [0.11651156, 0.59696278, 0.87828307],\n [0.11231340, 0.59257814, 0.87787994],\n [0.10841196, 0.58817719, 0.87745920],\n [0.10484430, 0.58375940, 0.87701740],\n [0.10164914, 0.57932420, 0.87655087],\n [0.09886510, 0.57487117, 0.87605525],\n [0.09652994, 0.57039990, 0.87552588],\n [0.09468006, 0.56590987, 0.87495827],\n [0.09334588, 0.56140087, 0.87434666],\n [0.09255347, 0.55687251, 0.87368561],\n [0.09231938, 0.55232472, 0.87296842],\n [0.09265244, 0.54775739, 0.87218838],\n [0.09355187, 0.54317052, 0.87133816],\n [0.09500670, 0.53856430, 0.87040963],\n [0.09699741, 0.53393908, 0.86939415],\n [0.09949602, 0.52929541, 0.86828228],\n [0.10246810, 0.52463411, 0.86706387],\n [0.10587541, 0.51995613, 0.86572833],\n [0.10967576, 0.51526281, 0.86426419],\n [0.11382580, 0.51055574, 0.86265950],\n [0.11828050, 0.50583694, 0.86090155],\n [0.12299581, 0.50110877, 0.85897736],\n [0.12792818, 0.49637399, 0.85687361],\n [0.13303495, 0.49163581, 0.85457687],\n [0.13827461, 0.48689788, 0.85207389],\n [0.14360717, 0.48216423, 0.84935190],\n [0.14899424, 0.47743927, 0.84639902],\n [0.15439799, 0.47272781, 0.84320453],\n [0.15978344, 0.46803477, 0.83975943],\n [0.16511635, 0.46336535, 0.83605671],\n [0.17036521, 0.45872465, 0.83209177],\n [0.17550082, 0.45411767, 0.82786267],\n [0.18049667, 0.44954915, 0.82337025],\n [0.18532943, 0.44502339, 0.81861819],\n [0.18997899, 0.44054421, 0.81361291],\n [0.19442908, 0.43611475, 0.80836325],\n [0.19866639, 0.43173758, 0.80288030],\n [0.20268189, 0.42741446, 0.79717672],\n [0.20646873, 0.42314661, 0.79126674],\n [0.21002416, 0.41893452, 0.78516513],\n [0.21334744, 0.41477813, 0.77888727],\n [0.21644007, 0.41067692, 0.77244864],\n [0.21930549, 0.40662992, 0.76586449],\n [0.22194871, 0.40263581, 0.75914954],\n [0.22437598, 0.39869301, 0.75231781],\n [0.22659392, 0.39479978, 0.74538284],\n [0.22861020, 0.39095420, 0.73835702],\n [0.23043243, 0.38715428, 0.73125216],\n [0.23206893, 0.38339797, 0.72407881],\n [0.23352752, 0.37968323, 0.71684701],\n [0.23481608, 0.37600805, 0.70956590],\n [0.23594240, 0.37237043, 0.70224378],\n [0.23691411, 0.36876841, 0.69488810],\n [0.23773864, 0.36520013, 0.68750558],\n [0.23842228, 0.36166379, 0.68010314],\n [0.23897259, 0.35815764, 0.67268550],\n [0.23939501, 0.35468004, 0.66525875],\n [0.23969623, 0.35122941, 0.65782678],\n [0.23988177, 0.34780424, 0.65039394],\n [0.23995694, 0.34440312, 0.64296415],\n [0.23992678, 0.34102470, 0.63554088],\n [0.23979609, 0.33766769, 0.62812723],\n [0.23956944, 0.33433088, 0.62072594],\n [0.23925118, 0.33101313, 0.61333943],\n [0.23884542, 0.32771334, 0.60596984],\n [0.23835581, 0.32443050, 0.59861944],\n [0.23778580, 0.32116362, 0.59129023],\n [0.23713919, 0.31791178, 0.58398324],\n [0.23641859, 0.31467410, 0.57670083],\n [0.23562751, 0.31144977, 0.56944356],\n [0.23476863, 0.30823799, 0.56221282],\n [0.23384436, 0.30503801, 0.55501014],\n [0.23285756, 0.30184913, 0.54783592],\n [0.23181055, 0.29867067, 0.54069108],\n [0.23070554, 0.29550198, 0.53357645],\n [0.22954464, 0.29234247, 0.52649273],\n [0.22832987, 0.28919153, 0.51944051],\n [0.22706312, 0.28604862, 0.51242029],\n [0.22574622, 0.28291320, 0.50543250],\n [0.22438088, 0.27978477, 0.49847747],\n [0.22296875, 0.27666284, 0.49155548],\n [0.22151140, 0.27354694, 0.48466672],\n [0.22001024, 0.27043662, 0.47781146],\n [0.21846644, 0.26733144, 0.47099036],\n [0.21688162, 0.26423100, 0.46420287],\n 
[0.21525699, 0.26113489, 0.45744915],\n [0.21359342, 0.25804272, 0.45072991],\n [0.21189245, 0.25495414, 0.44404423],\n [0.21015473, 0.25186874, 0.43739306],\n [0.20838166, 0.24878622, 0.43077545],\n [0.20657379, 0.24570619, 0.42419227],\n [0.20473243, 0.24262836, 0.41764250],\n [0.20285807, 0.23955237, 0.41112692],\n [0.20095176, 0.23647793, 0.40464482],\n [0.19901430, 0.23340473, 0.39819596],\n [0.19704615, 0.23033243, 0.39178094],\n [0.19504823, 0.22726077, 0.38539893],\n [0.19302121, 0.22418944, 0.37904975],\n [0.19096568, 0.22111817, 0.37273324],\n [0.18888207, 0.21804663, 0.36644962],\n [0.18677107, 0.21497457, 0.36019827],\n [0.18463321, 0.21190170, 0.35397893],\n [0.18246898, 0.20882775, 0.34779136],\n [0.18027883, 0.20575244, 0.34163532],\n [0.17806319, 0.20267549, 0.33551054],\n [0.17582247, 0.19959662, 0.32941673],\n [0.17355705, 0.19651555, 0.32335359],\n [0.17126727, 0.19343202, 0.31732082],\n [0.16895348, 0.19034573, 0.31131809],\n [0.16661598, 0.18725641, 0.30534506],\n [0.16425505, 0.18416377, 0.29940139],\n [0.16187095, 0.18106754, 0.29348671],\n [0.15946392, 0.17796741, 0.28760066],\n [0.15703418, 0.17486310, 0.28174285],\n [0.15458188, 0.17175430, 0.27591300],\n [0.15210713, 0.16864071, 0.27011088],\n [0.14961017, 0.16552201, 0.26433584],\n [0.14709112, 0.16239789, 0.25858744],\n [0.14455007, 0.15926804, 0.25286525],\n [0.14198701, 0.15613210, 0.24716916],\n [0.13940203, 0.15298974, 0.24149861],\n [0.13679522, 0.14984061, 0.23585293],\n [0.13416654, 0.14668436, 0.23023175],\n [0.13151588, 0.14352057, 0.22463501],\n [0.12884334, 0.14034890, 0.21906169],\n [0.12614878, 0.13716893, 0.21351147],\n [0.12343203, 0.13398023, 0.20798415],\n [0.12069311, 0.13078240, 0.20247868],\n [0.11793173, 0.12757495, 0.19699505],\n [0.11514778, 0.12435745, 0.19153238],\n [0.11234104, 0.12112938, 0.18609015],\n [0.10951121, 0.11789024, 0.18066797],\n [0.10665810, 0.11463950, 0.17526489],\n [0.10378128, 0.11137658, 0.16988075],\n [0.10088052, 0.10810090, 0.16451437],\n [0.09795529, 0.10481182, 0.15916562],\n [0.09500529, 0.10150871, 0.15383328],\n [0.09202991, 0.09819083, 0.14851704],\n [0.08902871, 0.09485748, 0.14321584],\n [0.08600106, 0.09150787, 0.13792899],\n [0.08294630, 0.08814114, 0.13265572],\n [0.07986379, 0.08475645, 0.12739482],\n [0.07675264, 0.08135279, 0.12214593],\n [0.07361211, 0.07792919, 0.11690747],\n [0.07044121, 0.07448455, 0.11167864],\n [0.06723890, 0.07101767, 0.10645840],\n [0.06400410, 0.06752732, 0.10124526],\n [0.06073553, 0.06401210, 0.09603817],\n [0.05743181, 0.06047052, 0.09083573],\n [0.05409148, 0.05690097, 0.08563621],\n [0.05071284, 0.05330166, 0.08043811],\n [0.04729398, 0.04967062, 0.07523980],\n [0.04383290, 0.04600573, 0.07003905],\n [0.04032533, 0.04230459, 0.06483370],\n [0.03681198, 0.03855880, 0.05962158],\n [0.03343838, 0.03492349, 0.05439990],\n [0.03020655, 0.03145663, 0.04916556],\n [0.02711859, 0.02815891, 0.04391515],\n [0.02417669, 0.02503115, 0.03863829],\n [0.02138315, 0.02207428, 0.03360127],\n [0.01874050, 0.01928945, 0.02894484],\n [0.01625140, 0.01667799, 0.02466102],\n [0.01391875, 0.01424146, 0.02074208],\n [0.01174570, 0.01198170, 0.01718058],\n [0.00973575, 0.00990088, 0.01396944],\n [0.00789285, 0.00800160, 0.01110189],\n [0.00622148, 0.00628698, 0.00857157],\n [0.00472684, 0.00476083, 0.00637274],\n [0.00341513, 0.00342790, 0.00450037],\n [0.00229394, 0.00229426, 0.00295046],\n [0.00137302, 0.00136800, 0.00172056],\n [0.00066576, 0.00066063, 0.00081098],\n [0.00019292, 0.00019060, 0.00022793],\n [0.00000000, 0.00000000, 
0.00000000],\n [0.00025040, 0.00017502, 0.00016171],\n [0.00089250, 0.00059932, 0.00054846],\n [0.00189565, 0.00122701, 0.00111293],\n [0.00325248, 0.00203616, 0.00183195],\n [0.00496152, 0.00301225, 0.00268996],\n [0.00702387, 0.00414441, 0.00367535],\n [0.00944207, 0.00542401, 0.00477886],\n [0.01221955, 0.00684390, 0.00599285],\n [0.01536031, 0.00839795, 0.00731084],\n [0.01886877, 0.01008083, 0.00872721],\n [0.02274967, 0.01188778, 0.01023700],\n [0.02700798, 0.01381454, 0.01183582],\n [0.03164888, 0.01585720, 0.01351971],\n [0.03667771, 0.01801217, 0.01528506],\n [0.04205331, 0.02027615, 0.01712860],\n [0.04743448, 0.02264601, 0.01904731],\n [0.05278280, 0.02511884, 0.02103842],\n [0.05810212, 0.02769188, 0.02309935],\n [0.06339578, 0.03036252, 0.02522770],\n [0.06866672, 0.03312824, 0.02742124],\n [0.07391755, 0.03598666, 0.02967787],\n [0.07915058, 0.03893547, 0.03199563],\n [0.08436788, 0.04193076, 0.03437269],\n [0.08957133, 0.04487310, 0.03680730],\n [0.09476262, 0.04777329, 0.03929785],\n [0.09994327, 0.05063312, 0.04180586],\n [0.10511470, 0.05345424, 0.04426624],\n [0.11027818, 0.05623812, 0.04669049],\n [0.11543497, 0.05898610, 0.04908014],\n [0.12058623, 0.06169939, 0.05143664],\n [0.12573283, 0.06437919, 0.05376147],\n [0.13087571, 0.06702659, 0.05605592],\n [0.13601574, 0.06964256, 0.05832121],\n [0.14115372, 0.07222801, 0.06055848],\n [0.14629062, 0.07478363, 0.06276864],\n [0.15142706, 0.07731028, 0.06495279],\n [0.15656359, 0.07980873, 0.06711196],\n [0.16170085, 0.08227966, 0.06924706],\n [0.16683960, 0.08472357, 0.07135883],\n [0.17198044, 0.08714103, 0.07344805],\n [0.17712369, 0.08953271, 0.07551570],\n [0.18226984, 0.09189912, 0.07756253],\n [0.18741984, 0.09424042, 0.07958888],\n [0.19257368, 0.09655736, 0.08159583],\n [0.19773177, 0.09885035, 0.08358407],\n [0.20289508, 0.10111941, 0.08555379],\n [0.20806344, 0.10336526, 0.08750608],\n [0.21323743, 0.10558811, 0.08944138],\n [0.21841762, 0.10778812, 0.09136011],\n [0.22360393, 0.10996587, 0.09326320],\n [0.22879730, 0.11212120, 0.09515070],\n [0.23399749, 0.11425475, 0.09702362],\n [0.23920524, 0.11636644, 0.09888215],\n [0.24442056, 0.11845670, 0.10072704],\n [0.24964399, 0.12052553, 0.10255859],\n [0.25487559, 0.12257327, 0.10437748],\n [0.26011591, 0.12459987, 0.10618400],\n [0.26536485, 0.12660577, 0.10797891],\n [0.27062320, 0.12859067, 0.10976227],\n [0.27589064, 0.13055515, 0.11153501],\n [0.28116790, 0.13249892, 0.11329723],\n [0.28645496, 0.13442228, 0.11504958],\n [0.29175191, 0.13632542, 0.11679264],\n [0.29705953, 0.13820797, 0.11852645],\n [0.30237754, 0.14007037, 0.12025185],\n [0.30770619, 0.14191267, 0.12196930],\n [0.31304596, 0.14373465, 0.12367902],\n [0.31839698, 0.14553642, 0.12538157],\n [0.32375929, 0.14731812, 0.12707753],\n [0.32913312, 0.14907971, 0.12876734],\n [0.33451869, 0.15082119, 0.13045146],\n [0.33991640, 0.15254233, 0.13213020],\n [0.34532626, 0.15424328, 0.13380421],\n [0.35074842, 0.15592402, 0.13547398],\n [0.35618306, 0.15758449, 0.13714001],\n [0.36163037, 0.15922465, 0.13880282],\n [0.36709053, 0.16084442, 0.14046291],\n [0.37256368, 0.16244373, 0.14212082],\n [0.37805000, 0.16402251, 0.14377710],\n [0.38354964, 0.16558067, 0.14543231],\n [0.38906272, 0.16711815, 0.14708703],\n [0.39458938, 0.16863485, 0.14874187],\n [0.40012983, 0.17013060, 0.15039738],\n [0.40568435, 0.17160517, 0.15205410],\n [0.41125283, 0.17305863, 0.15371282],\n [0.41683537, 0.17449090, 0.15537425],\n [0.42243210, 0.17590183, 0.15703906],\n [0.42804355, 0.17729090, 0.15870770],\n [0.43366930, 
0.17865845, 0.16038126],\n [0.43930967, 0.18000413, 0.16206038],\n [0.44496489, 0.18132764, 0.16374576],\n [0.45063472, 0.18262915, 0.16543847],\n [0.45631970, 0.18390803, 0.16713910],\n [0.46201950, 0.18516452, 0.16884881],\n [0.46773436, 0.18639826, 0.17056845],\n [0.47346448, 0.18760891, 0.17229894],\n [0.47920962, 0.18879660, 0.17404153],\n [0.48496988, 0.18996107, 0.17579727],\n [0.49074562, 0.19110178, 0.17756713],\n [0.49653658, 0.19221882, 0.17935252],\n [0.50234275, 0.19331200, 0.18115474],\n [0.50816415, 0.19438111, 0.18297514],\n [0.51400073, 0.19542596, 0.18481519],\n [0.51985242, 0.19644640, 0.18667645],\n [0.52571914, 0.19744228, 0.18856057],\n [0.53160107, 0.19841308, 0.19046915],\n [0.53749771, 0.19935910, 0.19240426],\n [0.54340877, 0.20028035, 0.19436797],\n [0.54933468, 0.20117600, 0.19636211],\n [0.55527440, 0.20204691, 0.19838936],\n [0.56122832, 0.20289223, 0.20045191],\n [0.56719565, 0.20371253, 0.20255260],\n [0.57317599, 0.20450784, 0.20469433],\n [0.57916892, 0.20527822, 0.20688019],\n [0.58517386, 0.20602391, 0.20911357],\n [0.59119008, 0.20674530, 0.21139818],\n [0.59721668, 0.20744301, 0.21373809],\n [0.60325256, 0.20811785, 0.21613772],\n [0.60929703, 0.20877009, 0.21860179],\n [0.61534817, 0.20940159, 0.22113570],\n [0.62140456, 0.21001357, 0.22374524],\n [0.62746447, 0.21060762, 0.22643678],\n [0.63352553, 0.21118613, 0.22921738],\n [0.63958513, 0.21175191, 0.23209485],\n [0.64563982, 0.21230887, 0.23507773],\n [0.65168622, 0.21286089, 0.23817550],\n [0.65771956, 0.21341377, 0.24139850],\n [0.66373458, 0.21397413, 0.24475810],\n [0.66972486, 0.21455034, 0.24826659],\n [0.67568308, 0.21515214, 0.25193732],\n [0.68160009, 0.21579224, 0.25578419],\n [0.68746554, 0.21648527, 0.25982171],\n [0.69326730, 0.21724893, 0.26406422],\n [0.69899173, 0.21810359, 0.26852538],\n [0.70462353, 0.21907270, 0.27321678],\n [0.71014648, 0.22018170, 0.27814698],\n [0.71554399, 0.22145733, 0.28331983],\n [0.72080022, 0.22292581, 0.28873331],\n [0.72590133, 0.22461074, 0.29437873],\n [0.73083669, 0.22653107, 0.30024056],\n [0.73559980, 0.22869924, 0.30629757],\n [0.74018868, 0.23112025, 0.31252449],\n [0.74460563, 0.23379177, 0.31889417],\n [0.74885652, 0.23670514, 0.32537978],\n [0.75294975, 0.23984693, 0.33195617],\n [0.75689531, 0.24320059, 0.33860116],\n [0.76070380, 0.24674807, 0.34529597],\n [0.76438593, 0.25047094, 0.35202454],\n [0.76795189, 0.25435133, 0.35877448],\n [0.77141115, 0.25837247, 0.36553575],\n [0.77477256, 0.26251881, 0.37229998],\n [0.77804379, 0.26677651, 0.37906169],\n [0.78123206, 0.27113290, 0.38581549],\n [0.78434357, 0.27557695, 0.39255809],\n [0.78738394, 0.28009877, 0.39928656],\n [0.79035818, 0.28468967, 0.40599881],\n [0.79327055, 0.28934218, 0.41269364],\n [0.79612522, 0.29404938, 0.41936940],\n [0.79892565, 0.29880546, 0.42602547],\n [0.80167495, 0.30360528, 0.43266135],\n [0.80437600, 0.30844427, 0.43927664],\n [0.80703138, 0.31331841, 0.44587105],\n [0.80964362, 0.31822399, 0.45244410],\n [0.81221449, 0.32315820, 0.45899641],\n [0.81474632, 0.32811795, 0.46552728],\n [0.81724057, 0.33310108, 0.47203740],\n [0.81969904, 0.33810529, 0.47852656],\n [0.82212337, 0.34312856, 0.48499471],\n [0.82451481, 0.34816932, 0.49144226],\n [0.82687469, 0.35322603, 0.49786935],\n [0.82920424, 0.35829737, 0.50427616],\n [0.83150475, 0.36338199, 0.51066266],\n [0.83377726, 0.36847888, 0.51702911],\n [0.83602265, 0.37358719, 0.52337592],\n [0.83824196, 0.37870600, 0.52970315],\n [0.84043630, 0.38383440, 0.53601074],\n [0.84260624, 0.38897198, 0.54229933],\n 
[0.84475296, 0.39411782, 0.54856862],\n [0.84687704, 0.39927156, 0.55481910],\n [0.84897918, 0.40443278, 0.56105104],\n [0.85106052, 0.40960072, 0.56726414],\n [0.85312150, 0.41477522, 0.57345893],\n [0.85516280, 0.41995592, 0.57963557],\n [0.85718516, 0.42514246, 0.58579419],\n [0.85918925, 0.43033456, 0.59193494],\n [0.86117573, 0.43553193, 0.59805796],\n [0.86314524, 0.44073436, 0.60416341],\n [0.86509839, 0.44594163, 0.61025145],\n [0.86703576, 0.45115357, 0.61632226],\n [0.86895794, 0.45637003, 0.62237602],\n [0.87086547, 0.46159088, 0.62841291],\n [0.87275892, 0.46681601, 0.63443311],\n [0.87463920, 0.47204504, 0.64043642],\n [0.87650647, 0.47727814, 0.64642338],\n [0.87836125, 0.48251527, 0.65239419],\n [0.88020442, 0.48775608, 0.65834869],\n [0.88203632, 0.49300068, 0.66428723],\n [0.88385731, 0.49824908, 0.67021010],\n [0.88566842, 0.50350091, 0.67611704],\n [0.88746952, 0.50875657, 0.68200879],\n [0.88926189, 0.51401550, 0.68788483],\n [0.89104529, 0.51927818, 0.69374601],\n [0.89282093, 0.52454412, 0.69959190],\n [0.89458878, 0.52981366, 0.70542311],\n [0.89634962, 0.53508661, 0.71123963],\n [0.89810404, 0.54036289, 0.71704153],\n [0.89985224, 0.54564269, 0.72282926],\n [0.90159503, 0.55092583, 0.72860276],\n [0.90333293, 0.55621228, 0.73436222],\n [0.90506623, 0.56150220, 0.74010799],\n [0.90679545, 0.56679555, 0.74584023],\n [0.90852137, 0.57209219, 0.75155895],\n [0.91024440, 0.57739217, 0.75726444],\n [0.91196493, 0.58269559, 0.76295698],\n [0.91368350, 0.58800243, 0.76863675],\n [0.91540062, 0.59331269, 0.77430396],\n [0.91711684, 0.59862638, 0.77995883],\n [0.91883267, 0.60394350, 0.78560155],\n [0.92054867, 0.60926404, 0.79123233],\n [0.92226538, 0.61458800, 0.79685141],\n [0.92398322, 0.61991544, 0.80245907],\n [0.92570273, 0.62524639, 0.80805555],\n [0.92742442, 0.63058086, 0.81364111],\n [0.92914880, 0.63591886, 0.81921600],\n [0.93087638, 0.64126043, 0.82478051],\n [0.93260773, 0.64660556, 0.83033488],\n [0.93434340, 0.65195425, 0.83587937],\n [0.93608384, 0.65730657, 0.84141433],\n [0.93782955, 0.66266254, 0.84694004],\n [0.93958104, 0.66802220, 0.85245683],\n [0.94133881, 0.67338558, 0.85796503],\n [0.94310336, 0.67875272, 0.86346495],\n [0.94487519, 0.68412366, 0.86895696],\n [0.94665494, 0.68949837, 0.87444133],\n [0.94844316, 0.69487684, 0.87991840],\n [0.95024017, 0.70025923, 0.88538863],\n [0.95204644, 0.70564557, 0.89085241],\n [0.95386245, 0.71103593, 0.89631013],\n [0.95568907, 0.71643015, 0.90176204],\n [0.95752647, 0.72182844, 0.90720868],\n [0.95937505, 0.72723088, 0.91265052],\n [0.96123539, 0.73263749, 0.91808795],\n [0.96310823, 0.73804818, 0.92352130],\n [0.96499363, 0.74346320, 0.92895121],\n [0.96689210, 0.74888260, 0.93437814],\n [0.96880452, 0.75430622, 0.93980239],\n [0.97073082, 0.75973439, 0.94522466],\n [0.97267159, 0.76516710, 0.95064542],\n [0.97462750, 0.77060429, 0.95606508],\n [0.97659851, 0.77604628, 0.96148437],\n [0.97858550, 0.78149292, 0.96690364],\n [0.98058853, 0.78694445, 0.97232360],\n [0.98260802, 0.79240093, 0.97774480],\n [0.98464455, 0.79786238, 0.98316775],\n [0.98669804, 0.80332910, 0.98859321]]\n\n# Create ListedColormap object for this colormap\ncmap = ListedColormap(cm_data, name='cmr.redshift', N=511)\ncmap_r = cmap.reversed()\n\n# Register (reversed) cmap in MPL\nregister_cmap(cmap=cmap)\nregister_cmap(cmap=cmap_r)\n" ]
[ [ "matplotlib.cm.register_cmap", "matplotlib.colors.ListedColormap" ] ]
ombretta/3D-ResNets-PyTorch
[ "a5b0f092c36c5256257ba854fbc50718c35244fb" ]
[ "cluster_print_results.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 30 17:00:15 2021\n\n@author: ombretta\n\"\"\"\n\nimport os\nfrom tensorboard.backend.event_processing.event_accumulator import EventAccumulator\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport json\nimport sys\n\n\ndef main(dirs_dataset_filter=\"mnist\", more_discard_dirs=[], \n filtering_criteria_models=[\"resnet_50\", \"resnet_34\", \"resnet_18\", \n \"bagnet_tem_9\", \"bagnet_tem_17\"], filtering_criteria_frames=[],\n filtering_criteria_others=[], filtering_criteria_annotation_path=[],\n filtering_criteria_sampling=[\"center\"]):\n \n \n res_dirs = [f for f in os.listdir(\"results/\") if dirs_dataset_filter in f]\n discard_dirs = [\"motion\", \"blackframes\", \"val_1tstride\"] + more_discard_dirs\n res_dirs = [f for f in res_dirs if all([d not in f for d in discard_dirs])]\n \n for r in res_dirs:\n \n # if os.path.exists(\"results/\"+r+\"/opts.json\"):\n # with open(\"results/\"+r+\"/opts.json\", \"r\") as f:\n # opts = json.load(f)\n # print(opts[\"annotation_path\"])\n \n if [f for f in os.listdir(\"results/\"+r) if \"events.out\" in f] and \\\n any([c in r for c in filtering_criteria_models]) and \\\n any([c in r for c in filtering_criteria_frames]) and \\\n any([c in r for c in filtering_criteria_others]) and \\\n os.path.exists(\"results/\"+r+\"/opts.json\"):\n \n print(r)\n \n event_acc = EventAccumulator(\"results/\"+r)\n event_acc.Reload()\n \n with open(\"results/\"+r+\"/opts.json\", \"r\") as f:\n opts = json.load(f) \n \n print(opts['annotation_path'])\n \n if any([c in opts['annotation_path'] for c in filtering_criteria_annotation_path]) \\\n and any ([c in opts['train_t_crop'] for c in filtering_criteria_sampling]):\n \n if event_acc.scalars.Keys() != []:\n train_losses, train_epochs, train_accs = zip(*event_acc.Scalars('train/acc'))\n val_losses, val_epochs, val_accs = zip(*event_acc.Scalars('val/acc'))\n if len(val_losses) <10:\n os.system(\"rm -r results/\"+r)\n if len(val_losses) >= 10 or train_accs[-1] > 0.95:\n \n # print(len(val_losses))\n print(r)\n print(\"train\", round(np.max(train_accs)*100, 2), np.argmax(train_accs), \\\n \"val\", round(np.max(val_accs)*100, 2), np.argmax(val_accs))\n \n if os.path.exists(\"results/\"+r+\"/checkpoints_test_results.json\"):\n with open(\"results/\"+r+\"/checkpoints_test_results.json\", \"r\") as f:\n test = json.load(f)\n print(test)\n\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description='Command line options')\n \n parser.add_argument('--discard_dirs', type=str, dest='discard_dirs', default=\"\")\n parser.add_argument('--dirs_dataset_filter', type=str, dest='dirs_dataset_filter', default=\"mnist\")\n parser.add_argument('--filtering_criteria_models', type=str, dest='filtering_criteria_models', default=\"\")\n parser.add_argument('--filtering_criteria_frames', type=str, dest='filtering_criteria_frames', default=\"\")\n parser.add_argument('--filtering_criteria_others', type=str, dest='filtering_criteria_others', default=\"\")\n parser.add_argument('--filtering_criteria_annotation_path', type=str, dest='filtering_criteria_annotation_path', default=\"\")\n parser.add_argument('--filtering_criteria_sampling', type=str, dest='filtering_criteria_sampling', default=\"\")\n \n args = parser.parse_args(sys.argv[1:])\n args.discard_dirs = args.discard_dirs.split(\",\")\n args.filtering_criteria_models = args.filtering_criteria_models.split(\",\")\n args.filtering_criteria_frames = 
args.filtering_criteria_frames.split(\",\")\n args.filtering_criteria_others = args.filtering_criteria_others.split(\",\")\n args.filtering_criteria_annotation_path = args.filtering_criteria_annotation_path.split(\",\")\n args.filtering_criteria_sampling = args.filtering_criteria_sampling.split(\",\")\n \n main(**{k: v for (k, v) in vars(args).items() if v is not None})" ]
[ [ "numpy.max", "numpy.argmax" ] ]
lzmisscc/pytorch-image-models
[ "a32aa96d109292bfef00a631c501bd6c2bd44fdf" ]
[ "dataset_v2.py" ]
[ "import json\nimport jsonlines\nimport tqdm\nimport random\nimport re\nfrom random import shuffle\nimport PIL\nfrom PIL import Image\nimport numpy as np\nimport os.path as osp\nfrom torch.utils.data import Dataset\nimport lmdb\nimport cv2\nimport math\n\nrandom.seed(100)\nFLAG_TRAIN = True\ntrain = 'data_v3/label_ensemble_clean_600w_100char.txt.lz'\n# label = open(train, 'r').readlines()\n# new_label = []\n# for index, l in enumerate(label):\n# filename, content = l.strip().split('.png ')\n# new_label.append(f'{filename}.png\\t{content}\\n')\n# with open(f\"{train}.lz\", \"w\") as f:\n# f.writelines(new_label)\n\n# exit()\nval = 'data_v3/trans_val.txt'\neval = 'data_v3/trans_eval_classify.txt'\nlamdb_path = 'data_v3/data_v3_00000'\npredict_ = \"/data/lz/jiangming/pytorch_lmdb_noposi/results/lz_13_table_ocr_lmdb_896_budingW_noposi_0207.txt\"\n\ndb = lmdb.open(lamdb_path, readonly=True)\ntxn = db.begin()\n\n\nclass Cus_Dataset(Dataset):\n def __init__(self, flag='train', transform=None) -> None:\n super().__init__()\n\n if flag == 'train':\n self.label = open(train, 'r').readlines()\n else:\n self.label = open(val, 'r').readlines()\n\n # t, f = [], []\n # for index, l in enumerate(self.label):\n # filename, content = l.strip().split('\\t')\n # if '卐' in content:\n # t.append(index)\n # else:\n # f.append(index)\n\n # self.res = random.choices(t, k=100000) + random.choices(f, k=100000)\n # shuffle(self.res)\n shuffle(self.label)\n self.transform = transform\n self.flag = flag\n\n def __len__(self) -> int:\n if self.flag == 'eval':\n return len(self.label[:5000])\n return len(self.label)\n\n def __getitem__(self, index: int):\n # index = self.res[index]\n l = self.label[index]\n filename, content = l.strip().split('\\t')\n im = txn.get(filename.encode(), False)\n if im == False:\n return self[random.choice(range(len(self)))]\n\n im = cv2.imdecode(np.frombuffer(im, np.uint8), 3)\n im = Image.fromarray(im)\n W, H = im.size\n im = im.resize((math.ceil(W*(64/H)), 64))\n new_im = Image.new(mode=\"RGB\", size=(\n 224, 244), color=(255, 255, 255))\n new_im.paste(im, (random.choice(range(0, 50)),\n random.choice(range(0, 50))))\n\n im = new_im\n\n if self.transform:\n im = self.transform(new_im)\n\n if '卐' in content:\n label = 1\n else:\n label = 0\n if self.flag == 'train' or FLAG_TRAIN:\n return im, label\n else:\n return im, label, filename\n\n\nclass Cus_Dataset_v2(Dataset):\n def __init__(self, flag='train', transform=None) -> None:\n super().__init__()\n\n if flag == 'train':\n self.label = open(train, 'r').readlines()\n else:\n self.label = open(val, 'r').readlines()\n\n self.transform = transform\n self.flag = flag\n\n def __len__(self) -> int:\n return len(self.label)\n\n def __getitem__(self, index: int):\n # index = self.res[index]\n l = self.label[index]\n filename, content = l.strip().split('\\t')\n im = txn.get(filename.encode(), False)\n if im == False:\n return self[random.choice(range(len(self)))]\n\n im = cv2.imdecode(np.frombuffer(im, np.uint8), 3)\n im = Image.fromarray(im)\n W, H = im.size\n im = im.resize((math.ceil(W*(64/H)), 64))\n new_im = Image.new(mode=\"RGB\", size=(\n 224, 244), color=(255, 255, 255))\n new_im.paste(im, (random.choice(range(0, 100)),\n random.choice(range(0, 100))))\n\n im = new_im\n\n if self.transform:\n im = self.transform(new_im)\n\n if '卐' in content:\n label = 1\n else:\n label = 0\n return im, label, filename, content\n\n\nclass Cus_Dataset_v3(Dataset):\n def __init__(self, flag='train', transform=None) -> None:\n super().__init__()\n 
self.filenames = []\n if flag == 'train':\n self.label = open(train, 'r').readlines()\n elif flag == 'val':\n self.label = open(val, 'r').readlines()\n elif flag == 'eval':\n self.label = open(eval, 'r').readlines()\n elif flag == 'predict':\n self.label = open(predict_, 'r').readlines()\n res = []\n for i in self.label:\n name, content = i.split('.png ')\n res.append(f\"{name}.png\\t{content}\")\n self.filenames.append(name)\n self.label = res\n\n # t, f = [], []\n # for index, l in enumerate(self.label):\n # filename, content = l.strip().split('\\t')\n # if '卐' in content:\n # t.append(index)\n # else:\n # f.append(index)\n\n # self.res = random.choices(t, k=100000) + random.choices(f, k=100000)\n # shuffle(self.res)\n self.transform = transform\n self.flag = flag\n print(f\"use Cus_Dataset_v3:{len(self)}\")\n def __len__(self) -> int:\n return len(self.label[:1000])\n\n def __getitem__(self, index: int):\n # index = self.res[index]\n l = self.label[index]\n filename, content = l.strip().split('\\t')\n im = txn.get(filename.encode(), False)\n if im == False:\n return self[random.choice(range(len(self)))]\n\n im = cv2.imdecode(np.frombuffer(im, np.uint8), 3)\n im = Image.fromarray(im)\n W, H = im.size\n im = im.resize((math.ceil(W*(64/H)), 64))\n new_im = Image.new(mode=\"RGB\", size=(\n 224, 244), color=(255, 255, 255))\n new_im.paste(im, (random.choice(range(0, 50)),\n random.choice(range(0, 50))))\n\n im = new_im\n\n if self.transform:\n im = self.transform(new_im)\n\n if '卐' in content:\n label = 1\n else:\n label = 0\n if self.flag == 'train':\n return im, label\n else:\n return im, label, filename, content\n\n\nif __name__ == '__main__':\n d = Cus_Dataset_v3('predict')\n print(d[3])\n" ]
[ [ "numpy.frombuffer" ] ]
inmaugarc/FutureSales
[ "87ef9a3c483efcb81741e9f56d4b5634281942a0" ]
[ "training.py" ]
[ "\"\"\"\r\n This file is to train data with a machine learning model\r\n\"\"\"\r\n# Let's import libraries\r\nimport pickle\r\nimport pandas as pd\r\n\r\nfrom xgboost import XGBRegressor\r\nfrom sklearn import linear_model\r\nfrom sklearn.base import BaseEstimator, RegressorMixin\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\nfrom preprocess import Preprocessor, build_dataset, save_dataset\r\n\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nclass Model(BaseEstimator, RegressorMixin):\r\n '''\r\n scikit-learn estimator for the Rossmann's stores prediction\r\n Parameters\r\n ----------\r\n alpha : float\r\n The regularization parameter for ridge and lasso regression\r\n max_iter : int\r\n The number of iterations / epochs to do on the data.\r\n solver : 'xgb' | 'lasso' | 'ridge' | 'linear'\r\n '''\r\n\r\n def __init__(self, max_iter=2000, solver='xgb', alpha=0.1):\r\n self.max_iter = max_iter\r\n self.alpha = alpha\r\n self.solver = solver\r\n self.model = None\r\n # assert self.solver in ['xgb', 'lasso', 'ridge', 'linear']\r\n assert self.solver in ['xgb', 'lasso', 'ridge', 'linear']\r\n\r\n def fit(self, X, y):\r\n '''\r\n Fit method\r\n Input: ndarray, shape (n_samples, n_features) # The features\r\n Output: y ndarray, shape (n_samples,) # The target\r\n '''\r\n\r\n if self.solver == 'xgb':\r\n self.model = XGBRegressor(objective=\"reg:squarederror\")\r\n self.model.fit(X, y)\r\n\r\n elif self.solver == 'lasso':\r\n self.model = linear_model.Lasso(alpha=self.alpha, max_iter=self.max_iter)\r\n self.model.fit(X, y)\r\n\r\n elif self.solver == 'ridge':\r\n self.model = linear_model.Ridge(alpha=self.alpha, max_iter=self.max_iter)\r\n self.model.fit(X, y)\r\n\r\n elif self.solver == 'linear':\r\n self.model = linear_model.LinearRegression()\r\n self.model.fit(X, y)\r\n\r\n return self\r\n\r\n def predict(self, X):\r\n '''Prediction method\r\n Input: X : ndarray, shape (n_samples, n_features) # The features\r\n Output: y_pred : ndarray, shape (n_samples,) # The predicted target\r\n '''\r\n return self.model.predict(X)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # load data\r\n print('Loading data...')\r\n data = build_dataset('train')\r\n train_data, valid_data = train_test_split(data, test_size=0.2, random_state=42)\r\n preprocessor = Preprocessor()\r\n print('Preprocessor initialization finished')\r\n preprocessor.fit(train_data)\r\n print('Preprocessor fitting finished')\r\n train_data = preprocessor.transform(train_data)\r\n valid_data = preprocessor.transform(valid_data)\r\n save_dataset(pd.concat([train_data, valid_data]), './data/train_preprocessed.csv')\r\n\r\n X_train = train_data.drop(['item_id', 'item_cnt_month'], axis=1)\r\n X_valid = valid_data.drop(['item_id', 'item_cnt_month'], axis=1)\r\n y_train = train_data['item_id']\r\n y_valid = valid_data['item_id']\r\n\r\n print('Training model on', len(X_train), 'samples')\r\n print('Validating model on', len(X_valid), 'samples')\r\n print('Training model on features: ', X_train.columns.tolist())\r\n\r\n # model selection with grid search\r\n solvers = ['xgb', 'lasso', 'ridge', 'linear']\r\n best_score, best_model = 0, (None, None)\r\n for solver in solvers:\r\n print('Solver:', solver)\r\n model = Model(solver=solver)\r\n model.fit(X_train, y_train)\r\n model_r2 = model.score(X_valid, y_valid)\r\n print('r2:', model_r2)\r\n preds = model.predict(X_valid)\r\n model_mse = mean_squared_error(y_valid, preds)\r\n print('mse:', model_mse)\r\n\r\n # keep 
track of best model\r\n if model_r2 > best_score:\r\n best_model = (solver, model)\r\n best_score = model_r2\r\n\r\n # save best model\r\n print('Best solver:', best_model[0])\r\n print('Saving best model to pickle file')\r\n model_file = open('model.pkl', 'wb')\r\n model = pickle.dump(best_model[1], model_file)\r\n model_file.close()\r\n print('Done!')" ]
[ [ "sklearn.metrics.mean_squared_error", "sklearn.linear_model.Ridge", "sklearn.linear_model.LinearRegression", "pandas.concat", "sklearn.linear_model.Lasso", "sklearn.model_selection.train_test_split" ] ]
MartinJakomin/SIMF
[ "e04110ddcaed887abc58084686d00f84fdc6a8c8" ]
[ "simf/models/base.py" ]
[ "import logging\r\nimport sys\r\n\r\nimport numpy as np\r\nimport scipy.sparse as sps\r\n\r\nfrom simf.initialization import a_col, random_normal, bias_from_data, bias_zero\r\n\r\n\r\nclass BaseFactorization(object):\r\n\r\n def __init__(self, max_iter=20, epsilon=0, regularization=0.02, learning_rate=0.01, init_method='random', bias=True,\r\n precompute_bias=(20, 15), update=True, logger=None):\r\n\r\n self.log = logger\r\n if not logger:\r\n self.log = logging.getLogger('default_logger')\r\n if len(self.log.handlers) < 1:\r\n self.log.setLevel(logging.DEBUG)\r\n handler = logging.StreamHandler(sys.stdout)\r\n handler.setLevel(logging.DEBUG)\r\n formatter = logging.Formatter(\"%(asctime)s: %(message)s\")\r\n handler.setFormatter(formatter)\r\n self.log.addHandler(handler)\r\n\r\n self.init_method = init_method\r\n self.bias = bias\r\n self.precompute_bias = precompute_bias\r\n self.max_iter = max_iter\r\n self.epsilon = epsilon\r\n self.regularization = regularization\r\n self.learning_rate = learning_rate\r\n self.update = update\r\n self.object_types = None\r\n self.relations = None\r\n self.data_ranges = None\r\n self.data_averages = None\r\n self.factors = None\r\n self.biases = None\r\n\r\n def __str__(self):\r\n print(self.__class__.__name__)\r\n\r\n def name(self):\r\n return \"Base\"\r\n\r\n def fit(self, data, verbose):\r\n pass\r\n\r\n def fit_update(self, data, verbose):\r\n pass\r\n\r\n def predict(self, r, i, j):\r\n pass\r\n\r\n def predict_stream(self, r, s, verbose):\r\n pass\r\n\r\n def init_factors_and_biases(self, data):\r\n pass\r\n\r\n def init_relations(self, data):\r\n self.object_types = set()\r\n self.relations = []\r\n self.data_ranges = {}\r\n self.data_averages = {}\r\n for relation in data:\r\n self.object_types.add(relation.ot1)\r\n self.object_types.add(relation.ot2)\r\n self.relations.append(relation)\r\n R = relation.get_matrix()\r\n self.data_averages[relation] = float(np.average(R.data))\r\n self.data_ranges[relation] = (float(min(R.data)), float(max(R.data)))\r\n\r\n def construct_factor(self, M, n, m):\r\n if self.init_method == \"random\":\r\n return random_normal(n, m)\r\n elif self.init_method == 'a_col':\r\n return a_col(M, n, m)\r\n\r\n def vstack_factor(self, F, n):\r\n if self.init_method == 'random':\r\n return np.vstack([F, random_normal(n - F.shape[0], F.shape[1], loc=0, scale=1. 
/ F.shape[1])])\r\n elif self.init_method == 'a_col':\r\n return np.vstack([F, a_col(F, n - F.shape[0], F.shape[1])])\r\n\r\n def construct_bias(self, R):\r\n if not self.precompute_bias:\r\n return bias_zero(R)\r\n return bias_from_data(R, self.precompute_bias[0], self.precompute_bias[1])\r\n\r\n def expand_factors_and_biases(self, r, n, m):\r\n ot1, ot2 = r.get_object_types()\r\n if self.factors and n > self.factors[ot1].shape[0]:\r\n self.factors[ot1] = self.vstack_factor(self.factors[ot1], n)\r\n if self.bias and n > len(self.biases[r][ot1]):\r\n self.biases[r][ot1] = self.resize_matrix(self.biases[r][ot1], n)\r\n if self.factors and m > self.factors[ot2].shape[0]:\r\n self.factors[ot2] = self.vstack_factor(self.factors[ot2], m)\r\n if self.bias and m > len(self.biases[r][ot2]):\r\n self.biases[r][ot2] = self.resize_matrix(self.biases[r][ot2], m)\r\n\r\n def resize_matrix(self, M, shape):\r\n if isinstance(shape, int):\r\n B = np.copy(M)\r\n B.resize(shape)\r\n return B\r\n n, m = M.shape\r\n p, k = shape\r\n if sps.issparse(M):\r\n M = sps.coo_matrix(M)\r\n return sps.csr_matrix((np.append(M.data, 0), (np.append(M.row, p - 1), np.append(M.col, k - 1))),\r\n shape=shape)\r\n return np.pad(M, [(0, p - n), (0, k - m)], mode='constant', constant_values=0)\r\n\r\n def rmse(self, real, pred):\r\n if len(pred) < 1 or np.isnan(pred).any():\r\n return -1\r\n return np.sqrt(np.average((real - pred) ** 2, axis=0))\r\n\r\n def mae(self, real, pred):\r\n if len(pred) < 1 or np.isnan(pred).any():\r\n return -1\r\n return np.average(np.abs(pred - real), axis=0)\r\n\r\n def get_train_error(self, verbose=False):\r\n errors = {}\r\n for rel in self.relations:\r\n cx = rel.get_matrix().tocoo()\r\n stream = [(int(i), int(j), float(v)) for i, j, v in zip(cx.row, cx.col, cx.data)]\r\n values = list(zip(*stream))[2]\r\n pred = self.predict_stream(rel, stream, verbose=verbose)\r\n errors[rel] = (self.rmse(values, pred), self.mae(values, pred))\r\n return errors\r\n\r\n def get_test_error(self, relation, test_set, verbose=False):\r\n errors = {}\r\n values = list(zip(*test_set))[2]\r\n pred = self.predict_stream(relation, test_set, verbose=verbose)\r\n errors[relation] = (self.rmse(values, pred), self.mae(values, pred))\r\n return errors\r\n" ]
[ [ "numpy.append", "scipy.sparse.issparse", "numpy.abs", "numpy.copy", "scipy.sparse.coo_matrix", "numpy.isnan", "numpy.pad", "numpy.average" ] ]
guotao0628/DeepNet
[ "1ae74d8b44d715bf67c7d64a8efafff4b7c7937a" ]
[ "edgelm/fairseq/models/text_to_speech/tts_transformer.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\r\n#\r\n# This source code is licensed under the MIT license found in the\r\n# LICENSE file in the root directory of this source tree.\r\n\r\nimport logging\r\nfrom typing import List, Optional\r\n\r\nimport torch\r\nfrom torch import nn\r\n\r\nfrom fairseq.models import (FairseqEncoder, FairseqEncoderDecoderModel,\r\n FairseqIncrementalDecoder, register_model,\r\n register_model_architecture)\r\nfrom fairseq.modules import (\r\n TransformerEncoderLayer, TransformerDecoderLayer\r\n)\r\nfrom fairseq.models.text_to_speech.tacotron2 import Prenet, Postnet\r\nfrom fairseq.modules import LayerNorm, PositionalEmbedding, FairseqDropout\r\nfrom fairseq.data.data_utils import lengths_to_padding_mask\r\nfrom fairseq import utils\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\ndef encoder_init(m):\r\n if isinstance(m, nn.Conv1d):\r\n nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain(\"relu\"))\r\n\r\n\r\ndef Embedding(num_embeddings, embedding_dim):\r\n m = nn.Embedding(num_embeddings, embedding_dim)\r\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\r\n return m\r\n\r\n\r\nclass TTSTransformerEncoder(FairseqEncoder):\r\n def __init__(self, args, src_dict, embed_speaker):\r\n super().__init__(src_dict)\r\n self.padding_idx = src_dict.pad()\r\n self.embed_speaker = embed_speaker\r\n self.spk_emb_proj = None\r\n if embed_speaker is not None:\r\n self.spk_emb_proj = nn.Linear(\r\n args.encoder_embed_dim + args.speaker_embed_dim,\r\n args.encoder_embed_dim\r\n )\r\n\r\n self.dropout_module = FairseqDropout(\r\n p=args.dropout, module_name=self.__class__.__name__\r\n )\r\n self.embed_tokens = nn.Embedding(len(src_dict), args.encoder_embed_dim,\r\n padding_idx=self.padding_idx)\r\n assert(args.encoder_conv_kernel_size % 2 == 1)\r\n self.prenet = nn.ModuleList(\r\n nn.Sequential(\r\n nn.Conv1d(args.encoder_embed_dim, args.encoder_embed_dim,\r\n kernel_size=args.encoder_conv_kernel_size,\r\n padding=((args.encoder_conv_kernel_size - 1) // 2)),\r\n nn.BatchNorm1d(args.encoder_embed_dim),\r\n nn.ReLU(),\r\n nn.Dropout(args.encoder_dropout),\r\n )\r\n for _ in range(args.encoder_conv_layers)\r\n )\r\n self.prenet_proj = nn.Linear(\r\n args.encoder_embed_dim, args.encoder_embed_dim\r\n )\r\n self.embed_positions = PositionalEmbedding(\r\n args.max_source_positions, args.encoder_embed_dim, self.padding_idx\r\n )\r\n self.pos_emb_alpha = nn.Parameter(torch.ones(1))\r\n\r\n self.transformer_layers = nn.ModuleList(\r\n TransformerEncoderLayer(args)\r\n for _ in range(args.encoder_transformer_layers)\r\n )\r\n if args.encoder_normalize_before:\r\n self.layer_norm = LayerNorm(args.encoder_embed_dim)\r\n else:\r\n self.layer_norm = None\r\n\r\n self.apply(encoder_init)\r\n\r\n def forward(self, src_tokens, src_lengths=None, speaker=None, **kwargs):\r\n x = self.embed_tokens(src_tokens)\r\n x = x.transpose(1, 2).contiguous() # B x T x C -> B x C x T\r\n for conv in self.prenet:\r\n x = conv(x)\r\n x = x.transpose(1, 2).contiguous() # B x C x T -> B x T x C\r\n x = self.prenet_proj(x)\r\n\r\n padding_mask = src_tokens.eq(self.padding_idx)\r\n positions = self.embed_positions(padding_mask)\r\n x += self.pos_emb_alpha * positions\r\n x = self.dropout_module(x)\r\n\r\n # B x T x C -> T x B x C\r\n x = x.transpose(0, 1)\r\n\r\n for layer in self.transformer_layers:\r\n x = layer(x, padding_mask)\r\n\r\n if self.layer_norm is not None:\r\n x = self.layer_norm(x)\r\n\r\n if self.embed_speaker is not None:\r\n seq_len, bsz, _ = x.size()\r\n 
emb = self.embed_speaker(speaker).transpose(0, 1)\r\n emb = emb.expand(seq_len, bsz, -1)\r\n x = self.spk_emb_proj(torch.cat([x, emb], dim=2))\r\n\r\n return {\r\n \"encoder_out\": [x], # T x B x C\r\n \"encoder_padding_mask\": [padding_mask] if padding_mask.any() else [], # B x T\r\n \"encoder_embedding\": [], # B x T x C\r\n \"encoder_states\": [], # List[T x B x C]\r\n \"src_tokens\": [],\r\n \"src_lengths\": [],\r\n }\r\n\r\n\r\ndef decoder_init(m):\r\n if isinstance(m, torch.nn.Conv1d):\r\n nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain(\"tanh\"))\r\n\r\n\r\nclass TTSTransformerDecoder(FairseqIncrementalDecoder):\r\n def __init__(self, args, src_dict):\r\n super().__init__(None)\r\n self._future_mask = torch.empty(0)\r\n\r\n self.args = args\r\n self.padding_idx = src_dict.pad()\r\n self.n_frames_per_step = args.n_frames_per_step\r\n self.out_dim = args.output_frame_dim * args.n_frames_per_step\r\n\r\n self.dropout_module = FairseqDropout(\r\n args.dropout, module_name=self.__class__.__name__\r\n )\r\n self.embed_positions = PositionalEmbedding(\r\n args.max_target_positions, args.decoder_embed_dim, self.padding_idx\r\n )\r\n self.pos_emb_alpha = nn.Parameter(torch.ones(1))\r\n self.prenet = nn.Sequential(\r\n Prenet(self.out_dim, args.prenet_layers, args.prenet_dim,\r\n args.prenet_dropout),\r\n nn.Linear(args.prenet_dim, args.decoder_embed_dim),\r\n )\r\n\r\n self.n_transformer_layers = args.decoder_transformer_layers\r\n self.transformer_layers = nn.ModuleList(\r\n TransformerDecoderLayer(args)\r\n for _ in range(self.n_transformer_layers)\r\n )\r\n if args.decoder_normalize_before:\r\n self.layer_norm = LayerNorm(args.decoder_embed_dim)\r\n else:\r\n self.layer_norm = None\r\n\r\n self.feat_proj = nn.Linear(args.decoder_embed_dim, self.out_dim)\r\n self.eos_proj = nn.Linear(args.decoder_embed_dim, 1)\r\n\r\n self.postnet = Postnet(self.out_dim, args.postnet_conv_dim,\r\n args.postnet_conv_kernel_size,\r\n args.postnet_layers, args.postnet_dropout)\r\n\r\n self.ctc_proj = None\r\n if getattr(args, \"ctc_weight\", 0.) 
> 0.:\r\n self.ctc_proj = nn.Linear(self.out_dim, len(src_dict))\r\n\r\n self.apply(decoder_init)\r\n\r\n def extract_features(\r\n self, prev_outputs, encoder_out=None, incremental_state=None,\r\n target_lengths=None, speaker=None, **kwargs\r\n ):\r\n alignment_layer = self.n_transformer_layers - 1\r\n self_attn_padding_mask = lengths_to_padding_mask(target_lengths)\r\n positions = self.embed_positions(\r\n self_attn_padding_mask, incremental_state=incremental_state\r\n )\r\n\r\n if incremental_state is not None:\r\n prev_outputs = prev_outputs[:, -1:, :]\r\n self_attn_padding_mask = self_attn_padding_mask[:, -1:]\r\n if positions is not None:\r\n positions = positions[:, -1:]\r\n\r\n x = self.prenet(prev_outputs)\r\n x += self.pos_emb_alpha * positions\r\n x = self.dropout_module(x)\r\n\r\n # B x T x C -> T x B x C\r\n x = x.transpose(0, 1)\r\n\r\n if not self_attn_padding_mask.any():\r\n self_attn_padding_mask = None\r\n\r\n attn: Optional[torch.Tensor] = None\r\n inner_states: List[Optional[torch.Tensor]] = [x]\r\n for idx, transformer_layer in enumerate(self.transformer_layers):\r\n if incremental_state is None:\r\n self_attn_mask = self.buffered_future_mask(x)\r\n else:\r\n self_attn_mask = None\r\n\r\n x, layer_attn, _ = transformer_layer(\r\n x,\r\n encoder_out[\"encoder_out\"][0]\r\n if (encoder_out is not None and len(encoder_out[\"encoder_out\"]) > 0)\r\n else None,\r\n encoder_out[\"encoder_padding_mask\"][0]\r\n if (\r\n encoder_out is not None\r\n and len(encoder_out[\"encoder_padding_mask\"]) > 0\r\n )\r\n else None,\r\n incremental_state,\r\n self_attn_mask=self_attn_mask,\r\n self_attn_padding_mask=self_attn_padding_mask,\r\n need_attn=bool((idx == alignment_layer)),\r\n need_head_weights=bool((idx == alignment_layer)),\r\n )\r\n inner_states.append(x)\r\n if layer_attn is not None and idx == alignment_layer:\r\n attn = layer_attn.float().to(x)\r\n\r\n if attn is not None:\r\n # average probabilities over heads, transpose to\r\n # (B, src_len, tgt_len)\r\n attn = attn.mean(dim=0).transpose(2, 1)\r\n\r\n if self.layer_norm is not None:\r\n x = self.layer_norm(x)\r\n\r\n # T x B x C -> B x T x C\r\n x = x.transpose(0, 1)\r\n\r\n return x, {\"attn\": attn, \"inner_states\": inner_states}\r\n\r\n def forward(self, prev_output_tokens, encoder_out=None,\r\n incremental_state=None, target_lengths=None, speaker=None,\r\n **kwargs):\r\n x, extra = self.extract_features(\r\n prev_output_tokens, encoder_out=encoder_out,\r\n incremental_state=incremental_state, target_lengths=target_lengths,\r\n speaker=speaker, **kwargs\r\n )\r\n attn = extra[\"attn\"]\r\n feat_out = self.feat_proj(x)\r\n bsz, seq_len, _ = x.size()\r\n eos_out = self.eos_proj(x)\r\n post_feat_out = feat_out + self.postnet(feat_out)\r\n return post_feat_out, eos_out, {\"attn\": attn, \"feature_out\": feat_out}\r\n\r\n def get_normalized_probs(self, net_output, log_probs, sample):\r\n logits = self.ctc_proj(net_output[2][\"feature_out\"])\r\n if log_probs:\r\n return utils.log_softmax(logits.float(), dim=-1)\r\n else:\r\n return utils.softmax(logits.float(), dim=-1)\r\n\r\n def buffered_future_mask(self, tensor):\r\n dim = tensor.size(0)\r\n # self._future_mask.device != tensor.device is not working in TorchScript. 
This is a workaround.\r\n if (\r\n self._future_mask.size(0) == 0\r\n or (not self._future_mask.device == tensor.device)\r\n or self._future_mask.size(0) < dim\r\n ):\r\n self._future_mask = torch.triu(\r\n utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1\r\n )\r\n self._future_mask = self._future_mask.to(tensor)\r\n return self._future_mask[:dim, :dim]\r\n\r\n\r\n@register_model(\"tts_transformer\")\r\nclass TTSTransformerModel(FairseqEncoderDecoderModel):\r\n \"\"\"\r\n Implementation for https://arxiv.org/pdf/1809.08895.pdf\r\n \"\"\"\r\n\r\n @staticmethod\r\n def add_args(parser):\r\n parser.add_argument(\"--dropout\", type=float)\r\n parser.add_argument(\"--output-frame-dim\", type=int)\r\n parser.add_argument(\"--speaker-embed-dim\", type=int)\r\n # encoder prenet\r\n parser.add_argument(\"--encoder-dropout\", type=float)\r\n parser.add_argument(\"--encoder-conv-layers\", type=int)\r\n parser.add_argument(\"--encoder-conv-kernel-size\", type=int)\r\n # encoder transformer layers\r\n parser.add_argument(\"--encoder-transformer-layers\", type=int)\r\n parser.add_argument(\"--encoder-embed-dim\", type=int)\r\n parser.add_argument(\"--encoder-ffn-embed-dim\", type=int)\r\n parser.add_argument(\"--encoder-normalize-before\", action=\"store_true\")\r\n parser.add_argument(\"--encoder-attention-heads\", type=int)\r\n parser.add_argument(\"--attention-dropout\", type=float)\r\n parser.add_argument(\"--activation-dropout\", \"--relu-dropout\", type=float)\r\n parser.add_argument(\"--activation-fn\", type=str, default=\"relu\")\r\n # decoder prenet\r\n parser.add_argument(\"--prenet-dropout\", type=float)\r\n parser.add_argument(\"--prenet-layers\", type=int)\r\n parser.add_argument(\"--prenet-dim\", type=int)\r\n # decoder postnet\r\n parser.add_argument(\"--postnet-dropout\", type=float)\r\n parser.add_argument(\"--postnet-layers\", type=int)\r\n parser.add_argument(\"--postnet-conv-dim\", type=int)\r\n parser.add_argument(\"--postnet-conv-kernel-size\", type=int)\r\n # decoder transformer layers\r\n parser.add_argument(\"--decoder-transformer-layers\", type=int)\r\n parser.add_argument(\"--decoder-embed-dim\", type=int)\r\n parser.add_argument(\"--decoder-ffn-embed-dim\", type=int)\r\n parser.add_argument(\"--decoder-normalize-before\", action=\"store_true\")\r\n parser.add_argument(\"--decoder-attention-heads\", type=int)\r\n\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n self._num_updates = 0\r\n\r\n @classmethod\r\n def build_model(cls, args, task):\r\n embed_speaker = task.get_speaker_embeddings(args)\r\n encoder = TTSTransformerEncoder(args, task.src_dict, embed_speaker)\r\n decoder = TTSTransformerDecoder(args, task.src_dict)\r\n return cls(encoder, decoder)\r\n\r\n def forward_encoder(self, src_tokens, src_lengths, speaker=None, **kwargs):\r\n return self.encoder(src_tokens, src_lengths=src_lengths,\r\n speaker=speaker, **kwargs)\r\n\r\n def set_num_updates(self, num_updates):\r\n super().set_num_updates(num_updates)\r\n self._num_updates = num_updates\r\n\r\n\r\n@register_model_architecture(\"tts_transformer\", \"tts_transformer\")\r\ndef base_architecture(args):\r\n args.dropout = getattr(args, \"dropout\", 0.1)\r\n args.output_frame_dim = getattr(args, \"output_frame_dim\", 80)\r\n args.speaker_embed_dim = getattr(args, \"speaker_embed_dim\", 64)\r\n # encoder prenet\r\n args.encoder_dropout = getattr(args, \"encoder_dropout\", 0.5)\r\n args.encoder_conv_layers = getattr(args, \"encoder_conv_layers\", 3)\r\n args.encoder_conv_kernel_size = 
getattr(args, \"encoder_conv_kernel_size\", 5)\r\n # encoder transformer layers\r\n args.encoder_transformer_layers = getattr(args, \"encoder_transformer_layers\", 6)\r\n args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 512)\r\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 4 * args.encoder_embed_dim)\r\n args.encoder_normalize_before = getattr(args, \"encoder_normalize_before\", False)\r\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 4)\r\n args.attention_dropout = getattr(args, \"attention_dropout\", 0.0)\r\n args.activation_dropout = getattr(args, \"activation_dropout\", 0.0)\r\n args.activation_fn = getattr(args, \"activation_fn\", \"relu\")\r\n # decoder prenet\r\n args.prenet_dropout = getattr(args, \"prenet_dropout\", 0.5)\r\n args.prenet_layers = getattr(args, \"prenet_layers\", 2)\r\n args.prenet_dim = getattr(args, \"prenet_dim\", 256)\r\n # decoder postnet\r\n args.postnet_dropout = getattr(args, \"postnet_dropout\", 0.5)\r\n args.postnet_layers = getattr(args, \"postnet_layers\", 5)\r\n args.postnet_conv_dim = getattr(args, \"postnet_conv_dim\", 512)\r\n args.postnet_conv_kernel_size = getattr(args, \"postnet_conv_kernel_size\", 5)\r\n # decoder transformer layers\r\n args.decoder_transformer_layers = getattr(args, \"decoder_transformer_layers\", 6)\r\n args.decoder_embed_dim = getattr(args, \"decoder_embed_dim\", 512)\r\n args.decoder_ffn_embed_dim = getattr(args, \"decoder_ffn_embed_dim\", 4 * args.decoder_embed_dim)\r\n args.decoder_normalize_before = getattr(args, \"decoder_normalize_before\", False)\r\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 4)\r\n" ]
[ [ "torch.empty", "torch.nn.init.calculate_gain", "torch.ones", "torch.nn.Linear", "torch.nn.BatchNorm1d", "torch.nn.Embedding", "torch.nn.init.normal_", "torch.nn.Conv1d", "torch.nn.ReLU", "torch.zeros", "torch.cat", "torch.nn.Dropout" ] ]
sfreund-DLR/tankoh2
[ "92ff080f7034a7eb1cdabed5089c79fd01af4d11" ]
[ "src/tankoh2/control_doe.py" ]
[ "\"\"\"create DOEs and execute design workflow\n\nCaution:\nThis module requires fa_pytuils and delismm!\nPlease contatct the developers for these additional packages.\n\"\"\"\n\nimport os\nfrom collections import OrderedDict\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom delismm.model.doe import LatinizedCentroidalVoronoiTesselation, DOEfromFile\nfrom delismm.model.samplecalculator import getY\nfrom delismm.model.customsystemfunction import BoundsHandler, AbstractTargetFunction\nfrom fa_pyutils.service.systemutils import getRunDir\n\nfrom tankoh2.control_sf import createWindingDesign\nfrom tankoh2 import programDir, log, pychain\nfrom tankoh2.service import indent\n\ndome = 'circle' # isotensoid circle\nsafetyFactor = 1 # 2.25\nlb = OrderedDict([('r', 500.), ('lzylByR', 0.01), ('dp', 0.13 * safetyFactor)]) # [mm, - , MPa]\nub = OrderedDict([('r', 1600.), ('lzylByR', 12.), ('dp', 0.5 * safetyFactor)])\nuseFibreFailure = False\n\nnumberOfSamples = 201\n\nclass TankWinder(AbstractTargetFunction):\n \"\"\"\"\"\"\n name = 'tank winder'\n\n def __init__(self, lb, ub, runDir):\n \"\"\"\"\"\"\n resultNames = ['frpMass', 'volume', 'area', 'lzylinder', 'numberOfLayers', 'angles', 'hoopLayerShifts']\n AbstractTargetFunction.__init__(self, lb, ub, resultNames=resultNames)\n self.doParallelization = []\n self.runDir = runDir\n self.allowFailedSample = True\n\n def _call(self, parameters):\n \"\"\"call function for the model\"\"\"\n runDir = getRunDir(basePath=os.path.join(self.runDir), useMilliSeconds=True)\n r, lzyl, burstPressure = parameters\n\n result = createWindingDesign(dzyl=r * 2, lzylByR=lzyl, burstPressure=burstPressure,\n minPolarOpening=r / 10, runDir=runDir,\n domeType=pychain.winding.DOME_TYPES.ISOTENSOID if dome == 'isotensoid' else pychain.winding.DOME_TYPES.CIRCLE,\n useFibreFailure = useFibreFailure)\n return result\n\nvolumeFunc = lambda r, lzylByR: (4 / 3 * np.pi * r ** 3 + r * lzylByR * np.pi * r ** 2)\n\"\"\"[m**3]\"\"\"\n\ndef plotGeometryRange(radii, lzylByRs, plotDir='', show=False, samples=None):\n \"\"\"\n\n :param radii: tuple with min and max radius [mm]\n :param lzylByRs: tuple with min and max lzylByR [-]\n :return: None\n \"\"\"\n radii = np.array(radii) / 1e3 # convert to m\n if samples is not None:\n samplesR, samplesLzylByR = samples[:2, :]\n samplesR = samplesR / 1e3\n\n fig = plt.figure(figsize=(15,6))\n axes = [fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2)]\n axes[1].set_yscale(\"log\")\n for ax in axes:\n ax.set_title(\"Parameter bounds\")\n ax.set_xlabel('Radius [m]')\n ax.set_ylabel('Volume [m^3]')\n color = 'tab:blue'\n for lzylByR in lzylByRs:\n x = np.linspace(*radii,11)\n volumes = [volumeFunc(r, lzylByR) for r in x]\n ax.plot(x, volumes, color=color, label=f'lzylByR={lzylByR}')\n color = 'tab:orange'\n ax.legend()\n if samples is not None:\n volumes = volumeFunc(samplesR, samplesLzylByR)\n ax.scatter(samplesR, volumes, label=f'samples')\n\n if plotDir:\n plt.savefig(plotDir+'/geometryRange.png')\n if show:\n plt.show()\n\n\n\ndef main():\n sampleFile = '' + 'C:/PycharmProjects/tankoh2/tmp/doe_circle_20210520_135237_cvt/sampleX.txt'\n\n startTime = datetime.datetime.now()\n names = list(lb.keys())\n runDir = getRunDir(f'doe_{dome}_{\"puckff\" if useFibreFailure else \"puckiff\"}',\n basePath=os.path.join(programDir, 'tmp'))\n\n winder = TankWinder(lb, ub, runDir)\n if sampleFile:\n lcvt = DOEfromFile(sampleFile)\n else:\n lcvt = LatinizedCentroidalVoronoiTesselation(numberOfSamples, len(names))\n\n sampleX = 
BoundsHandler.scaleToBoundsStatic(lcvt.sampleXNormalized, list(lb.values()), list(ub.values()))\n plotGeometryRange([lb['r'], ub['r']],[lb['lzylByR'], ub['lzylByR']], plotDir=runDir, samples=sampleX)\n lcvt.xToFile(os.path.join(runDir, 'sampleX.txt'))\n lcvt.xToFileStatic(os.path.join(runDir, 'sampleX_bounds.txt'), sampleX)\n sampleY = getY(sampleX, winder, verbose=True, runDir=runDir)\n\n # store samples\n lcvt.yToFile(os.path.join(runDir, 'sampleY.txt'), winder, sampleY)\n # lcvt.xyToFile(os.path.join(runDir, 'full_doe2.txt'), winder, sampleY, True)\n\n allSamples = [names + winder.resultNames]\n for inputSample, outputSample in zip(sampleX.T, sampleY):\n if hasattr(outputSample, '__iter__'):\n allSamples.append(list(inputSample) + list(outputSample))\n else:\n allSamples.append(list(inputSample) + list([outputSample]))\n with open(os.path.join(runDir, 'full_doe.txt'), 'w') as f:\n f.write(indent(allSamples, hasHeader=True))\n\n duration = datetime.datetime.now() - startTime\n log.info(f'runtime {duration.seconds} seconds')\n\n\nif __name__ == '__main__':\n if 1:\n main()\n else:\n plotGeometryRange([lb['r'], ub['r']],[lb['lzylByR'], ub['lzylByR']], show=True)\n" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.show", "numpy.array", "numpy.linspace" ] ]
Redict/rg_sound_generation
[ "6db8826d0797650bc5c1555a60cc9c6b3f82050d" ]
[ "audio_annotator/audio_annotator/create_spectrograms.py" ]
[ "import os\nimport librosa\nimport librosa.display\nimport matplotlib.pyplot as plt\n\nfrom tqdm import tqdm\n\n\ndef create_spectrograms():\n audio_dir = os.path.join('audio_annotator', 'static')\n files = [x for x in os.listdir(audio_dir) if x.lower().endswith('.wav')]\n\n for f in tqdm(files):\n audio_path = os.path.join(audio_dir, f)\n image_path = os.path.join(audio_dir, f'{os.path.splitext(f)[0]}.png')\n audio, sr = librosa.load(audio_path)\n mel = librosa.feature.melspectrogram(\n audio,\n sr=sr,\n n_fft=1024,\n hop_length=64,\n n_mels=256\n )\n log_mel = librosa.power_to_db(mel)\n\n plt.figure(figsize=(4, 3))\n librosa.display.specshow(\n log_mel,\n sr=sr,\n x_axis='time',\n y_axis='mel',\n cmap='inferno'\n )\n plt.tight_layout()\n plt.savefig(image_path)\n plt.close()\n\n\nif __name__ == '__main__':\n create_spectrograms()\n" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.close", "matplotlib.pyplot.savefig" ] ]
Code-the-Change-YYC/YW-NLP
[ "a2ff0c96f449e81998fca6fa083350cf22eac382" ]
[ "models/svm_model.py" ]
[ "import numpy as np\nfrom sklearn.pipeline import Pipeline\n\nfrom models.model import Model, ArrayLike\nfrom preprocess.report_data import ReportData\nfrom preprocess.report_data_d import ColName\n\nfrom training.description_classification.utils import load_svm, SVMPipeline\n\n\nclass SVMDescriptionClf(Model[SVMPipeline]):\n \"\"\"Complement Naive Bayes model for description classification.\"\"\"\n _model: Pipeline\n\n def __init__(self):\n self._model = load_svm()\n\n def predict(self, X: ArrayLike) -> np.ndarray:\n \"\"\"Predict the primary incident type of the given descriptions.\n\n Params:\n X: 1D array-like of descriptions to classify\n\n Returns:\n 1D array of `IncidentType` predictions for the given descriptions.\n \"\"\"\n predictions = self._model.predict(X)\n return np.array([prediction for prediction in predictions])\n\n def partial_fit(self, X: ArrayLike, y: ArrayLike, classes: ArrayLike = None) -> object:\n pass\n\n\nif __name__ == '__main__':\n clf = SVMDescriptionClf()\n df = ReportData().get_processed_data()\n print(clf.predict([df[ColName.DESC][0]]))\n" ]
[ [ "numpy.array" ] ]
joel99/midlevel-reps
[ "f0b4a4d8ccf09a0488cd18af24723172aff99446" ]
[ "evkit/utils/viz/core.py" ]
[ "import numpy as np\nfrom skimage.transform import resize\nimport skimage\nimport torchvision.utils as tvutils\nimport torch\n\n\ndef rescale_for_display( batch, rescale=True, normalize=False ):\n '''\n Prepares network output for display by optionally rescaling from [-1,1],\n and by setting some pixels to the min/max of 0/1. This prevents matplotlib\n from rescaling the images. \n '''\n if rescale:\n display_batch = [ rescale_image( im.copy(), new_scale=[0, 1], current_scale=[-1, 1] ) \n for im in batch ]\n else:\n display_batch = batch.copy()\n if not normalize:\n for im in display_batch:\n im[0,0,0] = 1.0 # Adjust some values so that matplotlib doesn't rescale\n im[0,1,0] = 0.0 # Now adjust the min\n return display_batch\n\n\n\ndef rescale_image(im, new_scale=[-1.,1.], current_scale=None, no_clip=False):\n \"\"\"\n Rescales an image pixel values to target_scale\n \n Args:\n img: A np.float_32 array, assumed between [0,1]\n new_scale: [min,max] \n current_scale: If not supplied, it is assumed to be in:\n [0, 1]: if dtype=float\n [0, 2^16]: if dtype=uint\n [0, 255]: if dtype=ubyte\n Returns:\n rescaled_image\n \"\"\"\n # im = im.astype(np.float32)\n if current_scale is not None:\n min_val, max_val = current_scale\n if not no_clip:\n im = np.clip(im, min_val, max_val)\n im = im - min_val\n im /= (max_val - min_val) \n min_val, max_val = new_scale\n im *= (max_val - min_val)\n im += min_val\n im = skimage.img_as_float(im)\n\n return im \n\n\ndef resize_image(im, new_dims, interp_order=1):\n \"\"\"\n Resize an image array with interpolation.\n Parameters\n ----------\n im : (H x W x K) ndarray\n new_dims : (height, width) tuple of new dimensions.\n interp_order : interpolation order, default is linear.\n Returns\n -------\n im : resized ndarray with shape (new_dims[0], new_dims[1], K)\n By kchen @ https://github.com/kchen92/joint-representation/blob/24b30ca6963d2ec99618af379c1e05e1f7026710/lib/data/input_pipeline_feed_dict.py\n \"\"\"\n if type(im) == PIL.PngImagePlugin.PngImageFile:\n interps = [PIL.Image.NEAREST, PIL.Image.BILINEAR]\n return skimage.util.img_as_float(im.resize(new_dims, interps[interp_order]))\n \n if all( new_dims[i] == im.shape[i] for i in range( len( new_dims ) ) ):\n resized_im = im #return im.astype(np.float32)\n elif im.shape[-1] == 1 or im.shape[-1] == 3:\n # # skimage is fast but only understands {1,3} channel images\n resized_im = resize(im, new_dims, order=interp_order, preserve_range=True)\n else:\n # ndimage interpolates anything but more slowly.\n scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))\n resized_im = zoom(im, scale + (1,), order=interp_order)\n # resized_im = resized_im.astype(np.float32)\n return resized_im\n\ndef resize_rescale_image(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):\n \"\"\"\n Resize an image array with interpolation, and rescale to be \n between \n Parameters\n ----------\n im : (H x W x K) ndarray\n new_dims : (height, width) tuple of new dimensions.\n new_scale : (min, max) tuple of new scale.\n interp_order : interpolation order, default is linear.\n Returns\n -------\n im : resized ndarray with shape (new_dims[0], new_dims[1], K)\n \"\"\"\n img = skimage.img_as_float( img )\n img = resize_image( img, new_dims, interp_order )\n img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip )\n\n return img\n\n\n\ndef pack_images(x, prediction, label, mask=None):\n uncertainty = None\n if isinstance(prediction, tuple):\n prediction, uncertainty = 
prediction\n\n if len(label.shape) == 4 and label.shape[1] == 2:\n zeros = torch.zeros(label.shape[0], 1, label.shape[2], label.shape[3]).to(label.device)\n label = torch.cat([label, zeros], dim=1)\n prediction = torch.cat([prediction, zeros], dim=1)\n if uncertainty is not None:\n uncertainty = torch.cat([uncertainty, zeros], dim=1)\n if mask is not None:\n mask = torch.cat([mask, mask[:,0].unsqueeze(1)], dim=1)\n\n if len(x.shape) == 4 and x.shape[1] == 2:\n zeros = torch.zeros(x.shape[0], 1, x.shape[2], x.shape[3]).to(x.device)\n x = torch.cat([x, zeros], dim=1)\n to_cat = []\n \n if x.shape[1] <= 3:\n to_cat.append(x)\n shape_with_three_channels = list(x.shape)\n shape_with_three_channels[1] = 3\n to_cat.append(prediction.expand(shape_with_three_channels))\n if uncertainty is not None:\n print(uncertainty.min(), uncertainty.max())\n uncertainty = 2*uncertainty - 1.0\n uncertainty = uncertainty.clamp(min=-1.0, max=1.0)\n to_cat.append(uncertainty.expand(shape_with_three_channels))\n to_cat.append(label.expand(shape_with_three_channels))\n if mask is not None:\n to_cat.append(mask.expand(shape_with_three_channels))\n# print([p.shape for p in to_cat])\n im_samples = torch.cat(to_cat, dim=3)\n im_samples = tvutils.make_grid(im_samples.detach().cpu(), nrow=1, padding=2)\n return im_samples\n\n\ndef maybe_entriple(x, is_mask=False):\n if x.shape[1] == 2:\n if is_mask:\n x = torch.cat([x, x[:,0].unsqueeze(1)], dim=1)\n else:\n zeros = torch.zeros(x.shape[0], 1, x.shape[2], x.shape[3]).to(x.device)\n x = torch.cat([x, zeros], dim=1)\n shape_with_three_channels = list(x.shape)\n shape_with_three_channels[1] = 3\n return x.expand(shape_with_three_channels)\n\ndef pack_chained_images(x, predictions, labels, mask=None):\n x = maybe_entriple(x)\n if mask is not None:\n mask = maybe_entriple(mask, is_mask=True)\n tripled_predictions, uncertainties = [], []\n for p in predictions:\n if isinstance(p, tuple):\n p, u = p\n uncertainties.append(maybe_entriple(u))\n else:\n uncertainties.append(None)\n tripled_predictions.append(maybe_entriple(p))\n predictions = tripled_predictions\n labels = [maybe_entriple(l) for l in labels]\n\n to_cat = []\n if x.shape[1] <= 3:\n to_cat.append(x)\n for pred, uncert, label in zip(predictions, uncertainties, labels):\n to_cat.append(label)\n to_cat.append(pred)\n if uncert is not None:\n print(uncert.min(), uncert.max())\n uncert = 2*uncert - 1.0\n uncert = uncert.clamp(min=-1.0, max=1.0)\n to_cat.append(uncert)\n if mask is not None:\n to_cat.append(mask)\n# print([p.shape for p in to_cat])\n im_samples = torch.cat(to_cat, dim=3)\n im_samples = tvutils.make_grid(im_samples.detach().cpu(), nrow=1, padding=2)\n return im_samples" ]
[ [ "torch.zeros", "numpy.clip", "torch.cat", "numpy.array" ] ]
xbodx/DeepPavlov
[ "4b60bf162df4294b8b0db3b72786cdd699c674fa", "4b60bf162df4294b8b0db3b72786cdd699c674fa" ]
[ "deeppavlov/models/preprocessors/squad_preprocessor.py", "deeppavlov/models/morpho_tagger/cells.py" ]
[ "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport bisect\nimport pickle\nimport unicodedata\nfrom collections import Counter\nfrom logging import getLogger\nfrom pathlib import Path\nfrom typing import Tuple, List, Union\n\nimport numpy as np\nfrom nltk import word_tokenize\nfrom tqdm import tqdm\n\nfrom deeppavlov.core.commands.utils import expand_path\nfrom deeppavlov.core.common.registry import register\nfrom deeppavlov.core.models.component import Component\nfrom deeppavlov.core.models.estimator import Estimator\n\nlogger = getLogger(__name__)\n\n\n@register('squad_preprocessor')\nclass SquadPreprocessor(Component):\n \"\"\" SquadPreprocessor is used to preprocess context and question in SQuAD-like datasets.\n\n Preprocessing includes: sanitizing unicode symbols, quotes, word tokenizing and\n building mapping from raw text to processed text.\n\n Params:\n context_limit: max context length in tokens\n question_limit: max question length in tokens\n char_limit: max number of characters in token\n \"\"\"\n\n def __init__(self, context_limit: int = 450, question_limit: int = 150, char_limit: int = 16, *args, **kwargs):\n self.context_limit = context_limit\n self.question_limit = question_limit\n self.char_limit = char_limit\n\n def __call__(self, contexts_raw: Tuple[str, ...], questions_raw: Tuple[str, ...],\n **kwargs) -> Tuple[\n List[str], List[List[str]], List[List[List[str]]],\n List[List[int]], List[List[int]],\n List[str], List[List[str]], List[List[List[str]]],\n List[List[Tuple[int, int]]]\n ]:\n \"\"\" Performs preprocessing of context and question\n Args:\n contexts_raw: batch of contexts to preprocess\n questions_raw: batch of questions to preprocess\n\n Returns:\n context: batch of processed contexts\n contexts_tokens: batch of tokenized contexts\n contexts_chars: batch of tokenized and split on chars contexts\n contexts_r2p: batch of mappings from raw context to processed context\n contexts_p2r: batch of mappings from procesesd context to raw context\n questions: batch of processed questions\n questions_tokens: batch of tokenized questions\n questions_chars: batch of tokenized and split on chars questions\n spans: batch of mapping tokens to position in context\n \"\"\"\n contexts = []\n contexts_tokens = []\n contexts_chars = []\n contexts_r2p = []\n contexts_p2r = []\n questions = []\n questions_tokens = []\n questions_chars = []\n spans = []\n for c_raw, q_raw in zip(contexts_raw, questions_raw):\n c, r2p, p2r = SquadPreprocessor.preprocess_str(c_raw, return_mapping=True)\n c_tokens = [token.replace(\"''\", '\"').replace(\"``\", '\"') for token in word_tokenize(c)][:self.context_limit]\n c_chars = [list(token)[:self.char_limit] for token in c_tokens]\n q = SquadPreprocessor.preprocess_str(q_raw)\n q_tokens = [token.replace(\"''\", '\"').replace(\"``\", '\"') for token in word_tokenize(q)][:self.question_limit]\n q_chars = [list(token)[:self.char_limit] for token in q_tokens]\n contexts.append(c)\n 
contexts_tokens.append(c_tokens)\n contexts_chars.append(c_chars)\n contexts_r2p.append(r2p)\n contexts_p2r.append(p2r)\n questions.append(q)\n questions_tokens.append(q_tokens)\n questions_chars.append(q_chars)\n spans.append(SquadPreprocessor.convert_idx(c, c_tokens))\n return contexts, contexts_tokens, contexts_chars, contexts_r2p, contexts_p2r, \\\n questions, questions_tokens, questions_chars, spans\n\n @staticmethod\n def preprocess_str(line: str, return_mapping: bool = False) -> Union[Tuple[str, List[int], List[int]], str]:\n \"\"\" Removes unicode and other characters from str\n\n Args:\n line: string to process\n return_mapping: return mapping from line to preprocessed line or not\n\n Returns:\n preprocessed line, raw2preprocessed mapping, preprocessed2raw mapping\n\n \"\"\"\n if not return_mapping:\n return ''.join(c for c in line if not unicodedata.combining(c)).replace(\"''\", '\" ').replace(\"``\", '\" ')\n\n r2p = [len(line)] * (len(line) + 1)\n p2r = [len(line)] * (len(line) + 1)\n s = ''\n for i, c in enumerate(line):\n if unicodedata.combining(c):\n r2p[i] = -1\n else:\n s += c\n r2p[i] = len(s) - 1\n p2r[len(s) - 1] = i\n return s.replace(\"''\", '\" ').replace(\"``\", '\" '), r2p, p2r\n\n @staticmethod\n def convert_idx(text: str, tokens: List[str]) -> List[Tuple[int, int]]:\n current = 0\n spans = []\n for token in tokens:\n current = text.find(token, current)\n if current < 0:\n logger.error(\"Token {} cannot be found\".format(token))\n raise Exception()\n spans.append((current, current + len(token)))\n current += len(token)\n return spans\n\n\n@register('squad_ans_preprocessor')\nclass SquadAnsPreprocessor(Component):\n \"\"\" SquadAnsPreprocessor is responsible for answer preprocessing.\"\"\"\n\n def __init__(self, *args, **kwargs):\n pass\n\n def __call__(self, answers_raw: Tuple[List[str], ...], answers_start: Tuple[List[int], ...],\n r2ps: List[List[int]], spans: List[List[Tuple[int, int]]],\n **kwargs) -> Tuple[List[List[str]], List[List[int]], List[List[int]]]:\n \"\"\" Processes answers for SQuAD dataset\n\n Args:\n answers_raw: list of str [batch_size x number_of_answers]\n answers_start: start position of answer (in chars) [batch_size x number_of_answers]\n r2ps: mapping from raw context to processed context\n spans: mapping tokens to position in context\n\n Returns:\n processed answer text, start position in tokens, end position in tokens\n [batch_size x number_of_answers]\n\n \"\"\"\n answers = []\n start = []\n end = []\n for ans_raw, ans_st, r2p, span in zip(answers_raw, answers_start, r2ps, spans):\n start.append([])\n end.append([])\n answers.append([])\n for a_raw, a_st in zip(ans_raw, ans_st):\n ans = SquadPreprocessor.preprocess_str(a_raw)\n ans_st = r2p[a_st]\n ans_end = ans_st + len(ans)\n answer_span = []\n for idx, sp in enumerate(span):\n if not (ans_end <= sp[0] or ans_st >= sp[1]):\n answer_span.append(idx)\n if len(answer_span) != 0:\n y1, y2 = answer_span[0], answer_span[-1]\n else:\n # answer not found in context\n y1, y2 = -1, -1\n start[-1].append(y1)\n end[-1].append(y2)\n answers[-1].append(ans)\n return answers, start, end\n\n\n@register('squad_vocab_embedder')\nclass SquadVocabEmbedder(Estimator):\n \"\"\" SquadVocabEmbedder is used to build tokens/chars vocabulary and embedding matrix.\n\n It extracts tokens/chars form dataset and looks for pretrained embeddings.\n\n Params:\n emb_folder: path to download pretrained embeddings\n emb_url: link to pretrained embeddings\n save_path: extracted embeddings save path\n load_path: extracted 
embeddigns load path\n context_limit: max context length in tokens\n question_limit: max question length in tokens\n char_limit: max number of characters in token\n level: token or char\n \"\"\"\n\n def __init__(self, emb_folder: str, emb_url: str, save_path: str, load_path: str,\n context_limit: int = 450, question_limit: int = 150, char_limit: int = 16,\n level: str = 'token', *args, **kwargs):\n self.emb_folder = expand_path(emb_folder)\n self.level = level\n self.emb_url = emb_url\n self.emb_file_name = Path(emb_url).name\n self.save_path = expand_path(save_path)\n self.load_path = expand_path(load_path)\n self.context_limit = context_limit\n self.question_limit = question_limit\n self.char_limit = char_limit\n self.loaded = False\n\n self.NULL = \"<NULL>\"\n self.OOV = \"<OOV>\"\n\n self.emb_folder.mkdir(parents=True, exist_ok=True)\n\n self.emb_dim = self.emb_mat = self.token2idx_dict = None\n\n if self.load_path.exists():\n self.load()\n\n def __call__(self, contexts: List[List[str]], questions: List[List[str]]) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\" Transforms tokens/chars to indices.\n\n Args:\n contexts: batch of list of tokens in context\n questions: batch of list of tokens in question\n\n Returns:\n transformed contexts and questions\n \"\"\"\n if self.level == 'token':\n c_idxs = np.zeros([len(contexts), self.context_limit], dtype=np.int32)\n q_idxs = np.zeros([len(questions), self.question_limit], dtype=np.int32)\n for i, context in enumerate(contexts):\n for j, token in enumerate(context):\n c_idxs[i, j] = self._get_idx(token)\n\n for i, question in enumerate(questions):\n for j, token in enumerate(question):\n q_idxs[i, j] = self._get_idx(token)\n\n elif self.level == 'char':\n c_idxs = np.zeros([len(contexts), self.context_limit, self.char_limit], dtype=np.int32)\n q_idxs = np.zeros([len(questions), self.question_limit, self.char_limit], dtype=np.int32)\n for i, context in enumerate(contexts):\n for j, token in enumerate(context):\n for k, char in enumerate(token):\n c_idxs[i, j, k] = self._get_idx(char)\n\n for i, question in enumerate(questions):\n for j, token in enumerate(question):\n for k, char in enumerate(token):\n q_idxs[i, j, k] = self._get_idx(char)\n\n return c_idxs, q_idxs\n\n def fit(self, contexts: Tuple[List[str], ...], questions: Tuple[List[str]], *args, **kwargs):\n self.vocab = Counter()\n self.embedding_dict = dict()\n if not self.loaded:\n logger.info('SquadVocabEmbedder: fitting with {}s'.format(self.level))\n if self.level == 'token':\n for line in tqdm(contexts + questions):\n for token in line:\n self.vocab[token] += 1\n elif self.level == 'char':\n for line in tqdm(contexts + questions):\n for token in line:\n for c in token:\n self.vocab[c] += 1\n else:\n raise RuntimeError(\"SquadVocabEmbedder::fit: Unknown level: {}\".format(self.level))\n\n with (self.emb_folder / self.emb_file_name).open('r', encoding='utf8') as femb:\n emb_voc_size, self.emb_dim = map(int, femb.readline().split())\n for line in tqdm(femb, total=emb_voc_size):\n line_split = line.strip().split(' ')\n word = line_split[0]\n vec = np.array(line_split[1:], dtype=float)\n if len(vec) != self.emb_dim:\n continue\n if word in self.vocab:\n self.embedding_dict[word] = vec\n\n self.token2idx_dict = {token: idx for idx, token in enumerate(self.embedding_dict.keys(), 2)}\n self.token2idx_dict[self.NULL] = 0\n self.token2idx_dict[self.OOV] = 1\n self.embedding_dict[self.NULL] = [0.] * self.emb_dim\n self.embedding_dict[self.OOV] = [0.] 
* self.emb_dim\n idx2emb_dict = {idx: self.embedding_dict[token]\n for token, idx in self.token2idx_dict.items()}\n self.emb_mat = np.array([idx2emb_dict[idx] for idx in range(len(idx2emb_dict))])\n\n def load(self) -> None:\n logger.info('SquadVocabEmbedder: loading saved {}s vocab from {}'.format(self.level, self.load_path))\n with self.load_path.open('rb') as f:\n self.emb_dim, self.emb_mat, self.token2idx_dict = pickle.load(f)\n self.loaded = True\n\n def deserialize(self, data: bytes) -> None:\n self.emb_dim, self.emb_mat, self.token2idx_dict = pickle.loads(data)\n self.loaded = True\n\n def save(self) -> None:\n logger.info('SquadVocabEmbedder: saving {}s vocab to {}'.format(self.level, self.save_path))\n self.save_path.parent.mkdir(parents=True, exist_ok=True)\n with self.save_path.open('wb') as f:\n pickle.dump((self.emb_dim, self.emb_mat, self.token2idx_dict), f, protocol=4)\n\n def serialize(self) -> bytes:\n return pickle.dumps((self.emb_dim, self.emb_mat, self.token2idx_dict), protocol=4)\n\n def _get_idx(self, el: str) -> int:\n \"\"\" Returns idx for el (token or char).\n\n Args:\n el: token or character\n\n Returns:\n idx in vocabulary\n \"\"\"\n for e in (el, el.lower(), el.capitalize(), el.upper()):\n if e in self.token2idx_dict:\n return self.token2idx_dict[e]\n return 1\n\n\n@register('squad_ans_postprocessor')\nclass SquadAnsPostprocessor(Component):\n \"\"\" SquadAnsPostprocessor class is responsible for processing SquadModel output.\n\n It extract answer from context using predicted by SquadModel answer positions.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n pass\n\n def __call__(self, ans_start: Tuple[int, ...], ans_end: Tuple[int, ...], contexts: Tuple[str, ...],\n p2rs: List[List[int]], spans: List[List[Tuple[int, int]]],\n **kwargs) -> Tuple[List[str], List[int], List[int]]:\n \"\"\" Extracts answer from context using predicted answer positions.\n\n Args:\n ans_start: predicted start position in processed context: list of ints with len(ans_start) == batch_size\n ans_end: predicted end position in processed context\n contexts: raw contexts\n p2rs: mapping from processed context to raw\n spans: tokens positions in context\n\n Returns:\n postprocessed answer text, start position in raw context, end position in raw context\n \"\"\"\n answers = []\n start = []\n end = []\n for a_st, a_end, c, p2r, span in zip(ans_start, ans_end, contexts, p2rs, spans):\n if a_st == -1 or a_end == -1:\n start.append(-1)\n end.append(-1)\n answers.append('')\n else:\n start.append(p2r[span[a_st][0]])\n end.append(p2r[span[a_end][1]])\n answers.append(c[start[-1]:end[-1]])\n return answers, start, end\n\n\n@register('squad_bert_mapping')\nclass SquadBertMappingPreprocessor(Component):\n \"\"\"Create mapping from BERT subtokens to their characters positions and vice versa.\n\n Args:\n do_lower_case: set True if lowercasing is needed\n\n \"\"\"\n\n def __init__(self, do_lower_case: bool = True, *args, **kwargs):\n self.do_lower_case = do_lower_case\n\n def __call__(self, contexts, bert_features, *args, **kwargs):\n subtok2chars = []\n char2subtoks = []\n for batch_counter, (context, features) in enumerate(zip(contexts, bert_features)):\n if self.do_lower_case:\n context = context.lower()\n if len(args) > 0:\n subtokens = args[0][batch_counter]\n else:\n subtokens = features.tokens\n context_start = subtokens.index('[SEP]') + 1\n idx = 0\n subtok2char = {}\n char2subtok = {}\n for i, subtok in list(enumerate(subtokens))[context_start:-1]:\n subtok = subtok[2:] if 
subtok.startswith('##') else subtok\n subtok_pos = context[idx:].find(subtok)\n if subtok_pos == -1:\n # it could be UNK\n idx += 1 # len was at least one\n else:\n # print(k, '\\t', t, p + idx)\n idx += subtok_pos\n subtok2char[i] = idx\n for j in range(len(subtok)):\n char2subtok[idx + j] = i\n idx += len(subtok)\n subtok2chars.append(subtok2char)\n char2subtoks.append(char2subtok)\n\n return subtok2chars, char2subtoks\n\n\n@register('squad_bert_ans_preprocessor')\nclass SquadBertAnsPreprocessor(Component):\n \"\"\"Create answer start and end positions in subtokens.\n\n Args:\n do_lower_case: set True if lowercasing is needed\n\n \"\"\"\n\n def __init__(self, do_lower_case: bool = True, *args, **kwargs):\n self.do_lower_case = do_lower_case\n\n def __call__(self, answers_raw, answers_start, char2subtoks, **kwargs):\n answers, starts, ends = [], [], []\n for answers_raw, answers_start, c2sub in zip(answers_raw, answers_start, char2subtoks):\n answers.append([])\n starts.append([])\n ends.append([])\n for ans, ans_st in zip(answers_raw, answers_start):\n if self.do_lower_case:\n ans = ans.lower()\n try:\n indices = {c2sub[i] for i in range(ans_st, ans_st + len(ans)) if i in c2sub}\n st = min(indices)\n end = max(indices)\n except ValueError:\n # 0 - CLS token\n st, end = 0, 0\n ans = ''\n starts[-1] += [st]\n ends[-1] += [end]\n answers[-1] += [ans]\n return answers, starts, ends\n\n\n@register('squad_bert_ans_postprocessor')\nclass SquadBertAnsPostprocessor(Component):\n \"\"\"Extract answer and create answer start and end positions in characters from subtoken positions.\"\"\"\n\n def __init__(self, *args, **kwargs):\n pass\n\n def __call__(self, answers_start, answers_end, contexts, bert_features, subtok2chars, *args, **kwargs):\n answers = []\n starts = []\n ends = []\n for batch_counter, (answer_st, answer_end, context, features, sub2c) in \\\n enumerate(zip(answers_start, answers_end, contexts, bert_features, subtok2chars)):\n # CLS token is no_answer token\n if answer_st == 0 or answer_end == 0:\n answers += ['']\n starts += [-1]\n ends += [-1]\n else:\n st = self.get_char_position(sub2c, answer_st)\n end = self.get_char_position(sub2c, answer_end)\n if len(args) > 0:\n subtok = args[0][batch_counter][answer_end]\n else:\n subtok = features.tokens[answer_end]\n subtok = subtok[2:] if subtok.startswith('##') else subtok\n answer = context[st:end + len(subtok)]\n answers += [answer]\n starts += [st]\n ends += [ends]\n return answers, starts, ends\n\n @staticmethod\n def get_char_position(sub2c, sub_pos):\n keys = list(sub2c.keys())\n found_idx = bisect.bisect(keys, sub_pos)\n if found_idx == 0:\n return sub2c[keys[0]]\n\n return sub2c[keys[found_idx - 1]]\n", "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.initializers import Constant\nfrom tensorflow.keras.layers import InputSpec, Layer, Lambda, Dropout, 
Multiply\n\nINFTY = -100\n\n\nclass Highway(Layer):\n\n def __init__(self, activation=None, bias_initializer=-1, **kwargs):\n super().__init__(**kwargs)\n self.activation = tf.keras.activations.get(activation)\n self.bias_initializer = bias_initializer\n if isinstance(self.bias_initializer, int):\n self.bias_initializer = Constant(self.bias_initializer)\n self.input_spec = [InputSpec(min_ndim=2)]\n\n def build(self, input_shape):\n assert len(input_shape) >= 2\n input_dim = input_shape[-1]\n\n self.gate_kernel = self.add_weight(\n shape=(input_dim, input_dim), initializer='uniform', name='gate_kernel')\n self.gate_bias = self.add_weight(\n shape=(input_dim,), initializer=self.bias_initializer, name='gate_bias')\n self.dense_kernel = self.add_weight(\n shape=(input_dim, input_dim), initializer='uniform', name='dense_kernel')\n self.dense_bias = self.add_weight(\n shape=(input_dim,), initializer=self.bias_initializer, name='dense_bias')\n self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})\n self.built = True\n\n def call(self, inputs, **kwargs):\n gate = K.dot(inputs, self.gate_kernel)\n gate = K.bias_add(gate, self.gate_bias, data_format=\"channels_last\")\n gate = self.activation(gate)\n new_value = K.dot(inputs, self.dense_kernel)\n new_value = K.bias_add(new_value, self.dense_bias, data_format=\"channels_last\")\n return gate * new_value + (1.0 - gate) * inputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\ndef weighted_sum(first, second, sigma, first_threshold=-np.inf, second_threshold=np.inf):\n logit_probs = first * sigma + second * (1.0 - sigma)\n infty_tensor = K.ones_like(logit_probs) * INFTY\n logit_probs = K.switch(K.greater(first, first_threshold), logit_probs, infty_tensor)\n logit_probs = K.switch(K.greater(second, second_threshold), logit_probs, infty_tensor)\n return logit_probs\n\n\nclass WeightedCombinationLayer(Layer):\n\n \"\"\"\n A class for weighted combination of probability distributions\n \"\"\"\n\n def __init__(self, first_threshold=None, second_threshold=None,\n use_dimension_bias=False, use_intermediate_layer=False,\n intermediate_dim=64, intermediate_activation=None,\n from_logits=False, return_logits=False,\n bias_initializer=1.0, **kwargs):\n # if 'input_shape' not in kwargs:\n # kwargs['input_shape'] = [(None, input_dim,), (None, input_dim)]\n super(WeightedCombinationLayer, self).__init__(**kwargs)\n self.first_threshold = first_threshold if first_threshold is not None else INFTY\n self.second_threshold = second_threshold if second_threshold is not None else INFTY\n self.use_dimension_bias = use_dimension_bias\n self.use_intermediate_layer = use_intermediate_layer\n self.intermediate_dim = intermediate_dim\n self.intermediate_activation = tf.keras.activations.get(intermediate_activation)\n self.from_logits = from_logits\n self.return_logits = return_logits\n self.bias_initializer = bias_initializer\n self.input_spec = [InputSpec(), InputSpec(), InputSpec()]\n\n def build(self, input_shape):\n assert len(input_shape) == 3\n assert input_shape[0] == input_shape[1]\n assert input_shape[0][:-1] == input_shape[2][:-1]\n\n input_dim, features_dim = input_shape[0][-1], input_shape[2][-1]\n if self.use_intermediate_layer:\n self.first_kernel = self.add_weight(\n shape=(features_dim, self.intermediate_dim),\n initializer=\"random_uniform\", name='first_kernel')\n self.first_bias = self.add_weight(\n shape=(self.intermediate_dim,),\n initializer=\"random_uniform\", name='first_bias')\n self.features_kernel = self.add_weight(\n 
shape=(features_dim, 1), initializer=\"random_uniform\", name='kernel')\n self.features_bias = self.add_weight(\n shape=(1,), initializer=Constant(self.bias_initializer), name='bias')\n if self.use_dimension_bias:\n self.dimensions_bias = self.add_weight(\n shape=(input_dim,), initializer=\"random_uniform\", name='dimension_bias')\n super(WeightedCombinationLayer, self).build(input_shape)\n\n def call(self, inputs, **kwargs):\n assert isinstance(inputs, list) and len(inputs) == 3\n first, second, features = inputs[0], inputs[1], inputs[2]\n if not self.from_logits:\n first = K.clip(first, 1e-10, 1.0)\n second = K.clip(second, 1e-10, 1.0)\n first_, second_ = K.log(first), K.log(second)\n else:\n first_, second_ = first, second\n # embedded_features.shape = (M, T, 1)\n if self.use_intermediate_layer:\n features = K.dot(features, self.first_kernel)\n features = K.bias_add(features, self.first_bias, data_format=\"channels_last\")\n features = self.intermediate_activation(features)\n embedded_features = K.dot(features, self.features_kernel)\n embedded_features = K.bias_add(\n embedded_features, self.features_bias, data_format=\"channels_last\")\n if self.use_dimension_bias:\n tiling_shape = [1] * (K.ndim(first) - 1) + [K.shape(first)[-1]]\n embedded_features = K.tile(embedded_features, tiling_shape)\n embedded_features = K.bias_add(\n embedded_features, self.dimensions_bias, data_format=\"channels_last\")\n sigma = K.sigmoid(embedded_features)\n\n result = weighted_sum(first_, second_, sigma,\n self.first_threshold, self.second_threshold)\n probs = K.softmax(result)\n if self.return_logits:\n return [probs, result]\n return probs\n\n def compute_output_shape(self, input_shape):\n first_shape = input_shape[0]\n if self.return_logits:\n return [first_shape, first_shape]\n return first_shape\n\n\ndef TemporalDropout(inputs, dropout=0.0):\n \"\"\"\n Drops with :dropout probability temporal steps of input 3D tensor\n \"\"\"\n # TO DO: adapt for >3D tensors\n if dropout == 0.0:\n return inputs\n inputs_func = lambda x: K.ones_like(inputs[:, :, 0:1])\n inputs_mask = Lambda(inputs_func)(inputs)\n inputs_mask = Dropout(dropout)(inputs_mask)\n tiling_shape = [1, 1, K.shape(inputs)[2]] + [1] * (K.ndim(inputs) - 3)\n inputs_mask = Lambda(K.tile, arguments={\"n\": tiling_shape},\n output_shape=inputs._keras_shape[1:])(inputs_mask)\n answer = Multiply()([inputs, inputs_mask])\n return answer\n\n\ndef positions_func(inputs, pad=0):\n \"\"\"\n A layer filling i-th column of a 2D tensor with\n 1+ln(1+i) when it contains a meaningful symbol\n and with 0 when it contains PAD\n \"\"\"\n position_inputs = K.cumsum(K.ones_like(inputs, dtype=\"float32\"), axis=1)\n position_inputs *= K.cast(K.not_equal(inputs, pad), \"float32\")\n return K.log(1.0 + position_inputs)" ]
[ [ "numpy.array" ], [ "tensorflow.keras.backend.dot", "tensorflow.keras.initializers.Constant", "tensorflow.keras.backend.softmax", "tensorflow.keras.layers.Dropout", "tensorflow.keras.backend.greater", "tensorflow.keras.backend.clip", "tensorflow.keras.backend.bias_add", "tensorflow.keras.backend.tile", "tensorflow.keras.layers.InputSpec", "tensorflow.keras.layers.Lambda", "tensorflow.keras.backend.not_equal", "tensorflow.keras.backend.sigmoid", "tensorflow.keras.backend.log", "tensorflow.keras.layers.Multiply", "tensorflow.keras.backend.ones_like", "tensorflow.keras.backend.shape", "tensorflow.keras.backend.ndim", "tensorflow.keras.activations.get" ] ]
zhangbo2008/vqvae_pytorch
[ "98f2f2386328245ae26ac999528c7dda57680aca" ]
[ "dvq/data/cifar10.py" ]
[ "from torch.utils.data import DataLoader\nfrom torchvision import transforms as T\nfrom torchvision.datasets import CIFAR10\n\nimport pytorch_lightning as pl\n\nclass CIFAR10Data(pl.LightningDataModule):\n \"\"\" returns cifar-10 examples in floats in range [0,1] \"\"\"\n\n def __init__(self, args):\n super().__init__()\n self.a=args\n # self.a = args\n\n def train_dataloader(self):\n transform = T.Compose(\n [\n T.RandomCrop(32, padding=4, padding_mode='reflect'),\n T.RandomHorizontalFlip(),\n T.ToTensor(),\n ]\n )\n dataset = CIFAR10(root=self.a.data_dir, train=True, transform=transform, download=True)\n dataloader = DataLoader(\n dataset,\n batch_size=self.a.batch_size,\n num_workers=self.a.num_workers,\n drop_last=True,\n pin_memory=True,\n shuffle=True,\n )\n return dataloader\n\n def val_dataloader(self):\n transform = T.Compose(\n [\n T.ToTensor(),\n ]\n )\n dataset = CIFAR10(root=self.a.data_dir, train=False, transform=transform, download=True)\n dataloader = DataLoader(\n dataset,\n batch_size=self.a.batch_size,\n num_workers=self.a.num_workers,\n drop_last=True,\n pin_memory=True,\n )\n return dataloader\n\n def test_dataloader(self):\n return self.val_dataloader()\n\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
Xtuden-com/language
[ "70c0328968d5ffa1201c6fdecde45bbc4fec19fc" ]
[ "language/serene/training.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Lint as: python3\n\"\"\"Training manager for fever code.\"\"\"\n\nimport json\nimport os\n\n\nfrom absl import logging\nimport dataclasses\nfrom language.serene import callbacks\nfrom language.serene import fever_tfds\nfrom language.serene import layers\nfrom language.serene import losses\nfrom language.serene import model\nfrom language.serene import preprocessing\nfrom language.serene import tokenizers\nfrom language.serene import util\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets as tfds\nimport tqdm\n\nfrom official.common import distribute_utils\n\n\[email protected]\nclass ModelConfig:\n \"\"\"Typed parameters for model.\"\"\"\n fever_experiment_id: int\n model_checkpoint: Text\n dataset: Text\n buffer_size: int\n batch_size: int\n word_emb_size: int\n hidden_size: int\n learning_rate: float\n positive_class_weight: Optional[float]\n max_epochs: int\n dropout: float\n activation: Text\n use_batch_norm: bool\n # Model Choice: two_tower or one_tower (not implemented yet).\n model: Text\n # Preprocessing\n tokenizer: Text # EG: Convert strings to list of strings.\n text_encoder: Text # EG: Convert list of strings to integers.\n basic_lowercase: bool\n\n # Embedder + Contextualizer\n embedder: Text\n contextualizer: Text\n context_num_layers: int\n tied_encoders: bool\n bidirectional: bool\n bert_model_name: Text\n bert_max_seq_length: int\n bert_vocab_path: Text\n bert_model_path: Text\n bert_trainable: bool\n bert_dropout: float\n\n # Neural Module Configuration\n matcher: Text\n matcher_hidden_size: int\n\n projection_dim: int\n\n fever_dev_path: Text\n max_evidence: int\n\n max_claim_tokens: int\n max_evidence_tokens: int\n\n # Whether to include the title/sentence_id in evidence encoding.\n include_title: bool\n include_sentence_id: bool\n n_similar_negatives: int\n n_background_negatives: int\n scrape_type: Text\n include_not_enough_info: bool\n title_in_scoring: bool\n\n classify_claim: bool\n claim_loss_weight: float\n\n def validate(self):\n \"\"\"Validate that the arguments to the config are correct, error if not.\"\"\"\n if self.tokenizer not in ['bert', 'basic']:\n raise ValueError(f'Invalid tokenizer: \"{self.tokenizer}\"')\n\n if self.text_encoder not in ['bert', 'basic']:\n raise ValueError(f'Invalid text encoder: \"{self.text_encoder}\"')\n\n if self.matcher not in layers.matcher_registry:\n raise ValueError(f'Invalid matcher: \"{self.matcher}\"')\n\n if self.contextualizer not in ['bert', 'rnn', 'lstm', 'gru']:\n raise ValueError(f'Invalid contextualizer: \"{self.contextualizer}\"')\n\n if self.model not in ['one_tower', 'two_tower']:\n raise ValueError(f'Invalid model: \"{self.model}\"')\n\n if self.bert_model_name not in ['base', 'large']:\n raise ValueError(f'Invalid bert model: \"{self.bert_model_name}')\n\n if self.embedder not in ['classic_embedder', 'bert_embedder']:\n raise ValueError(f'Invalid embedder: 
\"{self.embedder}\"')\n\n @classmethod\n def from_dict(cls, params):\n return ModelConfig(**params)\n\n @classmethod\n def from_file(cls,\n file_path,\n overrides = None):\n with util.safe_open(file_path) as f:\n params: Dict[Text, Any] = json.load(f)\n if overrides is not None:\n params.update(overrides)\n return ModelConfig.from_dict(params)\n\n def save(self, file_path):\n with util.safe_open(file_path, 'w') as f:\n json.dump(self.asdict(), f)\n\n def asdict(self):\n return dataclasses.asdict(self)\n\n\nclass Trainer:\n \"\"\"Training wrapper around keras to manage vocab/saving/dataset creation.\n\n The primary methods of this class are:\n - train()\n - predict()\n - embed()\n - save()\n - load()\n\n The intended use of this is\n > trainer = Trainer(my_config)\n > trainer.train()\n\n The following methods are primarily for converting TFDS to tf.data.Dataset\n for keras training\n - _build_tokenizer()\n - _build_encoder()\n - _encode_and_batch()\n - _batch_dataset()\n - _encode_dataset()\n - _build_vocab()\n - _tokenize_example()\n\n These are utilities for embedding different TFDSs\n - embed_wiki_dataset()\n - embed_claim_dataset()\n\n The following methods deal with preparing the keras model for training\n - _compile(): Compile model uner right scope, create callbacks, glue losses\n to model\n - _build_callbacks(): Keras callbacks\n \"\"\"\n\n def __init__(\n self,\n model_config,\n debug = False,\n tpu = None,\n distribution_strategy = None,\n tb_log_dir = None):\n \"\"\"Configure the trainer.\n\n Args:\n model_config: ModelConfig parameters for training\n debug: Enables certain debug behaviors like dataset subsampling\n tpu: The TPU to use or None otherwise\n distribution_strategy: Parallel training strategy\n tb_log_dir: The directory for Tensorboard to log to\n \"\"\"\n self._debug = debug\n if debug:\n logging.info('Debug mode enabled on trainer')\n self._tpu = tpu\n self._distribution_strategy = distribution_strategy\n self._tb_log_dir = tb_log_dir\n self._strategy: Optional[tf.distribute.Strategy] = None\n self._model_config = model_config\n self._vocab: Optional[List[Text]] = None\n self._vocab_stats: Dict[Text, Any] = {}\n self._class_stats: Dict[int, int] = {0: 0, 1: 0}\n # Whitespace tokenizer\n self._tokenizer: Optional[tokenizers.Tokenizer] = None\n self._encoder: Optional[preprocessing.FeverTextEncoder] = None\n self._model: Optional[tf.keras.Model] = None\n self._inner_model: Optional[tf.keras.Model] = None\n\n def save(self):\n \"\"\"Persist the encoder and the model to disk.\n \"\"\"\n if self._model is None or self._encoder is None:\n raise ValueError('Model and encoder cannot be None')\n else:\n self._encoder.save_to_file(\n # This is a prefix, which converts to: mydir/text_encoder.tokens\n os.path.join(self._model_config.model_checkpoint, 'text_encoder'))\n self._model.save_weights(\n os.path.join(self._model_config.model_checkpoint, 'best_model.tf'))\n\n @classmethod\n def load(cls,\n model_checkpoint,\n model_config_overrides = None,\n **kwargs):\n \"\"\"Load the model, its tokenizer, and weights from the checkpoint.\n\n Args:\n model_checkpoint: Checkpoint to restore from, from .save()\n model_config_overrides: Extra args for ModelConfig\n **kwargs: Passed through to trainer, used for overriding checkpoint\n\n Returns:\n A model in the same state as just before it was saved with .save()\n \"\"\"\n # pylint: disable=protected-access\n model_config = ModelConfig.from_file(\n os.path.join(model_checkpoint, 'model_config.json'),\n overrides=model_config_overrides)\n 
trainer = Trainer(model_config=model_config, **kwargs)\n trainer._tokenizer = trainer._build_tokenizer()\n encoder_path = os.path.join(model_checkpoint, 'text_encoder')\n if model_config.text_encoder == 'bert':\n trainer._encoder = preprocessing.BertTextEncoder.load_from_file(\n encoder_path)\n elif model_config.text_encoder == 'basic':\n trainer._encoder = preprocessing.BasicTextEncoder.load_from_file(\n encoder_path)\n else:\n raise ValueError('Invalid text encoder')\n\n trainer._compile()\n if trainer._model is None:\n raise ValueError('Model does not exist despite being compiled')\n trainer._model.load_weights(os.path.join(model_checkpoint, 'best_model.tf'))\n return trainer\n\n def _save_model_config(self):\n \"\"\"Save only the Model configuration to disk.\"\"\"\n logging.info('Saving config to: %s/model_config.json',\n self._model_config.model_checkpoint)\n self._model_config.save(\n os.path.join(self._model_config.model_checkpoint, 'model_config.json'))\n\n def _save_encoder(self):\n \"\"\"Save only the text encoder to disk.\"\"\"\n self._encoder.save_to_file(\n os.path.join(self._model_config.model_checkpoint, 'text_encoder'))\n\n @property\n def vocab_size(self):\n if self._encoder is None:\n raise ValueError('Model has not been build, so no vocab size')\n else:\n return self._encoder.vocab_size\n\n def _init_strategy(self):\n \"\"\"Initialize the distribution strategy (e.g. TPU/GPU/Mirrored).\"\"\"\n if self._strategy is None:\n if self._tpu is not None:\n resolver = distribute_utils.tpu_initialize(self._tpu)\n self._strategy = tf.distribute.experimental.TPUStrategy(resolver)\n elif self._distribution_strategy is None or self._distribution_strategy == 'default':\n self._strategy = tf.distribute.get_strategy()\n elif self._distribution_strategy == 'cpu':\n self._strategy = tf.distribute.OneDeviceStrategy('/device:cpu:0')\n else:\n if self._distribution_strategy == 'mirrored':\n self._strategy = tf.distribute.MirroredStrategy()\n else:\n raise ValueError(\n f'Invalid distribution strategy=\"{self._distribution_strategy}\"')\n\n def _build_tokenizer(self):\n \"\"\"Build the correct tokenizer depending on model encoder.\n\n Returns:\n Tokenizer for model\n \"\"\"\n if self._model_config.tokenizer == 'basic':\n base_tokenizer = tfds.deprecated.text.Tokenizer()\n return tokenizers.ReservedTokenizer(\n tokenizer=base_tokenizer, reserved_re=preprocessing.SEPARATOR_RE)\n elif self._model_config.tokenizer == 'bert':\n return tokenizers.BertTokenizer(\n vocab_file=self._model_config.bert_vocab_path, do_lower_case=True)\n else:\n raise ValueError('Invalid tokenizer')\n\n def _build_encoder(self, vocab,\n tokenizer):\n \"\"\"Build the encoder using the given vocab and tokenizer.\n\n Args:\n vocab: Vocab to build encoder from\n tokenizer: Tokenizer to build encoder from\n\n Returns:\n The built text encoder\n \"\"\"\n if self._model_config.text_encoder == 'basic':\n return preprocessing.BasicTextEncoder(\n vocab_list=vocab,\n tokenizer=tokenizer,\n lowercase=self._model_config.basic_lowercase,\n include_title=self._model_config.include_title,\n include_sentence_id=self._model_config.include_sentence_id,\n max_claim_tokens=self._model_config.max_claim_tokens,\n max_evidence_tokens=self._model_config.max_evidence_tokens,\n )\n elif self._model_config.text_encoder == 'bert':\n return preprocessing.BertTextEncoder(\n tokenizer=tokenizer,\n max_seq_length=self._model_config.bert_max_seq_length,\n include_title=self._model_config.include_title,\n 
include_sentence_id=self._model_config.include_sentence_id,\n )\n\n def _encode_and_batch(self,\n dataset,\n train=False,\n filter_claims=True,\n filter_evidence=True):\n \"\"\"Convert a tensorflow dataset of unbatched, text examples to TF batches.\n\n Args:\n dataset: TF Dataset to transform\n train: Whether to encode as training dataset\n filter_claims: Whether to filter zero length claims\n filter_evidence: Whether to filter zero length evidence\n\n Returns:\n encoded and batched dataset for keras fit\n \"\"\"\n encoded = self._encode_dataset(\n dataset, filter_claims=filter_claims, filter_evidence=filter_evidence)\n if train:\n encoded = encoded.shuffle(\n self._model_config.buffer_size, reshuffle_each_iteration=False)\n batched = self._batch_dataset(encoded)\n return batched\n\n def _compile(self):\n \"\"\"Compile the keras model using the correct scope.\"\"\"\n # pylint: disable=protected-access\n self._init_strategy()\n with self._strategy.scope():\n if self._model_config.model == 'two_tower':\n module_model = model.TwoTowerRanker(\n self.vocab_size,\n activation=self._model_config.activation,\n matcher_name=self._model_config.matcher,\n word_emb_size=self._model_config.word_emb_size,\n hidden_size=self._model_config.hidden_size,\n dropout=self._model_config.dropout,\n use_batch_norm=self._model_config.use_batch_norm,\n contextualizer=self._model_config.contextualizer,\n context_num_layers=self._model_config.context_num_layers,\n bidirectional=self._model_config.bidirectional,\n tied_encoders=self._model_config.tied_encoders,\n embedder_name=self._model_config.embedder,\n matcher_hidden_size=self._model_config.matcher_hidden_size,\n bert_model_name=self._model_config.bert_model_name,\n bert_model_path=self._model_config.bert_model_path,\n bert_trainable=self._model_config.bert_trainable,\n bert_dropout=self._model_config.bert_dropout,\n projection_dim=self._model_config.projection_dim,\n classify_claim=self._model_config.classify_claim,\n )\n self._inner_model = module_model\n # This hackery is necessary since keras doesn't handle dictionary inputs\n # well, so we have to manually specify input/output output shapes. 
Since\n # this is dependent on the model (e.g., bert vs other), let the encoder\n # yield this.\n inputs = self._encoder.compute_input_shapes()\n outputs = module_model(inputs)\n module_model.input_names = sorted(inputs.keys())\n module_model._feed_input_names = sorted(inputs.keys())\n module_model.output_names = sorted(\n ['evidence_matching', 'claim_classification'])\n self._model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self._model.input_names = sorted(inputs.keys())\n self._model._feed_input_names = sorted(inputs.keys())\n self._model.output_names = sorted(\n ['evidence_matching', 'claim_classification'])\n self._model.summary(line_length=500)\n elif self._model_config.model == 'one_tower':\n raise NotImplementedError()\n else:\n raise ValueError('Invalid model')\n metrics = {}\n evidence_metrics = [\n tf.keras.metrics.BinaryAccuracy(name='accuracy'),\n tf.keras.metrics.Precision(name='precision'),\n tf.keras.metrics.Recall(name='recall'),\n tf.keras.metrics.AUC(name='auc'),\n tf.keras.metrics.TruePositives(name='tp'),\n tf.keras.metrics.FalsePositives(name='fp'),\n tf.keras.metrics.TrueNegatives(name='tn'),\n tf.keras.metrics.FalsePositives(name='fn'),\n ]\n metrics['evidence_matching'] = evidence_metrics\n\n loss = {}\n loss['evidence_matching'] = losses.WeightedBinaryCrossentropyFromProbs(\n positive_class_weight=self._model_config.positive_class_weight)\n\n loss_weights = {\n 'evidence_matching': 1.0,\n 'claim_classification': self._model_config.claim_loss_weight\n }\n if self._model_config.classify_claim:\n # TODO(perodriguez): add claim classifier metrics\n claim_metrics = [\n tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),\n ]\n metrics['claim_classification'] = claim_metrics\n loss[\n 'claim_classification'] = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=False)\n else:\n loss['claim_classification'] = losses.ZeroLoss()\n metrics['claim_classification'] = []\n self._model.compile(\n loss=loss,\n optimizer=tf.keras.optimizers.Adam(self._model_config.learning_rate),\n metrics=metrics,\n loss_weights=loss_weights,\n )\n\n def train(self,\n *,\n epochs = None,\n steps_per_epoch = None,\n validation_steps = None):\n \"\"\"Prepare the dataset, callbacks, and model, then train/save it.\n\n Args:\n epochs: The number of epochs to train for, if None then default to\n early stopping (useful for debugging)\n steps_per_epoch: How many training steps to take, if None default to\n normal training (useful for debugging)\n validation_steps: How many validation steps to take, if None defualt to\n normal training (useful for debugging)\n \"\"\"\n logging.info('Preparing model with config:\\n%s', self._model_config)\n with util.log_time('Initial dataset read'):\n builder = fever_tfds.FeverEvidence(\n data_dir=self._model_config.dataset,\n n_similar_negatives=self._model_config.n_similar_negatives,\n n_background_negatives=self._model_config.n_background_negatives,\n train_scrape_type=self._model_config.scrape_type,\n include_not_enough_info=self._model_config.include_not_enough_info,\n title_in_scoring=self._model_config.title_in_scoring,\n )\n # Cache here to prevent hitting remote fs again\n train_dataset = (builder.as_dataset(split='train')).cache()\n val_dataset = builder.as_dataset(split='validation').cache()\n if self._debug:\n train_dataset = train_dataset.take(1000)\n if self._debug:\n val_dataset = val_dataset.take(200)\n\n self._tokenizer = self._build_tokenizer()\n self._vocab = list(self._build_vocab(train_dataset))\n self._encoder = 
self._build_encoder(self._vocab, self._tokenizer)\n\n train_batched = self._encode_and_batch(train_dataset, train=True)\n val_batched = self._encode_and_batch(val_dataset, train=False)\n # Cache the batch creation, but not the batchwise shuffle.\n train_batched = train_batched.cache().shuffle(\n 100,\n reshuffle_each_iteration=True).prefetch(tf.data.experimental.AUTOTUNE)\n # Cache the batched validation data.\n val_batched = val_batched.cache().prefetch(tf.data.experimental.AUTOTUNE)\n self._compile()\n model_callbacks = self._build_callbacks(val_batched)\n # Save enough to reconstruct anything except for the model.\n # The model itself is saved with the ModelCheckpoint callback.\n self._save_model_config()\n self._save_encoder()\n if epochs is None:\n epochs = self._model_config.max_epochs\n\n self._model.fit(\n train_batched,\n validation_data=val_batched,\n callbacks=model_callbacks,\n epochs=epochs,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps)\n logging.info('Model Summary:\\n%s', self._model.summary())\n # First load the best model.\n logging.info('Loading best model weights')\n self._model.load_weights(self.model_weight_path)\n logging.info('Saving dev predictions from best model')\n self._save_dev_predictions(val_batched)\n\n @property\n def model_weight_path(self):\n return os.path.join(self._model_config.model_checkpoint, 'best_model.tf')\n\n def _save_dev_predictions(self, val_batched):\n \"\"\"Save model predictions for the dev set.\n\n This is used to compute Fever F1 as stopping metric\n\n Args:\n val_batched: The batched validation set.\n \"\"\"\n unbatched = val_batched.unbatch()\n model_predictions = self._model.predict(val_batched)\n claim_probs = model_predictions['claim_classification']\n evidence_probs = model_predictions['evidence_matching']\n predictions = []\n # Extra _ is the label, which we don't need\n for (ex, _), claim_prob, evidence_prob in tqdm.tqdm(\n zip(unbatched, claim_probs, evidence_probs), mininterval=5):\n predictions.append({\n 'claim_prob': claim_prob.tolist(),\n 'evidence_prob': evidence_prob.tolist(),\n 'metadata': json.loads(ex['metadata'].numpy().decode('utf8'))\n })\n pred_path = os.path.join(self._model_config.model_checkpoint,\n 'val_predictions.json')\n with util.safe_open(pred_path, 'w') as f:\n json.dump({'predictions': predictions}, f)\n\n\n def predict(self, examples):\n \"\"\"Given examples in JSON format, predict evidence relevance.\n\n Args:\n examples: List of claim/evidence pairs to rank\n\n Returns:\n Scalar scores for each pair\n \"\"\"\n stacked = {\n 'claim_text': [],\n 'evidence_text': [],\n 'metadata': [],\n 'label': [],\n }\n for ex in examples:\n stacked['claim_text'].append(ex['claim_text'])\n stacked['evidence_text'].append(ex['evidence_text'])\n stacked['metadata'].append(ex['metadata'])\n stacked['label'].append(ex['label'])\n\n dataset = tf.data.Dataset.from_tensor_slices((stacked,))\n batched_examples = self._encode_and_batch(\n dataset, filter_claims=False, filter_evidence=False)\n preds = []\n for batch in batched_examples:\n # model.predict() is broken after model load so we have to do this\n # manually.\n preds.append(self._model(batch))\n return np.vstack(preds).reshape(-1).tolist()\n\n def embed(self, examples, *, as_claim,\n as_evidence): # Checker .tolist() -> Any\n \"\"\"Embed a list of evidence text.\n\n Args:\n examples: A list of evidence text to embed.\n as_claim: Whether to embed examples as claims\n as_evidence: Whether to embed examples as evidence\n\n Returns:\n A list of 
embeddings, one for each evidence text.\n\n \"\"\"\n stacked = {\n 'claim_text': [],\n 'evidence_text': [],\n 'metadata': [],\n 'label': [],\n }\n for text in examples:\n # Dummie value to make sure tokenizing works.\n if as_claim:\n stacked['claim_text'].append(text)\n else:\n stacked['claim_text'].append('a')\n if as_evidence:\n stacked['evidence_text'].append(text)\n else:\n stacked['evidence_text'].append('a')\n stacked['metadata'].append('')\n stacked['label'].append(tf.constant(0, dtype=tf.int64))\n\n dataset = tf.data.Dataset.from_tensor_slices((stacked,))\n batched_examples = self._encode_and_batch(\n dataset, filter_claims=False, filter_evidence=False)\n claim_preds = []\n ev_preds = []\n for batch in batched_examples:\n # model.predict() is broken after model load due to missing shapes, so\n # have to do our own batching/unbatching.\n inputs, _ = batch\n claim_encoding, ev_encoding = self._model(\n inputs, embed_claim=as_claim, embed_evidence=as_evidence)\n claim_preds.append(claim_encoding)\n ev_preds.append(ev_encoding)\n return np.vstack(claim_preds).tolist(), np.vstack(ev_preds).tolist()\n\n def embed_wiki_dataset(self, dataset):\n \"\"\"Embed the wikipedia/evidence only dataset.\n\n Args:\n dataset: The wikipedia only dataset (e.g. wiki_tfds.py)\n\n Returns:\n Aligned wikipedia_urls, sentence_ids, and embeddings of model\n \"\"\"\n\n # map_fn and tf_map_fn transform the dataset to the same format as\n # tfds_evidence/the one the model expects\n def map_fn(text, wikipedia_url, sentence_id):\n return ('a', text, wikipedia_url, str(sentence_id),\n json.dumps({\n 'sentence_id': int(sentence_id.numpy()),\n 'wikipedia_url': wikipedia_url.numpy().decode('utf8')\n }))\n\n def tf_map_fn(example):\n tensors = tf.py_function(\n map_fn,\n inp=[\n example['text'], example['wikipedia_url'], example['sentence_id']\n ],\n Tout=(tf.string, tf.string, tf.string, tf.string, tf.string))\n return {\n 'claim_text': tensors[0],\n 'evidence_text': tensors[1],\n 'wikipedia_url': tensors[2],\n 'sentence_id': tensors[3],\n 'claim_label': tf.constant(0, dtype=tf.int64),\n 'evidence_label': tf.constant(0, dtype=tf.int64),\n 'metadata': tensors[4]\n }\n\n formatted_ds = dataset.map(tf_map_fn)\n batched_examples = self._encode_and_batch(\n formatted_ds, filter_claims=False, filter_evidence=False)\n preds = []\n wikipedia_urls = []\n sentence_ids = []\n for batch in tqdm.tqdm(batched_examples, mininterval=5):\n # model.predict() is broken after model load due to missing shapes, so\n # have to do our own batching/unbatching.\n inputs, _ = batch\n _, ev_encoding = self._inner_model(\n inputs, embed_claim=False, embed_evidence=True)\n for m in inputs['metadata'].numpy():\n key = json.loads(m.decode('utf8'))\n wikipedia_urls.append(key['wikipedia_url'])\n sentence_ids.append(key['sentence_id'])\n preds.append(ev_encoding)\n\n return np.array(wikipedia_urls), np.array(sentence_ids), np.vstack(preds)\n\n def embed_claim_dataset(self, dataset):\n \"\"\"Embed the claim only dataset and save them with claim_ids.\n\n Args:\n dataset: The claims only dataset (e.g. 
claim_tfds.py)\n\n Returns:\n Aligned claim ids and embeddings from the model\n \"\"\"\n batched_examples = self._encode_and_batch(\n dataset, filter_claims=False, filter_evidence=False)\n claim_ids = []\n embeddings = []\n for batch in tqdm.tqdm(batched_examples, mininterval=5):\n # model.predict() is broken after model load due to missing shapes, so\n # have to do our own batching/unbatching.\n inputs, _ = batch\n # Cannot use self._model since it does not take extra arguments. Since\n # we're not using the keras API (namey .predict()), we can just use the\n # underlying model stored in self._inner_model.\n claim_encoding, _ = self._inner_model(\n inputs, embed_claim=True, embed_evidence=False)\n for m in inputs['metadata'].numpy():\n key = json.loads(m.decode('utf8'))\n claim_ids.append(int(key['claim_id']))\n embeddings.append(claim_encoding)\n\n return np.array(claim_ids), np.vstack(embeddings)\n\n def _build_callbacks(self, val_batched):\n \"\"\"Build the callbacks used during training.\"\"\"\n cns_model_checkpoint = util.safe_path(\n os.path.join(self._model_config.model_checkpoint, 'best_model.tf'))\n model_callbacks = [\n # Note: Order matters here, particularly that FeverMetricsCallback\n # comes before tensorboard so it can write to the log dictionary\n # and TB picks it up.\n callbacks.FeverMetricsCallback(\n validation_batched=val_batched,\n debug=self._debug,\n fever_dev_path=self._model_config.fever_dev_path,\n max_evidence=self._model_config.max_evidence,\n checkpoint_dir=self._model_config.model_checkpoint,\n ),\n # TODO(perodriguez): Determine a better thing to stop on\n tf.keras.callbacks.EarlyStopping(\n monitor='val_loss',\n min_delta=.001,\n patience=3,\n verbose=1,\n mode='min'),\n # TODO(perodriguez): Determine a better thing to save on\n # Checkpointing also needs to know about fever recall.\n tf.keras.callbacks.ModelCheckpoint(\n filepath=cns_model_checkpoint,\n save_best_only=True,\n monitor='val_loss',\n mode='min',\n verbose=1,\n # There is no support for GRU/LSTM Dropout with normal save\n save_weights_only=True,\n ),\n ]\n\n if self._tb_log_dir is not None:\n model_callbacks.append(\n tf.keras.callbacks.TensorBoard(log_dir=self._tb_log_dir))\n return model_callbacks\n\n def _batch_dataset(self, dataset):\n \"\"\"Batch the dataset depending on what model is used.\n\n Args:\n dataset: A dataset to batch\n\n Returns:\n A batched dataset with correct padding shapes.\n \"\"\"\n return dataset.padded_batch(\n batch_size=self._model_config.batch_size,\n padded_shapes=(\n self._encoder.padded_shapes(),\n # Must match losses in training.py\n {\n 'claim_classification': [],\n 'evidence_matching': []\n }))\n\n def _encode_dataset(self,\n dataset,\n filter_claims=True,\n filter_evidence=True):\n \"\"\"Convert the tfds dataset to numbers by tokenizing/embedding.\"\"\"\n encode = self._encoder.build_encoder_fn()\n encoded_data = dataset.map(\n encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n if filter_claims:\n encoded_data = encoded_data.filter(preprocessing.filter_claim_fn)\n if filter_evidence:\n encoded_data = encoded_data.filter(preprocessing.filter_evidence_fn)\n\n return encoded_data\n\n def _build_vocab(self, dataset):\n \"\"\"Build the vocabulary and encoder from the dataset.\n\n Args:\n dataset: The dataset to build vocab from.\n\n Returns:\n The vocabulary in the dataset, or empty vocab if using bert\n \"\"\"\n # If we are using bert, then we do not need to build the vocab\n # since its already defined\n if self._model_config.tokenizer == 'bert' and 
self._model_config.text_encoder == 'bert':\n logging.info('Using bert, skipping vocabulary creation')\n return set()\n\n if self._tokenizer is None:\n raise ValueError('Cannot build vocab without a tokenizer.')\n claim_lengths = []\n evidence_lengths = []\n vocab = set()\n for example in tqdm.tqdm(dataset, mininterval=5):\n tokenized_claim, tokenized_evidence = self._tokenize_example(example)\n claim_lengths.append(len(tokenized_claim))\n evidence_lengths.append(len(tokenized_evidence))\n vocab.update(tokenized_claim)\n vocab.update(tokenized_evidence)\n logging.info('Build vocab of size (without padding): %s', len(vocab))\n logging.info('Claim length statistics')\n logging.info('Max: %s', max(claim_lengths))\n logging.info('Min: %s', min(claim_lengths))\n claim_percentiles = np.percentile(claim_lengths, [50, 90, 95, 99]).tolist()\n logging.info('50/90/95/99: %s', str(claim_percentiles))\n logging.info('Evidence length statistics')\n logging.info('Max: %s', max(evidence_lengths))\n logging.info('Min: %s', min(evidence_lengths))\n evidence_percentiles = np.percentile(evidence_lengths,\n [50, 90, 95, 99]).tolist()\n logging.info('50/90/95/99: %s', str(evidence_percentiles))\n self._vocab_stats['claim_max'] = max(claim_lengths)\n self._vocab_stats['claim_min'] = min(claim_lengths)\n self._vocab_stats['claim_percentiles'] = claim_percentiles\n self._vocab_stats['evidence_max'] = max(evidence_lengths)\n self._vocab_stats['evidence_min'] = min(evidence_lengths)\n self._vocab_stats['evidence_percentiles'] = evidence_percentiles\n return vocab\n\n def _tokenize_example(self, example):\n tokenized_claim = self._tokenizer.tokenize(\n example['claim_text'].numpy().decode('utf8'))\n tokenized_evidence = self._tokenizer.tokenize(\n example['evidence_text'].numpy().decode('utf8'))\n return tokenized_claim, tokenized_evidence\n" ]
[ [ "tensorflow.compat.v2.distribute.get_strategy", "tensorflow.compat.v2.keras.callbacks.ModelCheckpoint", "tensorflow.compat.v2.data.Dataset.from_tensor_slices", "tensorflow.compat.v2.keras.metrics.Precision", "tensorflow.compat.v2.keras.metrics.TrueNegatives", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.keras.losses.SparseCategoricalCrossentropy", "tensorflow.compat.v2.keras.metrics.Recall", "numpy.vstack", "tensorflow.compat.v2.keras.optimizers.Adam", "tensorflow.compat.v2.keras.metrics.BinaryAccuracy", "tensorflow.compat.v2.distribute.OneDeviceStrategy", "tensorflow.compat.v2.keras.callbacks.TensorBoard", "tensorflow.compat.v2.keras.metrics.FalsePositives", "tensorflow.compat.v2.distribute.MirroredStrategy", "tensorflow.compat.v2.keras.metrics.AUC", "tensorflow.compat.v2.keras.Model", "numpy.percentile", "tensorflow.compat.v2.keras.metrics.TruePositives", "numpy.array", "tensorflow.compat.v2.keras.metrics.SparseCategoricalAccuracy", "tensorflow.compat.v2.keras.callbacks.EarlyStopping", "tensorflow.compat.v2.py_function", "tensorflow.compat.v2.distribute.experimental.TPUStrategy" ] ]
AthKouloumvakos/sunpy
[ "686a9c455e5b725feb005b91b74ce000368f0654" ]
[ "sunpy/coordinates/frames.py" ]
[ "\"\"\"\nCommon solar physics coordinate systems.\n\nThis submodule implements various solar physics coordinate frames for use with\nthe `astropy.coordinates` module.\n\"\"\"\nfrom contextlib import contextmanager\n\nimport numpy as np\n\nimport astropy.units as u\nfrom astropy.coordinates import ConvertError, QuantityAttribute\nfrom astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping\nfrom astropy.coordinates.representation import (\n CartesianDifferential,\n CartesianRepresentation,\n CylindricalRepresentation,\n SphericalDifferential,\n SphericalRepresentation,\n UnitSphericalRepresentation,\n)\nfrom astropy.time import Time\n\nfrom sunpy.sun.constants import radius as _RSUN\nfrom sunpy.time.time import _variables_for_parse_time_docstring\nfrom sunpy.util.decorators import add_common_docstring\nfrom sunpy.util.exceptions import SunpyUserWarning\nfrom .frameattributes import ObserverCoordinateAttribute, TimeFrameAttributeSunPy\n\n_J2000 = Time('J2000.0', scale='tt')\n\n__all__ = ['SunPyBaseCoordinateFrame', 'BaseHeliographic',\n 'HeliographicStonyhurst', 'HeliographicCarrington',\n 'Heliocentric', 'Helioprojective',\n 'HeliocentricEarthEcliptic', 'GeocentricSolarEcliptic',\n 'HeliocentricInertial', 'GeocentricEarthEquatorial']\n\n\ndef _frame_parameters():\n \"\"\"\n Returns formatting dictionary to use with add_common_docstring to populate frame docstrings\n \"\"\"\n ret = {}\n\n # Each text block is missing the first indent because it already exists in the frame docstring\n ret['data'] = (\"data : `~astropy.coordinates.BaseRepresentation` or ``None``\\n\"\n \" A representation object or ``None`` to have no data\\n\"\n \" (or use the coordinate component arguments, see below).\")\n ret['common'] = (f\"obstime : {_variables_for_parse_time_docstring()['parse_time_types']}\\n\"\n \" The time of the observation. This is used to determine the\\n\"\n \" position of solar-system bodies (e.g., the Sun and the Earth) as\\n\"\n \" needed to define the origin and orientation of the frame.\\n\"\n \" representation_type : `~astropy.coordinates.BaseRepresentation`, str, optional\\n\"\n \" A representation class or string name of a representation class.\\n\"\n \" This may change the valid coordinate component arguments from the\\n\"\n \" defaults (see above). For example, passing\\n\"\n \" ``representation_type='cartesian'`` will make the frame expect\\n\"\n \" Cartesian coordinate component arguments (typically, ``x``, ``y``,\\n\"\n \" and ``z``).\\n\"\n \" copy : bool, optional\\n\"\n \" If `True` (default), make copies of the input coordinate arrays.\")\n ret['lonlat'] = (\"lon : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`, optional\\n\"\n \" The longitude coordinate for this object (``lat`` must also be\\n\"\n \" given and ``data`` must be ``None``).\\n\"\n \" Not needed if ``data`` is given.\\n\"\n \" lat : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`, optional\\n\"\n \" The latitude coordinate for this object (``lon`` must also be\\n\"\n \" given and ``data`` must be ``None``).\\n\"\n \" Not needed if ``data`` is given.\")\n ret['radius'] = (\"radius : `~astropy.units.Quantity`, optional\\n\"\n \" The radial distance coordinate from Sun center for this object.\\n\"\n \" Defaults to the radius of the Sun. 
Not needed if ``data`` is given.\")\n ret['distance_sun'] = (\"distance : `~astropy.units.Quantity`, optional\\n\"\n \" The distance coordinate from Sun center for this object.\\n\"\n \" Not needed if ``data`` is given.\")\n ret['distance_earth'] = (\"distance : `~astropy.units.Quantity`, optional\\n\"\n \" The distance coordinate from Earth center for this object.\\n\"\n \" Not needed if ``data`` is given.\")\n ret['xyz'] = (\"x : `~astropy.units.Quantity`, optional\\n\"\n \" X-axis coordinate for this object. Not needed if ``data`` is given.\\n\"\n \" y : `~astropy.units.Quantity`, optional\\n\"\n \" Y-axis coordinate for this object. Not needed if ``data`` is given.\\n\"\n \" z : `~astropy.units.Quantity`, optional\\n\"\n \" Z-axis coordinate for this object. Not needed if ``data`` is given.\")\n ret['observer'] = (\"observer : `~sunpy.coordinates.frames.HeliographicStonyhurst`, str\\n\"\n \" The location of the observer. If a string is provided,\\n\"\n \" it must be a solar system body that can be parsed by\\n\"\n \" `~sunpy.coordinates.ephemeris.get_body_heliographic_stonyhurst`\\n\"\n \" at the time ``obstime``. Defaults to Earth center.\")\n ret['rsun'] = (\"rsun : `~astropy.units.Quantity`\\n\"\n \" The radius of the Sun in length units. Used to convert a 2D\\n\"\n \" coordinate (i.e., no ``radius`` component) to a 3D coordinate by\\n\"\n \" assuming that the coordinate is on the surface of the Sun. Defaults\\n\"\n \" to the photospheric radius as defined in `sunpy.sun.constants`.\")\n ret['equinox'] = (f\"equinox : {_variables_for_parse_time_docstring()['parse_time_types']}\\n\"\n \" The date for the mean vernal equinox.\\n\"\n \" Defaults to the J2000.0 equinox.\")\n\n return ret\n\n\nclass SunPyBaseCoordinateFrame(BaseCoordinateFrame):\n \"\"\"\n Base class for sunpy coordinate frames.\n\n This class is not intended to be used directly and has no transformations defined.\n\n * Defines the frame attribute ``obstime`` for observation time.\n * Defines a default wrap angle of 180 degrees for longitude in spherical coordinates,\n which can be overridden via the class variable ``_wrap_angle``.\n * Inject a nice way of representing the object which the coordinate represents.\n \"\"\"\n obstime = TimeFrameAttributeSunPy()\n\n default_representation = SphericalRepresentation\n default_differential = SphericalDifferential\n\n frame_specific_representation_info = {\n SphericalDifferential: [RepresentationMapping('d_lon', 'd_lon', u.arcsec/u.s),\n RepresentationMapping('d_lat', 'd_lat', u.arcsec/u.s),\n RepresentationMapping('d_distance', 'd_distance', u.km/u.s)],\n }\n\n _wrap_angle = 180*u.deg # for longitude in spherical coordinates\n\n def __init__(self, *args, **kwargs):\n self.object_name = None\n\n # If wrap_longitude=False is passed in, do not impose a specific wrap angle for the frame\n if not kwargs.pop('wrap_longitude', True):\n self._wrap_angle = None\n\n super().__init__(*args, **kwargs)\n\n # If obstime is specified, treat the default observer (None) as explicitly set\n if self.obstime is not None and self.is_frame_attr_default('observer'):\n self._attr_names_with_defaults.remove('observer')\n\n return\n\n def represent_as(self, base, s='base', in_frame_units=False):\n data = super().represent_as(base, s, in_frame_units=in_frame_units)\n\n # If a frame wrap angle is set, use that wrap angle for any spherical representations.\n if self._wrap_angle is not None and \\\n isinstance(data, (UnitSphericalRepresentation, SphericalRepresentation)):\n data.lon.wrap_angle = 
self._wrap_angle\n return data\n\n def __str__(self):\n # We override this here so that when you print a SkyCoord it shows the\n # observer as the string and not the whole massive coordinate.\n if getattr(self, \"object_name\", None):\n return f\"<{self.__class__.__name__} Coordinate for '{self.object_name}'>\"\n else:\n return super().__str__()\n\n @property\n def _is_2d(self):\n return (self._data is not None and self._data.norm().unit is u.one\n and u.allclose(self._data.norm(), 1*u.one))\n\n def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n\n # TODO: Remove this after the minimum Astropy dependency includes astropy/astropy#12005\n cls._fix_property_docstrings()\n\n @classmethod\n def _fix_property_docstrings(cls):\n # This class method adds docstrings to properties dynamically created by\n # BaseCoordinateFrame.__init_subclass__(). Accordingly, this method needs to itself be\n # called from SunPyBaseCoordinateFrame.__init_subclass__() to work for our subclasses.\n property_docstrings = {\n 'default_representation': \"Default representation for position data\",\n 'default_differential': \"Default representation for differential data\",\n 'frame_specific_representation_info': \"Mapping for frame-specific component names\",\n }\n for prop, docstring in property_docstrings.items():\n if getattr(cls, prop).__doc__ is None:\n setattr(getattr(cls, prop), '__doc__', docstring)\n\n\n# TODO: Remove this after the minimum Astropy dependency includes astropy/astropy#12005\nSunPyBaseCoordinateFrame._fix_property_docstrings()\n\n\nclass BaseHeliographic(SunPyBaseCoordinateFrame):\n \"\"\"\n Base class for HeliographicCarrington (HGC) and HeliographicStonyhurst (HGS) frames.\n\n This class is not intended to be used directly and has no transformations defined.\n \"\"\"\n frame_specific_representation_info = {\n SphericalRepresentation: [RepresentationMapping('lon', 'lon', u.deg),\n RepresentationMapping('lat', 'lat', u.deg),\n RepresentationMapping('distance', 'radius', None)],\n SphericalDifferential: [RepresentationMapping('d_lon', 'd_lon', u.arcsec/u.s),\n RepresentationMapping('d_lat', 'd_lat', u.arcsec/u.s),\n RepresentationMapping('d_distance', 'd_radius', u.km/u.s)],\n }\n\n rsun = QuantityAttribute(default=_RSUN, unit=u.km)\n\n def make_3d(self):\n \"\"\"\n Returns a fully 3D coordinate based on this coordinate.\n\n If this coordinate is only 2D (i.e., no ``radius`` component) or is a\n unit vector (i.e., the norm of the coordinate is unity), a new\n coordinate is created that corresponds to the surface of the Sun.\n That is, the 3D coordinate will retain the ``lon`` and ``lat``, and\n ``radius`` will be set to the frame's ``rsun`` frame attribute.\n\n If this coordinate is already fully 3D, it is directly returned, even\n if it does not lie on the surface of the Sun.\n\n Returns\n -------\n frame : `~sunpy.coordinates.frames.BaseHeliographic`\n The fully 3D coordinate\n \"\"\"\n if self._is_2d:\n return self.realize_frame(self._data * self.rsun)\n\n # The coordinate is already 3D\n return self\n\n\n@add_common_docstring(**_frame_parameters())\nclass HeliographicStonyhurst(BaseHeliographic):\n \"\"\"\n A coordinate or frame in the Stonyhurst Heliographic (HGS) system.\n\n - The origin is the center of the Sun.\n - The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.\n - The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the projection of\n the Sun-Earth line onto the Sun's equatorial plane.\n\n This system is also know as the 
Heliocentric Earth Equatorial (HEEQ) system when\n represented using Cartesian components.\n\n A new instance can be created using the following signatures\n (note that if supplied, ``obstime`` and ``representation_type`` must be\n keyword arguments)::\n\n HeliographicStonyhurst(lon, lat, obstime=obstime)\n HeliographicStonyhurst(lon, lat, radius, obstime=obstime)\n HeliographicStonyhurst(x, y, z, representation_type='cartesian', obstime=obstime)\n\n Parameters\n ----------\n {data}\n {lonlat}\n {radius}\n {rsun}\n {common}\n\n Examples\n --------\n >>> from astropy.coordinates import SkyCoord\n >>> import sunpy.coordinates\n >>> import astropy.units as u\n >>> sc = SkyCoord(1*u.deg, 1*u.deg, 2*u.km,\n ... frame=\"heliographic_stonyhurst\",\n ... obstime=\"2010/01/01T00:00:45\")\n >>> sc\n <SkyCoord (HeliographicStonyhurst: obstime=2010-01-01T00:00:45.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)\n (1., 1., 2.)>\n >>> sc.frame\n <HeliographicStonyhurst Coordinate (obstime=2010-01-01T00:00:45.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)\n (1., 1., 2.)>\n >>> sc = SkyCoord(HeliographicStonyhurst(-10*u.deg, 2*u.deg))\n >>> sc\n <SkyCoord (HeliographicStonyhurst: obstime=None, rsun=695700.0 km): (lon, lat) in deg\n (-10., 2.)>\n >>> sc = SkyCoord(CartesianRepresentation(0*u.km, 45*u.km, 2*u.km),\n ... obstime=\"2011/01/05T00:00:50\",\n ... frame=\"heliographic_stonyhurst\")\n >>> sc\n <SkyCoord (HeliographicStonyhurst: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km): (lon, lat, radius) in (deg, deg, km)\n (90., 2.54480438, 45.04442252)>\n \"\"\"\n name = \"heliographic_stonyhurst\"\n\n\n@add_common_docstring(**_frame_parameters())\nclass HeliographicCarrington(BaseHeliographic):\n \"\"\"\n A coordinate or frame in the Carrington Heliographic (HGC) system.\n\n - The origin is the center of the Sun.\n - The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.\n - The X-axis and Y-axis rotate with a period of 25.38 days.\n\n This system differs from Stonyhurst Heliographic (HGS) in its definition of longitude. This\n longitude is an \"apparent\" longitude because it takes into account the time it takes for light\n to travel from the Sun's surface to the observer. Thus, the observer needs to be specified to\n be able to transform to any other coordinate frame.\n\n A new instance can be created using the following signatures\n (note that if supplied, ``obstime`` and ``observer`` must be a keyword argument)::\n\n HeliographicCarrington(lon, lat, obstime=obstime, observer=observer)\n HeliographicCarrington(lon, lat, radius, obstime=obstime, observer=observer)\n\n If you want to define the location in HGC such that the observer for the coordinate frame is\n the same as that location (e.g., the location of an observatory in its corresponding HGC\n frame), use ``observer='self'``::\n\n HeliographicCarrington(lon, lat, radius, obstime=obstime, observer='self')\n\n Parameters\n ----------\n {data}\n {lonlat}\n {radius}\n {observer}\n {rsun}\n {common}\n\n Examples\n --------\n >>> from astropy.coordinates import SkyCoord\n >>> import sunpy.coordinates\n >>> import astropy.units as u\n >>> sc = SkyCoord(1*u.deg, 2*u.deg, 3*u.km,\n ... frame=\"heliographic_carrington\",\n ... observer=\"earth\",\n ... 
obstime=\"2010/01/01T00:00:30\")\n >>> sc\n <SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:30.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (lon, lat, radius) in (deg, deg, km)\n (1., 2., 3.)>\n\n >>> sc = SkyCoord([1,2,3]*u.deg, [4,5,6]*u.deg, [5,6,7]*u.km,\n ... obstime=\"2010/01/01T00:00:45\",\n ... observer=\"self\",\n ... frame=\"heliographic_carrington\")\n >>> sc\n <SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:45.000, rsun=695700.0 km, observer=self): (lon, lat, radius) in (deg, deg, km)\n [(1., 4., 5.), (2., 5., 6.), (3., 6., 7.)]>\n\n >>> sc = SkyCoord(CartesianRepresentation(0*u.km, 45*u.km, 2*u.km),\n ... obstime=\"2011/01/05T00:00:50\",\n ... frame=\"heliographic_carrington\")\n >>> sc\n <SkyCoord (HeliographicCarrington: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km, observer=None): (lon, lat, radius) in (deg, deg, km)\n (90., 2.54480438, 45.04442252)>\n \"\"\"\n name = \"heliographic_carrington\"\n _wrap_angle = 360*u.deg\n\n observer = ObserverCoordinateAttribute(HeliographicStonyhurst)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not isinstance(self.observer, BaseCoordinateFrame) and self.observer == 'self' and self._is_2d:\n raise ValueError(\"Full 3D coordinate (including radius) must be specified \"\n \"when observer='self'.\")\n\n\n@add_common_docstring(**_frame_parameters())\nclass Heliocentric(SunPyBaseCoordinateFrame):\n \"\"\"\n A coordinate or frame in the Heliocentric system, which is observer-based.\n\n - The origin is the center of the Sun.\n - The Z-axis is aligned with the Sun-observer line.\n - The Y-axis is aligned with the component of the vector to the Sun's north pole that is\n perpendicular to the Z-axis.\n\n This frame defaults to a Cartesian component representation, which is known as Heliocentric\n Cartesian (HCC). This frame can also be represented using cylindrical components, where\n where ``rho`` is the impact parameter and ``psi`` is the position angle.\n ``psi`` is measured relative to the west limb, rather than solar north, so is shifted\n by 90 degrees compared to the convention of the Heliocentric Radial (HCR) system.\n\n A new instance can be created using the following signatures\n (note that if supplied, ``obstime``, ``observer``, and ``representation_type`` must be\n keyword arguments)::\n\n Heliocentric(x, y, z, obstime=obstime, observer=observer)\n Heliocentric(rho, psi, z, representation_type='cylindrical', obstime=obstime, observer=observer)\n\n Parameters\n ----------\n {data}\n {xyz}\n {observer}\n {common}\n\n Examples\n --------\n\n >>> from astropy.coordinates import SkyCoord, CartesianRepresentation\n >>> import sunpy.coordinates\n >>> import astropy.units as u\n\n >>> sc = SkyCoord(CartesianRepresentation(10*u.km, 1*u.km, 2*u.km),\n ... obstime=\"2011/01/05T00:00:50\", observer=\"earth\", frame=\"heliocentric\")\n >>> sc\n <SkyCoord (Heliocentric: obstime=2011-01-05T00:00:50.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in km\n (10., 1., 2.)>\n\n >>> sc = SkyCoord([1,2]*u.km, [3,4]*u.m, [5,6]*u.cm,\n ... obstime=\"2011/01/01T00:00:54\", observer=\"earth\", frame=\"heliocentric\")\n >>> sc\n <SkyCoord (Heliocentric: obstime=2011-01-01T00:00:54.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in (km, m, cm)\n [(1., 3., 5.), (2., 4., 6.)]>\n\n >>> sc = SkyCoord(CylindricalRepresentation(10*u.km, 60*u.deg, 10*u.km),\n ... 
obstime=\"2011/01/05T00:00:50\", observer=\"earth\", frame=\"heliocentric\")\n >>> sc\n <SkyCoord (Heliocentric: obstime=2011-01-05T00:00:50.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in km\n (5., 8.66025404, 10.)>\n \"\"\"\n default_representation = CartesianRepresentation\n default_differential = CartesianDifferential\n\n frame_specific_representation_info = {\n CylindricalRepresentation: [RepresentationMapping('phi', 'psi', u.deg)]\n }\n\n observer = ObserverCoordinateAttribute(HeliographicStonyhurst)\n\n def represent_as(self, base, s='base', in_frame_units=False):\n data = super().represent_as(base, s, in_frame_units=in_frame_units)\n\n # For cylindrical representations, wrap the `psi` component (natively `phi`) at 360 deg\n if isinstance(data, CylindricalRepresentation):\n data.phi.wrap_at(360*u.deg, inplace=True)\n return data\n\n\n@add_common_docstring(**_frame_parameters())\nclass Helioprojective(SunPyBaseCoordinateFrame):\n \"\"\"\n A coordinate or frame in the Helioprojective Cartesian (HPC) system, which is observer-based.\n\n - The origin is the location of the observer.\n - ``Tx`` (aka \"theta_x\") is the angle relative to the plane containing the Sun-observer line\n and the Sun's rotation axis, with positive values in the direction of the Sun's west limb.\n - ``Ty`` (aka \"theta_y\") is the angle relative to the Sun's equatorial plane, with positive\n values in the direction of the Sun's north pole.\n - ``distance`` is the Sun-observer distance.\n\n This system is frequently used in a projective form without ``distance`` specified. For\n observations looking very close to the center of the Sun, where the small-angle approximation\n is appropriate, ``Tx`` and ``Ty`` can be approximated as Cartesian components.\n\n A new instance can be created using the following signatures\n (note that if supplied, ``obstime`` and ``observer`` must be keyword arguments)::\n\n Helioprojective(Tx, Ty, obstime=obstime, observer=observer)\n Helioprojective(Tx, Ty, distance, obstime=obstime, observer=observer)\n\n Parameters\n ----------\n {data}\n Tx : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`\n The theta_x coordinate for this object. Not needed if ``data`` is given.\n Ty : `~astropy.coordinates.Angle` or `~astropy.units.Quantity`\n The theta_y coordinate for this object. Not needed if ``data`` is given.\n distance : `~astropy.units.Quantity`\n The distance coordinate from the observer for this object.\n Not needed if ``data`` is given.\n {observer}\n {rsun}\n {common}\n\n Examples\n --------\n >>> from astropy.coordinates import SkyCoord\n >>> import sunpy.coordinates\n >>> import astropy.units as u\n >>> sc = SkyCoord(0*u.deg, 0*u.deg, 5*u.km,\n ... obstime=\"2010/01/01T00:00:00\", observer=\"earth\", frame=\"helioprojective\")\n >>> sc\n <SkyCoord (Helioprojective: obstime=2010-01-01T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, km)\n (0., 0., 5.)>\n >>> sc = SkyCoord(0*u.deg, 0*u.deg,\n ... obstime=\"2010/01/01T00:00:00\", observer=\"earth\", frame=\"helioprojective\")\n >>> sc\n <SkyCoord (Helioprojective: obstime=2010-01-01T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty) in arcsec\n (0., 0.)>\n >>> sc = SkyCoord(CartesianRepresentation(1*u.AU, 1e5*u.km, -2e5*u.km),\n ... 
obstime=\"2011/01/05T00:00:50\", observer=\"earth\", frame=\"helioprojective\")\n >>> sc\n <SkyCoord (Helioprojective: obstime=2011-01-05T00:00:50.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)\n (137.87948623, -275.75878762, 1.00000112)>\n \"\"\"\n frame_specific_representation_info = {\n SphericalRepresentation: [RepresentationMapping('lon', 'Tx', u.arcsec),\n RepresentationMapping('lat', 'Ty', u.arcsec),\n RepresentationMapping('distance', 'distance', None)],\n SphericalDifferential: [RepresentationMapping('d_lon', 'd_Tx', u.arcsec/u.s),\n RepresentationMapping('d_lat', 'd_Ty', u.arcsec/u.s),\n RepresentationMapping('d_distance', 'd_distance', u.km/u.s)],\n UnitSphericalRepresentation: [RepresentationMapping('lon', 'Tx', u.arcsec),\n RepresentationMapping('lat', 'Ty', u.arcsec)],\n }\n\n rsun = QuantityAttribute(default=_RSUN, unit=u.km)\n observer = ObserverCoordinateAttribute(HeliographicStonyhurst)\n\n @property\n def angular_radius(self):\n \"\"\"\n Angular radius of the Sun as seen by the observer.\n\n The ``rsun`` frame attribute is the radius of the Sun in length units.\n The tangent vector from the observer to the edge of the Sun forms a\n right-angle triangle with the radius of the Sun as the far side and the\n Sun-observer distance as the hypotenuse. Thus, the sine of the angular\n radius of the Sun is ratio of these two distances.\n \"\"\"\n from sunpy.coordinates.sun import _angular_radius # avoiding a circular import\n\n if not isinstance(self.observer, HeliographicStonyhurst):\n if self.observer is None:\n raise ValueError(\"The observer must be defined, not `None`.\")\n raise ValueError(\"The observer must be fully defined by specifying `obstime`.\")\n return _angular_radius(self.rsun, self.observer.radius)\n\n def make_3d(self):\n \"\"\"\n This method calculates the third coordinate of the Helioprojective\n frame. It assumes that the coordinate point is on the surface of the Sun.\n\n If a point in the frame is off limb then NaN will be returned.\n\n Returns\n -------\n new_frame : `~sunpy.coordinates.frames.Helioprojective`\n A new frame instance with all the attributes of the original but\n now with a third coordinate.\n \"\"\"\n # Skip if we already are 3D\n if not self._is_2d:\n return self\n\n if not isinstance(self.observer, BaseCoordinateFrame):\n raise ConvertError(\"Cannot calculate distance to the Sun \"\n f\"for observer '{self.observer}' \"\n \"without `obstime` being specified.\")\n\n rep = self.represent_as(UnitSphericalRepresentation)\n lat, lon = rep.lat, rep.lon\n\n # Check for the use of floats with lower precision than the native Python float\n if not set([lon.dtype.type, lat.dtype.type]).issubset([float, np.float64, np.longdouble]):\n raise SunpyUserWarning(\"The Helioprojective component values appear to be lower \"\n \"precision than the native Python float: \"\n f\"Tx is {lon.dtype.name}, and Ty is {lat.dtype.name}. 
\"\n \"To minimize precision loss, you may want to cast the values to \"\n \"`float` or `numpy.float64` via the NumPy method `.astype()`.\")\n\n # Calculate the distance to the surface of the Sun using the law of cosines\n cos_alpha = np.cos(lat) * np.cos(lon)\n c = self.observer.radius**2 - self.rsun**2\n b = -2 * self.observer.radius * cos_alpha\n # Ignore sqrt of NaNs\n with np.errstate(invalid='ignore'):\n d = ((-1*b) - np.sqrt(b**2 - 4*c)) / 2 # use the \"near\" solution\n\n if self._spherical_screen:\n sphere_center = self._spherical_screen['center'].transform_to(self).cartesian\n c = sphere_center.norm()**2 - self._spherical_screen['radius']**2\n b = -2 * sphere_center.dot(rep)\n # Ignore sqrt of NaNs\n with np.errstate(invalid='ignore'):\n dd = ((-1*b) + np.sqrt(b**2 - 4*c)) / 2 # use the \"far\" solution\n\n d = np.fmin(d, dd) if self._spherical_screen['only_off_disk'] else dd\n\n return self.realize_frame(SphericalRepresentation(lon=lon,\n lat=lat,\n distance=d))\n\n _spherical_screen = None\n\n @classmethod\n @contextmanager\n def assume_spherical_screen(cls, center, only_off_disk=False):\n \"\"\"\n Context manager to interpret 2D coordinates as being on the inside of a spherical screen.\n\n The radius of the screen is the distance between the specified ``center`` and Sun center.\n This ``center`` does not have to be the same as the observer location for the coordinate\n frame. If they are the same, then this context manager is equivalent to assuming that the\n helioprojective \"zeta\" component is zero.\n\n This replaces the default assumption where 2D coordinates are mapped onto the surface of the\n Sun.\n\n Parameters\n ----------\n center : `~astropy.coordinates.SkyCoord`\n The center of the spherical screen\n only_off_disk : `bool`, optional\n If `True`, apply this assumption only to off-disk coordinates, with on-disk coordinates\n still mapped onto the surface of the Sun. Defaults to `False`.\n\n Examples\n --------\n\n .. minigallery:: sunpy.coordinates.Helioprojective.assume_spherical_screen\n\n >>> import astropy.units as u\n >>> from sunpy.coordinates import Helioprojective\n >>> h = Helioprojective(range(7)*u.arcsec*319, [0]*7*u.arcsec,\n ... observer='earth', obstime='2020-04-08')\n >>> print(h.make_3d())\n <Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)\n [( 0., 0., 0.99660825), ( 319., 0., 0.99687244),\n ( 638., 0., 0.99778472), ( 957., 0., 1.00103285),\n (1276., 0., nan), (1595., 0., nan),\n (1914., 0., nan)]>\n\n >>> with Helioprojective.assume_spherical_screen(h.observer):\n ... print(h.make_3d())\n <Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)\n [( 0., 0., 1.00125872), ( 319., 0., 1.00125872),\n ( 638., 0., 1.00125872), ( 957., 0., 1.00125872),\n (1276., 0., 1.00125872), (1595., 0., 1.00125872),\n (1914., 0., 1.00125872)]>\n\n >>> with Helioprojective.assume_spherical_screen(h.observer, only_off_disk=True):\n ... 
print(h.make_3d())\n <Helioprojective Coordinate (obstime=2020-04-08T00:00:00.000, rsun=695700.0 km, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (Tx, Ty, distance) in (arcsec, arcsec, AU)\n [( 0., 0., 0.99660825), ( 319., 0., 0.99687244),\n ( 638., 0., 0.99778472), ( 957., 0., 1.00103285),\n (1276., 0., 1.00125872), (1595., 0., 1.00125872),\n (1914., 0., 1.00125872)]>\n \"\"\"\n try:\n old_spherical_screen = cls._spherical_screen # nominally None\n\n center_hgs = center.transform_to(HeliographicStonyhurst(obstime=center.obstime))\n cls._spherical_screen = {\n 'center': center,\n 'radius': center_hgs.radius,\n 'only_off_disk': only_off_disk\n }\n yield\n finally:\n cls._spherical_screen = old_spherical_screen\n\n\n@add_common_docstring(**_frame_parameters())\nclass HeliocentricEarthEcliptic(SunPyBaseCoordinateFrame):\n \"\"\"\n A coordinate or frame in the Heliocentric Earth Ecliptic (HEE) system.\n\n - The origin is the center of the Sun.\n - The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the Sun-Earth line.\n - The Z-axis (+90 degrees latitude) is aligned with the component perpendicular to the X-axis\n of the mean ecliptic pole at the observation time.\n\n Parameters\n ----------\n {data}\n {lonlat}\n {distance_sun}\n {common}\n \"\"\"\n\n\n@add_common_docstring(**_frame_parameters())\nclass GeocentricSolarEcliptic(SunPyBaseCoordinateFrame):\n \"\"\"\n A coordinate or frame in the Geocentric Solar Ecliptic (GSE) system.\n\n - The origin is the center of the Earth.\n - The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the Earth-Sun line.\n - The Z-axis (+90 degrees latitude) is aligned with the component perpendicular to the X-axis\n of the mean ecliptic pole at the observation time.\n\n Parameters\n ----------\n {data}\n {lonlat}\n {distance_earth}\n {common}\n\n Notes\n -----\n Aberration due to Earth motion is not included.\n \"\"\"\n\n\n@add_common_docstring(**_frame_parameters())\nclass HeliocentricInertial(SunPyBaseCoordinateFrame):\n \"\"\"\n A coordinate or frame in the Heliocentric Inertial (HCI) system.\n\n - The origin is the center of the Sun.\n - The Z-axis (+90 degrees latitude) is aligned with the Sun's north pole.\n - The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the solar ascending\n node on the ecliptic (mean J2000.0).\n\n Parameters\n ----------\n {data}\n {lonlat}\n {distance_sun}\n {common}\n\n Notes\n -----\n The solar ascending node on the ecliptic lies on the intersection of the solar equatorial\n plane with the ecliptic plane, not on the intersection of the celestial equatorial plane with\n the ecliptic plane.\n \"\"\"\n\n\n@add_common_docstring(**_frame_parameters())\nclass GeocentricEarthEquatorial(SunPyBaseCoordinateFrame):\n \"\"\"\n A coordinate or frame in the Geocentric Earth Equatorial (GEI) system.\n\n - The origin is the center of the Earth.\n - The Z-axis (+90 degrees latitude) is aligned with the Earth's north pole.\n - The X-axis (0 degrees longitude and 0 degrees latitude) is aligned with the mean (not true)\n vernal equinox.\n\n Parameters\n ----------\n {data}\n {lonlat}\n {distance_earth}\n {equinox}\n {common}\n\n Notes\n -----\n Aberration due to Earth motion is not included.\n \"\"\"\n equinox = TimeFrameAttributeSunPy(default=_J2000)\n" ]
[ [ "numpy.sqrt", "numpy.fmin", "numpy.cos", "numpy.errstate" ] ]
marinaevers/regional-correlations
[ "8ca91a5283a92e75f3d99f870c295ca580edb949" ]
[ "backend/helper/pearson.py" ]
[ "import numpy as np\nfrom joblib import Parallel, delayed\nimport multiprocessing\n\nnum_cores = multiprocessing.cpu_count()\n\n\ndef pearson_corr_distance_matrix(timelines, lag=0):\n if lag == 0:\n return np.corrcoef(timelines)\n\n def corr(timelines, timeline, lag):\n # correlate the reference timeline with each series shifted by lag samples\n corr_mat = np.zeros((1, len(timelines)))\n for j, t in enumerate(timelines):\n t1 = timeline if lag == 0 else timeline[:-lag]\n t2 = t[lag:]\n corr_mat[0][j] = np.corrcoef(t1, t2)[0, 1]\n return corr_mat[0]\n\n results = Parallel(n_jobs=int(num_cores), verbose=10)(delayed(corr)(timelines, timeline, lag) for timeline in timelines)\n\n return np.array(results)\n" ]
[ [ "numpy.array", "numpy.corrcoef" ] ]
vishalbelsare/bayesian_bootstrap
[ "57a093a128ac1aaf7ff7a6cf70f6b05d684589d7" ]
[ "bayesian_bootstrap/tests/test_bootstrap.py" ]
[ "import unittest\nimport numpy as np\nimport scipy\nimport random\nimport bayesian_bootstrap.bootstrap as bb\nfrom bayesian_bootstrap.bootstrap import (\n mean,\n var,\n bayesian_bootstrap,\n central_credible_interval,\n highest_density_interval,\n BayesianBootstrapBagging,\n covar,\n)\nfrom sklearn.linear_model import LinearRegression\n\n\nclass TestMoments(unittest.TestCase):\n def test_mean(self):\n X = [-1, 0, 1]\n posterior_samples = mean(X, 10000)\n self.assertAlmostEqual(np.mean(posterior_samples), 0, delta=0.01)\n self.assertAlmostEqual(len([s for s in posterior_samples if s < 0]), 5000, delta=1000)\n\n def test_variance(self):\n X = np.random.uniform(-1, 1, 500)\n posterior_samples = var(X, 10000)\n self.assertAlmostEqual(np.mean(posterior_samples), 1 / 3.0, delta=0.05)\n\n def test_self_covar(self):\n X = np.random.uniform(-1, 1, 500)\n posterior_samples = covar(X, X, 10000)\n self.assertAlmostEqual(np.mean(posterior_samples), np.var(X), delta=0.05)\n\n def test_covar(self):\n X = np.random.uniform(-1, 1, 500)\n Y = np.random.uniform(-1, 1, 500)\n posterior_samples = covar(X, Y, 10000)\n self.assertAlmostEqual(np.mean(posterior_samples), 0, delta=0.05)\n\n def test_mean_resample(self):\n X = [-1, 0, 1]\n posterior_samples = bayesian_bootstrap(X, np.mean, 10000, 100, low_mem=True)\n self.assertAlmostEqual(np.mean(posterior_samples), 0, delta=0.01)\n self.assertAlmostEqual(len([s for s in posterior_samples if s < 0]), 5000, delta=1000)\n posterior_samples = bayesian_bootstrap(X, np.mean, 10000, 100, low_mem=False)\n self.assertAlmostEqual(np.mean(posterior_samples), 0, delta=0.01)\n self.assertAlmostEqual(len([s for s in posterior_samples if s < 0]), 5000, delta=1000)\n\n def test_var_resample(self):\n X = np.random.uniform(-1, 1, 500)\n posterior_samples = bayesian_bootstrap(X, np.var, 10000, 5000, low_mem=True)\n self.assertAlmostEqual(np.mean(posterior_samples), 1 / 3.0, delta=0.05)\n X = np.random.uniform(-1, 1, 500)\n posterior_samples = bayesian_bootstrap(X, np.var, 10000, 5000, low_mem=False)\n self.assertAlmostEqual(np.mean(posterior_samples), 1 / 3.0, delta=0.05)\n\n\nclass TestIntervals(unittest.TestCase):\n def test_central_credible_interval(self):\n l, r = central_credible_interval(self._shuffle(list(range(10))), alpha=0.2)\n self.assertEqual(l, 1)\n self.assertEqual(r, 8)\n l, r = central_credible_interval(self._shuffle(list(range(10))), alpha=0.19)\n self.assertEqual(l, 1)\n self.assertEqual(r, 8)\n l, r = central_credible_interval(self._shuffle(list(range(20))), alpha=0.1)\n self.assertEqual(l, 1)\n self.assertEqual(r, 18)\n\n def test_hpdi(self):\n l, r = highest_density_interval(self._shuffle([0, 10, 1] + [1.1] * 7), alpha=0.2)\n self.assertEqual(l, 1)\n self.assertEqual(r, 1.1)\n l, r = highest_density_interval(self._shuffle([0, 10, 1.1, 1]), alpha=0.5)\n self.assertEqual(l, 1)\n self.assertEqual(r, 1.1)\n\n def _shuffle(self, x):\n x = list(x)\n random.shuffle(x)\n return x\n\n\nclass TestRegression(unittest.TestCase):\n def test_parameter_estimation_resampling_low_memory(self):\n X = np.random.uniform(0, 4, 1000)\n y = X + np.random.normal(0, 1, 1000)\n m = BayesianBootstrapBagging(LinearRegression(), 10000, 1000, low_mem=True)\n m.fit(X.reshape(-1, 1), y)\n coef_samples = [b.coef_ for b in m.base_models_]\n intercept_samples = [b.intercept_ for b in m.base_models_]\n self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)\n l, r = central_credible_interval(coef_samples, alpha=0.05)\n self.assertLess(l, 1)\n self.assertGreater(r, 1)\n l, r = 
highest_density_interval(coef_samples, alpha=0.05)\n self.assertLess(l, 1)\n self.assertGreater(r, 1)\n self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)\n l, r = central_credible_interval(intercept_samples, alpha=0.05)\n self.assertLess(l, 0)\n self.assertGreater(r, 0)\n self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)\n l, r = highest_density_interval(intercept_samples, alpha=0.05)\n self.assertLess(l, 0)\n self.assertGreater(r, 0)\n\n def test_parameter_estimation_resampling(self):\n X = np.random.uniform(0, 4, 1000)\n y = X + np.random.normal(0, 1, 1000)\n m = BayesianBootstrapBagging(LinearRegression(), 10000, 1000, low_mem=False)\n m.fit(X.reshape(-1, 1), y)\n coef_samples = [b.coef_ for b in m.base_models_]\n intercept_samples = [b.intercept_ for b in m.base_models_]\n self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)\n l, r = central_credible_interval(coef_samples, alpha=0.05)\n self.assertLess(l, 1)\n self.assertGreater(r, 1)\n l, r = highest_density_interval(coef_samples, alpha=0.05)\n self.assertLess(l, 1)\n self.assertGreater(r, 1)\n self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)\n l, r = central_credible_interval(intercept_samples, alpha=0.05)\n self.assertLess(l, 0)\n self.assertGreater(r, 0)\n self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)\n l, r = highest_density_interval(intercept_samples, alpha=0.05)\n self.assertLess(l, 0)\n self.assertGreater(r, 0)\n\n def test_parameter_estimation_bayes(self):\n X = np.random.uniform(0, 4, 1000)\n y = X + np.random.normal(0, 1, 1000)\n m = BayesianBootstrapBagging(LinearRegression(), 10000, low_mem=False)\n m.fit(X.reshape(-1, 1), y)\n coef_samples = [b.coef_ for b in m.base_models_]\n intercept_samples = [b.intercept_ for b in m.base_models_]\n self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)\n l, r = central_credible_interval(coef_samples, alpha=0.05)\n self.assertLess(l, 1)\n self.assertGreater(r, 1)\n l, r = highest_density_interval(coef_samples, alpha=0.05)\n self.assertLess(l, 1)\n self.assertGreater(r, 1)\n self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)\n l, r = central_credible_interval(intercept_samples, alpha=0.05)\n self.assertLess(l, 0)\n self.assertGreater(r, 0)\n self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)\n l, r = highest_density_interval(intercept_samples, alpha=0.05)\n self.assertLess(l, 0)\n self.assertGreater(r, 0)\n\n def test_parameter_estimation_bayes_low_memory(self):\n X = np.random.uniform(0, 4, 1000)\n y = X + np.random.normal(0, 1, 1000)\n m = BayesianBootstrapBagging(LinearRegression(), 10000, low_mem=True)\n m.fit(X.reshape(-1, 1), y)\n coef_samples = [b.coef_ for b in m.base_models_]\n intercept_samples = [b.intercept_ for b in m.base_models_]\n self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)\n l, r = central_credible_interval(coef_samples, alpha=0.05)\n self.assertLess(l, 1)\n self.assertGreater(r, 1)\n l, r = highest_density_interval(coef_samples, alpha=0.05)\n self.assertLess(l, 1)\n self.assertGreater(r, 1)\n self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)\n l, r = central_credible_interval(intercept_samples, alpha=0.05)\n self.assertLess(l, 0)\n self.assertGreater(r, 0)\n self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)\n l, r = highest_density_interval(intercept_samples, alpha=0.05)\n self.assertLess(l, 0)\n self.assertGreater(r, 0)\n\n\ndef test_pearsonr():\n x = np.linspace(0, 5, 10)\n y = np.linspace(0, 5, 10)\n assert 
np.mean(bb.pearsonr(x, y, 10000)) == 1\n assert np.mean(bb.pearsonr(x, -y, 10000)) == -1\n\n np.random.seed(1337)\n x = [0, 1, 3, 6]\n y = [1, 2, 5, 7]\n assert np.isclose(np.mean(bb.pearsonr(x, y, 10000)), scipy.stats.pearsonr(x, y)[0], atol=0.001)\n\n np.random.seed(1337)\n x = np.linspace(-10, 10, 10000)\n y = np.abs(x)\n assert np.isclose(scipy.stats.pearsonr(x, y)[0], np.mean(bb.pearsonr(x, y, 1000)), atol=0.001)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.random.uniform", "scipy.stats.pearsonr", "numpy.var", "sklearn.linear_model.LinearRegression", "numpy.random.seed", "numpy.abs", "numpy.random.normal", "numpy.linspace", "numpy.mean" ] ]
yannick-t/probabilistic_forecasting_of_energy_time_series_using_deep_learning
[ "98a2b12270e79045b8704e9d9cc506ffadb95127" ]
[ "code/util/visualization/main_unc_forecast_visualization.py" ]
[ "from datetime import datetime\n\nimport matplotlib.pyplot as plt\nimport torch\n\nfrom evaluation.evaluate_forecasting_util import timeframe\nfrom load_forecasting.forecast_util import dataset_df_to_np\nfrom load_forecasting.post_processing import recalibrate\nfrom load_forecasting.predict import predict_transform\nfrom models.deep_ensemble_sklearn import DeepEnsemble\nfrom training.loss.heteroscedastic_loss import HeteroscedasticLoss\nfrom training.training_util import load_train\nfrom util.data.data_src_tools import load_opsd_de_load_dataset\n\n'''\nCode to visualize an uncertain forecast on the opsd data, by plotting one week of forecasts.\n'''\n\nuse_cuda = True\nuse_cuda = use_cuda & torch.cuda.is_available()\n\ndevice = torch.device('cuda' if use_cuda else 'cpu')\n\n\ndef main():\n train_df, test_df, scaler = load_opsd_de_load_dataset('transparency', short_term=False, reprocess=False,\n n_ahead=1)\n\n x_train, y_train, offset_train = dataset_df_to_np(train_df)\n x_test, y_test, offset_test = dataset_df_to_np(test_df)\n timestamp_test = test_df.index.to_numpy()\n\n y_test_orig = scaler.inverse_transform(y_test) + offset_test\n y_train_orig = scaler.inverse_transform(y_train) + offset_train\n\n hs = [132, 77, 50]\n lr = 5.026e-05\n epochs = 1253\n\n # initialize model\n ensemble_model = DeepEnsemble(\n input_size=x_train.shape[-1],\n output_size=y_train.shape[-1] * 2,\n hidden_size=hs,\n lr=lr,\n max_epochs=epochs,\n batch_size=1024,\n optimizer=torch.optim.Adam,\n criterion=HeteroscedasticLoss,\n device=device\n )\n\n # train and recalibrate\n load_train(ensemble_model, x_train, y_train, 'deep_ens', '../trained_models/', 'load_forecasting_', True)\n\n pred_mean_train, pred_var_train, _, _, _ = predict_transform(ensemble_model, x_train, scaler, offset_train,\n 'Deep Ensemble')\n recal = recalibrate(pred_mean_train, pred_var_train, y_train_orig)\n\n # predict\n pred_mean, pred_var, _, _, _ = predict_transform(ensemble_model, x_test, scaler, offset_test, 'Deep Ensemble UCI')\n pred_mean, pred_var = recal(pred_mean, pred_var)\n\n ax = plt.subplot(1, 1, 1)\n timeframe(datetime(2018, month=7, day=9), datetime(2018, month=7, day=16), pred_mean, pred_var, timestamp_test,\n y_test_orig, ax)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplot", "torch.cuda.is_available", "torch.device" ] ]
jvario/inside_Airbnb-Athens-
[ "8abae93756d1e4388f770dfb073ec27cfc9bacbf" ]
[ "helper_functions/join_main_category.py" ]
[ "import pandas as pd\nimport collections as col\nimport numpy as np\n\n\ndef join_main_category(new_category, sub_categories, word_dict, size, data):\n '''\n this function joins sub_categories into a main category\n ==============================================================\n input:\n - new_category : name of the new main category\n type : string\n\n - sub_categories : the names of the sub_categories to be joined\n type : list\n\n - word_dict : the dictionary with all raw amenities\n type : dict\n\n - size : how many elements should have the np.array\n type : int\n\n - data : our main data\n type : pd DataFrame\n **************************************************************\n output:\n - category_exists: 1 if the category exists , 0 if not\n type = np.array\n ==============================================================\n '''\n name_of_category = new_category\n\n for amen in data[\"amenities\"]:\n for list_item in amen:\n ind = amen.index(list_item)\n amen[ind] = amen[ind].replace(' \\\"', '\\\"')\n\n category = pd.Series(sub_categories)\n # inside of the category belongs all the sub_categories\n\n myDict = col.defaultdict(list)\n for key in word_dict.keys():\n\n for cat in category:\n if (cat in key):\n myDict[name_of_category].append(str(key))\n\n # create a zeros np array\n myDict = dict(myDict)\n\n category_exists = np.zeros(size, dtype=int)\n key = name_of_category\n\n for ind in range(0, size):\n\n amenity = data.iloc[ind][\"amenities\"]\n\n for key, value in myDict.items(): # iterate in keys,values of myDict\n for val in value:\n\n if val in amenity:\n # if the list contains the value , then set the key columns to 1\n\n category_exists[ind] = 1\n\n return category_exists" ]
[ [ "pandas.Series", "numpy.zeros" ] ]
mohammadbashiri/bashiri-et-al-2021
[ "c7c15ea0bf165d4d3db2ff63a04a1e78c29bf44c" ]
[ "lib/nnsysident/nnsysident/utility/data_helpers.py" ]
[ "import numpy as np\nimport torch.utils.data as utils\n\nfrom neuralpredictors.data.samplers import RepeatsBatchSampler\n\n\ndef get_oracle_dataloader(dat, toy_data=False, oracle_condition=None, verbose=False, file_tree=False):\n\n if toy_data:\n condition_hashes = dat.info.condition_hash\n else:\n dat_info = dat.info if not file_tree else dat.trial_info\n if \"image_id\" in dir(dat_info):\n condition_hashes = dat_info.image_id\n image_class = dat_info.image_class\n\n elif \"colorframeprojector_image_id\" in dir(dat_info):\n condition_hashes = dat_info.colorframeprojector_image_id\n image_class = dat_info.colorframeprojector_image_class\n elif \"frame_image_id\" in dir(dat_info):\n condition_hashes = dat_info.frame_image_id\n image_class = dat_info.frame_image_class\n else:\n raise ValueError(\n \"'image_id', 'colorframeprojector_image_id', or 'frame_image_id' have to be present in the dataset under dat.info \"\n \"in order to get the oracle repeats.\"\n )\n\n max_idx = condition_hashes.max() + 1\n classes, class_idx = np.unique(image_class, return_inverse=True)\n identifiers = condition_hashes + class_idx * max_idx\n\n dat_tiers = dat.tiers if not file_tree else dat.trial_info.tiers\n sampling_condition = (\n np.where(dat_tiers == \"test\")[0]\n if oracle_condition is None\n else np.where((dat_tiers == \"test\") & (class_idx == oracle_condition))[0]\n )\n if (oracle_condition is not None) and verbose:\n print(\"Created Testloader for image class {}\".format(classes[oracle_condition]))\n\n sampler = RepeatsBatchSampler(identifiers, sampling_condition)\n return utils.DataLoader(dat, batch_sampler=sampler)\n\n\ndef unpack_data_info(data_info):\n\n in_shapes_dict = {k: v[\"input_dimensions\"] for k, v in data_info.items()}\n input_channels = [v[\"input_channels\"] for k, v in data_info.items()]\n n_neurons_dict = {k: v[\"output_dimension\"] for k, v in data_info.items()}\n return n_neurons_dict, in_shapes_dict, input_channels\n" ]
[ [ "torch.utils.data.DataLoader", "numpy.where", "numpy.unique" ] ]
abrahambotros/pytorch-lightning
[ "a5538af3558cf544dffd92b1b8bab3a5793f0ba0" ]
[ "tests/utilities/test_dtype_device_mixin.py" ]
[ "import pytest\nimport torch\nimport torch.nn as nn\n\nfrom pytorch_lightning import Trainer, Callback\nfrom pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin\nfrom tests.base import EvalModelTemplate\n\n\nclass SubSubModule(DeviceDtypeModuleMixin):\n pass\n\n\nclass SubModule(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.module = SubSubModule()\n\n\nclass TopModule(EvalModelTemplate):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.module = SubModule()\n\n\nclass DeviceAssertCallback(Callback):\n\n def on_batch_start(self, trainer, model):\n rank = trainer.local_rank\n assert isinstance(model, TopModule)\n # index = None also means first device\n assert (model.device.index is None and rank == 0) or model.device.index == rank\n assert model.device == model.module.module.device\n\n\[email protected](['dst_dtype'], [\n pytest.param(torch.float),\n pytest.param(torch.double),\n pytest.param(torch.half),\n])\[email protected](['dst_device'], [\n pytest.param(torch.device('cpu')),\n pytest.param(torch.device('cuda')),\n pytest.param(torch.device('cuda', 0)),\n])\[email protected](not torch.cuda.is_available(), reason=\"test requires GPU machine\")\ndef test_submodules_device_and_dtype(dst_device, dst_dtype):\n \"\"\"\n Test that the device and dtype property updates propagate through mixed nesting of regular\n nn.Modules and the special modules of type DeviceDtypeModuleMixin (e.g. Metric or LightningModule).\n \"\"\"\n\n model = TopModule()\n assert model.device == torch.device('cpu')\n model = model.to(device=dst_device, dtype=dst_dtype)\n # nn.Module does not have these attributes\n assert not hasattr(model.module, '_device')\n assert not hasattr(model.module, '_dtype')\n # device and dtype change should propagate down into all children\n assert model.device == model.module.module.device == dst_device\n assert model.dtype == model.module.module.dtype == dst_dtype\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_submodules_multi_gpu_dp(tmpdir):\n model = TopModule()\n trainer = Trainer(\n default_root_dir=tmpdir,\n distributed_backend='dp',\n gpus=2,\n callbacks=[DeviceAssertCallback()],\n max_steps=1,\n )\n trainer.fit(model)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_submodules_multi_gpu_ddp_spawn(tmpdir):\n model = TopModule()\n trainer = Trainer(\n default_root_dir=tmpdir,\n distributed_backend='dpp_spawn',\n gpus=2,\n callbacks=[DeviceAssertCallback()],\n max_steps=1,\n )\n trainer.fit(model)\n" ]
[ [ "torch.cuda.is_available", "torch.device", "torch.cuda.device_count" ] ]
charlesmackin/tiny
[ "bf8afc5cfc15e12efdd3bca0d559adfdfc435981" ]
[ "v0.5/training/anomaly_detection/eval_functions_eembc.py" ]
[ "'''\nMLCommons\ngroup: TinyMLPerf (https://github.com/mlcommons/tiny)\n\nimage classification on cifar10\n\neval_functions_eembc.py: performances evaluation functions from eembc\n\nrefs:\nhttps://github.com/SiliconLabs/platform_ml_models/blob/master/eembc/Methodology/eval_functions_eembc.py\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('Qt5Agg') # Qt5Agg, GTKAgg, Qt4Agg\n\n# Classifier overall accuracy calculation\n# y_pred contains the outputs of the network for the validation data\n# labels are the correct answers\ndef calculate_accuracy(y_pred, labels):\n y_pred_label = np.argmax(y_pred, axis=1)\n correct = np.sum(labels == y_pred_label)\n accuracy = 100 * correct / len(y_pred)\n print(f\"Overall accuracy = {accuracy:2.1f}\")\n return accuracy\n\n\n# Classifier accuracy per class calculation\n# y_pred contains the outputs of the network for the validation data\n# labels are the correct answers\n# classes are the model's classes\ndef calculate_all_accuracies(y_pred, labels, classes):\n n_classes = len(classes)\n\n # Initialize array of accuracies\n accuracies = np.zeros(n_classes)\n\n # Loop on classes\n for class_item in range(n_classes):\n true_positives = 0\n # Loop on all predictions\n for i in range(len(y_pred)):\n # Check if it matches the class that we are working on\n if (labels[i] == class_item):\n # Get prediction label\n y_pred_label = np.argmax(y_pred[i, :])\n # Check if the prediction is correct\n if (labels[i] == y_pred_label):\n true_positives += 1\n\n accuracies[class_item] = 100 * true_positives / np.sum(labels == class_item)\n print(f\"Accuracy = {accuracies[class_item]:2.1f} ({classes[class_item]})\")\n\n return accuracies\n\n\n# Classifier ROC AUC calculation\n# y_pred contains the outputs of the network for the validation data\n# labels are the correct answers\n# classes are the model's classes\n# name is the model's name\ndef calculate_auc(y_pred, labels, classes, name):\n n_classes = len(classes)\n\n # thresholds, linear range, may need improvements for better precision\n thresholds = np.arange(0.0, 1.01, .01)\n # false positive rate\n fpr = np.zeros([n_classes, len(thresholds)])\n # true positive rate\n tpr = np.zeros([n_classes, len(thresholds)])\n # area under curve\n roc_auc = np.zeros(n_classes)\n\n # get number of positive and negative examples in the dataset\n for class_item in range(n_classes):\n # Sum of all true positive answers\n all_positives = sum(labels == class_item)\n # Sum of all true negative answers\n all_negatives = len(labels) - all_positives\n\n # iterate through all thresholds and determine fraction of true positives\n # and false positives found at this threshold\n for threshold_item in range(1, len(thresholds)):\n threshold = thresholds[threshold_item]\n false_positives = 0\n true_positives = 0\n for i in range(len(y_pred)):\n # Check prediction for this threshold\n if (y_pred[i, class_item] > threshold):\n if labels[i] == class_item:\n true_positives += 1\n else:\n false_positives += 1\n fpr[class_item, threshold_item] = false_positives / float(all_negatives)\n tpr[class_item, threshold_item] = true_positives / float(all_positives)\n\n # Force boundary condition\n fpr[class_item, 0] = 1\n tpr[class_item, 0] = 1\n\n # calculate area under curve, trapezoid integration\n for threshold_item in range(len(thresholds) - 1):\n roc_auc[class_item] += .5 * (tpr[class_item, threshold_item] + tpr[class_item, threshold_item + 1]) * (\n fpr[class_item, threshold_item] - fpr[class_item, threshold_item + 
1]);\n\n # results\n roc_auc_avg = np.mean(roc_auc)\n print(f\"Simplified average roc_auc = {roc_auc_avg:.3f}\")\n\n plt.figure()\n for class_item in range(n_classes):\n plt.plot(fpr[class_item, :], tpr[class_item, :],\n label=f\"auc: {roc_auc[class_item]:0.3f} ({classes[class_item]})\")\n plt.xlim([0.0, 0.1])\n plt.ylim([0.5, 1.0])\n plt.legend(loc=\"lower right\")\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC: ' + name)\n plt.grid(which='major')\n plt.show(block=False)\n\n return roc_auc\n\n\n# Classifier overall accuracy calculation\n# y_pred contains the outputs of the network for the validation data\n# y_true are the correct answers (0.0 for normal, 1.0 for anomaly)\n# using this function is not recommended\ndef calculate_ae_accuracy(y_pred, y_true):\n thresholds = np.amin(y_pred) + np.arange(0.0, 1.0, .01) * (np.amax(y_pred) - np.amin(y_pred))\n accuracy = 0\n for threshold in thresholds:\n y_pred_binary = (y_pred > threshold).astype(int)\n correct = np.sum(y_pred_binary == y_true)\n accuracy_tmp = 100 * correct / len(y_pred_binary)\n if accuracy_tmp > accuracy:\n accuracy = accuracy_tmp\n\n print(f\"Overall accuracy = {accuracy:2.1f}\")\n return accuracy\n\n\n# Classifier overall accuracy calculation\n# y_pred contains the outputs of the network for the validation data\n# y_true are the correct answers (0.0 for normal, 1.0 for anomaly)\n# this is the function that should be used for accuracy calculations\ndef calculate_ae_pr_accuracy(y_pred, y_true):\n # initialize all arrays\n thresholds = np.amin(y_pred) + np.arange(0.0, 1.0, .01) * (np.amax(y_pred) - np.amin(y_pred))\n accuracy = 0\n n_normal = np.sum(y_true == 0)\n precision = np.zeros(len(thresholds))\n recall = np.zeros(len(thresholds))\n\n # Loop on all the threshold values\n for threshold_item in range(len(thresholds)):\n threshold = thresholds[threshold_item]\n # Binarize the result\n y_pred_binary = (y_pred > threshold).astype(int)\n # Build matrix of TP, TN, FP and FN\n true_negative = np.sum((y_pred_binary[0:n_normal] == 0))\n false_positive = np.sum((y_pred_binary[0:n_normal] == 1))\n true_positive = np.sum((y_pred_binary[n_normal:] == 1))\n false_negative = np.sum((y_pred_binary[n_normal:] == 0))\n # Calculate and store precision and recall\n precision[threshold_item] = true_positive / (true_positive + false_positive)\n recall[threshold_item] = true_positive / (true_positive + false_negative)\n # See if the accuracy has improved\n accuracy_tmp = 100 * (precision[threshold_item] + recall[threshold_item]) / 2\n if accuracy_tmp > accuracy:\n accuracy = accuracy_tmp\n\n # Results\n print(f\"Precision/recall accuracy = {accuracy:2.1f}\")\n\n plt.figure()\n plt.plot(recall, precision)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.title('Precision vs Recall')\n plt.grid(which='major')\n plt.show(block=False)\n\n return accuracy\n\n\n# Autoencoder ROC AUC calculation\n# y_pred contains the outputs of the network for the validation data\n# y_true are the correct answers (0.0 for normal, 1.0 for anomaly)\n# this is the function that should be used for accuracy calculations\n# name is the model's name\ndef calculate_ae_auc(y_pred, y_true, name):\n # initialize all arrays\n thresholds = np.amin(y_pred) + np.arange(0.0, 1.01, .01) * (np.amax(y_pred) - np.amin(y_pred))\n roc_auc = 0\n\n n_normal = np.sum(y_true == 0)\n tpr = np.zeros(len(thresholds))\n fpr = np.zeros(len(thresholds))\n\n # Loop on all the threshold values\n for 
threshold_item in range(1, len(thresholds)):\n threshold = thresholds[threshold_item]\n # Binarize the result\n y_pred_binary = (y_pred > threshold).astype(int)\n # Build TP and FP\n tpr[threshold_item] = np.sum((y_pred_binary[n_normal:] == 1)) / float(len(y_true) - n_normal)\n fpr[threshold_item] = np.sum((y_pred_binary[0:n_normal] == 1)) / float(n_normal)\n\n # Force boundary condition\n fpr[0] = 1\n tpr[0] = 1\n\n # Integrate\n for threshold_item in range(len(thresholds) - 1):\n roc_auc += .5 * (tpr[threshold_item] + tpr[threshold_item + 1]) * (\n fpr[threshold_item] - fpr[threshold_item + 1]);\n\n # Results\n print(f\"Simplified roc_auc = {roc_auc:.3f}\")\n\n plt.figure()\n plt.plot(tpr, fpr, label=f\"auc: {roc_auc:0.3f}\")\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.0])\n plt.legend(loc=\"lower right\")\n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n plt.title('ROC: ' + name)\n plt.grid(which='major')\n plt.show(block=False)\n\n return roc_auc\n" ]
[ [ "numpy.sum", "matplotlib.pyplot.legend", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.grid", "numpy.argmax", "matplotlib.pyplot.xlim", "numpy.arange", "matplotlib.pyplot.title", "numpy.amin", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.amax", "matplotlib.pyplot.ylim", "matplotlib.use", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.mean" ] ]
adrianjav/lipschitz-standardization
[ "d97b9c069802ef15ff747583b42b94c0bc3e2940" ]
[ "utils/probabilistc_model.py" ]
[ "from __future__ import annotations\n\nfrom typing import List\nfrom functools import reduce\n\nimport torch\nfrom utils.distributions import get_distribution_by_name, Base\n\n\ndef _get_distributions(dists_names) -> List[Base]:\n dists = []\n\n for i, name in enumerate(dists_names):\n is_gammatrick = name[-1] == '*'\n is_bernoullitrick = name[-1] == '+'\n\n if is_gammatrick or is_bernoullitrick:\n name = name[:-1]\n\n if 'categorical' in name or 'ordinal' in name:\n pos = name.find('(')\n num_probs = int(name[pos + 1 : name.find(')')])\n name = name[:pos]\n else:\n num_probs = 1\n\n if is_gammatrick:\n if num_probs == 1:\n dist_i = get_distribution_by_name('gammatrick')[name]()\n else:\n dist_i = get_distribution_by_name('gammatrick')[name](num_probs)\n elif is_bernoullitrick:\n if num_probs == 1:\n dist_i = get_distribution_by_name('bernoullitrick')[name]()\n else:\n dist_i = get_distribution_by_name('bernoullitrick')[name](num_probs)\n else:\n if num_probs == 1:\n dist_i = get_distribution_by_name(name)()\n else:\n dist_i = get_distribution_by_name(name)(num_probs)\n\n dists += [dist_i]\n\n return dists\n\n\nclass ProbabilisticModel(object):\n def __init__(self, dists_names):\n self.dists = _get_distributions(dists_names)\n self.indexes = reduce(list.__add__, [[[i, j] for j in range(d.num_dists)] for i, d in enumerate(self.dists)])\n\n @property\n def weights(self):\n return [d.weight for d in self]\n\n @weights.setter\n def weights(self, values):\n if isinstance(values, torch.Tensor):\n values = values.detach().tolist()\n\n for w, d in zip(values, self):\n d.weight = w\n\n def scale_data(self, x):\n new_x = []\n for i, d in enumerate(self):\n new_x.append(d >> x[:, i])\n return torch.stack(new_x, dim=-1)\n\n def __rshift__(self, data):\n return self.scale_data(data)\n\n def params_from_data(self, x, mask):\n params = []\n for i, d in enumerate(self):\n pos = self.gathered_index(i)\n data = x[..., i] if mask is None or mask[..., pos].all() else torch.masked_select(x[..., i], mask[..., pos])\n params += d.params_from_data(data)\n return params\n\n def preprocess_data(self, x, mask=None):\n new_x = []\n for i, dist_i in enumerate(self.dists):\n new_x += dist_i.preprocess_data(x[:, i], mask)\n\n for i in range(len(self.dists), x.size(1)):\n new_x += [x[:, i]]\n\n return torch.stack(new_x, 1)\n\n def gathered_index(self, index):\n return self.indexes[index][0]\n\n def __len__(self):\n return len(self.indexes)\n\n def __getitem__(self, item) -> Base:\n if isinstance(item, int):\n return self.__getitem__(self.indexes[item])\n\n return self.dists[item[0]][item[1]]\n\n @property\n def gathered(self):\n class GatherProbabilisticModel(object):\n def __init__(self, model):\n self.model = model\n\n def __len__(self):\n return len(self.model.dists)\n\n def __getitem__(self, item):\n offset = sum([d.num_dists for d in self.model.dists[: item]])\n idxs = range(offset, offset + self.model.dists[item].num_dists)\n\n return idxs, self.model.dists[item]\n\n @property\n def weights(self):\n return [d.weight for [_, d] in self]\n\n @weights.setter\n def weights(self, values):\n if isinstance(values, torch.Tensor):\n values = values.detach().tolist()\n\n for w, [_, d] in zip(values, self):\n d.weight = w\n\n def __iter__(self):\n offset = 0\n for i, d in enumerate(self.model.dists):\n yield list(range(offset, offset + d.num_dists)), d\n offset += d.num_dists\n\n def get_param_names(self):\n names = []\n for i, dist_i in enumerate(self.model.dists):\n if dist_i.num_dists > 1 or dist_i.size_params[0] > 1:\n 
param_name = dist_i.real_parameters[0]\n num_classes = dist_i.size_params[0] if dist_i.num_dists == 1 else dist_i.num_dists\n names += [f'{dist_i}_{param_name}{j}_dim{i}' for j in range(num_classes)]\n else:\n names += [f'{dist_i}_{v}_dim{i}' for v in dist_i.real_parameters]\n\n return names\n\n def scale_data(self, x):\n new_x = []\n for i, [_, d] in enumerate(self):\n new_x.append(d >> x[:, i])\n return torch.stack(new_x, dim=-1)\n\n def __rshift__(self, data):\n return self.scale_data(data)\n\n return GatherProbabilisticModel(self)\n\n\n\n" ]
[ [ "torch.stack", "torch.masked_select" ] ]
JGCRI/stitches
[ "a55e5801279bd153bb7bcc247422e29eecbbc209" ]
[ "stitches/make_tas_archive.py" ]
[ "# Define the functions used to get Get the weighted global mean temperature\n# from pangeo CMIP6 results.\n\n# Import packages\nimport stitches.fx_pangeo as pangeo\nimport stitches.fx_data as data\nimport stitches.fx_util as util\nimport os\nimport pkg_resources\nimport pandas as pd\n\n\ndef get_global_tas(path):\n \"\"\"\n Calculate the weighted annual global mean temp.\n\n :param path: a zstore path to the CMIP6 files stored on pangeo.\n :type path: str\n\n :return: str path to the location of file containing the weighted global mean.\n \"\"\"\n\n # Make the name of the output file that will only be created if the output\n # does not already exists.\n temp_dir = pkg_resources.resource_filename('stitches', 'data/temp-data')\n\n if os.path.isdir(temp_dir) == False:\n os.mkdir(temp_dir)\n\n tag = path.replace(\"/\", \"_\")\n file_name = tag.replace(\"gs:__\", \"\") + \"temp.csv\"\n ofile = temp_dir + \"/\" + file_name\n\n if os.path.isfile(ofile) == False:\n # Download the CMIP data & calculate the weighted annual global mean .\n d = pangeo.fetch_nc(path)\n global_mean = data.global_mean(d)\n annual_mean = global_mean.coarsen(time=12).mean()\n\n # Format the CMIP meta data & global means, then combine into a single data frame.\n meta = data.get_ds_meta(d)\n t = annual_mean[\"time\"].dt.strftime(\"%Y%m%d\").values\n year = list(map(lambda x: util.selstr(x, start=0, stop=4), t))\n\n val = annual_mean[\"tas\"].values\n d = {'year': year, 'value': val}\n df = pd.DataFrame(data=d)\n out = util.combine_df(meta, df)\n\n # Write the output\n out.to_csv(ofile, index=False)\n return ofile\n else:\n return ofile\n\n\n\ndef calculate_anomaly(data, startYr=1995, endYr=2014):\n \"\"\"\n Convert the temp data from absolute into an anomaly relative to a reference period.\n\n :param data: A data frame of the cmip absolute temperature\n :type data: pandas.core.frame.DataFrame\n :param startYr: The first year of the reference period, default set to 1995 corresponding to the IPCC defined reference period.\n :type startYr: int\n :param endYr: The final year of the reference period, default set to 2014 corresponding to the IPCC defined reference period.\n :type endYr: int\n\n :return: A pandas data frame of cmip tgav as anomalies relative to a time-averaged value from a reference period, default uses a reference period form 1995-2014\n \"\"\"\n\n # Inputs\n util.check_columns(data, {'variable', 'experiment', 'ensemble', 'model', 'year', 'value'})\n to_use = data[['model', 'experiment', 'ensemble', 'year', 'value']].copy()\n\n # Calculate the average value for the reference period defined\n # by the startYr and ednYr arguments.\n # The default reference period is set from 1995 - 2014.\n #\n # Start by subsetting the data so that it only includes values from the specified reference period.\n to_use[\"year\"] = to_use[\"year\"].astype(int)\n to_use = to_use[to_use[\"experiment\"] == \"historical\"]\n subset_data = to_use[to_use['year'].between(startYr, endYr)]\n\n # Calculate the time-averaged reference value for each ensemble\n # realization. 
This reference value will be used to convert from absolute to\n # relative temperature.\n reference_values = subset_data.groupby(['model', 'ensemble']).agg(\n {'value': lambda x: sum(x) / len(x)}).reset_index().copy()\n reference_values = reference_values.rename(columns={\"value\": \"ref_values\"})\n\n # Combine the dfs that contain the absolute temperature values with the reference values.\n # Then calculate the relative temperature.\n merged_df = data.merge(reference_values, on=['model', 'ensemble'], how='inner')\n merged_df['value'] = merged_df['value'] - merged_df['ref_values']\n merged_df = merged_df.drop(columns='ref_values')\n\n return merged_df\n\n\ndef paste_historical_data(input_data):\n \"\"\"\"\n Paste the appropriate historical data into each future scenario so that SSP585 realization 1, for\n example, has the appropriate data from 1850-2100.\n\n :param input_data: A data frame of the cmip absolute temperature\n :type input_data: pandas.core.frame.DataFrame\n\n :return: A pandas data frame of the smoothed time series (rolling mean applied)\n \"\"\"\n\n # Relabel the historical values so that there is a continuous rolling mean between the\n # historical and future values.\n # #######################################################\n # Create a subset of the non historical & future scns\n other_exps = ['1pctCO2', 'abrupt-2xCO2', 'abrupt-4xCO2']\n other_data = input_data[input_data[\"experiment\"].isin(other_exps)]\n\n # Subset the historical data\n historical_data = input_data[input_data[\"experiment\"] == \"historical\"].copy()\n\n # Create a subset of the future data\n fut_exps = ['ssp126', 'ssp245', 'ssp370', 'ssp585', 'ssp534-over', 'ssp119', 'ssp434', 'ssp460']\n future_data = input_data[input_data[\"experiment\"].isin(fut_exps)]\n future_scns = set(future_data[\"experiment\"].unique())\n\n frames = []\n for scn in future_scns:\n d = historical_data.copy()\n d[\"experiment\"] = scn\n frames.append(d)\n\n frames.append(future_data)\n frames.append(other_data)\n data = pd.concat(frames)\n\n # TODO is there a better way to prevent duplicates in 2015 & 2016 values?\n d = data.groupby(['variable', 'experiment', 'ensemble', 'model', 'year'])['value'].agg('mean').reset_index()\n\n return d\n\n\ndef make_tas_archive():\n \"\"\"\"\n # The function that creates the archive\n\n :return: Array of the tas files created.\n \"\"\"\n # Get tas data & calculate global mean temp\n\n # Get the pangeo table of contents.\n df = pangeo.fetch_pangeo_table()\n\n # Subset the monthly tas data, these are the files that we will want to process\n # for the tas archive.\n xps = [\"historical\", \"1pctCO2\", \"abrupt-4xCO2\", \"abrupt-2xCO2\", \"ssp370\", \"ssp245\", \"ssp119\",\n \"ssp434\", \"ssp460\", \"ssp126\", \"ssp585\", \"ssp534-over\"]\n df = df.loc[(df[\"experiment_id\"].isin(xps)) & # experiments of interest\n (df[\"table_id\"] == \"Amon\") & # monthly data\n (df[\"grid_label\"] == \"gn\") & # we are only interested in the results returned in the native\n (df[\"variable_id\"] == \"tas\") & # select temperature data\n (df['member_id'].str.contains('p1'))] # select only the members of the p1 physics group\n\n # For each of the CMIP6 files to calculate the global mean temperature and write the\n # results to the temporary directory.\n files = list(map(get_global_tas, df.zstore.values))\n\n # Clean Up & Quality Control\n #\n # Find all of the files and read in the data, store as a single data frame.\n raw_data = pd.concat(list(map(pd.read_csv, files)))\n\n\n # Note that the first three steps only 
apply to the historical & ssp experiments,\n # the idealized experiments do not need to go through these steps.\n #\n # First round of cleaning check the historical dates.\n # Make sure that the historical run starts some time\n # before 1855 & that that it runs until 2014.\n # Subset the Hector historical\n his_info = (raw_data.loc[(raw_data[\"experiment\"] == \"historical\")]\n .groupby([\"model\", \"experiment\", \"ensemble\"])[\"year\"]\n .agg([\"min\", \"max\"]).reset_index())\n his_info[\"min\"] = his_info[\"min\"].astype(int)\n his_info[\"max\"] = his_info[\"max\"].astype(int)\n\n # Make sure the start date is some times before 1855.\n start_yr = his_info[his_info[\"min\"] > 1855].copy()\n to_remove = start_yr[[\"model\", \"experiment\", \"ensemble\"]]\n\n # Make sure that all of historical have data up until 2014\n end_yr = his_info[his_info[\"max\"] < 2014].copy()\n to_remove = to_remove.append(end_yr[[\"model\", \"experiment\", \"ensemble\"]])\n clean_d1 = util.join_exclude(raw_data, to_remove)\n\n # Second round of cleaning check the future dates.\n # Make sure that the future scenarios start at 2015 & run beyond 2100.\n fut_exps = ['ssp245', 'ssp126', 'ssp585', 'ssp119', 'ssp370', 'ssp434', 'ssp534-over', 'ssp460']\n fut_info = (clean_d1.loc[(clean_d1[\"experiment\"].isin(fut_exps))]\n .groupby([\"model\", \"experiment\", \"ensemble\"])[\"year\"]\n .agg([\"min\", \"max\"]).reset_index())\n fut_info[\"min\"] = fut_info[\"min\"].astype(int)\n fut_info[\"max\"] = fut_info[\"max\"].astype(int)\n\n # If the future scenario starts after 2015 drop it.\n start_yr = fut_info[fut_info[\"min\"] > 2015].copy()\n to_remove = start_yr[[\"model\", \"experiment\", \"ensemble\"]]\n\n # Make sure the future scenario runs until 2098 otherwise drop it.\n end_yr = fut_info[fut_info[\"max\"] < 2098].copy()\n to_remove = to_remove.append(end_yr[[\"model\", \"experiment\", \"ensemble\"]])\n clean_d2 = util.join_exclude(clean_d1, to_remove)\n\n # Third round of clean up\n # Make sure that there is data from the historical experiment for each ensemble member with\n # future results.\n exp_en_mod = clean_d2[[\"experiment\", \"ensemble\", \"model\"]].drop_duplicates().copy()\n\n # Separate the data frame of the experiment / ensemble / model information into\n # the historical and non historical experiments.\n hist_ensemble = (exp_en_mod.loc[exp_en_mod[\"experiment\"] == \"historical\"][[\"model\", \"ensemble\"]]\n .drop_duplicates()\n .copy())\n non_hist_ensemble = (exp_en_mod[exp_en_mod[\"experiment\"] != \"historical\"][[\"model\", \"ensemble\"]]\n .drop_duplicates()\n .copy())\n # use an inner join to select the historical ensemble that have future results as well as\n # the future results have have historical results.\n to_keep = non_hist_ensemble.merge(hist_ensemble, how=\"inner\", on=[\"ensemble\", \"model\"])\n\n # Update the raw data table to only include the model / ensembles members that have both a\n # historical and non historical ensemble realization.\n clean_d3 = clean_d2.merge(to_keep, how=\"inner\")\n\n # Before the fourth round of clean up add back in the idealized experiment results.\n idealized_exps = {'1pctCO2', 'abrupt-2xCO2', 'abrupt-4xCO2'}\n idealized_dat = raw_data.loc[raw_data['experiment'].isin(idealized_exps)]\n clean_d3 = pd.concat([clean_d3, idealized_dat])\n\n # Fourth round of cleaning make sure there are no missing dates.\n yrs = (clean_d2.groupby([\"model\", \"experiment\", \"ensemble\"])[\"year\"]\n .agg([\"min\", \"max\", \"count\"])\n .reset_index())\n yrs[\"min\"] = 
yrs[\"min\"].astype(int)\n yrs[\"max\"] = yrs[\"max\"].astype(int)\n yrs[\"count\"] = yrs[\"count\"].astype(int)\n yrs[\"diff\"] = (yrs[\"max\"] - yrs[\"min\"]) + 1\n yrs[\"diff\"] = yrs[\"diff\"].astype(int)\n to_remove = yrs[yrs[\"diff\"] != yrs[\"count\"]]\n clean_d4 = util.join_exclude(clean_d3, to_remove)\n\n # Order the data frame to make sure that all of the years are in order.\n cleaned_data = (clean_d4.sort_values(by=['variable', 'experiment', 'ensemble', 'model', 'year'])\n .reset_index(drop=True))\n\n # Format Data\n #\n # In this section convert from absolute value to an anomaly & concatenate the historical data\n # with the future scenarios.\n data_anomaly = calculate_anomaly(cleaned_data)\n data = paste_historical_data(data_anomaly)\n data = data.sort_values(by=['variable', 'experiment', 'ensemble', 'model', 'year'])\n data = data[[\"variable\", \"experiment\", \"ensemble\", \"model\", \"year\", \"value\"]].reset_index(drop=True)\n\n # Add the z store values to the data frame.\n # Get the pangeo table of contents & assert that the data frame exists and contains information.\n df = pangeo.fetch_pangeo_table()\n df = df.loc[(df[\"table_id\"] == \"Amon\") & # monthly data\n (df[\"grid_label\"] == \"gn\") & # we are only interested in the results returned in the native\n (df[\"variable_id\"] == \"tas\") & # select temperature data\n (df['member_id'].str.contains('p1'))].copy() # select only the members of the p1 physics group\n\n if len(df) <= 0:\n raise Exception('Unable to connect to pangeo, make sure to disconnect from VP')\n\n # Format the pangeo data frame so that it reflects the contents of data.\n pangeo_df = df[[\"source_id\", \"experiment_id\", \"member_id\", \"variable_id\", \"zstore\"]].drop_duplicates()\n pangeo_df = pangeo_df.rename(columns={\"source_id\": \"model\", \"experiment_id\": \"experiment\",\n \"member_id\": \"ensemble\", \"variable_id\": \"variable\"})\n\n # Add the zstore file information to the data frame via a left join.\n data = data.merge(pangeo_df, on=['variable', 'experiment', 'ensemble', 'model'], how=\"inner\")\n\n # Modify the zstore path names to replace the future scn string with historical.\n # TODO replace this for loop it is pretty slow\n new_zstore = []\n for i in data.index:\n # Select the row from the data frame.\n row = data.loc[i]\n\n # Check to see if the zstore needs to be changed based on if it is a future experiment.\n fut_exps = set(['ssp119', 'ssp126', 'ssp245', 'ssp370', 'ssp434', 'ssp460', 'ssp534-over', 'ssp585'])\n change = row[\"experiment\"] in fut_exps\n if change:\n new = row[\"zstore\"].replace(row[\"experiment\"], \"historical\")\n else:\n new = row[\"zstore\"]\n\n new_zstore.append(new)\n\n data[\"zstore\"] = new_zstore\n\n # Save a copy of the tas values, these are the value that will be used to get the\n # tas data chunks. Note that this file has to be compressed so will need to read in\n # using pickle_utils.load()\n\n files = []\n tas_data_dir = pkg_resources.resource_filename('stitches', 'data/tas-data')\n os.mkdir(tas_data_dir)\n for name, group in data.groupby(['model']):\n path = tas_data_dir + '/' + name + '_tas.csv'\n files.append(path)\n group.to_csv(path, index=False)\n\n return files\n" ]
[ [ "pandas.DataFrame", "pandas.concat" ] ]
i2mint/meshed
[ "4201f9efcce4f2859ffc8253811ac9335f21856b" ]
[ "meshed/makers.py" ]
[ "\"\"\"Makers\"\"\"\n\nfrom contextlib import suppress\nfrom typing import Mapping, Iterable, TypeVar, Callable\nfrom itertools import product\nfrom collections import defaultdict\n\n\nT = TypeVar('T')\n\nwith suppress(ModuleNotFoundError, ImportError):\n from numpy.random import randint, choice\n\n def random_graph(n_nodes=7):\n \"\"\"Get a random graph\"\"\"\n nodes = range(n_nodes)\n\n def gen():\n for src in nodes:\n n_dst = randint(0, n_nodes - 1)\n dst = choice(n_nodes, n_dst, replace=False)\n yield src, list(dst)\n\n return dict(gen())\n\n\ndef edge_reversed_graph(\n g: Mapping[T, Iterable[T]],\n dst_nodes_factory: Callable[[], Iterable[T]] = list,\n dst_nodes_append: Callable[[Iterable[T], T], None] = list.append,\n) -> Mapping[T, Iterable[T]]:\n \"\"\"\n >>> g = dict(a='c', b='cd', c='abd', e='')\n >>> assert edge_reversed_graph(g) == {'c': ['a', 'b'], 'd': ['b', 'c'], 'a': ['c'], 'b': ['c'], 'e': []}\n >>> reverse_g_with_sets = edge_reversed_graph(g, set, set.add)\n >>> assert reverse_g_with_sets == {'c': {'a', 'b'}, 'd': {'b', 'c'}, 'a': {'c'}, 'b': {'c'}, 'e': set([])}\n\n Testing border cases\n >>> assert edge_reversed_graph(dict(e='', a='e')) == {'e': ['a'], 'a': []}\n >>> assert edge_reversed_graph(dict(a='e', e='')) == {'e': ['a'], 'a': []}\n \"\"\"\n # Pattern: Groupby logic\n\n d = defaultdict(dst_nodes_factory)\n for src, dst_nodes in g.items():\n d.setdefault(src, dst_nodes_factory()) # add node if not present\n for dst in dst_nodes: # empty iterable does nothing\n dst_nodes_append(d[dst], src)\n return d\n" ]
[ [ "numpy.random.randint", "numpy.random.choice" ] ]
dmitryvinn/SparseConvNet
[ "0bf2476b08e688fa53abf956e4e5232793dea64c" ]
[ "sparseconvnet/denseToSparse.py" ]
[ "# Copyright 2016-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom torch.autograd import Function\nfrom torch.nn import Module\nfrom .utils import *\nfrom .metadata import Metadata\nfrom .sparseConvNetTensor import SparseConvNetTensor\n\nclass DenseToSparse(Module):\n \"\"\"\n Function to convert a Dense Input into a sparse input.\n If possible, avoid using this module; build the hidden layer using InputBatch.\n\n Parameters:\n dimension : of the input field\n \"\"\"\n def __init__(self, dimension):\n Module.__init__(self)\n self.dimension = dimension\n\n def forward(self, input):\n output = SparseConvNetTensor()\n output.metadata = Metadata(self.dimension)\n output.spatial_size = torch.LongTensor(list(input.size()[2:]))\n output.features = DenseToSparseFunction.apply(\n input,\n output.metadata,\n output.spatial_size,\n self.dimension)\n return output\n\n def __repr__(self):\n return 'DenseToSparse(' + str(self.dimension) + ')'\n\n def input_spatial_size(self, out_size):\n return out_size\n\nclass DenseToSparseFunction(Function):\n @staticmethod\n def forward(\n ctx,\n input,\n output_metadata,\n output_spatial_size,\n dimension):\n ctx.dimension = dimension\n aa = input.permute(\n *([0, ] + list(range(2, 2 + dimension)) + [1, ])).clone()\n ctx.aas = aa.size()\n nz = aa.abs().sum(dimension + 1).view(aa.size()[0:-1])\n s = torch.LongTensor(nz.stride()).view(1, dimension + 1)\n nz = nz.nonzero()\n s = s.type_as(nz)\n aa = aa.reshape(-1, input.size(1))\n ctx.aas2 = aa.size()\n r = (nz * s.expand_as(nz)).sum(1).view(-1)\n output_features = aa.index_select(0, r)\n output_metadata.createMetadataForDenseToSparse(\n output_spatial_size,\n nz.cpu(),\n input.size(0))\n ctx.save_for_backward(output_features, r)\n return output_features\n\n @staticmethod\n def backward(ctx, grad_output):\n output_features, r = ctx.saved_tensors\n grad_input = grad_output.new().resize_(\n ctx.aas2).zero_().index_copy_(0, r, grad_output)\n grad_input = grad_input.view(ctx.aas).permute(\n *([0, ctx.dimension + 1] + list(range(1, ctx.dimension + 1))))\n return grad_input, None, None, None\n" ]
[ [ "torch.nn.Module.__init__" ] ]
amitmate/visualwakeword
[ "24412fc830b6f579156bb1106eeffa68e90b02d4" ]
[ "EvalTFLiteModel.py" ]
[ "#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# In[ ]:\r\n\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\ntf.enable_eager_execution()\r\n\r\ndef eval_model(interpreter, coco_ds):\r\n total_seen = 0\r\n num_correct = 0\r\n\r\n for img, label in coco_ds:\r\n total_seen += 1\r\n interpreter.set_tensor(input_index, img)\r\n interpreter.invoke()\r\n predictions = interpreter.get_tensor(output_index)\r\n predictions = (predictions > 0.5).astype(np.uint8)\r\n \r\n if predictions == label.numpy():\r\n num_correct += 1\r\n\r\n if total_seen % 500 == 0:\r\n print(\"Accuracy after %i images: %f\" %\r\n (total_seen, float(num_correct) / float(total_seen)))\r\n\r\n return float(num_correct) / float(total_seen)\r\n\r\n#function to print evaluation accuracy stats on x_test (batchsize,96, 96. 3) and y_test (batch size, 1)\r\ndef eval_data(x_test,y_test) :\r\n images, labels = tf.cast(x_test, tf.float32), y_test\r\n print(images.shape)\r\n print(labels.shape)\r\n coco_ds = tf.data.Dataset.from_tensor_slices((images, labels)).batch(1)\r\n\r\n\r\n interpreter = tf.lite.Interpreter(model_path=\"modelVisualWakeWord.tflite\")\r\n interpreter.allocate_tensors()\r\n input_index = interpreter.get_input_details()[0][\"index\"]\r\n output_index = interpreter.get_output_details()[0][\"index\"]\r\n eval_model(interpreter,coco_ds)\r\n\r\n" ]
[ [ "tensorflow.cast", "tensorflow.enable_eager_execution", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.lite.Interpreter" ] ]
yahu911/DMGCN2.0
[ "a0370dbbdaa756330dc6ff18b58e6f7fa44b3513" ]
[ "layer/gcn.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 16\r\nKeras Implementation of Deep Multiple Graph Convolution Neural Network (DMGCN) model in:\r\nHu Yang, Wei Pan, Zhong Zhuang.\r\n@author: Hu Yang ([email protected])\r\n\"\"\"\r\n\r\nfrom keras.layers import Layer\r\nfrom keras import activations, initializers, constraints\r\nfrom keras import regularizers\r\nimport keras.backend as K\r\nimport tensorflow as tf\r\n\r\nclass GCN(Layer):\r\n\r\n def __init__(self, output_dim, graphs, \r\n activation=None,\r\n use_bias=True,\r\n kernel_initializer='glorot_uniform',\r\n bias_initializer='zeros',\r\n kernel_regularizer=None,\r\n bias_regularizer=None,\r\n activity_regularizer=None,\r\n kernel_constraint=None,\r\n bias_constraint=None, **kwargs):\r\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\r\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\r\n self.graphs = graphs\r\n self.output_dim = output_dim\r\n self.activation = activations.get(activation)\r\n self.use_bias = use_bias\r\n self.kernel_initializer = initializers.get(kernel_initializer)\r\n self.bias_initializer = initializers.get(bias_initializer)\r\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\r\n self.bias_regularizer = regularizers.get(bias_regularizer)\r\n self.activity_regularizer = regularizers.get(activity_regularizer)\r\n self.kernel_constraint = constraints.get(kernel_constraint)\r\n self.bias_constraint = constraints.get(bias_constraint)\r\n self.supports_masking = True\r\n super(GCN, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n # Create a trainable weight variable for this layer.\r\n self.kernel = self.add_weight(name='kernel', \r\n shape=(input_shape[1], self.output_dim),\r\n initializer='uniform',\r\n trainable=True)\r\n if self.use_bias:\r\n self.bias = self.add_weight(shape=(self.output_dim,),\r\n initializer=self.bias_initializer,\r\n name='bias',\r\n regularizer=self.bias_regularizer,\r\n constraint=self.bias_constraint)\r\n else:\r\n self.bias = None\r\n super(GCN, self).build(input_shape) \r\n\r\n def call(self, x):\r\n xl = tf.matmul(tf.cast(x,tf.float32), tf.cast(self.graphs,tf.float32))\r\n xl = K.dot(xl, self.kernel)\r\n\r\n if self.bias:\r\n xl += self.bias\r\n\r\n return self.activation(xl)\r\n\r\n def compute_output_shape(self, input_shape):\r\n return (input_shape[0], self.output_dim)" ]
[ [ "tensorflow.cast" ] ]
moliushang/wireframe_
[ "57dd774e20740af9aadd7151d64b40cc915abb5c" ]
[ "linepx/datasets/transforms.py" ]
[ "import math\nimport numpy as np\nimport torch\nimport random\n\n# ipt is nparray with dimension (height, width, channel)\n# xml is nparray with dimension (height, width)\n\ndef addNoise(ipt, miu, std):\n noise = np.random.normal(miu, std, ipt.shape)\n noise = np.float32(noise)\n return ipt + noise\n\n\ndef thAddNoise(ipt, miu, std):\n noise = np.random.normal(miu, std, ipt.size())\n noise = torch.from_numpy(np.float32(noise))\n return ipt + noise\n\ndef scaleRGB(ipt):\n return np.float32(ipt/255)\n\ndef unScaleRGB(ipt):\n opt = ipt*255\n opt = opt.astype(np.uint8)\n return opt\n\ndef normalize(ipt, mean, std):\n ipt[:][:][0] = (ipt[:][:][0] - mean[0]) / std[0]\n ipt[:][:][1] = (ipt[:][:][1] - mean[1]) / std[1]\n ipt[:][:][2] = (ipt[:][:][2] - mean[2]) / std[2]\n return ipt\n\ndef unNormalize(ipt, mean, std):\n ipt[:][:][0] = (ipt[:][:][0] * std[0]) + mean[0]\n ipt[:][:][1] = (ipt[:][:][1] * std[1]) + mean[1]\n ipt[:][:][2] = (ipt[:][:][2] * std[2]) + mean[2]\n return ipt\n\ndef randomFlip(ipt, xml):\n if random.uniform(0, 1) > 0.5:\n ipt = np.fliplr(ipt).copy()\n xml = np.fliplr(xml).copy()\n return ipt, xml\n\ndef randomCrop(ipt, xml, size):\n origH = ipt.shape[0]\n origW = ipt.shape[1]\n newH = size[0]\n newW = size[1]\n startH = random.randint(0, origH - newH)\n startW = random.randint(0, origW - newW)\n ipt = ipt[startH : startH+newH, startW : startW+newW, :]\n xml = xml[startH : startH+newH, startW : startW+newW]\n return ipt, xml\n\ndef randomSizeCrop(ipt, xml, LowBound):\n newH = math.floor(random.uniform(LowBound, 1)*ipt.shape[0])\n while newH%8 != 0:\n newH -= 1\n newW = math.floor(random.uniform(LowBound, 1)*ipt.shape[1])\n while newW%8 != 0:\n newW -= 1\n return randomCrop(ipt, xml, (newH, newW))\n" ]
[ [ "numpy.random.normal", "numpy.fliplr", "numpy.float32" ] ]
baderex/AIArtathon
[ "e72c7ef73bbc2eb0eaf9cc906e34d801cdd13d15" ]
[ "src/projector.py" ]
[ "# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.\n#\n# This work is made available under the Nvidia Source Code License-NC.\n# To view a copy of this license, visit\n# https://nvlabs.github.io/stylegan2/license.html\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\nimport numpy as np\nimport tensorflow as tf\nimport dnnlib\nimport dnnlib.tflib as tflib\n\nfrom training import misc\n\n#----------------------------------------------------------------------------\n\nclass Projector:\n def __init__(self, steps=1000):\n self.num_steps = steps\n self.dlatent_avg_samples = 10000\n self.initial_learning_rate = 0.1\n self.initial_noise_factor = 0.05\n self.lr_rampdown_length = 0.25\n self.lr_rampup_length = 0.05\n self.noise_ramp_length = 0.75\n self.regularize_noise_weight = 1e5\n self.verbose = False\n self.clone_net = True\n\n self._Gs = None\n self._minibatch_size = None\n self._dlatent_avg = None\n self._dlatent_std = None\n self._noise_vars = None\n self._noise_init_op = None\n self._noise_normalize_op = None\n self._dlatents_var = None\n self._noise_in = None\n self._dlatents_expr = None\n self._images_expr = None\n self._target_images_var = None\n self._lpips = None\n self._dist = None\n self._loss = None\n self._reg_sizes = None\n self._lrate_in = None\n self._opt = None\n self._opt_step = None\n self._cur_step = None\n\n def _info(self, *args):\n if self.verbose:\n print('Projector:', *args)\n\n def set_network(self, Gs, minibatch_size=1):\n assert minibatch_size == 1\n self._Gs = Gs\n self._minibatch_size = minibatch_size\n if self._Gs is None:\n return\n if self.clone_net:\n self._Gs = self._Gs.clone()\n\n # Find dlatent stats.\n self._info('Finding W midpoint and stddev using %d samples...' % self.dlatent_avg_samples)\n latent_samples = np.random.RandomState(123).randn(self.dlatent_avg_samples, *self._Gs.input_shapes[0][1:])\n dlatent_samples = self._Gs.components.mapping.run(latent_samples, None) # [N, 1, 512]\n self._dlatent_avg = np.mean(dlatent_samples, axis=0, keepdims=True) # [1, 1, 512]\n self._dlatent_std = (np.sum((dlatent_samples - self._dlatent_avg) ** 2) / self.dlatent_avg_samples) ** 0.5\n self._info('std = %g' % self._dlatent_std)\n\n # Find noise inputs.\n self._info('Setting up noise inputs...')\n self._noise_vars = []\n noise_init_ops = []\n noise_normalize_ops = []\n while True:\n n = 'G_synthesis/noise%d' % len(self._noise_vars)\n if not n in self._Gs.vars:\n break\n v = self._Gs.vars[n]\n self._noise_vars.append(v)\n noise_init_ops.append(tf.assign(v, tf.random_normal(tf.shape(v), dtype=tf.float32)))\n noise_mean = tf.reduce_mean(v)\n noise_std = tf.reduce_mean((v - noise_mean)**2)**0.5\n noise_normalize_ops.append(tf.assign(v, (v - noise_mean) / noise_std))\n self._info(n, v)\n self._noise_init_op = tf.group(*noise_init_ops)\n self._noise_normalize_op = tf.group(*noise_normalize_ops)\n\n # Image output graph.\n self._info('Building image output graph...')\n self._dlatents_var = tf.Variable(tf.zeros([self._minibatch_size] + list(self._dlatent_avg.shape[1:])), name='dlatents_var')\n self._noise_in = tf.placeholder(tf.float32, [], name='noise_in')\n dlatents_noise = tf.random.normal(shape=self._dlatents_var.shape) * self._noise_in\n # self._dlatents_expr = tf.tile(self._dlatents_var + dlatents_noise, [1, self._Gs.components.synthesis.input_shape[1], 1])\n self._dlatents_expr = self._dlatents_var + dlatents_noise\n self._images_expr = self._Gs.components.synthesis.get_output_for(self._dlatents_expr, randomize_noise=False)\n\n # Downsample image to 
256x256 if it's larger than that. VGG was built for 224x224 images.\n proc_images_expr = (self._images_expr[:,:3,:,:] + 1) * (255 / 2) # go uint range, fix to rgb colospace\n sh = proc_images_expr.shape.as_list()\n if sh[2] > 256:\n factor = sh[2] // 256\n proc_images_expr = tf.reduce_mean(tf.reshape(proc_images_expr, [-1, sh[1], sh[2] // factor, factor, sh[2] // factor, factor]), axis=[3,5])\n\n # Loss graph.\n self._info('Building loss graph...')\n self._target_images_var = tf.Variable(tf.zeros(proc_images_expr.shape), name='target_images_var')\n if self._lpips is None:\n vgg_file = 'models/vgg/vgg16_zhang_perceptual.pkl'\n if os.path.isfile(vgg_file) and os.stat(vgg_file).st_size == 58871973:\n self._lpips = misc.load_pkl(vgg_file)\n else:\n self._lpips = misc.load_pkl('https://drive.google.com/uc?id=1N2-m9qszOeVC9Tq77WxsLnuWwOedQiD2')\n \n self._dist = self._lpips.get_output_for(proc_images_expr, self._target_images_var)\n self._loss = tf.reduce_sum(self._dist)\n\n # Noise regularization graph.\n self._info('Building noise regularization graph...')\n reg_loss = 0.0\n for v in self._noise_vars:\n sz = v.shape[2]\n while True:\n reg_loss += tf.reduce_mean(v * tf.roll(v, shift=1, axis=3))**2 + tf.reduce_mean(v * tf.roll(v, shift=1, axis=2))**2\n if sz <= 8:\n break # Small enough already\n v = tf.reshape(v, [1, 1, sz//2, 2, sz//2, 2]) # Downscale\n v = tf.reduce_mean(v, axis=[3, 5])\n sz = sz // 2\n self._loss += reg_loss * self.regularize_noise_weight\n\n # Optimizer.\n self._info('Setting up optimizer...')\n self._lrate_in = tf.placeholder(tf.float32, [], name='lrate_in')\n self._opt = dnnlib.tflib.Optimizer(learning_rate=self._lrate_in)\n self._opt.register_gradients(self._loss, [self._dlatents_var] + self._noise_vars)\n self._opt_step = self._opt.apply_updates()\n\n def run(self, target_images):\n # Run to completion.\n self.start(target_images)\n while self._cur_step < self.num_steps:\n self.step()\n\n # Collect results.\n pres = dnnlib.EasyDict()\n pres.dlatents = self.get_dlatents()\n pres.noises = self.get_noises()\n pres.images = self.get_images()\n return pres\n\n def start(self, target_images):\n assert self._Gs is not None\n\n # Prepare target images.\n self._info('Preparing target images...')\n target_images = np.asarray(target_images, dtype='float32')\n target_images = (target_images + 1) * (255 / 2)\n sh = target_images.shape\n assert sh[0] == self._minibatch_size\n if sh[2] > self._target_images_var.shape[2]:\n factor = sh[2] // self._target_images_var.shape[2]\n target_images = np.reshape(target_images, [-1, sh[1], sh[2] // factor, factor, sh[3] // factor, factor]).mean((3, 5))\n\n # Initialize optimization state.\n self._info('Initializing optimization state...')\n tflib.set_vars({self._target_images_var: target_images, self._dlatents_var: np.tile(self._dlatent_avg, [self._minibatch_size, 1, 1])})\n tflib.run(self._noise_init_op)\n self._opt.reset_optimizer_state()\n self._cur_step = 0\n\n def step(self):\n assert self._cur_step is not None\n if self._cur_step >= self.num_steps:\n return\n if self._cur_step == 0:\n self._info('Running...')\n\n # Hyperparameters.\n t = self._cur_step / self.num_steps\n noise_strength = self._dlatent_std * self.initial_noise_factor * max(0.0, 1.0 - t / self.noise_ramp_length) ** 2\n lr_ramp = min(1.0, (1.0 - t) / self.lr_rampdown_length)\n lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)\n lr_ramp = lr_ramp * min(1.0, t / self.lr_rampup_length)\n learning_rate = self.initial_learning_rate * lr_ramp\n\n # Train.\n feed_dict = {self._noise_in: 
noise_strength, self._lrate_in: learning_rate}\n _, dist_value, loss_value = tflib.run([self._opt_step, self._dist, self._loss], feed_dict)\n tflib.run(self._noise_normalize_op)\n\n # Print status.\n self._cur_step += 1\n if self._cur_step == self.num_steps or self._cur_step % 10 == 0:\n self._info('%-8d%-12g%-12g' % (self._cur_step, dist_value, loss_value))\n if self._cur_step == self.num_steps:\n self._info('Done.')\n\n def get_cur_step(self):\n return self._cur_step\n\n def get_dlatents(self):\n return tflib.run(self._dlatents_expr, {self._noise_in: 0})\n\n def get_noises(self):\n return tflib.run(self._noise_vars)\n\n def get_images(self):\n return tflib.run(self._images_expr, {self._noise_in: 0})\n\n#----------------------------------------------------------------------------\n" ]
[ [ "numpy.sum", "tensorflow.placeholder", "tensorflow.zeros", "numpy.tile", "tensorflow.shape", "tensorflow.reshape", "tensorflow.roll", "numpy.reshape", "tensorflow.reduce_mean", "numpy.asarray", "numpy.cos", "numpy.random.RandomState", "tensorflow.assign", "tensorflow.random.normal", "tensorflow.group", "tensorflow.reduce_sum", "numpy.mean" ] ]
tmtmaj/Exploiting-PrLM-for-NLG-tasks
[ "e8752593d3ee881cf9c0fb5ed26d26fcb02e6dd5", "e8752593d3ee881cf9c0fb5ed26d26fcb02e6dd5" ]
[ "fairseq/models/bart/hub_interface.py", "fairseq/data/denoising_dataset.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\r\n#\r\n# This source code is licensed under the MIT license found in the\r\n# LICENSE file in the root directory of this source tree.\r\n\r\nimport copy\r\nimport logging\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom typing import List\r\n\r\nfrom fairseq import utils\r\nfrom fairseq.data import encoders\r\n\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass BARTHubInterface(nn.Module):\r\n \"\"\"A simple PyTorch Hub interface to BART.\r\n\r\n Usage: https://github.com/pytorch/fairseq/tree/master/examples/BART\r\n \"\"\"\r\n\r\n def __init__(self, args, task, model):\r\n super().__init__()\r\n self.args = args\r\n self.task = task\r\n self.model = model\r\n\r\n self.bpe = encoders.build_bpe(args)\r\n\r\n self.max_positions = min(utils.resolve_max_positions(\r\n self.task.max_positions(),\r\n self.model.max_positions(),\r\n ))\r\n\r\n # this is useful for determining the device\r\n self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float))\r\n\r\n @property\r\n def device(self):\r\n return self._float_tensor.device\r\n\r\n def encode(self, sentence: str, *addl_sentences, no_separator=True) -> torch.LongTensor:\r\n \"\"\"\r\n BPE-encode a sentence (or multiple sentences).\r\n\r\n Every sequence begins with a beginning-of-sentence (`<s>`) symbol.\r\n Every sentence ends with an end-of-sentence (`</s>`).\r\n\r\n Example (single sentence): `<s> a b c </s>`\r\n Example (sentence pair): `<s> d e f </s> 1 2 3 </s>`\r\n\r\n The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE\r\n requires leading spaces. For example::\r\n\r\n >>> bart.encode('Hello world').tolist()\r\n [0, 31414, 232, 2]\r\n >>> bart.encode(' world').tolist()\r\n [0, 232, 2]\r\n >>> bart.encode('world').tolist()\r\n [0, 8331, 2]\r\n \"\"\"\r\n tokens = self.bpe.encode(sentence)\r\n if len(tokens.split(' ')) > self.max_positions - 2:\r\n tokens = ' '.join(tokens.split(' ')[:self.max_positions - 2])\r\n bpe_sentence = '<s> ' + tokens + ' </s>'\r\n for s in addl_sentences:\r\n bpe_sentence += (' </s>' if not no_separator else '')\r\n bpe_sentence += ' ' + self.bpe.encode(s) + ' </s>'\r\n tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False)\r\n return tokens.long()\r\n\r\n def decode(self, tokens: torch.LongTensor):\r\n assert tokens.dim() == 1\r\n tokens = tokens.cpu().numpy()\r\n if tokens[0] == self.task.source_dictionary.bos():\r\n tokens = tokens[1:] # remove <s>\r\n eos_mask = (tokens == self.task.source_dictionary.eos())\r\n doc_mask = eos_mask[1:] & eos_mask[:-1]\r\n sentences = np.split(tokens, doc_mask.nonzero()[0] + 1)\r\n sentences = [self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences]\r\n if len(sentences) == 1:\r\n return sentences[0]\r\n return sentences\r\n\r\n def _build_sample(self, src_tokens: List[torch.LongTensor]):\r\n # assert torch.is_tensor(src_tokens)\r\n dataset = self.task.build_dataset_for_inference(\r\n src_tokens,\r\n [x.numel() for x in src_tokens],\r\n )\r\n sample = dataset.collater(dataset)\r\n sample = utils.apply_to_sample(\r\n lambda tensor: tensor.to(self.device),\r\n sample\r\n )\r\n return sample\r\n\r\n def sample(self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs) -> str:\r\n input = [self.encode(sentence) for sentence in sentences]\r\n hypos = self.generate(input, beam, verbose, **kwargs)\r\n return [self.decode(x['tokens']) for x in hypos]\r\n\r\n def 
generate(self, tokens: List[torch.LongTensor], beam: int = 5, verbose: bool = False, **kwargs) -> torch.LongTensor:\r\n sample = self._build_sample(tokens)\r\n\r\n # build generator using current args as well as any kwargs\r\n gen_args = copy.copy(self.args)\r\n gen_args.beam = beam\r\n for k, v in kwargs.items():\r\n setattr(gen_args, k, v)\r\n generator = self.task.build_generator([self.model], gen_args)\r\n translations = self.task.inference_step(\r\n generator,\r\n [self.model],\r\n sample,\r\n prefix_tokens=sample['net_input']['src_tokens'].new_zeros((len(tokens), 1)).fill_(self.task.source_dictionary.bos()),\r\n )\r\n\r\n if verbose:\r\n src_str_with_unk = self.string(tokens)\r\n logger.info('S\\t{}'.format(src_str_with_unk))\r\n\r\n def getarg(name, default):\r\n return getattr(gen_args, name, getattr(self.args, name, default))\r\n\r\n # Process top predictions\r\n hypos = [x[0] for x in translations]\r\n hypos = [v for _, v in sorted(zip(sample['id'].tolist(), hypos))]\r\n return hypos\r\n\r\n def extract_features(self, tokens: torch.LongTensor, return_all_hiddens: bool = False) -> torch.Tensor:\r\n if tokens.dim() == 1:\r\n tokens = tokens.unsqueeze(0)\r\n if tokens.size(-1) > min(self.model.max_positions()):\r\n raise ValueError('tokens exceeds maximum length: {} > {}'.format(\r\n tokens.size(-1), self.model.max_positions()\r\n ))\r\n tokens.to(device=self.device),\r\n prev_output_tokens = tokens.clone()\r\n\r\n prev_output_tokens[:, 0] = tokens.gather(\r\n 1,\r\n (tokens.ne(self.task.source_dictionary.pad()).sum(dim=1)- 1).unsqueeze(-1),\r\n ).squeeze()\r\n\r\n prev_output_tokens[:, 1:] = tokens[:, :-1]\r\n features, extra = self.model(\r\n src_tokens=tokens,\r\n src_lengths=None,\r\n prev_output_tokens=prev_output_tokens,\r\n features_only=True,\r\n return_all_hiddens=return_all_hiddens,\r\n )\r\n if return_all_hiddens:\r\n # convert from T x B x C -> B x T x C\r\n inner_states = extra['inner_states']\r\n return [inner_state.transpose(0, 1) for inner_state in inner_states]\r\n else:\r\n return features # just the last layer's features\r\n\r\n def register_classification_head(\r\n self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs\r\n ):\r\n self.model.register_classification_head(\r\n name, num_classes=num_classes, embedding_size=embedding_size, **kwargs\r\n )\r\n\r\n def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False):\r\n if tokens.dim() == 1:\r\n tokens = tokens.unsqueeze(0)\r\n features = self.extract_features(tokens.to(device=self.device))\r\n sentence_representation = features[\r\n tokens.eq(self.task.source_dictionary.eos()), :\r\n ].view(features.size(0), -1, features.size(-1))[:, -1, :]\r\n\r\n logits = self.model.classification_heads[head](sentence_representation)\r\n if return_logits:\r\n return logits\r\n return F.log_softmax(logits, dim=-1)\r\n", "# Copyright (c) Facebook, Inc. and its affiliates.\r\n#\r\n# This source code is licensed under the MIT license found in the\r\n# LICENSE file in the root directory of this source tree.\r\n\r\nimport numpy as np\r\nimport torch\r\nimport math\r\n\r\nfrom . 
import data_utils, FairseqDataset\r\n\r\n\r\ndef collate(\r\n samples,\r\n pad_idx,\r\n eos_idx,\r\n vocab,\r\n left_pad_source=False,\r\n left_pad_target=False,\r\n input_feeding=True,\r\n):\r\n assert input_feeding\r\n if len(samples) == 0:\r\n return {}\r\n\r\n def merge(key, left_pad, move_eos_to_beginning=False):\r\n return data_utils.collate_tokens(\r\n [s[key] for s in samples],\r\n pad_idx,\r\n eos_idx=None, # use eos_idx of each sample instead of vocab.eos()\r\n left_pad=left_pad,\r\n move_eos_to_beginning=move_eos_to_beginning,\r\n )\r\n\r\n id = torch.LongTensor([s['id'] for s in samples])\r\n src_tokens = merge('source', left_pad=left_pad_source)\r\n # sort by descending source length\r\n src_lengths = torch.LongTensor([s['source'].numel() for s in samples])\r\n src_lengths, sort_order = src_lengths.sort(descending=True)\r\n id = id.index_select(0, sort_order)\r\n src_tokens = src_tokens.index_select(0, sort_order)\r\n\r\n prev_output_tokens = None\r\n target = None\r\n if samples[0].get('target', None) is not None:\r\n target = merge('target', left_pad=left_pad_target)\r\n target = target.index_select(0, sort_order)\r\n ntokens = sum(len(s['target']) for s in samples)\r\n\r\n if input_feeding:\r\n # we create a shifted version of targets for feeding the\r\n # previous output token(s) into the next decoder step\r\n prev_output_tokens = merge(\r\n 'target',\r\n left_pad=left_pad_target,\r\n move_eos_to_beginning=True,\r\n )\r\n prev_output_tokens = prev_output_tokens.index_select(0, sort_order)\r\n else:\r\n ntokens = sum(len(s['source']) for s in samples)\r\n\r\n batch = {\r\n 'id': id,\r\n 'ntokens': ntokens,\r\n 'net_input': {\r\n 'src_tokens': src_tokens,\r\n 'src_lengths': src_lengths,\r\n },\r\n 'target': target,\r\n 'nsentences': samples[0]['source'].size(0),\r\n }\r\n if prev_output_tokens is not None:\r\n batch['net_input']['prev_output_tokens'] = prev_output_tokens\r\n\r\n return batch\r\n\r\n\r\nclass DenoisingDataset(FairseqDataset):\r\n \"\"\"\r\n A wrapper around TokenBlockDataset for BART dataset.\r\n\r\n Args:\r\n dataset (TokenBlockDataset): dataset to wrap\r\n sizes (List[int]): sentence lengths\r\n vocab (~fairseq.data.Dictionary): vocabulary\r\n mask_idx (int): dictionary index used for masked token\r\n mask_whole_words: only mask whole words. This should be a byte mask\r\n over vocab indices, indicating whether it is the beginning of a\r\n word. 
We will extend any mask to encompass the whole word.\r\n shuffle (bool, optional): shuffle the elements before batching.\r\n Default: ``True``\r\n seed: Seed for random number generator for reproducibility.\r\n args: argparse arguments.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n dataset,\r\n sizes,\r\n vocab,\r\n mask_idx,\r\n mask_whole_words,\r\n shuffle,\r\n seed,\r\n args,\r\n eos=None\r\n ):\r\n self.dataset = dataset\r\n\r\n self.sizes = sizes\r\n\r\n self.vocab = vocab\r\n self.shuffle = shuffle\r\n self.seed = seed\r\n self.mask_idx = mask_idx\r\n self.mask_whole_word = mask_whole_words\r\n self.mask_ratio = args.mask\r\n self.random_ratio = args.mask_random\r\n self.insert_ratio = args.insert\r\n self.rotate_ratio = args.rotate\r\n self.permute_sentence_ratio = args.permute_sentences\r\n self.eos = (eos if eos is not None else vocab.eos())\r\n\r\n if args.bpe != 'gpt2':\r\n self.full_stop_index = self.vocab.eos()\r\n else:\r\n assert args.bpe == 'gpt2'\r\n self.full_stop_index = self.vocab.index('13')\r\n\r\n self.replace_length = args.replace_length\r\n if not self.replace_length in [-1, 0, 1]:\r\n raise ValueError(f'invalid arg: replace_length={self.replace_length}')\r\n if not args.mask_length in ['subword', 'word', 'span-poisson']:\r\n raise ValueError(f'invalid arg: mask-length={args.mask_length}')\r\n if args.mask_length == 'subword' and not args.replace_length in [0, 1]:\r\n raise ValueError(f'if using subwords, use replace-length=1 or 0')\r\n\r\n self.mask_span_distribution = None\r\n if args.mask_length == 'span-poisson':\r\n _lambda = args.poisson_lambda\r\n\r\n lambda_to_the_k = 1\r\n e_to_the_minus_lambda = math.exp(-_lambda)\r\n k_factorial = 1\r\n ps = []\r\n for k in range(0, 128):\r\n ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)\r\n lambda_to_the_k *= _lambda\r\n k_factorial *= (k + 1)\r\n if ps[-1] < 0.0000001:\r\n break\r\n ps = torch.FloatTensor(ps)\r\n self.mask_span_distribution = torch.distributions.Categorical(ps)\r\n\r\n self.epoch = 0\r\n\r\n def set_epoch(self, epoch, **unused):\r\n self.epoch = epoch\r\n\r\n def __getitem__(self, index):\r\n with data_utils.numpy_seed(self.seed, self.epoch, index):\r\n tokens = self.dataset[index]\r\n assert tokens[-1] == self.eos\r\n source, target = tokens, tokens.clone()\r\n\r\n if self.permute_sentence_ratio > 0.0:\r\n source = self.permute_sentences(source, self.permute_sentence_ratio)\r\n\r\n if self.mask_ratio > 0:\r\n source = self.add_whole_word_mask(source, self.mask_ratio)\r\n\r\n if self.insert_ratio > 0:\r\n source = self.add_insertion_noise(source, self.insert_ratio)\r\n\r\n if self.rotate_ratio > 0.0 and np.random.random() < self.rotate_ratio:\r\n source = self.add_rolling_noise(source)\r\n\r\n assert (source >= 0).all()\r\n assert (source[1:-1] >= 1).all()\r\n assert (source <= len(self.vocab)).all()\r\n assert source[0] == self.vocab.bos()\r\n assert source[-1] == self.eos\r\n return {\r\n 'id': index,\r\n 'source': source,\r\n 'target': target,\r\n }\r\n\r\n def __len__(self):\r\n return len(self.dataset)\r\n\r\n def permute_sentences(self, source, p=1.0):\r\n full_stops = (source == self.full_stop_index)\r\n # Pretend it ends with a full stop so last span is a sentence\r\n full_stops[-2] = 1\r\n\r\n # Tokens that are full stops, where the previous token is not\r\n sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero() + 2\r\n result = source.clone()\r\n\r\n num_sentences = sentence_ends.size(0)\r\n num_to_permute = math.ceil((num_sentences * 2 * p) / 2.0)\r\n 
substitutions = torch.randperm(num_sentences)[:num_to_permute]\r\n ordering = torch.arange(0, num_sentences)\r\n ordering[substitutions] = substitutions[torch.randperm(num_to_permute)]\r\n\r\n # Ignore <bos> at start\r\n index = 1\r\n for i in ordering:\r\n sentence = source[(sentence_ends[i - 1] if i > 0 else 1):sentence_ends[i]]\r\n result[index:index + sentence.size(0)] = sentence\r\n index += sentence.size(0)\r\n return result\r\n\r\n def word_starts(self, source):\r\n if self.mask_whole_word is not None:\r\n is_word_start = self.mask_whole_word.gather(0, source)\r\n else:\r\n is_word_start = torch.ones(source.size())\r\n is_word_start[0] = 0\r\n is_word_start[-1] = 0\r\n return is_word_start\r\n\r\n def add_whole_word_mask(self, source, p):\r\n is_word_start = self.word_starts(source)\r\n num_to_mask = int(math.ceil(is_word_start.float().sum() * p))\r\n num_inserts = 0\r\n if num_to_mask == 0:\r\n return source\r\n\r\n if self.mask_span_distribution is not None:\r\n lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,))\r\n\r\n # Make sure we have enough to mask\r\n cum_length = torch.cumsum(lengths, 0)\r\n while cum_length[-1] < num_to_mask:\r\n lengths = torch.cat([lengths, self.mask_span_distribution.sample(sample_shape=(num_to_mask,))], dim=0)\r\n cum_length = torch.cumsum(lengths, 0)\r\n\r\n # Trim to masking budget\r\n i = 0\r\n while cum_length[i] < num_to_mask:\r\n i += 1\r\n lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1])\r\n num_to_mask = i + 1\r\n lengths = lengths[:num_to_mask]\r\n\r\n # Handle 0-length mask (inserts) separately\r\n lengths = lengths[lengths > 0]\r\n num_inserts = num_to_mask - lengths.size(0)\r\n num_to_mask -= num_inserts\r\n if num_to_mask == 0:\r\n return self.add_insertion_noise(source, num_inserts / source.size(0))\r\n\r\n assert (lengths > 0).all()\r\n else:\r\n lengths = torch.ones((num_to_mask,)).long()\r\n assert is_word_start[-1] == 0\r\n word_starts = is_word_start.nonzero()\r\n indices = word_starts[torch.randperm(word_starts.size(0))[:num_to_mask]].squeeze(1)\r\n mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio\r\n\r\n source_length = source.size(0)\r\n assert source_length - 1 not in indices\r\n to_keep = torch.ones(source_length, dtype=torch.bool)\r\n is_word_start[-1] = 255 # acts as a long length, so spans don't go over the end of doc\r\n if self.replace_length == 0:\r\n to_keep[indices] = 0\r\n else:\r\n # keep index, but replace it with [MASK]\r\n source[indices] = self.mask_idx\r\n source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))\r\n\r\n if self.mask_span_distribution is not None:\r\n assert len(lengths.size()) == 1\r\n assert lengths.size() == indices.size()\r\n lengths -= 1\r\n while indices.size(0) > 0:\r\n assert lengths.size() == indices.size()\r\n lengths -= is_word_start[indices + 1].long()\r\n uncompleted = lengths >= 0\r\n indices = indices[uncompleted] + 1\r\n mask_random = mask_random[uncompleted]\r\n lengths = lengths[uncompleted]\r\n if self.replace_length != -1:\r\n # delete token\r\n to_keep[indices] = 0\r\n else:\r\n # keep index, but replace it with [MASK]\r\n source[indices] = self.mask_idx\r\n source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))\r\n else:\r\n # A bit faster when all lengths are 1\r\n while indices.size(0) > 0:\r\n uncompleted = is_word_start[indices + 1] == 0\r\n indices = indices[uncompleted] + 1\r\n mask_random = mask_random[uncompleted]\r\n if 
self.replace_length != -1:\r\n # delete token\r\n to_keep[indices] = 0\r\n else:\r\n # keep index, but replace it with [MASK]\r\n source[indices] = self.mask_idx\r\n source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))\r\n\r\n assert source_length - 1 not in indices\r\n\r\n source = source[to_keep]\r\n\r\n if num_inserts > 0:\r\n source = self.add_insertion_noise(source, num_inserts / source.size(0))\r\n\r\n return source\r\n\r\n def add_permuted_noise(self, tokens, p):\r\n num_words = len(tokens)\r\n num_to_permute = math.ceil(((num_words * 2) * p) / 2.0)\r\n substitutions = torch.randperm(num_words - 2)[:num_to_permute] + 1\r\n tokens[substitutions] = tokens[substitutions[torch.randperm(num_to_permute)]]\r\n return tokens\r\n\r\n def add_rolling_noise(self, tokens):\r\n offset = np.random.randint(1, max(1, tokens.size(-1) - 1) + 1)\r\n tokens = torch.cat(\r\n (tokens[0:1], tokens[offset:-1], tokens[1:offset], tokens[-1:]),\r\n dim=0,\r\n )\r\n return tokens\r\n\r\n def add_insertion_noise(self, tokens, p):\r\n if p == 0.0:\r\n return tokens\r\n\r\n num_tokens = len(tokens)\r\n n = int(math.ceil(num_tokens * p))\r\n\r\n noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1\r\n noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)\r\n noise_mask[noise_indices] = 1\r\n result = torch.LongTensor(n + len(tokens)).fill_(-1)\r\n\r\n num_random = int(math.ceil(n * self.random_ratio))\r\n result[noise_indices[num_random:]] = self.mask_idx\r\n result[noise_indices[:num_random]] = torch.randint(low=1, high=len(self.vocab), size=(num_random,))\r\n\r\n result[~noise_mask] = tokens\r\n\r\n assert (result >= 0).all()\r\n return result\r\n\r\n def collater(self, samples):\r\n \"\"\"Merge a list of samples to form a mini-batch.\r\n Args:\r\n samples (List[dict]): samples to collate\r\n Returns:\r\n dict: a mini-batch of data\r\n \"\"\"\r\n return collate(samples, self.vocab.pad(), self.eos, self.vocab)\r\n\r\n def num_tokens(self, index):\r\n \"\"\"Return the number of tokens in a sample. This value is used to\r\n enforce ``--max-tokens`` during batching.\"\"\"\r\n return self.sizes[index]\r\n\r\n def size(self, index):\r\n \"\"\"Return an example's size as a float or tuple. This value is used when\r\n filtering a dataset with ``--max-positions``.\"\"\"\r\n return self.sizes[index]\r\n\r\n def ordered_indices(self):\r\n \"\"\"Return an ordered list of indices. Batches will be constructed based\r\n on this order.\"\"\"\r\n if self.shuffle:\r\n indices = np.random.permutation(len(self))\r\n else:\r\n indices = np.arange(len(self))\r\n return indices[np.argsort(self.sizes[indices], kind='mergesort')]\r\n\r\n def prefetch(self, indices):\r\n self.src.prefetch(indices)\r\n self.tgt.prefetch(indices)\r\n\r\n @property\r\n def supports_prefetch(self):\r\n return (\r\n hasattr(self.src, 'supports_prefetch')\r\n and self.src.supports_prefetch\r\n and hasattr(self.tgt, 'supports_prefetch')\r\n and self.tgt.supports_prefetch\r\n )\r\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.tensor" ], [ "torch.ones", "torch.FloatTensor", "torch.distributions.Categorical", "numpy.argsort", "numpy.random.random", "torch.cumsum", "torch.arange", "torch.randperm", "torch.zeros", "torch.LongTensor", "torch.cat" ] ]
lighthall-lab/nipype-legacy
[ "6c23846aa50c2ce34653f9517d95f02b071dc52d" ]
[ "nipype/pipeline/engine/tests/test_utils.py" ]
[ "# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Tests for the engine utils module\n\"\"\"\nfrom __future__ import print_function, division, unicode_literals, absolute_import\nfrom builtins import range, open\n\nimport os\nfrom copy import deepcopy\nfrom shutil import rmtree\nimport pytest\n\nfrom ... import engine as pe\nfrom ....interfaces import base as nib\nfrom ....interfaces import utility as niu\nfrom .... import config\nfrom ..utils import merge_dict, clean_working_directory, write_workflow_prov\n\n\ndef test_identitynode_removal():\n\n def test_function(arg1, arg2, arg3):\n import numpy as np\n return (np.array(arg1) + arg2 + arg3).tolist()\n\n wf = pe.Workflow(name=\"testidentity\")\n\n n1 = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='src')\n n1.iterables = ('b', [0, 1, 2, 3])\n n1.inputs.a = [0, 1, 2, 3]\n\n n2 = pe.Node(niu.Select(), name='selector')\n wf.connect(n1, ('a', test_function, 1, -1), n2, 'inlist')\n wf.connect(n1, 'b', n2, 'index')\n\n n3 = pe.Node(niu.IdentityInterface(fields=['c', 'd']), name='passer')\n n3.inputs.c = [1, 2, 3, 4]\n wf.connect(n2, 'out', n3, 'd')\n\n n4 = pe.Node(niu.Select(), name='selector2')\n wf.connect(n3, ('c', test_function, 1, -1), n4, 'inlist')\n wf.connect(n3, 'd', n4, 'index')\n\n fg = wf._create_flat_graph()\n wf._set_needed_outputs(fg)\n eg = pe.generate_expanded_graph(deepcopy(fg))\n assert len(eg.nodes()) == 8\n\n\ndef test_clean_working_directory(tmpdir):\n class OutputSpec(nib.TraitedSpec):\n files = nib.traits.List(nib.File)\n others = nib.File()\n\n class InputSpec(nib.TraitedSpec):\n infile = nib.File()\n outputs = OutputSpec()\n inputs = InputSpec()\n\n wd = str(tmpdir)\n filenames = ['file.hdr', 'file.img', 'file.BRIK', 'file.HEAD',\n '_0x1234.json', 'foo.txt']\n outfiles = []\n for filename in filenames:\n outfile = os.path.join(wd, filename)\n with open(outfile, 'wt') as fp:\n fp.writelines('dummy')\n outfiles.append(outfile)\n outputs.files = outfiles[:4:2]\n outputs.others = outfiles[5]\n inputs.infile = outfiles[-1]\n needed_outputs = ['files']\n config.set_default_config()\n assert os.path.exists(outfiles[5])\n config.set_default_config()\n config.set('execution', 'remove_unnecessary_outputs', False)\n out = clean_working_directory(outputs, wd, inputs, needed_outputs,\n deepcopy(config._sections))\n assert os.path.exists(outfiles[5])\n assert out.others == outfiles[5]\n config.set('execution', 'remove_unnecessary_outputs', True)\n out = clean_working_directory(outputs, wd, inputs, needed_outputs,\n deepcopy(config._sections))\n assert os.path.exists(outfiles[1])\n assert os.path.exists(outfiles[3])\n assert os.path.exists(outfiles[4])\n assert not os.path.exists(outfiles[5])\n assert out.others == nib.Undefined\n assert len(out.files) == 2\n config.set_default_config()\n\n\ndef test_outputs_removal(tmpdir):\n\n def test_function(arg1):\n import os\n file1 = os.path.join(os.getcwd(), 'file1.txt')\n file2 = os.path.join(os.getcwd(), 'file2.txt')\n fp = open(file1, 'wt')\n fp.write('%d' % arg1)\n fp.close()\n fp = open(file2, 'wt')\n fp.write('%d' % arg1)\n fp.close()\n return file1, file2\n\n out_dir = str(tmpdir)\n n1 = pe.Node(niu.Function(input_names=['arg1'],\n output_names=['file1', 'file2'],\n function=test_function),\n base_dir=out_dir,\n name='testoutputs')\n n1.inputs.arg1 = 1\n n1.config = {'execution': {'remove_unnecessary_outputs': True}}\n n1.config = merge_dict(deepcopy(config._sections), 
n1.config)\n n1.run()\n assert os.path.exists(os.path.join(out_dir,\n n1.name,\n 'file1.txt'))\n assert os.path.exists(os.path.join(out_dir,\n n1.name,\n 'file2.txt'))\n n1.needed_outputs = ['file2']\n n1.run()\n assert not os.path.exists(os.path.join(out_dir,\n n1.name,\n 'file1.txt'))\n assert os.path.exists(os.path.join(out_dir,\n n1.name,\n 'file2.txt'))\n\n\nclass InputSpec(nib.TraitedSpec):\n in_file = nib.File(exists=True, copyfile=True)\n\n\nclass OutputSpec(nib.TraitedSpec):\n output1 = nib.traits.List(nib.traits.Int, desc='outputs')\n\n\nclass UtilsTestInterface(nib.BaseInterface):\n input_spec = InputSpec\n output_spec = OutputSpec\n\n def _run_interface(self, runtime):\n runtime.returncode = 0\n return runtime\n\n def _list_outputs(self):\n outputs = self._outputs().get()\n outputs['output1'] = [1]\n return outputs\n\n\ndef test_inputs_removal(tmpdir):\n out_dir = str(tmpdir)\n file1 = os.path.join(out_dir, 'file1.txt')\n fp = open(file1, 'wt')\n fp.write('dummy_file')\n fp.close()\n n1 = pe.Node(UtilsTestInterface(),\n base_dir=out_dir,\n name='testinputs')\n n1.inputs.in_file = file1\n n1.config = {'execution': {'keep_inputs': True}}\n n1.config = merge_dict(deepcopy(config._sections), n1.config)\n n1.run()\n assert os.path.exists(os.path.join(out_dir,\n n1.name,\n 'file1.txt'))\n n1.inputs.in_file = file1\n n1.config = {'execution': {'keep_inputs': False}}\n n1.config = merge_dict(deepcopy(config._sections), n1.config)\n n1.overwrite = True\n n1.run()\n assert not os.path.exists(os.path.join(out_dir,\n n1.name,\n 'file1.txt'))\n\n\ndef test_outputs_removal_wf(tmpdir):\n\n def test_function(arg1):\n import os\n file1 = os.path.join(os.getcwd(), 'file1.txt')\n file2 = os.path.join(os.getcwd(), 'file2.txt')\n file3 = os.path.join(os.getcwd(), 'file3.txt')\n file4 = os.path.join(os.getcwd(), 'subdir', 'file1.txt')\n files = [file1, file2, file3, file4]\n os.mkdir(\"subdir\")\n for filename in files:\n with open(filename, 'wt') as fp:\n fp.write('%d' % arg1)\n return file1, file2, os.path.join(os.getcwd(), \"subdir\")\n\n def test_function2(in_file, arg):\n import os\n in_arg = open(in_file).read()\n file1 = os.path.join(os.getcwd(), 'file1.txt')\n file2 = os.path.join(os.getcwd(), 'file2.txt')\n file3 = os.path.join(os.getcwd(), 'file3.txt')\n files = [file1, file2, file3]\n for filename in files:\n with open(filename, 'wt') as fp:\n fp.write('%d' % arg + in_arg)\n return file1, file2, 1\n\n def test_function3(arg):\n import os\n return arg\n\n out_dir = str(tmpdir)\n\n for plugin in ('Linear',): # , 'MultiProc'):\n n1 = pe.Node(niu.Function(input_names=['arg1'],\n output_names=['out_file1', 'out_file2', 'dir'],\n function=test_function),\n name='n1')\n n1.inputs.arg1 = 1\n\n n2 = pe.Node(niu.Function(input_names=['in_file', 'arg'],\n output_names=['out_file1', 'out_file2', 'n'],\n function=test_function2),\n name='n2')\n n2.inputs.arg = 2\n\n n3 = pe.Node(niu.Function(input_names=['arg'],\n output_names=['n'],\n function=test_function3),\n name='n3')\n\n wf = pe.Workflow(name=\"node_rem_test\" + plugin, base_dir=out_dir)\n wf.connect(n1, \"out_file1\", n2, \"in_file\")\n\n wf.run(plugin='Linear')\n\n for remove_unnecessary_outputs in [True, False]:\n config.set_default_config()\n wf.config = {'execution': {'remove_unnecessary_outputs': remove_unnecessary_outputs}}\n rmtree(os.path.join(wf.base_dir, wf.name))\n wf.run(plugin=plugin)\n\n assert os.path.exists(os.path.join(wf.base_dir,\n wf.name,\n n1.name,\n 'file2.txt')) != remove_unnecessary_outputs\n assert 
os.path.exists(os.path.join(wf.base_dir,\n wf.name,\n n1.name,\n \"subdir\",\n 'file1.txt')) != remove_unnecessary_outputs\n assert os.path.exists(os.path.join(wf.base_dir,\n wf.name,\n n1.name,\n 'file1.txt'))\n assert os.path.exists(os.path.join(wf.base_dir,\n wf.name,\n n1.name,\n 'file3.txt')) != remove_unnecessary_outputs\n assert os.path.exists(os.path.join(wf.base_dir,\n wf.name,\n n2.name,\n 'file1.txt'))\n assert os.path.exists(os.path.join(wf.base_dir,\n wf.name,\n n2.name,\n 'file2.txt'))\n assert os.path.exists(os.path.join(wf.base_dir,\n wf.name,\n n2.name,\n 'file3.txt')) != remove_unnecessary_outputs\n\n n4 = pe.Node(UtilsTestInterface(), name='n4')\n wf.connect(n2, \"out_file1\", n4, \"in_file\")\n\n def pick_first(l):\n return l[0]\n\n wf.connect(n4, (\"output1\", pick_first), n3, \"arg\")\n for remove_unnecessary_outputs in [True, False]:\n for keep_inputs in [True, False]:\n config.set_default_config()\n wf.config = {'execution': {'keep_inputs': keep_inputs, 'remove_unnecessary_outputs': remove_unnecessary_outputs}}\n rmtree(os.path.join(wf.base_dir, wf.name))\n wf.run(plugin=plugin)\n assert os.path.exists(os.path.join(wf.base_dir,\n wf.name,\n n2.name,\n 'file1.txt'))\n assert os.path.exists(os.path.join(wf.base_dir,\n wf.name,\n n2.name,\n 'file2.txt')) != remove_unnecessary_outputs\n assert os.path.exists(os.path.join(wf.base_dir,\n wf.name,\n n4.name,\n 'file1.txt')) == keep_inputs\n\n\ndef fwhm(fwhm):\n return fwhm\n\n\ndef create_wf(name):\n pipe = pe.Workflow(name=name)\n process = pe.Node(niu.Function(input_names=['fwhm'],\n output_names=['fwhm'],\n function=fwhm),\n name='proc')\n process.iterables = ('fwhm', [0])\n process2 = pe.Node(niu.Function(input_names=['fwhm'],\n output_names=['fwhm'],\n function=fwhm),\n name='proc2')\n process2.iterables = ('fwhm', [0])\n pipe.connect(process, 'fwhm', process2, 'fwhm')\n return pipe\n\n\ndef test_multi_disconnected_iterable(tmpdir):\n metawf = pe.Workflow(name='meta')\n metawf.base_dir = str(tmpdir)\n metawf.add_nodes([create_wf('wf%d' % i) for i in range(30)])\n eg = metawf.run(plugin='Linear')\n assert len(eg.nodes()) == 60\n\n\ndef test_provenance(tmpdir):\n out_dir = str(tmpdir)\n metawf = pe.Workflow(name='meta')\n metawf.base_dir = out_dir\n metawf.add_nodes([create_wf('wf%d' % i) for i in range(1)])\n eg = metawf.run(plugin='Linear')\n prov_base = os.path.join(out_dir,\n 'workflow_provenance_test')\n psg = write_workflow_prov(eg, prov_base, format='all')\n assert len(psg.bundles) == 2\n assert len(psg.get_records()) == 7\n\n\ndef dummy_func(value):\n return value + 1\n\n\ndef test_mapnode_crash(tmpdir):\n \"\"\"Test mapnode crash when stop_on_first_crash is True\"\"\"\n cwd = os.getcwd()\n node = pe.MapNode(niu.Function(input_names=['WRONG'],\n output_names=['newstring'],\n function=dummy_func),\n iterfield=['WRONG'],\n name='myfunc')\n node.inputs.WRONG = ['string{}'.format(i) for i in range(3)]\n node.config = deepcopy(config._sections)\n node.config['execution']['stop_on_first_crash'] = True\n node.base_dir = str(tmpdir)\n with pytest.raises(TypeError):\n node.run()\n os.chdir(cwd)\n\n\ndef test_mapnode_crash2(tmpdir):\n \"\"\"Test mapnode crash when stop_on_first_crash is False\"\"\"\n cwd = os.getcwd()\n node = pe.MapNode(niu.Function(input_names=['WRONG'],\n output_names=['newstring'],\n function=dummy_func),\n iterfield=['WRONG'],\n name='myfunc')\n node.inputs.WRONG = ['string{}'.format(i) for i in range(3)]\n node.base_dir = str(tmpdir)\n\n with pytest.raises(Exception):\n node.run()\n 
os.chdir(cwd)\n\n\ndef test_mapnode_crash3(tmpdir):\n \"\"\"Test mapnode crash when mapnode is embedded in a workflow\"\"\"\n node = pe.MapNode(niu.Function(input_names=['WRONG'],\n output_names=['newstring'],\n function=dummy_func),\n iterfield=['WRONG'],\n name='myfunc')\n node.inputs.WRONG = ['string{}'.format(i) for i in range(3)]\n wf = pe.Workflow('testmapnodecrash')\n wf.add_nodes([node])\n wf.base_dir = str(tmpdir)\n with pytest.raises(RuntimeError):\n wf.run(plugin='Linear')\n" ]
[ [ "numpy.array" ] ]
Akuchi612/keras-attention-mechanism
[ "63fc19fd8f0618da98a8122ee755d0a9e7e33a73" ]
[ "attention_lstm.py" ]
[ "from keras.layers import merge\nfrom keras.layers.core import *\nfrom keras.layers.recurrent import LSTM\nfrom keras.models import *\n\nfrom attention_utils import get_activations, get_data_recurrent\n\nINPUT_DIM = 2\nTIME_STEPS = 20\n# if True, the attention vector is shared across the input_dimensions where the attention is applied.\nSINGLE_ATTENTION_VECTOR = False\nAPPLY_ATTENTION_BEFORE_LSTM = False\n\n\ndef attention_3d_block(inputs):\n # inputs.shape = (batch_size, time_steps, input_dim)\n input_dim = int(inputs.shape[2])\n a = Permute((2, 1))(inputs)\n a = Reshape((input_dim, TIME_STEPS))(a)\n a = Dense(TIME_STEPS, activation='softmax')(a)\n if SINGLE_ATTENTION_VECTOR:\n a = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(a)\n a = RepeatVector(input_dim)(a)\n a_probs = Permute((2, 1), name='attention_vec')(a)\n output_attention_mul = merge([inputs, a_probs], name='attention_mul', mode='mul')\n return output_attention_mul\n\n\ndef model_attention_applied_after_lstm():\n inputs = Input(shape=(TIME_STEPS, INPUT_DIM,))\n lstm_units = 32\n lstm_out = LSTM(lstm_units, return_sequences=True)(inputs)\n attention_mul = attention_3d_block(lstm_out)\n attention_mul = Flatten()(attention_mul)\n output = Dense(1, activation='sigmoid')(attention_mul)\n model = Model(input=[inputs], output=output)\n return model\n\n\ndef model_attention_applied_before_lstm():\n inputs = Input(shape=(TIME_STEPS, INPUT_DIM,))\n attention_mul = attention_3d_block(inputs)\n lstm_units = 32\n attention_mul = LSTM(lstm_units, return_sequences=False)(attention_mul)\n output = Dense(1, activation='sigmoid')(attention_mul)\n model = Model(input=[inputs], output=output)\n return model\n\n\nif __name__ == '__main__':\n\n N = 300000\n # N = 300 -> too few = no training\n inputs_1, outputs = get_data_recurrent(N, TIME_STEPS, INPUT_DIM)\n\n if APPLY_ATTENTION_BEFORE_LSTM:\n m = model_attention_applied_after_lstm()\n else:\n m = model_attention_applied_before_lstm()\n\n m.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n print(m.summary())\n\n m.fit([inputs_1], outputs, epochs=1, batch_size=64, validation_split=0.1)\n\n attention_vectors = []\n for i in range(300):\n testing_inputs_1, testing_outputs = get_data_recurrent(1, TIME_STEPS, INPUT_DIM)\n attention_vector = np.mean(get_activations(m,\n testing_inputs_1,\n print_shape_only=True,\n layer_name='attention_vec')[0], axis=2).squeeze()\n print('attention =', attention_vector)\n assert (np.sum(attention_vector) - 1.0) < 1e-5\n attention_vectors.append(attention_vector)\n\n attention_vector_final = np.mean(np.array(attention_vectors), axis=0)\n # plot part.\n import matplotlib.pyplot as plt\n import pandas as pd\n\n pd.DataFrame(attention_vector_final, columns=['attention (%)']).plot(kind='bar',\n title='Attention Mechanism as '\n 'a function of input'\n ' dimensions.')\n plt.show()\n" ]
[ [ "matplotlib.pyplot.show", "pandas.DataFrame" ] ]
maxspahn/exotica
[ "f748a5860939b870ab522a1bd553d2fa0da56f8e" ]
[ "exotica_core/test/test_box_qp.py" ]
[ "import numpy as np\nimport pyexotica as exo\nimport unittest\nfrom numpy import testing as nptest\nfrom scipy.optimize import minimize\n\nclass TestBoxQP(unittest.TestCase):\n \"\"\"Tests BoxQP implementation against scipy.\"\"\" \n \n def test_zero_q(self):\n np.random.seed(100)\n\n # check against 100 state,control pairs\n for i in range(100):\n H = np.random.normal(\n size=(2, 2), loc=0, scale=10\n )\n H = np.abs(H)\n H[0, 1] = H[1,0] = 0\n\n b_low = np.array([-5., -5.])\n b_high = np.array([5., 5.])\n x_init = np.array([-3., 2.])\n q = np.array([0.0, 0.0])\n\n sol = exo.box_qp(H, q, b_low, b_high, x_init, 0.1, 100, 1e-5, 1e-5)\n \n def cost(x):\n return .5 * np.matmul(np.matmul(x.T, H), x) + np.matmul(q.T, x)\n\n sp_sol = minimize(cost, x_init, method='TNC', bounds=[\n (b_low[0], b_high[0]),\n (b_low[1], b_high[1]),\n ])\n\n nptest.assert_allclose(sp_sol.x, sol.x, rtol=1, atol=1e-4, err_msg=\"BoxQP and scipy differ!\")\n \n def test_zero_h(self):\n np.random.seed(100)\n\n # check against 100 state,control pairs\n for i in range(10):\n H = np.array([[0.,0.], [0.,0.]])\n\n b_low = np.array([-5., -5.])\n b_high = np.array([5., 5.])\n x_init = np.array([-3., 2.])\n q = np.random.normal(size=(2,1), loc=0, scale=10)\n\n sol = exo.box_qp(H, q, b_low, b_high, x_init, 0.1, 100, 1e-5, 1e-5)\n \n def cost(x):\n return .5 * np.matmul(np.matmul(x.T, H), x) + np.matmul(q.T, x)\n\n sp_sol = minimize(cost, x_init, method='TNC', bounds=[\n (b_low[0], b_high[0]),\n (b_low[1], b_high[1]),\n ])\n\n nptest.assert_allclose(sp_sol.x, sol.x, rtol=1, atol=1e-4, err_msg=\"BoxQP and scipy differ!\")\n\n def test_big_numbers(self):\n np.random.seed(100)\n\n # check against 100 state,control pairs\n for i in range(100):\n H = np.random.normal(\n size=(2, 2), loc=0, scale=10\n )\n H = np.abs(H) * 1e20\n H[0, 1] = H[1,0] = 0\n\n b_low = np.array([-5., -5.])\n b_high = np.array([5., 5.])\n x_init = np.array([-3., 2.])\n q = np.array([0, 0])\n\n sol = exo.box_qp(H, q, b_low, b_high, x_init, 0.1, 100, 1e-5, 1e-5)\n \n def cost(x):\n return .5 * np.matmul(np.matmul(x.T, H), x) + np.matmul(q.T, x)\n\n sp_sol = minimize(cost, x_init, method='TNC', bounds=[\n (b_low[0], b_high[0]),\n (b_low[1], b_high[1]),\n ])\n\n nptest.assert_allclose(sp_sol.x, sol.x, rtol=1, atol=1e-4, err_msg=\"BoxQP and scipy differ!\")\n\n def test_small_numbers(self):\n np.random.seed(100)\n\n # check against 100 state,control pairs\n for i in range(10):\n H = np.random.normal(\n size=(2, 2), loc=0, scale=10\n )\n H = np.abs(H) * 1e-20\n H[0, 1] = H[1,0] = 0\n\n b_low = np.array([-5., -5.])\n b_high = np.array([5., 5.])\n x_init = np.array([-3., 2.])\n q = np.array([0, 0])\n\n sol = exo.box_qp(H, q, b_low, b_high, x_init, 0.1, 100, 1e-5, 1e-5)\n \n def cost(x):\n return .5 * np.matmul(np.matmul(x.T, H), x) + np.matmul(q.T, x)\n\n sp_sol = minimize(cost, x_init, method='TNC', bounds=[\n (b_low[0], b_high[0]),\n (b_low[1], b_high[1]),\n ])\n\n nptest.assert_allclose(sp_sol.x, sol.x, rtol=1, atol=1e-4, err_msg=\"BoxQP and scipy differ!\")\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.matmul", "scipy.optimize.minimize", "numpy.random.seed", "numpy.abs", "numpy.testing.assert_allclose", "numpy.random.normal", "numpy.array" ] ]
AssafZohar/eddington
[ "c67536c41a66a1f96d0aa85d5113b11b79759a7e" ]
[ "src/eddington/fit_function_class.py" ]
[ "\"\"\"Fitting function to evaluate with the fitting algorithm.\"\"\"\nimport functools\nfrom dataclasses import InitVar, dataclass, field\nfrom typing import Callable, Optional, Dict\n\nimport numpy as np\n\nfrom eddington.exceptions import FitFunctionRuntimeError\nfrom eddington.fit_functions_registry import FitFunctionsRegistry\n\n\n@dataclass(unsafe_hash=True)\nclass FitFunction: # pylint: disable=invalid-name,too-many-instance-attributes\n \"\"\"\n Fitting function class.\n\n :param fit_func: Callable. The actual fitting function.\n The function gets 2 parameters:\n a - an array with the parameters of the function.\n x - the sample data to be fit.\n :param n: Number of parameters. the length of \"a\" in fit_func.\n :param name: The name of the function.\n :param syntax: The syntax of the fitting function\n :param a_derivative: a function representing the derivative of fit_func according\n to the \"a\" array\n :param x_derivative: a function representing the derivative of fit_func according\n to x\n :param title_name: same as \"name\" but in title case\n :param costumed: Is this fit functioned made from a string.\n This will be deprecated soon.\n :param save: Boolean. Should this function be saved in the\n :class:`FitFunctionsRegistry`\n \"\"\"\n\n fit_func: Callable = field(repr=False)\n n: int = field(repr=False)\n name: Optional[str] = field()\n syntax: Optional[str] = field(default=None)\n a_derivative: Optional[Callable] = field(default=None, repr=False)\n x_derivative: Optional[Callable] = field(default=None, repr=False)\n title_name: str = field(init=False, repr=False)\n fixed: Dict[int, float] = field(init=False, repr=False)\n save: InitVar[bool] = True\n\n def __post_init__(self, save):\n \"\"\"Post init methods.\"\"\"\n self.title_name = self.__get_title_name()\n self.fixed = dict()\n self.x_derivative = self.__wrap_x_derivative(self.x_derivative)\n self.a_derivative = self.__wrap_a_derivative(self.a_derivative)\n if save:\n FitFunctionsRegistry.add(self)\n\n def __get_title_name(self):\n return self.name.title().replace(\"_\", \" \")\n\n def __validate_parameters_number(self, a):\n a_length = len(a)\n if a_length != self.n:\n raise FitFunctionRuntimeError(\n f\"Input length should be {self.active_parameters}, \"\n f\"got {a_length - len(self.fixed)}\"\n )\n\n def __call__(self, *args):\n \"\"\"Call the fit function as a regular callable.\"\"\"\n a, x = self.__extract_a_and_x(args)\n self.__validate_parameters_number(a)\n return self.fit_func(a, x)\n\n def assign(self, a):\n \"\"\"Assign the function parameters.\"\"\"\n a = self.__add_fixed_values(a)\n self.__validate_parameters_number(a)\n self.fixed = dict(enumerate(a))\n return self\n\n def fix(self, index, value):\n \"\"\"\n Fix parameter with predefined value.\n\n :param index: The index of the parameter to fix. Starting from 0\n :param value: The value to fix\n :return: self\n \"\"\"\n if index < 0 or index >= self.n:\n raise FitFunctionRuntimeError(\n f\"Cannot fix index {index}. 
\"\n f\"Indices should be between 0 and {self.n - 1}\"\n )\n self.fixed[index] = value\n return self\n\n def unfix(self, index):\n \"\"\"\n Unfix a fixed parameter.\n\n :param index: The index of the parameter to unfix\n :return: self\n \"\"\"\n del self.fixed[index]\n return self\n\n def clear_fixed(self):\n \"\"\"\n Clear all fixed parameters.\n\n :return: self\n \"\"\"\n self.fixed.clear()\n\n @property\n def signature(self):\n \"\"\"Same as name.\"\"\"\n return self.name\n\n @property\n def active_parameters(self):\n \"\"\"Number of active parameters (aka, unfixed).\"\"\"\n return self.n - len(self.fixed)\n\n def __wrap_x_derivative(self, method):\n if method is None:\n return None\n\n @functools.wraps(method)\n def wrapper(*args):\n a, x = self.__extract_a_and_x(args)\n self.__validate_parameters_number(a)\n return method(a, x)\n\n return wrapper\n\n def __wrap_a_derivative(self, method):\n if method is None:\n return None\n\n @functools.wraps(method)\n def wrapper(*args):\n a, x = self.__extract_a_and_x(args)\n self.__validate_parameters_number(a)\n result = method(a, x)\n if len(self.fixed) == 0:\n return result\n return np.delete(result, list(self.fixed.keys()), axis=0)\n\n return wrapper\n\n def __extract_a_and_x(self, args):\n if len(args) == 0:\n raise FitFunctionRuntimeError(\n f'No parameters has been given to \"{self.name}\"'\n )\n if len(args) == 1:\n a = [self.fixed[i] for i in sorted(self.fixed.keys())]\n x = args[0]\n else:\n a = self.__add_fixed_values(args[0])\n x = args[1]\n return a, x\n\n def __add_fixed_values(self, a):\n for i in sorted(self.fixed.keys()):\n a = np.insert(a, i, self.fixed[i])\n return a\n\n\ndef fit_function( # pylint: disable=invalid-name,too-many-arguments\n n, name=None, syntax=None, a_derivative=None, x_derivative=None, save=True\n):\n \"\"\"\n Wrapper making a simple callable into a :class:`FitFunction`.\n\n :param n: Number of parameters. the length of \"a\" in fit_func.\n :param name: The name of the function.\n :param syntax: The syntax of the fitting function\n :param a_derivative: a function representing the derivative of fit_func according\n to the \"a\" array\n :param x_derivative: a function representing the derivative of fit_func according\n to x\n :param save: Boolean. Should this function be saved in the\n :class:`FitFunctionsRegistry`\n :return: :class:`FitFunction` instance.\n \"\"\"\n\n def wrapper(func):\n func_name = func.__name__ if name is None else name\n return functools.wraps(func)(\n FitFunction(\n fit_func=func,\n n=n,\n name=func_name,\n syntax=syntax,\n a_derivative=a_derivative,\n x_derivative=x_derivative,\n save=save,\n )\n )\n\n return wrapper\n" ]
[ [ "numpy.insert" ] ]
itisaby/HacktoberFest2021
[ "dffeabb306082b276a9065ca318d3adc47bd6177" ]
[ "Gradient Descent/KNN Iris/Classification.py" ]
[ "from sklearn import datasets\nfrom sklearn.neighbors import KNeighborsClassifier\n\n#loading Datasets\niris = datasets.load_iris()\n\n# print(iris.DESCR)\n\nfeatures = iris.data\nlabels = iris.target\nprint(features[0], labels[0])\n\n#Training the data\nclf = KNeighborsClassifier()\n\nclf.fit(features, labels)\n\n#Prediction\npred = clf.predict([[9.1, 3.5, 6.4, 4.8]])\nprint(pred)\n" ]
[ [ "sklearn.neighbors.KNeighborsClassifier", "sklearn.datasets.load_iris" ] ]
shredEngineer/MagnetiCalc
[ "bfccb8b6ef9a4642d30b2f0639b0ab41784598ad" ]
[ "magneticalc/SamplingVolume.py" ]
[ "\"\"\" Sampling volume module. \"\"\"\n\n# ISC License\n#\n# Copyright (c) 2020–2021, Paul Wilhelm, M. Sc. <[email protected]>\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nfrom typing import Tuple, List, Optional\nimport numpy as np\nfrom PyQt5.QtCore import QThread\nfrom magneticalc.Assert_Dialog import Assert_Dialog\nfrom magneticalc.Debug import Debug\nfrom magneticalc.Theme import Theme\n\n\nclass SamplingVolume:\n \"\"\" Sampling volume class. \"\"\"\n\n # Enable to show additional debug info during constraint calculation\n Debug_Constraints = False\n\n def __init__(self, resolution: float, label_resolution: float):\n \"\"\"\n Initializes an empty sampling volume, with zero bounds and no constraints.\n\n @param resolution: Resolution\n @param label_resolution: Label resolution\n \"\"\"\n Debug(self, \": Init\")\n\n self._resolution = resolution\n self._label_resolution = label_resolution\n\n self.constraints = []\n\n self._bounds_min = np.zeros(3)\n self._bounds_max = np.zeros(3)\n self._dimension = None\n\n self._points = None\n self._permeabilities = None\n self._labeled_indices = None\n self._neighbor_indices = None\n\n Assert_Dialog(resolution > 0, \"Resolution must be > 0\")\n Assert_Dialog(label_resolution > 0, \"Label resolution must be > 0\")\n\n @property\n def dimension(self) -> Optional[Tuple[int, int, int]]:\n \"\"\"\n Gets the sampling volume dimension if it is valid, None otherwise.\n\n @return: Sampling volume dimension if it is valid, None otherwise.\n \"\"\"\n return self._dimension if self.is_valid() else None\n\n def is_valid(self) -> bool:\n \"\"\"\n Indicates valid data for display.\n\n @return: True if data is valid for display, False otherwise\n \"\"\"\n return \\\n self._dimension is not None and \\\n self._points is not None and \\\n self._permeabilities is not None and \\\n self._labeled_indices is not None and \\\n self._neighbor_indices is not None\n\n def invalidate(self):\n \"\"\"\n Resets data, hiding from display.\n \"\"\"\n Debug(self, \".invalidate()\", color=Theme.InvalidColor)\n\n self._dimension = None\n self._points = None\n self._permeabilities = None\n self._labeled_indices = None\n self._neighbor_indices = None\n\n # ------------------------------------------------------------------------------------------------------------------\n\n def get_resolution(self) -> float:\n \"\"\"\n Returns this volume's resolution.\n\n @return: Resolution\n \"\"\"\n return self._resolution\n\n def get_bounds(self) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Returns this volume's bounding box.\n\n @return: _bounds_min, _bounds_max\n \"\"\"\n return self._bounds_min, self._bounds_max\n\n def get_extent(self) -> List:\n \"\"\"\n Returns this volume's extent.\n\n @return: 3D point\n \"\"\"\n return self._bounds_max - self._bounds_min\n\n def get_points(self) -> List:\n \"\"\"\n Returns 
this sampling volume's points.\n\n @return: Ordered list of 3D points\n \"\"\"\n Assert_Dialog(self.is_valid(), \"Accessing invalidated sampling volume\")\n\n return self._points\n\n def get_points_count(self) -> int:\n \"\"\"\n Returns this sampling volume's point count.\n\n @return: Point count\n \"\"\"\n Assert_Dialog(self.is_valid(), \"Accessing invalidated sampling volume\")\n\n return len(self._points)\n\n def get_permeabilities(self) -> List:\n \"\"\"\n Returns this sampling volume's relative permeabilities µ_r.\n\n @return: Ordered list of sampling volume's relative permeabilities µ_r\n \"\"\"\n Assert_Dialog(self.is_valid(), \"Accessing invalidated sampling volume\")\n\n return self._permeabilities\n\n def get_labeled_indices(self) -> List:\n \"\"\"\n Returns this sampling volume's labeled indices.\n\n @return: Unordered list of pairs [sampling volume point, field index]\n \"\"\"\n Assert_Dialog(self.is_valid(), \"Accessing invalidated sampling volume\")\n\n return self._labeled_indices\n\n def get_labels_count(self) -> int:\n \"\"\"\n Returns this sampling volume's label count.\n\n @return: Label count\n \"\"\"\n Assert_Dialog(self.is_valid(), \"Accessing invalidated sampling volume\")\n\n return len(self._labeled_indices)\n\n def get_neighbor_indices(self) -> List:\n \"\"\"\n Returns this sampling volume's neighborhood indices.\n\n @return: Ordered list of sampling volume neighborhood indices (six 3D vectors)\n \"\"\"\n Assert_Dialog(self.is_valid(), \"Accessing invalidated sampling volume\")\n\n return self._neighbor_indices\n\n # ------------------------------------------------------------------------------------------------------------------\n\n def set_bounds_nearest(self, bounds_min, bounds_max) -> None:\n \"\"\"\n Adjusts this volume's bounding box to fully enclose a 3D wire curve.\n This expands the bounding box to the next integer grid coordinates.\n\n Note: This will not automatically invalidate the sampling volume\n\n @param bounds_min: Minimum bounding box point\n @param bounds_max: Maximum bounding box point\n @return: Rounded (_bounds_min, _bounds_max)\n \"\"\"\n self._bounds_min = np.array([np.floor(x) for x in bounds_min])\n self._bounds_max = np.array([np.ceil(x) for x in bounds_max])\n\n def set_padding_nearest(self, padding) -> None:\n \"\"\"\n Shrinks or enlarges this volume's bounding box by some amount, in each direction, symmetrically.\n This shrinks or expands the bounding box to the next integer grid coordinates.\n\n Note: This will not automatically invalidate the sampling volume\n\n @param padding: Amount of padding (3D point)\n \"\"\"\n self._bounds_min -= np.array([np.floor(x) for x in padding])\n self._bounds_max += np.array([np.ceil(x) for x in padding])\n\n # ------------------------------------------------------------------------------------------------------------------\n\n def add_constraint(self, constraint) -> None:\n \"\"\"\n Adds some constraint to this volume's point generator.\n\n @param constraint: Constraint\n \"\"\"\n Debug(self, f\".add_constraint()\")\n\n self.constraints.append(constraint)\n\n # ------------------------------------------------------------------------------------------------------------------\n\n def recalculate(self, progress_callback) -> bool:\n \"\"\"\n Recalculates the sampling volume points, permeabilities, labels and neighborhoods according to the constraints.\n\n @param progress_callback: Progress callback\n @return: True if successful, False if interrupted\n \"\"\"\n Debug(self, \".recalculate()\")\n\n # Group 
constraints by permeability\n constraints_precedence_dict = {}\n for constraint in self.constraints:\n if constraint.permeability in constraints_precedence_dict:\n constraints_precedence_dict[constraint.permeability].append(constraint)\n else:\n constraints_precedence_dict[constraint.permeability] = [constraint]\n\n if self.Debug_Constraints:\n Debug(\n self,\n f\".recalculate(): Created {len(constraints_precedence_dict)} constraint group(s)\",\n color=Theme.PrimaryColor,\n force=True\n )\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n # Calculate all possible grid points\n points_axes_all = [[], [], []]\n for i in range(3):\n steps = np.ceil((self._bounds_max[i] - self._bounds_min[i]) * self._resolution).astype(int) + 1\n points_axes_all[i] = np.linspace(self._bounds_min[i], self._bounds_max[i], steps)\n\n self._dimension = np.array([len(axis) for axis in points_axes_all])\n n = self._dimension[0] * self._dimension[1] * self._dimension[2]\n\n points_all = np.zeros(shape=(n, 3))\n permeabilities_all = np.zeros(n)\n neighbor_indices_all = [[0, 0, 0, 0, 0, 0]] * n\n\n labeled_indices = []\n\n def i_to_xyz(_i: int):\n \"\"\"\n Convert 1D index to 3D indices.\n\n @param _i: 1D index\n @return: 3D indices\n \"\"\"\n _x = _i % self._dimension[0]\n _y = (_i // self._dimension[0]) % self._dimension[1]\n _z = _i // (self._dimension[0] * self._dimension[1])\n return [_x, _y, _z]\n\n def xyz_to_i(xyz) -> int:\n \"\"\"\n Convert 3D indices to 1D index.\n\n @param xyz: 3D indices\n @return: 1D index\n \"\"\"\n return xyz[0] + xyz[1] * self._dimension[0] + xyz[2] * self._dimension[0] * self._dimension[1]\n\n # Linearly iterate through all possible grid points, computing the 3D cartesian (\"euclidean\") product\n for i in range(n):\n\n x, y, z = i_to_xyz(i)\n\n point = np.array([points_axes_all[0][x], points_axes_all[1][y], points_axes_all[2][z]])\n\n permeability = 1.0 # Default relative permeability for unconstrained points\n\n # Iterate over constraint groups of descending permeability; higher permeabilities take precedence\n for permeability_key in sorted(constraints_precedence_dict, reverse=True):\n\n included = True\n\n if self.Debug_Constraints:\n print()\n Debug(\n self,\n f\".recalculate(): Point = {point}: \"\n f\"Calculating {len(constraints_precedence_dict[permeability_key])} constraint(s) \"\n f\"for permeability = {permeability_key} …\",\n color=Theme.PrimaryColor,\n force=True\n )\n\n # Calculate the inclusion relation for the current group\n for constraint in constraints_precedence_dict[permeability_key]:\n\n if not constraint.evaluate(point):\n\n if self.Debug_Constraints:\n Debug(\n self,\n f\".recalculate(): Point = {point}: Constraint evaluated to False (breaking)\",\n color=Theme.WarningColor,\n force=True\n )\n\n # Exclude this point within the current group\n included = False\n break\n\n else:\n\n if self.Debug_Constraints:\n Debug(\n self,\n f\".recalculate(): Point = {point}: Constraint evaluated to True\",\n color=Theme.SuccessColor,\n force=True\n )\n\n if included:\n\n if self.Debug_Constraints:\n Debug(\n self,\n f\".recalculate(): Point = {point}: Included by precedence grouping\",\n color=Theme.SuccessColor,\n force=True\n )\n\n permeability = permeability_key\n break\n\n else:\n\n if self.Debug_Constraints:\n Debug(\n self,\n f\".recalculate(): Point = {point}: Excluded by precedence grouping\",\n color=Theme.WarningColor,\n force=True\n )\n\n if permeability != 0:\n\n if self.Debug_Constraints:\n 
Debug(\n self,\n f\".recalculate(): Point = {point}: Finally included with permeability = {permeability}\",\n color=Theme.SuccessColor,\n force=True\n )\n\n # Include this point\n points_all[i] = point\n permeabilities_all[i] = permeability\n\n # Generate this sampling volume point's neighborhood\n neighborhood = [\n xyz_to_i([x + 1, y, z]),\n xyz_to_i([x, y + 1, z]),\n xyz_to_i([x, y, z + 1]),\n xyz_to_i([x - 1, y, z]),\n xyz_to_i([x, y - 1, z]),\n xyz_to_i([x, y, z - 1])\n ]\n neighbor_indices_all[i] = neighborhood\n\n # Provide orthogonal spacing between labels\n if \\\n np.fmod(x, self._resolution / self._label_resolution) == 0 and \\\n np.fmod(y, self._resolution / self._label_resolution) == 0 and \\\n np.fmod(z, self._resolution / self._label_resolution) == 0:\n # Generate a label at this point\n labeled_indices.append([point, i])\n\n else:\n\n if self.Debug_Constraints:\n Debug(\n self,\n f\".recalculate(): Point = {point}: Finally excluded with permeability = 0\",\n color=Theme.WarningColor,\n force=True\n )\n\n # Signal progress update, handle interrupt (every 16 iterations to keep overhead low)\n if i & 0xf == 0:\n progress_callback(100 * (i + 1) / n)\n\n if QThread.currentThread().isInterruptionRequested():\n Debug(self, \".recalculate(): Interruption requested, exiting now\", color=Theme.PrimaryColor)\n return False\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n index_all_to_filtered = [-1] * n\n filtered_index = 0\n\n # Generate mapping from \"all\" indices to \"filtered\" indices\n for i, permeability in enumerate(permeabilities_all):\n if permeability == 0:\n continue\n\n index_all_to_filtered[i] = filtered_index\n filtered_index += 1\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n points_filtered = []\n permeabilities_filtered = []\n neighbor_indices_filtered = []\n\n # Filter for included points, i.e. 
those with permeability != 0; translate neighborhood indices\n for i, permeability in enumerate(permeabilities_all):\n if permeability == 0:\n continue\n\n point = points_all[i]\n permeability = permeabilities_all[i]\n\n # Translate neighborhood indices\n neighborhood = neighbor_indices_all[i]\n for j in range(6):\n if 0 <= neighborhood[j] < n:\n if index_all_to_filtered[neighborhood[j]] != -1:\n neighborhood[j] = index_all_to_filtered[neighborhood[j]]\n else:\n neighborhood[j] = -1 # Neighbor out of bounds\n else:\n neighborhood[j] = -1 # Neighbor out of bounds\n\n points_filtered.append(point)\n permeabilities_filtered.append(permeability)\n neighbor_indices_filtered.append(neighborhood)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n # Translate label indices\n for i in range(len(labeled_indices)):\n labeled_indices[i][1] = index_all_to_filtered[labeled_indices[i][1]]\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n self._points = np.array(points_filtered)\n self._permeabilities = np.array(permeabilities_filtered)\n self._labeled_indices = labeled_indices\n self._neighbor_indices = np.array(neighbor_indices_filtered)\n\n Debug(\n self,\n \".recalculate(): \"\n f\"{len(self.constraints)} constraints left {n} of {len(self._points)} possible points\"\n )\n\n if len(self._points) == 0:\n Debug(\n self,\n \".recalculate: USER WARNING: Avoiding empty sampling volume by adding origin\",\n color=Theme.WarningColor,\n force=True\n )\n origin = np.zeros(3)\n self._points = np.array([origin])\n self._permeabilities = np.array([0])\n self._labeled_indices = [(origin, 0)]\n self._neighbor_indices = np.array([[0, 0, 0, 0, 0, 0]])\n\n progress_callback(100)\n\n return True\n" ]
[ [ "numpy.ceil", "numpy.zeros", "numpy.fmod", "numpy.floor", "numpy.array", "numpy.linspace" ] ]
chunweiyuan/numpy
[ "bfe43b26969231cfe8196868280c07f0c0aa8f50" ]
[ "numpy/ma/tests/test_core.py" ]
[ "# pylint: disable-msg=W0400,W0511,W0611,W0612,W0614,R0201,E1102\n\"\"\"Tests suite for MaskedArray & subclassing.\n\n:author: Pierre Gerard-Marchant\n:contact: pierregm_at_uga_dot_edu\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\n__author__ = \"Pierre GF Gerard-Marchant\"\n\nimport warnings\nimport pickle\nimport operator\nimport itertools\nfrom functools import reduce\n\n\nimport numpy as np\nimport numpy.ma.core\nimport numpy.core.fromnumeric as fromnumeric\nimport numpy.core.umath as umath\nfrom numpy.testing import (\n run_module_suite, assert_raises, assert_warns, suppress_warnings\n )\nfrom numpy import ndarray\nfrom numpy.compat import asbytes, asbytes_nested\nfrom numpy.ma.testutils import (\n assert_, assert_array_equal, assert_equal, assert_almost_equal,\n assert_equal_records, fail_if_equal, assert_not_equal,\n assert_mask_equal\n )\nfrom numpy.ma.core import (\n MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all,\n allclose, allequal, alltrue, angle, anom, arange, arccos, arccosh, arctan2,\n arcsin, arctan, argsort, array, asarray, choose, concatenate,\n conjugate, cos, cosh, count, default_fill_value, diag, divide, empty,\n empty_like, equal, exp, flatten_mask, filled, fix_invalid,\n flatten_structured_array, fromflex, getmask, getmaskarray, greater,\n greater_equal, identity, inner, isMaskedArray, less, less_equal, log,\n log10, make_mask, make_mask_descr, mask_or, masked, masked_array,\n masked_equal, masked_greater, masked_greater_equal, masked_inside,\n masked_less, masked_less_equal, masked_not_equal, masked_outside,\n masked_print_option, masked_values, masked_where, max, maximum,\n maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply,\n mvoid, nomask, not_equal, ones, outer, power, product, put, putmask,\n ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt,\n subtract, sum, take, tan, tanh, transpose, where, zeros,\n )\n\npi = np.pi\n\n\nsuppress_copy_mask_on_assignment = suppress_warnings()\nsuppress_copy_mask_on_assignment.filter(\n numpy.ma.core.MaskedArrayFutureWarning,\n \"setting an item on a masked array which has a shared mask will not copy\")\n\n\nclass TestMaskedArray(object):\n # Base test class for MaskedArrays.\n\n def setup(self):\n # Base data definition.\n x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])\n y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])\n a10 = 10.\n m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]\n xm = masked_array(x, mask=m1)\n ym = masked_array(y, mask=m2)\n z = np.array([-.5, 0., .5, .8])\n zm = masked_array(z, mask=[0, 1, 0, 0])\n xf = np.where(m1, 1e+20, x)\n xm.set_fill_value(1e+20)\n self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)\n\n def test_basicattributes(self):\n # Tests some basic array attributes.\n a = array([1, 3, 2])\n b = array([1, 3, 2], mask=[1, 0, 1])\n assert_equal(a.ndim, 1)\n assert_equal(b.ndim, 1)\n assert_equal(a.size, 3)\n assert_equal(b.size, 3)\n assert_equal(a.shape, (3,))\n assert_equal(b.shape, (3,))\n\n def test_basic0d(self):\n # Checks masking a scalar\n x = masked_array(0)\n assert_equal(str(x), '0')\n x = masked_array(0, mask=True)\n assert_equal(str(x), str(masked_print_option))\n x = masked_array(0, mask=False)\n assert_equal(str(x), '0')\n x = array(0, mask=1)\n assert_(x.filled().dtype is x._data.dtype)\n\n def test_basic1d(self):\n # Test of basic array creation and properties in 1 dimension.\n (x, y, a10, m1, m2, xm, ym, z, zm, 
xf) = self.d\n assert_(not isMaskedArray(x))\n assert_(isMaskedArray(xm))\n assert_((xm - ym).filled(0).any())\n fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))\n s = x.shape\n assert_equal(np.shape(xm), s)\n assert_equal(xm.shape, s)\n assert_equal(xm.dtype, x.dtype)\n assert_equal(zm.dtype, z.dtype)\n assert_equal(xm.size, reduce(lambda x, y:x * y, s))\n assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))\n assert_array_equal(xm, xf)\n assert_array_equal(filled(xm, 1.e20), xf)\n assert_array_equal(x, xm)\n\n def test_basic2d(self):\n # Test of basic array creation and properties in 2 dimensions.\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n for s in [(4, 3), (6, 2)]:\n x.shape = s\n y.shape = s\n xm.shape = s\n ym.shape = s\n xf.shape = s\n\n assert_(not isMaskedArray(x))\n assert_(isMaskedArray(xm))\n assert_equal(shape(xm), s)\n assert_equal(xm.shape, s)\n assert_equal(xm.size, reduce(lambda x, y:x * y, s))\n assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))\n assert_equal(xm, xf)\n assert_equal(filled(xm, 1.e20), xf)\n assert_equal(x, xm)\n\n def test_concatenate_basic(self):\n # Tests concatenations.\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n # basic concatenation\n assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))\n assert_equal(np.concatenate((x, y)), concatenate((x, y)))\n assert_equal(np.concatenate((x, y)), concatenate((xm, y)))\n assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))\n\n def test_concatenate_alongaxis(self):\n # Tests concatenations.\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n # Concatenation along an axis\n s = (3, 4)\n x.shape = y.shape = xm.shape = ym.shape = s\n assert_equal(xm.mask, np.reshape(m1, s))\n assert_equal(ym.mask, np.reshape(m2, s))\n xmym = concatenate((xm, ym), 1)\n assert_equal(np.concatenate((x, y), 1), xmym)\n assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)\n\n x = zeros(2)\n y = array(ones(2), mask=[False, True])\n z = concatenate((x, y))\n assert_array_equal(z, [0, 0, 1, 1])\n assert_array_equal(z.mask, [False, False, False, True])\n z = concatenate((y, x))\n assert_array_equal(z, [1, 1, 0, 0])\n assert_array_equal(z.mask, [False, True, False, False])\n\n def test_concatenate_flexible(self):\n # Tests the concatenation on flexible arrays.\n data = masked_array(list(zip(np.random.rand(10),\n np.arange(10))),\n dtype=[('a', float), ('b', int)])\n\n test = concatenate([data[:5], data[5:]])\n assert_equal_records(test, data)\n\n def test_creation_ndmin(self):\n # Check the use of ndmin\n x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)\n assert_equal(x.shape, (1, 3))\n assert_equal(x._data, [[1, 2, 3]])\n assert_equal(x._mask, [[1, 0, 0]])\n\n def test_creation_ndmin_from_maskedarray(self):\n # Make sure we're not losing the original mask w/ ndmin\n x = array([1, 2, 3])\n x[-1] = masked\n xx = array(x, ndmin=2, dtype=float)\n assert_equal(x.shape, x._mask.shape)\n assert_equal(xx.shape, xx._mask.shape)\n\n def test_creation_maskcreation(self):\n # Tests how masks are initialized at the creation of Maskedarrays.\n data = arange(24, dtype=float)\n data[[3, 6, 15]] = masked\n dma_1 = MaskedArray(data)\n assert_equal(dma_1.mask, data.mask)\n dma_2 = MaskedArray(dma_1)\n assert_equal(dma_2.mask, dma_1.mask)\n dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)\n fail_if_equal(dma_3.mask, dma_1.mask)\n\n x = array([1, 2, 3], mask=True)\n assert_equal(x._mask, [True, True, True])\n x = array([1, 2, 3], mask=False)\n assert_equal(x._mask, [False, False, 
False])\n y = array([1, 2, 3], mask=x._mask, copy=False)\n assert_(np.may_share_memory(x.mask, y.mask))\n y = array([1, 2, 3], mask=x._mask, copy=True)\n assert_(not np.may_share_memory(x.mask, y.mask))\n\n def test_creation_with_list_of_maskedarrays(self):\n # Tests creating a masked array from a list of masked arrays.\n x = array(np.arange(5), mask=[1, 0, 0, 0, 0])\n data = array((x, x[::-1]))\n assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])\n assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])\n\n x.mask = nomask\n data = array((x, x[::-1]))\n assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])\n assert_(data.mask is nomask)\n\n def test_creation_from_ndarray_with_padding(self):\n x = np.array([('A', 0)], dtype={'names':['f0','f1'],\n 'formats':['S4','i8'],\n 'offsets':[0,8]})\n data = array(x) # used to fail due to 'V' padding field in x.dtype.descr\n\n def test_asarray(self):\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n xm.fill_value = -9999\n xm._hardmask = True\n xmm = asarray(xm)\n assert_equal(xmm._data, xm._data)\n assert_equal(xmm._mask, xm._mask)\n assert_equal(xmm.fill_value, xm.fill_value)\n assert_equal(xmm._hardmask, xm._hardmask)\n\n def test_asarray_default_order(self):\n # See Issue #6646\n m = np.eye(3).T\n assert_(not m.flags.c_contiguous)\n\n new_m = asarray(m)\n assert_(new_m.flags.c_contiguous)\n\n def test_asarray_enforce_order(self):\n # See Issue #6646\n m = np.eye(3).T\n assert_(not m.flags.c_contiguous)\n\n new_m = asarray(m, order='C')\n assert_(new_m.flags.c_contiguous)\n\n def test_fix_invalid(self):\n # Checks fix_invalid.\n with np.errstate(invalid='ignore'):\n data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])\n data_fixed = fix_invalid(data)\n assert_equal(data_fixed._data, [data.fill_value, 0., 1.])\n assert_equal(data_fixed._mask, [1., 0., 1.])\n\n def test_maskedelement(self):\n # Test of masked element\n x = arange(6)\n x[1] = masked\n assert_(str(masked) == '--')\n assert_(x[1] is masked)\n assert_equal(filled(x[1], 0), 0)\n\n def test_set_element_as_object(self):\n # Tests setting elements with object\n a = empty(1, dtype=object)\n x = (1, 2, 3, 4, 5)\n a[0] = x\n assert_equal(a[0], x)\n assert_(a[0] is x)\n\n import datetime\n dt = datetime.datetime.now()\n a[0] = dt\n assert_(a[0] is dt)\n\n def test_indexing(self):\n # Tests conversions and indexing\n x1 = np.array([1, 2, 4, 3])\n x2 = array(x1, mask=[1, 0, 0, 0])\n x3 = array(x1, mask=[0, 1, 0, 1])\n x4 = array(x1)\n # test conversion to strings\n str(x2) # raises?\n repr(x2) # raises?\n assert_equal(np.sort(x1), sort(x2, endwith=False))\n # tests of indexing\n assert_(type(x2[1]) is type(x1[1]))\n assert_(x1[1] == x2[1])\n assert_(x2[0] is masked)\n assert_equal(x1[2], x2[2])\n assert_equal(x1[2:5], x2[2:5])\n assert_equal(x1[:], x2[:])\n assert_equal(x1[1:], x3[1:])\n x1[2] = 9\n x2[2] = 9\n assert_equal(x1, x2)\n x1[1:3] = 99\n x2[1:3] = 99\n assert_equal(x1, x2)\n x2[1] = masked\n assert_equal(x1, x2)\n x2[1:3] = masked\n assert_equal(x1, x2)\n x2[:] = x1\n x2[1] = masked\n assert_(allequal(getmask(x2), array([0, 1, 0, 0])))\n x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])\n assert_(allequal(getmask(x3), array([0, 1, 1, 0])))\n x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])\n assert_(allequal(getmask(x4), array([0, 1, 1, 0])))\n assert_(allequal(x4, array([1, 2, 3, 4])))\n x1 = np.arange(5) * 1.0\n x2 = masked_values(x1, 3.0)\n assert_equal(x1, x2)\n assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))\n assert_equal(3.0, x2.fill_value)\n x1 = 
array([1, 'hello', 2, 3], object)\n x2 = np.array([1, 'hello', 2, 3], object)\n s1 = x1[1]\n s2 = x2[1]\n assert_equal(type(s2), str)\n assert_equal(type(s1), str)\n assert_equal(s1, s2)\n assert_(x1[1:1].shape == (0,))\n\n def test_matrix_indexing(self):\n # Tests conversions and indexing\n x1 = np.matrix([[1, 2, 3], [4, 3, 2]])\n x2 = array(x1, mask=[[1, 0, 0], [0, 1, 0]])\n x3 = array(x1, mask=[[0, 1, 0], [1, 0, 0]])\n x4 = array(x1)\n # test conversion to strings\n str(x2) # raises?\n repr(x2) # raises?\n # tests of indexing\n assert_(type(x2[1, 0]) is type(x1[1, 0]))\n assert_(x1[1, 0] == x2[1, 0])\n assert_(x2[1, 1] is masked)\n assert_equal(x1[0, 2], x2[0, 2])\n assert_equal(x1[0, 1:], x2[0, 1:])\n assert_equal(x1[:, 2], x2[:, 2])\n assert_equal(x1[:], x2[:])\n assert_equal(x1[1:], x3[1:])\n x1[0, 2] = 9\n x2[0, 2] = 9\n assert_equal(x1, x2)\n x1[0, 1:] = 99\n x2[0, 1:] = 99\n assert_equal(x1, x2)\n x2[0, 1] = masked\n assert_equal(x1, x2)\n x2[0, 1:] = masked\n assert_equal(x1, x2)\n x2[0, :] = x1[0, :]\n x2[0, 1] = masked\n assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))\n x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])\n assert_(allequal(getmask(x3)[1], array([1, 1, 0])))\n assert_(allequal(getmask(x3[1]), array([1, 1, 0])))\n x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])\n assert_(allequal(getmask(x4[1]), array([1, 1, 0])))\n assert_(allequal(x4[1], array([1, 2, 3])))\n x1 = np.matrix(np.arange(5) * 1.0)\n x2 = masked_values(x1, 3.0)\n assert_equal(x1, x2)\n assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))\n assert_equal(3.0, x2.fill_value)\n\n @suppress_copy_mask_on_assignment\n def test_copy(self):\n # Tests of some subtle points of copying and sizing.\n n = [0, 0, 1, 0, 0]\n m = make_mask(n)\n m2 = make_mask(m)\n assert_(m is m2)\n m3 = make_mask(m, copy=1)\n assert_(m is not m3)\n\n x1 = np.arange(5)\n y1 = array(x1, mask=m)\n assert_equal(y1._data.__array_interface__, x1.__array_interface__)\n assert_(allequal(x1, y1.data))\n assert_equal(y1._mask.__array_interface__, m.__array_interface__)\n\n y1a = array(y1)\n assert_(y1a._data.__array_interface__ ==\n y1._data.__array_interface__)\n assert_(y1a.mask is y1.mask)\n\n y2 = array(x1, mask=m3)\n assert_(y2._data.__array_interface__ == x1.__array_interface__)\n assert_(y2._mask.__array_interface__ == m3.__array_interface__)\n assert_(y2[2] is masked)\n y2[2] = 9\n assert_(y2[2] is not masked)\n assert_(y2._mask.__array_interface__ == m3.__array_interface__)\n assert_(allequal(y2.mask, 0))\n\n y2a = array(x1, mask=m, copy=1)\n assert_(y2a._data.__array_interface__ != x1.__array_interface__)\n #assert_( y2a.mask is not m)\n assert_(y2a._mask.__array_interface__ != m.__array_interface__)\n assert_(y2a[2] is masked)\n y2a[2] = 9\n assert_(y2a[2] is not masked)\n #assert_( y2a.mask is not m)\n assert_(y2a._mask.__array_interface__ != m.__array_interface__)\n assert_(allequal(y2a.mask, 0))\n\n y3 = array(x1 * 1.0, mask=m)\n assert_(filled(y3).dtype is (x1 * 1.0).dtype)\n\n x4 = arange(4)\n x4[2] = masked\n y4 = resize(x4, (8,))\n assert_equal(concatenate([x4, x4]), y4)\n assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])\n y5 = repeat(x4, (2, 2, 2, 2), axis=0)\n assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])\n y6 = repeat(x4, 2, axis=0)\n assert_equal(y5, y6)\n y7 = x4.repeat((2, 2, 2, 2), axis=0)\n assert_equal(y5, y7)\n y8 = x4.repeat(2, 0)\n assert_equal(y5, y8)\n\n y9 = x4.copy()\n assert_equal(y9._data, x4._data)\n assert_equal(y9._mask, x4._mask)\n\n x = masked_array([1, 2, 3], mask=[0, 1, 0])\n # Copy 
is False by default\n y = masked_array(x)\n assert_equal(y._data.ctypes.data, x._data.ctypes.data)\n assert_equal(y._mask.ctypes.data, x._mask.ctypes.data)\n y = masked_array(x, copy=True)\n assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)\n assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)\n\n def test_copy_on_python_builtins(self):\n # Tests copy works on python builtins (issue#8019)\n assert_(isMaskedArray(np.ma.copy([1,2,3])))\n assert_(isMaskedArray(np.ma.copy((1,2,3))))\n\n def test_copy_immutable(self):\n # Tests that the copy method is immutable, GitHub issue #5247\n a = np.ma.array([1, 2, 3])\n b = np.ma.array([4, 5, 6])\n a_copy_method = a.copy\n b.copy\n assert_equal(a_copy_method(), [1, 2, 3])\n\n def test_deepcopy(self):\n from copy import deepcopy\n a = array([0, 1, 2], mask=[False, True, False])\n copied = deepcopy(a)\n assert_equal(copied.mask, a.mask)\n assert_not_equal(id(a._mask), id(copied._mask))\n\n copied[1] = 1\n assert_equal(copied.mask, [0, 0, 0])\n assert_equal(a.mask, [0, 1, 0])\n\n copied = deepcopy(a)\n assert_equal(copied.mask, a.mask)\n copied.mask[1] = False\n assert_equal(copied.mask, [0, 0, 0])\n assert_equal(a.mask, [0, 1, 0])\n\n def test_str_repr(self):\n a = array([0, 1, 2], mask=[False, True, False])\n assert_equal(str(a), '[0 -- 2]')\n assert_equal(repr(a), 'masked_array(data = [0 -- 2],\\n'\n ' mask = [False True False],\\n'\n ' fill_value = 999999)\\n')\n\n a = np.ma.arange(2000)\n a[1:50] = np.ma.masked\n assert_equal(\n repr(a),\n 'masked_array(data = [0 -- -- ..., 1997 1998 1999],\\n'\n ' mask = [False True True ..., False False False],\\n'\n ' fill_value = 999999)\\n'\n )\n\n def test_pickling(self):\n # Tests pickling\n for dtype in (int, float, str, object):\n a = arange(10).astype(dtype)\n a.fill_value = 999\n\n masks = ([0, 0, 0, 1, 0, 1, 0, 1, 0, 1], # partially masked\n True, # Fully masked\n False) # Fully unmasked\n\n for mask in masks:\n a.mask = mask\n a_pickled = pickle.loads(a.dumps())\n assert_equal(a_pickled._mask, a._mask)\n assert_equal(a_pickled._data, a._data)\n if dtype in (object, int):\n assert_equal(a_pickled.fill_value, 999)\n else:\n assert_equal(a_pickled.fill_value, dtype(999))\n assert_array_equal(a_pickled.mask, mask)\n\n def test_pickling_subbaseclass(self):\n # Test pickling w/ a subclass of ndarray\n a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)\n a_pickled = pickle.loads(a.dumps())\n assert_equal(a_pickled._mask, a._mask)\n assert_equal(a_pickled, a)\n assert_(isinstance(a_pickled._data, np.matrix))\n\n def test_pickling_maskedconstant(self):\n # Test pickling MaskedConstant\n mc = np.ma.masked\n mc_pickled = pickle.loads(mc.dumps())\n assert_equal(mc_pickled._baseclass, mc._baseclass)\n assert_equal(mc_pickled._mask, mc._mask)\n assert_equal(mc_pickled._data, mc._data)\n\n def test_pickling_wstructured(self):\n # Tests pickling w/ structured array\n a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],\n dtype=[('a', int), ('b', float)])\n a_pickled = pickle.loads(a.dumps())\n assert_equal(a_pickled._mask, a._mask)\n assert_equal(a_pickled, a)\n\n def test_pickling_keepalignment(self):\n # Tests pickling w/ F_CONTIGUOUS arrays\n a = arange(10)\n a.shape = (-1, 2)\n b = a.T\n test = pickle.loads(pickle.dumps(b))\n assert_equal(test, b)\n\n def test_single_element_subscript(self):\n # Tests single element subscripts of Maskedarrays.\n a = array([1, 3, 2])\n b = array([1, 3, 2], mask=[1, 0, 1])\n assert_equal(a[0].shape, ())\n assert_equal(b[0].shape, ())\n 
assert_equal(b[1].shape, ())\n\n def test_topython(self):\n # Tests some communication issues with Python.\n assert_equal(1, int(array(1)))\n assert_equal(1.0, float(array(1)))\n assert_equal(1, int(array([[[1]]])))\n assert_equal(1.0, float(array([[1]])))\n assert_raises(TypeError, float, array([1, 1]))\n\n with suppress_warnings() as sup:\n sup.filter(UserWarning, 'Warning: converting a masked element')\n assert_(np.isnan(float(array([1], mask=[1]))))\n\n a = array([1, 2, 3], mask=[1, 0, 0])\n assert_raises(TypeError, lambda: float(a))\n assert_equal(float(a[-1]), 3.)\n assert_(np.isnan(float(a[0])))\n assert_raises(TypeError, int, a)\n assert_equal(int(a[-1]), 3)\n assert_raises(MAError, lambda:int(a[0]))\n\n def test_oddfeatures_1(self):\n # Test of other odd features\n x = arange(20)\n x = x.reshape(4, 5)\n x.flat[5] = 12\n assert_(x[1, 0] == 12)\n z = x + 10j * x\n assert_equal(z.real, x)\n assert_equal(z.imag, 10 * x)\n assert_equal((z * conjugate(z)).real, 101 * x * x)\n z.imag[...] = 0.0\n\n x = arange(10)\n x[3] = masked\n assert_(str(x[3]) == str(masked))\n c = x >= 8\n assert_(count(where(c, masked, masked)) == 0)\n assert_(shape(where(c, masked, masked)) == c.shape)\n\n z = masked_where(c, x)\n assert_(z.dtype is x.dtype)\n assert_(z[3] is masked)\n assert_(z[4] is not masked)\n assert_(z[7] is not masked)\n assert_(z[8] is masked)\n assert_(z[9] is masked)\n assert_equal(x, z)\n\n def test_oddfeatures_2(self):\n # Tests some more features.\n x = array([1., 2., 3., 4., 5.])\n c = array([1, 1, 1, 0, 0])\n x[2] = masked\n z = where(c, x, -x)\n assert_equal(z, [1., 2., 0., -4., -5])\n c[0] = masked\n z = where(c, x, -x)\n assert_equal(z, [1., 2., 0., -4., -5])\n assert_(z[0] is masked)\n assert_(z[1] is not masked)\n assert_(z[2] is masked)\n\n @suppress_copy_mask_on_assignment\n def test_oddfeatures_3(self):\n # Tests some generic features\n atest = array([10], mask=True)\n btest = array([20])\n idx = atest.mask\n atest[idx] = btest[idx]\n assert_equal(atest, [20])\n\n def test_filled_with_object_dtype(self):\n a = np.ma.masked_all(1, dtype='O')\n assert_equal(a.filled('x')[0], 'x')\n\n def test_filled_with_flexible_dtype(self):\n # Test filled w/ flexible dtype\n flexi = array([(1, 1, 1)],\n dtype=[('i', int), ('s', '|S8'), ('f', float)])\n flexi[0] = masked\n assert_equal(flexi.filled(),\n np.array([(default_fill_value(0),\n default_fill_value('0'),\n default_fill_value(0.),)], dtype=flexi.dtype))\n flexi[0] = masked\n assert_equal(flexi.filled(1),\n np.array([(1, '1', 1.)], dtype=flexi.dtype))\n\n def test_filled_with_mvoid(self):\n # Test filled w/ mvoid\n ndtype = [('a', int), ('b', float)]\n a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype)\n # Filled using default\n test = a.filled()\n assert_equal(tuple(test), (1, default_fill_value(1.)))\n # Explicit fill_value\n test = a.filled((-1, -1))\n assert_equal(tuple(test), (1, -1))\n # Using predefined filling values\n a.fill_value = (-999, -999)\n assert_equal(tuple(a.filled()), (1, -999))\n\n def test_filled_with_nested_dtype(self):\n # Test filled w/ nested dtype\n ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]\n a = array([(1, (1, 1)), (2, (2, 2))],\n mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype)\n test = a.filled(0)\n control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype)\n assert_equal(test, control)\n\n test = a['B'].filled(0)\n control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)\n assert_equal(test, control)\n\n # test if mask gets set correctly (see #6760)\n Z = numpy.ma.zeros(2, 
numpy.dtype([(\"A\", \"(2,2)i1,(2,2)i1\", (2,2))]))\n assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)),\n ('f1', 'i1', (2, 2))], (2, 2))]))\n assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)),\n ('f1', '?', (2, 2))], (2, 2))]))\n\n def test_filled_with_f_order(self):\n # Test filled w/ F-contiguous array\n a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),\n mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),\n order='F') # this is currently ignored\n assert_(a.flags['F_CONTIGUOUS'])\n assert_(a.filled(0).flags['F_CONTIGUOUS'])\n\n def test_optinfo_propagation(self):\n # Checks that _optinfo dictionary isn't back-propagated\n x = array([1, 2, 3, ], dtype=float)\n x._optinfo['info'] = '???'\n y = x.copy()\n assert_equal(y._optinfo['info'], '???')\n y._optinfo['info'] = '!!!'\n assert_equal(x._optinfo['info'], '???')\n\n def test_fancy_printoptions(self):\n # Test printing a masked array w/ fancy dtype.\n fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])\n test = array([(1, (2, 3.0)), (4, (5, 6.0))],\n mask=[(1, (0, 1)), (0, (1, 0))],\n dtype=fancydtype)\n control = \"[(--, (2, --)) (4, (--, 6.0))]\"\n assert_equal(str(test), control)\n\n # Test 0-d array with multi-dimensional dtype\n t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]],\n 0.0),\n mask = (False, [[True, False, True],\n [False, False, True]],\n False),\n dtype = \"int, (2,3)float, float\")\n control = \"(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)\"\n assert_equal(str(t_2d0), control)\n\n\n def test_flatten_structured_array(self):\n # Test flatten_structured_array on arrays\n # On ndarray\n ndtype = [('a', int), ('b', float)]\n a = np.array([(1, 1), (2, 2)], dtype=ndtype)\n test = flatten_structured_array(a)\n control = np.array([[1., 1.], [2., 2.]], dtype=float)\n assert_equal(test, control)\n assert_equal(test.dtype, control.dtype)\n # On masked_array\n a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)\n test = flatten_structured_array(a)\n control = array([[1., 1.], [2., 2.]],\n mask=[[0, 1], [1, 0]], dtype=float)\n assert_equal(test, control)\n assert_equal(test.dtype, control.dtype)\n assert_equal(test.mask, control.mask)\n # On masked array with nested structure\n ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])]\n a = array([(1, (1, 1.1)), (2, (2, 2.2))],\n mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)\n test = flatten_structured_array(a)\n control = array([[1., 1., 1.1], [2., 2., 2.2]],\n mask=[[0, 1, 0], [1, 0, 1]], dtype=float)\n assert_equal(test, control)\n assert_equal(test.dtype, control.dtype)\n assert_equal(test.mask, control.mask)\n # Keeping the initial shape\n ndtype = [('a', int), ('b', float)]\n a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)\n test = flatten_structured_array(a)\n control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float)\n assert_equal(test, control)\n assert_equal(test.dtype, control.dtype)\n\n def test_void0d(self):\n # Test creating a mvoid object\n ndtype = [('a', int), ('b', int)]\n a = np.array([(1, 2,)], dtype=ndtype)[0]\n f = mvoid(a)\n assert_(isinstance(f, mvoid))\n\n a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0]\n assert_(isinstance(a, mvoid))\n\n a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)\n f = mvoid(a._data[0], a._mask[0])\n assert_(isinstance(f, mvoid))\n\n def test_mvoid_getitem(self):\n # Test mvoid.__getitem__\n ndtype = [('a', int), ('b', int)]\n a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],\n dtype=ndtype)\n # 
w/o mask\n f = a[0]\n assert_(isinstance(f, mvoid))\n assert_equal((f[0], f['a']), (1, 1))\n assert_equal(f['b'], 2)\n # w/ mask\n f = a[1]\n assert_(isinstance(f, mvoid))\n assert_(f[0] is masked)\n assert_(f['a'] is masked)\n assert_equal(f[1], 4)\n\n # exotic dtype\n A = masked_array(data=[([0,1],)],\n mask=[([True, False],)],\n dtype=[(\"A\", \">i2\", (2,))])\n assert_equal(A[0][\"A\"], A[\"A\"][0])\n assert_equal(A[0][\"A\"], masked_array(data=[0, 1],\n mask=[True, False], dtype=\">i2\"))\n\n def test_mvoid_iter(self):\n # Test iteration on __getitem__\n ndtype = [('a', int), ('b', int)]\n a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],\n dtype=ndtype)\n # w/o mask\n assert_equal(list(a[0]), [1, 2])\n # w/ mask\n assert_equal(list(a[1]), [masked, 4])\n\n def test_mvoid_print(self):\n # Test printing a mvoid\n mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])\n assert_equal(str(mx[0]), \"(1, 1)\")\n mx['b'][0] = masked\n ini_display = masked_print_option._display\n masked_print_option.set_display(\"-X-\")\n try:\n assert_equal(str(mx[0]), \"(1, -X-)\")\n assert_equal(repr(mx[0]), \"(1, -X-)\")\n finally:\n masked_print_option.set_display(ini_display)\n\n # also check if there are object datatypes (see gh-7493)\n mx = array([(1,), (2,)], dtype=[('a', 'O')])\n assert_equal(str(mx[0]), \"(1,)\")\n\n def test_mvoid_multidim_print(self):\n\n # regression test for gh-6019\n t_ma = masked_array(data = [([1, 2, 3],)],\n mask = [([False, True, False],)],\n fill_value = ([999999, 999999, 999999],),\n dtype = [('a', '<i4', (3,))])\n assert_(str(t_ma[0]) == \"([1, --, 3],)\")\n assert_(repr(t_ma[0]) == \"([1, --, 3],)\")\n\n # additional tests with structured arrays\n\n t_2d = masked_array(data = [([[1, 2], [3,4]],)],\n mask = [([[False, True], [True, False]],)],\n dtype = [('a', '<i4', (2,2))])\n assert_(str(t_2d[0]) == \"([[1, --], [--, 4]],)\")\n assert_(repr(t_2d[0]) == \"([[1, --], [--, 4]],)\")\n\n t_0d = masked_array(data = [(1,2)],\n mask = [(True,False)],\n dtype = [('a', '<i4'), ('b', '<i4')])\n assert_(str(t_0d[0]) == \"(--, 2)\")\n assert_(repr(t_0d[0]) == \"(--, 2)\")\n\n t_2d = masked_array(data = [([[1, 2], [3,4]], 1)],\n mask = [([[False, True], [True, False]], False)],\n dtype = [('a', '<i4', (2,2)), ('b', float)])\n assert_(str(t_2d[0]) == \"([[1, --], [--, 4]], 1.0)\")\n assert_(repr(t_2d[0]) == \"([[1, --], [--, 4]], 1.0)\")\n\n t_ne = masked_array(data=[(1, (1, 1))],\n mask=[(True, (True, False))],\n dtype = [('a', '<i4'), ('b', 'i4,i4')])\n assert_(str(t_ne[0]) == \"(--, (--, 1))\")\n assert_(repr(t_ne[0]) == \"(--, (--, 1))\")\n\n def test_object_with_array(self):\n mx1 = masked_array([1.], mask=[True])\n mx2 = masked_array([1., 2.])\n mx = masked_array([mx1, mx2], mask=[False, True])\n assert_(mx[0] is mx1)\n assert_(mx[1] is not mx2)\n assert_(np.all(mx[1].data == mx2.data))\n assert_(np.all(mx[1].mask))\n # check that we return a view.\n mx[1].data[0] = 0.\n assert_(mx2[0] == 0.)\n\n\nclass TestMaskedArrayArithmetic(object):\n # Base test class for MaskedArrays.\n\n def setup(self):\n # Base data definition.\n x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])\n y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])\n a10 = 10.\n m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]\n xm = masked_array(x, mask=m1)\n ym = masked_array(y, mask=m2)\n z = np.array([-.5, 0., .5, .8])\n zm = masked_array(z, mask=[0, 1, 0, 0])\n xf = np.where(m1, 1e+20, x)\n xm.set_fill_value(1e+20)\n self.d = 
(x, y, a10, m1, m2, xm, ym, z, zm, xf)\n self.err_status = np.geterr()\n np.seterr(divide='ignore', invalid='ignore')\n\n def teardown(self):\n np.seterr(**self.err_status)\n\n def test_basic_arithmetic(self):\n # Test of basic arithmetic.\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n a2d = array([[1, 2], [0, 4]])\n a2dm = masked_array(a2d, [[0, 0], [1, 0]])\n assert_equal(a2d * a2d, a2d * a2dm)\n assert_equal(a2d + a2d, a2d + a2dm)\n assert_equal(a2d - a2d, a2d - a2dm)\n for s in [(12,), (4, 3), (2, 6)]:\n x = x.reshape(s)\n y = y.reshape(s)\n xm = xm.reshape(s)\n ym = ym.reshape(s)\n xf = xf.reshape(s)\n assert_equal(-x, -xm)\n assert_equal(x + y, xm + ym)\n assert_equal(x - y, xm - ym)\n assert_equal(x * y, xm * ym)\n assert_equal(x / y, xm / ym)\n assert_equal(a10 + y, a10 + ym)\n assert_equal(a10 - y, a10 - ym)\n assert_equal(a10 * y, a10 * ym)\n assert_equal(a10 / y, a10 / ym)\n assert_equal(x + a10, xm + a10)\n assert_equal(x - a10, xm - a10)\n assert_equal(x * a10, xm * a10)\n assert_equal(x / a10, xm / a10)\n assert_equal(x ** 2, xm ** 2)\n assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5)\n assert_equal(x ** y, xm ** ym)\n assert_equal(np.add(x, y), add(xm, ym))\n assert_equal(np.subtract(x, y), subtract(xm, ym))\n assert_equal(np.multiply(x, y), multiply(xm, ym))\n assert_equal(np.divide(x, y), divide(xm, ym))\n\n def test_divide_on_different_shapes(self):\n x = arange(6, dtype=float)\n x.shape = (2, 3)\n y = arange(3, dtype=float)\n\n z = x / y\n assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])\n assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])\n\n z = x / y[None,:]\n assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])\n assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])\n\n y = arange(2, dtype=float)\n z = x / y[:, None]\n assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]])\n assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])\n\n def test_mixed_arithmetic(self):\n # Tests mixed arithmetics.\n na = np.array([1])\n ma = array([1])\n assert_(isinstance(na + ma, MaskedArray))\n assert_(isinstance(ma + na, MaskedArray))\n\n def test_limits_arithmetic(self):\n tiny = np.finfo(float).tiny\n a = array([tiny, 1. 
/ tiny, 0.])\n assert_equal(getmaskarray(a / 2), [0, 0, 0])\n assert_equal(getmaskarray(2 / a), [1, 0, 1])\n\n def test_masked_singleton_arithmetic(self):\n # Tests some scalar arithmetics on MaskedArrays.\n # Masked singleton should remain masked no matter what\n xm = array(0, mask=1)\n assert_((1 / array(0)).mask)\n assert_((1 + xm).mask)\n assert_((-xm).mask)\n assert_(maximum(xm, xm).mask)\n assert_(minimum(xm, xm).mask)\n\n def test_masked_singleton_equality(self):\n # Tests (in)equality on masked singleton\n a = array([1, 2, 3], mask=[1, 1, 0])\n assert_((a[0] == 0) is masked)\n assert_((a[0] != 0) is masked)\n assert_equal((a[-1] == 0), False)\n assert_equal((a[-1] != 0), True)\n\n def test_arithmetic_with_masked_singleton(self):\n # Checks that there's no collapsing to masked\n x = masked_array([1, 2])\n y = x * masked\n assert_equal(y.shape, x.shape)\n assert_equal(y._mask, [True, True])\n y = x[0] * masked\n assert_(y is masked)\n y = x + masked\n assert_equal(y.shape, x.shape)\n assert_equal(y._mask, [True, True])\n\n def test_arithmetic_with_masked_singleton_on_1d_singleton(self):\n # Check that we're not losing the shape of a singleton\n x = masked_array([1, ])\n y = x + masked\n assert_equal(y.shape, x.shape)\n assert_equal(y.mask, [True, ])\n\n def test_scalar_arithmetic(self):\n x = array(0, mask=0)\n assert_equal(x.filled().ctypes.data, x.ctypes.data)\n # Make sure we don't lose the shape in some circumstances\n xm = array((0, 0)) / 0.\n assert_equal(xm.shape, (2,))\n assert_equal(xm.mask, [1, 1])\n\n def test_basic_ufuncs(self):\n # Test various functions such as sin, cos.\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n assert_equal(np.cos(x), cos(xm))\n assert_equal(np.cosh(x), cosh(xm))\n assert_equal(np.sin(x), sin(xm))\n assert_equal(np.sinh(x), sinh(xm))\n assert_equal(np.tan(x), tan(xm))\n assert_equal(np.tanh(x), tanh(xm))\n assert_equal(np.sqrt(abs(x)), sqrt(xm))\n assert_equal(np.log(abs(x)), log(xm))\n assert_equal(np.log10(abs(x)), log10(xm))\n assert_equal(np.exp(x), exp(xm))\n assert_equal(np.arcsin(z), arcsin(zm))\n assert_equal(np.arccos(z), arccos(zm))\n assert_equal(np.arctan(z), arctan(zm))\n assert_equal(np.arctan2(x, y), arctan2(xm, ym))\n assert_equal(np.absolute(x), absolute(xm))\n assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))\n assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))\n assert_equal(np.equal(x, y), equal(xm, ym))\n assert_equal(np.not_equal(x, y), not_equal(xm, ym))\n assert_equal(np.less(x, y), less(xm, ym))\n assert_equal(np.greater(x, y), greater(xm, ym))\n assert_equal(np.less_equal(x, y), less_equal(xm, ym))\n assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))\n assert_equal(np.conjugate(x), conjugate(xm))\n\n def test_count_func(self):\n # Tests count\n assert_equal(1, count(1))\n assert_equal(0, array(1, mask=[1]))\n\n ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])\n res = count(ott)\n assert_(res.dtype.type is np.intp)\n assert_equal(3, res)\n\n ott = ott.reshape((2, 2))\n res = count(ott)\n assert_(res.dtype.type is np.intp)\n assert_equal(3, res)\n res = count(ott, 0)\n assert_(isinstance(res, ndarray))\n assert_equal([1, 2], res)\n assert_(getmask(res) is nomask)\n\n ott = array([0., 1., 2., 3.])\n res = count(ott, 0)\n assert_(isinstance(res, ndarray))\n assert_(res.dtype.type is np.intp)\n assert_raises(np.AxisError, ott.count, axis=1)\n\n def test_count_on_python_builtins(self):\n # Tests count works on python builtins (issue#8019)\n assert_equal(3, count([1,2,3]))\n assert_equal(2, 
count((1,2)))\n\n def test_minmax_func(self):\n # Tests minimum and maximum.\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n # max doesn't work if shaped\n xr = np.ravel(x)\n xmr = ravel(xm)\n # following are true because of careful selection of data\n assert_equal(max(xr), maximum.reduce(xmr))\n assert_equal(min(xr), minimum.reduce(xmr))\n\n assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])\n assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])\n x = arange(5)\n y = arange(5) - 2\n x[3] = masked\n y[0] = masked\n assert_equal(minimum(x, y), where(less(x, y), x, y))\n assert_equal(maximum(x, y), where(greater(x, y), x, y))\n assert_(minimum.reduce(x) == 0)\n assert_(maximum.reduce(x) == 4)\n\n x = arange(4).reshape(2, 2)\n x[-1, -1] = masked\n assert_equal(maximum.reduce(x, axis=None), 2)\n\n def test_minimummaximum_func(self):\n a = np.ones((2, 2))\n aminimum = minimum(a, a)\n assert_(isinstance(aminimum, MaskedArray))\n assert_equal(aminimum, np.minimum(a, a))\n\n aminimum = minimum.outer(a, a)\n assert_(isinstance(aminimum, MaskedArray))\n assert_equal(aminimum, np.minimum.outer(a, a))\n\n amaximum = maximum(a, a)\n assert_(isinstance(amaximum, MaskedArray))\n assert_equal(amaximum, np.maximum(a, a))\n\n amaximum = maximum.outer(a, a)\n assert_(isinstance(amaximum, MaskedArray))\n assert_equal(amaximum, np.maximum.outer(a, a))\n\n def test_minmax_reduce(self):\n # Test np.min/maximum.reduce on array w/ full False mask\n a = array([1, 2, 3], mask=[False, False, False])\n b = np.maximum.reduce(a)\n assert_equal(b, 3)\n\n def test_minmax_funcs_with_output(self):\n # Tests the min/max functions with explicit outputs\n mask = np.random.rand(12).round()\n xm = array(np.random.uniform(0, 10, 12), mask=mask)\n xm.shape = (3, 4)\n for funcname in ('min', 'max'):\n # Initialize\n npfunc = getattr(np, funcname)\n mafunc = getattr(numpy.ma.core, funcname)\n # Use the np version\n nout = np.empty((4,), dtype=int)\n try:\n result = npfunc(xm, axis=0, out=nout)\n except MaskError:\n pass\n nout = np.empty((4,), dtype=float)\n result = npfunc(xm, axis=0, out=nout)\n assert_(result is nout)\n # Use the ma version\n nout.fill(-999)\n result = mafunc(xm, axis=0, out=nout)\n assert_(result is nout)\n\n def test_minmax_methods(self):\n # Additional tests on max/min\n (_, _, _, _, _, xm, _, _, _, _) = self.d\n xm.shape = (xm.size,)\n assert_equal(xm.max(), 10)\n assert_(xm[0].max() is masked)\n assert_(xm[0].max(0) is masked)\n assert_(xm[0].max(-1) is masked)\n assert_equal(xm.min(), -10.)\n assert_(xm[0].min() is masked)\n assert_(xm[0].min(0) is masked)\n assert_(xm[0].min(-1) is masked)\n assert_equal(xm.ptp(), 20.)\n assert_(xm[0].ptp() is masked)\n assert_(xm[0].ptp(0) is masked)\n assert_(xm[0].ptp(-1) is masked)\n\n x = array([1, 2, 3], mask=True)\n assert_(x.min() is masked)\n assert_(x.max() is masked)\n assert_(x.ptp() is masked)\n\n def test_addsumprod(self):\n # Tests add, sum, product.\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n assert_equal(np.add.reduce(x), add.reduce(x))\n assert_equal(np.add.accumulate(x), add.accumulate(x))\n assert_equal(4, sum(array(4), axis=0))\n assert_equal(4, sum(array(4), axis=0))\n assert_equal(np.sum(x, axis=0), sum(x, axis=0))\n assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))\n assert_equal(np.sum(x, 0), sum(x, 0))\n assert_equal(np.product(x, axis=0), product(x, axis=0))\n assert_equal(np.product(x, 0), product(x, 0))\n assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))\n s = (3, 4)\n x.shape = y.shape = xm.shape 
= ym.shape = s\n if len(s) > 1:\n assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))\n assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))\n assert_equal(np.sum(x, 1), sum(x, 1))\n assert_equal(np.product(x, 1), product(x, 1))\n\n def test_binops_d2D(self):\n # Test binary operations on 2D data\n a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])\n b = array([[2., 3.], [4., 5.], [6., 7.]])\n\n test = a * b\n control = array([[2., 3.], [2., 2.], [3., 3.]],\n mask=[[0, 0], [1, 1], [1, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n\n test = b * a\n control = array([[2., 3.], [4., 5.], [6., 7.]],\n mask=[[0, 0], [1, 1], [1, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n\n a = array([[1.], [2.], [3.]])\n b = array([[2., 3.], [4., 5.], [6., 7.]],\n mask=[[0, 0], [0, 0], [0, 1]])\n test = a * b\n control = array([[2, 3], [8, 10], [18, 3]],\n mask=[[0, 0], [0, 0], [0, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n\n test = b * a\n control = array([[2, 3], [8, 10], [18, 7]],\n mask=[[0, 0], [0, 0], [0, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n\n def test_domained_binops_d2D(self):\n # Test domained binary operations on 2D data\n a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])\n b = array([[2., 3.], [4., 5.], [6., 7.]])\n\n test = a / b\n control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],\n mask=[[0, 0], [1, 1], [1, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n\n test = b / a\n control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],\n mask=[[0, 0], [1, 1], [1, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n\n a = array([[1.], [2.], [3.]])\n b = array([[2., 3.], [4., 5.], [6., 7.]],\n mask=[[0, 0], [0, 0], [0, 1]])\n test = a / b\n control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. 
/ 6, 3]],\n mask=[[0, 0], [0, 0], [0, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n\n test = b / a\n control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],\n mask=[[0, 0], [0, 0], [0, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n\n def test_noshrinking(self):\n # Check that we don't shrink a mask when not wanted\n # Binary operations\n a = masked_array([1., 2., 3.], mask=[False, False, False],\n shrink=False)\n b = a + 1\n assert_equal(b.mask, [0, 0, 0])\n # In place binary operation\n a += 1\n assert_equal(a.mask, [0, 0, 0])\n # Domained binary operation\n b = a / 1.\n assert_equal(b.mask, [0, 0, 0])\n # In place binary operation\n a /= 1.\n assert_equal(a.mask, [0, 0, 0])\n\n def test_ufunc_nomask(self):\n # check the case ufuncs should set the mask to false\n m = np.ma.array([1])\n # check we don't get array([False], dtype=bool)\n assert_equal(np.true_divide(m, 5).mask.shape, ())\n\n def test_noshink_on_creation(self):\n # Check that the mask is not shrunk on array creation when not wanted\n a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)\n assert_equal(a.mask, [0, 0, 0])\n\n def test_mod(self):\n # Tests mod\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n assert_equal(mod(x, y), mod(xm, ym))\n test = mod(ym, xm)\n assert_equal(test, np.mod(ym, xm))\n assert_equal(test.mask, mask_or(xm.mask, ym.mask))\n test = mod(xm, ym)\n assert_equal(test, np.mod(xm, ym))\n assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))\n\n def test_TakeTransposeInnerOuter(self):\n # Test of take, transpose, inner, outer products\n x = arange(24)\n y = np.arange(24)\n x[5:6] = masked\n x = x.reshape(2, 3, 4)\n y = y.reshape(2, 3, 4)\n assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))\n assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))\n assert_equal(np.inner(filled(x, 0), filled(y, 0)),\n inner(x, y))\n assert_equal(np.outer(filled(x, 0), filled(y, 0)),\n outer(x, y))\n y = array(['abc', 1, 'def', 2, 3], object)\n y[2] = masked\n t = take(y, [0, 3, 4])\n assert_(t[0] == 'abc')\n assert_(t[1] == 2)\n assert_(t[2] == 3)\n\n def test_imag_real(self):\n # Check complex\n xx = array([1 + 10j, 20 + 2j], mask=[1, 0])\n assert_equal(xx.imag, [10, 2])\n assert_equal(xx.imag.filled(), [1e+20, 2])\n assert_equal(xx.imag.dtype, xx._data.imag.dtype)\n assert_equal(xx.real, [1, 20])\n assert_equal(xx.real.filled(), [1e+20, 20])\n assert_equal(xx.real.dtype, xx._data.real.dtype)\n\n def test_methods_with_output(self):\n xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)\n xm[:, 0] = xm[0] = xm[-1, -1] = masked\n\n funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)\n\n for funcname in funclist:\n npfunc = getattr(np, funcname)\n xmmeth = getattr(xm, funcname)\n # A ndarray as explicit input\n output = np.empty(4, dtype=float)\n output.fill(-9999)\n result = npfunc(xm, axis=0, out=output)\n # ... 
the result should be the given output\n assert_(result is output)\n assert_equal(result, xmmeth(axis=0, out=output))\n\n output = empty(4, dtype=int)\n result = xmmeth(axis=0, out=output)\n assert_(result is output)\n assert_(output[0] is masked)\n\n def test_count_mean_with_matrix(self):\n m = np.ma.array(np.matrix([[1,2],[3,4]]), mask=np.zeros((2,2)))\n\n assert_equal(m.count(axis=0).shape, (1,2))\n assert_equal(m.count(axis=1).shape, (2,1))\n\n #make sure broadcasting inside mean and var work\n assert_equal(m.mean(axis=0), [[2., 3.]])\n assert_equal(m.mean(axis=1), [[1.5], [3.5]])\n\n def test_eq_on_structured(self):\n # Test the equality of structured arrays\n ndtype = [('A', int), ('B', int)]\n a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)\n test = (a == a)\n assert_equal(test.data, [True, True])\n assert_equal(test.mask, [False, False])\n test = (a == a[0])\n assert_equal(test.data, [True, False])\n assert_equal(test.mask, [False, False])\n b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)\n test = (a == b)\n assert_equal(test.data, [False, True])\n assert_equal(test.mask, [True, False])\n test = (a[0] == b)\n assert_equal(test.data, [False, False])\n assert_equal(test.mask, [True, False])\n b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)\n test = (a == b)\n assert_equal(test.data, [True, True])\n assert_equal(test.mask, [False, False])\n # complicated dtype, 2-dimensional array.\n ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]\n a = array([[(1, (1, 1)), (2, (2, 2))],\n [(3, (3, 3)), (4, (4, 4))]],\n mask=[[(0, (1, 0)), (0, (0, 1))],\n [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype)\n test = (a[0, 0] == a)\n assert_equal(test.data, [[True, False], [False, False]])\n assert_equal(test.mask, [[False, False], [False, True]])\n\n def test_ne_on_structured(self):\n # Test the equality of structured arrays\n ndtype = [('A', int), ('B', int)]\n a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)\n test = (a != a)\n assert_equal(test.data, [False, False])\n assert_equal(test.mask, [False, False])\n test = (a != a[0])\n assert_equal(test.data, [False, True])\n assert_equal(test.mask, [False, False])\n b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)\n test = (a != b)\n assert_equal(test.data, [True, False])\n assert_equal(test.mask, [True, False])\n test = (a[0] != b)\n assert_equal(test.data, [True, True])\n assert_equal(test.mask, [True, False])\n b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)\n test = (a != b)\n assert_equal(test.data, [False, False])\n assert_equal(test.mask, [False, False])\n # complicated dtype, 2-dimensional array.\n ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]\n a = array([[(1, (1, 1)), (2, (2, 2))],\n [(3, (3, 3)), (4, (4, 4))]],\n mask=[[(0, (1, 0)), (0, (0, 1))],\n [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype)\n test = (a[0, 0] != a)\n assert_equal(test.data, [[False, True], [True, True]])\n assert_equal(test.mask, [[False, False], [False, True]])\n\n def test_eq_ne_structured_extra(self):\n # ensure simple examples are symmetric and make sense.\n # from https://github.com/numpy/numpy/pull/8590#discussion_r101126465\n dt = np.dtype('i4,i4')\n for m1 in (mvoid((1, 2), mask=(0, 0), dtype=dt),\n mvoid((1, 2), mask=(0, 1), dtype=dt),\n mvoid((1, 2), mask=(1, 0), dtype=dt),\n mvoid((1, 2), mask=(1, 1), dtype=dt)):\n ma1 = m1.view(MaskedArray)\n r1 = ma1.view('2i4')\n for m2 in (np.array((1, 1), dtype=dt),\n mvoid((1, 1), dtype=dt),\n mvoid((1, 0), mask=(0, 1), 
dtype=dt),\n mvoid((3, 2), mask=(0, 1), dtype=dt)):\n ma2 = m2.view(MaskedArray)\n r2 = ma2.view('2i4')\n eq_expected = (r1 == r2).all()\n assert_equal(m1 == m2, eq_expected)\n assert_equal(m2 == m1, eq_expected)\n assert_equal(ma1 == m2, eq_expected)\n assert_equal(m1 == ma2, eq_expected)\n assert_equal(ma1 == ma2, eq_expected)\n # Also check it is the same if we do it element by element.\n el_by_el = [m1[name] == m2[name] for name in dt.names]\n assert_equal(array(el_by_el, dtype=bool).all(), eq_expected)\n ne_expected = (r1 != r2).any()\n assert_equal(m1 != m2, ne_expected)\n assert_equal(m2 != m1, ne_expected)\n assert_equal(ma1 != m2, ne_expected)\n assert_equal(m1 != ma2, ne_expected)\n assert_equal(ma1 != ma2, ne_expected)\n el_by_el = [m1[name] != m2[name] for name in dt.names]\n assert_equal(array(el_by_el, dtype=bool).any(), ne_expected)\n\n def test_eq_with_None(self):\n # Really, comparisons with None should not be done, but check them\n # anyway. Note that pep8 will flag these tests.\n # Deprecation is in place for arrays, and when it happens this\n # test will fail (and have to be changed accordingly).\n\n # With partial mask\n with suppress_warnings() as sup:\n sup.filter(FutureWarning, \"Comparison to `None`\")\n a = array([None, 1], mask=[0, 1])\n assert_equal(a == None, array([True, False], mask=[0, 1]))\n assert_equal(a.data == None, [True, False])\n assert_equal(a != None, array([False, True], mask=[0, 1]))\n # With nomask\n a = array([None, 1], mask=False)\n assert_equal(a == None, [True, False])\n assert_equal(a != None, [False, True])\n # With complete mask\n a = array([None, 2], mask=True)\n assert_equal(a == None, array([False, True], mask=True))\n assert_equal(a != None, array([True, False], mask=True))\n # Fully masked, even comparison to None should return \"masked\"\n a = masked\n assert_equal(a == None, masked)\n\n def test_eq_with_scalar(self):\n a = array(1)\n assert_equal(a == 1, True)\n assert_equal(a == 0, False)\n assert_equal(a != 1, False)\n assert_equal(a != 0, True)\n b = array(1, mask=True)\n assert_equal(b == 0, masked)\n assert_equal(b == 1, masked)\n assert_equal(b != 0, masked)\n assert_equal(b != 1, masked)\n\n def test_eq_different_dimensions(self):\n m1 = array([1, 1], mask=[0, 1])\n # test comparison with both masked and regular arrays.\n for m2 in (array([[0, 1], [1, 2]]),\n np.array([[0, 1], [1, 2]])):\n test = (m1 == m2)\n assert_equal(test.data, [[False, False],\n [True, False]])\n assert_equal(test.mask, [[False, True],\n [False, True]])\n\n def test_numpyarithmetics(self):\n # Check that the mask is not back-propagated when using numpy functions\n a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])\n control = masked_array([np.nan, np.nan, 0, np.log(2), -1],\n mask=[1, 1, 0, 0, 1])\n\n test = log(a)\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n assert_equal(a.mask, [0, 0, 0, 0, 1])\n\n test = np.log(a)\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n assert_equal(a.mask, [0, 0, 0, 0, 1])\n\n\nclass TestMaskedArrayAttributes(object):\n\n def test_keepmask(self):\n # Tests the keep mask flag\n x = masked_array([1, 2, 3], mask=[1, 0, 0])\n mx = masked_array(x)\n assert_equal(mx.mask, x.mask)\n mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)\n assert_equal(mx.mask, [0, 1, 0])\n mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)\n assert_equal(mx.mask, [1, 1, 0])\n # We default to true\n mx = masked_array(x, mask=[0, 1, 0])\n assert_equal(mx.mask, [1, 1, 0])\n\n def 
test_hardmask(self):\n # Test hard_mask\n d = arange(5)\n n = [0, 0, 0, 1, 1]\n m = make_mask(n)\n xh = array(d, mask=m, hard_mask=True)\n # We need to copy, to avoid updating d in xh !\n xs = array(d, mask=m, hard_mask=False, copy=True)\n xh[[1, 4]] = [10, 40]\n xs[[1, 4]] = [10, 40]\n assert_equal(xh._data, [0, 10, 2, 3, 4])\n assert_equal(xs._data, [0, 10, 2, 3, 40])\n assert_equal(xs.mask, [0, 0, 0, 1, 0])\n assert_(xh._hardmask)\n assert_(not xs._hardmask)\n xh[1:4] = [10, 20, 30]\n xs[1:4] = [10, 20, 30]\n assert_equal(xh._data, [0, 10, 20, 3, 4])\n assert_equal(xs._data, [0, 10, 20, 30, 40])\n assert_equal(xs.mask, nomask)\n xh[0] = masked\n xs[0] = masked\n assert_equal(xh.mask, [1, 0, 0, 1, 1])\n assert_equal(xs.mask, [1, 0, 0, 0, 0])\n xh[:] = 1\n xs[:] = 1\n assert_equal(xh._data, [0, 1, 1, 3, 4])\n assert_equal(xs._data, [1, 1, 1, 1, 1])\n assert_equal(xh.mask, [1, 0, 0, 1, 1])\n assert_equal(xs.mask, nomask)\n # Switch to soft mask\n xh.soften_mask()\n xh[:] = arange(5)\n assert_equal(xh._data, [0, 1, 2, 3, 4])\n assert_equal(xh.mask, nomask)\n # Switch back to hard mask\n xh.harden_mask()\n xh[xh < 3] = masked\n assert_equal(xh._data, [0, 1, 2, 3, 4])\n assert_equal(xh._mask, [1, 1, 1, 0, 0])\n xh[filled(xh > 1, False)] = 5\n assert_equal(xh._data, [0, 1, 2, 5, 5])\n assert_equal(xh._mask, [1, 1, 1, 0, 0])\n\n xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)\n xh[0] = 0\n assert_equal(xh._data, [[1, 0], [3, 4]])\n assert_equal(xh._mask, [[1, 0], [0, 0]])\n xh[-1, -1] = 5\n assert_equal(xh._data, [[1, 0], [3, 5]])\n assert_equal(xh._mask, [[1, 0], [0, 0]])\n xh[filled(xh < 5, False)] = 2\n assert_equal(xh._data, [[1, 2], [2, 5]])\n assert_equal(xh._mask, [[1, 0], [0, 0]])\n\n def test_hardmask_again(self):\n # Another test of hardmask\n d = arange(5)\n n = [0, 0, 0, 1, 1]\n m = make_mask(n)\n xh = array(d, mask=m, hard_mask=True)\n xh[4:5] = 999\n xh[0:1] = 999\n assert_equal(xh._data, [999, 1, 2, 3, 4])\n\n def test_hardmask_oncemore_yay(self):\n # OK, yet another test of hardmask\n # Make sure that harden_mask/soften_mask//unshare_mask returns self\n a = array([1, 2, 3], mask=[1, 0, 0])\n b = a.harden_mask()\n assert_equal(a, b)\n b[0] = 0\n assert_equal(a, b)\n assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))\n a = b.soften_mask()\n a[0] = 0\n assert_equal(a, b)\n assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))\n\n def test_smallmask(self):\n # Checks the behaviour of _smallmask\n a = arange(10)\n a[1] = masked\n a[1] = 1\n assert_equal(a._mask, nomask)\n a = arange(10)\n a._smallmask = False\n a[1] = masked\n a[1] = 1\n assert_equal(a._mask, zeros(10))\n\n def test_shrink_mask(self):\n # Tests .shrink_mask()\n a = array([1, 2, 3], mask=[0, 0, 0])\n b = a.shrink_mask()\n assert_equal(a, b)\n assert_equal(a.mask, nomask)\n\n def test_flat(self):\n # Test that flat can return all types of items [#4585, #4615]\n # test simple access\n test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])\n assert_equal(test.flat[1], 2)\n assert_equal(test.flat[2], masked)\n assert_(np.all(test.flat[0:2] == test[0, 0:2]))\n # Test flat on masked_matrices\n test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])\n test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])\n control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])\n assert_equal(test, control)\n # Test setting\n test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])\n testflat = test.flat\n testflat[:] = testflat[[2, 1, 0]]\n assert_equal(test, control)\n testflat[0] = 9\n assert_equal(test[0, 
0], 9)\n # test 2-D record array\n # ... on structured array w/ masked records\n x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],\n [(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]],\n dtype=[('a', int), ('b', float), ('c', '|S8')])\n x['a'][0, 1] = masked\n x['b'][1, 0] = masked\n x['c'][0, 2] = masked\n x[-1, -1] = masked\n xflat = x.flat\n assert_equal(xflat[0], x[0, 0])\n assert_equal(xflat[1], x[0, 1])\n assert_equal(xflat[2], x[0, 2])\n assert_equal(xflat[:3], x[0])\n assert_equal(xflat[3], x[1, 0])\n assert_equal(xflat[4], x[1, 1])\n assert_equal(xflat[5], x[1, 2])\n assert_equal(xflat[3:], x[1])\n assert_equal(xflat[-1], x[-1, -1])\n i = 0\n j = 0\n for xf in xflat:\n assert_equal(xf, x[j, i])\n i += 1\n if i >= x.shape[-1]:\n i = 0\n j += 1\n # test that matrices keep the correct shape (#4615)\n a = masked_array(np.matrix(np.eye(2)), mask=0)\n b = a.flat\n b01 = b[:2]\n assert_equal(b01.data, array([[1., 0.]]))\n assert_equal(b01.mask, array([[False, False]]))\n\n def test_assign_dtype(self):\n # check that the mask's dtype is updated when dtype is changed\n a = np.zeros(4, dtype='f4,i4')\n\n m = np.ma.array(a)\n m.dtype = np.dtype('f4')\n repr(m) # raises?\n assert_equal(m.dtype, np.dtype('f4'))\n\n # check that dtype changes that change shape of mask too much\n # are not allowed\n def assign():\n m = np.ma.array(a)\n m.dtype = np.dtype('f8')\n assert_raises(ValueError, assign)\n\n b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises?\n assert_equal(b.dtype, np.dtype('f4'))\n\n # check that nomask is preserved\n a = np.zeros(4, dtype='f4')\n m = np.ma.array(a)\n m.dtype = np.dtype('f4,i4')\n assert_equal(m.dtype, np.dtype('f4,i4'))\n assert_equal(m._mask, np.ma.nomask)\n\n\nclass TestFillingValues(object):\n\n def test_check_on_scalar(self):\n # Test _check_fill_value set to valid and invalid values\n _check_fill_value = np.ma.core._check_fill_value\n\n fval = _check_fill_value(0, int)\n assert_equal(fval, 0)\n fval = _check_fill_value(None, int)\n assert_equal(fval, default_fill_value(0))\n\n fval = _check_fill_value(0, \"|S3\")\n assert_equal(fval, b\"0\")\n fval = _check_fill_value(None, \"|S3\")\n assert_equal(fval, default_fill_value(b\"camelot!\"))\n assert_raises(TypeError, _check_fill_value, 1e+20, int)\n assert_raises(TypeError, _check_fill_value, 'stuff', int)\n\n def test_check_on_fields(self):\n # Tests _check_fill_value with records\n _check_fill_value = np.ma.core._check_fill_value\n ndtype = [('a', int), ('b', float), ('c', \"|S3\")]\n # A check on a list should return a single record\n fval = _check_fill_value([-999, -12345678.9, \"???\"], ndtype)\n assert_(isinstance(fval, ndarray))\n assert_equal(fval.item(), [-999, -12345678.9, b\"???\"])\n # A check on None should output the defaults\n fval = _check_fill_value(None, ndtype)\n assert_(isinstance(fval, ndarray))\n assert_equal(fval.item(), [default_fill_value(0),\n default_fill_value(0.),\n asbytes(default_fill_value(\"0\"))])\n #.....Using a structured type as fill_value should work\n fill_val = np.array((-999, -12345678.9, \"???\"), dtype=ndtype)\n fval = _check_fill_value(fill_val, ndtype)\n assert_(isinstance(fval, ndarray))\n assert_equal(fval.item(), [-999, -12345678.9, b\"???\"])\n\n #.....Using a flexible type w/ a different type shouldn't matter\n # BEHAVIOR in 1.5 and earlier: match structured types by position\n #fill_val = np.array((-999, -12345678.9, \"???\"),\n # dtype=[(\"A\", int), (\"B\", float), (\"C\", \"|S3\")])\n # BEHAVIOR in 1.6 and later: match structured types by 
name\n fill_val = np.array((\"???\", -999, -12345678.9),\n dtype=[(\"c\", \"|S3\"), (\"a\", int), (\"b\", float), ])\n # suppress deprecation warning in 1.12 (remove in 1.13)\n with assert_warns(FutureWarning):\n fval = _check_fill_value(fill_val, ndtype)\n assert_(isinstance(fval, ndarray))\n assert_equal(fval.item(), [-999, -12345678.9, b\"???\"])\n\n #.....Using an object-array shouldn't matter either\n fill_val = np.ndarray(shape=(1,), dtype=object)\n fill_val[0] = (-999, -12345678.9, b\"???\")\n fval = _check_fill_value(fill_val, object)\n assert_(isinstance(fval, ndarray))\n assert_equal(fval.item(), [-999, -12345678.9, b\"???\"])\n # NOTE: This test was never run properly as \"fill_value\" rather than\n # \"fill_val\" was assigned. Written properly, it fails.\n #fill_val = np.array((-999, -12345678.9, \"???\"))\n #fval = _check_fill_value(fill_val, ndtype)\n #assert_(isinstance(fval, ndarray))\n #assert_equal(fval.item(), [-999, -12345678.9, b\"???\"])\n #.....One-field-only flexible type should work as well\n ndtype = [(\"a\", int)]\n fval = _check_fill_value(-999999999, ndtype)\n assert_(isinstance(fval, ndarray))\n assert_equal(fval.item(), (-999999999,))\n\n def test_fillvalue_conversion(self):\n # Tests the behavior of fill_value during conversion\n # We had a tailored comment to make sure special attributes are\n # properly dealt with\n a = array([b'3', b'4', b'5'])\n a._optinfo.update({'comment':\"updated!\"})\n\n b = array(a, dtype=int)\n assert_equal(b._data, [3, 4, 5])\n assert_equal(b.fill_value, default_fill_value(0))\n\n b = array(a, dtype=float)\n assert_equal(b._data, [3, 4, 5])\n assert_equal(b.fill_value, default_fill_value(0.))\n\n b = a.astype(int)\n assert_equal(b._data, [3, 4, 5])\n assert_equal(b.fill_value, default_fill_value(0))\n assert_equal(b._optinfo['comment'], \"updated!\")\n\n b = a.astype([('a', '|S3')])\n assert_equal(b['a']._data, a._data)\n assert_equal(b['a'].fill_value, a.fill_value)\n\n def test_default_fill_value(self):\n # check all calling conventions\n f1 = default_fill_value(1.)\n f2 = default_fill_value(np.array(1.))\n f3 = default_fill_value(np.array(1.).dtype)\n assert_equal(f1, f2)\n assert_equal(f1, f3)\n\n def test_default_fill_value_structured(self):\n fields = array([(1, 1, 1)],\n dtype=[('i', int), ('s', '|S8'), ('f', float)])\n\n f1 = default_fill_value(fields)\n f2 = default_fill_value(fields.dtype)\n expected = np.array((default_fill_value(0),\n default_fill_value('0'),\n default_fill_value(0.)), dtype=fields.dtype)\n assert_equal(f1, expected)\n assert_equal(f2, expected)\n\n def test_default_fill_value_void(self):\n dt = np.dtype([('v', 'V7')])\n f = default_fill_value(dt)\n assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v']))\n\n def test_fillvalue(self):\n # Yet more fun with the fill_value\n data = masked_array([1, 2, 3], fill_value=-999)\n series = data[[0, 2, 1]]\n assert_equal(series._fill_value, data._fill_value)\n\n mtype = [('f', float), ('s', '|S3')]\n x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)\n x.fill_value = 999\n assert_equal(x.fill_value.item(), [999., b'999'])\n assert_equal(x['f'].fill_value, 999)\n assert_equal(x['s'].fill_value, b'999')\n\n x.fill_value = (9, '???')\n assert_equal(x.fill_value.item(), (9, b'???'))\n assert_equal(x['f'].fill_value, 9)\n assert_equal(x['s'].fill_value, b'???')\n\n x = array([1, 2, 3.1])\n x.fill_value = 999\n assert_equal(np.asarray(x.fill_value).dtype, float)\n assert_equal(x.fill_value, 999.)\n assert_equal(x._fill_value, np.array(999.))\n\n def 
test_fillvalue_exotic_dtype(self):\n # Tests yet more exotic flexible dtypes\n _check_fill_value = np.ma.core._check_fill_value\n ndtype = [('i', int), ('s', '|S8'), ('f', float)]\n control = np.array((default_fill_value(0),\n default_fill_value('0'),\n default_fill_value(0.),),\n dtype=ndtype)\n assert_equal(_check_fill_value(None, ndtype), control)\n # The shape shouldn't matter\n ndtype = [('f0', float, (2, 2))]\n control = np.array((default_fill_value(0.),),\n dtype=[('f0', float)]).astype(ndtype)\n assert_equal(_check_fill_value(None, ndtype), control)\n control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)\n assert_equal(_check_fill_value(0, ndtype), control)\n\n ndtype = np.dtype(\"int, (2,3)float, float\")\n control = np.array((default_fill_value(0),\n default_fill_value(0.),\n default_fill_value(0.),),\n dtype=\"int, float, float\").astype(ndtype)\n test = _check_fill_value(None, ndtype)\n assert_equal(test, control)\n control = np.array((0, 0, 0), dtype=\"int, float, float\").astype(ndtype)\n assert_equal(_check_fill_value(0, ndtype), control)\n # but when indexing, fill value should become scalar not tuple\n # See issue #6723\n M = masked_array(control)\n assert_equal(M[\"f1\"].fill_value.ndim, 0)\n\n def test_fillvalue_datetime_timedelta(self):\n # Test default fillvalue for datetime64 and timedelta64 types.\n # See issue #4476, this would return '?' which would cause errors\n # elsewhere\n\n for timecode in (\"as\", \"fs\", \"ps\", \"ns\", \"us\", \"ms\", \"s\", \"m\",\n \"h\", \"D\", \"W\", \"M\", \"Y\"):\n control = numpy.datetime64(\"NaT\", timecode)\n test = default_fill_value(numpy.dtype(\"<M8[\" + timecode + \"]\"))\n np.testing.assert_equal(test, control)\n\n control = numpy.timedelta64(\"NaT\", timecode)\n test = default_fill_value(numpy.dtype(\"<m8[\" + timecode + \"]\"))\n np.testing.assert_equal(test, control)\n\n def test_extremum_fill_value(self):\n # Tests extremum fill values for flexible type.\n a = array([(1, (2, 3)), (4, (5, 6))],\n dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])\n test = a.fill_value\n assert_equal(test.dtype, a.dtype)\n assert_equal(test['A'], default_fill_value(a['A']))\n assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))\n assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))\n\n test = minimum_fill_value(a)\n assert_equal(test.dtype, a.dtype)\n assert_equal(test[0], minimum_fill_value(a['A']))\n assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))\n assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))\n assert_equal(test[1], minimum_fill_value(a['B']))\n\n test = maximum_fill_value(a)\n assert_equal(test.dtype, a.dtype)\n assert_equal(test[0], maximum_fill_value(a['A']))\n assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))\n assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))\n assert_equal(test[1], maximum_fill_value(a['B']))\n\n def test_extremum_fill_value_subdtype(self):\n a = array(([2, 3, 4],), dtype=[('value', np.int8, 3)])\n\n test = minimum_fill_value(a)\n assert_equal(test.dtype, a.dtype)\n assert_equal(test[0], np.full(3, minimum_fill_value(a['value'])))\n\n test = maximum_fill_value(a)\n assert_equal(test.dtype, a.dtype)\n assert_equal(test[0], np.full(3, maximum_fill_value(a['value'])))\n\n def test_fillvalue_individual_fields(self):\n # Test setting fill_value on individual fields\n ndtype = [('a', int), ('b', int)]\n # Explicit fill_value\n a = array(list(zip([1, 2, 3], [4, 5, 6])),\n fill_value=(-999, -999), dtype=ndtype)\n aa = a['a']\n 
aa.set_fill_value(10)\n assert_equal(aa._fill_value, np.array(10))\n assert_equal(tuple(a.fill_value), (10, -999))\n a.fill_value['b'] = -10\n assert_equal(tuple(a.fill_value), (10, -10))\n # Implicit fill_value\n t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype)\n tt = t['a']\n tt.set_fill_value(10)\n assert_equal(tt._fill_value, np.array(10))\n assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))\n\n def test_fillvalue_implicit_structured_array(self):\n # Check that fill_value is always defined for structured arrays\n ndtype = ('b', float)\n adtype = ('a', float)\n a = array([(1.,), (2.,)], mask=[(False,), (False,)],\n fill_value=(np.nan,), dtype=np.dtype([adtype]))\n b = empty(a.shape, dtype=[adtype, ndtype])\n b['a'] = a['a']\n b['a'].set_fill_value(a['a'].fill_value)\n f = b._fill_value[()]\n assert_(np.isnan(f[0]))\n assert_equal(f[-1], default_fill_value(1.))\n\n def test_fillvalue_as_arguments(self):\n # Test adding a fill_value parameter to empty/ones/zeros\n a = empty(3, fill_value=999.)\n assert_equal(a.fill_value, 999.)\n\n a = ones(3, fill_value=999., dtype=float)\n assert_equal(a.fill_value, 999.)\n\n a = zeros(3, fill_value=0., dtype=complex)\n assert_equal(a.fill_value, 0.)\n\n a = identity(3, fill_value=0., dtype=complex)\n assert_equal(a.fill_value, 0.)\n\n def test_shape_argument(self):\n # Test that shape can be provides as an argument\n # GH issue 6106\n a = empty(shape=(3, ))\n assert_equal(a.shape, (3, ))\n\n a = ones(shape=(3, ), dtype=float)\n assert_equal(a.shape, (3, ))\n\n a = zeros(shape=(3, ), dtype=complex)\n assert_equal(a.shape, (3, ))\n\n def test_fillvalue_in_view(self):\n # Test the behavior of fill_value in view\n\n # Create initial masked array\n x = array([1, 2, 3], fill_value=1, dtype=np.int64)\n\n # Check that fill_value is preserved by default\n y = x.view()\n assert_(y.fill_value == 1)\n\n # Check that fill_value is preserved if dtype is specified and the\n # dtype is an ndarray sub-class and has a _fill_value attribute\n y = x.view(MaskedArray)\n assert_(y.fill_value == 1)\n\n # Check that fill_value is preserved if type is specified and the\n # dtype is an ndarray sub-class and has a _fill_value attribute (by\n # default, the first argument is dtype, not type)\n y = x.view(type=MaskedArray)\n assert_(y.fill_value == 1)\n\n # Check that code does not crash if passed an ndarray sub-class that\n # does not have a _fill_value attribute\n y = x.view(np.ndarray)\n y = x.view(type=np.ndarray)\n\n # Check that fill_value can be overridden with view\n y = x.view(MaskedArray, fill_value=2)\n assert_(y.fill_value == 2)\n\n # Check that fill_value can be overridden with view (using type=)\n y = x.view(type=MaskedArray, fill_value=2)\n assert_(y.fill_value == 2)\n\n # Check that fill_value gets reset if passed a dtype but not a\n # fill_value. This is because even though in some cases one can safely\n # cast the fill_value, e.g. if taking an int64 view of an int32 array,\n # in other cases, this cannot be done (e.g. int32 view of an int64\n # array with a large fill_value).\n y = x.view(dtype=np.int32)\n assert_(y.fill_value == 999999)\n\n def test_fillvalue_bytes_or_str(self):\n # Test whether fill values work as expected for structured dtypes\n # containing bytes or str. 
See issue #7259.\n a = empty(shape=(3, ), dtype=\"(2)3S,(2)3U\")\n assert_equal(a[\"f0\"].fill_value, default_fill_value(b\"spam\"))\n assert_equal(a[\"f1\"].fill_value, default_fill_value(\"eggs\"))\n\n\nclass TestUfuncs(object):\n # Test class for the application of ufuncs on MaskedArrays.\n\n def setup(self):\n # Base data definition.\n self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),\n array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)\n self.err_status = np.geterr()\n np.seterr(divide='ignore', invalid='ignore')\n\n def teardown(self):\n np.seterr(**self.err_status)\n\n def test_testUfuncRegression(self):\n # Tests new ufuncs on MaskedArrays.\n for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',\n 'sin', 'cos', 'tan',\n 'arcsin', 'arccos', 'arctan',\n 'sinh', 'cosh', 'tanh',\n 'arcsinh',\n 'arccosh',\n 'arctanh',\n 'absolute', 'fabs', 'negative',\n 'floor', 'ceil',\n 'logical_not',\n 'add', 'subtract', 'multiply',\n 'divide', 'true_divide', 'floor_divide',\n 'remainder', 'fmod', 'hypot', 'arctan2',\n 'equal', 'not_equal', 'less_equal', 'greater_equal',\n 'less', 'greater',\n 'logical_and', 'logical_or', 'logical_xor',\n ]:\n try:\n uf = getattr(umath, f)\n except AttributeError:\n uf = getattr(fromnumeric, f)\n mf = getattr(numpy.ma.core, f)\n args = self.d[:uf.nin]\n ur = uf(*args)\n mr = mf(*args)\n assert_equal(ur.filled(0), mr.filled(0), f)\n assert_mask_equal(ur.mask, mr.mask, err_msg=f)\n\n def test_reduce(self):\n # Tests reduce on MaskedArrays.\n a = self.d[0]\n assert_(not alltrue(a, axis=0))\n assert_(sometrue(a, axis=0))\n assert_equal(sum(a[:3], axis=0), 0)\n assert_equal(product(a, axis=0), 0)\n assert_equal(add.reduce(a), pi)\n\n def test_minmax(self):\n # Tests extrema on MaskedArrays.\n a = arange(1, 13).reshape(3, 4)\n amask = masked_where(a < 5, a)\n assert_equal(amask.max(), a.max())\n assert_equal(amask.min(), 5)\n assert_equal(amask.max(0), a.max(0))\n assert_equal(amask.min(0), [5, 6, 7, 8])\n assert_(amask.max(1)[0].mask)\n assert_(amask.min(1)[0].mask)\n\n def test_ndarray_mask(self):\n # Check that the mask of the result is a ndarray (not a MaskedArray...)\n a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])\n test = np.sqrt(a)\n control = masked_array([-1, 0, 1, np.sqrt(2), -1],\n mask=[1, 0, 0, 0, 1])\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n assert_(not isinstance(test.mask, MaskedArray))\n\n def test_treatment_of_NotImplemented(self):\n # Check that NotImplemented is returned at appropriate places\n\n a = masked_array([1., 2.], mask=[1, 0])\n assert_raises(TypeError, operator.mul, a, \"abc\")\n assert_raises(TypeError, operator.truediv, a, \"abc\")\n\n class MyClass(object):\n __array_priority__ = a.__array_priority__ + 1\n\n def __mul__(self, other):\n return \"My mul\"\n\n def __rmul__(self, other):\n return \"My rmul\"\n\n me = MyClass()\n assert_(me * a == \"My mul\")\n assert_(a * me == \"My rmul\")\n\n # and that __array_priority__ is respected\n class MyClass2(object):\n __array_priority__ = 100\n\n def __mul__(self, other):\n return \"Me2mul\"\n\n def __rmul__(self, other):\n return \"Me2rmul\"\n\n def __rdiv__(self, other):\n return \"Me2rdiv\"\n\n __rtruediv__ = __rdiv__\n\n me_too = MyClass2()\n assert_(a.__mul__(me_too) is NotImplemented)\n assert_(all(multiply.outer(a, me_too) == \"Me2rmul\"))\n assert_(a.__truediv__(me_too) is NotImplemented)\n assert_(me_too * a == \"Me2mul\")\n assert_(a * me_too == \"Me2rmul\")\n assert_(a / me_too == \"Me2rdiv\")\n\n def 
test_no_masked_nan_warnings(self):\n # check that a nan in masked position does not\n # cause ufunc warnings\n\n m = np.ma.array([0.5, np.nan], mask=[0,1])\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"error\")\n\n # test unary and binary ufuncs\n exp(m)\n add(m, 1)\n m > 0\n\n # test different unary domains\n sqrt(m)\n log(m)\n tan(m)\n arcsin(m)\n arccos(m)\n arccosh(m)\n\n # test binary domains\n divide(m, 2)\n\n # also check that allclose uses ma ufuncs, to avoid warning\n allclose(m, 0.5)\n\nclass TestMaskedArrayInPlaceArithmetics(object):\n # Test MaskedArray Arithmetics\n\n def setup(self):\n x = arange(10)\n y = arange(10)\n xm = arange(10)\n xm[2] = masked\n self.intdata = (x, y, xm)\n self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))\n self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']\n self.othertypes = [np.dtype(_).type for _ in self.othertypes]\n self.uint8data = (\n x.astype(np.uint8),\n y.astype(np.uint8),\n xm.astype(np.uint8)\n )\n\n def test_inplace_addition_scalar(self):\n # Test of inplace additions\n (x, y, xm) = self.intdata\n xm[2] = masked\n x += 1\n assert_equal(x, y + 1)\n xm += 1\n assert_equal(xm, y + 1)\n\n (x, _, xm) = self.floatdata\n id1 = x.data.ctypes._data\n x += 1.\n assert_(id1 == x.data.ctypes._data)\n assert_equal(x, y + 1.)\n\n def test_inplace_addition_array(self):\n # Test of inplace additions\n (x, y, xm) = self.intdata\n m = xm.mask\n a = arange(10, dtype=np.int16)\n a[-1] = masked\n x += a\n xm += a\n assert_equal(x, y + a)\n assert_equal(xm, y + a)\n assert_equal(xm.mask, mask_or(m, a.mask))\n\n def test_inplace_subtraction_scalar(self):\n # Test of inplace subtractions\n (x, y, xm) = self.intdata\n x -= 1\n assert_equal(x, y - 1)\n xm -= 1\n assert_equal(xm, y - 1)\n\n def test_inplace_subtraction_array(self):\n # Test of inplace subtractions\n (x, y, xm) = self.floatdata\n m = xm.mask\n a = arange(10, dtype=float)\n a[-1] = masked\n x -= a\n xm -= a\n assert_equal(x, y - a)\n assert_equal(xm, y - a)\n assert_equal(xm.mask, mask_or(m, a.mask))\n\n def test_inplace_multiplication_scalar(self):\n # Test of inplace multiplication\n (x, y, xm) = self.floatdata\n x *= 2.0\n assert_equal(x, y * 2)\n xm *= 2.0\n assert_equal(xm, y * 2)\n\n def test_inplace_multiplication_array(self):\n # Test of inplace multiplication\n (x, y, xm) = self.floatdata\n m = xm.mask\n a = arange(10, dtype=float)\n a[-1] = masked\n x *= a\n xm *= a\n assert_equal(x, y * a)\n assert_equal(xm, y * a)\n assert_equal(xm.mask, mask_or(m, a.mask))\n\n def test_inplace_division_scalar_int(self):\n # Test of inplace division\n (x, y, xm) = self.intdata\n x = arange(10) * 2\n xm = arange(10) * 2\n xm[2] = masked\n x //= 2\n assert_equal(x, y)\n xm //= 2\n assert_equal(xm, y)\n\n def test_inplace_division_scalar_float(self):\n # Test of inplace division\n (x, y, xm) = self.floatdata\n x /= 2.0\n assert_equal(x, y / 2.0)\n xm /= arange(10)\n assert_equal(xm, ones((10,)))\n\n def test_inplace_division_array_float(self):\n # Test of inplace division\n (x, y, xm) = self.floatdata\n m = xm.mask\n a = arange(10, dtype=float)\n a[-1] = masked\n x /= a\n xm /= a\n assert_equal(x, y / a)\n assert_equal(xm, y / a)\n assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))\n\n def test_inplace_division_misc(self):\n\n x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]\n y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]\n m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]\n xm = 
masked_array(x, mask=m1)\n ym = masked_array(y, mask=m2)\n\n z = xm / ym\n assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])\n assert_equal(z._data,\n [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])\n\n xm = xm.copy()\n xm /= ym\n assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])\n assert_equal(z._data,\n [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])\n\n def test_datafriendly_add(self):\n # Test keeping data w/ (inplace) addition\n x = array([1, 2, 3], mask=[0, 0, 1])\n # Test add w/ scalar\n xx = x + 1\n assert_equal(xx.data, [2, 3, 3])\n assert_equal(xx.mask, [0, 0, 1])\n # Test iadd w/ scalar\n x += 1\n assert_equal(x.data, [2, 3, 3])\n assert_equal(x.mask, [0, 0, 1])\n # Test add w/ array\n x = array([1, 2, 3], mask=[0, 0, 1])\n xx = x + array([1, 2, 3], mask=[1, 0, 0])\n assert_equal(xx.data, [1, 4, 3])\n assert_equal(xx.mask, [1, 0, 1])\n # Test iadd w/ array\n x = array([1, 2, 3], mask=[0, 0, 1])\n x += array([1, 2, 3], mask=[1, 0, 0])\n assert_equal(x.data, [1, 4, 3])\n assert_equal(x.mask, [1, 0, 1])\n\n def test_datafriendly_sub(self):\n # Test keeping data w/ (inplace) subtraction\n # Test sub w/ scalar\n x = array([1, 2, 3], mask=[0, 0, 1])\n xx = x - 1\n assert_equal(xx.data, [0, 1, 3])\n assert_equal(xx.mask, [0, 0, 1])\n # Test isub w/ scalar\n x = array([1, 2, 3], mask=[0, 0, 1])\n x -= 1\n assert_equal(x.data, [0, 1, 3])\n assert_equal(x.mask, [0, 0, 1])\n # Test sub w/ array\n x = array([1, 2, 3], mask=[0, 0, 1])\n xx = x - array([1, 2, 3], mask=[1, 0, 0])\n assert_equal(xx.data, [1, 0, 3])\n assert_equal(xx.mask, [1, 0, 1])\n # Test isub w/ array\n x = array([1, 2, 3], mask=[0, 0, 1])\n x -= array([1, 2, 3], mask=[1, 0, 0])\n assert_equal(x.data, [1, 0, 3])\n assert_equal(x.mask, [1, 0, 1])\n\n def test_datafriendly_mul(self):\n # Test keeping data w/ (inplace) multiplication\n # Test mul w/ scalar\n x = array([1, 2, 3], mask=[0, 0, 1])\n xx = x * 2\n assert_equal(xx.data, [2, 4, 3])\n assert_equal(xx.mask, [0, 0, 1])\n # Test imul w/ scalar\n x = array([1, 2, 3], mask=[0, 0, 1])\n x *= 2\n assert_equal(x.data, [2, 4, 3])\n assert_equal(x.mask, [0, 0, 1])\n # Test mul w/ array\n x = array([1, 2, 3], mask=[0, 0, 1])\n xx = x * array([10, 20, 30], mask=[1, 0, 0])\n assert_equal(xx.data, [1, 40, 3])\n assert_equal(xx.mask, [1, 0, 1])\n # Test imul w/ array\n x = array([1, 2, 3], mask=[0, 0, 1])\n x *= array([10, 20, 30], mask=[1, 0, 0])\n assert_equal(x.data, [1, 40, 3])\n assert_equal(x.mask, [1, 0, 1])\n\n def test_datafriendly_div(self):\n # Test keeping data w/ (inplace) division\n # Test div on scalar\n x = array([1, 2, 3], mask=[0, 0, 1])\n xx = x / 2.\n assert_equal(xx.data, [1 / 2., 2 / 2., 3])\n assert_equal(xx.mask, [0, 0, 1])\n # Test idiv on scalar\n x = array([1., 2., 3.], mask=[0, 0, 1])\n x /= 2.\n assert_equal(x.data, [1 / 2., 2 / 2., 3])\n assert_equal(x.mask, [0, 0, 1])\n # Test div on array\n x = array([1., 2., 3.], mask=[0, 0, 1])\n xx = x / array([10., 20., 30.], mask=[1, 0, 0])\n assert_equal(xx.data, [1., 2. / 20., 3.])\n assert_equal(xx.mask, [1, 0, 1])\n # Test idiv on array\n x = array([1., 2., 3.], mask=[0, 0, 1])\n x /= array([10., 20., 30.], mask=[1, 0, 0])\n assert_equal(x.data, [1., 2 / 20., 3.])\n assert_equal(x.mask, [1, 0, 1])\n\n def test_datafriendly_pow(self):\n # Test keeping data w/ (inplace) power\n # Test pow on scalar\n x = array([1., 2., 3.], mask=[0, 0, 1])\n xx = x ** 2.5\n assert_equal(xx.data, [1., 2. 
** 2.5, 3.])\n assert_equal(xx.mask, [0, 0, 1])\n # Test ipow on scalar\n x **= 2.5\n assert_equal(x.data, [1., 2. ** 2.5, 3])\n assert_equal(x.mask, [0, 0, 1])\n\n def test_datafriendly_add_arrays(self):\n a = array([[1, 1], [3, 3]])\n b = array([1, 1], mask=[0, 0])\n a += b\n assert_equal(a, [[2, 2], [4, 4]])\n if a.mask is not nomask:\n assert_equal(a.mask, [[0, 0], [0, 0]])\n\n a = array([[1, 1], [3, 3]])\n b = array([1, 1], mask=[0, 1])\n a += b\n assert_equal(a, [[2, 2], [4, 4]])\n assert_equal(a.mask, [[0, 1], [0, 1]])\n\n def test_datafriendly_sub_arrays(self):\n a = array([[1, 1], [3, 3]])\n b = array([1, 1], mask=[0, 0])\n a -= b\n assert_equal(a, [[0, 0], [2, 2]])\n if a.mask is not nomask:\n assert_equal(a.mask, [[0, 0], [0, 0]])\n\n a = array([[1, 1], [3, 3]])\n b = array([1, 1], mask=[0, 1])\n a -= b\n assert_equal(a, [[0, 0], [2, 2]])\n assert_equal(a.mask, [[0, 1], [0, 1]])\n\n def test_datafriendly_mul_arrays(self):\n a = array([[1, 1], [3, 3]])\n b = array([1, 1], mask=[0, 0])\n a *= b\n assert_equal(a, [[1, 1], [3, 3]])\n if a.mask is not nomask:\n assert_equal(a.mask, [[0, 0], [0, 0]])\n\n a = array([[1, 1], [3, 3]])\n b = array([1, 1], mask=[0, 1])\n a *= b\n assert_equal(a, [[1, 1], [3, 3]])\n assert_equal(a.mask, [[0, 1], [0, 1]])\n\n def test_inplace_addition_scalar_type(self):\n # Test of inplace additions\n for t in self.othertypes:\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings(\"always\")\n (x, y, xm) = (_.astype(t) for _ in self.uint8data)\n xm[2] = masked\n x += t(1)\n assert_equal(x, y + t(1))\n xm += t(1)\n assert_equal(xm, y + t(1))\n\n assert_equal(len(w), 0, \"Failed on type=%s.\" % t)\n\n def test_inplace_addition_array_type(self):\n # Test of inplace additions\n for t in self.othertypes:\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings(\"always\")\n (x, y, xm) = (_.astype(t) for _ in self.uint8data)\n m = xm.mask\n a = arange(10, dtype=t)\n a[-1] = masked\n x += a\n xm += a\n assert_equal(x, y + a)\n assert_equal(xm, y + a)\n assert_equal(xm.mask, mask_or(m, a.mask))\n\n assert_equal(len(w), 0, \"Failed on type=%s.\" % t)\n\n def test_inplace_subtraction_scalar_type(self):\n # Test of inplace subtractions\n for t in self.othertypes:\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings(\"always\")\n (x, y, xm) = (_.astype(t) for _ in self.uint8data)\n x -= t(1)\n assert_equal(x, y - t(1))\n xm -= t(1)\n assert_equal(xm, y - t(1))\n\n assert_equal(len(w), 0, \"Failed on type=%s.\" % t)\n\n def test_inplace_subtraction_array_type(self):\n # Test of inplace subtractions\n for t in self.othertypes:\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings(\"always\")\n (x, y, xm) = (_.astype(t) for _ in self.uint8data)\n m = xm.mask\n a = arange(10, dtype=t)\n a[-1] = masked\n x -= a\n xm -= a\n assert_equal(x, y - a)\n assert_equal(xm, y - a)\n assert_equal(xm.mask, mask_or(m, a.mask))\n\n assert_equal(len(w), 0, \"Failed on type=%s.\" % t)\n\n def test_inplace_multiplication_scalar_type(self):\n # Test of inplace multiplication\n for t in self.othertypes:\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings(\"always\")\n (x, y, xm) = (_.astype(t) for _ in self.uint8data)\n x *= t(2)\n assert_equal(x, y * t(2))\n xm *= t(2)\n assert_equal(xm, y * t(2))\n\n assert_equal(len(w), 0, \"Failed on type=%s.\" % t)\n\n def test_inplace_multiplication_array_type(self):\n # Test of inplace multiplication\n for t in self.othertypes:\n with 
warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings(\"always\")\n (x, y, xm) = (_.astype(t) for _ in self.uint8data)\n m = xm.mask\n a = arange(10, dtype=t)\n a[-1] = masked\n x *= a\n xm *= a\n assert_equal(x, y * a)\n assert_equal(xm, y * a)\n assert_equal(xm.mask, mask_or(m, a.mask))\n\n assert_equal(len(w), 0, \"Failed on type=%s.\" % t)\n\n def test_inplace_floor_division_scalar_type(self):\n # Test of inplace division\n for t in self.othertypes:\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings(\"always\")\n (x, y, xm) = (_.astype(t) for _ in self.uint8data)\n x = arange(10, dtype=t) * t(2)\n xm = arange(10, dtype=t) * t(2)\n xm[2] = masked\n x //= t(2)\n xm //= t(2)\n assert_equal(x, y)\n assert_equal(xm, y)\n\n assert_equal(len(w), 0, \"Failed on type=%s.\" % t)\n\n def test_inplace_floor_division_array_type(self):\n # Test of inplace division\n for t in self.othertypes:\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings(\"always\")\n (x, y, xm) = (_.astype(t) for _ in self.uint8data)\n m = xm.mask\n a = arange(10, dtype=t)\n a[-1] = masked\n x //= a\n xm //= a\n assert_equal(x, y // a)\n assert_equal(xm, y // a)\n assert_equal(\n xm.mask,\n mask_or(mask_or(m, a.mask), (a == t(0)))\n )\n\n assert_equal(len(w), 0, \"Failed on type=%s.\" % t)\n\n def test_inplace_division_scalar_type(self):\n # Test of inplace division\n for t in self.othertypes:\n with suppress_warnings() as sup:\n sup.record(UserWarning)\n\n (x, y, xm) = (_.astype(t) for _ in self.uint8data)\n x = arange(10, dtype=t) * t(2)\n xm = arange(10, dtype=t) * t(2)\n xm[2] = masked\n\n # May get a DeprecationWarning or a TypeError.\n #\n # This is a consequence of the fact that this is true divide\n # and will require casting to float for calculation and\n # casting back to the original type. This will only be raised\n # with integers. Whether it is an error or warning is only\n # dependent on how stringent the casting rules are.\n #\n # Will handle the same way.\n try:\n x /= t(2)\n assert_equal(x, y)\n except (DeprecationWarning, TypeError) as e:\n warnings.warn(str(e), stacklevel=1)\n try:\n xm /= t(2)\n assert_equal(xm, y)\n except (DeprecationWarning, TypeError) as e:\n warnings.warn(str(e), stacklevel=1)\n\n if issubclass(t, np.integer):\n assert_equal(len(sup.log), 2, \"Failed on type=%s.\" % t)\n else:\n assert_equal(len(sup.log), 0, \"Failed on type=%s.\" % t)\n\n def test_inplace_division_array_type(self):\n # Test of inplace division\n for t in self.othertypes:\n with suppress_warnings() as sup:\n sup.record(UserWarning)\n (x, y, xm) = (_.astype(t) for _ in self.uint8data)\n m = xm.mask\n a = arange(10, dtype=t)\n a[-1] = masked\n\n # May get a DeprecationWarning or a TypeError.\n #\n # This is a consequence of the fact that this is true divide\n # and will require casting to float for calculation and\n # casting back to the original type. This will only be raised\n # with integers. 
Whether it is an error or warning is only\n # dependent on how stringent the casting rules are.\n #\n # Will handle the same way.\n try:\n x /= a\n assert_equal(x, y / a)\n except (DeprecationWarning, TypeError) as e:\n warnings.warn(str(e), stacklevel=1)\n try:\n xm /= a\n assert_equal(xm, y / a)\n assert_equal(\n xm.mask,\n mask_or(mask_or(m, a.mask), (a == t(0)))\n )\n except (DeprecationWarning, TypeError) as e:\n warnings.warn(str(e), stacklevel=1)\n\n if issubclass(t, np.integer):\n assert_equal(len(sup.log), 2, \"Failed on type=%s.\" % t)\n else:\n assert_equal(len(sup.log), 0, \"Failed on type=%s.\" % t)\n\n def test_inplace_pow_type(self):\n # Test keeping data w/ (inplace) power\n for t in self.othertypes:\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings(\"always\")\n # Test pow on scalar\n x = array([1, 2, 3], mask=[0, 0, 1], dtype=t)\n xx = x ** t(2)\n xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t)\n assert_equal(xx.data, xx_r.data)\n assert_equal(xx.mask, xx_r.mask)\n # Test ipow on scalar\n x **= t(2)\n assert_equal(x.data, xx_r.data)\n assert_equal(x.mask, xx_r.mask)\n\n assert_equal(len(w), 0, \"Failed on type=%s.\" % t)\n\n\nclass TestMaskedArrayMethods(object):\n # Test class for miscellaneous MaskedArrays methods.\n def setup(self):\n # Base data definition.\n x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,\n 8.43, 7.78, 9.865, 5.878, 8.979, 4.732,\n 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,\n 6.04, 9.63, 7.712, 3.382, 4.489, 6.479,\n 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,\n 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])\n X = x.reshape(6, 6)\n XX = x.reshape(3, 2, 2, 3)\n\n m = np.array([0, 1, 0, 1, 0, 0,\n 1, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 0, 0,\n 0, 0, 1, 0, 1, 0])\n mx = array(data=x, mask=m)\n mX = array(data=X, mask=m.reshape(X.shape))\n mXX = array(data=XX, mask=m.reshape(XX.shape))\n\n m2 = np.array([1, 1, 0, 1, 0, 0,\n 1, 1, 1, 1, 0, 1,\n 0, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 1, 0,\n 0, 0, 1, 0, 1, 1])\n m2x = array(data=x, mask=m2)\n m2X = array(data=X, mask=m2.reshape(X.shape))\n m2XX = array(data=XX, mask=m2.reshape(XX.shape))\n self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)\n\n def test_generic_methods(self):\n # Tests some MaskedArray methods.\n a = array([1, 3, 2])\n assert_equal(a.any(), a._data.any())\n assert_equal(a.all(), a._data.all())\n assert_equal(a.argmax(), a._data.argmax())\n assert_equal(a.argmin(), a._data.argmin())\n assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))\n assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))\n assert_equal(a.conj(), a._data.conj())\n assert_equal(a.conjugate(), a._data.conjugate())\n\n m = array([[1, 2], [3, 4]])\n assert_equal(m.diagonal(), m._data.diagonal())\n assert_equal(a.sum(), a._data.sum())\n assert_equal(a.take([1, 2]), a._data.take([1, 2]))\n assert_equal(m.transpose(), m._data.transpose())\n\n def test_allclose(self):\n # Tests allclose on arrays\n a = np.random.rand(10)\n b = a + np.random.rand(10) * 1e-8\n assert_(allclose(a, b))\n # Test allclose w/ infs\n a[0] = np.inf\n assert_(not allclose(a, b))\n b[0] = np.inf\n assert_(allclose(a, b))\n # Test allclose w/ masked\n a = masked_array(a)\n a[-1] = masked\n assert_(allclose(a, b, masked_equal=True))\n assert_(not allclose(a, b, masked_equal=False))\n # Test comparison w/ scalar\n a *= 1e-8\n a[0] = 0\n assert_(allclose(a, 0, masked_equal=True))\n\n # Test that the function works for MIN_INT integer typed arrays\n a = 
masked_array([np.iinfo(np.int_).min], dtype=np.int_)\n assert_(allclose(a, a))\n\n def test_allany(self):\n # Checks the any/all methods/functions.\n x = np.array([[0.13, 0.26, 0.90],\n [0.28, 0.33, 0.63],\n [0.31, 0.87, 0.70]])\n m = np.array([[True, False, False],\n [False, False, False],\n [True, True, False]], dtype=np.bool_)\n mx = masked_array(x, mask=m)\n mxbig = (mx > 0.5)\n mxsmall = (mx < 0.5)\n\n assert_(not mxbig.all())\n assert_(mxbig.any())\n assert_equal(mxbig.all(0), [False, False, True])\n assert_equal(mxbig.all(1), [False, False, True])\n assert_equal(mxbig.any(0), [False, False, True])\n assert_equal(mxbig.any(1), [True, True, True])\n\n assert_(not mxsmall.all())\n assert_(mxsmall.any())\n assert_equal(mxsmall.all(0), [True, True, False])\n assert_equal(mxsmall.all(1), [False, False, False])\n assert_equal(mxsmall.any(0), [True, True, False])\n assert_equal(mxsmall.any(1), [True, True, False])\n\n def test_allany_onmatrices(self):\n x = np.array([[0.13, 0.26, 0.90],\n [0.28, 0.33, 0.63],\n [0.31, 0.87, 0.70]])\n X = np.matrix(x)\n m = np.array([[True, False, False],\n [False, False, False],\n [True, True, False]], dtype=np.bool_)\n mX = masked_array(X, mask=m)\n mXbig = (mX > 0.5)\n mXsmall = (mX < 0.5)\n\n assert_(not mXbig.all())\n assert_(mXbig.any())\n assert_equal(mXbig.all(0), np.matrix([False, False, True]))\n assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)\n assert_equal(mXbig.any(0), np.matrix([False, False, True]))\n assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)\n\n assert_(not mXsmall.all())\n assert_(mXsmall.any())\n assert_equal(mXsmall.all(0), np.matrix([True, True, False]))\n assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)\n assert_equal(mXsmall.any(0), np.matrix([True, True, False]))\n assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)\n\n def test_allany_oddities(self):\n # Some fun with all and any\n store = empty((), dtype=bool)\n full = array([1, 2, 3], mask=True)\n\n assert_(full.all() is masked)\n full.all(out=store)\n assert_(store)\n assert_(store._mask, True)\n assert_(store is not masked)\n\n store = empty((), dtype=bool)\n assert_(full.any() is masked)\n full.any(out=store)\n assert_(not store)\n assert_(store._mask, True)\n assert_(store is not masked)\n\n def test_argmax_argmin(self):\n # Tests argmin & argmax on MaskedArrays.\n (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d\n\n assert_equal(mx.argmin(), 35)\n assert_equal(mX.argmin(), 35)\n assert_equal(m2x.argmin(), 4)\n assert_equal(m2X.argmin(), 4)\n assert_equal(mx.argmax(), 28)\n assert_equal(mX.argmax(), 28)\n assert_equal(m2x.argmax(), 31)\n assert_equal(m2X.argmax(), 31)\n\n assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])\n assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])\n assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])\n assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])\n\n assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ])\n assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])\n assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])\n assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])\n\n def test_clip(self):\n # Tests clip on MaskedArrays.\n x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,\n 8.43, 7.78, 9.865, 5.878, 8.979, 4.732,\n 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,\n 6.04, 9.63, 7.712, 3.382, 4.489, 6.479,\n 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,\n 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])\n m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])\n mx = 
array(x, mask=m)\n clipped = mx.clip(2, 8)\n assert_equal(clipped.mask, mx.mask)\n assert_equal(clipped._data, x.clip(2, 8))\n assert_equal(clipped._data, mx._data.clip(2, 8))\n\n def test_compress(self):\n # test compress\n a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)\n condition = (a > 1.5) & (a < 3.5)\n assert_equal(a.compress(condition), [2., 3.])\n\n a[[2, 3]] = masked\n b = a.compress(condition)\n assert_equal(b._data, [2., 3.])\n assert_equal(b._mask, [0, 1])\n assert_equal(b.fill_value, 9999)\n assert_equal(b, a[condition])\n\n condition = (a < 4.)\n b = a.compress(condition)\n assert_equal(b._data, [1., 2., 3.])\n assert_equal(b._mask, [0, 0, 1])\n assert_equal(b.fill_value, 9999)\n assert_equal(b, a[condition])\n\n a = masked_array([[10, 20, 30], [40, 50, 60]],\n mask=[[0, 0, 1], [1, 0, 0]])\n b = a.compress(a.ravel() >= 22)\n assert_equal(b._data, [30, 40, 50, 60])\n assert_equal(b._mask, [1, 1, 0, 0])\n\n x = np.array([3, 1, 2])\n b = a.compress(x >= 2, axis=1)\n assert_equal(b._data, [[10, 30], [40, 60]])\n assert_equal(b._mask, [[0, 1], [1, 0]])\n\n def test_compressed(self):\n # Tests compressed\n a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])\n b = a.compressed()\n assert_equal(b, a)\n a[0] = masked\n b = a.compressed()\n assert_equal(b, [2, 3, 4])\n\n a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])\n b = a.compressed()\n assert_equal(b, a)\n assert_(isinstance(b, np.matrix))\n a[0, 0] = masked\n b = a.compressed()\n assert_equal(b, [[2, 3, 4]])\n\n def test_empty(self):\n # Tests empty/like\n datatype = [('a', int), ('b', float), ('c', '|S8')]\n a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],\n dtype=datatype)\n assert_equal(len(a.fill_value.item()), len(datatype))\n\n b = empty_like(a)\n assert_equal(b.shape, a.shape)\n assert_equal(b.fill_value, a.fill_value)\n\n b = empty(len(a), dtype=datatype)\n assert_equal(b.shape, a.shape)\n assert_equal(b.fill_value, a.fill_value)\n\n # check empty_like mask handling\n a = masked_array([1, 2, 3], mask=[False, True, False])\n b = empty_like(a)\n assert_(not np.may_share_memory(a.mask, b.mask))\n b = a.view(masked_array)\n assert_(np.may_share_memory(a.mask, b.mask))\n\n @suppress_copy_mask_on_assignment\n def test_put(self):\n # Tests put.\n d = arange(5)\n n = [0, 0, 0, 1, 1]\n m = make_mask(n)\n x = array(d, mask=m)\n assert_(x[3] is masked)\n assert_(x[4] is masked)\n x[[1, 4]] = [10, 40]\n assert_(x[3] is masked)\n assert_(x[4] is not masked)\n assert_equal(x, [0, 10, 2, -1, 40])\n\n x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)\n i = [0, 2, 4, 6]\n x.put(i, [6, 4, 2, 0])\n assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))\n assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])\n x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))\n assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])\n assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])\n\n x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)\n put(x, i, [6, 4, 2, 0])\n assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))\n assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])\n put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))\n assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])\n assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])\n\n def test_put_nomask(self):\n # GitHub issue 6425\n x = zeros(10)\n z = array([3., -1.], mask=[False, True])\n\n x.put([1, 2], z)\n assert_(x[0] is not masked)\n assert_equal(x[0], 0)\n assert_(x[1] is not masked)\n assert_equal(x[1], 3)\n assert_(x[2] is masked)\n 
assert_(x[3] is not masked)\n assert_equal(x[3], 0)\n\n def test_put_hardmask(self):\n # Tests put on hardmask\n d = arange(5)\n n = [0, 0, 0, 1, 1]\n m = make_mask(n)\n xh = array(d + 1, mask=m, hard_mask=True, copy=True)\n xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])\n assert_equal(xh._data, [3, 4, 2, 4, 5])\n\n def test_putmask(self):\n x = arange(6) + 1\n mx = array(x, mask=[0, 0, 0, 1, 1, 1])\n mask = [0, 0, 1, 0, 0, 1]\n # w/o mask, w/o masked values\n xx = x.copy()\n putmask(xx, mask, 99)\n assert_equal(xx, [1, 2, 99, 4, 5, 99])\n # w/ mask, w/o masked values\n mxx = mx.copy()\n putmask(mxx, mask, 99)\n assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])\n assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])\n # w/o mask, w/ masked values\n values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])\n xx = x.copy()\n putmask(xx, mask, values)\n assert_equal(xx._data, [1, 2, 30, 4, 5, 60])\n assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])\n # w/ mask, w/ masked values\n mxx = mx.copy()\n putmask(mxx, mask, values)\n assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])\n assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])\n # w/ mask, w/ masked values + hardmask\n mxx = mx.copy()\n mxx.harden_mask()\n putmask(mxx, mask, values)\n assert_equal(mxx, [1, 2, 30, 4, 5, 60])\n\n def test_ravel(self):\n # Tests ravel\n a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])\n aravel = a.ravel()\n assert_equal(aravel._mask.shape, aravel.shape)\n a = array([0, 0], mask=[1, 1])\n aravel = a.ravel()\n assert_equal(aravel._mask.shape, a.shape)\n a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])\n aravel = a.ravel()\n assert_equal(aravel.shape, (1, 5))\n assert_equal(aravel._mask.shape, a.shape)\n # Checks that small_mask is preserved\n a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)\n assert_equal(a.ravel()._mask, [0, 0, 0, 0])\n # Test that the fill_value is preserved\n a.fill_value = -99\n a.shape = (2, 2)\n ar = a.ravel()\n assert_equal(ar._mask, [0, 0, 0, 0])\n assert_equal(ar._data, [1, 2, 3, 4])\n assert_equal(ar.fill_value, -99)\n # Test index ordering\n assert_equal(a.ravel(order='C'), [1, 2, 3, 4])\n assert_equal(a.ravel(order='F'), [1, 3, 2, 4])\n\n def test_reshape(self):\n # Tests reshape\n x = arange(4)\n x[0] = masked\n y = x.reshape(2, 2)\n assert_equal(y.shape, (2, 2,))\n assert_equal(y._mask.shape, (2, 2,))\n assert_equal(x.shape, (4,))\n assert_equal(x._mask.shape, (4,))\n\n def test_sort(self):\n # Test sort\n x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)\n\n sortedx = sort(x)\n assert_equal(sortedx._data, [1, 2, 3, 4])\n assert_equal(sortedx._mask, [0, 0, 0, 1])\n\n sortedx = sort(x, endwith=False)\n assert_equal(sortedx._data, [4, 1, 2, 3])\n assert_equal(sortedx._mask, [1, 0, 0, 0])\n\n x.sort()\n assert_equal(x._data, [1, 2, 3, 4])\n assert_equal(x._mask, [0, 0, 0, 1])\n\n x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)\n x.sort(endwith=False)\n assert_equal(x._data, [4, 1, 2, 3])\n assert_equal(x._mask, [1, 0, 0, 0])\n\n x = [1, 4, 2, 3]\n sortedx = sort(x)\n assert_(not isinstance(sorted, MaskedArray))\n\n x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)\n sortedx = sort(x, endwith=False)\n assert_equal(sortedx._data, [-2, -1, 0, 1, 2])\n x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)\n sortedx = sort(x, endwith=False)\n assert_equal(sortedx._data, [1, 2, -2, -1, 0])\n assert_equal(sortedx._mask, [1, 1, 0, 0, 0])\n\n def test_argsort_matches_sort(self):\n x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)\n\n for kwargs in 
[dict(),\n dict(endwith=True),\n dict(endwith=False),\n dict(fill_value=2),\n dict(fill_value=2, endwith=True),\n dict(fill_value=2, endwith=False)]:\n sortedx = sort(x, **kwargs)\n argsortedx = x[argsort(x, **kwargs)]\n assert_equal(sortedx._data, argsortedx._data)\n assert_equal(sortedx._mask, argsortedx._mask)\n\n def test_sort_2d(self):\n # Check sort of 2D array.\n # 2D array w/o mask\n a = masked_array([[8, 4, 1], [2, 0, 9]])\n a.sort(0)\n assert_equal(a, [[2, 0, 1], [8, 4, 9]])\n a = masked_array([[8, 4, 1], [2, 0, 9]])\n a.sort(1)\n assert_equal(a, [[1, 4, 8], [0, 2, 9]])\n # 2D array w/mask\n a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])\n a.sort(0)\n assert_equal(a, [[2, 0, 1], [8, 4, 9]])\n assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])\n a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])\n a.sort(1)\n assert_equal(a, [[1, 4, 8], [0, 2, 9]])\n assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])\n # 3D\n a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],\n [[1, 2, 3], [7, 8, 9], [4, 5, 6]],\n [[7, 8, 9], [1, 2, 3], [4, 5, 6]],\n [[4, 5, 6], [1, 2, 3], [7, 8, 9]]])\n a[a % 4 == 0] = masked\n am = a.copy()\n an = a.filled(99)\n am.sort(0)\n an.sort(0)\n assert_equal(am, an)\n am = a.copy()\n an = a.filled(99)\n am.sort(1)\n an.sort(1)\n assert_equal(am, an)\n am = a.copy()\n an = a.filled(99)\n am.sort(2)\n an.sort(2)\n assert_equal(am, an)\n\n def test_sort_flexible(self):\n # Test sort on structured dtype.\n a = array(\n data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],\n mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],\n dtype=[('A', int), ('B', int)])\n mask_last = array(\n data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],\n mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],\n dtype=[('A', int), ('B', int)])\n mask_first = array(\n data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)],\n mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)],\n dtype=[('A', int), ('B', int)])\n\n test = sort(a)\n assert_equal(test, mask_last)\n assert_equal(test.mask, mask_last.mask)\n\n test = sort(a, endwith=False)\n assert_equal(test, mask_first)\n assert_equal(test.mask, mask_first.mask)\n\n # Test sort on dtype with subarray (gh-8069)\n dt = np.dtype([('v', int, 2)])\n a = a.view(dt)\n mask_last = mask_last.view(dt)\n mask_first = mask_first.view(dt)\n\n test = sort(a)\n assert_equal(test, mask_last)\n assert_equal(test.mask, mask_last.mask)\n\n test = sort(a, endwith=False)\n assert_equal(test, mask_first)\n assert_equal(test.mask, mask_first.mask)\n\n def test_argsort(self):\n # Test argsort\n a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])\n assert_equal(np.argsort(a), argsort(a))\n\n def test_squeeze(self):\n # Check squeeze\n data = masked_array([[1, 2, 3]])\n assert_equal(data.squeeze(), [1, 2, 3])\n data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])\n assert_equal(data.squeeze(), [1, 2, 3])\n assert_equal(data.squeeze()._mask, [1, 1, 1])\n data = masked_array([[1]], mask=True)\n assert_(data.squeeze() is masked)\n\n def test_swapaxes(self):\n # Tests swapaxes on MaskedArrays.\n x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,\n 8.43, 7.78, 9.865, 5.878, 8.979, 4.732,\n 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,\n 6.04, 9.63, 7.712, 3.382, 4.489, 6.479,\n 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,\n 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])\n m = np.array([0, 1, 0, 1, 0, 0,\n 1, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 0, 0,\n 0, 0, 1, 0, 1, 0])\n 
mX = array(x, mask=m).reshape(6, 6)\n mXX = mX.reshape(3, 2, 2, 3)\n\n mXswapped = mX.swapaxes(0, 1)\n assert_equal(mXswapped[-1], mX[:, -1])\n\n mXXswapped = mXX.swapaxes(0, 2)\n assert_equal(mXXswapped.shape, (2, 2, 3, 3))\n\n def test_take(self):\n # Tests take\n x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])\n assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))\n assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])\n assert_equal(x.take([[0, 1], [0, 1]]),\n masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))\n\n # assert_equal crashes when passed np.ma.mask\n assert_(x[1] is np.ma.masked)\n assert_(x.take(1) is np.ma.masked)\n\n x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])\n assert_equal(x.take([0, 2], axis=1),\n array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))\n assert_equal(take(x, [0, 2], axis=1),\n array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))\n\n def test_take_masked_indices(self):\n # Test take w/ masked indices\n a = np.array((40, 18, 37, 9, 22))\n indices = np.arange(3)[None,:] + np.arange(5)[:, None]\n mindices = array(indices, mask=(indices >= len(a)))\n # No mask\n test = take(a, mindices, mode='clip')\n ctrl = array([[40, 18, 37],\n [18, 37, 9],\n [37, 9, 22],\n [9, 22, 22],\n [22, 22, 22]])\n assert_equal(test, ctrl)\n # Masked indices\n test = take(a, mindices)\n ctrl = array([[40, 18, 37],\n [18, 37, 9],\n [37, 9, 22],\n [9, 22, 40],\n [22, 40, 40]])\n ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked\n assert_equal(test, ctrl)\n assert_equal(test.mask, ctrl.mask)\n # Masked input + masked indices\n a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))\n test = take(a, mindices)\n ctrl[0, 1] = ctrl[1, 0] = masked\n assert_equal(test, ctrl)\n assert_equal(test.mask, ctrl.mask)\n\n def test_tolist(self):\n # Tests to list\n # ... on 1D\n x = array(np.arange(12))\n x[[1, -2]] = masked\n xlist = x.tolist()\n assert_(xlist[1] is None)\n assert_(xlist[-2] is None)\n # ... on 2D\n x.shape = (3, 4)\n xlist = x.tolist()\n ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]\n assert_equal(xlist[0], [0, None, 2, 3])\n assert_equal(xlist[1], [4, 5, 6, 7])\n assert_equal(xlist[2], [8, 9, None, 11])\n assert_equal(xlist, ctrl)\n # ... on structured array w/ masked records\n x = array(list(zip([1, 2, 3],\n [1.1, 2.2, 3.3],\n ['one', 'two', 'thr'])),\n dtype=[('a', int), ('b', float), ('c', '|S8')])\n x[-1] = masked\n assert_equal(x.tolist(),\n [(1, 1.1, b'one'),\n (2, 2.2, b'two'),\n (None, None, None)])\n # ... on structured array w/ masked fields\n a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],\n dtype=[('a', int), ('b', int)])\n test = a.tolist()\n assert_equal(test, [[1, None], [3, 4]])\n # ... 
on mvoid\n a = a[0]\n test = a.tolist()\n assert_equal(test, [1, None])\n\n def test_tolist_specialcase(self):\n # Test mvoid.tolist: make sure we return a standard Python object\n a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])\n # w/o mask: each entry is a np.void whose elements are standard Python\n for entry in a:\n for item in entry.tolist():\n assert_(not isinstance(item, np.generic))\n # w/ mask: each entry is a ma.void whose elements should be\n # standard Python\n a.mask[0] = (0, 1)\n for entry in a:\n for item in entry.tolist():\n assert_(not isinstance(item, np.generic))\n\n def test_toflex(self):\n # Test the conversion to records\n data = arange(10)\n record = data.toflex()\n assert_equal(record['_data'], data._data)\n assert_equal(record['_mask'], data._mask)\n\n data[[0, 1, 2, -1]] = masked\n record = data.toflex()\n assert_equal(record['_data'], data._data)\n assert_equal(record['_mask'], data._mask)\n\n ndtype = [('i', int), ('s', '|S3'), ('f', float)]\n data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),\n 'ABCDEFGHIJKLM',\n np.random.rand(10))],\n dtype=ndtype)\n data[[0, 1, 2, -1]] = masked\n record = data.toflex()\n assert_equal(record['_data'], data._data)\n assert_equal(record['_mask'], data._mask)\n\n ndtype = np.dtype(\"int, (2,3)float, float\")\n data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),\n np.random.rand(10),\n np.random.rand(10))],\n dtype=ndtype)\n data[[0, 1, 2, -1]] = masked\n record = data.toflex()\n assert_equal_records(record['_data'], data._data)\n assert_equal_records(record['_mask'], data._mask)\n\n def test_fromflex(self):\n # Test the reconstruction of a masked_array from a record\n a = array([1, 2, 3])\n test = fromflex(a.toflex())\n assert_equal(test, a)\n assert_equal(test.mask, a.mask)\n\n a = array([1, 2, 3], mask=[0, 0, 1])\n test = fromflex(a.toflex())\n assert_equal(test, a)\n assert_equal(test.mask, a.mask)\n\n a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],\n dtype=[('A', int), ('B', float)])\n test = fromflex(a.toflex())\n assert_equal(test, a)\n assert_equal(test.data, a.data)\n\n def test_arraymethod(self):\n # Test a _arraymethod w/ n argument\n marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])\n control = masked_array([[1], [2], [3], [4], [5]],\n mask=[0, 0, 1, 0, 0])\n assert_equal(marray.T, control)\n assert_equal(marray.transpose(), control)\n\n assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))\n\n\nclass TestMaskedArrayMathMethods(object):\n\n def setup(self):\n # Base data definition.\n x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,\n 8.43, 7.78, 9.865, 5.878, 8.979, 4.732,\n 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,\n 6.04, 9.63, 7.712, 3.382, 4.489, 6.479,\n 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,\n 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])\n X = x.reshape(6, 6)\n XX = x.reshape(3, 2, 2, 3)\n\n m = np.array([0, 1, 0, 1, 0, 0,\n 1, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 0, 0,\n 0, 0, 1, 0, 1, 0])\n mx = array(data=x, mask=m)\n mX = array(data=X, mask=m.reshape(X.shape))\n mXX = array(data=XX, mask=m.reshape(XX.shape))\n\n m2 = np.array([1, 1, 0, 1, 0, 0,\n 1, 1, 1, 1, 0, 1,\n 0, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 1, 0,\n 0, 0, 1, 0, 1, 1])\n m2x = array(data=x, mask=m2)\n m2X = array(data=X, mask=m2.reshape(X.shape))\n m2XX = array(data=XX, mask=m2.reshape(XX.shape))\n self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)\n\n def test_cumsumprod(self):\n # Tests cumsum & cumprod on 
MaskedArrays.\n (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d\n mXcp = mX.cumsum(0)\n assert_equal(mXcp._data, mX.filled(0).cumsum(0))\n mXcp = mX.cumsum(1)\n assert_equal(mXcp._data, mX.filled(0).cumsum(1))\n\n mXcp = mX.cumprod(0)\n assert_equal(mXcp._data, mX.filled(1).cumprod(0))\n mXcp = mX.cumprod(1)\n assert_equal(mXcp._data, mX.filled(1).cumprod(1))\n\n def test_cumsumprod_with_output(self):\n # Tests cumsum/cumprod w/ output\n xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)\n xm[:, 0] = xm[0] = xm[-1, -1] = masked\n\n for funcname in ('cumsum', 'cumprod'):\n npfunc = getattr(np, funcname)\n xmmeth = getattr(xm, funcname)\n\n # A ndarray as explicit input\n output = np.empty((3, 4), dtype=float)\n output.fill(-9999)\n result = npfunc(xm, axis=0, out=output)\n # ... the result should be the given output\n assert_(result is output)\n assert_equal(result, xmmeth(axis=0, out=output))\n\n output = empty((3, 4), dtype=int)\n result = xmmeth(axis=0, out=output)\n assert_(result is output)\n\n def test_ptp(self):\n # Tests ptp on MaskedArrays.\n (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d\n (n, m) = X.shape\n assert_equal(mx.ptp(), mx.compressed().ptp())\n rows = np.zeros(n, float)\n cols = np.zeros(m, float)\n for k in range(m):\n cols[k] = mX[:, k].compressed().ptp()\n for k in range(n):\n rows[k] = mX[k].compressed().ptp()\n assert_equal(mX.ptp(0), cols)\n assert_equal(mX.ptp(1), rows)\n\n def test_add_object(self):\n x = masked_array(['a', 'b'], mask=[1, 0], dtype=object)\n y = x + 'x'\n assert_equal(y[1], 'bx')\n assert_(y.mask[0])\n\n def test_sum_object(self):\n # Test sum on object dtype\n a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object)\n assert_equal(a.sum(), 5)\n a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)\n assert_equal(a.sum(axis=0), [5, 7, 9])\n\n def test_prod_object(self):\n # Test prod on object dtype\n a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object)\n assert_equal(a.prod(), 2 * 3)\n a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)\n assert_equal(a.prod(axis=0), [4, 10, 18])\n\n def test_meananom_object(self):\n # Test mean/anom on object dtype\n a = masked_array([1, 2, 3], dtype=object)\n assert_equal(a.mean(), 2)\n assert_equal(a.anom(), [-1, 0, 1])\n\n def test_trace(self):\n # Tests trace on MaskedArrays.\n (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d\n mXdiag = mX.diagonal()\n assert_equal(mX.trace(), mX.diagonal().compressed().sum())\n assert_almost_equal(mX.trace(),\n X.trace() - sum(mXdiag.mask * X.diagonal(),\n axis=0))\n assert_equal(np.trace(mX), mX.trace())\n\n def test_dot(self):\n # Tests dot on MaskedArrays.\n (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d\n fx = mx.filled(0)\n r = mx.dot(mx)\n assert_almost_equal(r.filled(0), fx.dot(fx))\n assert_(r.mask is nomask)\n\n fX = mX.filled(0)\n r = mX.dot(mX)\n assert_almost_equal(r.filled(0), fX.dot(fX))\n assert_(r.mask[1,3])\n r1 = empty_like(r)\n mX.dot(mX, out=r1)\n assert_almost_equal(r, r1)\n\n mYY = mXX.swapaxes(-1, -2)\n fXX, fYY = mXX.filled(0), mYY.filled(0)\n r = mXX.dot(mYY)\n assert_almost_equal(r.filled(0), fXX.dot(fYY))\n r1 = empty_like(r)\n mXX.dot(mYY, out=r1)\n assert_almost_equal(r, r1)\n\n def test_dot_shape_mismatch(self):\n # regression test\n x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])\n y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])\n z = masked_array([[0,1],[3,3]])\n x.dot(y, out=z)\n assert_almost_equal(z.filled(0), [[1, 0], [15, 16]])\n assert_almost_equal(z.mask, [[0, 1], [0, 0]])\n\n def 
test_varmean_nomask(self):\n # gh-5769\n foo = array([1,2,3,4], dtype='f8')\n bar = array([1,2,3,4], dtype='f8')\n assert_equal(type(foo.mean()), np.float64)\n assert_equal(type(foo.var()), np.float64)\n assert((foo.mean() == bar.mean()) is np.bool_(True))\n\n # check array type is preserved and out works\n foo = array(np.arange(16).reshape((4,4)), dtype='f8')\n bar = empty(4, dtype='f4')\n assert_equal(type(foo.mean(axis=1)), MaskedArray)\n assert_equal(type(foo.var(axis=1)), MaskedArray)\n assert_(foo.mean(axis=1, out=bar) is bar)\n assert_(foo.var(axis=1, out=bar) is bar)\n\n def test_varstd(self):\n # Tests var & std on MaskedArrays.\n (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d\n assert_almost_equal(mX.var(axis=None), mX.compressed().var())\n assert_almost_equal(mX.std(axis=None), mX.compressed().std())\n assert_almost_equal(mX.std(axis=None, ddof=1),\n mX.compressed().std(ddof=1))\n assert_almost_equal(mX.var(axis=None, ddof=1),\n mX.compressed().var(ddof=1))\n assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)\n assert_equal(mX.var().shape, X.var().shape)\n (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))\n assert_almost_equal(mX.var(axis=None, ddof=2),\n mX.compressed().var(ddof=2))\n assert_almost_equal(mX.std(axis=None, ddof=2),\n mX.compressed().std(ddof=2))\n for k in range(6):\n assert_almost_equal(mXvar1[k], mX[k].compressed().var())\n assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())\n assert_almost_equal(np.sqrt(mXvar0[k]),\n mX[:, k].compressed().std())\n\n @suppress_copy_mask_on_assignment\n def test_varstd_specialcases(self):\n # Test a special case for var\n nout = np.array(-1, dtype=float)\n mout = array(-1, dtype=float)\n\n x = array(arange(10), mask=True)\n for methodname in ('var', 'std'):\n method = getattr(x, methodname)\n assert_(method() is masked)\n assert_(method(0) is masked)\n assert_(method(-1) is masked)\n # Using a masked array as explicit output\n method(out=mout)\n assert_(mout is not masked)\n assert_equal(mout.mask, True)\n # Using a ndarray as explicit output\n method(out=nout)\n assert_(np.isnan(nout))\n\n x = array(arange(10), mask=True)\n x[-1] = 9\n for methodname in ('var', 'std'):\n method = getattr(x, methodname)\n assert_(method(ddof=1) is masked)\n assert_(method(0, ddof=1) is masked)\n assert_(method(-1, ddof=1) is masked)\n # Using a masked array as explicit output\n method(out=mout, ddof=1)\n assert_(mout is not masked)\n assert_equal(mout.mask, True)\n # Using a ndarray as explicit output\n method(out=nout, ddof=1)\n assert_(np.isnan(nout))\n\n def test_varstd_ddof(self):\n a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])\n test = a.std(axis=0, ddof=0)\n assert_equal(test.filled(0), [0, 0, 0])\n assert_equal(test.mask, [0, 0, 1])\n test = a.std(axis=0, ddof=1)\n assert_equal(test.filled(0), [0, 0, 0])\n assert_equal(test.mask, [0, 0, 1])\n test = a.std(axis=0, ddof=2)\n assert_equal(test.filled(0), [0, 0, 0])\n assert_equal(test.mask, [1, 1, 1])\n\n def test_diag(self):\n # Test diag\n x = arange(9).reshape((3, 3))\n x[1, 1] = masked\n out = np.diag(x)\n assert_equal(out, [0, 4, 8])\n out = diag(x)\n assert_equal(out, [0, 4, 8])\n assert_equal(out.mask, [0, 1, 0])\n out = diag(out)\n control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],\n mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])\n assert_equal(out, control)\n\n def test_axis_methods_nomask(self):\n # Test the combination nomask & methods w/ axis\n a = array([[1, 2, 3], [4, 5, 6]])\n\n assert_equal(a.sum(0), [5, 7, 9])\n assert_equal(a.sum(-1), [6, 
15])\n assert_equal(a.sum(1), [6, 15])\n\n assert_equal(a.prod(0), [4, 10, 18])\n assert_equal(a.prod(-1), [6, 120])\n assert_equal(a.prod(1), [6, 120])\n\n assert_equal(a.min(0), [1, 2, 3])\n assert_equal(a.min(-1), [1, 4])\n assert_equal(a.min(1), [1, 4])\n\n assert_equal(a.max(0), [4, 5, 6])\n assert_equal(a.max(-1), [3, 6])\n assert_equal(a.max(1), [3, 6])\n\n\nclass TestMaskedArrayMathMethodsComplex(object):\n # Test class for miscellaneous MaskedArrays methods.\n def setup(self):\n # Base data definition.\n x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,\n 8.43, 7.78, 9.865, 5.878, 8.979, 4.732,\n 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,\n 6.04, 9.63, 7.712, 3.382, 4.489, 6.479j,\n 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,\n 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])\n X = x.reshape(6, 6)\n XX = x.reshape(3, 2, 2, 3)\n\n m = np.array([0, 1, 0, 1, 0, 0,\n 1, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 0, 0,\n 0, 0, 1, 0, 1, 0])\n mx = array(data=x, mask=m)\n mX = array(data=X, mask=m.reshape(X.shape))\n mXX = array(data=XX, mask=m.reshape(XX.shape))\n\n m2 = np.array([1, 1, 0, 1, 0, 0,\n 1, 1, 1, 1, 0, 1,\n 0, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 1, 0,\n 0, 0, 1, 0, 1, 1])\n m2x = array(data=x, mask=m2)\n m2X = array(data=X, mask=m2.reshape(X.shape))\n m2XX = array(data=XX, mask=m2.reshape(XX.shape))\n self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)\n\n def test_varstd(self):\n # Tests var & std on MaskedArrays.\n (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d\n assert_almost_equal(mX.var(axis=None), mX.compressed().var())\n assert_almost_equal(mX.std(axis=None), mX.compressed().std())\n assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)\n assert_equal(mX.var().shape, X.var().shape)\n (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))\n assert_almost_equal(mX.var(axis=None, ddof=2),\n mX.compressed().var(ddof=2))\n assert_almost_equal(mX.std(axis=None, ddof=2),\n mX.compressed().std(ddof=2))\n for k in range(6):\n assert_almost_equal(mXvar1[k], mX[k].compressed().var())\n assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())\n assert_almost_equal(np.sqrt(mXvar0[k]),\n mX[:, k].compressed().std())\n\n\nclass TestMaskedArrayFunctions(object):\n # Test class for miscellaneous functions.\n\n def setup(self):\n x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])\n y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])\n m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]\n xm = masked_array(x, mask=m1)\n ym = masked_array(y, mask=m2)\n xm.set_fill_value(1e+20)\n self.info = (xm, ym)\n\n def test_masked_where_bool(self):\n x = [1, 2]\n y = masked_where(False, x)\n assert_equal(y, [1, 2])\n assert_equal(y[1], 2)\n\n def test_masked_equal_wlist(self):\n x = [1, 2, 3]\n mx = masked_equal(x, 3)\n assert_equal(mx, x)\n assert_equal(mx._mask, [0, 0, 1])\n mx = masked_not_equal(x, 3)\n assert_equal(mx, x)\n assert_equal(mx._mask, [1, 1, 0])\n\n def test_masked_equal_fill_value(self):\n x = [1, 2, 3]\n mx = masked_equal(x, 3)\n assert_equal(mx._mask, [0, 0, 1])\n assert_equal(mx.fill_value, 3)\n\n def test_masked_where_condition(self):\n # Tests masking functions.\n x = array([1., 2., 3., 4., 5.])\n x[2] = masked\n assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2))\n assert_equal(masked_where(greater_equal(x, 2), x),\n masked_greater_equal(x, 2))\n assert_equal(masked_where(less(x, 2), x), masked_less(x, 2))\n 
assert_equal(masked_where(less_equal(x, 2), x),\n masked_less_equal(x, 2))\n assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))\n assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2))\n assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))\n assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),\n [99, 99, 3, 4, 5])\n\n def test_masked_where_oddities(self):\n # Tests some generic features.\n atest = ones((10, 10, 10), dtype=float)\n btest = zeros(atest.shape, MaskType)\n ctest = masked_where(btest, atest)\n assert_equal(atest, ctest)\n\n def test_masked_where_shape_constraint(self):\n a = arange(10)\n try:\n test = masked_equal(1, a)\n except IndexError:\n pass\n else:\n raise AssertionError(\"Should have failed...\")\n test = masked_equal(a, 1)\n assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])\n\n def test_masked_where_structured(self):\n # test that masked_where on a structured array sets a structured\n # mask (see issue #2972)\n a = np.zeros(10, dtype=[(\"A\", \"<f2\"), (\"B\", \"<f4\")])\n am = np.ma.masked_where(a[\"A\"] < 5, a)\n assert_equal(am.mask.dtype.names, am.dtype.names)\n assert_equal(am[\"A\"],\n np.ma.masked_array(np.zeros(10), np.ones(10)))\n\n def test_masked_otherfunctions(self):\n assert_equal(masked_inside(list(range(5)), 1, 3),\n [0, 199, 199, 199, 4])\n assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])\n assert_equal(masked_inside(array(list(range(5)),\n mask=[1, 0, 0, 0, 0]), 1, 3).mask,\n [1, 1, 1, 1, 0])\n assert_equal(masked_outside(array(list(range(5)),\n mask=[0, 1, 0, 0, 0]), 1, 3).mask,\n [1, 1, 0, 0, 1])\n assert_equal(masked_equal(array(list(range(5)),\n mask=[1, 0, 0, 0, 0]), 2).mask,\n [1, 0, 1, 0, 0])\n assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],\n mask=[1, 0, 0, 0, 0]), 2).mask,\n [1, 0, 1, 0, 1])\n\n def test_round(self):\n a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],\n mask=[0, 1, 0, 0, 0])\n assert_equal(a.round(), [1., 2., 3., 5., 6.])\n assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])\n assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])\n b = empty_like(a)\n a.round(out=b)\n assert_equal(b, [1., 2., 3., 5., 6.])\n\n x = array([1., 2., 3., 4., 5.])\n c = array([1, 1, 1, 0, 0])\n x[2] = masked\n z = where(c, x, -x)\n assert_equal(z, [1., 2., 0., -4., -5])\n c[0] = masked\n z = where(c, x, -x)\n assert_equal(z, [1., 2., 0., -4., -5])\n assert_(z[0] is masked)\n assert_(z[1] is not masked)\n assert_(z[2] is masked)\n\n def test_round_with_output(self):\n # Testing round with an explicit output\n\n xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)\n xm[:, 0] = xm[0] = xm[-1, -1] = masked\n\n # A ndarray as explicit input\n output = np.empty((3, 4), dtype=float)\n output.fill(-9999)\n result = np.round(xm, decimals=2, out=output)\n # ... 
the result should be the given output\n assert_(result is output)\n assert_equal(result, xm.round(decimals=2, out=output))\n\n output = empty((3, 4), dtype=float)\n result = xm.round(decimals=2, out=output)\n assert_(result is output)\n\n def test_round_with_scalar(self):\n # Testing round with scalar/zero dimension input\n # GH issue 2244\n a = array(1.1, mask=[False])\n assert_equal(a.round(), 1)\n\n a = array(1.1, mask=[True])\n assert_(a.round() is masked)\n\n a = array(1.1, mask=[False])\n output = np.empty(1, dtype=float)\n output.fill(-9999)\n a.round(out=output)\n assert_equal(output, 1)\n\n a = array(1.1, mask=[False])\n output = array(-9999., mask=[True])\n a.round(out=output)\n assert_equal(output[()], 1)\n\n a = array(1.1, mask=[True])\n output = array(-9999., mask=[False])\n a.round(out=output)\n assert_(output[()] is masked)\n\n def test_identity(self):\n a = identity(5)\n assert_(isinstance(a, MaskedArray))\n assert_equal(a, np.identity(5))\n\n def test_power(self):\n x = -1.1\n assert_almost_equal(power(x, 2.), 1.21)\n assert_(power(x, masked) is masked)\n x = array([-1.1, -1.1, 1.1, 1.1, 0.])\n b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])\n y = power(x, b)\n assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.])\n assert_equal(y._mask, [1, 0, 0, 0, 1])\n b.mask = nomask\n y = power(x, b)\n assert_equal(y._mask, [1, 0, 0, 0, 1])\n z = x ** b\n assert_equal(z._mask, y._mask)\n assert_almost_equal(z, y)\n assert_almost_equal(z._data, y._data)\n x **= b\n assert_equal(x._mask, y._mask)\n assert_almost_equal(x, y)\n assert_almost_equal(x._data, y._data)\n\n def test_power_with_broadcasting(self):\n # Test power w/ broadcasting\n a2 = np.array([[1., 2., 3.], [4., 5., 6.]])\n a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]])\n b1 = np.array([2, 4, 3])\n b2 = np.array([b1, b1])\n b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]])\n\n ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]],\n mask=[[1, 1, 0], [0, 1, 1]])\n # No broadcasting, base & exp w/ mask\n test = a2m ** b2m\n assert_equal(test, ctrl)\n assert_equal(test.mask, ctrl.mask)\n # No broadcasting, base w/ mask, exp w/o mask\n test = a2m ** b2\n assert_equal(test, ctrl)\n assert_equal(test.mask, a2m.mask)\n # No broadcasting, base w/o mask, exp w/ mask\n test = a2 ** b2m\n assert_equal(test, ctrl)\n assert_equal(test.mask, b2m.mask)\n\n ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]],\n mask=[[0, 1, 0], [0, 1, 0]])\n test = b1 ** b2m\n assert_equal(test, ctrl)\n assert_equal(test.mask, ctrl.mask)\n test = b2m ** b1\n assert_equal(test, ctrl)\n assert_equal(test.mask, ctrl.mask)\n\n def test_where(self):\n # Test the where function\n x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])\n y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])\n m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]\n xm = masked_array(x, mask=m1)\n ym = masked_array(y, mask=m2)\n xm.set_fill_value(1e+20)\n\n d = where(xm > 2, xm, -9)\n assert_equal(d, [-9., -9., -9., -9., -9., 4.,\n -9., -9., 10., -9., -9., 3.])\n assert_equal(d._mask, xm._mask)\n d = where(xm > 2, -9, ym)\n assert_equal(d, [5., 0., 3., 2., -1., -9.,\n -9., -10., -9., 1., 0., -9.])\n assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])\n d = where(xm > 2, xm, masked)\n assert_equal(d, [-9., -9., -9., -9., -9., 4.,\n -9., -9., 10., -9., -9., 3.])\n tmp = xm._mask.copy()\n tmp[(xm <= 2).filled(True)] = True\n assert_equal(d._mask, tmp)\n\n ixm = xm.astype(int)\n d = 
where(ixm > 2, ixm, masked)\n assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])\n assert_equal(d.dtype, ixm.dtype)\n\n def test_where_object(self):\n a = np.array(None)\n b = masked_array(None)\n r = b.copy()\n assert_equal(np.ma.where(True, a, a), r)\n assert_equal(np.ma.where(True, b, b), r)\n\n def test_where_with_masked_choice(self):\n x = arange(10)\n x[3] = masked\n c = x >= 8\n # Set False to masked\n z = where(c, x, masked)\n assert_(z.dtype is x.dtype)\n assert_(z[3] is masked)\n assert_(z[4] is masked)\n assert_(z[7] is masked)\n assert_(z[8] is not masked)\n assert_(z[9] is not masked)\n assert_equal(x, z)\n # Set True to masked\n z = where(c, masked, x)\n assert_(z.dtype is x.dtype)\n assert_(z[3] is masked)\n assert_(z[4] is not masked)\n assert_(z[7] is not masked)\n assert_(z[8] is masked)\n assert_(z[9] is masked)\n\n def test_where_with_masked_condition(self):\n x = array([1., 2., 3., 4., 5.])\n c = array([1, 1, 1, 0, 0])\n x[2] = masked\n z = where(c, x, -x)\n assert_equal(z, [1., 2., 0., -4., -5])\n c[0] = masked\n z = where(c, x, -x)\n assert_equal(z, [1., 2., 0., -4., -5])\n assert_(z[0] is masked)\n assert_(z[1] is not masked)\n assert_(z[2] is masked)\n\n x = arange(1, 6)\n x[-1] = masked\n y = arange(1, 6) * 10\n y[2] = masked\n c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0])\n cm = c.filled(1)\n z = where(c, x, y)\n zm = where(cm, x, y)\n assert_equal(z, zm)\n assert_(getmask(zm) is nomask)\n assert_equal(zm, [1, 2, 3, 40, 50])\n z = where(c, masked, 1)\n assert_equal(z, [99, 99, 99, 1, 1])\n z = where(c, 1, masked)\n assert_equal(z, [99, 1, 1, 99, 99])\n\n def test_where_type(self):\n # Test the type conservation with where\n x = np.arange(4, dtype=np.int32)\n y = np.arange(4, dtype=np.float32) * 2.2\n test = where(x > 1.5, y, x).dtype\n control = np.find_common_type([np.int32, np.float32], [])\n assert_equal(test, control)\n\n def test_where_broadcast(self):\n # Issue 8599\n x = np.arange(9).reshape(3, 3)\n y = np.zeros(3)\n core = np.where([1, 0, 1], x, y)\n ma = where([1, 0, 1], x, y)\n\n assert_equal(core, ma)\n assert_equal(core.dtype, ma.dtype)\n\n def test_where_structured(self):\n # Issue 8600\n dt = np.dtype([('a', int), ('b', int)])\n x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt)\n y = np.array((10, 20), dtype=dt)\n core = np.where([0, 1, 1], x, y)\n ma = np.where([0, 1, 1], x, y)\n\n assert_equal(core, ma)\n assert_equal(core.dtype, ma.dtype)\n\n def test_where_structured_masked(self):\n dt = np.dtype([('a', int), ('b', int)])\n x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt)\n\n ma = where([0, 1, 1], x, masked)\n expected = masked_where([1, 0, 0], x)\n\n assert_equal(ma.dtype, expected.dtype)\n assert_equal(ma, expected)\n assert_equal(ma.mask, expected.mask)\n\n def test_choose(self):\n # Test choose\n choices = [[0, 1, 2, 3], [10, 11, 12, 13],\n [20, 21, 22, 23], [30, 31, 32, 33]]\n chosen = choose([2, 3, 1, 0], choices)\n assert_equal(chosen, array([20, 31, 12, 3]))\n chosen = choose([2, 4, 1, 0], choices, mode='clip')\n assert_equal(chosen, array([20, 31, 12, 3]))\n chosen = choose([2, 4, 1, 0], choices, mode='wrap')\n assert_equal(chosen, array([20, 1, 12, 3]))\n # Check with some masked indices\n indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])\n chosen = choose(indices_, choices, mode='wrap')\n assert_equal(chosen, array([99, 1, 12, 99]))\n assert_equal(chosen.mask, [1, 0, 0, 1])\n # Check with some masked choices\n choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],\n [1, 0, 0, 0], [0, 0, 0, 0]])\n indices_ = [2, 3, 
1, 0]\n chosen = choose(indices_, choices, mode='wrap')\n assert_equal(chosen, array([20, 31, 12, 3]))\n assert_equal(chosen.mask, [1, 0, 0, 1])\n\n def test_choose_with_out(self):\n # Test choose with an explicit out keyword\n choices = [[0, 1, 2, 3], [10, 11, 12, 13],\n [20, 21, 22, 23], [30, 31, 32, 33]]\n store = empty(4, dtype=int)\n chosen = choose([2, 3, 1, 0], choices, out=store)\n assert_equal(store, array([20, 31, 12, 3]))\n assert_(store is chosen)\n # Check with some masked indices + out\n store = empty(4, dtype=int)\n indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])\n chosen = choose(indices_, choices, mode='wrap', out=store)\n assert_equal(store, array([99, 31, 12, 99]))\n assert_equal(store.mask, [1, 0, 0, 1])\n # Check with some masked choices + out ina ndarray !\n choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],\n [1, 0, 0, 0], [0, 0, 0, 0]])\n indices_ = [2, 3, 1, 0]\n store = empty(4, dtype=int).view(ndarray)\n chosen = choose(indices_, choices, mode='wrap', out=store)\n assert_equal(store, array([999999, 31, 12, 999999]))\n\n def test_reshape(self):\n a = arange(10)\n a[0] = masked\n # Try the default\n b = a.reshape((5, 2))\n assert_equal(b.shape, (5, 2))\n assert_(b.flags['C'])\n # Try w/ arguments as list instead of tuple\n b = a.reshape(5, 2)\n assert_equal(b.shape, (5, 2))\n assert_(b.flags['C'])\n # Try w/ order\n b = a.reshape((5, 2), order='F')\n assert_equal(b.shape, (5, 2))\n assert_(b.flags['F'])\n # Try w/ order\n b = a.reshape(5, 2, order='F')\n assert_equal(b.shape, (5, 2))\n assert_(b.flags['F'])\n\n c = np.reshape(a, (2, 5))\n assert_(isinstance(c, MaskedArray))\n assert_equal(c.shape, (2, 5))\n assert_(c[0, 0] is masked)\n assert_(c.flags['C'])\n\n def test_make_mask_descr(self):\n # Flexible\n ntype = [('a', float), ('b', float)]\n test = make_mask_descr(ntype)\n assert_equal(test, [('a', bool), ('b', bool)])\n assert_(test is make_mask_descr(test))\n\n # Standard w/ shape\n ntype = (float, 2)\n test = make_mask_descr(ntype)\n assert_equal(test, (bool, 2))\n assert_(test is make_mask_descr(test))\n\n # Standard standard\n ntype = float\n test = make_mask_descr(ntype)\n assert_equal(test, np.dtype(bool))\n assert_(test is make_mask_descr(test))\n\n # Nested\n ntype = [('a', float), ('b', [('ba', float), ('bb', float)])]\n test = make_mask_descr(ntype)\n control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])\n assert_equal(test, control)\n assert_(test is make_mask_descr(test))\n\n # Named+ shape\n ntype = [('a', (float, 2))]\n test = make_mask_descr(ntype)\n assert_equal(test, np.dtype([('a', (bool, 2))]))\n assert_(test is make_mask_descr(test))\n\n # 2 names\n ntype = [(('A', 'a'), float)]\n test = make_mask_descr(ntype)\n assert_equal(test, np.dtype([(('A', 'a'), bool)]))\n assert_(test is make_mask_descr(test))\n\n # nested boolean types should preserve identity\n base_type = np.dtype([('a', int, 3)])\n base_mtype = make_mask_descr(base_type)\n sub_type = np.dtype([('a', int), ('b', base_mtype)])\n test = make_mask_descr(sub_type)\n assert_equal(test, np.dtype([('a', bool), ('b', [('a', bool, 3)])]))\n assert_(test.fields['b'][0] is base_mtype)\n\n def test_make_mask(self):\n # Test make_mask\n # w/ a list as an input\n mask = [0, 1]\n test = make_mask(mask)\n assert_equal(test.dtype, MaskType)\n assert_equal(test, [0, 1])\n # w/ a ndarray as an input\n mask = np.array([0, 1], dtype=bool)\n test = make_mask(mask)\n assert_equal(test.dtype, MaskType)\n assert_equal(test, [0, 1])\n # w/ a flexible-type ndarray as an input - 
use default\n mdtype = [('a', bool), ('b', bool)]\n mask = np.array([(0, 0), (0, 1)], dtype=mdtype)\n test = make_mask(mask)\n assert_equal(test.dtype, MaskType)\n assert_equal(test, [1, 1])\n # w/ a flexible-type ndarray as an input - use input dtype\n mdtype = [('a', bool), ('b', bool)]\n mask = np.array([(0, 0), (0, 1)], dtype=mdtype)\n test = make_mask(mask, dtype=mask.dtype)\n assert_equal(test.dtype, mdtype)\n assert_equal(test, mask)\n # w/ a flexible-type ndarray as an input - use input dtype\n mdtype = [('a', float), ('b', float)]\n bdtype = [('a', bool), ('b', bool)]\n mask = np.array([(0, 0), (0, 1)], dtype=mdtype)\n test = make_mask(mask, dtype=mask.dtype)\n assert_equal(test.dtype, bdtype)\n assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype))\n # Ensure this also works for void\n mask = np.array((False, True), dtype='?,?')[()]\n assert_(isinstance(mask, np.void))\n test = make_mask(mask, dtype=mask.dtype)\n assert_equal(test, mask)\n assert_(test is not mask)\n mask = np.array((0, 1), dtype='i4,i4')[()]\n test2 = make_mask(mask, dtype=mask.dtype)\n assert_equal(test2, test)\n # test that nomask is returned when m is nomask.\n bools = [True, False]\n dtypes = [MaskType, float]\n msgformat = 'copy=%s, shrink=%s, dtype=%s'\n for cpy, shr, dt in itertools.product(bools, bools, dtypes):\n res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt)\n assert_(res is nomask, msgformat % (cpy, shr, dt))\n\n def test_mask_or(self):\n # Initialize\n mtype = [('a', bool), ('b', bool)]\n mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)\n # Test using nomask as input\n test = mask_or(mask, nomask)\n assert_equal(test, mask)\n test = mask_or(nomask, mask)\n assert_equal(test, mask)\n # Using False as input\n test = mask_or(mask, False)\n assert_equal(test, mask)\n # Using another array w / the same dtype\n other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype)\n test = mask_or(mask, other)\n control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)\n assert_equal(test, control)\n # Using another array w / a different dtype\n othertype = [('A', bool), ('B', bool)]\n other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)\n try:\n test = mask_or(mask, other)\n except ValueError:\n pass\n # Using nested arrays\n dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]\n amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)\n bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)\n cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)\n assert_equal(mask_or(amask, bmask), cntrl)\n\n def test_flatten_mask(self):\n # Tests flatten mask\n # Standard dtype\n mask = np.array([0, 0, 1], dtype=bool)\n assert_equal(flatten_mask(mask), mask)\n # Flexible dtype\n mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])\n test = flatten_mask(mask)\n control = np.array([0, 0, 0, 1], dtype=bool)\n assert_equal(test, control)\n\n mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]\n data = [(0, (0, 0)), (0, (0, 1))]\n mask = np.array(data, dtype=mdtype)\n test = flatten_mask(mask)\n control = np.array([0, 0, 0, 0, 0, 1], dtype=bool)\n assert_equal(test, control)\n\n def test_on_ndarray(self):\n # Test functions on ndarrays\n a = np.array([1, 2, 3, 4])\n m = array(a, mask=False)\n test = anom(a)\n assert_equal(test, m.anom())\n test = reshape(a, (2, 2))\n assert_equal(test, m.reshape(2, 2))\n\n def test_compress(self):\n # Test compress function on ndarray and masked array\n # Address Github #2495.\n arr = np.arange(8)\n arr.shape 
= 4, 2\n cond = np.array([True, False, True, True])\n control = arr[[0, 2, 3]]\n test = np.ma.compress(cond, arr, axis=0)\n assert_equal(test, control)\n marr = np.ma.array(arr)\n test = np.ma.compress(cond, marr, axis=0)\n assert_equal(test, control)\n\n def test_compressed(self):\n # Test ma.compressed function.\n # Address gh-4026\n a = np.ma.array([1, 2])\n test = np.ma.compressed(a)\n assert_(type(test) is np.ndarray)\n\n # Test case when input data is ndarray subclass\n class A(np.ndarray):\n pass\n\n a = np.ma.array(A(shape=0))\n test = np.ma.compressed(a)\n assert_(type(test) is A)\n\n # Test that compress flattens\n test = np.ma.compressed([[1],[2]])\n assert_equal(test.ndim, 1)\n test = np.ma.compressed([[[[[1]]]]])\n assert_equal(test.ndim, 1)\n\n # Test case when input is MaskedArray subclass\n class M(MaskedArray):\n pass\n\n test = np.ma.compressed(M(shape=(0,1,2)))\n assert_equal(test.ndim, 1)\n\n # with .compressed() overridden\n class M(MaskedArray):\n def compressed(self):\n return 42\n\n test = np.ma.compressed(M(shape=(0,1,2)))\n assert_equal(test, 42)\n\n def test_convolve(self):\n a = masked_equal(np.arange(5), 2)\n b = np.array([1, 1])\n test = np.ma.convolve(a, b)\n assert_equal(test, masked_equal([0, 1, -1, -1, 7, 4], -1))\n\n test = np.ma.convolve(a, b, propagate_mask=False)\n assert_equal(test, masked_equal([0, 1, 1, 3, 7, 4], -1))\n\n test = np.ma.convolve([1, 1], [1, 1, 1])\n assert_equal(test, masked_equal([1, 2, 2, 1], -1))\n\n a = [1, 1]\n b = masked_equal([1, -1, -1, 1], -1)\n test = np.ma.convolve(a, b, propagate_mask=False)\n assert_equal(test, masked_equal([1, 1, -1, 1, 1], -1))\n test = np.ma.convolve(a, b, propagate_mask=True)\n assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1))\n\n\nclass TestMaskedFields(object):\n\n def setup(self):\n ilist = [1, 2, 3, 4, 5]\n flist = [1.1, 2.2, 3.3, 4.4, 5.5]\n slist = ['one', 'two', 'three', 'four', 'five']\n ddtype = [('a', int), ('b', float), ('c', '|S8')]\n mdtype = [('a', bool), ('b', bool), ('c', bool)]\n mask = [0, 1, 0, 0, 1]\n base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)\n self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype)\n\n def test_set_records_masks(self):\n base = self.data['base']\n mdtype = self.data['mdtype']\n # Set w/ nomask or masked\n base.mask = nomask\n assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))\n base.mask = masked\n assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))\n # Set w/ simple boolean\n base.mask = False\n assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))\n base.mask = True\n assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))\n # Set w/ list\n base.mask = [0, 0, 0, 1, 1]\n assert_equal_records(base._mask,\n np.array([(x, x, x) for x in [0, 0, 0, 1, 1]],\n dtype=mdtype))\n\n def test_set_record_element(self):\n # Check setting an element of a record)\n base = self.data['base']\n (base_a, base_b, base_c) = (base['a'], base['b'], base['c'])\n base[0] = (pi, pi, 'pi')\n\n assert_equal(base_a.dtype, int)\n assert_equal(base_a._data, [3, 2, 3, 4, 5])\n\n assert_equal(base_b.dtype, float)\n assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5])\n\n assert_equal(base_c.dtype, '|S8')\n assert_equal(base_c._data,\n [b'pi', b'two', b'three', b'four', b'five'])\n\n def test_set_record_slice(self):\n base = self.data['base']\n (base_a, base_b, base_c) = (base['a'], base['b'], base['c'])\n base[:3] = (pi, pi, 'pi')\n\n assert_equal(base_a.dtype, int)\n 
assert_equal(base_a._data, [3, 3, 3, 4, 5])\n\n assert_equal(base_b.dtype, float)\n assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5])\n\n assert_equal(base_c.dtype, '|S8')\n assert_equal(base_c._data,\n [b'pi', b'pi', b'pi', b'four', b'five'])\n\n def test_mask_element(self):\n \"Check record access\"\n base = self.data['base']\n base[0] = masked\n\n for n in ('a', 'b', 'c'):\n assert_equal(base[n].mask, [1, 1, 0, 0, 1])\n assert_equal(base[n]._data, base._data[n])\n\n def test_getmaskarray(self):\n # Test getmaskarray on flexible dtype\n ndtype = [('a', int), ('b', float)]\n test = empty(3, dtype=ndtype)\n assert_equal(getmaskarray(test),\n np.array([(0, 0), (0, 0), (0, 0)],\n dtype=[('a', '|b1'), ('b', '|b1')]))\n test[:] = masked\n assert_equal(getmaskarray(test),\n np.array([(1, 1), (1, 1), (1, 1)],\n dtype=[('a', '|b1'), ('b', '|b1')]))\n\n def test_view(self):\n # Test view w/ flexible dtype\n iterator = list(zip(np.arange(10), np.random.rand(10)))\n data = np.array(iterator)\n a = array(iterator, dtype=[('a', float), ('b', float)])\n a.mask[0] = (1, 0)\n controlmask = np.array([1] + 19 * [0], dtype=bool)\n # Transform globally to simple dtype\n test = a.view(float)\n assert_equal(test, data.ravel())\n assert_equal(test.mask, controlmask)\n # Transform globally to dty\n test = a.view((float, 2))\n assert_equal(test, data)\n assert_equal(test.mask, controlmask.reshape(-1, 2))\n\n test = a.view((float, 2), np.matrix)\n assert_equal(test, data)\n assert_(isinstance(test, np.matrix))\n\n def test_getitem(self):\n ndtype = [('a', float), ('b', float)]\n a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)\n a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),\n dtype=[('a', bool), ('b', bool)])\n\n def _test_index(i):\n assert_equal(type(a[i]), mvoid)\n assert_equal_records(a[i]._data, a._data[i])\n assert_equal_records(a[i]._mask, a._mask[i])\n\n assert_equal(type(a[i, ...]), MaskedArray)\n assert_equal_records(a[i,...]._data, a._data[i,...])\n assert_equal_records(a[i,...]._mask, a._mask[i,...])\n\n _test_index(1) # No mask\n _test_index(0) # One element masked\n _test_index(-2) # All element masked\n\n def test_setitem(self):\n # Issue 4866: check that one can set individual items in [record][col]\n # and [col][record] order\n ndtype = np.dtype([('a', float), ('b', int)])\n ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype)\n ma['a'][1] = 3.0\n assert_equal(ma['a'], np.array([1.0, 3.0]))\n ma[1]['a'] = 4.0\n assert_equal(ma['a'], np.array([1.0, 4.0]))\n # Issue 2403\n mdtype = np.dtype([('a', bool), ('b', bool)])\n # soft mask\n control = np.array([(False, True), (True, True)], dtype=mdtype)\n a = np.ma.masked_all((2,), dtype=ndtype)\n a['a'][0] = 2\n assert_equal(a.mask, control)\n a = np.ma.masked_all((2,), dtype=ndtype)\n a[0]['a'] = 2\n assert_equal(a.mask, control)\n # hard mask\n control = np.array([(True, True), (True, True)], dtype=mdtype)\n a = np.ma.masked_all((2,), dtype=ndtype)\n a.harden_mask()\n a['a'][0] = 2\n assert_equal(a.mask, control)\n a = np.ma.masked_all((2,), dtype=ndtype)\n a.harden_mask()\n a[0]['a'] = 2\n assert_equal(a.mask, control)\n\n def test_setitem_scalar(self):\n # 8510\n mask_0d = np.ma.masked_array(1, mask=True)\n arr = np.ma.arange(3)\n arr[0] = mask_0d\n assert_array_equal(arr.mask, [True, False, False])\n\n def test_element_len(self):\n # check that len() works for mvoid (Github issue #576)\n for rec in self.data['base']:\n assert_equal(len(rec), 
len(self.data['ddtype']))\n\n\nclass TestMaskedObjectArray(object):\n\n def test_getitem(self):\n arr = np.ma.array([None, None])\n for dt in [float, object]:\n a0 = np.eye(2).astype(dt)\n a1 = np.eye(3).astype(dt)\n arr[0] = a0\n arr[1] = a1\n\n assert_(arr[0] is a0)\n assert_(arr[1] is a1)\n assert_(isinstance(arr[0,...], MaskedArray))\n assert_(isinstance(arr[1,...], MaskedArray))\n assert_(arr[0,...][()] is a0)\n assert_(arr[1,...][()] is a1)\n\n arr[0] = np.ma.masked\n\n assert_(arr[1] is a1)\n assert_(isinstance(arr[0,...], MaskedArray))\n assert_(isinstance(arr[1,...], MaskedArray))\n assert_equal(arr[0,...].mask, True)\n assert_(arr[1,...][()] is a1)\n\n # gh-5962 - object arrays of arrays do something special\n assert_equal(arr[0].data, a0)\n assert_equal(arr[0].mask, True)\n assert_equal(arr[0,...][()].data, a0)\n assert_equal(arr[0,...][()].mask, True)\n\n def test_nested_ma(self):\n\n arr = np.ma.array([None, None])\n # set the first object to be an unmasked masked constant. A little fiddly\n arr[0,...] = np.array([np.ma.masked], object)[0,...]\n\n # check the above line did what we were aiming for\n assert_(arr.data[0] is np.ma.masked)\n\n # test that getitem returned the value by identity\n assert_(arr[0] is np.ma.masked)\n\n # now mask the masked value!\n arr[0] = np.ma.masked\n assert_(arr[0] is np.ma.masked)\n\n\nclass TestMaskedView(object):\n\n def setup(self):\n iterator = list(zip(np.arange(10), np.random.rand(10)))\n data = np.array(iterator)\n a = array(iterator, dtype=[('a', float), ('b', float)])\n a.mask[0] = (1, 0)\n controlmask = np.array([1] + 19 * [0], dtype=bool)\n self.data = (data, a, controlmask)\n\n def test_view_to_nothing(self):\n (data, a, controlmask) = self.data\n test = a.view()\n assert_(isinstance(test, MaskedArray))\n assert_equal(test._data, a._data)\n assert_equal(test._mask, a._mask)\n\n def test_view_to_type(self):\n (data, a, controlmask) = self.data\n test = a.view(np.ndarray)\n assert_(not isinstance(test, MaskedArray))\n assert_equal(test, a._data)\n assert_equal_records(test, data.view(a.dtype).squeeze())\n\n def test_view_to_simple_dtype(self):\n (data, a, controlmask) = self.data\n # View globally\n test = a.view(float)\n assert_(isinstance(test, MaskedArray))\n assert_equal(test, data.ravel())\n assert_equal(test.mask, controlmask)\n\n def test_view_to_flexible_dtype(self):\n (data, a, controlmask) = self.data\n\n test = a.view([('A', float), ('B', float)])\n assert_equal(test.mask.dtype.names, ('A', 'B'))\n assert_equal(test['A'], a['a'])\n assert_equal(test['B'], a['b'])\n\n test = a[0].view([('A', float), ('B', float)])\n assert_(isinstance(test, MaskedArray))\n assert_equal(test.mask.dtype.names, ('A', 'B'))\n assert_equal(test['A'], a['a'][0])\n assert_equal(test['B'], a['b'][0])\n\n test = a[-1].view([('A', float), ('B', float)])\n assert_(isinstance(test, MaskedArray))\n assert_equal(test.dtype.names, ('A', 'B'))\n assert_equal(test['A'], a['a'][-1])\n assert_equal(test['B'], a['b'][-1])\n\n def test_view_to_subdtype(self):\n (data, a, controlmask) = self.data\n # View globally\n test = a.view((float, 2))\n assert_(isinstance(test, MaskedArray))\n assert_equal(test, data)\n assert_equal(test.mask, controlmask.reshape(-1, 2))\n # View on 1 masked element\n test = a[0].view((float, 2))\n assert_(isinstance(test, MaskedArray))\n assert_equal(test, data[0])\n assert_equal(test.mask, (1, 0))\n # View on 1 unmasked element\n test = a[-1].view((float, 2))\n assert_(isinstance(test, MaskedArray))\n assert_equal(test, data[-1])\n\n def 
test_view_to_dtype_and_type(self):\n (data, a, controlmask) = self.data\n\n test = a.view((float, 2), np.matrix)\n assert_equal(test, data)\n assert_(isinstance(test, np.matrix))\n assert_(not isinstance(test, MaskedArray))\n\nclass TestOptionalArgs(object):\n def test_ndarrayfuncs(self):\n # test axis arg behaves the same as ndarray (including multiple axes)\n\n d = np.arange(24.0).reshape((2,3,4))\n m = np.zeros(24, dtype=bool).reshape((2,3,4))\n # mask out last element of last dimension\n m[:,:,-1] = True\n a = np.ma.array(d, mask=m)\n\n def testaxis(f, a, d):\n numpy_f = numpy.__getattribute__(f)\n ma_f = np.ma.__getattribute__(f)\n\n # test axis arg\n assert_equal(ma_f(a, axis=1)[...,:-1], numpy_f(d[...,:-1], axis=1))\n assert_equal(ma_f(a, axis=(0,1))[...,:-1],\n numpy_f(d[...,:-1], axis=(0,1)))\n\n def testkeepdims(f, a, d):\n numpy_f = numpy.__getattribute__(f)\n ma_f = np.ma.__getattribute__(f)\n\n # test keepdims arg\n assert_equal(ma_f(a, keepdims=True).shape,\n numpy_f(d, keepdims=True).shape)\n assert_equal(ma_f(a, keepdims=False).shape,\n numpy_f(d, keepdims=False).shape)\n\n # test both at once\n assert_equal(ma_f(a, axis=1, keepdims=True)[...,:-1],\n numpy_f(d[...,:-1], axis=1, keepdims=True))\n assert_equal(ma_f(a, axis=(0,1), keepdims=True)[...,:-1],\n numpy_f(d[...,:-1], axis=(0,1), keepdims=True))\n\n for f in ['sum', 'prod', 'mean', 'var', 'std']:\n testaxis(f, a, d)\n testkeepdims(f, a, d)\n\n for f in ['min', 'max']:\n testaxis(f, a, d)\n\n d = (np.arange(24).reshape((2,3,4))%2 == 0)\n a = np.ma.array(d, mask=m)\n for f in ['all', 'any']:\n testaxis(f, a, d)\n testkeepdims(f, a, d)\n\n def test_count(self):\n # test np.ma.count specially\n\n d = np.arange(24.0).reshape((2,3,4))\n m = np.zeros(24, dtype=bool).reshape((2,3,4))\n m[:,0,:] = True\n a = np.ma.array(d, mask=m)\n\n assert_equal(count(a), 16)\n assert_equal(count(a, axis=1), 2*ones((2,4)))\n assert_equal(count(a, axis=(0,1)), 4*ones((4,)))\n assert_equal(count(a, keepdims=True), 16*ones((1,1,1)))\n assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4)))\n assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4)))\n assert_equal(count(a, axis=-2), 2*ones((2,4)))\n assert_raises(ValueError, count, a, axis=(1,1))\n assert_raises(np.AxisError, count, a, axis=3)\n\n # check the 'nomask' path\n a = np.ma.array(d, mask=nomask)\n\n assert_equal(count(a), 24)\n assert_equal(count(a, axis=1), 3*ones((2,4)))\n assert_equal(count(a, axis=(0,1)), 6*ones((4,)))\n assert_equal(count(a, keepdims=True), 24*ones((1,1,1)))\n assert_equal(np.ndim(count(a, keepdims=True)), 3)\n assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4)))\n assert_equal(count(a, axis=(0,1), keepdims=True), 6*ones((1,1,4)))\n assert_equal(count(a, axis=-2), 3*ones((2,4)))\n assert_raises(ValueError, count, a, axis=(1,1))\n assert_raises(np.AxisError, count, a, axis=3)\n\n # check the 'masked' singleton\n assert_equal(count(np.ma.masked), 0)\n\n # check 0-d arrays do not allow axis > 0\n assert_raises(np.AxisError, count, np.ma.array(1), axis=1)\n\n\nclass TestMaskedConstant(object):\n def _do_add_test(self, add):\n # sanity check\n assert_(add(np.ma.masked, 1) is np.ma.masked)\n\n # now try with a vector\n vector = np.array([1, 2, 3])\n result = add(np.ma.masked, vector)\n\n # lots of things could go wrong here\n assert_(result is not np.ma.masked)\n assert_(not isinstance(result, np.ma.core.MaskedConstant))\n assert_equal(result.shape, vector.shape)\n assert_equal(np.ma.getmask(result), np.ones(vector.shape, dtype=bool))\n\n def 
test_ufunc(self):\n self._do_add_test(np.add)\n\n def test_operator(self):\n self._do_add_test(lambda a, b: a + b)\n\n def test_ctor(self):\n m = np.ma.array(np.ma.masked)\n\n # most importantly, we do not want to create a new MaskedConstant\n # instance\n assert_(not isinstance(m, np.ma.core.MaskedConstant))\n assert_(m is not np.ma.masked)\n\n\ndef test_masked_array():\n a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])\n assert_equal(np.argwhere(a), [[1], [3]])\n\ndef test_append_masked_array():\n a = np.ma.masked_equal([1,2,3], value=2)\n b = np.ma.masked_equal([4,3,2], value=2)\n\n result = np.ma.append(a, b)\n expected_data = [1, 2, 3, 4, 3, 2]\n expected_mask = [False, True, False, False, False, True]\n assert_array_equal(result.data, expected_data)\n assert_array_equal(result.mask, expected_mask)\n\n a = np.ma.masked_all((2,2))\n b = np.ma.ones((3,1))\n\n result = np.ma.append(a, b)\n expected_data = [1] * 3\n expected_mask = [True] * 4 + [False] * 3\n assert_array_equal(result.data[-3], expected_data)\n assert_array_equal(result.mask, expected_mask)\n\n result = np.ma.append(a, b, axis=None)\n assert_array_equal(result.data[-3], expected_data)\n assert_array_equal(result.mask, expected_mask)\n\n\ndef test_append_masked_array_along_axis():\n a = np.ma.masked_equal([1,2,3], value=2)\n b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)\n\n # When `axis` is specified, `values` must have the correct shape.\n assert_raises(ValueError, np.ma.append, a, b, axis=0)\n\n result = np.ma.append(a[np.newaxis,:], b, axis=0)\n expected = np.ma.arange(1, 10)\n expected[[1, 6]] = np.ma.masked\n expected = expected.reshape((3,3))\n assert_array_equal(result.data, expected.data)\n assert_array_equal(result.mask, expected.mask)\n\n\ndef test_default_fill_value_complex():\n # regression test for Python 3, where 'unicode' was not defined\n assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j)\n\n\ndef test_ufunc_with_output():\n # check that giving an output argument always returns that output.\n # Regression test for gh-8416.\n x = array([1., 2., 3.], mask=[0, 0, 1])\n y = np.add(x, 1., out=x)\n assert_(y is x)\n\ndef test_astype():\n descr = [('v', int, 3), ('x', [('y', float)])]\n x = array(([1, 2, 3], (1.0,)), dtype=descr)\n assert_equal(x, x.astype(descr))\n\n\n###############################################################################\nif __name__ == \"__main__\":\n run_module_suite()\n" ]
[ [ "numpy.ones", "numpy.ma.core.maximum_fill_value", "numpy.multiply", "numpy.subtract", "numpy.testing.assert_equal", "numpy.ma.core.mvoid", "numpy.ma.core.ones", "numpy.asarray", "numpy.ma.core.allclose", "numpy.testing.assert_warns", "numpy.ma.core.empty", "numpy.ma.core.filled", "numpy.ma.core.min", "numpy.ma.core.angle", "numpy.ma.core.minimum.outer", "numpy.transpose", "numpy.arccos", "numpy.bool_", "numpy.ma.core.flatten_structured_array", "numpy.ma.MaskedArray", "numpy.absolute", "numpy.ma.core.cosh", "numpy.ma.core.take", "numpy.ma.core.allequal", "numpy.ma.__getattribute__", "numpy.ma.core.greater", "numpy.ma.convolve", "numpy.ma.core.getmask", "numpy.ma.core.divide", "numpy.ma.core.shape", "numpy.ma.core.ravel", "numpy.ma.core.getmaskarray", "numpy.ma.core.max", "numpy.maximum.reduce", "numpy.ma.core.arccosh", "numpy.ma.testutils.assert_equal_records", "numpy.arcsin", "numpy.divide", "numpy.ma.testutils.fail_if_equal", "numpy.ma.core.masked_print_option.set_display", "numpy.matrix", "numpy.arctan", "numpy.exp", "numpy.errstate", "numpy.ma.copy", "numpy.round", "numpy.array", "numpy.find_common_type", "numpy.ma.testutils.assert_", "numpy.ma.core.maximum", "numpy.ma.core.arccos", "numpy.ma.core.masked_equal", "numpy.geterr", "numpy.argwhere", "numpy.ma.core.sqrt", "numpy.ma.core.diag", "numpy.add", "numpy.ma.core.MaskedArray.cumsum", "numpy.ma.core.MaskedArray", "numpy.ma.core.equal", "numpy.ma.core.mod", "numpy.ma.core.arctan", "numpy.ma.core.reshape", "numpy.add.reduce", "numpy.ma.core.repeat", "numpy.reshape", "numpy.ma.core.flatten_mask", "numpy.seterr", "numpy.ma.core.zeros", "numpy.ma.core.empty_like", "numpy.ma.core.maximum.reduce", "numpy.ma.core.masked_less", "numpy.ma.core.add", "numpy.equal", "numpy.ma.core.less_equal", "numpy.ma.masked_all", "numpy.greater", "numpy.ma.core.add.accumulate", "numpy.ma.core.masked_array", "numpy.ma.core.minimum_fill_value", "numpy.ma.core.sort", "numpy.may_share_memory", "numpy.finfo", "numpy.testing.assert_raises", "numpy.arctan2", "numpy.ma.core.masked_less_equal", "numpy.ma.core.less", "numpy.ma.ones", "numpy.not_equal", "numpy.ma.core.argsort", "numpy.ma.core.putmask", "numpy.ma.array", "numpy.ma.core.arctan2", "numpy.angle", "numpy.product", "numpy.ma.core.tanh", "numpy.sqrt", "numpy.ma.compress", "numpy.concatenate", "numpy.ma.testutils.assert_not_equal", "numpy.sin", "numpy.sum", "numpy.ma.compressed", "numpy.ma.core.greater_equal", "numpy.minimum.outer", "numpy.ma.core.isMaskedArray", "numpy.ma.core.product", "numpy.take", "numpy.less", "numpy.testing.run_module_suite", "numpy.argsort", "numpy.trace", "numpy.log", "numpy.ma.core.masked_not_equal", "numpy.ma.core.masked_greater_equal", "numpy.ma.testutils.assert_almost_equal", "numpy.ma.core.exp", "numpy.ma.core.alltrue", "numpy.ma.core.asarray", "numpy.ma.core.resize", "numpy.ma.core.minimum.reduce", "numpy.ma.core.array", "numpy.ma.core.sinh", "numpy.random.rand", "numpy.ma.core.put", "numpy.isnan", "numpy.ma.core.minimum", "numpy.random.uniform", "numpy.ma.testutils.assert_array_equal", "numpy.ma.core.transpose", "numpy.zeros", "numpy.ma.masked_equal", "numpy.ma.getmask", "numpy.maximum.outer", "numpy.ma.core.sum", "numpy.arange", "numpy.sort", "numpy.ma.core.multiply.outer", "numpy.ma.core.not_equal", "numpy.cosh", "numpy.ma.core.anom", "numpy.ma.core.conjugate", "numpy.ma.core.subtract", "numpy.ma.core.multiply", "numpy.true_divide", "numpy.ma.core.sin", "numpy.ma.core.absolute", "numpy.ma.masked_values", "numpy.less_equal", "numpy.ma.core.default_fill_value", 
"numpy.ma.core.add.reduce", "numpy.ma.testutils.assert_equal", "numpy.diag", "numpy.dtype", "numpy.ma.testutils.assert_mask_equal", "numpy.ma.core.cos", "numpy.greater_equal", "numpy.ma.core.log", "numpy.ma.core.masked_greater", "numpy.ma.core.count", "numpy.ma.core.where", "numpy.tanh", "numpy.ma.core.log10", "numpy.ma.core.inner", "numpy.ma.core.fix_invalid", "numpy.ma.core.tan", "numpy.ma.core.arange", "numpy.ma.masked_where", "numpy.ma.core.masked_where", "numpy.ma.masked_array", "numpy.ma.core.arcsin", "numpy.cos", "numpy.ndarray", "numpy.ma.core.sometrue", "numpy.ma.core.make_mask_descr", "numpy.where", "numpy.ma.arange", "numpy.identity", "numpy.minimum", "numpy.ma.where", "numpy.eye", "numpy.ma.core.abs", "numpy.sinh", "numpy.mod", "numpy.all", "numpy.tan", "numpy.ma.core.masked_values", "numpy.maximum", "numpy.ma.core.power", "numpy.ma.core.concatenate", "numpy.ma.core.mask_or", "numpy.ma.append", "numpy.ma.core.maximum.outer", "numpy.empty", "numpy.add.accumulate", "numpy.conjugate", "numpy.ma.core.identity", "numpy.ravel", "numpy.ma.core.outer", "numpy.ma.core.make_mask", "numpy.shape", "numpy.iinfo", "numpy.testing.suppress_warnings", "numpy.ma.core.choose" ] ]
bulletPr/label-efficient-unsupervised-learning
[ "8e320dd96dab8de97d304e0fb6550cf3ae2aa022" ]
[ "datasets.py" ]
[ "from __future__ import print_function\nimport torch.utils.data as data\nfrom PIL import Image\nimport os\nimport os.path\nimport errno\nimport torch\nimport json\nimport codecs\nimport numpy as np\nimport sys\nimport torchvision.transforms as transforms\nimport argparse\nimport json\n\n#Part Dataset\nclass PartDataset(data.Dataset):\n def __init__(self, root, npoints = 2500, classification = False, class_choice = None, train = True):\n self.npoints = npoints\n self.root = root\n self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')\n self.cat = {}\n\n self.classification = classification\n\n with open(self.catfile, 'r') as f:\n for line in f:\n ls = line.strip().split()\n self.cat[ls[0]] = ls[1]\n #print(self.cat)\n if not class_choice is None:\n self.cat = {k:v for k,v in self.cat.items() if k in class_choice}\n\n self.meta = {}\n for item in self.cat:\n #print('category', item)\n self.meta[item] = []\n dir_point = os.path.join(self.root, self.cat[item], 'points')\n dir_seg = os.path.join(self.root, self.cat[item], 'points_label')\n #print(dir_point, dir_seg)\n fns = sorted(os.listdir(dir_point))\n if train:\n fns = fns[:int(len(fns) * 0.9)]\n else:\n fns = fns[int(len(fns) * 0.9):]\n\n #print(os.path.basename(fns))\n for fn in fns:\n token = (os.path.splitext(os.path.basename(fn))[0])\n self.meta[item].append((os.path.join(dir_point, token + '.pts'), os.path.join(dir_seg, token + '.seg')))\n\n self.datapath = []\n for item in self.cat:\n for fn in self.meta[item]:\n self.datapath.append((item, fn[0], fn[1]))\n\n\n self.classes = dict(zip(sorted(self.cat), range(len(self.cat))))\n print(self.classes)\n self.num_seg_classes = 0\n if not self.classification:\n for i in range(len(self.datapath)//50):\n l = len(np.unique(np.loadtxt(self.datapath[i][-1]).astype(np.uint8)))\n if l > self.num_seg_classes:\n self.num_seg_classes = l\n #print(self.num_seg_classes)\n\n\n def __getitem__(self, index):\n fn = self.datapath[index]\n cls = self.classes[self.datapath[index][0]]\n point_set = np.loadtxt(fn[1]).astype(np.float32)\n seg = np.loadtxt(fn[2]).astype(np.int64)\n #print(point_set.shape, seg.shape)\n\n choice = np.random.choice(len(seg), self.npoints, replace=True)\n #resample\n point_set = point_set[choice, :]\n seg = seg[choice]\n point_set = torch.from_numpy(point_set)\n seg = torch.from_numpy(seg)\n cls = torch.from_numpy(np.array([cls]).astype(np.int64))\n if self.classification:\n return point_set, cls\n else:\n return point_set, seg\n\n def __len__(self):\n return len(self.datapath)\n\n\nif __name__ == '__main__':\n print('test')\n d = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', class_choice = ['Chair'])\n print(len(d))\n ps, seg = d[0]\n print(ps.size(), ps.type(), seg.size(),seg.type())\n\n d = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', classification = True)\n print(len(d))\n ps, cls = d[0]\n print(ps.size(), ps.type(), cls.size(),cls.type())" ]
[ [ "numpy.array", "torch.from_numpy", "numpy.loadtxt" ] ]
sumau/PredictCode
[ "e2a2d5a8fa5d83f011c33e18d4ce6ac7e1429aa8" ]
[ "tests/kernels_test.py" ]
[ "import numpy as np\nimport scipy.stats as stats\nimport scipy.linalg\nimport pytest\nimport open_cp.kernels as testmod\nimport open_cp.data\nimport unittest.mock as mock\nimport shapely.geometry\n\ndef slow_gaussian_kernel_new(pts, mean, var):\n \"\"\"Test case where `pts`, `mean`, `var` are all of shape 2.\"\"\"\n assert(len(pts.shape) == 2 and len(mean.shape) == 2 and len(var.shape) == 2)\n space_dim = pts.shape[0]\n num_pts = pts.shape[1]\n num_samples = mean.shape[1]\n assert(space_dim == mean.shape[0])\n assert((space_dim, num_samples) == var.shape)\n\n out = np.empty(num_pts)\n for i in range(num_pts):\n total = np.empty(num_samples)\n for j in range(num_samples):\n prod = np.empty(space_dim)\n for k in range(space_dim):\n v = var[k][j] * 2\n prod[k] = np.exp(- (pts[k][i] - mean[k][j]) **\n 2 / v) / np.sqrt(np.pi * v)\n total[j] = np.product(prod)\n out[i] = np.mean(total)\n\n return out\n\ndef test_slow_gaussian_kernel_single_new():\n pts = np.empty((1, 1))\n pts[0][0] = 1\n mean = np.empty((1, 1))\n mean[0][0] = 0.5\n var = np.empty((1, 1))\n var[0][0] = 3\n\n expected = np.exp(-0.25 / 6) / np.sqrt(6 * np.pi)\n got = slow_gaussian_kernel_new(pts, mean, var)\n np.testing.assert_allclose(expected, got)\n\ndef test_compare_GaussianKernel():\n for k in range(1, 6):\n for M in range(1, 6):\n mean = np.random.random(size=(k,M))\n var = 0.0001 + np.random.random(size=(k,M))**2\n kernel = testmod.GaussianKernel(mean, var)\n for N in range(1, 6):\n pts = np.random.random(size=(k,N))\n want = slow_gaussian_kernel_new(pts, mean, var)\n got = kernel(pts)\n print(k,M,N)\n np.testing.assert_allclose(got, want)\n # Single point case\n pts = np.random.random(size=k)\n want = slow_gaussian_kernel_new(pts[:,None], mean, var)[0]\n got = kernel(pts)\n print(\"Single point case k={}, M={}\".format(k,M))\n assert want == pytest.approx(got)\n\ndef test_compare_GaussianKernel_k1_case():\n for M in range(1, 6):\n mean = np.random.random(size=M)\n var = 0.0001 + np.random.random(size=M)**2\n kernel = testmod.GaussianKernel(mean, var)\n for N in range(1, 6):\n pts = np.random.random(size=N)\n want = slow_gaussian_kernel_new(pts[None,:], mean[None,:], var[None,:])\n got = kernel(pts)\n print(M,N)\n np.testing.assert_allclose(got, want)\n # Single point case\n print(\"Single point case, M={}\".format(M))\n pts = np.random.random()\n want = slow_gaussian_kernel_new(np.asarray(pts)[None,None], mean[None,:], var[None,:])[0]\n got = kernel(pts)\n assert want == pytest.approx(got)\n \ndef test_1D_kth_distance():\n coords = [0,1,2,3,6,7,9,15]\n distances = testmod.compute_kth_distance(coords, k=3)\n np.testing.assert_allclose(distances, [3,2,2,3,3,4,6,9])\n\ndef test_2D_kth_distance():\n coords = [[0,0,1,1],[0,1,0,2]]\n distances = testmod.compute_kth_distance(coords, k=2)\n np.testing.assert_allclose(distances, [1,np.sqrt(2),np.sqrt(2),2])\n\ndef slow_kth_nearest(points, index):\n \"\"\"(k, N) input. Returns ordered list [0,...] 
of distance to kth nearest point from index\"\"\"\n if len(points.shape) == 1:\n points = points[None, :]\n pt = points[:, index]\n distances = np.sqrt(np.sum((points - pt[:,None])**2, axis=0))\n distances.sort()\n return distances\n\ndef test_slow_kth_nearest():\n pts = np.array([1,2,4,5,7,8,9])\n got = slow_kth_nearest(pts, 0)\n np.testing.assert_array_equal(got, [0,1,3,4,6,7,8])\n got = slow_kth_nearest(pts, 3)\n np.testing.assert_array_equal(got, [0,1,2,3,3,4,4])\n got = slow_kth_nearest(pts, 4)\n np.testing.assert_array_equal(got, [0,1,2,2,3,5,6])\n\n pts = np.array([[0,0],[1,1],[0,1],[1,0],[2,3]]).T\n got = slow_kth_nearest(pts, 0)\n np.testing.assert_allclose(got, [0,1,1,np.sqrt(2),np.sqrt(13)])\n got = slow_kth_nearest(pts, 1)\n np.testing.assert_allclose(got, [0,1,1,np.sqrt(2),np.sqrt(5)])\n\ndef test_1d_kth_nearest():\n # In the 1D scale we don't need to rescale\n pts = np.random.random(size=20) * 20 - 10\n for k in [1,2,3,4,5]:\n distances = [slow_kth_nearest(pts, i)[k] for i in range(len(pts))]\n def expected_kernel(x):\n value = 0\n for i, p in enumerate(pts):\n value += stats.norm(loc=p, scale=distances[i]).pdf(x)\n return value / len(pts)\n kernel = testmod.kth_nearest_neighbour_gaussian_kde(pts, k=k)\n test_points = np.random.random(size=10) * 15\n np.testing.assert_allclose( kernel(test_points), expected_kernel(test_points) )\n\ndef test_2d_kth_nearest():\n for space_dim in range(2, 5):\n pts = np.random.random(size=(space_dim, 20))\n stds = np.std(pts, axis=1)\n rescaled = np.empty_like(pts)\n for i in range(space_dim):\n rescaled[i] = pts[i] / stds[i]\n for k in [1,2,3,4,5,6]:\n distances = [slow_kth_nearest(rescaled, i)[k] for i in range(pts.shape[1])]\n def expected_kernel(x):\n value = 0\n for i in range(pts.shape[1]):\n prod = 1\n for coord in range(space_dim):\n p = pts[coord,i]\n prod *= stats.norm(loc=p, scale=distances[i]*stds[coord]).pdf(x[coord])\n value += prod\n return value / pts.shape[1]\n kernel = testmod.kth_nearest_neighbour_gaussian_kde(pts, k=k)\n test_points = np.random.random(size=(space_dim, 10))\n np.testing.assert_allclose( kernel(test_points), expected_kernel(test_points) )\n\ndef test_ReflectedKernel():\n kernel = lambda pt : np.abs(pt)\n testkernel = testmod.ReflectedKernel(kernel)\n assert( testkernel(5) == 10 )\n np.testing.assert_allclose(testkernel([1,2,3]), [2,4,6])\n \n # 2 (or 3 etc.) 
dim kernel only\n testkernel = testmod.ReflectedKernel(lambda pt : np.abs(pt[0]))\n np.testing.assert_allclose(testkernel([[1,2,3],[4,5,6]]), [2,4,6])\n testkernel = testmod.ReflectedKernel(lambda pt : pt[0] * (pt[0]>=0))\n np.testing.assert_allclose(testkernel([[1,2,3],[4,5,6]]), [1,2,3])\n testkernel = testmod.ReflectedKernel(lambda pt : pt[0] * (pt[0]>=0), reflected_axis=1)\n np.testing.assert_allclose(testkernel([[1,2,3],[4,5,6]]), [2,4,6])\n\ndef test_ReflectedKernelEstimator():\n estimator = mock.MagicMock()\n kernel_mock = mock.MagicMock()\n estimator.return_value = kernel_mock\n test = testmod.ReflectedKernelEstimator(estimator)\n kernel = test([1,2,3,4])\n estimator.assert_called_with([1,2,3,4])\n assert(kernel.reflected_axis == 0)\n assert(kernel.delegate is kernel_mock)\n\n test = testmod.ReflectedKernelEstimator(estimator, reflected_axis=2)\n kernel = test([1,2,3,4])\n assert(kernel.reflected_axis == 2)\n \n \ndef test_GaussianBase_not_point():\n with pytest.raises(ValueError):\n testmod.GaussianBase(5.2)\n \ndef test_GaussianBase_set_covariance():\n gb = testmod.GaussianBase([1,2,3,4])\n with pytest.raises(ValueError):\n gb.covariance_matrix = [[1,2,3], [2,3,4]]\n with pytest.raises(ValueError):\n gb.covariance_matrix = [[1,2], [3,4]]\n gb.covariance_matrix = 1\n \n gb = testmod.GaussianBase([[1,2,3,4], [4,2,2,1]])\n with pytest.raises(ValueError):\n gb.covariance_matrix = [[1,2,3], [2,3,4]]\n gb.covariance_matrix = [[2,2], [3,4]]\n with pytest.raises(ValueError):\n gb.covariance_matrix = [[1,2], [3,4]]\n with pytest.raises(ValueError):\n gb.covariance_matrix = 1\n \ndef test_GaussianBase_set_band():\n gb = testmod.GaussianBase([1,2,3,4])\n assert gb.bandwidth == pytest.approx(4 ** (-1/5))\n gb.bandwidth = \"scott\"\n assert gb.bandwidth == pytest.approx(4 ** (-1/5))\n with pytest.raises(ValueError):\n gb.bandwidth = \"matt\"\n gb.bandwidth = \"silverman\"\n assert gb.bandwidth == pytest.approx(3 ** (-1/5))\n\n gb = testmod.GaussianBase([[1,2,3,4],[4,2,1,3]])\n assert gb.bandwidth == pytest.approx(4 ** (-1/6))\n gb.bandwidth = \"scott\"\n assert gb.bandwidth == pytest.approx(4 ** (-1/6))\n with pytest.raises(ValueError):\n gb.bandwidth = \"matt\"\n gb.bandwidth = \"silverman\"\n assert gb.bandwidth == pytest.approx(4 ** (-1/6))\n\ndef test_GaussianBase_set_weights():\n gb = testmod.GaussianBase([1,2,3,4])\n assert gb.weights is None\n \n gb.weights = [.2, 0, 5, 2]\n \n with pytest.raises(ValueError):\n gb.weights = [.2, 0, 5]\n \n with pytest.raises(ValueError):\n gb.weights = 2\n \n with pytest.raises(ValueError):\n gb.weights = [[1,2,3],[4,5,6]]\n\nsqrt2pi = np.sqrt(2 * np.pi)\n\ndef test_GaussianBase_eval():\n gb = testmod.GaussianBase([1,2,3,4])\n assert gb.covariance_matrix[0,0] == pytest.approx(20/12)\n \n gb.covariance_matrix = 1.0\n gb.bandwidth = 1.0\n x5 = np.sum(np.exp([-16/2, -9/2, -4/2, -1/2])) / 4 / sqrt2pi\n assert gb(5) == pytest.approx(x5)\n x2 = np.sum(np.exp([-1/2, 0, -1/2, -4/2])) / 4 / sqrt2pi\n assert gb(2) == pytest.approx(x2)\n x0 = np.sum(np.exp([-1/2, -4/2, -9/2, -16/2])) / 4 / sqrt2pi\n assert gb(0) == pytest.approx(x0)\n np.testing.assert_allclose(gb([0]), [x0])\n np.testing.assert_allclose(gb([0,2,5,2,5,0]), [x0,x2,x5,x2,x5,x0])\n\ndef test_GaussianBase_eval_with_bandwidth():\n gb = testmod.GaussianBase([1,2,3,4])\n assert gb.covariance_matrix[0,0] == pytest.approx(20/12)\n \n gb.covariance_matrix = 1.0\n gb.bandwidth = 2.0\n x5 = np.sum(np.exp([-16/8, -9/8, -4/8, -1/8])) / 8 / sqrt2pi\n assert gb(5) == pytest.approx(x5)\n x2 = np.sum(np.exp([-1/8, 
0, -1/8, -4/8])) / 8 / sqrt2pi\n assert gb(2) == pytest.approx(x2)\n x0 = np.sum(np.exp([-1/8, -4/8, -9/8, -16/8])) / 8 / sqrt2pi\n assert gb(0) == pytest.approx(x0)\n np.testing.assert_allclose(gb([0]), [x0])\n np.testing.assert_allclose(gb([0,2,5,2,5,0]), [x0,x2,x5,x2,x5,x0])\n\ndef test_GaussianBase_large_eval():\n n = 1000000\n pts = np.arange(n) / n\n gb = testmod.GaussianBase(pts)\n gb.covariance_matrix = 1.0\n gb.bandwidth = 1.0\n\n x5 = np.sum(np.exp(-(5 - pts)**2 / 2)) / n / sqrt2pi\n assert gb(5) == pytest.approx(x5)\n x3 = np.sum(np.exp(-(3 - pts)**2 / 2)) / n / sqrt2pi\n assert gb(3) == pytest.approx(x3)\n np.testing.assert_allclose(gb([5,3]), [x5,x3])\n\ndef test_GaussianBase_large_eval_3d():\n n = 1000000\n pts = np.random.random((3,n)) * 100\n gb = testmod.GaussianBase(pts)\n gb.covariance_matrix = np.eye(3)\n gb.bandwidth = 1.0\n\n pt = np.asarray([1,2,3])\n x = np.sum(np.exp(-np.sum((pts - pt[:,None])**2,axis=0) / 2)) / n / (sqrt2pi**3)\n assert gb([1,2,3]) == pytest.approx(x)\n pt = np.asarray([4,2,1])\n y = np.sum(np.exp(-np.sum((pts - pt[:,None])**2,axis=0) / 2)) / n / (sqrt2pi**3)\n assert gb([4,2,1]) == pytest.approx(y)\n\n np.testing.assert_allclose(gb([[1,4], [2,2], [3,1]]), [x,y])\n\ndef test_GaussianBase_eval_with_cov():\n gb = testmod.GaussianBase([1,2,3,4])\n assert gb.covariance_matrix[0,0] == pytest.approx(20/12)\n \n gb.covariance_matrix = 0.5\n gb.bandwidth = 1.0\n x5 = np.sum(np.exp([-16, -9, -4, -1])) / 4 / np.sqrt(0.5) / sqrt2pi\n assert gb(5) == pytest.approx(x5)\n x2 = np.sum(np.exp([-1, 0, -1, -4])) / 4 / np.sqrt(0.5) / sqrt2pi\n assert gb(2) == pytest.approx(x2)\n x0 = np.sum(np.exp([-1, -4, -9, -16])) / 4 / np.sqrt(0.5) / sqrt2pi\n assert gb(0) == pytest.approx(x0)\n np.testing.assert_allclose(gb([0]), [x0])\n np.testing.assert_allclose(gb([0,2,5,2,5,0]), [x0,x2,x5,x2,x5,x0])\n\ndef test_GaussianBase_eval_with_weights():\n gb = testmod.GaussianBase([1,2,3,4])\n assert gb.covariance_matrix[0,0] == pytest.approx(20/12)\n \n gb.covariance_matrix = 1.0\n gb.bandwidth = 1.0\n gb.weights = [0,1,20,30]\n x5 = np.sum(np.exp([-16/2, -9/2, -4/2, -1/2]) * [0,1,20,30]) / 51 / sqrt2pi\n assert gb(5) == pytest.approx(x5)\n x2 = np.sum(np.exp([-1/2, 0, -1/2, -4/2]) * [0,1,20,30]) / 51 / sqrt2pi\n assert gb(2) == pytest.approx(x2)\n x0 = np.sum(np.exp([-1/2, -4/2, -9/2, -16/2]) * [0,1,20,30]) / 51 / sqrt2pi\n assert gb(0) == pytest.approx(x0)\n np.testing.assert_allclose(gb([0]), [x0])\n np.testing.assert_allclose(gb([0,2,5,2,5,0]), [x0,x2,x5,x2,x5,x0])\n\ndef test_GaussianBase_eval_with_bandwidths():\n gb = testmod.GaussianBase([1,2,3,4])\n assert gb.covariance_matrix[0,0] == pytest.approx(20/12)\n \n gb.covariance_matrix = 1.0\n gb.bandwidth = [0.5, 0.1, 0.7, 5]\n x5 = np.sum(np.exp([-16/2/(0.5**2), -9/2/(0.1**2), -4/2/(0.7**2), -1/2/(5**2)])\n / [0.5, 0.1, 0.7, 5] ) / 4 / sqrt2pi\n assert gb(5) == pytest.approx(x5)\n x3 = np.sum(np.exp([-4/2/(0.5**2), -1/2/(0.1**2), 0, -1/2/(5**2)])\n / [0.5, 0.1, 0.7, 5] ) / 4 / sqrt2pi\n assert gb(3) == pytest.approx(x3)\n np.testing.assert_allclose(gb([3,5,3]), [x3,x5,x3])\n \n with pytest.raises(ValueError):\n gb.bandwidth = [[0.5, 0.1], [0.7, 5]]\n\ndef test_GaussianBase_eval_2d():\n gb = testmod.GaussianBase([[1,2,3,4],[1,3,7,5]])\n gb.covariance_matrix = [[1,0],[0,1]]\n gb.bandwidth = 1.0\n \n with pytest.raises(ValueError):\n gb(5)\n with pytest.raises(ValueError):\n gb([1,2,3])\n \n x0 = np.sum(np.exp([-1/2, -2/2, -29/2, -18/2])) / 4 / sqrt2pi / sqrt2pi\n assert gb([1,2]) == pytest.approx(x0)\n \n gb.bandwidth = 2.0\n x0 
= np.sum(np.exp([-1/2/4, -2/2/4, -29/2/4, -18/2/4])) / 4 / 4 / sqrt2pi / sqrt2pi\n assert gb([1,2]) == pytest.approx(x0)\n\ndef test_GaussianBase_agrees_with_scipy():\n data = np.random.random(size=100)\n gb = testmod.GaussianBase(data)\n kernel = stats.kde.gaussian_kde(data, bw_method=\"scott\")\n \n pts = np.random.random(size=50)\n np.testing.assert_allclose(gb(pts), kernel(pts))\n\n gb = testmod.GaussianBase(data)\n gb.bandwidth = \"silverman\"\n kernel = stats.kde.gaussian_kde(data, bw_method=\"silverman\")\n np.testing.assert_allclose(gb(pts), kernel(pts))\n\ndef test_GaussianBase_agrees_with_scipy_nd():\n for n in range(2,5):\n data = np.random.random(size=(n, 100))\n gb = testmod.GaussianBase(data)\n kernel = stats.kde.gaussian_kde(data, bw_method=\"scott\")\n \n pts = np.random.random(size=(n, 50))\n np.testing.assert_allclose(gb(pts), kernel(pts))\n \n gb = testmod.GaussianBase(data)\n gb.bandwidth = \"silverman\"\n kernel = stats.kde.gaussian_kde(data, bw_method=\"silverman\")\n np.testing.assert_allclose(gb(pts), kernel(pts))\n \ndef test_GaussianNearestNeighbour():\n data = np.random.random(size=20)\n gnn = testmod.GaussianNearestNeighbour(data)\n kernel = testmod.kth_nearest_neighbour_gaussian_kde(data)\n \n pts = np.random.random(size=50)\n np.testing.assert_allclose(gnn(pts), kernel(pts))\n\n for n in range(1,7):\n data = np.random.random(size=(n,100))\n gnn = testmod.GaussianNearestNeighbour(data)\n kernel = testmod.kth_nearest_neighbour_gaussian_kde(data)\n \n pts = np.random.random(size=(n,50))\n np.testing.assert_allclose(gnn(pts), kernel(pts))\n \ndef check_marginal_kernel(ker, axis=0):\n new_ker = testmod.marginalise_gaussian_kernel(ker, axis)\n \n import scipy.integrate\n def expect(x):\n def func(t):\n y = list(x)\n y.insert(axis, t)\n return ker(y)\n return scipy.integrate.quad(func, -10, 10)\n \n for _ in range(20):\n pt = np.random.random(2)\n val, error = expect(pt)\n assert np.abs(new_ker(pt) - val) <= val * 1e-5\n \ndef test_marginalise_gaussian_kernel():\n pts = np.random.random((3,20))\n ker = testmod.GaussianBase(pts)\n ker.covariance_matrix = np.diag([2,3,4])\n ker.bandwidth = 1.4\n check_marginal_kernel(ker, 0)\n\n ker = testmod.GaussianBase(pts)\n ker.covariance_matrix = np.diag([2,3,4])\n ker.bandwidth = 1.4\n ker.weights = np.random.random(20)\n check_marginal_kernel(ker, 0)\n\n ker = testmod.GaussianBase(pts)\n ker.covariance_matrix = np.diag([2,3,4])\n ker.bandwidth = np.random.random(20)\n ker.weights = np.random.random(20)\n check_marginal_kernel(ker, 0)\n\n ker = testmod.GaussianBase(pts)\n ker.covariance_matrix = np.diag([2,3,4])\n check_marginal_kernel(ker, 1)\n \[email protected]\ndef geometry_square():\n return shapely.geometry.Polygon([[0,0],[10,0], [10,10], [0,10]])\n\[email protected]\ndef gec1(geometry_square):\n data = [[1,9,9,1], [1,1,9,9]]\n data = np.asarray(data)\n assert data.shape == (2,4)\n return testmod.GaussianEdgeCorrect(data, geometry_square)\n\ndef test_GaussianEdgeCorrect_point_inside(gec1):\n assert gec1.point_inside(0, 0)\n assert gec1.point_inside(10, 0)\n assert gec1.point_inside(9, 9)\n assert not gec1.point_inside(-1, 0)\n assert not gec1.point_inside(0, -1)\n assert not gec1.point_inside(11, 11)\n\ndef test_GaussianEdgeCorrect_agrees_with_GaussianBase(gec1):\n gb = testmod.GaussianBase(gec1.data)\n pts = np.random.random((2,10)) * np.asarray([10,10])[:,None]\n np.testing.assert_allclose(gb(pts), gec1(pts))\n\ndef test_GaussianEdgeCorrect_halfS(gec1):\n gb = testmod.GaussianBase(gec1.data)\n hS = 
scipy.linalg.inv(gb.covariance_matrix)\n hS = scipy.linalg.fractional_matrix_power(hS, 0.5)\n np.testing.assert_allclose(gec1.half_S, hS)\n\ndef test_GaussianEdgeCorrect_transformed_geometry(gec1):\n hS = gec1.half_S\n pts = np.asarray([[0,10,10,0], [0,0,10,10]])\n pts = np.dot(hS, pts)\n\n got = np.asarray(gec1.transformed_geometry.exterior)\n assert got.shape == (5, 2)\n got = got[:4,:]\n np.testing.assert_allclose(pts.T, got)\n\ndef _make_sample_points(h, m=10, k=100):\n expected_points = []\n for i in range(1, m+1):\n r = np.sqrt(-2 * h * h * (np.log(m-i+0.5) - np.log(m)))\n for a in range(k):\n angle = a * 2 * np.pi / k\n x, y = r * np.cos(angle), r * np.sin(angle)\n expected_points.append([x, y])\n return np.asarray(expected_points)\n\ndef test_GaussianEdgeCorrect_edge_sample_points(gec1):\n expected_points = _make_sample_points(h=gec1.bandwidth)\n\n def expected_pts(x, y):\n pt = np.dot(gec1.half_S, np.asarray([x,y]))\n return expected_points + pt\n\n for _ in range(10):\n x, y = np.random.random(2) * [10, 10]\n print(\"Possibly we don't expect the _order_ to be the same...\")\n np.testing.assert_allclose(gec1.edge_sample_points([x,y]), expected_pts(x, y))\n\ndef test_GaussianEdgeCorrect_number_intersecting_pts(gec1):\n for _ in range(10):\n pt = np.random.random(2) * [10, 10]\n got = gec1.number_intersecting_pts(pt)\n pts = shapely.geometry.MultiPoint(gec1.edge_sample_points(pt)).intersection(gec1.transformed_geometry)\n assert len(pts) == got\n\ndef test_GaussianEdgeCorrect_correction_factor(gec1):\n for _ in range(10):\n pt = np.random.random(2) * [10, 10]\n got = gec1.correction_factor(pt)\n expected = gec1.number_intersecting_pts(pt) / (gec1._m * gec1._k)\n assert expected == pytest.approx(got)\n \n pts = np.random.random((100,2)) * [10, 10]\n got = gec1.correction_factor(pts.T)\n expected = [gec1.correction_factor(pt) for pt in pts]\n np.testing.assert_allclose(got, expected)\n \[email protected]\ndef masked_grid():\n mask = np.random.random((10,20)) <= 0.5\n return open_cp.data.MaskedGrid(10, 15, 5, 7, mask)\n\[email protected]\ndef gecg1(masked_grid):\n data = [[10,90,90,10], [10,10,90,90]]\n data = np.asarray(data)\n assert data.shape == (2,4)\n return testmod.GaussianEdgeCorrectGrid(data, masked_grid)\n\ndef test_GaussianEdgeCorrectGrid_pts_to_grid_space(gecg1):\n expected_points = _make_sample_points(h=gecg1.bandwidth)\n pt = np.asarray([1,2])\n S = scipy.linalg.fractional_matrix_power(gecg1.covariance_matrix, 0.5)\n expected_points = np.dot(S, expected_points.T)\n expected_points = (expected_points.T + pt - [5,7] ) / [10,15]\n np.testing.assert_allclose(gecg1.points_to_grid_space(pt), np.floor(expected_points))\n assert expected_points.shape == (1000, 2)\n\ndef test_GaussianEdgeCorrectGrid_number_intersecting_pts(gecg1, masked_grid):\n pt = [1,2]\n got = gecg1.number_intersecting_pts([1,2])\n expected = 0\n for gx, gy in gecg1.points_to_grid_space(pt):\n if gx >= 0 and gy >= 0 and gx < 20 and gy < 10 and not masked_grid.mask[gy][gx]:\n expected += 1\n assert got == expected\n\ndef test_GaussianEdgeCorrectGrid_correction_factor(gecg1):\n pt = np.asarray([[1,2,3], [4,5,6]])\n assert pt.shape == (2,3)\n expected = []\n for x, y in pt.T:\n expected.append(gecg1.correction_factor((x,y)))\n np.testing.assert_allclose(expected, gecg1.correction_factor(pt))\n\ndef _masked_grid_to_poly(mg):\n poly = None\n for x in range(mg.xextent):\n for y in range(mg.yextent):\n if mg.is_valid(x, y):\n xx = x * mg.xsize + mg.xoffset\n yy = y * mg.ysize + mg.yoffset\n p = [[xx,yy], 
[xx+mg.xsize,yy], [xx+mg.xsize,yy+mg.ysize], [xx,yy+mg.ysize]]\n p = shapely.geometry.Polygon(p)\n if poly is None:\n poly = p\n else:\n poly = poly.union(p)\n return poly\n\ndef test_GaussianEdgeCorrectGrid_vs_GaussianEdgeCorrect(gecg1):\n geo = _masked_grid_to_poly(gecg1.masked_grid)\n gec = testmod.GaussianEdgeCorrect(gecg1.data, geo)\n\n for _ in range(100):\n pt = np.random.random(2) * 100\n gpt = pt - [gecg1.masked_grid.xoffset, gecg1.masked_grid.yoffset]\n gpt = np.floor_divide(gpt, [gecg1.masked_grid.xsize, gecg1.masked_grid.ysize]).astype(np.int)\n if gecg1.masked_grid.mask[gpt[1], gpt[0]]:\n continue\n assert gec.number_intersecting_pts(pt) == gecg1.number_intersecting_pts(pt)\n assert gec.correction_factor(pt) == gecg1.correction_factor(pt)\n\n\ndef test_Reflect1D():\n def kernel(pts):\n return np.exp(-pts*pts)\n\n k = testmod.Reflect1D(kernel)\n\n assert k.kernel is kernel\n assert k(5) == pytest.approx(2*np.exp(-25))\n np.testing.assert_allclose(k([5,7]), 2*np.exp([-25, -49]))\n" ]
[ [ "numpy.sum", "numpy.diag", "numpy.asarray", "numpy.log", "numpy.abs", "numpy.testing.assert_array_equal", "numpy.cos", "numpy.empty_like", "scipy.stats.kde.gaussian_kde", "numpy.mean", "numpy.eye", "numpy.floor_divide", "numpy.arange", "numpy.std", "numpy.array", "numpy.empty", "numpy.floor", "numpy.exp", "numpy.random.random", "scipy.stats.norm", "numpy.product", "numpy.testing.assert_allclose", "numpy.sqrt", "numpy.sin", "numpy.dot" ] ]
eduardodut/Trabalho_final_estatistica_cd
[ "fbedbbea6bdd7a79e1d62030cde0fab4e93fc338", "fbedbbea6bdd7a79e1d62030cde0fab4e93fc338" ]
[ ".history/src/Simulador_20200712191028.py", ".history/src/Simulador_20200712172903.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom Matriz_esferica import Matriz_esferica\nfrom Individuo import Individuo, Fabrica_individuo\nimport random\nfrom itertools import permutations \nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom scipy.sparse import csr_matrix, lil_matrix\nimport math\n\nimport copy\n\n\nclass Simulador():\n SADIO = 0\n INFECTADO_TIPO_1 = 1 #assintomáticos e o infectado inicial\n INFECTADO_TIPO_2 = 2 #sintomático\n CURADO = 3\n MORTO = 4\n\n def __init__(\n self,\n tamanho_matriz, #numero de linhas e colunas da matriz esférica\n percentual_inicial_tipo1, #percentual inicial da população que será infectada tipo 1\n percentual_inicial_tipo2, #percentual inicial da população que será infectada tipo 2\n chance_infeccao, #chance que um infectado tipo 2 tem de infectar um indivíduo saudável\n chance_infeccao_tipo2, #chance de um indivíduo infectado se tornar contagioso\n chance_morte, #chance de um indivíduo tipo 2 morrer ao fim de uma atualização\n atualizacoes_cura): #número de atualizações necessárias para a cura de um indivíduo tipo 1 ou 2\n \n self.num_atualizacoes = 0 \n self.lista_infectados_tipo_2 = []\n self.lista_infectados_tipo_1 = []\n self.num_curados = 0\n self.num_mortos = 0\n\n self.chance_infeccao = chance_infeccao\n self.chance_infeccao_tipo2 = chance_infeccao_tipo2\n self.chance_morte = chance_morte\n self.atualizacoes_cura = atualizacoes_cura\n \n self.populacao_inicial = int(tamanho_matriz**2)\n self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)\n self.num_inicial_tipo1 = 1 + int(self.populacao_inicial * percentual_inicial_tipo1)\n self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)\n \n self.matriz_status = np.zeros((tamanho_matriz, tamanho_matriz),dtype= np.uint8)#lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8) #\n self.matriz_atualizacoes_cura = np.zeros((tamanho_matriz, tamanho_matriz),dtype= np.uint8)#lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8)#\n \n self.dict_resumo = {}\n\n #self.matriz_status = self.df_individuos.to_numpy()\n self.popular(tamanho_matriz)\n \n self.lista_matrizes_status = []\n \n\n #objeto que é responsável por validar a movimentação no grid n x n \n self.matriz_esferica = Matriz_esferica(tamanho_matriz)\n \n \n\n \n dict = {\n 'num_sadios':self.num_inicial_sadios,\n 'num_infect_t1':self.num_inicial_tipo1,\n 'num_infect_t2':self.num_inicial_tipo2,\n 'num_curados':0,\n 'num_mortos':0}\n \n \n \n #dataframe que guardará os resultados de cada atualização \n self.dataframe = pd.DataFrame(dict,index = [0])\n self.salvar_posicionamento()\n \n \n def criar_individuo(self, status, posicao):\n \n self.matriz_status[posicao[0], posicao[1]] = status\n if status == self.INFECTADO_TIPO_1 or status == self.INFECTADO_TIPO_2:\n self.matriz_atualizacoes_cura[posicao[0], posicao[1]] = self.atualizacoes_cura\n else:\n self.matriz_atualizacoes_cura[posicao[0], posicao[1]] = 0 \n \n\n def salvar_posicionamento(self):\n \n self.lista_matrizes_status.append(copy.deepcopy(self.matriz_status))\n \n\n def verificar_infeccao(self, lista_infectantes):\n lista_novos_infectados_tipo1 = []\n lista_novos_infectados_tipo2 = []\n #itera sobre sobre a lista de individuos que infectam e cada um realiza a tividade de infectar\n for indice_infectante in lista_infectantes: \n \n #busca os vizinhos do infectante atual\n lista_vizinhos = self.matriz_esferica.get_vizinhos(indice_infectante)\n \n #Para cada vizinho, se ele 
for sadio, é gerado um número aleatório para verificar se foi infectado\n for indice_vizinho in lista_vizinhos:\n \n #verificação de SADIO\n if self.verifica_status(indice_vizinho) == self.SADIO:\n #verificação do novo status\n novo_status = self.infectar(chance_infeccao, chance_infeccao_tipo2)\n #se for um infectado tipo 1\n if novo_status == Individuo.INFECTADO_TIPO_1:\n #adiciona na lista de novos tipo 1\n lista_novos_infectados_tipo1.append(indice_vizinho)\n if novo_status == Individuo.INFECTADO_TIPO_2:\n #adiciona na lista de novos tipo 2\n lista_novos_infectados_tipo2.append(indice_vizinho)\n \n \n return lista_novos_infectados_tipo1, lista_novos_infectados_tipo2\n \n def checagem_morte_individual(self, chance_morte):\n rng_morte = random.random()\n if rng_morte <= chance_morte:\n \n return self.MORTO\n else:\n return self.INFECTADO_TIPO_2\n\n def checar_cura_individual(self, indice):\n \n self.matriz_atualizacoes_cura[indice[0], indice[1]] = self.matriz_atualizacoes_cura[indice[0], indice[1]] - 1\n if self.matriz_atualizacoes_cura[indice[0], indice[1]] == 0:\n return self.CURADO\n else:\n return self.matriz_status[indice[0], indice[1]]\n\n def checagem_morte_lista(self, lista_infectantes):\n lista_mortos = []\n for indice_infectante in lista_infectantes:\n novo_status = self.checagem_morte_individual(self.chance_morte)\n if novo_status == Individuo.MORTO:\n lista_mortos.append(indice_infectante) \n return lista_mortos\n \n \n def checagem_cura_lista(self, lista_infectantes):\n lista_curados = []\n for indice_infectante in lista_infectantes:\n novo_status = self.checar_cura_individual(indice_infectante)\n if novo_status == Individuo.CURADO:\n lista_curados.append(indice_infectante)\n \n return lista_curados\n \n \n \n \n def iterar(self):\n\n #Verifica os novos infectados por infectantes do tipo 1 e 2\n #print(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)\n lista_novos_infectados_tipo1, lista_novos_infectados_tipo2 = self.verificar_infeccao(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)\n\n for indice in lista_novos_infectados_tipo1:\n self.criar_individuo(self.INFECTADO_TIPO_1, indice)\n for indice in lista_novos_infectados_tipo2:\n self.criar_individuo(self.INFECTADO_TIPO_2, indice)\n\n\n #Verifica morte dos infectados tipo 2\n lista_mortos = self.checagem_morte_lista(self.lista_infectados_tipo_2)\n #retira os indices dos individuos mortos da lista de infectados\n self.lista_infectados_tipo_2 = [indice for indice in self.lista_infectados_tipo_2 if indice not in lista_mortos]\n #Instancia individuos mortos na matriz\n for indice in lista_mortos:\n self.criar_individuo(self.MORTO, indice)\n #atualiza o número de mortos na matriz\n self.num_mortos = self.num_mortos + len(lista_mortos)\n\n #Verifica cura dos infectados tipo 1\n lista_curados_t1 = self.checagem_cura_lista(self.lista_infectados_tipo_1)\n\n #Verifica cura dos infectados tipo 2 \n lista_curados_t2 = self.checagem_cura_lista(self.lista_infectados_tipo_2 )\n\n #Instancia individuos mortos na matriz\n for indice in lista_curados_t1+lista_curados_t2:\n self.criar_individuo(self.CURADO, indice)\n\n #atualiza o número de curados na matriz\n self.num_curados = self.num_curados + len(lista_curados_t1 + lista_curados_t2)\n\n\n #Atualiza a lista de infectados após a cura dos individuos\n self.lista_infectados_tipo_1 = [indice for indice in self.lista_infectados_tipo_1 if indice not in lista_curados_t1]\n self.lista_infectados_tipo_2 = [indice for indice in self.lista_infectados_tipo_2 if indice not in 
lista_curados_t2]\n \n #movimentação \n nova_lista_t1 = [] \n for indice in self.lista_infectados_tipo_1:\n nova_lista_t1.append(self.mover_infectante(indice))\n self.lista_infectados_tipo_1 = nova_lista_t1\n #print(self.lista_infectados_tipo_1)\n nova_lista_t2 = [] \n for indice in self.lista_infectados_tipo_2:\n nova_lista_t2.append(self.mover_infectante(indice))\n self.lista_infectados_tipo_2 = nova_lista_t2\n #print(self.lista_infectados_tipo_2)\n\n \n \n \n # matriz_infectantes = matriz_infectantes[matriz_infectantes < 3]\n indices_infectados = list(zip(*np.where((self.matriz_status == 1) + (self.matriz_status == 2))))\n # indices_infectados = list(zip(*self.matriz_status.nonzero()))\n #indices_infectados = [indice for indice in indices_infectados if indice not in self.lista_infectados_tipo_1 + self.lista_infectados_tipo_2]\n # self.num_curados = 0\n #self.num_mortos = 0\n self.lista_infectados_tipo_1 = []\n self.lista_infectados_tipo_2 = []\n #novos_t1 = []\n #novos_t2 = []\n\n for indice in indices_infectados:\n #if indice not in self.lista_infectados_tipo_1 and indice not in self.lista_infectados_tipo_2:\n # print(indice)\n # print(self.matriz_status.shape)\n status = self.matriz_status[indice[0], indice[1]]\n if status == self.INFECTADO_TIPO_1:\n self.lista_infectados_tipo_1.append(indice)\n #novos_t1.append(indice)\n if status == self.INFECTADO_TIPO_2:\n self.lista_infectados_tipo_2.append(indice)\n #novos_t2.append(indice)\n \n\n #self.lista_infectados_tipo_1 = self.lista_infectados_tipo_1 + novos_t1 \n #self.lista_infectados_tipo_2 = self.lista_infectados_tipo_2 + novos_t2\n\n dict = {'num_sadios': self.populacao_inicial - len(self.lista_infectados_tipo_1) -len(self.lista_infectados_tipo_2) -self.num_curados-self.num_mortos,\n 'num_infect_t1': len(self.lista_infectados_tipo_1),\n 'num_infect_t2': len(self.lista_infectados_tipo_2),\n 'num_curados': self.num_curados,\n 'num_mortos': self.num_mortos}\n \n self.dataframe = self.dataframe.append(dict, ignore_index=True) \n\n self.salvar_posicionamento()\n\n #adiciona 1 ao número de atualizações realizadas na matriz\n self.num_atualizacoes +=1\n\n def infectar(self, chance_infeccao, chance_infeccao_tipo2):\n saida = Individuo.SADIO \n \n #número aleatório para chance de infectar o vizinho\n rng_infeccao = random.random()\n if rng_infeccao <= chance_infeccao:\n #número aleatório para chance de infecção tipo 1 ou 2\n rng_infeccao_tipo2 = random.random()\n if rng_infeccao_tipo2 <= chance_infeccao_tipo2:\n saida = Individuo.INFECTADO_TIPO_2\n else:\n saida = Individuo.INFECTADO_TIPO_1\n return saida\n \n def popular(self, tamanho_matriz):\n \n #lista de possíveis combinações de índices da matriz de dados\n permutacoes = permutations(list(range(tamanho_matriz)),2)\n #conversão para lista de tuplas(x,y)\n lista_indices = list(permutacoes)\n #embaralhamento dos índices\n random.shuffle(lista_indices)\n \n #cria o primeiro tipo1:\n indice = lista_indices.pop()\n self.criar_individuo(Individuo.INFECTADO_TIPO_1, indice)\n self.lista_infectados_tipo_1.append(indice)\n #cria o restante dos tipos 1\n for i in range(self.num_inicial_tipo1-1):\n indice = lista_indices.pop()\n self.criar_individuo(Individuo.INFECTADO_TIPO_1,indice)\n self.lista_infectados_tipo_1.append(indice)\n #cria o restante dos tipo 2:\n for indice in range(self.num_inicial_tipo2):\n indice = lista_indices.pop()\n self.criar_individuo(Individuo.INFECTADO_TIPO_2,indice)\n self.lista_infectados_tipo_2.append(indice)\n \n def trocar(self,matriz,ponto_ini,ponto_final):\n x_ini = 
ponto_ini[0]\n y_ini = ponto_ini[1]\n x_fin = ponto_final[0]\n y_fin = ponto_final[1]\n\n aux = matriz[x_fin,y_fin]\n matriz[x_fin,y_fin] = matriz[x_ini,y_ini]\n matriz[x_ini,y_ini] = aux\n \n\n def verifica_status(self, indice):\n return self.matriz_status[indice[0], indice[1]]\n\n def mover_infectante(self, posicao_inicial):\n pos_x, pos_y = posicao_inicial[0], posicao_inicial[1]\n rng_posicao = random.random()\n if rng_posicao <=0.25:\n #move pra cima\n pos_x -= 1\n elif rng_posicao <=0.5:\n #move pra baixo\n pos_x += 1\n elif rng_posicao <=0.75:\n #move para esquerda\n pos_y -= 1\n else:\n #move para direita\n pos_y += 1\n \n posicao_final= self.matriz_esferica.valida_ponto_matriz(pos_x, pos_y)\n \n\n self.trocar(self.matriz_status, posicao_inicial, posicao_final)\n self.trocar(self.matriz_atualizacoes_cura, posicao_inicial, posicao_final)\n return posicao_final\n \n def executar_simulacao(self):\n while (self.dataframe.iloc[-1]['num_infect_t1']+self.dataframe.iloc[-1]['num_infect_t2']) > 0:\n self.iterar() \n \n num_sadios_min = self.dataframe.iloc[-1]['num_sadios']\n #descobre linha que ocorreu o máximo de infectados\n indice_infeccao_maxima = self.dataframe[self.dataframe.num_sadios == num_sadios_min].index[0]\n \n metade_infeccao_maxima = math.ceil(indice_infeccao_maxima/2)\n \n\n self.dict_resumo = {\n \"pop_inicial\": self.populacao_inicial,\n \"tipo1_inicial\":self.dataframe.iloc[0]['num_infect_t1'],\n \"tipo2_inicial\":self.dataframe.iloc[0]['num_infect_t2'],\n \"n/2_100%_infectados\":metade_infeccao_maxima,\n \"tipo1_n/2\":self.dataframe.iloc[metade_infeccao_maxima]['num_infect_t1'],\n \"tipo2_n/2\":self.dataframe.iloc[metade_infeccao_maxima]['num_infect_t2'],\n \"curados_n/2\":self.dataframe.iloc[metade_infeccao_maxima]['num_curados'],\n \"mortos_n/2\":self.dataframe.iloc[metade_infeccao_maxima]['num_mortos'],\n \"n_atualizacoes_100%_infectados\":indice_infeccao_maxima,\n \"tipo1_n\":self.dataframe.iloc[indice_infeccao_maxima]['num_infect_t1'],\n \"tipo2_n\":self.dataframe.iloc[indice_infeccao_maxima]['num_infect_t2'],\n \"curados_n\":self.dataframe.iloc[indice_infeccao_maxima]['num_curados'],\n \"mortos_n\":self.dataframe.iloc[indice_infeccao_maxima]['num_mortos'],\n \"numero_total_atualizacoes\":self.dataframe.shape[0],\n \"sadios_final\":self.dataframe.iloc[-1]['num_sadios'],\n \"curados_final\":self.dataframe.iloc[-1]['num_curados'],\n \"mortos_final\":self.dataframe.iloc[-1]['num_mortos']\n }\n \nproporcao_inicial_infectados = 0.8*random.random()\nproporcao_t1 = random.random()\nprint(proporcao_inicial_infectados)\n\nchance_infeccao = 0.3 \nchance_infeccao_tipo2 = 0.6 \nchance_morte = 0.02 \natualizacoes_cura = 10 \npercentual_inicial_tipo1 = proporcao_t1*proporcao_inicial_infectados\npercentual_inicial_tipo2 = (1-proporcao_t1)*proporcao_inicial_infectados\n\nsim = Simulador(\n 100,\n percentual_inicial_tipo1, \n percentual_inicial_tipo2, \n chance_infeccao,\n chance_infeccao_tipo2,\n chance_morte,atualizacoes_cura)\n\n\ncmap = ListedColormap(['w', 'y', 'r', 'blue', 'black'])\n#\n\nsim.executar_simulacao() \nprint(sim.dataframe)\npd.DataFrame(sim.dict_resumo, index=[0]).head()\nplt.matshow(sim.lista_matrizes_status[-1], cmap = cmap, vmin= 0, vmax = 4)\nplt.show()", "import pandas as pd\nimport numpy as np\nfrom Matriz_esferica import Matriz_esferica\nfrom Individuo import Individuo, Fabrica_individuo\nimport random\nfrom itertools import permutations \nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom scipy.sparse import csr_matrix, 
lil_matrix\nimport math\n\n\n\n\nclass Simulador():\n SADIO = 0\n INFECTADO_TIPO_1 = 1 #assintomáticos e o infectado inicial\n INFECTADO_TIPO_2 = 2 #sintomático\n CURADO = 3\n MORTO = 4\n\n def __init__(\n self,\n tamanho_matriz, #numero de linhas e colunas da matriz esférica\n percentual_inicial_tipo1, #percentual inicial da população que será infectada tipo 1\n percentual_inicial_tipo2, #percentual inicial da população que será infectada tipo 2\n chance_infeccao, #chance que um infectado tipo 2 tem de infectar um indivíduo saudável\n chance_infeccao_tipo2, #chance de um indivíduo infectado se tornar contagioso\n chance_morte, #chance de um indivíduo tipo 2 morrer ao fim de uma atualização\n atualizacoes_cura): #número de atualizações necessárias para a cura de um indivíduo tipo 1 ou 2\n \n self.num_atualizacoes = 0 \n self.lista_infectados_tipo_2 = []\n self.lista_infectados_tipo_1 = []\n self.num_curados = 0\n self.num_mortos = 0\n\n self.chance_infeccao = chance_infeccao\n self.chance_infeccao_tipo2 = chance_infeccao_tipo2\n self.chance_morte = chance_morte\n self.atualizacoes_cura = atualizacoes_cura\n \n self.populacao_inicial = int(tamanho_matriz**2)\n self.num_inicial_tipo2 = int(self.populacao_inicial * percentual_inicial_tipo2)\n self.num_inicial_tipo1 = 1 + int(self.populacao_inicial * percentual_inicial_tipo1)\n self.num_inicial_sadios = self.populacao_inicial - (self.num_inicial_tipo2 + self.num_inicial_tipo1)\n \n self.matriz_status = np.zeros((tamanho_matriz, tamanho_matriz),dtype= np.uint8)#lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8) #\n self.matriz_atualizacoes_cura = np.zeros((tamanho_matriz, tamanho_matriz),dtype= np.uint8)#lil_matrix((tamanho_matriz, tamanho_matriz),dtype= np.uint8)#\n\n\n\n #self.matriz_status = self.df_individuos.to_numpy()\n self.popular(tamanho_matriz)\n \n self.lista_matrizes_status = []\n \n\n #objeto que é responsável por validar a movimentação no grid n x n \n self.matriz_esferica = Matriz_esferica(tamanho_matriz)\n \n \n\n \n dict = {\n 'num_sadios':self.num_inicial_sadios,\n 'num_infect_t1':self.num_inicial_tipo1,\n 'num_infect_t2':self.num_inicial_tipo2,\n 'num_curados':0,\n 'num_mortos':0}\n \n \n \n #dataframe que guardará os resultados de cada atualização \n self.dataframe = pd.DataFrame(dict,index = [0])\n self.salvar_posicionamento()\n \n \n def criar_individuo(self, status, posicao):\n \n self.matriz_status[posicao[0], posicao[1]] = status\n if status == self.INFECTADO_TIPO_1 or status == self.INFECTADO_TIPO_2:\n self.matriz_atualizacoes_cura[posicao[0], posicao[1]] = self.atualizacoes_cura\n else:\n self.matriz_atualizacoes_cura[posicao[0], posicao[1]] = 0 \n \n\n def salvar_posicionamento(self):\n \n self.lista_matrizes_status.append(self.matriz_status)\n \n\n def verificar_infeccao(self, lista_infectantes):\n lista_novos_infectados_tipo1 = []\n lista_novos_infectados_tipo2 = []\n #itera sobre sobre a lista de individuos que infectam e cada um realiza a tividade de infectar\n for indice_infectante in lista_infectantes: \n \n #busca os vizinhos do infectante atual\n lista_vizinhos = self.matriz_esferica.get_vizinhos(indice_infectante)\n \n #Para cada vizinho, se ele for sadio, é gerado um número aleatório para verificar se foi infectado\n for indice_vizinho in lista_vizinhos:\n \n #verificação de SADIO\n if self.verifica_status(indice_vizinho) == self.SADIO:\n #verificação do novo status\n novo_status = self.infectar(chance_infeccao, chance_infeccao_tipo2)\n #se for um infectado tipo 1\n if novo_status == 
Individuo.INFECTADO_TIPO_1:\n #adiciona na lista de novos tipo 1\n lista_novos_infectados_tipo1.append(indice_vizinho)\n if novo_status == Individuo.INFECTADO_TIPO_2:\n #adiciona na lista de novos tipo 2\n lista_novos_infectados_tipo2.append(indice_vizinho)\n \n \n return lista_novos_infectados_tipo1, lista_novos_infectados_tipo2\n \n def checagem_morte_individual(self, chance_morte):\n rng_morte = random.random()\n if rng_morte <= chance_morte:\n \n return self.MORTO\n else:\n return self.INFECTADO_TIPO_2\n\n def checar_cura_individual(self, indice):\n \n self.matriz_atualizacoes_cura[indice[0], indice[1]] = self.matriz_atualizacoes_cura[indice[0], indice[1]] - 1\n if self.matriz_atualizacoes_cura[indice[0], indice[1]] == 0:\n return self.CURADO\n else:\n return self.matriz_status[indice[0], indice[1]]\n\n def checagem_morte_lista(self, lista_infectantes):\n lista_mortos = []\n for indice_infectante in lista_infectantes:\n novo_status = self.checagem_morte_individual(self.chance_morte)\n if novo_status == Individuo.MORTO:\n lista_mortos.append(indice_infectante) \n return lista_mortos\n \n \n def checagem_cura_lista(self, lista_infectantes):\n lista_curados = []\n for indice_infectante in lista_infectantes:\n novo_status = self.checar_cura_individual(indice_infectante)\n if novo_status == Individuo.CURADO:\n lista_curados.append(indice_infectante)\n \n return lista_curados\n \n \n \n \n def iterar(self):\n\n #Verifica os novos infectados por infectantes do tipo 1 e 2\n #print(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)\n lista_novos_infectados_tipo1, lista_novos_infectados_tipo2 = self.verificar_infeccao(self.lista_infectados_tipo_1+self.lista_infectados_tipo_2)\n\n for indice in lista_novos_infectados_tipo1:\n self.criar_individuo(self.INFECTADO_TIPO_1, indice)\n for indice in lista_novos_infectados_tipo2:\n self.criar_individuo(self.INFECTADO_TIPO_2, indice)\n\n\n #Verifica morte dos infectados tipo 2\n lista_mortos = self.checagem_morte_lista(self.lista_infectados_tipo_2)\n #retira os indices dos individuos mortos da lista de infectados\n self.lista_infectados_tipo_2 = [indice for indice in self.lista_infectados_tipo_2 if indice not in lista_mortos]\n #Instancia individuos mortos na matriz\n for indice in lista_mortos:\n self.criar_individuo(self.MORTO, indice)\n #atualiza o número de mortos na matriz\n self.num_mortos = self.num_mortos + len(lista_mortos)\n\n #Verifica cura dos infectados tipo 1\n lista_curados_t1 = self.checagem_cura_lista(self.lista_infectados_tipo_1)\n\n #Verifica cura dos infectados tipo 2 \n lista_curados_t2 = self.checagem_cura_lista(self.lista_infectados_tipo_2 )\n\n #Instancia individuos mortos na matriz\n for indice in lista_curados_t1+lista_curados_t2:\n self.criar_individuo(self.CURADO, indice)\n\n #atualiza o número de curados na matriz\n self.num_curados = self.num_curados + len(lista_curados_t1 + lista_curados_t2)\n\n\n #Atualiza a lista de infectados após a cura dos individuos\n self.lista_infectados_tipo_1 = [indice for indice in self.lista_infectados_tipo_1 if indice not in lista_curados_t1]\n self.lista_infectados_tipo_2 = [indice for indice in self.lista_infectados_tipo_2 if indice not in lista_curados_t2]\n \n #movimentação \n nova_lista_t1 = [] \n for indice in self.lista_infectados_tipo_1:\n nova_lista_t1.append(self.mover_infectante(indice))\n self.lista_infectados_tipo_1 = nova_lista_t1\n #print(self.lista_infectados_tipo_1)\n nova_lista_t2 = [] \n for indice in self.lista_infectados_tipo_2:\n 
nova_lista_t2.append(self.mover_infectante(indice))\n self.lista_infectados_tipo_2 = nova_lista_t2\n #print(self.lista_infectados_tipo_2)\n\n \n \n \n # matriz_infectantes = matriz_infectantes[matriz_infectantes < 3]\n indices_infectados = list(zip(*np.where((self.matriz_status == 1) + (self.matriz_status == 2))))\n # indices_infectados = list(zip(*self.matriz_status.nonzero()))\n #indices_infectados = [indice for indice in indices_infectados if indice not in self.lista_infectados_tipo_1 + self.lista_infectados_tipo_2]\n # self.num_curados = 0\n #self.num_mortos = 0\n self.lista_infectados_tipo_1 = []\n self.lista_infectados_tipo_2 = []\n #novos_t1 = []\n #novos_t2 = []\n\n for indice in indices_infectados:\n #if indice not in self.lista_infectados_tipo_1 and indice not in self.lista_infectados_tipo_2:\n # print(indice)\n # print(self.matriz_status.shape)\n status = self.matriz_status[indice[0], indice[1]]\n if status == self.INFECTADO_TIPO_1:\n self.lista_infectados_tipo_1.append(indice)\n #novos_t1.append(indice)\n if status == self.INFECTADO_TIPO_2:\n self.lista_infectados_tipo_2.append(indice)\n #novos_t2.append(indice)\n \n\n #self.lista_infectados_tipo_1 = self.lista_infectados_tipo_1 + novos_t1 \n #self.lista_infectados_tipo_2 = self.lista_infectados_tipo_2 + novos_t2\n\n dict = {'num_sadios': self.populacao_inicial - len(self.lista_infectados_tipo_1) -len(self.lista_infectados_tipo_2) -self.num_curados-self.num_mortos,\n 'num_infect_t1': len(self.lista_infectados_tipo_1),\n 'num_infect_t2': len(self.lista_infectados_tipo_2),\n 'num_curados': self.num_curados,\n 'num_mortos': self.num_mortos}\n \n self.dataframe = self.dataframe.append(dict, ignore_index=True) \n\n self.salvar_posicionamento()\n\n #adiciona 1 ao número de atualizações realizadas na matriz\n self.num_atualizacoes +=1\n\n def infectar(self, chance_infeccao, chance_infeccao_tipo2):\n saida = Individuo.SADIO \n \n #número aleatório para chance de infectar o vizinho\n rng_infeccao = random.random()\n if rng_infeccao <= chance_infeccao:\n #número aleatório para chance de infecção tipo 1 ou 2\n rng_infeccao_tipo2 = random.random()\n if rng_infeccao_tipo2 <= chance_infeccao_tipo2:\n saida = Individuo.INFECTADO_TIPO_2\n else:\n saida = Individuo.INFECTADO_TIPO_1\n return saida\n \n def popular(self, tamanho_matriz):\n \n #lista de possíveis combinações de índices da matriz de dados\n permutacoes = permutations(list(range(tamanho_matriz)),2)\n #conversão para lista de tuplas(x,y)\n lista_indices = list(permutacoes)\n #embaralhamento dos índices\n random.shuffle(lista_indices)\n \n #cria o primeiro tipo1:\n indice = lista_indices.pop()\n self.criar_individuo(Individuo.INFECTADO_TIPO_1, indice)\n self.lista_infectados_tipo_1.append(indice)\n #cria o restante dos tipos 1\n for i in range(self.num_inicial_tipo1-1):\n indice = lista_indices.pop()\n self.criar_individuo(Individuo.INFECTADO_TIPO_1,indice)\n self.lista_infectados_tipo_1.append(indice)\n #cria o restante dos tipo 2:\n for indice in range(self.num_inicial_tipo2):\n indice = lista_indices.pop()\n self.criar_individuo(Individuo.INFECTADO_TIPO_2,indice)\n self.lista_infectados_tipo_2.append(indice)\n \n def trocar(self,matriz,ponto_ini,ponto_final):\n x_ini = ponto_ini[0]\n y_ini = ponto_ini[1]\n x_fin = ponto_final[0]\n y_fin = ponto_final[1]\n\n aux = matriz[x_fin,y_fin]\n matriz[x_fin,y_fin] = matriz[x_ini,y_ini]\n matriz[x_ini,y_ini] = aux\n \n\n def verifica_status(self, indice):\n return self.matriz_status[indice[0], indice[1]]\n\n def mover_infectante(self, 
posicao_inicial):\n pos_x, pos_y = posicao_inicial[0], posicao_inicial[1]\n rng_posicao = random.random()\n if rng_posicao <=0.25:\n #move pra cima\n pos_x -= 1\n elif rng_posicao <=0.5:\n #move pra baixo\n pos_x += 1\n elif rng_posicao <=0.75:\n #move para esquerda\n pos_y -= 1\n else:\n #move para direita\n pos_y += 1\n \n posicao_final= self.matriz_esferica.valida_ponto_matriz(pos_x, pos_y)\n \n\n self.trocar(self.matriz_status, posicao_inicial, posicao_final)\n self.trocar(self.matriz_atualizacoes_cura, posicao_inicial, posicao_final)\n return posicao_final\n \n def executar_simulacao(self):\n while (self.dataframe.iloc[-1]['num_infect_t1']+self.dataframe.iloc[-1]['num_infect_t2']) > 0:\n self.iterar() \n print(self.dataframe)\n num_sadios_min = self.dataframe.iloc[-1]['num_sadios']\n #descobre linha que ocorreu o máximo de infectados\n indice_infeccao_maxima = self.dataframe[self.dataframe.num_sadios == num_sadios_min].index[0]\n print(indice_infeccao_maxima)\n metade_infeccao_maxima = round(indice_infeccao_maxima/2)\n\n \nproporcao_inicial_infectados = random.random()\nproporcao_t1 = random.random()\n\nchance_infeccao = 0.3 \nchance_infeccao_tipo2 = 0.2 \nchance_morte = 0.02 \natualizacoes_cura = 10 \npercentual_inicial_tipo1 = proporcao_t1*proporcao_inicial_infectados\npercentual_inicial_tipo2 = (1-proporcao_t1)*proporcao_inicial_infectados\n#print(\"% inicial t1: \",percentual_inicial_tipo1)\n#print(\"% inicial t2: \",percentual_inicial_tipo2)\n\nsim = Simulador(\n 10,\n percentual_inicial_tipo1, \n percentual_inicial_tipo2, \n chance_infeccao,\n chance_infeccao_tipo2,\n chance_morte,atualizacoes_cura)\n\n#print(sim.lista_matrizes_posicionamento[0])\n#print(sim.lista_infectados_tipo_2)\n#print(sim.lista_infectados_tipo_1)\ncmap = ListedColormap(['w', 'y', 'r', 'blue', 'black'])\n\nsim.executar_simulacao() \n \n# for i in range(30):\n# #plt.matshow(sim.lista_matrizes_status[i].toarray(), cmap = cmap, vmin= 0, vmax = 4)\n# sim.iterar()\n# print(sim.dataframe) \n# plt.show()\n\n\n \n" ]
[ [ "numpy.zeros", "matplotlib.colors.ListedColormap", "pandas.DataFrame", "matplotlib.pyplot.show", "matplotlib.pyplot.matshow", "numpy.where" ], [ "numpy.zeros", "numpy.where", "pandas.DataFrame", "matplotlib.colors.ListedColormap" ] ]
arvindershinh/DevnagriLipi
[ "77539f2ecae68809bea5286a2113f1b723ae0a0f" ]
[ "DevnagriLipiTrainer/Archive/TensorFlow4_ConvNN - V6.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 28 22:26:40 2018\n\n@author: Arvinder Shinh\n\"\"\"\nimport tensorflow as tf\nfrom PIL import Image\nimport numpy as np\nimport os\nfrom tensorflow import saved_model as sm\n\n\nimageFiles=os.listdir('image')\n\nSerializedImgContainer=[]\nLabelContainer=[]\n\nfor f in imageFiles:\n if f.endswith('.jpg'):\n fname, fext = os.path.splitext(f)\n label = 0 if fname == 'ka' else 1\n image=Image.open(os.path.join('image',f))\n image=image.resize((28,28))\n image=image.convert(mode='L')\n image=np.array(image).reshape((28,28,1))\n image=image.tostring()\n \n FloatList1=tf.train.FloatList(value=image) \n \n SerializedImage=tf.train.Feature(float_list=FloatList1)\n \n Features_Map={'image': SerializedImage}\n Features=tf.train.Features(feature=Features_Map)\n Example=tf.train.Example(features=Features).SerializeToString()\n \n SerializedImgContainer.append(Example)\n LabelContainer.append(label)\n \n\nNum_Exp=4\na=np.random.randint(0,9,Num_Exp)\n\nlabels=np.zeros((Num_Exp,10),dtype=np.int32)\nfor i in range(Num_Exp-1):\n labels[i,a[i]]=1\n \n\nClassfy_Inputs=tf.placeholder(dtype=tf.string, name='Classfy_Inputs')\n\nFeature_trans={'image': tf.FixedLenFeature(shape=(784), dtype=tf.float32)}\ndata=tf.parse_example(Classfy_Inputs, Feature_trans)\n\nPredict_Inputs=tf.reshape(data['image'], shape=(-1,28,28,1), name='Predict_Inputs')\n\nTrain_Outputs=tf.placeholder(shape=(None,10),dtype=tf.float32, name='Labels')\n\ndata=tf.data.Dataset.from_tensor_slices({'x': Predict_Inputs, 'y': Train_Outputs})\ndata=data.shuffle(100).repeat().batch(5)\n\niterator=data.make_initializable_iterator()\n\nbatch=iterator.get_next()\nImageBatch=batch['x']\nLabelBatch=batch['y']\n\n\n'''Convolution Layer'''\ndef conv_layer(inputs, In, Out, name='conv'):\n with tf.name_scope(name):\n w=tf.Variable(tf.random_normal((5,5,In,Out)), dtype=tf.float32, name='w')\n b=tf.Variable(tf.zeros((Out)), dtype=tf.float32, name='b')\n conv=tf.nn.conv2d(inputs,w,strides=[1,1,1,1],padding='SAME')\n activation=tf.nn.relu(conv+b)\n tf.summary.histogram(name+'_w_kernal', w)\n tf.summary.histogram(name+'_b_kernal', b)\n tf.summary.histogram(name+'_activation', activation)\n return activation\n \n'''Pooling Layer'''\ndef pooling_layer(inputs, name='pooling'):\n with tf.name_scope(name):\n pooling=tf.nn.max_pool(inputs,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n return pooling\n \n'''Dense Layer'''\ndef dense_layer(inputs, In, Out, name='Dense'):\n with tf.name_scope(name):\n w=tf.Variable(tf.random_normal((In,Out)), dtype=tf.float32, name='w')\n b=tf.Variable(tf.zeros((Out)), dtype=tf.float32, name='b')\n dense=tf.matmul(inputs,w)\n activation=tf.nn.relu(dense+b)\n tf.summary.histogram(name+'_w_kernal', w)\n tf.summary.histogram(name+'_b_kernal', b)\n tf.summary.histogram(name+'_activation', activation)\n return activation\n\ndef ConvolutionNN(learning_rate, Num_ConvLayer, Num_DenseLayer, HyperParaStr, name='HyperParameters'):\n\n with tf.name_scope(name): \n if Num_ConvLayer == 2 and Num_DenseLayer == 2:\n#In = [-1,28,28,1] #Out = [-1,28,28,32]\n conv1=conv_layer(ImageBatch, 1, 32, name='conv1')\n\n#In = [-1,28,28,32] #Out = [-1,14,14,32]\n pooling1=pooling_layer(conv1, name='pooling1')\n\n#In = [-1,14,14,32] #Out = [-1,14,14,64]\n conv2=conv_layer(pooling1, 32, 64, name='conv2')\n\n#In = [-1,14,14,64] #Out = [-1,7,7,64]\n pooling2=pooling_layer(conv2, name='pooling2')\n \n#In = [-1,7,7,64] #Out = [-1,7*7*64]\n flatImages=tf.reshape(pooling2, (-1,7*7*64))\n\n#In = [-1,7*7*64] #Out = 
[-1,1024]\n dense1=dense_layer(flatImages, 7*7*64, 1024, name='dense1')\n\n#In = [-1,1024] #Out = [-1,10]\n logits=dense_layer(dense1, 1024, 10, name='dense2')\n \n \n elif Num_ConvLayer == 1 and Num_DenseLayer == 2:\n#In = [-1,28,28,1] #Out = [-1,28,28,32]\n conv1=conv_layer(ImageBatch, 1, 32, name='conv1')\n\n#In = [-1,28,28,32] #Out = [-1,14,14,32]\n pooling1=pooling_layer(conv1, name='pooling1')\n \n#In = [-1,14,14,32] #Out = [-1,14*14*32]\n flatImages=tf.reshape(pooling1, (-1,14*14*32))\n\n#In = [-1,14*14*32] #Out = [-1,1024]\n dense1=dense_layer(flatImages, 14*14*32, 1024, name='dense1')\n\n#In = [-1,1024] #Out = [-1,10]\n logits=dense_layer(dense1, 1024, 10, name='dense2')\n \n \n Predict_Outputs = tf.nn.softmax(logits, 1, name='Predict_Outputs')\n Classify_Output_Scores, indices = tf.reduce_max(Predict_Outputs, 1, name='values'), tf.argmax(Predict_Outputs, 1, name='indices')\n \n table=tf.contrib.lookup.index_to_string_table_from_tensor(tf.constant([str(i) for i in range(10)]))\n Classify_Output_Classes=table.lookup(tf.to_int64(indices))\n\n \n with tf.name_scope('loss'):\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=LabelBatch))\n \n tf.summary.scalar('loss', loss)\n \n with tf.name_scope('train'):\n train=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n \n with tf.name_scope('accuracy'):\n accuracy=tf.reduce_mean((tf.cast(tf.equal(tf.argmax(logits,1),tf.argmax(LabelBatch,1)), tf.int32)))\n \n tf.summary.scalar('accuracy', accuracy)\n\n merger=tf.summary.merge_all()\n\n\n epocs=7 #100\n path=\"C:/Workspace/PythonProject/TensorFlow/TensorFlow_Learning/ConvNN/TensorBoard/\"+HyperParaStr\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(iterator.initializer, feed_dict={Classfy_Inputs: SerializedImgContainer, Train_Outputs: labels})\n \n writer=tf.summary.FileWriter(path)\n writer.add_graph(sess.graph)\n\n print('Active Hyper Parameter'+HyperParaStr)\n for i in range(epocs):\n if i%5==0 :\n Merger, Loss, Accuracy = sess.run((merger, loss, accuracy), feed_dict={Classfy_Inputs: SerializedImgContainer, Train_Outputs: labels})\n print('Loss {} Accuracy {}'.format(Loss, Accuracy))\n writer.add_summary(Merger,i)\n sess.run(train, feed_dict={Classfy_Inputs: SerializedImgContainer, Train_Outputs: labels})\n \n \n '''Serving''' \n Classify_Inputs_proto=sm.utils.build_tensor_info(Classfy_Inputs)\n Classify_Output_Classes_proto=sm.utils.build_tensor_info(Classify_Output_Classes)\n Classify_Output_Scores_proto=sm.utils.build_tensor_info(Classify_Output_Scores)\n \n ClassifySignatureDef=(sm.signature_def_utils.build_signature_def(\n inputs={sm.signature_constants.CLASSIFY_INPUTS: Classify_Inputs_proto},\n outputs={sm.signature_constants.CLASSIFY_OUTPUT_CLASSES: Classify_Output_Classes_proto,\n sm.signature_constants.CLASSIFY_OUTPUT_SCORES: Classify_Output_Scores_proto},\n method_name=sm.signature_constants.CLASSIFY_METHOD_NAME))\n \n Predict_Inputs_proto=sm.utils.build_tensor_info(Predict_Inputs)\n Predict_Outputs_proto=sm.utils.build_tensor_info(Predict_Outputs)\n \n PredictSignatureDef=(sm.signature_def_utils.build_signature_def(\n inputs={'image': Predict_Inputs_proto},\n outputs={'scores': Predict_Outputs_proto},\n method_name=sm.signature_constants.PREDICT_METHOD_NAME))\n \n ServingPath='Serving/'+name\n SavedModel=sm.builder.SavedModelBuilder(export_dir=ServingPath)\n SavedModel.add_meta_graph_and_variables(sess, \n [sm.tag_constants.SERVING],\n signature_def_map={'serving': 
PredictSignatureDef,\n sm.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: ClassifySignatureDef}, \n strip_default_attrs=True)\n \n SavedModel.save()\n\n\ndef HyperParameterStr(learning_rate,Num_ConvLayer,Num_DenseLayer):\n return \"LR= lr_%.0E,ConvLayer=%s,DenseLayer=%s\" % (learning_rate, Num_ConvLayer, Num_DenseLayer)\n\n\ndef main():\n learning_rates =[1e-4]\n i=0\n Num_ConvLayers=[1,2]\n Num_DenseLayers=[2]\n \n for learning_rate in learning_rates:\n for Num_ConvLayer in Num_ConvLayers:\n for Num_DenseLayer in Num_DenseLayers:\n i=i+1\n HyperParaStr=HyperParameterStr(learning_rate,Num_ConvLayer,Num_DenseLayer)\n ConvolutionNN(learning_rate, Num_ConvLayer, Num_DenseLayer, HyperParaStr, 'HyperParameter%s' % (i))\n \n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n \n \n \n" ]
[ [ "tensorflow.summary.scalar", "tensorflow.reduce_max", "tensorflow.reshape", "tensorflow.train.Feature", "tensorflow.train.FloatList", "tensorflow.matmul", "tensorflow.name_scope", "tensorflow.summary.FileWriter", "tensorflow.nn.softmax", "tensorflow.random_normal", "tensorflow.summary.histogram", "tensorflow.nn.max_pool", "tensorflow.global_variables_initializer", "tensorflow.parse_example", "tensorflow.train.Features", "tensorflow.FixedLenFeature", "tensorflow.saved_model.builder.SavedModelBuilder", "tensorflow.nn.relu", "numpy.zeros", "tensorflow.saved_model.utils.build_tensor_info", "tensorflow.Session", "tensorflow.placeholder", "tensorflow.zeros", "tensorflow.summary.merge_all", "tensorflow.to_int64", "tensorflow.train.AdamOptimizer", "tensorflow.saved_model.signature_def_utils.build_signature_def", "tensorflow.nn.conv2d", "tensorflow.argmax", "tensorflow.nn.softmax_cross_entropy_with_logits_v2", "numpy.array", "tensorflow.data.Dataset.from_tensor_slices", "numpy.random.randint", "tensorflow.train.Example" ] ]
google-research/ibc
[ "c2f6775418c3d7b1ffd0e822fc0050c834030d15" ]
[ "networks/layers/spectral_norm.py" ]
[ "# coding=utf-8\n# Copyright 2022 The Reach ML Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: disable=invalid-name\n# Keeping code style from original author (was given this class by imordatch@\n# in a colab notebook.)\n\"\"\"Keras layer for spectral norm.\n\nReference: https://arxiv.org/abs/1802.05957\nSpectral normalization ensures Lipschitz continuity of the model.\n\"\"\"\nimport tensorflow.compat.v2 as tf\nK = tf.keras.backend\n\n\nclass DenseSN(tf.keras.layers.Dense):\n \"\"\"Spectral norm dense layers.\"\"\"\n\n def build(self, input_shape):\n assert len(input_shape) >= 2\n input_dim = input_shape[-1]\n self.kernel = self.add_weight(shape=(input_dim, self.units),\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(\n shape=(self.units,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.u = self.add_weight(\n shape=tuple([1, self.kernel.shape.as_list()[-1]]),\n initializer=tf.keras.initializers.RandomNormal(0, 1),\n name='sn',\n trainable=False)\n self.input_spec = tf.keras.layers.InputSpec(\n min_ndim=2, axes={-1: input_dim})\n self.built = True\n\n def call(self, inputs, training=None):\n \"\"\"Forward the net.\"\"\"\n def _l2normalize(v, eps=1e-12):\n return v / (K.sum(v ** 2) ** 0.5 + eps)\n def power_iteration(W, u):\n _u = u\n _v = _l2normalize(K.dot(_u, K.transpose(W)))\n _u = _l2normalize(K.dot(_v, W))\n return _u, _v\n W_shape = self.kernel.shape.as_list()\n # Flatten the Tensor\n W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])\n _u, _v = power_iteration(W_reshaped, self.u)\n # Calculate Sigma\n sigma = K.dot(_v, W_reshaped)\n sigma = K.dot(sigma, K.transpose(_u))\n # normalize it\n W_bar = W_reshaped / sigma\n # reshape weight tensor\n if not training or training is None:\n W_bar = K.reshape(W_bar, W_shape)\n else:\n with tf.control_dependencies([self.u.assign(_u)]):\n W_bar = K.reshape(W_bar, W_shape)\n output = K.dot(inputs, W_bar)\n if self.use_bias:\n output = K.bias_add(output, self.bias, data_format='channels_last')\n if self.activation is not None:\n output = self.activation(output)\n return output\n" ]
[ [ "tensorflow.compat.v2.keras.initializers.RandomNormal", "tensorflow.compat.v2.keras.layers.InputSpec" ] ]
Beautyya/BenchENA
[ "776cd1dd035d73c4af369d0106d010b932f64782" ]
[ "algs/nsga_net/model/micro_encoding.py" ]
[ "# NASNet Search Space https://arxiv.org/pdf/1707.07012.pdf\n# code modified from DARTS https://github.com/quark0/darts\nimport numpy as np\nfrom collections import namedtuple\n\nimport torch\nfrom algs.nsga_net.model.micro_models import NetworkCIFAR as Network\n\nGenotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')\nGenotype_norm = namedtuple('Genotype', 'normal normal_concat')\nGenotype_redu = namedtuple('Genotype', 'reduce reduce_concat')\n\n# what you want to search should be defined here and in micro_operations\nPRIMITIVES = [\n 'max_pool_3x3',\n 'avg_pool_3x3',\n 'skip_connect',\n 'sep_conv_3x3',\n 'sep_conv_5x5',\n 'dil_conv_3x3',\n 'dil_conv_5x5',\n 'sep_conv_7x7',\n 'conv_7x1_1x7',\n]\n\n\ndef convert_cell(cell_bit_string):\n # convert cell bit-string to genome\n tmp = [cell_bit_string[i:i + 2] for i in range(0, len(cell_bit_string), 2)]\n return [tmp[i:i + 2] for i in range(0, len(tmp), 2)]\n\n\ndef convert(bit_string):\n # convert network bit-string (norm_cell + redu_cell) to genome\n norm_gene = convert_cell(bit_string[:len(bit_string)//2])\n redu_gene = convert_cell(bit_string[len(bit_string)//2:])\n return [norm_gene, redu_gene]\n\n\ndef decode_cell(genome, norm=True):\n\n cell, cell_concat = [], list(range(2, len(genome)+2))\n for block in genome:\n for unit in block:\n cell.append((PRIMITIVES[unit[0]], unit[1]))\n if unit[1] in cell_concat:\n cell_concat.remove(unit[1])\n\n if norm:\n return Genotype_norm(normal=cell, normal_concat=cell_concat)\n else:\n return Genotype_redu(reduce=cell, reduce_concat=cell_concat)\n\n\ndef decode(genome):\n # decodes genome to architecture\n normal_cell = genome[0]\n reduce_cell = genome[1]\n\n normal, normal_concat = [], list(range(2, len(normal_cell)+2))\n reduce, reduce_concat = [], list(range(2, len(reduce_cell)+2))\n\n for block in normal_cell:\n for unit in block:\n normal.append((PRIMITIVES[int(unit[0])], int(unit[1])))\n if unit[1] in normal_concat:\n normal_concat.remove(unit[1])\n\n for block in reduce_cell:\n for unit in block:\n reduce.append((PRIMITIVES[unit[0]], unit[1]))\n if unit[1] in reduce_concat:\n reduce_concat.remove(unit[1])\n\n return Genotype(\n normal=normal, normal_concat=normal_concat,\n reduce=reduce, reduce_concat=reduce_concat\n )\n\n\ndef compare_cell(cell_string1, cell_string2):\n cell_genome1 = convert_cell(cell_string1)\n cell_genome2 = convert_cell(cell_string2)\n cell1, cell2 = cell_genome1[:], cell_genome2[:]\n\n for block1 in cell1:\n for block2 in cell2:\n if block1 == block2 or block1 == block2[::-1]:\n cell2.remove(block2)\n break\n if len(cell2) > 0:\n return False\n else:\n return True\n\n\ndef compare(string1, string2):\n\n if compare_cell(string1[:len(string1)//2],\n string2[:len(string2)//2]):\n if compare_cell(string1[len(string1)//2:],\n string2[len(string2)//2:]):\n return True\n\n return False\n\n\ndef debug():\n # design to debug the encoding scheme\n seed = 0\n np.random.seed(seed)\n budget = 2000\n B, n_ops, n_cell = 5, 7, 2\n networks = []\n design_id = 1\n while len(networks) < budget:\n bit_string = []\n for c in range(n_cell):\n for b in range(B):\n bit_string += [np.random.randint(n_ops),\n np.random.randint(b + 2),\n np.random.randint(n_ops),\n np.random.randint(b + 2)\n ]\n\n genome = convert(bit_string)\n # check against evaluated networks in case of duplicates\n doTrain = True\n for network in networks:\n if compare(genome, network):\n doTrain = False\n break\n\n if doTrain:\n genotype = decode(genome)\n model = Network(16, 10, 8, False, genotype)\n 
model.drop_path_prob = 0.0\n data = torch.randn(1, 3, 32, 32)\n output, output_aux = model(torch.autograd.Variable(data))\n networks.append(genome)\n design_id += 1\n print(design_id)\n\n\nif __name__ == \"__main__\":\n bit_string1 = [3,1,3,0,3,1,3,0,3,1,2,0,2,0,5,2,0,0,0,1,2,2,0,1,0,0,2,2,2,2,0,1]\n\n" ]
[ [ "torch.randn", "torch.autograd.Variable", "numpy.random.seed", "numpy.random.randint" ] ]