repo_name
stringlengths
6
130
hexsha
list
file_path
list
code
list
apis
list
possible_versions
list
mxndrwgrdnr/activitysim
[ "722d6e36b2210d5d24dfa2ac4a3504c1e8f75336" ]
[ "activitysim/abm/models/atwork_subtour_scheduling.py" ]
[ "# ActivitySim\n# See full license in LICENSE.txt.\nimport logging\n\nimport pandas as pd\nimport numpy as np\n\nfrom activitysim.core import simulate\nfrom activitysim.core import tracing\nfrom activitysim.core import pipeline\nfrom activitysim.core import config\nfrom activitysim.core import inject\nfrom activitysim.core import timetable as tt\nfrom activitysim.core import expressions\n\nfrom .util.vectorize_tour_scheduling import vectorize_subtour_scheduling\n\nfrom .util import estimation\n\nfrom activitysim.core.util import assign_in_place\n\nlogger = logging.getLogger(__name__)\n\nDUMP = False\n\n\[email protected]()\ndef atwork_subtour_scheduling(\n tours,\n persons_merged,\n tdd_alts,\n skim_dict,\n chunk_size,\n trace_hh_id):\n \"\"\"\n This model predicts the departure time and duration of each activity for at work subtours tours\n \"\"\"\n\n trace_label = 'atwork_subtour_scheduling'\n model_settings_file_name = 'tour_scheduling_atwork.yaml'\n\n tours = tours.to_frame()\n subtours = tours[tours.tour_category == 'atwork']\n\n # - if no atwork subtours\n if subtours.shape[0] == 0:\n tracing.no_results(trace_label)\n return\n\n model_settings = config.read_model_settings(model_settings_file_name)\n estimator = estimation.manager.begin_estimation('atwork_subtour_scheduling')\n\n model_spec = simulate.read_model_spec(file_name=model_settings['SPEC'])\n coefficients_df = simulate.read_model_coefficients(model_settings)\n model_spec = simulate.eval_coefficients(model_spec, coefficients_df, estimator)\n\n persons_merged = persons_merged.to_frame()\n\n logger.info(\"Running %s with %d tours\", trace_label, len(subtours))\n\n # preprocessor\n constants = config.get_model_constants(model_settings)\n od_skim_wrapper = skim_dict.wrap('origin', 'destination')\n skims = {\n \"od_skims\": od_skim_wrapper,\n }\n expressions.annotate_preprocessors(\n subtours, constants, skims,\n model_settings, trace_label)\n\n # parent_tours table with columns ['tour_id', 'tdd'] index = tour_id\n parent_tour_ids = subtours.parent_tour_id.astype(np.int64).unique()\n parent_tours = pd.DataFrame({'tour_id': parent_tour_ids}, index=parent_tour_ids)\n parent_tours = parent_tours.merge(tours[['tdd']], left_index=True, right_index=True)\n\n if estimator:\n estimator.write_model_settings(model_settings, model_settings_file_name)\n estimator.write_spec(model_settings)\n estimator.write_coefficients(coefficients_df, model_settings)\n # we don't need to update timetable because subtours are scheduled inside work trip windows\n\n choices = vectorize_subtour_scheduling(\n parent_tours,\n subtours,\n persons_merged,\n tdd_alts, model_spec,\n model_settings,\n estimator=estimator,\n chunk_size=chunk_size,\n trace_label=trace_label)\n\n if estimator:\n estimator.write_choices(choices)\n choices = estimator.get_survey_values(choices, 'tours', 'tdd')\n estimator.write_override_choices(choices)\n estimator.end_estimation()\n\n # choices are tdd alternative ids\n # we want to add start, end, and duration columns to tours, which we have in tdd_alts table\n tdd_choices = pd.merge(choices.to_frame('tdd'), tdd_alts, left_on=['tdd'], right_index=True, how='left')\n\n assign_in_place(tours, tdd_choices)\n pipeline.replace_table(\"tours\", tours)\n\n if trace_hh_id:\n tracing.trace_df(tours[tours.tour_category == 'atwork'],\n label=\"atwork_subtour_scheduling\",\n slicer='person_id',\n index_label='tour_id',\n columns=None)\n\n if DUMP:\n subtours = tours[tours.tour_category == 'atwork']\n parent_tours = 
tours[tours.index.isin(subtours.parent_tour_id)]\n\n tracing.dump_df(DUMP, subtours, trace_label, 'sub_tours')\n tracing.dump_df(DUMP, parent_tours, trace_label, 'parent_tours')\n\n parent_tours['parent_tour_id'] = parent_tours.index\n subtours = pd.concat([parent_tours, subtours])\n tracing.dump_df(DUMP,\n tt.tour_map(parent_tours, subtours, tdd_alts,\n persons_id_col='parent_tour_id'),\n trace_label, 'tour_map')\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
willprice/shap
[ "e491b2c2332b1cf9d4d79b194a3f98fb7b9f2001" ]
[ "shap/plots/waterfall.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as pl\nfrom shap.plots import labels\nfrom shap.common import safe_isinstance\nfrom . import colors\n\n\ndef waterfall_plot(expected_value, shap_values, features=None, feature_names=None, max_display=10, show=True):\n \n # unwrap pandas series\n if safe_isinstance(features, \"pandas.core.series.Series\"):\n if feature_names is None:\n feature_names = list(features.index)\n features = features.values\n \n if feature_names is None:\n feature_names = np.array([labels['FEATURE'] % str(i) for i in range(len(shap_values))])\n \n num_features = min(max_display, len(shap_values))\n row_height=0.5\n rng = range(num_features - 1, -1, -1)\n order = np.argsort(-np.abs(shap_values))\n pos_lefts = []\n pos_inds = []\n pos_widths = []\n neg_lefts = []\n neg_inds = []\n neg_widths = []\n loc = expected_value + shap_values.sum()\n yticklabels = [\"\" for i in range(num_features + 1)]\n \n pl.gcf().set_size_inches(8, num_features * row_height + 1.5)\n\n if num_features == len(shap_values):\n num_individual = num_features\n else:\n num_individual = num_features - 1\n for i in range(num_individual):\n sval = shap_values[order[i]]\n loc -= sval\n if sval >= 0:\n pos_inds.append(rng[i])\n pos_widths.append(sval)\n pos_lefts.append(loc)\n else:\n neg_inds.append(rng[i])\n neg_widths.append(sval)\n neg_lefts.append(loc)\n if num_individual != num_features or i + 1 < num_individual:\n pl.plot([loc, loc], [rng[i] -1 - 0.4, rng[i] + 0.4], color=\"#bbbbbb\", linestyle=\"--\", linewidth=0.5, zorder=-1)\n if features is None:\n yticklabels[rng[i]] = feature_names[order[i]]\n else:\n yticklabels[rng[i]] = feature_names[order[i]] + \" = \" + str(features[order[i]])\n\n # add a last grouped feature to represent the impact of all the features we didn't show\n if num_features < len(shap_values):\n yticklabels[0] = \"%d other features\" % (len(shap_values) - num_features + 1)\n remaining_impact = expected_value - loc\n if remaining_impact < 0:\n c = colors.red_rgb\n else:\n c = colors.blue_rgb\n\n pl.barh([0], [remaining_impact], left=loc, color=c)\n \n # draw the bars\n pl.barh(pos_inds, pos_widths, left=pos_lefts, color=colors.red_rgb)\n pl.barh(neg_inds, neg_widths, left=neg_lefts, color=colors.blue_rgb)\n pl.yticks(range(num_features), yticklabels, fontsize=13)\n \n # put horizontal lines for each feature row\n for i in range(num_features):\n pl.axhline(i, color=\"#cccccc\", lw=0.5, dashes=(1, 5), zorder=-1)\n \n # mark the prior expected value and the model prediction\n pl.axvline(expected_value, 0, 1/num_features, color=\"#bbbbbb\", linestyle=\"--\", linewidth=0.5, zorder=-1)\n pl.axvline(expected_value + shap_values.sum(), 0, 1, color=\"#bbbbbb\", linestyle=\"--\", linewidth=0.5, zorder=-1)\n\n pl.gca().xaxis.set_ticks_position('bottom')\n pl.gca().yaxis.set_ticks_position('none')\n pl.gca().spines['right'].set_visible(False)\n pl.gca().spines['top'].set_visible(False)\n pl.gca().spines['left'].set_visible(False)\n #pl.gca().tick_params(color=, labelcolor=axis_color)\n pl.xlabel(\"Feature impact on the model output\", fontsize=13)\n\n if show:\n pl.show()" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.axvline", "matplotlib.pyplot.axhline", "numpy.abs", "matplotlib.pyplot.barh", "matplotlib.pyplot.gcf", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
apexrl/malib
[ "3785309e9b695ff359131fbbecabb6b5a52ef559" ]
[ "malib/backend/datapool/test/test_parameter_server.py" ]
[ "# -*- encoding: utf-8 -*-\n# -----\n# Created Date: 2021/7/16\n# Author: Hanjing Wang\n# -----\n# Last Modified:\n# Modified By:\n# -----\n# Copyright (c) 2020 MARL @ SJTU\n# -----\n\nimport os\nimport ray\nimport copy\nimport pytest\nimport torch\nimport time\n\nfrom malib.backend.datapool.parameter_server import (\n Parameter,\n ParameterDescription,\n ParameterDescription,\n ParameterServer,\n PARAMETER_TABLE_NAME_GEN,\n)\n\n\nclass MLP(torch.nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.layers = torch.nn.Sequential(\n torch.nn.Linear(in_channels, 64),\n torch.nn.ReLU(inplace=True),\n torch.nn.Linear(64, 64),\n torch.nn.ReLU(inplace=True),\n torch.nn.Linear(64, out_channels),\n )\n for p in self.layers.parameters():\n torch.nn.init.normal_(p)\n\n def forward(self, x):\n return self.layers(x)\n\n\ndef test_dump_and_load():\n mlp1 = MLP(in_channels=10, out_channels=20)\n mlp2 = MLP(in_channels=15, out_channels=20)\n\n x1 = torch.rand(size=(16, 10))\n x2 = torch.rand(size=(16, 15))\n with torch.no_grad():\n y1 = mlp1(x1)\n y2 = mlp2(x2)\n\n exp_cfg = {\"group\": \"test_parameter_server\", \"name\": \"dump_and_load\"}\n\n # dump\n ray.init(address=None)\n parameter_server_config = {\n # configuration for dumping parameters at /tmp/\n \"quit_job\": {\n \"dump_when_closed\": True,\n # must ended with slash to indicate it is a directory\n \"path\": \"/tmp/test_ps/\",\n }\n }\n parameter_server = ParameterServer.options(\n name=\"ParameterServer\", max_concurrency=1000\n ).remote(test_mode=True, **parameter_server_config, exp_cfg=exp_cfg)\n param_desc1 = ParameterDescription(\n time_stamp=time.time(),\n identify=\"test_agent_1\",\n env_id=\"test_env\",\n id=\"mlp1\",\n type=ParameterDescription.Type.PARAMETER,\n lock=False,\n description={\"registered_name\": \"MLP\"},\n data=None,\n )\n param_desc2 = copy.copy(param_desc1)\n\n param_desc1.data = mlp1.state_dict()\n expected_table_name1 = (\n PARAMETER_TABLE_NAME_GEN(\n env_id=param_desc1.env_id,\n agent_id=param_desc1.identify,\n pid=param_desc1.id,\n policy_type=param_desc1.description[\"registered_name\"],\n )\n + \".pkl\"\n )\n status = ray.get(parameter_server.push.remote(param_desc1))\n print(status)\n\n param_desc2.identify = \"test_agent_2\"\n param_desc2.id = \"mlp2\"\n param_desc2.data = mlp2.state_dict()\n expected_table_name2 = (\n PARAMETER_TABLE_NAME_GEN(\n env_id=param_desc2.env_id,\n agent_id=param_desc2.identify,\n pid=param_desc2.id,\n policy_type=param_desc2.description[\"registered_name\"],\n )\n + \".pkl\"\n )\n status = ray.get(parameter_server.push.remote(param_desc2))\n print(status)\n\n # wait for the ps to dump the data\n _ = ray.get(parameter_server.shutdown.remote())\n parameter_server = None\n # check the existence of dumped file\n files = os.listdir(parameter_server_config[\"quit_job\"][\"path\"])\n assert expected_table_name1 in files\n assert expected_table_name2 in files\n\n parameter_server_config.update(\n {\n # load the dumped parameters\n \"init_job\": {\n \"load_when_start\": True,\n \"path\": parameter_server_config[\"quit_job\"][\"path\"],\n },\n # clean the properties of quitting schedule\n \"quit_job\": {},\n }\n )\n parameter_server = ParameterServer.options(\n name=\"ParameterServerRec\", max_concurrency=1000\n ).remote(test_mode=True, **parameter_server_config, exp_cfg=exp_cfg)\n\n epsilon = 1e-8\n\n # clean data\n param_desc1.data = None\n status, mlp1_param = ray.get(\n parameter_server.pull.remote(param_desc1, keep_return=True)\n )\n assert 
mlp1_param.data\n mlp1.load_state_dict(mlp1_param.data)\n with torch.no_grad():\n y1_rec = mlp1(x1)\n res = torch.sub(y1, y1_rec)\n assert torch.all(res < epsilon).item()\n\n param_desc2.data = None\n status, mlp2_param = ray.get(\n parameter_server.pull.remote(param_desc2, keep_return=True)\n )\n mlp2.load_state_dict(mlp2_param.data)\n with torch.no_grad():\n y2_rec = mlp2(x2)\n res = torch.sub(y2, y2_rec)\n assert torch.all(res < epsilon).item()\n\n _ = ray.get(parameter_server.shutdown.remote())\n ray.shutdown()\n" ]
[ [ "torch.all", "torch.sub", "torch.nn.Linear", "torch.no_grad", "torch.rand", "torch.nn.init.normal_", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
satra/nibabel
[ "b04fe7c29e4c097edc9d519fccf1c804969232f1", "b04fe7c29e4c097edc9d519fccf1c804969232f1", "b04fe7c29e4c097edc9d519fccf1c804969232f1" ]
[ "nibabel/spm2analyze.py", "nibabel/tests/test_funcs.py", "nibabel/eulerangles.py" ]
[ "# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the NiBabel package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n''' Header reading functions for SPM2 version of analyze format '''\n\nimport numpy as np\n\nfrom .spatialimages import HeaderDataError\nfrom .batteryrunners import Report\nfrom . import spm99analyze as spm99 # module import\n\nimage_dimension_dtd = spm99.image_dimension_dtd[:]\nimage_dimension_dtd[\n image_dimension_dtd.index(('funused2', 'f4'))\n ] = ('scl_inter', 'f4')\n\n# Full header numpy dtype combined across sub-fields\nheader_dtype = np.dtype(spm99.header_key_dtd +\n image_dimension_dtd +\n spm99.data_history_dtd)\n\n\nclass Spm2AnalyzeHeader(spm99.Spm99AnalyzeHeader):\n ''' SPM2 header; adds possibility of reading, but not writing DC\n offset for data'''\n\n # Copies of module level definitions\n _dtype = header_dtype\n\n def get_slope_inter(self):\n ''' Get data scaling (slope) and offset (intercept) from header data\n\n Uses the algorithm from SPM2 spm_vol_ana.m by John Ashburner\n\n Parameters\n ----------\n self : header\n Mapping with fields:\n * scl_slope - slope\n * scl_inter - possible intercept (SPM2 use - shared by nifti)\n * glmax - the (recorded) maximum value in the data (unscaled)\n * glmin - recorded minimum unscaled value\n * cal_max - the calibrated (scaled) maximum value in the dataset\n * cal_min - ditto minimum value\n\n Returns\n -------\n scl_slope : None or float\n scaling (slope). None if there is no valid scaling from\n these fields\n scl_inter : None or float\n offset (intercept). Also None if there is no valid scaling,\n offset\n\n Examples\n --------\n >>> fields = {'scl_slope':1,'scl_inter':0,'glmax':0,'glmin':0,'cal_max':0, 'cal_min':0}\n >>> hdr = Spm2AnalyzeHeader()\n >>> for key, value in fields.items():\n ... 
hdr[key] = value\n >>> hdr.get_slope_inter()\n (1.0, 0.0)\n >>> hdr['scl_inter'] = 0.5\n >>> hdr.get_slope_inter()\n (1.0, 0.5)\n >>> hdr['scl_inter'] = np.nan\n >>> hdr.get_slope_inter()\n (1.0, 0.0)\n\n If 'scl_slope' is 0, nan or inf, cannot use 'scl_slope'.\n Without valid information in the gl / cal fields, we cannot get\n scaling, and return None\n\n >>> hdr['scl_slope'] = 0\n >>> hdr.get_slope_inter()\n (None, None)\n >>> hdr['scl_slope'] = np.nan\n >>> hdr.get_slope_inter()\n (None, None)\n\n Valid information in the gl AND cal fields are needed\n\n >>> hdr['cal_max'] = 0.8\n >>> hdr['cal_min'] = 0.2\n >>> hdr.get_slope_inter()\n (None, None)\n >>> hdr['glmax'] = 110\n >>> hdr['glmin'] = 10\n >>> np.allclose(hdr.get_slope_inter(), [0.6/100, 0.2-0.6/100*10])\n True\n '''\n # get scaling factor from 'scl_slope' (funused1)\n scale = float(self['scl_slope'])\n if np.isfinite(scale) and scale:\n # try to get offset from scl_inter\n dc_offset = float(self['scl_inter'])\n if not np.isfinite(dc_offset):\n dc_offset = 0.0\n return scale, dc_offset\n # no non-zero and finite scaling, try gl/cal fields\n unscaled_range = self['glmax'] - self['glmin']\n scaled_range = self['cal_max'] - self['cal_min']\n if unscaled_range and scaled_range:\n scale = float(scaled_range) / unscaled_range\n dc_offset = self['cal_min'] - scale * self['glmin']\n return scale, dc_offset\n return None, None\n\n @classmethod\n def _chk_scale(klass, hdr, fix=True):\n rep = Report(HeaderDataError)\n scale, offset = hdr.get_slope_inter()\n if not scale is None:\n return hdr, rep\n rep.problem_level = 30\n rep.problem_msg = ('no valid scaling in scalefactor (=%s) '\n 'or cal / gl fields; scalefactor assumed 1.0'\n % scale)\n if fix:\n hdr['scl_slope'] = 1\n rep.fix_msg = 'setting scalefactor \"scl_slope\" to 1'\n return hdr, rep\n\n\nclass Spm2AnalyzeImage(spm99.Spm99AnalyzeImage):\n header_class = Spm2AnalyzeHeader\n\n\nload = Spm2AnalyzeImage.load\nsave = Spm2AnalyzeImage.instance_to_filename\n", "# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the NiBabel package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n''' Test for image funcs '''\n\nfrom StringIO import StringIO\n\nimport numpy as np\n\nimport nibabel as nf\n\nfrom ..funcs import concat_images, as_closest_canonical, OrientationError\nfrom ..nifti1 import Nifti1Image\n\nfrom numpy.testing import assert_array_equal\nfrom nose.tools import (assert_true, assert_false,\n assert_equal, assert_raises)\nfrom ..testing import parametric\n\n\n@parametric\ndef test_concat():\n shape = (1,2,5)\n data0 = np.arange(10).reshape(shape)\n affine = np.eye(4)\n img0 = nf.Nifti1Image(data0, affine)\n data1 = data0 - 10\n img1 = nf.Nifti1Image(data1, affine)\n all_imgs = concat_images([img0, img1])\n all_data = np.concatenate(\n [data0[:,:,:,np.newaxis],data1[:,:,:,np.newaxis]],3)\n yield assert_array_equal(all_imgs.get_data(), all_data)\n yield assert_array_equal(all_imgs.get_affine(), affine)\n # check that not-matching affines raise error\n img2 = nf.Nifti1Image(data1, affine+1)\n yield assert_raises(ValueError, concat_images, [img0, img2])\n img2 = nf.Nifti1Image(data1.T, affine)\n yield assert_raises(ValueError, concat_images, [img0, img2])\n # except if check_affines is False\n all_imgs = 
concat_images([img0, img1])\n yield assert_array_equal(all_imgs.get_data(), all_data)\n yield assert_array_equal(all_imgs.get_affine(), affine)\n \n\n@parametric\ndef test_closest_canonical():\n arr = np.arange(24).reshape((2,3,4,1))\n # no funky stuff, returns same thing\n img = Nifti1Image(arr, np.eye(4))\n xyz_img = as_closest_canonical(img)\n yield assert_true(img is xyz_img)\n # a axis flip\n img = Nifti1Image(arr, np.diag([-1,1,1,1]))\n xyz_img = as_closest_canonical(img)\n yield assert_false(img is xyz_img)\n out_arr = xyz_img.get_data()\n yield assert_array_equal(out_arr, np.flipud(arr))\n # no error for enforce_diag in this case\n xyz_img = as_closest_canonical(img, True)\n # but there is if the affine is not diagonal\n aff = np.eye(4)\n aff[0,1] = 0.1\n # although it's more or less canonical already\n img = Nifti1Image(arr, aff)\n xyz_img = as_closest_canonical(img)\n yield assert_true(img is xyz_img)\n # it's still not diagnonal\n yield assert_raises(OrientationError, as_closest_canonical, img, True)\n", "# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the NiBabel package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n''' Module implementing Euler angle rotations and their conversions\n\nSee:\n\n* http://en.wikipedia.org/wiki/Rotation_matrix\n* http://en.wikipedia.org/wiki/Euler_angles\n* http://mathworld.wolfram.com/EulerAngles.html\n\nSee also: *Representing Attitude with Euler Angles and Quaternions: A\nReference* (2006) by James Diebel. A cached PDF link last found here:\n\nhttp://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.110.5134\n\nEuler's rotation theorem tells us that any rotation in 3D can be\ndescribed by 3 angles. Let's call the 3 angles the *Euler angle vector*\nand call the angles in the vector :math:`alpha`, :math:`beta` and\n:math:`gamma`. The vector is [ :math:`alpha`,\n:math:`beta`. :math:`gamma` ] and, in this description, the order of the\nparameters specifies the order in which the rotations occur (so the\nrotation corresponding to :math:`alpha` is applied first).\n\nIn order to specify the meaning of an *Euler angle vector* we need to\nspecify the axes around which each of the rotations corresponding to\n:math:`alpha`, :math:`beta` and :math:`gamma` will occur.\n\nThere are therefore three axes for the rotations :math:`alpha`,\n:math:`beta` and :math:`gamma`; let's call them :math:`i` :math:`j`,\n:math:`k`.\n\nLet us express the rotation :math:`alpha` around axis `i` as a 3 by 3\nrotation matrix `A`. Similarly :math:`beta` around `j` becomes 3 x 3\nmatrix `B` and :math:`gamma` around `k` becomes matrix `G`. Then the\nwhole rotation expressed by the Euler angle vector [ :math:`alpha`,\n:math:`beta`. 
:math:`gamma` ], `R` is given by::\n\n R = np.dot(G, np.dot(B, A))\n\nSee http://mathworld.wolfram.com/EulerAngles.html\n\nThe order :math:`G B A` expresses the fact that the rotations are\nperformed in the order of the vector (:math:`alpha` around axis `i` =\n`A` first).\n\nTo convert a given Euler angle vector to a meaningful rotation, and a\nrotation matrix, we need to define:\n\n* the axes `i`, `j`, `k`\n* whether a rotation matrix should be applied on the left of a vector to\n be transformed (vectors are column vectors) or on the right (vectors\n are row vectors).\n* whether the rotations move the axes as they are applied (intrinsic\n rotations) - compared the situation where the axes stay fixed and the\n vectors move within the axis frame (extrinsic)\n* the handedness of the coordinate system\n\nSee: http://en.wikipedia.org/wiki/Rotation_matrix#Ambiguities\n\nWe are using the following conventions:\n\n* axes `i`, `j`, `k` are the `z`, `y`, and `x` axes respectively. Thus\n an Euler angle vector [ :math:`alpha`, :math:`beta`. :math:`gamma` ]\n in our convention implies a :math:`alpha` radian rotation around the\n `z` axis, followed by a :math:`beta` rotation around the `y` axis,\n followed by a :math:`gamma` rotation around the `x` axis.\n* the rotation matrix applies on the left, to column vectors on the\n right, so if `R` is the rotation matrix, and `v` is a 3 x N matrix\n with N column vectors, the transformed vector set `vdash` is given by\n ``vdash = np.dot(R, v)``.\n* extrinsic rotations - the axes are fixed, and do not move with the\n rotations.\n* a right-handed coordinate system\n\nThe convention of rotation around ``z``, followed by rotation around\n``y``, followed by rotation around ``x``, is known (confusingly) as\n\"xyz\", pitch-roll-yaw, Cardan angles, or Tait-Bryan angles.\n'''\n\nimport math\nimport numpy as np\n\n\n_FLOAT_EPS_4 = np.finfo(float).eps * 4.0\n\n\ndef euler2mat(z=0, y=0, x=0):\n ''' Return matrix for rotations around z, y and x axes\n\n Uses the z, then y, then x convention above\n\n Parameters\n ----------\n z : scalar\n Rotation angle in radians around z-axis (performed first)\n y : scalar\n Rotation angle in radians around y-axis\n x : scalar\n Rotation angle in radians around x-axis (performed last)\n\n Returns\n -------\n M : array shape (3,3)\n Rotation matrix giving same rotation as for given angles\n\n Examples\n --------\n >>> zrot = 1.3 # radians\n >>> yrot = -0.1\n >>> xrot = 0.2\n >>> M = euler2mat(zrot, yrot, xrot)\n >>> M.shape\n (3, 3)\n\n The output rotation matrix is equal to the composition of the\n individual rotations\n\n >>> M1 = euler2mat(zrot)\n >>> M2 = euler2mat(0, yrot)\n >>> M3 = euler2mat(0, 0, xrot)\n >>> composed_M = np.dot(M3, np.dot(M2, M1))\n >>> np.allclose(M, composed_M)\n True\n\n You can specify rotations by named arguments\n\n >>> np.all(M3 == euler2mat(x=xrot))\n True\n\n When applying M to a vector, the vector should column vector to the\n right of M. 
If the right hand side is a 2D array rather than a\n vector, then each column of the 2D array represents a vector.\n\n >>> vec = np.array([1, 0, 0]).reshape((3,1))\n >>> v2 = np.dot(M, vec)\n >>> vecs = np.array([[1, 0, 0],[0, 1, 0]]).T # giving 3x2 array\n >>> vecs2 = np.dot(M, vecs)\n\n Rotations are counter-clockwise.\n\n >>> zred = np.dot(euler2mat(z=np.pi/2), np.eye(3))\n >>> np.allclose(zred, [[0, -1, 0],[1, 0, 0], [0, 0, 1]])\n True\n >>> yred = np.dot(euler2mat(y=np.pi/2), np.eye(3))\n >>> np.allclose(yred, [[0, 0, 1],[0, 1, 0], [-1, 0, 0]])\n True\n >>> xred = np.dot(euler2mat(x=np.pi/2), np.eye(3))\n >>> np.allclose(xred, [[1, 0, 0],[0, 0, -1], [0, 1, 0]])\n True\n\n Notes\n -----\n The direction of rotation is given by the right-hand rule (orient\n the thumb of the right hand along the axis around which the rotation\n occurs, with the end of the thumb at the positive end of the axis;\n curl your fingers; the direction your fingers curl is the direction\n of rotation). Therefore, the rotations are counterclockwise if\n looking along the axis of rotation from positive to negative.\n '''\n Ms = []\n if z:\n cosz = math.cos(z)\n sinz = math.sin(z)\n Ms.append(np.array(\n [[cosz, -sinz, 0],\n [sinz, cosz, 0],\n [0, 0, 1]]))\n if y:\n cosy = math.cos(y)\n siny = math.sin(y)\n Ms.append(np.array(\n [[cosy, 0, siny],\n [0, 1, 0],\n [-siny, 0, cosy]]))\n if x:\n cosx = math.cos(x)\n sinx = math.sin(x)\n Ms.append(np.array(\n [[1, 0, 0],\n [0, cosx, -sinx],\n [0, sinx, cosx]]))\n if Ms:\n return reduce(np.dot, Ms[::-1])\n return np.eye(3)\n\n\ndef mat2euler(M, cy_thresh=None):\n ''' Discover Euler angle vector from 3x3 matrix\n\n Uses the conventions above.\n\n Parameters\n ----------\n M : array-like, shape (3,3)\n cy_thresh : None or scalar, optional\n threshold below which to give up on straightforward arctan for\n estimating x rotation. If None (default), estimate from\n precision of input.\n\n Returns\n -------\n z : scalar\n y : scalar\n x : scalar\n Rotations in radians around z, y, x axes, respectively\n\n Notes\n -----\n If there was no numerical error, the routine could be derived using\n Sympy expression for z then y then x rotation matrix, which is::\n\n [ cos(y)*cos(z), -cos(y)*sin(z), sin(y)],\n [cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)],\n [sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)]\n\n with the obvious derivations for z, y, and x\n\n z = atan2(-r12, r11)\n y = asin(r13)\n x = atan2(-r23, r33)\n\n Problems arise when cos(y) is close to zero, because both of::\n\n z = atan2(cos(y)*sin(z), cos(y)*cos(z))\n x = atan2(cos(y)*sin(x), cos(x)*cos(y))\n\n will be close to atan2(0, 0), and highly unstable.\n\n The ``cy`` fix for numerical instability below is from: *Graphics\n Gems IV*, Paul Heckbert (editor), Academic Press, 1994, ISBN:\n 0123361559. 
Specifically it comes from EulerAngles.c by Ken\n Shoemake, and deals with the case where cos(y) is close to zero:\n\n See: http://www.graphicsgems.org/\n\n The code appears to be licensed (from the website) as \"can be used\n without restrictions\".\n '''\n M = np.asarray(M)\n if cy_thresh is None:\n try:\n cy_thresh = np.finfo(M.dtype).eps * 4\n except ValueError:\n cy_thresh = _FLOAT_EPS_4\n r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat\n # cy: sqrt((cos(y)*cos(z))**2 + (cos(x)*cos(y))**2)\n cy = math.sqrt(r33*r33 + r23*r23)\n if cy > cy_thresh: # cos(y) not close to zero, standard form\n z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z))\n y = math.atan2(r13, cy) # atan2(sin(y), cy)\n x = math.atan2(-r23, r33) # atan2(cos(y)*sin(x), cos(x)*cos(y))\n else: # cos(y) (close to) zero, so x -> 0.0 (see above)\n # so r21 -> sin(z), r22 -> cos(z) and\n z = math.atan2(r21, r22)\n y = math.atan2(r13, cy) # atan2(sin(y), cy)\n x = 0.0\n return z, y, x\n\n\ndef euler2quat(z=0, y=0, x=0):\n ''' Return quaternion corresponding to these Euler angles\n\n Uses the z, then y, then x convention above\n\n Parameters\n ----------\n z : scalar\n Rotation angle in radians around z-axis (performed first)\n y : scalar\n Rotation angle in radians around y-axis\n x : scalar\n Rotation angle in radians around x-axis (performed last)\n\n Returns\n -------\n quat : array shape (4,)\n Quaternion in w, x, y z (real, then vector) format\n\n Notes\n -----\n We can derive this formula in Sympy using:\n\n 1. Formula giving quaternion corresponding to rotation of theta radians\n about arbitrary axis:\n http://mathworld.wolfram.com/EulerParameters.html\n 2. Generated formulae from 1.) for quaternions corresponding to\n theta radians rotations about ``x, y, z`` axes\n 3. Apply quaternion multiplication formula -\n http://en.wikipedia.org/wiki/Quaternions#Hamilton_product - to\n formulae from 2.) 
to give formula for combined rotations.\n '''\n z = z/2.0\n y = y/2.0\n x = x/2.0\n cz = math.cos(z)\n sz = math.sin(z)\n cy = math.cos(y)\n sy = math.sin(y)\n cx = math.cos(x)\n sx = math.sin(x)\n return np.array([\n cx*cy*cz - sx*sy*sz,\n cx*sy*sz + cy*cz*sx,\n cx*cz*sy - sx*cy*sz,\n cx*cy*sz + sx*cz*sy])\n\n\ndef quat2euler(q):\n ''' Return Euler angles corresponding to quaternion `q`\n\n Parameters\n ----------\n q : 4 element sequence\n w, x, y, z of quaternion\n\n Returns\n -------\n z : scalar\n Rotation angle in radians around z-axis (performed first)\n y : scalar\n Rotation angle in radians around y-axis\n x : scalar\n Rotation angle in radians around x-axis (performed last)\n\n Notes\n -----\n It's possible to reduce the amount of calculation a little, by\n combining parts of the ``quat2mat`` and ``mat2euler`` functions, but\n the reduction in computation is small, and the code repetition is\n large.\n '''\n # delayed import to avoid cyclic dependencies\n import nibabel.quaternions as nq\n return mat2euler(nq.quat2mat(q))\n\n\ndef euler2angle_axis(z=0, y=0, x=0):\n ''' Return angle, axis corresponding to these Euler angles\n\n Uses the z, then y, then x convention above\n\n Parameters\n ----------\n z : scalar\n Rotation angle in radians around z-axis (performed first)\n y : scalar\n Rotation angle in radians around y-axis\n x : scalar\n Rotation angle in radians around x-axis (performed last)\n\n Returns\n -------\n theta : scalar\n angle of rotation\n vector : array shape (3,)\n axis around which rotation occurs\n\n Examples\n --------\n >>> theta, vec = euler2angle_axis(0, 1.5, 0)\n >>> theta\n 1.5\n >>> np.allclose(vec, [0, 1, 0])\n True\n '''\n # delayed import to avoid cyclic dependencies\n import nibabel.quaternions as nq\n return nq.quat2angle_axis(euler2quat(z, y, x))\n\n\ndef angle_axis2euler(theta, vector, is_normalized=False):\n ''' Convert angle, axis pair to Euler angles\n\n Parameters\n ----------\n theta : scalar\n angle of rotation\n vector : 3 element sequence\n vector specifying axis for rotation.\n is_normalized : bool, optional\n True if vector is already normalized (has norm of 1). Default\n False\n\n Returns\n -------\n z : scalar\n y : scalar\n x : scalar\n Rotations in radians around z, y, x axes, respectively\n\n Examples\n --------\n >>> z, y, x = angle_axis2euler(0, [1, 0, 0])\n >>> np.allclose((z, y, x), 0)\n True\n\n Notes\n -----\n It's possible to reduce the amount of calculation a little, by\n combining parts of the ``angle_axis2mat`` and ``mat2euler``\n functions, but the reduction in computation is small, and the code\n repetition is large.\n '''\n # delayed import to avoid cyclic dependencies\n import nibabel.quaternions as nq\n M = nq.angle_axis2mat(theta, vector, is_normalized)\n return mat2euler(M)\n" ]
[ [ "numpy.isfinite", "numpy.dtype" ], [ "numpy.diag", "numpy.arange", "numpy.eye", "numpy.flipud", "numpy.concatenate" ], [ "numpy.asarray", "numpy.eye", "numpy.array", "numpy.finfo" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
david-lindner/idrl
[ "54cfad330b0598ad4f6621796f2411644e50a6ba" ]
[ "active_reward_learning/reward_models/basic_gp_reward_model.py" ]
[ "import os\nimport pickle\nimport time\nfrom collections import Counter\nfrom typing import Callable, Dict, List, Optional, Tuple, Union\n\nimport gym\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom active_reward_learning.common.policy import (\n BasePolicy,\n CombinedPolicy,\n EpsGreedyPolicy,\n GaussianNoisePolicy,\n LinearPolicy,\n)\nfrom active_reward_learning.envs import RewardModelMeanWrapper, TabularMDP\nfrom active_reward_learning.envs.reward_model_sample_wrapper import (\n RewardModelSampleWrapper,\n)\nfrom active_reward_learning.reward_models.gaussian_process_linear import (\n LinearObservationGP,\n)\nfrom active_reward_learning.reward_models.kernels import LinearKernel\nfrom active_reward_learning.reward_models.query import (\n ComparisonQueryLinear,\n LinearQuery,\n PointQuery,\n QueryBase,\n StateComparisonQueryLinear,\n StateQuery,\n TrajectoryQuery,\n)\nfrom active_reward_learning.solvers import (\n ArgmaxSolver,\n BaseSolver,\n LBFGSArgmaxSolver,\n LPSolver,\n)\nfrom active_reward_learning.util.helpers import (\n get_hash,\n mean_jaccard_distance,\n np_to_tuple,\n subsample_sequence,\n timing,\n)\n\n\nclass BasicGPRewardModel:\n \"\"\"\n Implements the basic GP reward modelling framework.\n\n The general procedure is:\n (i) Select state to query with acquisition function\n (ii) Query state and observe the reward\n (iii) Update GP model with observation\n (iv) Update policy to be optimal w.r.t new reward predictions from GP\n\n Attributes\n -------------\n env (gym.Env): environment\n gp_model (GP): gaussian process model of the reward function\n observed (set): states that have already been observed\n acquisition_function (callable): function that takes a BasicGPRewardModel\n object as an argument and returns the next\n state to query according to some\n acquisition function\n _last_pred_mu (np.ndarray): last cached means from GP reward prediction\n _last_pred_var (np.ndarray): last cached variances from GP reward prediction\n _last_predictions_up_to_date (bool): if True, the cached values are still\n up to date\n \"\"\"\n\n def __init__(\n self,\n env: gym.Env,\n acquisition_function: Callable[[\"BasicGPRewardModel\"], int],\n kernel,\n solver: BaseSolver,\n obs_var: float = 0,\n use_trajectories_to_evaluate_policy: bool = False,\n solver_iterations: int = 100,\n optimize_gp_parameters_every_n: Optional[int] = None,\n use_thompson_sampling_for_candidate_policies: bool = True,\n update_candidate_policies_every_n: Optional[int] = 10,\n n_candidate_policies_thompson_sampling: Optional[int] = 10,\n n_rollouts_for_states: int = 1,\n n_rollouts_for_eval: int = 1,\n candidate_queries_from: str = \"fixed\",\n initialize_candidate_policies: bool = True,\n use_mean_for_candidate_policies: bool = False,\n gp_num_inducing: Optional[int] = None,\n rollout_horizon: Optional[int] = None,\n subsampling_candidate_queries_n: Optional[int] = None,\n use_comparisons: bool = False,\n comparison_response: str = \"bernoulli\",\n subsample_traj_for_queries_len: Optional[int] = None,\n n_policies_initial_ts: Optional[int] = None,\n candidate_queries_file: Optional[Union[str, list]] = None,\n trajectory_clip_length: Optional[int] = None,\n trajectory_n_clips: Optional[int] = None,\n af_label: Optional[str] = None,\n observation_batch_size: int = 1,\n ):\n self.env = env\n self.environment_is_tabular = isinstance(env, TabularMDP) or isinstance(\n env.unwrapped, TabularMDP\n )\n self.use_tabular_solver = isinstance(solver, LPSolver) or (\n self.environment_is_tabular and isinstance(solver, 
ArgmaxSolver)\n )\n\n # initial observation\n env.reset()\n self.env.step(self.env.action_space.sample())\n obs, reward1, done, info1 = self.env.step(self.env.action_space.sample())\n\n self.use_comparisons = use_comparisons\n self.comparison_response = comparison_response\n\n self.gp_model = LinearObservationGP(kernel, obs_var=obs_var)\n\n if self.use_comparisons:\n # ground model in simulated observation at 0\n self.gp_model.observe((info1[\"gp_repr\"], [1]), 0, obs_noise=0.001)\n\n if self.environment_is_tabular:\n self.last_query = info1[\"state\"]\n else:\n self.last_query = None\n\n self.acquisition_function = acquisition_function\n\n self.observed: List[QueryBase] = []\n self.observed_dict: list = []\n self.observed_counter: Dict[QueryBase, int] = Counter()\n\n self._last_pred_mu = None\n self._last_pred_var = None\n self._last_pred_cov = None\n self._last_predictions_up_to_date = False\n self.selected_policy = None\n\n self.rollout_horizon = rollout_horizon\n self.use_trajectories_to_evaluate_policy = use_trajectories_to_evaluate_policy\n self.optimize_gp_parameters_every_n = optimize_gp_parameters_every_n\n self.use_thompson_sampling_for_candidate_policies = (\n use_thompson_sampling_for_candidate_policies\n )\n self.use_mean_for_candidate_policies = use_mean_for_candidate_policies\n self.update_candidate_policies_every_n = update_candidate_policies_every_n\n self.n_candidate_policies_thompson_sampling = (\n n_candidate_policies_thompson_sampling\n )\n\n self.candidate_queries_from = candidate_queries_from\n self.n_rollouts_for_states = n_rollouts_for_states\n self.n_rollouts_for_eval = n_rollouts_for_eval\n\n self.subsampling_candidate_queries_n = subsampling_candidate_queries_n\n self.subsample_traj_for_queries_len = subsample_traj_for_queries_len\n\n assert observation_batch_size >= 1\n self.observation_batch_size = observation_batch_size\n self.run_time = 0\n\n self.candidate_policies: Optional[List[np.ndarray]]\n self.candidate_rewards: Optional[List[np.ndarray]]\n self.candidate_policy_posterior_probabilities: Optional[List[float]]\n self.candidate_policy_mean_jaccard_dist: Optional[float] = None\n\n if initialize_candidate_policies:\n self.candidate_policies = env.get_candidate_policies()\n assert self.candidate_policies is not None\n assert len(self.candidate_policies) > 0\n n_policies = len(self.candidate_policies)\n self.candidate_policy_posterior_probabilities = [\n 1 / n_policies\n ] * n_policies\n self.updated_candidate_policies_in_last_step = True\n else:\n self.candidate_policies = None\n self.candidate_policy_posterior_probabilities = None\n self.updated_candidate_policies_in_last_step = False\n\n self.solver = solver\n if not self.use_tabular_solver:\n if isinstance(kernel, LinearKernel):\n self.solver.set_env(RewardModelSampleWrapper(env, self))\n else:\n assert not self.use_thompson_sampling_for_candidate_policies\n self.solver.set_env(RewardModelMeanWrapper(env, self))\n self.solver_iterations = solver_iterations\n\n self.candidate_queries: list = []\n self.candidate_queries_gp_repr: Optional[List[Tuple]] = None\n self.candidate_queries_gp_repr_idx: Optional[List[int]] = None\n self.candidate_queries_linear_combinations = None\n self.state_visitation_frequencies: List[np.ndarray] = []\n self.state_repr_dtype = info1[\"gp_repr\"].dtype\n if self.candidate_queries_from == \"fixed\":\n unique_repr = set()\n if self.environment_is_tabular:\n if use_comparisons:\n for s1 in range(env.N_states):\n for s2 in range(s1 + 1, env.N_states):\n gp_repr1 = 
env.get_state_repr(s1)\n gp_repr2 = env.get_state_repr(s2)\n if list(gp_repr1) != list(gp_repr2):\n gp_repr = tuple(list(gp_repr1) + list(gp_repr2))\n\n if gp_repr not in unique_repr:\n unique_repr.add(gp_repr)\n self.candidate_queries.append(\n StateComparisonQueryLinear(\n gp_repr1,\n gp_repr2,\n env.get_reward(s1),\n env.get_reward(s2),\n dict(),\n response=comparison_response,\n )\n )\n else:\n for s in range(env.N_states):\n gp_repr = tuple(env.get_state_repr(s))\n if gp_repr not in unique_repr:\n unique_repr.add(gp_repr)\n self.candidate_queries.append(\n StateQuery(\n s,\n gp_repr,\n env.get_reward(s),\n dict(),\n obs=env.get_observation(s),\n )\n )\n else:\n # uniformly sample candidates for non-tabular environments\n n_samples = self.n_rollouts_for_states * self.env.spec.max_episode_steps\n x_test, y_test = self.env.sample_features_rewards(n_samples)\n if self.use_comparisons:\n for i in range(len(x_test)):\n x1 = x_test[i]\n r1 = y_test[i]\n for j in range(i + 1, len(x_test)):\n x2 = x_test[j]\n r2 = y_test[j]\n info: Dict[object, object] = dict()\n self.candidate_queries.append(\n ComparisonQueryLinear(\n x1, x2, r1, r2, info, response=comparison_response\n )\n )\n else:\n for i in range(len(x_test)):\n x = x_test[i]\n r = y_test[i]\n info = dict()\n self.candidate_queries.append(PointQuery(x, r, info))\n elif self.candidate_queries_from == \"rollouts_fixed\":\n if self.candidate_policies is None:\n assert (self.n_candidate_policies_thompson_sampling is not None) or (\n n_policies_initial_ts is not None\n )\n self.candidate_policies = []\n self.candidate_rewards = []\n self.candidate_policy_posterior_probabilities = []\n print(\"\\tinitial thompson sampling\")\n if n_policies_initial_ts is None:\n assert self.n_candidate_policies_thompson_sampling is not None\n n_policies = self.n_candidate_policies_thompson_sampling\n else:\n n_policies = n_policies_initial_ts\n self.update_candidate_policies_using_thompson_sampling(n_policies)\n\n self.collect_candidate_policy_rollouts(True)\n\n candidate_policies_features = [None] * len(self.candidate_policies)\n for query in self.candidate_queries:\n i, j = query.info[\"policy_i1\"], query.info[\"policy_i2\"]\n features_i, features_j = query.gp_repr_list\n if candidate_policies_features[i] is None:\n candidate_policies_features[i] = features_i\n if candidate_policies_features[j] is None:\n candidate_policies_features[j] = features_j\n\n assert self.candidate_policies is not None\n assert self.candidate_rewards is not None\n assert self.candidate_policy_posterior_probabilities is not None\n\n for i in range(len(self.candidate_policies)):\n for j in range(len(self.candidate_policies)):\n reward_i = self.candidate_rewards[i]\n features_i = candidate_policies_features[i]\n features_j = candidate_policies_features[j]\n G_pi_i_w_i = np.dot(features_i, reward_i)\n G_pi_j_w_i = np.dot(features_j, reward_i)\n if G_pi_j_w_i > G_pi_i_w_i:\n self.candidate_rewards[i] = self.candidate_rewards[j]\n self.candidate_policies[i] = self.candidate_policies[j]\n candidate_policies_features[i] = candidate_policies_features[j]\n self.candidate_policy_posterior_probabilities[\n i\n ] = self.candidate_policy_posterior_probabilities[j]\n\n self.collect_candidate_policy_rollouts(True)\n\n if \"Highway\" in self.env.spec.id:\n print(\"Update LBFGSArgmaxSolver...\", end=\" \")\n\n if isinstance(self.solver, LBFGSArgmaxSolver):\n print(\"Appending candidate policies.\")\n cand = self.solver.candidate_policies\n cand = cand + self.candidate_policies\n else:\n 
print(\"Using candidate policies.\")\n cand = self.candidate_policies\n\n self.solver = LBFGSArgmaxSolver(\n self.solver.env,\n candidate_policies=cand,\n debug=False,\n )\n\n elif self.candidate_queries_from == \"query_file\":\n assert isinstance(candidate_queries_file, str)\n assert candidate_queries_file is not None\n assert candidate_queries_file.endswith(\".pkl\")\n print(f\"Loading candidate queries from '{candidate_queries_file}'...\")\n with open(candidate_queries_file, \"rb\") as f:\n self.candidate_queries = pickle.load(f)\n elif self.candidate_queries_from in (\"random_rollouts\", \"policy_file\"):\n assert trajectory_clip_length is not None\n assert trajectory_n_clips is not None\n\n expl_policies: List[BasePolicy]\n if self.candidate_queries_from == \"random_rollouts\":\n print(f\"Randomly exploring...\")\n policy: BasePolicy\n policy = LinearPolicy(np.zeros(2)) # will never be used\n policy = EpsGreedyPolicy(policy, 1, env.action_space)\n expl_policies = [policy]\n else:\n assert candidate_queries_file is not None\n assert isinstance(candidate_queries_file, (str, list))\n if isinstance(candidate_queries_file, str):\n candidate_queries_file = [candidate_queries_file]\n expl_policies = []\n for policy_file in candidate_queries_file:\n assert policy_file.endswith(\".npy\")\n print(f\"Loading exploration policy from '{policy_file}'...\")\n policy = LinearPolicy.load(policy_file, env)\n eps = 0.1\n policy = EpsGreedyPolicy(policy, eps, env.action_space)\n expl_policies.append(policy)\n\n policy_idx = 0\n\n trajectories = []\n for i_rollout in range(self.n_rollouts_for_states):\n obs = self.env.reset()\n done = False\n t = 0\n gp_repr_list, reward_list, info_list = [], [], []\n\n print(policy_idx)\n expl_policy = expl_policies[policy_idx]\n policy_idx = (policy_idx + 1) % len(expl_policies)\n\n while not done and (\n self.rollout_horizon is None or t <= self.rollout_horizon\n ):\n t += 1\n a = expl_policy.get_action(obs)\n obs, reward, done, info = self.env.step(a)\n gp_repr_list.append(info[\"gp_repr\"])\n reward_list.append(reward)\n info_list.append({k: v for k, v in info.items() if k != \"gp_repr\"})\n trajectories.append((gp_repr_list, reward_list, info_list))\n\n for gp_repr_list, reward_list, info_list in trajectories:\n info = {\"info_list\": tuple(info_list)}\n L = trajectory_clip_length\n N = trajectory_n_clips\n for _ in range(N):\n start = np.random.randint(0, len(gp_repr_list) - L)\n self.candidate_queries.append(\n TrajectoryQuery(\n gp_repr_list[start : start + L],\n reward_list[start : start + L],\n info,\n )\n )\n\n self.timing: Dict[str, float] = dict()\n\n def run(self, iterations, callback=None, print_timing=False):\n iteration = 0\n\n self.run_time = 0\n while iteration < iterations:\n current_time = time.time()\n print(\"\\tquery x\")\n\n if (\n (\n self.use_thompson_sampling_for_candidate_policies\n or self.use_mean_for_candidate_policies\n )\n and self.update_candidate_policies_every_n is not None\n and iteration % self.update_candidate_policies_every_n == 0\n ):\n self.update_candidate_policies()\n self.updated_candidate_policies_in_last_step = True\n update_candidate_queries = (\n self.candidate_queries_from == \"rollouts_updated\"\n )\n else:\n self.updated_candidate_policies_in_last_step = False\n update_candidate_queries = False\n\n if (\n update_candidate_queries or self.use_trajectories_to_evaluate_policy\n ) and self.candidate_policies is not None:\n print(\"\\trolling out policies\")\n 
self.collect_candidate_policy_rollouts(update_candidate_queries)\n\n self.query_reward(iteration, iterations)\n iteration += 1\n\n if (\n self.optimize_gp_parameters_every_n is not None\n and self.optimize_gp_parameters_every_n > 0\n and iteration % self.optimize_gp_parameters_every_n == 0\n ):\n self.gp_model.optimize_parameters()\n\n new_current_time = time.time()\n self.run_time += new_current_time - current_time\n current_time = new_current_time\n\n if callback:\n callback(locals(), globals())\n\n if print_timing:\n print(\"\\t\\tTiming\")\n for key, value in self.timing.items():\n print(\"\\t\\t\\t'{}': {:.2f} seconds\".format(key, value))\n\n @timing\n def update_candidate_policies(self):\n self.candidate_policies = []\n self.candidate_rewards = []\n self.candidate_policy_posterior_probabilities = []\n if self.use_thompson_sampling_for_candidate_policies:\n print(\"\\tthompson sampling\")\n self.update_candidate_policies_using_thompson_sampling()\n if self.use_mean_for_candidate_policies:\n print(\"\\tget new mean-optimal candidate policy\")\n self.update_candidate_policies_using_mean()\n\n print(\"candidate_policies\", self.candidate_policies)\n print(\"posterior_probabilities\", self.candidate_policy_posterior_probabilities)\n\n @timing\n def update_reward_model(self, query: QueryBase, y) -> None:\n \"\"\"\n Update the GP reward model with a specific observation.\n\n Args:\n -----------\n query (QueryBase): query at which the reward observation was made\n optimize_gp (bool): if set to true the parameters of the GP will be\n optimize by max log-likelihood\n \"\"\"\n self.observed.append(query)\n self.observed_dict.append((dict(query._asdict()), y))\n self.observed_counter[query] += 1\n assert isinstance(self.gp_model, LinearObservationGP)\n assert isinstance(query, LinearQuery)\n print(\"query.gp_repr_list\", query.gp_repr_list)\n print(\"query.linear_combination\", query.linear_combination)\n self.gp_model.observe([query.gp_repr_list, query.linear_combination], y)\n self._last_predictions_up_to_date = False\n\n @timing\n def query_reward(self, iteration, iterations) -> None:\n \"\"\"\n Query the reward of a query determined by the acquistion function and update\n the GP reward model.\n \"\"\"\n print(\"\\trunning acquisition function\")\n\n for sample_i in range(self.observation_batch_size):\n print(sample_i, end=\" \")\n i = self.acquisition_function(self)\n query = self.candidate_queries[i]\n y = query.reward\n\n self.update_reward_model(query, y)\n self.last_query = query\n\n def get_candidate_queries_gp_repr(self):\n \"\"\"\n Return the features and linear weights for every candidate query.\n\n If subsampling is activated returns them for a random subsample of candidates.\n For this case a list of indices is returned that maps the subsampled queries\n to positions in the full list of candidate queries.\n \"\"\"\n if (\n self.candidate_queries_gp_repr is None\n or self.candidate_queries_gp_repr_idx is None\n or self.candidate_queries_linear_combinations is None\n ) or self.subsampling_candidate_queries_n is not None:\n sample_idx = np.arange(len(self.candidate_queries))\n if self.subsampling_candidate_queries_n is not None:\n # sample a subset of candidate queries\n sample_idx = np.random.choice(\n sample_idx,\n self.subsampling_candidate_queries_n,\n )\n\n # dictionary to contain unique set of queries\n candidate_queries_dict = dict()\n for i in sample_idx:\n query = self.candidate_queries[i]\n gp_repr_list = query.gp_repr_list\n linear_combination = query.linear_combination\n 
gp_repr_tuple = tuple(\n [np_to_tuple(gp_repr) for gp_repr in gp_repr_list]\n )\n linear_combination_tuple = tuple(linear_combination)\n key = (gp_repr_tuple, linear_combination_tuple)\n\n if key not in candidate_queries_dict:\n candidate_queries_dict[key] = i\n\n # convert unique set of queries into list\n candidate_queries_gp_repr = []\n candidate_queries_linear_combinations = []\n candidate_queries_gp_repr_idx = []\n\n for key, idx in candidate_queries_dict.items():\n gp_repr_tuple, linear_combination_tuple = key\n gp_repr_list = [list(x) for x in gp_repr_tuple]\n linear_combination = list(linear_combination_tuple)\n candidate_queries_gp_repr.append(gp_repr_list)\n candidate_queries_linear_combinations.append(linear_combination)\n candidate_queries_gp_repr_idx.append(idx)\n\n if self.subsampling_candidate_queries_n is not None:\n return (\n candidate_queries_gp_repr,\n candidate_queries_linear_combinations,\n candidate_queries_gp_repr_idx,\n )\n else:\n self.candidate_queries_gp_repr = candidate_queries_gp_repr\n self.candidate_queries_linear_combinations = (\n candidate_queries_linear_combinations\n )\n self.candidate_queries_gp_repr_idx = candidate_queries_gp_repr_idx\n return (\n self.candidate_queries_gp_repr,\n self.candidate_queries_linear_combinations,\n self.candidate_queries_gp_repr_idx,\n )\n\n @timing\n def get_candidate_queries_reward_predictions(\n self, get_full_cov: bool = False\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Get mean and variance of reward predictions for the candidate states from the GP.\n\n Uses caching to avoid querying the GP model if no update happened since the last call\n fo this function.\n\n Note: currently this removes duplicates, which is not obvious.\n \"\"\"\n if (\n self._last_predictions_up_to_date\n and self.subsampling_candidate_queries_n is None\n ):\n if get_full_cov:\n return self._last_pred_mu, self._last_pred_cov\n else:\n return self._last_pred_mu, self._last_pred_var\n else:\n (\n candidate_query_repr,\n candidate_queries_linear_combinations,\n candidate_queries_gp_repr_idx,\n ) = self.get_candidate_queries_gp_repr()\n\n mu_pred, cov_pred = self.gp_model.predict_multiple(\n candidate_query_repr,\n linear_combination=candidate_queries_linear_combinations,\n )\n var_pred = np.diag(cov_pred)\n\n self._last_pred_mu = mu_pred\n self._last_pred_cov = cov_pred\n self._last_pred_var = var_pred\n self._last_predictions_up_to_date = True\n if get_full_cov:\n return mu_pred, cov_pred\n else:\n return mu_pred, var_pred\n\n @timing\n def get_policy_return_mean_var(\n self,\n policy_i: int,\n mu: Optional[np.ndarray] = None,\n sigma: Optional[np.ndarray] = None,\n ) -> Tuple[float, float]:\n \"\"\"\n Returns mean and variance of a policy given the current GP reward model.\n\n Assuming the reward is Gaussian, the expected return of a policy is also\n Gaussian. 
The mean and variance of this gaussian are returned by this method.\n \"\"\"\n assert self.candidate_policies is not None\n assert 0 <= policy_i < len(self.candidate_policies)\n if self.use_trajectories_to_evaluate_policy:\n freq = self.state_visitation_frequencies[policy_i]\n state_repr = [\n np.fromstring(k, dtype=self.state_repr_dtype) for k in freq.keys()\n ]\n W = np.array([freq[repr.tostring()] for repr in state_repr])\n mu, sigma = self.gp_model.predict_multiple(state_repr)\n E_G_pi = np.dot(W, mu)\n Var_G_pi = np.dot(W, np.dot(sigma, W))\n else:\n assert self.environment_is_tabular\n policy = self.candidate_policies[policy_i]\n W = self.env.get_return_trafo_for_policy(policy)\n all_states = self.env.get_all_states_repr()\n mu, sigma = self.gp_model.predict_multiple(all_states)\n E_G_pi = W.dot(mu)\n Var_G_pi = W.dot(sp.csr_matrix(sigma).dot(W))\n return E_G_pi, Var_G_pi\n\n @timing\n def update_candidate_policies_using_thompson_sampling(\n self,\n n_policies=None,\n ) -> None:\n \"\"\"\n Uses Thompson sampling to get a new set of candidate policies based on\n the current reward model.\n\n Performs two steps n_policies times:\n 1. Sample a reward function from the model\n 2. Finds the optimal policy for this function\n \"\"\"\n assert self.candidate_policies is not None\n assert self.candidate_rewards is not None\n assert self.candidate_policy_posterior_probabilities is not None\n\n if n_policies is None:\n assert self.n_candidate_policies_thompson_sampling is not None\n n_policies = self.n_candidate_policies_thompson_sampling\n\n if self.use_tabular_solver:\n all_states = np.asarray(self.env.get_all_states_repr())\n samples = self.gp_model.sample_y_from_posterior(all_states, n_policies)\n for i in range(n_policies):\n print(\"\\tTS Policy (tabular) {}\".format(i))\n rewards = samples[:, i]\n policy = self.solver.solve(self.solver_iterations, rewards=rewards)\n self.candidate_policies.append(policy)\n self.candidate_rewards.append(rewards)\n else:\n for i in range(n_policies):\n print(\"\\tTS Policy {}\".format(i))\n posterior_prob = self.solver.env.new_sample(self)\n\n policy = self.solver.solve(self.solver_iterations)\n self.candidate_policies.append(policy)\n self.candidate_rewards.append(self.solver.env.theta)\n self.candidate_policy_posterior_probabilities.append(posterior_prob)\n\n self._last_predictions_up_to_date = False\n print(\"\\tresampled candidate policies\")\n\n @timing\n def update_candidate_policies_using_mean(self) -> None:\n \"\"\"\n Will add the current mean-optimal policy to the set of candidates.\n \"\"\"\n assert self.candidate_policies is not None\n assert self.candidate_rewards is not None\n assert self.candidate_policy_posterior_probabilities is not None\n if self.use_tabular_solver:\n all_states = np.asarray(self.env.get_all_states_repr())\n mu_pred, _ = self.gp_model.predict_multiple(all_states)\n rewards = mu_pred\n policy = self.solver.solve(self.solver_iterations, rewards=mu_pred)\n else:\n if isinstance(self.solver.env, RewardModelSampleWrapper):\n posterior_prob = self.solver.env.set_sample_to_mean(self)\n self.candidate_policy_posterior_probabilities.append(posterior_prob)\n rewards = self.solver.env.theta\n elif isinstance(self.gp_model.kernel, LinearKernel):\n rewards = self.gp_model.linear_predictive_mean\n else:\n rewards = None\n policy = self.solver.solve(self.solver_iterations)\n self.candidate_policies.append(policy)\n self.candidate_rewards.append(rewards)\n self._last_predictions_up_to_date = False\n print(\"resampled mean candidate 
policy\")\n\n @timing\n def collect_candidate_policy_rollouts(self, update_candidate_queries) -> None:\n \"\"\"\n Performs `n_rollouts` for each policy in `candidate_policy` and updates\n `candidate_queries` to contain all distinct states that were visited\n during the rollouts.\n \"\"\"\n assert self.candidate_policies is not None\n assert self.use_trajectories_to_evaluate_policy or update_candidate_queries\n\n if self.use_trajectories_to_evaluate_policy:\n self.state_visitation_frequencies = []\n\n if update_candidate_queries:\n self.candidate_queries = []\n self.candidate_queries_gp_repr = None\n self.candidate_queries_linear_combinations = None\n self.candidate_queries_gp_repr_idx = None\n\n if update_candidate_queries:\n n_rollouts = max(self.n_rollouts_for_eval, self.n_rollouts_for_states)\n else:\n n_rollouts = self.n_rollouts_for_eval\n\n state_action_per_policy = []\n trajectories = []\n for policy_i, policy in enumerate(self.candidate_policies):\n # print(\"policy_i\", policy_i)\n\n state_actions = set()\n if self.use_trajectories_to_evaluate_policy:\n W_pi: Dict[str, float] = dict()\n count = 0\n for rollout_i in range(n_rollouts):\n # print(\"rollout_i\", rollout_i)\n # print(\"rollout\")\n obs = self.env.reset()\n done = False\n t = 0\n gp_repr_list, reward_list, info_list = [], [], []\n while not done and (\n self.rollout_horizon is None or t <= self.rollout_horizon\n ):\n t += 1\n a = policy.get_action(obs)\n\n state_actions.add((get_hash(obs), get_hash(a)))\n obs, reward, done, info = self.env.step(a)\n\n if update_candidate_queries:\n gp_repr_list.append(info[\"gp_repr\"])\n reward_list.append(reward)\n info_list.append(\n {k: v for k, v in info.items() if k != \"gp_repr\"}\n )\n if self.use_trajectories_to_evaluate_policy:\n # print(f'DEBUG: x {info[\"x\"]}, y {info[\"y\"]}')\n gp_repr = info[\"gp_repr\"].tostring()\n if self.state_repr_dtype is None:\n self.state_repr_dtype = info[\"gp_repr\"].dtype\n elif self.state_repr_dtype != info[\"gp_repr\"].dtype:\n raise Exception(\n \"Inconsistent datatypes in gp_representation: \"\n \"{} and {}\".format(\n self.state_repr_dtype, info[\"gp_repr\"].dtype\n )\n )\n if gp_repr not in W_pi:\n W_pi[gp_repr] = 0\n W_pi[gp_repr] += 1\n count += 1\n\n if self.subsample_traj_for_queries_len is not None:\n old_len = len(gp_repr_list)\n new_len = self.subsample_traj_for_queries_len\n a, b = subsample_sequence(old_len, new_len)\n assert len(gp_repr_list) == old_len\n assert len(reward_list) == old_len\n assert len(info_list) == old_len\n gp_repr_list = gp_repr_list[a:b]\n reward_list = reward_list[a:b]\n info_list = info_list[a:b]\n assert len(gp_repr_list) == new_len\n assert len(reward_list) == new_len\n assert len(info_list) == new_len\n\n trajectories.append((gp_repr_list, reward_list, info_list, policy_i))\n\n state_action_per_policy.append(state_actions)\n\n if self.use_trajectories_to_evaluate_policy:\n for k in W_pi.keys():\n W_pi[k] /= n_rollouts\n self.state_visitation_frequencies.append(W_pi)\n\n if update_candidate_queries:\n if self.use_comparisons:\n # Note: this currently only works for linear reward functions and has only\n # been tested for HighwayDriving.\n if not \"Highway\" in self.env.spec.id:\n raise NotImplementedError(\n \"Comparisons from Thompson sampled trajectories \"\n \"is currently only implemented for highway environment.\"\n )\n\n ## TODO: make this a proper parameter\n normalize_features = False\n\n if not normalize_features:\n # normalize rewards\n rewards = []\n for i in range(len(trajectories)):\n 
gp_repr_list1, _, _, _ = trajectories[i]\n gp_repr1 = np.sum(gp_repr_list1, axis=0) / len(gp_repr_list1)\n reward1 = np.dot(gp_repr1, self.env.reward_w)\n rewards.append(reward1)\n max_reward = np.max(rewards)\n min_reward = np.min(rewards)\n\n for i in range(len(trajectories)):\n gp_repr_list1, reward_list1, info_list1, policy_i1 = trajectories[i]\n gp_repr1 = np.sum(gp_repr_list1, axis=0) / len(gp_repr_list1)\n\n # Normalize features to ensure rewards are between 0 and 1\n if normalize_features:\n gp_repr1[-1] = 0\n gp_repr1 /= np.linalg.norm(gp_repr1)\n gp_repr1[-1] = 1\n\n reward1 = np.dot(gp_repr1, self.env.reward_w)\n\n if not normalize_features:\n reward1 = (reward1 - min_reward) / (max_reward - min_reward)\n\n for j in range(i + 1, len(trajectories)):\n (\n gp_repr_list2,\n reward_list2,\n info_list2,\n policy_i2,\n ) = trajectories[j]\n gp_repr2 = np.sum(gp_repr_list2, axis=0) / len(gp_repr_list2)\n\n if normalize_features:\n gp_repr2[-1] = 0\n gp_repr2 /= np.linalg.norm(gp_repr2)\n gp_repr2[-1] = 1\n\n reward2 = np.dot(gp_repr2, self.env.reward_w)\n\n if not normalize_features:\n reward2 = (reward2 - min_reward) / (max_reward - min_reward)\n\n info = {\n \"info_list1\": tuple(info_list1),\n \"info_list2\": tuple(info_list2),\n \"policy_i1\": policy_i1,\n \"policy_i2\": policy_i2,\n }\n self.candidate_queries.append(\n ComparisonQueryLinear(\n gp_repr1,\n gp_repr2,\n reward1,\n reward2,\n info,\n response=self.comparison_response,\n )\n )\n else:\n for gp_repr_list, reward_list, info_list, policy_i in trajectories:\n info = {\"info_list\": tuple(info_list)}\n self.candidate_queries.append(\n TrajectoryQuery(\n gp_repr_list,\n reward_list,\n info,\n )\n )\n\n jac_dist = mean_jaccard_distance(state_action_per_policy)\n self.candidate_policy_mean_jaccard_dist = jac_dist\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.random.choice", "numpy.min", "numpy.linalg.norm", "scipy.sparse.csr_matrix", "numpy.max", "numpy.fromstring", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
bileschi/tpu
[ "0731831addc47d45342708093697492e4e9a68ca" ]
[ "models/official/squeezenet/squeezenet_main.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"SqueezeNet implementation with TPU support.\n\nTraining loop and input pipeline.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import flags\nimport absl.logging as _logging # pylint: disable=unused-import\nimport tensorflow as tf\n\nimport data_pipeline\nimport squeezenet_model\nfrom tensorflow.contrib.tpu.python.tpu import tpu_config\nfrom tensorflow.contrib.tpu.python.tpu import tpu_estimator\n\n\n# Cloud TPU Cluster Resolvers\nflags.DEFINE_string(\n \"gcp_project\", default=None,\n help=\"Project name for the Cloud TPU-enabled project. If not specified, we \"\n \"will attempt to automatically detect the GCE project from metadata.\")\nflags.DEFINE_string(\n \"tpu_zone\", default=None,\n help=\"GCE zone where the Cloud TPU is located in. If not specified, we \"\n \"will attempt to automatically detect the GCE project from metadata.\")\nflags.DEFINE_string(\n \"tpu_name\", default=None,\n help=\"Name of the Cloud TPU for Cluster Resolvers. You must specify either \"\n \"this flag or --master.\")\n\n# Model specific paramenters\nflags.DEFINE_string(\n \"master\", default=None,\n help=\"GRPC URL of the master (e.g. grpc://ip.address.of.tpu:8470). You \"\n \"must specify either this flag or --tpu_name.\")\n\nflags.DEFINE_string(\"data_dir\", \"\", \"Location of training files.\")\nflags.DEFINE_string(\"model_dir\", \"\", \"Where to store model checkpoints.\")\nflags.DEFINE_integer(\"save_checkpoints_secs\", 3600,\n \"Interval between saving model checkpoints.\")\nflags.DEFINE_integer(\"num_shards\", 8, \"Number of TPU shards.\")\nflags.DEFINE_integer(\"batch_size\", 1024, \"Batch size for training and eval.\")\nflags.DEFINE_boolean(\"use_tpu\", True, \"If true, use TPU device.\")\n\nflags.DEFINE_string(\"optimizer\", \"momentum\", \"Optimizer: momentum|adam|rmsprop\")\nflags.DEFINE_float(\"momentum\", 0.9, \"Momentum parameter for SGD optimizer.\")\nflags.DEFINE_integer(\"num_epochs\", 150,\n \"Number of epochs of the training set to process.\")\nflags.DEFINE_integer(\"num_evals\", 10,\n \"How many times to run an evaluation during training.\")\nflags.DEFINE_float(\"learning_rate\", 0.03, \"Learning rate.\")\n\nFLAGS = flags.FLAGS\n\n\ndef main(argv):\n del argv\n\n if FLAGS.master is None and FLAGS.tpu_name is None:\n raise RuntimeError(\"You must specify either --master or --tpu_name.\")\n\n if FLAGS.master is not None:\n if FLAGS.tpu_name is not None:\n tf.logging.warn(\"Both --master and --tpu_name are set. 
Ignoring \"\n \"--tpu_name and using --master.\")\n tpu_grpc_url = FLAGS.master\n else:\n tpu_cluster_resolver = (\n tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name,\n zone=FLAGS.tpu_zone,\n project=FLAGS.gcp_project))\n tpu_grpc_url = tpu_cluster_resolver.get_master()\n\n training_examples = 1300 * 1000 * FLAGS.num_epochs\n eval_examples = 50 * 1000\n\n params = {\n \"num_classes\": 1001,\n \"lr\": FLAGS.learning_rate,\n \"min_lr\": 0.005,\n \"momentum\": FLAGS.momentum,\n \"optimizer\": FLAGS.optimizer,\n \"num_eval_examples\": eval_examples,\n \"num_shards\": FLAGS.num_shards,\n \"num_epochs\": FLAGS.num_epochs,\n }\n\n run_config = tpu_config.RunConfig(\n master=tpu_grpc_url,\n model_dir=FLAGS.model_dir,\n save_checkpoints_secs=FLAGS.save_checkpoints_secs,\n session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=False),\n tpu_config=tpu_config.TPUConfig(\n iterations_per_loop=100,\n num_shards=FLAGS.num_shards,\n ),\n )\n\n estimator = tpu_estimator.TPUEstimator(\n model_fn=squeezenet_model.model_fn,\n use_tpu=FLAGS.use_tpu,\n config=run_config,\n train_batch_size=FLAGS.batch_size,\n eval_batch_size=FLAGS.batch_size,\n params=dict(params, use_tpu=FLAGS.use_tpu),\n )\n\n num_evals = max(FLAGS.num_evals, 1)\n examples_per_eval = training_examples // num_evals\n for _ in range(num_evals):\n estimator.train(\n input_fn=data_pipeline.InputReader(FLAGS.data_dir, is_training=True),\n steps=examples_per_eval // FLAGS.batch_size)\n\n tf.logging.info(\"Running evaluation\")\n tf.logging.info(\"%s\",\n estimator.evaluate(\n input_fn=data_pipeline.InputReader(\n FLAGS.data_dir, is_training=False),\n steps=eval_examples // FLAGS.batch_size,\n ))\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run(main)\n" ]
[ [ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.logging.warn", "tensorflow.ConfigProto", "tensorflow.logging.info", "tensorflow.logging.set_verbosity", "tensorflow.contrib.tpu.python.tpu.tpu_config.TPUConfig", "tensorflow.app.run" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
sergpolly/cworld-dekker
[ "7557bbe873e623e9059482722922faca4e784ad0" ]
[ "scripts/python/boundary2tad.py" ]
[ "#!/usr/local/bin/python\r\n\"\"\"\r\n***********************************************\r\n- PROGRAM: boundary2tad.py\r\n- CONTACT: Bryan lajoie ([email protected])\r\n***********************************************\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\nfrom __future__ import division\r\n\r\nimport sys\r\nimport argparse\r\nimport subprocess\r\nimport shlex\r\nimport logging\r\nimport itertools\r\nimport time\r\nimport gzip\r\nimport re\r\nimport os\r\nimport math\r\nimport uuid\r\nimport socket\r\nfrom collections import defaultdict\r\nfrom collections import Counter\r\nfrom datetime import datetime\r\nfrom operator import itemgetter\r\n\r\n# deprecated from scipy and unusde in the script:\r\n# from scipy.stats.stats import nanmean\r\n\r\nimport numpy as np\r\nimport scipy as sp\r\n\r\n# For eigenvectors and eigenvalues\r\nfrom scipy import linalg as la\r\nfrom math import cos,log,sin,sqrt \r\n# deprecated from scipy and unusde in the script:\r\n# from scipy import weave \r\n\r\nverboseprint=lambda *a, **k: None\r\n__version__ = \"1.0\"\r\ndebug = None\r\n\r\ndef main():\r\n \r\n parser=argparse.ArgumentParser(description='Extract data from hdf5 file.',formatter_class=argparse.ArgumentDefaultsHelpFormatter)\r\n\r\n parser.add_argument('-bf' , metavar='--boundaryFile', help=\"boundary input file\", required=True, dest=\"boundaryFile\", type=str, default=\"\")\r\n parser.add_argument('-if' , metavar='--insulationFile', help=\"insulation input file\", required=True, dest=\"insulationFile\", type=str, default=\"\")\r\n parser.add_argument('-bn' , metavar='--boundaryNoise', help=\"boundary noise estimated\", default=0.25, dest=\"boundary_noise\", type=float)\r\n parser.add_argument('-v', '--verbose', dest='verbose', action='count', help='Increase verbosity (specify multiple times for more)')\r\n parser.add_argument('--version', action='version', version='%(prog)s '+__version__)\r\n \r\n args=parser.parse_args()\r\n\r\n boundaryFile = args.boundaryFile\r\n insulationFile = args.insulationFile\r\n boundary_noise = args.boundary_noise\r\n verbose=args.verbose\r\n \r\n log_level = logging.WARNING\r\n if verbose == 1:\r\n log_level = logging.INFO\r\n elif verbose >= 2:\r\n log_level = logging.DEBUG\r\n logging.basicConfig(level=log_level)\r\n \r\n global verboseprint\r\n verboseprint = print if verbose else lambda *a, **k: None\r\n \r\n if not os.path.isfile(boundaryFile):\r\n sys.exit('invalid input file! (non-existant)')\r\n if not os.path.isfile(insulationFile):\r\n sys.exit('invalid input file! 
(non-existent)')\r\n    \r\n    jobName=os.path.basename(boundaryFile)\r\n    jobName=re.sub(\".gz\", \"\", jobName)    \r\n    jobName=re.sub(\".matrix\", \"\", jobName)\r\n    \r\n    verboseprint(\"\")\r\n    \r\n    # load boundary file\r\n    nBoundaries,chr2index,index2chr,boundary_index2field,boundary_field2index,boundary_ref = load_boundary_file(boundaryFile)\r\n\r\n    # load insulation file\r\n    header_rows,header_cols,header2idx,insulation,nan_rowcols = load_insulation_file(insulationFile)\r\n    \r\n    # nchrs\r\n    nchrs=len(chr2index)\r\n    \r\n    # calculate all chr ranges\r\n    chr_range=np.zeros((nchrs,2),dtype='int32')\r\n    for chr_idx in xrange(nchrs):\r\n        chr=index2chr[chr_idx]\r\n        chr_boundaries=np.nonzero(boundary_ref[\"chr\"]==chr)\r\n        chr_range[chr_idx]=np.min(chr_boundaries),np.max(chr_boundaries)\r\n    \r\n    # shift all boundary_field2index keys (due to adding in i=index)\r\n    for key in boundary_field2index:\r\n        value=boundary_field2index[key]\r\n        boundary_field2index[key]=value+1\r\n    boundary_field2index[\"index\"]=0\r\n    \r\n    verboseprint(\"\")\r\n    \r\n    tads=[]\r\n    \r\n    # now assemble tads from boundaries\r\n    #verboseprint(\"assembling tads ... \",end=\"\")    \r\n    for chr_idx in xrange(nchrs):\r\n        chr=index2chr[chr_idx]\r\n        chr_start,chr_end=chr_range[chr_idx]\r\n        \r\n        chr_ref=boundary_ref[chr_start:chr_end]\r\n        chr_boundaries=chr_ref[[\"boundaryStrength\",\"index\"]]\r\n        chr_boundaries=chr_boundaries.tolist()\r\n        chr_boundaries=sorted(chr_boundaries,key=lambda chr_boundaries:chr_boundaries[0], reverse=True)\r\n        \r\n        boundary_ref,chr_boundaries,chr_range,chr_idx,tads,boundary_field2index=assemble_tads(boundary_noise,boundary_ref,chr_boundaries,chr_range,chr_idx,tads,boundary_field2index,header2idx,insulation)\r\n    \r\n    tad_bedFile=jobName+\"__nested-tads.bed\"\r\n    tad_fh=output_wrapper(tad_bedFile,suppress_comments=True)\r\n    print(\"track name='\",jobName,\"__nested-tads' description='\",jobName,\"__nested-tads' visibility=squish\",sep=\"\",end=\"\\n\",file=tad_fh)\r\n\r\n    for i in xrange(len(tads)):\r\n        tad,tad_headers,tad_strength=tads[i]\r\n        \r\n        chr_1=splitHeader(tad[0])[2]\r\n        chr_2=splitHeader(tad[1])[2]\r\n        \r\n        if chr_1 != chr_2:\r\n            sys.exit('error - inter-chr-TAD detected, cis only please!')\r\n        \r\n        chr=chr_1=chr_2\r\n        chr=deGroupChr(chr)\r\n        name=\"___\".join(tad)\r\n        tad_start=splitHeader(tad[0])[3]\r\n        tad_end=splitHeader(tad[1])[4]\r\n        \r\n        print(chr,tad_start,tad_end,name,tad_strength,sep=\"\\t\",file=tad_fh)\r\n    \r\n    tad_fh.close()\r\n    verboseprint(\"done\")\r\n    \r\n    verboseprint(\"\")\r\n    \r\n    # build matrix\r\n    rows=len(header_rows)\r\n    cols=len(header_cols)\r\n    verboseprint(\"building matrix \",\"[\",rows,\"x\",cols,\"]\",sep=\"\")\r\n    matrix=np.zeros((rows,cols),dtype=\"float32\")\r\n    \r\n    matrix=fillTadMatrix(header_rows,header_cols,matrix,tads,header2idx,nan_rowcols)\r\n    \r\n    tadMatrixFile=jobName+'__nested-tads.matrix.gz'\r\n    verboseprint(\"writing tad matrix ...\",end=\"\")\r\n    writeMatrix(header_rows,header_cols,matrix,tadMatrixFile)\r\n    verboseprint(\"done\")\r\n    \r\ndef fillTadMatrix(header_rows,header_cols,matrix,tads,header2idx,nan_rowcols):\r\n    \r\n    for i,t in enumerate(tads):\r\n        tad_start_header,tad_end_header=t[1]\r\n        tad_strength=t[2]\r\n        \r\n        tad_start_idx=header2idx[tad_start_header]\r\n        tad_end_idx=header2idx[tad_end_header]\r\n        \r\n        #print(i,tad_start_header,tad_start_idx,tad_end_header,tad_end_idx,tad_strength,sep=\"\\t\")\r\n        \r\n        for y in xrange(tad_start_idx,tad_end_idx):\r\n            for x in xrange(tad_start_idx,tad_end_idx):\r\n                matrix[y,x]+=tad_strength\r\n        \r\n    # fill all 
nans\r\n for i in nan_rowcols:\r\n idx=header2idx[i]\r\n matrix[idx,:]=np.nan\r\n matrix[:,idx]=np.nan\r\n \r\n return matrix\r\n \r\ndef deGroupChr(chr_id):\r\n return(chr_id.split('-')[0])\r\n \r\ndef assemble_tads(boundary_noise,boundary_ref,chr_boundaries,chr_range,chr_idx,tads,field2index,header2idx,insulation):\r\n\r\n chr_start,chr_end=chr_range[chr_idx]\r\n \r\n while(len(chr_boundaries) != 0):\r\n tmp_strength,tmp_index=chr_boundaries.pop()\r\n \r\n #print(\"\")\r\n #print(\"STARTING TAD #\",tmp_index,boundary_ref[tmp_index][\"header\"],\" ... \")\r\n \r\n boundary_ref[tmp_index][\"available\"]=False\r\n tads=create_tad(boundary_noise,boundary_ref,tmp_index,chr_range[chr_idx],tads,header2idx,insulation)\r\n \r\n return boundary_ref,chr_boundaries,chr_range,chr_idx,tads,field2index\r\n \r\ndef create_tad(boundary_noise,boundary_ref,anchor_idx,chr_bound,tads,header2idx,insulation):\r\n \r\n left_bound=None\r\n left_bound_header=None\r\n left_bound_boundary_header=None\r\n left_ref=np.nonzero(\r\n (boundary_ref[\"available\"]==True) & \r\n ((boundary_ref[\"index\"]>=chr_bound[0]) & (boundary_ref[\"index\"]<=chr_bound[1])) & \r\n (boundary_ref[\"index\"]<anchor_idx) )[0]\r\n left_size=len(left_ref)\r\n \r\n if left_size > 0:\r\n left_bound=np.max(left_ref)\r\n left_bound_boundary_header=boundary_ref[left_bound][\"header\"]\r\n left_bound_header=boundary_ref[left_bound][\"boundaryHeader\"]\r\n \r\n right_bound=None\r\n right_bound_header=None\r\n right_bound_boundary_header=None\r\n right_ref=np.nonzero( \r\n (boundary_ref[\"available\"]==True) & \r\n ((boundary_ref[\"index\"]>=chr_bound[0]) & (boundary_ref[\"index\"]<=chr_bound[1])) & \r\n (boundary_ref[\"index\"]>anchor_idx) )[0]\r\n right_size=len(right_ref)\r\n \r\n if right_size > 0:\r\n right_bound=np.min(right_ref)\r\n right_bound_boundary_header=boundary_ref[right_bound][\"header\"]\r\n right_bound_header=boundary_ref[right_bound][\"boundaryHeader\"]\r\n \r\n anchor_boundary_header=boundary_ref[anchor_idx][\"header\"]\r\n anchor_header=boundary_ref[anchor_idx][\"boundaryHeader\"]\r\n anchor_strength=boundary_ref[anchor_idx][\"boundaryStrength\"]\r\n \r\n verboseprint(\"\\t\",\"ANCHOR\",anchor_idx,anchor_strength)\r\n verboseprint(\"\\t\",left_bound_boundary_header,\"::\",anchor_boundary_header,\"::\",right_bound_boundary_header)\r\n verboseprint(\"\\t\",left_bound_header,\"::\",anchor_header,\"::\",right_bound_header)\r\n verboseprint(\"\\tleft\",left_ref)\r\n verboseprint(\"\\tright\",right_ref)\r\n \r\n # search potential boundaries to the right\r\n \r\n left_idx=None\r\n if left_bound != None:\r\n for i in left_ref[::-1]:\r\n #print(\"\\t\\tleft searching\",i,\"...\")\r\n tmp_strength=boundary_ref[i][\"boundaryStrength\"]\r\n #print(\"\\t\\t\\t\",\"left\",anchor_idx,left_bound,i,anchor_strength,\"vs\",tmp_strength,\"(\",anchor_strength-tmp_strength,\") [\",boundary_noise,\"]\")\r\n \r\n if(tmp_strength > (anchor_strength-boundary_noise)) or (abs(anchor_strength-tmp_strength) > boundary_noise):\r\n #print(\"\\t\\t\\t\\t\",\"found a potential TAD!\",anchor_strength,tmp_strength)\r\n left_idx=i\r\n break\r\n # else keep looking\r\n \r\n if left_idx != None:\r\n #print(\"\\t\\t\\t\\t\",\"good left idx\",left_idx)\r\n tad_start_header_idx=header2idx[boundary_ref[left_idx][\"boundaryHeader\"]]\r\n tad_end_header_idx=header2idx[boundary_ref[anchor_idx][\"boundaryHeader\"]]\r\n tad_insulation=np.array(insulation[tad_start_header_idx:tad_end_header_idx+1])\r\n 
na_pc=np.float((np.count_nonzero(np.isnan(tad_insulation)))/tad_insulation.shape[0])\r\n tad_strength=np.nanmax(tad_insulation)-np.nanmin(tad_insulation)\r\n con_nan=num_consecutive_nan(tad_insulation)\r\n \r\n #print(\"\\t\\t\\t\\t\",na_pc,con_nan)\r\n if na_pc < 0.25 and con_nan < 10:\r\n left_tad=[boundary_ref[left_idx][\"header\"],boundary_ref[anchor_idx][\"header\"]]\r\n left_tad_headers=[boundary_ref[left_idx][\"boundaryHeader\"],boundary_ref[anchor_idx][\"boundaryHeader\"]]\r\n tads.append((left_tad,left_tad_headers,tad_strength))\r\n verboseprint(\"\\tleft_tad\",left_tad,tad_strength)\r\n \r\n # search potential boundaries to the right\r\n \r\n right_idx=None \r\n if right_bound != None:\r\n for i in right_ref:\r\n # print(\"\\t\\tright searching\",i,\"...\")\r\n tmp_strength=boundary_ref[i][\"boundaryStrength\"]\r\n #print(\"\\t\\t\\t\",\"right\",anchor_idx,right_bound,i,anchor_strength,\"vs\",tmp_strength,\"(\",anchor_strength-tmp_strength,\") [\",boundary_noise,\"]\")\r\n \r\n if(tmp_strength > (anchor_strength-boundary_noise)) or (abs(anchor_strength-tmp_strength) > boundary_noise):\r\n #print(\"\\t\\t\\t\\t\",\"found a potential TAD!\",anchor_strength,tmp_strength)\r\n right_idx=i\r\n break\r\n \r\n if right_idx != None:\r\n #print(\"\\t\\t\\t\\t\",\"good right idx\",right_idx)\r\n tad_start_header_idx=header2idx[boundary_ref[anchor_idx][\"boundaryHeader\"]]\r\n tad_end_header_idx=header2idx[boundary_ref[right_idx][\"boundaryHeader\"]]\r\n tad_insulation=np.array(insulation[tad_start_header_idx:tad_end_header_idx+1])\r\n na_pc=np.float((np.count_nonzero(np.isnan(tad_insulation)))/tad_insulation.shape[0])\r\n tad_strength=np.nanmax(tad_insulation)-np.nanmin(tad_insulation)\r\n con_nan=num_consecutive_nan(tad_insulation)\r\n \r\n #print(\"\\t\\t\\t\\t\",na_pc,con_nan)\r\n if na_pc < 0.25 and con_nan < 10:\r\n right_tad=[boundary_ref[anchor_idx][\"header\"],boundary_ref[right_idx][\"header\"]]\r\n right_tad_headers=[boundary_ref[anchor_idx][\"boundaryHeader\"],boundary_ref[right_idx][\"boundaryHeader\"]]\r\n tads.append((right_tad,right_tad_headers,tad_strength))\r\n verboseprint(\"\\tright_tad\",right_tad,tad_strength)\r\n \r\n return(tads)\r\n \r\ndef num_consecutive_nan(arr):\r\n max_con_nan = 0\r\n con_nan = [len(list(v)) for i, v in itertools.groupby(np.isnan(arr)) if i]\r\n if(len(con_nan) > 0):\r\n max_con_nan = max(con_nan)\r\n \r\n return max_con_nan\r\n \r\ndef splitHeader(header):\r\n m=re.search(r'(\\S+)\\|(\\S+)\\|(\\S+):(\\d+)-(\\d+)',header)\r\n if m==None:\r\n sys.exit('error: incorrect header format ['+str(header)+']!') \r\n\r\n header_name,header_assembly,header_chr,header_start,header_end=m.groups()\r\n \r\n return(header_name,header_assembly,header_chr,header_start,header_end)\r\n \r\ndef load_boundary_file(boundaryFile):\r\n\r\n init=1\r\n current_chr=0\r\n chr2index=dict()\r\n index2chr=dict()\r\n \r\n index2field=dict()\r\n field2index=dict()\r\n num_boundaries=0\r\n \r\n b_fh=input_wrapper(boundaryFile)\r\n for line in b_fh:\r\n l=line.rstrip(\"\\n\").split(\"\\t\")\r\n \r\n if line.startswith(\"#\"):\r\n continue\r\n \r\n if init == 1:\r\n index2field=dict(enumerate(l))\r\n field2index=dict((value, key) for key, value in index2field.iteritems())\r\n init=0\r\n continue\r\n \r\n header=l[field2index[\"header\"]]\r\n header_name,header_assembly,header_chr,header_start,header_end=splitHeader(header)\r\n \r\n if(header_chr not in chr2index):\r\n chr2index[header_chr]=current_chr\r\n index2chr[current_chr]=header_chr\r\n current_chr+=1\r\n \r\n 
chr_id=chr2index[header_chr]\r\n if(chr_id < (current_chr-1)):\r\n sys.exit('improperly sorted boundary file!')\r\n \r\n num_boundaries += 1\r\n \r\n b_fh.close()\r\n \r\n boundary_ref=np.empty(num_boundaries, \r\n dtype={'names':['index', 'header', 'boundaryHeader', 'start', 'end', 'boundaryStrength','chr','available'],\r\n 'formats':['int64','a500','a500','int64','int64','float64','int64','bool']})\r\n \r\n init=1\r\n i=0\r\n b_fh=input_wrapper(boundaryFile)\r\n for line in b_fh:\r\n l=line.rstrip(\"\\n\").split(\"\\t\")\r\n \r\n if line.startswith(\"#\"):\r\n continue\r\n \r\n if init == 1:\r\n index2field=dict(enumerate(l))\r\n field2index=dict((value, key) for key, value in index2field.iteritems())\r\n init=0\r\n continue\r\n \r\n header=l[field2index[\"header\"]]\r\n header_name,header_assembly,header_chr,header_start,header_end=splitHeader(header)\r\n \r\n if(header_chr not in chr2index):\r\n chr2index[header_chr]=current_chr\r\n index2chr[current_chr]=header_chr\r\n current_chr+=1\r\n \r\n chr_id=chr2index[header_chr]\r\n if(chr_id < (current_chr-1)):\r\n sys.exit('improperly sorted boundary file!')\r\n \r\n\r\n boundary_ref[i]=(i,l[field2index[\"header\"]],l[field2index[\"boundaryHeader\"]],l[field2index[\"start\"]],l[field2index[\"end\"]],l[field2index[\"boundaryInsulation\"]],chr_id,1)\r\n \r\n i += 1\r\n \r\n b_fh.close()\r\n \r\n # if using insulation score\r\n boundary_ref[\"boundaryStrength\"]=boundary_ref[\"boundaryStrength\"]*-1\r\n boundary_ref[\"boundaryStrength\"]=boundary_ref[\"boundaryStrength\"]+abs(min(boundary_ref[\"boundaryStrength\"]))\r\n \r\n return(num_boundaries,chr2index,index2chr,index2field,field2index,boundary_ref)\r\n\r\n \r\ndef load_insulation_file(insulationFile):\r\n\r\n i_fh=input_wrapper(insulationFile)\r\n \r\n headers=[]\r\n \r\n header2idx=dict()\r\n insulation=[]\r\n \r\n init=1\r\n index2field=dict()\r\n field2index=dict()\r\n nan_rowcols=[]\r\n \r\n i=0\r\n for line in i_fh:\r\n l=line.rstrip(\"\\n\").split(\"\\t\")\r\n \r\n if line.startswith(\"#\"):\r\n continue\r\n \r\n if init == 1:\r\n index2field=dict(enumerate(l))\r\n field2index=dict((value, key) for key, value in index2field.iteritems())\r\n init=0\r\n continue\r\n \r\n header=l[field2index[\"header\"]]\r\n insulation_score=(l[field2index[\"insulationScore\"]])\r\n if insulation_score == 'NA':\r\n insulation_score=np.nan\r\n nan_rowcols.append(header)\r\n insulation_score=float(insulation_score)\r\n \r\n header2idx[header]=i\r\n insulation.append(insulation_score)\r\n headers.append(header)\r\n \r\n i += 1\r\n \r\n \r\n return(headers,headers,header2idx,insulation,nan_rowcols)\r\n \r\ndef input_wrapper(infile):\r\n if infile.endswith('.gz'):\r\n fh=gzip.open(infile,'r')\r\n else:\r\n fh=open(infile,'r')\r\n \r\n return fh\r\n \r\ndef output_wrapper(outfile,append=False,suppress_comments=False):\r\n \r\n if outfile.endswith('.gz'):\r\n if append:\r\n fh=gzip.open(outfile,'a')\r\n else:\r\n fh=gzip.open(outfile,'w') \r\n else:\r\n if append:\r\n fh=open(outfile,'a')\r\n else:\r\n fh=open(outfile,'w')\r\n \r\n # disable comment(s)if (UCSC format file)\r\n if outfile.endswith('.bed'):\r\n suppress_comments = True\r\n if outfile.endswith('.bed.gz'):\r\n suppress_comments = True\r\n if outfile.endswith('.bedGraph'):\r\n suppress_comments = True\r\n if outfile.endswith('.bedGraph.gz'):\r\n suppress_comments = True\r\n if outfile.endswith('.wig'):\r\n suppress_comments = True\r\n if outfile.endswith('.wig.gz'):\r\n suppress_comments = True\r\n if outfile.endswith('.sam'):\r\n 
suppress_comments = True\r\n if outfile.endswith('.sam.gz'):\r\n suppress_comments = True\r\n if outfile.endswith('.bam'):\r\n suppress_comments = True\r\n if outfile.endswith('.fastq'):\r\n suppress_comments = True\r\n if outfile.endswith('.fastq.gz'):\r\n suppress_comments = True\r\n\r\n if not suppress_comments:\r\n print(\"## \",os.path.basename(__file__),sep=\"\",file=fh)\r\n print(\"## \",sep=\"\",file=fh)\r\n print(\"## Dekker Lab\",sep=\"\",file=fh)\r\n print(\"## Contact: Bryan R. Lajoie\",sep=\"\",file=fh)\r\n print(\"## https://github.com/blajoie\",sep=\"\",file=fh)\r\n print(\"## \",sep=\"\",file=fh)\r\n print(\"## Version:\\t\",__version__,sep=\"\",file=fh)\r\n print(\"## Date:\\t\",get_date(),sep=\"\",file=fh)\r\n print(\"## Host:\\t\",get_compute_resource(),sep=\"\",file=fh)\r\n \r\n return(fh)\r\n\r\ndef get_date():\r\n time=datetime.now()\r\n date=time.strftime('%I:%M:%S %p, %m/%d/%Y')\r\n \r\n return date\r\n\r\ndef get_compute_resource():\r\n return(socket.gethostname())\r\n\r\ndef writeMatrix(header_rows,header_cols,matrix,matrixFile,precision=4):\r\n \"\"\"\r\n write a np matrix with row/col headers - my5C file format - txt formatted gzipped file\r\n \"\"\"\r\n \r\n nrows=len(header_rows)\r\n ncols=len(header_cols)\r\n \r\n # interaction matrix output\r\n out_fh=gzip.open(matrixFile,\"wb\")\r\n \r\n # write matrix col headers\r\n header=[str(i) for i in header_cols]\r\n print(str(nrows)+\"x\"+str(ncols)+\"\\t\"+\"\\t\".join(header),file=out_fh)\r\n\r\n format_func=(\"{:0.\"+str(precision)+\"f}\").format\r\n \r\n k=0\r\n \r\n for i in xrange(nrows):\r\n print(header_rows[i]+\"\\t\"+\"\\t\".join(map(format_func,matrix[i,:])),file=out_fh)\r\n \r\n out_fh.close()\r\n\r\nif __name__==\"__main__\":\r\n main()\r\n" ]
[ [ "numpy.nanmax", "numpy.nonzero", "numpy.min", "numpy.isnan", "numpy.nanmin", "numpy.max", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Timothy102/EEG-Sleep
[ "2dd7020fc8c3e7e5c195416b9177227aef4dc278" ]
[ "dhedfreader.py" ]
[ "import re, logging\nimport numpy as np\nfrom collections import namedtuple\nfrom functools import reduce\nimport datetime\n\nEVENT_CHANNEL = 'EDF Annotations'\nlog = logging.getLogger(__name__)\n\nclass EDFEndOfData(BaseException): pass\n\n\ndef tal(tal_str):\n '''Return a list with (onset, duration, annotation) tuples for an EDF+ TAL\n stream.\n '''\n exp = '(?P<onset>[+\\-]\\d+(?:\\.\\d*)?)' + \\\n '(?:\\x15(?P<duration>\\d+(?:\\.\\d*)?))?' + \\\n '(\\x14(?P<annotation>[^\\x00]*))?' + \\\n '(?:\\x14\\x00)'\n\n def annotation_to_list(annotation):\n #return str(annotation, 'utf-8').split('\\x14') if annotation else []\n return annotation.split('\\x14') if annotation else []\n\n def parse(dic):\n return (\n float(dic['onset']),\n float(dic['duration']) if dic['duration'] else 0.,\n annotation_to_list(dic['annotation']))\n\n return [parse(m.groupdict()) for m in re.finditer(exp, tal_str)]\n\n\ndef edf_header(f):\n h = {}\n assert f.tell() == 0 # check file position\n assert f.read(8) == '0 '\n\n # recording info)\n h['local_subject_id'] = f.read(80).strip()\n h['local_recording_id'] = f.read(80).strip()\n\n # parse timestamp\n (day, month, year) = [int(x) for x in re.findall('(\\d+)', f.read(8))]\n (hour, minute, sec)= [int(x) for x in re.findall('(\\d+)', f.read(8))]\n h['date_time'] = str(datetime.datetime(year + 2000, month, day,\n hour, minute, sec))\n\n # misc\n header_nbytes = int(f.read(8))\n subtype = f.read(44)[:5]\n h['EDF+'] = subtype in ['EDF+C', 'EDF+D']\n h['contiguous'] = subtype != 'EDF+D'\n h['n_records'] = int(f.read(8))\n h['record_length'] = float(f.read(8)) # in seconds\n nchannels = h['n_channels'] = int(f.read(4))\n\n # read channel info\n channels = list(range(h['n_channels']))\n h['label'] = [f.read(16).strip() for n in channels]\n h['transducer_type'] = [f.read(80).strip() for n in channels]\n h['units'] = [f.read(8).strip() for n in channels]\n h['physical_min'] = np.asarray([float(f.read(8)) for n in channels])\n h['physical_max'] = np.asarray([float(f.read(8)) for n in channels])\n h['digital_min'] = np.asarray([float(f.read(8)) for n in channels])\n h['digital_max'] = np.asarray([float(f.read(8)) for n in channels])\n h['prefiltering'] = [f.read(80).strip() for n in channels]\n h['n_samples_per_record'] = [int(f.read(8)) for n in channels]\n f.read(32 * nchannels) # reserved\n\n assert f.tell() == header_nbytes\n return h\n\n\nclass BaseEDFReader:\n def __init__(self, file):\n self.file = file\n\n\n def read_header(self):\n self.header = h = edf_header(self.file)\n\n # calculate ranges for rescaling\n self.dig_min = h['digital_min']\n self.phys_min = h['physical_min']\n phys_range = h['physical_max'] - h['physical_min']\n dig_range = h['digital_max'] - h['digital_min']\n assert np.all(phys_range > 0)\n assert np.all(dig_range > 0)\n self.gain = phys_range / dig_range\n\n\n def read_raw_record(self):\n '''Read a record with data and return a list containing arrays with raw\n bytes.\n '''\n result = []\n for nsamp in self.header['n_samples_per_record']:\n samples = self.file.read(nsamp * 2)\n if len(samples) != nsamp * 2:\n raise EDFEndOfData\n result.append(samples)\n return result\n\n\n def convert_record(self, raw_record):\n '''Convert a raw record to a (time, signals, events) tuple based on\n information in the header.\n '''\n h = self.header\n dig_min, phys_min, gain = self.dig_min, self.phys_min, self.gain\n time = float('nan')\n signals = []\n events = []\n for (i, samples) in enumerate(raw_record):\n if h['label'][i] == EVENT_CHANNEL:\n ann = 
tal(samples)\n time = ann[0][0]\n events.extend(ann[1:])\n # print(i, samples)\n # exit()\n else:\n # 2-byte little-endian integers\n dig = np.fromstring(samples, '<i2').astype(np.float32)\n phys = (dig - dig_min[i]) * gain[i] + phys_min[i]\n signals.append(phys)\n\n return time, signals, events\n\n\n def read_record(self):\n return self.convert_record(self.read_raw_record())\n\n\n def records(self):\n '''\n Record generator.\n '''\n try:\n while True:\n yield self.read_record()\n except EDFEndOfData:\n pass\n\n\ndef load_edf(edffile):\n '''Load an EDF+ file.\n Very basic reader for EDF and EDF+ files. While BaseEDFReader does support\n exotic features like non-homogeneous sample rates and loading only parts of\n the stream, load_edf expects a single fixed sample rate for all channels and\n tries to load the whole file.\n Parameters\n ----------\n edffile : file-like object or string\n Returns\n -------\n Named tuple with the fields:\n X : NumPy array with shape p by n.\n Raw recording of n samples in p dimensions.\n sample_rate : float\n The sample rate of the recording. Note that mixed sample-rates are not\n supported.\n sens_lab : list of length p with strings\n The labels of the sensors used to record X.\n time : NumPy array with length n\n The time offset in the recording for each sample.\n annotations : a list with tuples\n EDF+ annotations are stored in (start, duration, description) tuples.\n start : float\n Indicates the start of the event in seconds.\n duration : float\n Indicates the duration of the event in seconds.\n description : list with strings\n Contains (multiple?) descriptions of the annotation event.\n '''\n if isinstance(edffile, str):\n with open(edffile, 'rb') as f:\n return load_edf(f) # convert filename to file\n\n reader = BaseEDFReader(edffile)\n reader.read_header()\n\n h = reader.header\n log.debug('EDF header: %s' % h)\n\n # get sample rate info\n nsamp = np.unique(\n [n for (l, n) in zip(h['label'], h['n_samples_per_record'])\n if l != EVENT_CHANNEL])\n assert nsamp.size == 1, 'Multiple sample rates not supported!'\n sample_rate = float(nsamp[0]) / h['record_length']\n\n rectime, X, annotations = list(zip(*reader.records()))\n X = np.hstack(X)\n annotations = reduce(operator.add, annotations)\n chan_lab = [lab for lab in reader.header['label'] if lab != EVENT_CHANNEL]\n\n # create timestamps\n if reader.header['contiguous']:\n time = np.arange(X.shape[1]) / sample_rate\n else:\n reclen = reader.header['record_length']\n within_rec_time = np.linspace(0, reclen, nsamp, endpoint=False)\n time = np.hstack([t + within_rec_time for t in rectime])\n\n tup = namedtuple('EDF', 'X sample_rate chan_lab time annotations')\n return tup(X, sample_rate, chan_lab, time, annotations)" ]
[ [ "numpy.hstack", "numpy.linspace", "numpy.arange", "numpy.all", "numpy.fromstring" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bnord01/alpha-zero-general-sogo
[ "76e08d2fc1f0d331018ee1b07bdd33128fbd6d83" ]
[ "othello/tensorflow/NNet.py" ]
[ "import os\nimport shutil\nimport time\nimport random\nimport numpy as np\nimport math\nimport sys\nsys.path.append('../../')\nfrom utils import *\nfrom pytorch_classification.utils import Bar, AverageMeter\nfrom NeuralNet import NeuralNet\n\nimport tensorflow as tf\nfrom .OthelloNNet import OthelloNNet as onnet\n\nargs = dotdict({\n 'lr': 0.001,\n 'dropout': 0.3,\n 'epochs': 10,\n 'batch_size': 64,\n 'num_channels': 512,\n})\n\nclass NNetWrapper(NeuralNet):\n def __init__(self, game):\n self.nnet = onnet(game, args)\n self.board_x, self.board_y = game.board_size()\n self.action_size = game.action_size()\n\n self.sess = tf.Session(graph=self.nnet.graph)\n self.saver = None\n with tf.Session() as temp_sess:\n temp_sess.run(tf.global_variables_initializer())\n self.sess.run(tf.variables_initializer(self.nnet.graph.get_collection('variables')))\n\n def train(self, examples):\n \"\"\"\n examples: list of examples, each example is of form (board, pi, v)\n \"\"\"\n\n for epoch in range(args.epochs):\n print('EPOCH ::: ' + str(epoch+1))\n data_time = AverageMeter()\n batch_time = AverageMeter()\n pi_losses = AverageMeter()\n v_losses = AverageMeter()\n end = time.time()\n\n bar = Bar('Training Net', max=int(len(examples)/args.batch_size))\n batch_idx = 0\n\n # self.sess.run(tf.local_variables_initializer())\n while batch_idx < int(len(examples)/args.batch_size):\n sample_ids = np.random.randint(len(examples), size=args.batch_size)\n boards, pis, vs = list(zip(*[examples[i] for i in sample_ids]))\n\n # predict and compute gradient and do SGD step\n input_dict = {self.nnet.input_boards: boards, self.nnet.target_pis: pis, self.nnet.target_vs: vs, self.nnet.dropout: args.dropout, self.nnet.isTraining: True}\n\n # measure data loading time\n data_time.update(time.time() - end)\n\n # record loss\n self.sess.run(self.nnet.train_step, feed_dict=input_dict)\n pi_loss, v_loss = self.sess.run([self.nnet.loss_pi, self.nnet.loss_v], feed_dict=input_dict)\n pi_losses.update(pi_loss, len(boards))\n v_losses.update(v_loss, len(boards))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n batch_idx += 1\n\n # plot progress\n bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss_pi: {lpi:.4f} | Loss_v: {lv:.3f}'.format(\n batch=batch_idx,\n size=int(len(examples)/args.batch_size),\n data=data_time.avg,\n bt=batch_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td,\n lpi=pi_losses.avg,\n lv=v_losses.avg,\n )\n bar.next()\n bar.finish()\n\n\n def predict(self, board):\n \"\"\"\n board: np array with board\n \"\"\"\n # timing\n start = time.time()\n\n # preparing input\n board = board[np.newaxis, :, :]\n\n # run\n prob, v = self.sess.run([self.nnet.prob, self.nnet.v], feed_dict={self.nnet.input_boards: board, self.nnet.dropout: 0, self.nnet.isTraining: False})\n\n #print('PREDICTION TIME TAKEN : {0:03f}'.format(time.time()-start))\n return prob[0], v[0]\n\n def save_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):\n filepath = os.path.join(folder, filename)\n if not os.path.exists(folder):\n print(\"Checkpoint Directory does not exist! Making directory {}\".format(folder))\n os.mkdir(folder)\n else:\n print(\"Checkpoint Directory exists! 
\")\n if self.saver == None: \n self.saver = tf.train.Saver(self.nnet.graph.get_collection('variables'))\n with self.nnet.graph.as_default():\n self.saver.save(self.sess, filepath)\n\n def load_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):\n filepath = os.path.join(folder, filename)\n if not os.path.exists(filepath+'.meta'):\n raise(\"No model in path {}\".format(filepath))\n with self.nnet.graph.as_default():\n self.saver = tf.train.Saver()\n self.saver.restore(self.sess, filepath)" ]
[ [ "tensorflow.train.Saver", "tensorflow.global_variables_initializer", "tensorflow.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
carrier-io/perfreporter
[ "f5b472137af5471bce0252af7de6995cc9d87532" ]
[ "perfreporter/jtl_parser.py" ]
[ "import csv\nimport re\nfrom os import path\nimport numpy as np\n\n\nFIELDNAMES = 'timeStamp', 'response_time', 'request_name', \"status_code\", \"responseMessage\", \"threadName\", \"dataType\",\\\n \"success\", \"failureMessage\", \"bytes\", \"sentBytes\", \"grpThreads\", \"allThreads\", \"URL\", \"Latency\",\\\n \"IdleTime\", \"Connect\"\n\n\nclass JTLParser(object):\n\n def parse_jtl(self, log_file=\"/tmp/reports/jmeter.jtl\"):\n unparsed_counter = 0\n requests = {}\n if not path.exists(log_file):\n return requests\n start_timestamp, end_timestamp = float('inf'), 0\n with open(log_file, 'r+', encoding=\"utf-8\") as tsv:\n entries = csv.DictReader(tsv, delimiter=\",\", fieldnames=FIELDNAMES, restval=\"not_found\")\n\n for entry in entries:\n\n try:\n if entry['request_name'] != 'label':\n if re.search(r'-\\d+$', entry['request_name']):\n continue\n if start_timestamp > int(entry['timeStamp']):\n start_timestamp = int(entry['timeStamp']) - int(entry['response_time'])\n if end_timestamp < int(entry['timeStamp']):\n end_timestamp = int(entry['timeStamp'])\n if entry['request_name'] not in requests:\n data = {'request_name': entry['request_name'],\n 'response_time': [int(entry['response_time'])]}\n if entry['success'] == 'true':\n data['OK'], data['KO'] = 1, 0\n else:\n data['OK'], data['KO'] = 0, 1\n requests[entry['request_name']] = data\n else:\n requests[entry['request_name']]['response_time'].append(int(entry['response_time']))\n if entry['success'] == 'true':\n requests[entry['request_name']]['OK'] += 1\n else:\n requests[entry['request_name']]['KO'] += 1\n except Exception as e:\n print(e)\n unparsed_counter += 1\n pass\n\n if unparsed_counter > 0:\n print(\"Unparsed errors: %d\" % unparsed_counter)\n for req in requests:\n requests[req]['response_time'] = int(np.percentile(requests[req]['response_time'], 95, interpolation=\"linear\"))\n duration = int((end_timestamp - start_timestamp)/1000)\n throughput = self.calculate_throughput(requests, duration)\n error_rate = self.calculate_error_rate(requests)\n\n results = {\"requests\": requests, \"throughput\": throughput, \"error_rate\": error_rate}\n\n return results\n\n @staticmethod\n def calculate_throughput(requests, duration):\n count = 0\n for req in requests:\n count += requests[req]['OK']\n return round(float(count/duration), 2)\n\n @staticmethod\n def calculate_error_rate(requests):\n count, failed = 0, 0\n for req in requests:\n count += requests[req]['OK']\n count += requests[req]['KO']\n failed += requests[req]['KO']\n return round(float(failed/count) * 100, 2)\n" ]
[ [ "numpy.percentile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
makaveli10/4P
[ "fea29d9b5a1bd3f7ca3fef1e7b1a2218ba14e8db", "fea29d9b5a1bd3f7ca3fef1e7b1a2218ba14e8db" ]
[ "tflib/data_helpers.py", "tflib/train_tff.py" ]
[ "import os\nimport pathlib\n\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\n\ndef make_client_ids(clients_dir: pathlib.Path):\n return [p.name for p in clients_dir.iterdir() if p.is_dir()]\n\n\ndef provide_client_data_fn(\n clients_dir: pathlib.Path,\n img_height: int,\n img_width: int,\n batch_size: int,\n augment_fn: callable = None):\n process_path = _provide_process_fn(\n img_width=img_width, img_height=img_height)\n\n def create_tf_dataset_for_client(client_id):\n this_client = clients_dir.joinpath(client_id)\n image_glob = this_client / '*/*'\n ds = tf.data.Dataset.list_files(str(image_glob))\n ds = ds.map(process_path, num_parallel_calls=AUTOTUNE)\n if augment_fn is not None:\n ds = ds.map(augment_fn, num_parallel_calls=AUTOTUNE)\n return ds.batch(batch_size)\n\n return create_tf_dataset_for_client\n\n\ndef _get_label(file_path):\n # convert the path to a list of path components\n parts = tf.strings.split(file_path, os.path.sep)\n # The second to last is the class-directory\n return tf.cast(int(parts[-2]), tf.int64)\n\n\ndef _decode_img(img, img_width, img_height):\n # convert the compressed string to a 3D uint8 tensor\n img = tf.image.decode_jpeg(img, channels=3)\n # Use `convert_image_dtype` to convert to floats in the [0,1] range.\n img = tf.image.convert_image_dtype(img, tf.float32)\n # resize the image to the desired size.\n return tf.image.resize(img, [img_width, img_height])\n\n\ndef _provide_process_fn(**decode_params):\n\n def process_path(file_path):\n label = _get_label(file_path)\n # load the raw data from the file as a string\n img = tf.io.read_file(file_path)\n img = _decode_img(img, **decode_params)\n return img, label\n\n return process_path\n\n\n# data = ClientData.from_clients_and_fn(client_ids, create_tf_dataset_for_client)", "import pathlib\nimport random\n\nfrom absl import app\nfrom absl import flags\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nfrom data_helpers import make_client_ids\nfrom data_helpers import provide_client_data_fn\nfrom model_helpers import build_vgg16\n\n\n# Hyperparams\nflags.DEFINE_integer(\"num_rounds\", default=10,\n help=\"Number of rounds of federated averaging.\")\nflags.DEFINE_integer(\"clients_per_round\", default=10,\n help=\"Number of clients to sample for training per round.\")\nflags.DEFINE_float(\"client_learning_rate\", default=.02,\n help=\"Learning rate for client optimizers.\")\nflags.DEFINE_float(\"server_learning_rate\", default=1.0,\n help=\"Learning rate for client optimizers.\")\nflags.DEFINE_bool(\"freeze_model\", default=True,\n help=\"Freeze early layers in the model (if its builder fn allows)\")\nflags.DEFINE_integer(\"image_width\", default=224,\n help=\"Width dimension of input radiology images.\")\nflags.DEFINE_integer(\"image_height\", default=224,\n help=\"Height dimension of input radiology images.\")\nflags.DEFINE_integer(\"batch_size\", default=4,\n help=\"Local batch size for each client.\")\nflags.DEFINE_enum(\"model\", default=\"vgg16\", enum_values=[\"vgg16\"],\n help=\"Which model to use. 
Must have a builder in model_helpers.\")\n\n# Data flags\nflags.DEFINE_string(\"data_root\", default=\"./data\",\n help=\"Path to the root folder containing chest xray data\")\nflags.DEFINE_string(\"train_clients_subdir\", default=\"train_clients\",\n help=\"Subdirectory of `data_root` containing data allocated to the \"\n \"training subset of clients.\")\nflags.DEFINE_string(\"test_clients_subdir\", default=\"test_clients\",\n help=\"Subdirectory of `data-root` containing data allocated to the \"\n \"evaluation subset of clients.\")\n\nFLAGS = flags.FLAGS\n\n\ndef main(argv):\n dataroot = pathlib.Path(FLAGS.data_root)\n train_path = dataroot.joinpath(FLAGS.train_clients_subdir)\n test_path = dataroot.joinpath(FLAGS.test_clients_subdir)\n train_client_ids = make_client_ids(train_path)\n test_client_ids = make_client_ids(test_path)\n\n img_dims = (FLAGS.image_width, FLAGS.image_height)\n train_client_fn = provide_client_data_fn(train_path, *img_dims, FLAGS.batch_size)\n test_client_fn = provide_client_data_fn(test_path, *img_dims, FLAGS.batch_size)\n\n train_clients = tff.simulation.ClientData.from_clients_and_fn(\n train_client_ids, train_client_fn)\n test_clients = tff.simulation.ClientData.from_clients_and_fn(\n test_client_ids, test_client_fn)\n\n federated_train_data = [\n train_clients.create_tf_dataset_for_client(client_id)\n for client_id in train_client_ids\n ]\n federated_test_data = [\n test_clients.create_tf_dataset_for_client(client_id)\n for client_id in test_client_ids\n ]\n\n client_opt_fn = lambda: tf.keras.optimizers.SGD(FLAGS.client_learning_rate)\n server_opt_fn = lambda: tf.keras.optimizers.SGD(FLAGS.server_learning_rate)\n\n iterative_process = tff.learning.build_federated_averaging_process(\n model_fn, client_opt_fn, server_opt_fn)\n\n state = iterative_process.initialize()\n for rnd in range(FLAGS.num_rounds):\n round_clients = random.sample(federated_train_data, FLAGS.clients_per_round)\n state, metrics = iterative_process.next(state, round_clients)\n print('round {rnd}, metrics={metrics}'.format(rnd=rnd, metrics=metrics))\n\n\ndef model_fn():\n x_spec = (tf.float32, [None, 224, 224, 3])\n y_spec = (tf.int64, [None])\n input_spec = (x_spec, y_spec)\n loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n model = build_vgg16(freeze=FLAGS.freeze_model)\n return tff.learning.from_keras_model(\n model, loss_fn, input_spec=input_spec,\n metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])\n\n\nif __name__ == \"__main__\":\n app.run(main)\n" ]
[ [ "tensorflow.strings.split", "tensorflow.image.resize", "tensorflow.image.convert_image_dtype", "tensorflow.io.read_file", "tensorflow.image.decode_jpeg" ], [ "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.keras.optimizers.SGD" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
jbuckman/pytorch-lightning
[ "cc74fb717a7127fecd4dbb9c743ba28b40de7f64", "cc74fb717a7127fecd4dbb9c743ba28b40de7f64" ]
[ "tests/helpers/pipelines.py", "tests/models/test_tpu.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport torch\n\nfrom pytorch_lightning import LightningDataModule, LightningModule, Trainer\nfrom pytorch_lightning.metrics.functional import accuracy\nfrom pytorch_lightning.utilities import DistributedType\nfrom tests.helpers import BoringModel\nfrom tests.helpers.utils import get_default_logger, load_model_from_checkpoint, reset_seed\n\n\ndef run_model_test_without_loggers(\n trainer_options: dict, model: LightningModule, data: LightningDataModule = None, min_acc: float = 0.50\n):\n reset_seed()\n\n # fit model\n trainer = Trainer(**trainer_options)\n trainer.fit(model, datamodule=data)\n\n # correct result and ok accuracy\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n model2 = load_model_from_checkpoint(trainer.logger, trainer.checkpoint_callback.best_model_path, type(model))\n\n # test new model accuracy\n test_loaders = model2.test_dataloader() if not data else data.test_dataloader()\n if not isinstance(test_loaders, list):\n test_loaders = [test_loaders]\n\n if not isinstance(model2, BoringModel):\n for dataloader in test_loaders:\n run_prediction_eval_model_template(model2, dataloader, min_acc=min_acc)\n\n\ndef run_model_test(\n trainer_options,\n model: LightningModule,\n data: LightningDataModule = None,\n on_gpu: bool = True,\n version=None,\n with_hpc: bool = True,\n min_acc: float = 0.25\n):\n reset_seed()\n save_dir = trainer_options['default_root_dir']\n\n # logger file to get meta\n logger = get_default_logger(save_dir, version=version)\n trainer_options.update(logger=logger)\n trainer = Trainer(**trainer_options)\n initial_values = torch.tensor([torch.sum(torch.abs(x)) for x in model.parameters()])\n trainer.fit(model, datamodule=data)\n post_train_values = torch.tensor([torch.sum(torch.abs(x)) for x in model.parameters()])\n\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n # Check that the model is actually changed post-training\n change_ratio = torch.norm(initial_values - post_train_values)\n assert change_ratio > 0.1, f\"the model is changed of {change_ratio}\"\n\n # test model loading\n pretrained_model = load_model_from_checkpoint(logger, trainer.checkpoint_callback.best_model_path, type(model))\n\n # test new model accuracy\n test_loaders = model.test_dataloader() if not data else data.test_dataloader()\n if not isinstance(test_loaders, list):\n test_loaders = [test_loaders]\n\n if not isinstance(model, BoringModel):\n for dataloader in test_loaders:\n run_prediction_eval_model_template(model, dataloader, min_acc=min_acc)\n\n if with_hpc:\n if trainer._distrib_type in (DistributedType.DDP, DistributedType.DDP_SPAWN, DistributedType.DDP2):\n # on hpc this would work fine... 
but need to hack it for the purpose of the test\n trainer.optimizers, trainer.lr_schedulers, trainer.optimizer_frequencies = \\\n trainer.init_optimizers(pretrained_model)\n\n # test HPC saving\n trainer.checkpoint_connector.hpc_save(save_dir, logger)\n # test HPC loading\n checkpoint_path = trainer.checkpoint_connector.get_max_ckpt_path_from_folder(save_dir)\n trainer.checkpoint_connector.hpc_load(checkpoint_path, on_gpu=on_gpu)\n\n\[email protected]_grad()\ndef run_prediction_eval_model_template(trained_model, dataloader, min_acc=0.50):\n # run prediction on 1 batch\n trained_model.cpu()\n trained_model.eval()\n\n batch = next(iter(dataloader))\n x, y = batch\n x = x.flatten(1)\n\n y_hat = trained_model(x)\n acc = accuracy(y_hat.cpu(), y.cpu(), top_k=2).item()\n\n assert acc >= min_acc, f\"This model is expected to get > {min_acc} in test set (it got {acc})\"\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom argparse import ArgumentParser\nfrom unittest import mock\n\nimport pytest\nimport torch\nfrom torch.utils.data import DataLoader\n\nimport tests.helpers.pipelines as tpipes\nimport tests.helpers.utils as tutils\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.accelerators import TPUAccelerator\nfrom pytorch_lightning.callbacks import EarlyStopping\nfrom pytorch_lightning.core.step_result import Result\nfrom pytorch_lightning.plugins import TPUSpawnPlugin\nfrom pytorch_lightning.utilities import _TPU_AVAILABLE\nfrom pytorch_lightning.utilities.distributed import ReduceOp\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom tests.helpers import BoringModel, RandomDataset\nfrom tests.helpers.runif import RunIf\nfrom tests.helpers.utils import pl_multi_process_test\n\nif _TPU_AVAILABLE:\n import torch_xla\n import torch_xla.distributed.xla_multiprocessing as xmp\n SERIAL_EXEC = xmp.MpSerialExecutor()\n\n_LARGER_DATASET = RandomDataset(32, 2000)\n\n\n# 8 cores needs a big dataset\ndef _serial_train_loader():\n return DataLoader(_LARGER_DATASET, batch_size=32)\n\n\nclass SerialLoaderBoringModel(BoringModel):\n\n def train_dataloader(self):\n return DataLoader(RandomDataset(32, 2000), batch_size=32)\n\n def val_dataloader(self):\n return DataLoader(RandomDataset(32, 2000), batch_size=32)\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_model_tpu_cores_1(tmpdir):\n \"\"\"Make sure model trains on TPU.\"\"\"\n tutils.reset_seed()\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=2,\n tpu_cores=1,\n limit_train_batches=4,\n limit_val_batches=4,\n )\n\n model = BoringModel()\n tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)\n\n\[email protected]('tpu_core', [1, 5])\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_model_tpu_index(tmpdir, tpu_core):\n \"\"\"Make sure model trains on TPU.\"\"\"\n tutils.reset_seed()\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n 
max_epochs=2,\n tpu_cores=[tpu_core],\n limit_train_batches=4,\n limit_val_batches=4,\n )\n\n model = BoringModel()\n tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)\n assert torch_xla._XLAC._xla_get_default_device() == f'xla:{tpu_core}'\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_model_tpu_cores_8(tmpdir):\n \"\"\"Make sure model trains on TPU.\"\"\"\n tutils.reset_seed()\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=1,\n tpu_cores=8,\n limit_train_batches=4,\n limit_val_batches=4,\n )\n\n # 8 cores needs a big dataset\n model = SerialLoaderBoringModel()\n tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False, min_acc=0.05)\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_model_16bit_tpu_cores_1(tmpdir):\n \"\"\"Make sure model trains on TPU.\"\"\"\n tutils.reset_seed()\n trainer_options = dict(\n default_root_dir=tmpdir,\n precision=16,\n progress_bar_refresh_rate=0,\n max_epochs=2,\n tpu_cores=1,\n limit_train_batches=8,\n limit_val_batches=2,\n )\n\n model = BoringModel()\n tpipes.run_model_test(trainer_options, model, on_gpu=False)\n assert os.environ.get('XLA_USE_BF16') == str(1), \"XLA_USE_BF16 was not set in environment variables\"\n\n\[email protected]('tpu_core', [1, 5])\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_model_16bit_tpu_index(tmpdir, tpu_core):\n \"\"\"Make sure model trains on TPU.\"\"\"\n tutils.reset_seed()\n trainer_options = dict(\n default_root_dir=tmpdir,\n precision=16,\n progress_bar_refresh_rate=0,\n max_epochs=2,\n tpu_cores=[tpu_core],\n limit_train_batches=4,\n limit_val_batches=2,\n )\n\n model = BoringModel()\n tpipes.run_model_test(trainer_options, model, on_gpu=False)\n assert torch_xla._XLAC._xla_get_default_device() == f'xla:{tpu_core}'\n assert os.environ.get('XLA_USE_BF16') == str(1), \"XLA_USE_BF16 was not set in environment variables\"\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_model_16bit_tpu_cores_8(tmpdir):\n \"\"\"Make sure model trains on TPU.\"\"\"\n tutils.reset_seed()\n trainer_options = dict(\n default_root_dir=tmpdir,\n precision=16,\n progress_bar_refresh_rate=0,\n max_epochs=1,\n tpu_cores=8,\n limit_train_batches=4,\n limit_val_batches=4,\n )\n\n # 8 cores needs a big dataset\n model = SerialLoaderBoringModel()\n tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False, min_acc=0.05)\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_model_tpu_early_stop(tmpdir):\n \"\"\"Test if single TPU core training works\"\"\"\n\n class CustomBoringModel(BoringModel):\n\n def validation_step(self, *args, **kwargs):\n out = super().validation_step(*args, **kwargs)\n self.log('val_loss', out['x'])\n return out\n\n tutils.reset_seed()\n model = CustomBoringModel()\n trainer = Trainer(\n callbacks=[EarlyStopping(monitor='val_loss')],\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=2,\n limit_train_batches=2,\n limit_val_batches=2,\n tpu_cores=8,\n )\n trainer.fit(model)\n trainer.test(test_dataloaders=DataLoader(RandomDataset(32, 2000), batch_size=32))\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_tpu_grad_norm(tmpdir):\n \"\"\"Test if grad_norm works on TPU.\"\"\"\n tutils.reset_seed()\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=4,\n tpu_cores=1,\n limit_train_batches=0.4,\n limit_val_batches=0.4,\n gradient_clip_val=0.5,\n )\n\n model = BoringModel()\n tpipes.run_model_test(trainer_options, model, 
on_gpu=False, with_hpc=False)\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_tpu_clip_grad_by_value(tmpdir):\n \"\"\"Test if clip_gradients by value works on TPU\"\"\"\n tutils.reset_seed()\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=4,\n tpu_cores=1,\n limit_train_batches=10,\n limit_val_batches=10,\n gradient_clip_val=0.5,\n gradient_clip_algorithm='value'\n )\n\n model = BoringModel()\n tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_dataloaders_passed_to_fit(tmpdir):\n \"\"\"Test if dataloaders passed to trainer works on TPU\"\"\"\n tutils.reset_seed()\n model = BoringModel()\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n tpu_cores=8,\n )\n trainer.fit(\n model,\n train_dataloader=model.train_dataloader(),\n val_dataloaders=model.val_dataloader(),\n )\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n\[email protected](\n ['tpu_cores', 'expected_tpu_id'],\n [pytest.param(1, None), pytest.param(8, None),\n pytest.param([1], 1), pytest.param([8], 8)],\n)\n@RunIf(tpu=True)\ndef test_tpu_id_to_be_as_expected(tpu_cores, expected_tpu_id):\n \"\"\"Test if trainer.tpu_id is set as expected\"\"\"\n assert Trainer(tpu_cores=tpu_cores).accelerator_connector.tpu_id == expected_tpu_id\n\n\ndef test_tpu_misconfiguration():\n \"\"\"Test if trainer.tpu_id is set as expected\"\"\"\n with pytest.raises(MisconfigurationException, match=\"`tpu_cores` can only be\"):\n Trainer(tpu_cores=[1, 8])\n\n\[email protected](_TPU_AVAILABLE, reason=\"test requires missing TPU\")\ndef test_exception_when_no_tpu_found(tmpdir):\n \"\"\"Test if exception is thrown when xla devices are not available\"\"\"\n\n with pytest.raises(MisconfigurationException, match='No TPU devices were found.'):\n Trainer(tpu_cores=8)\n\n\[email protected]('tpu_cores', [1, 8, [1]])\n@RunIf(tpu=True)\ndef test_distributed_backend_set_when_using_tpu(tmpdir, tpu_cores):\n \"\"\"Test if distributed_backend is set to `tpu` when tpu_cores is not None\"\"\"\n assert Trainer(tpu_cores=tpu_cores).distributed_backend == \"tpu\"\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_broadcast_on_tpu():\n \"\"\" Checks if an object from the master process is broadcasted to other processes correctly\"\"\"\n\n def test_broadcast(rank):\n trainer = Trainer(tpu_cores=8)\n assert isinstance(trainer.accelerator, TPUAccelerator)\n assert isinstance(trainer.training_type_plugin, TPUSpawnPlugin)\n obj = (\"ver_0.5\", \"logger_name\", rank)\n result = trainer.training_type_plugin.broadcast(obj)\n assert result == (\"ver_0.5\", \"logger_name\", 0)\n\n xmp.spawn(test_broadcast, nprocs=8, start_method='fork')\n\n\[email protected](\n [\"tpu_cores\", \"expected_tpu_id\", \"error_expected\"],\n [\n pytest.param(1, None, False),\n pytest.param(8, None, False),\n pytest.param([1], 1, False),\n pytest.param([8], 8, False),\n pytest.param(\"1,\", 1, False),\n pytest.param(\"1\", None, False),\n pytest.param(\"9, \", 9, True),\n pytest.param([9], 9, True),\n pytest.param([0], 0, True),\n pytest.param(2, None, True),\n pytest.param(10, None, True),\n ],\n)\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_tpu_choice(tmpdir, tpu_cores, expected_tpu_id, error_expected):\n if error_expected:\n with pytest.raises(MisconfigurationException, match=r\".*tpu_cores` can only be 1, 8 or [<1-8>]*\"):\n Trainer(default_root_dir=tmpdir, tpu_cores=tpu_cores)\n else:\n trainer = 
Trainer(default_root_dir=tmpdir, tpu_cores=tpu_cores)\n assert trainer.accelerator_connector.tpu_id == expected_tpu_id\n\n\[email protected](\n ['cli_args', 'expected'],\n [pytest.param('--tpu_cores=8', {'tpu_cores': 8}),\n pytest.param(\"--tpu_cores=1,\", {'tpu_cores': '1,'})]\n)\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_tpu_cores_with_argparse(cli_args, expected):\n \"\"\"Test passing tpu_cores in command line\"\"\"\n cli_args = cli_args.split(' ') if cli_args else []\n with mock.patch(\"argparse._sys.argv\", [\"any.py\"] + cli_args):\n parser = ArgumentParser(add_help=False)\n parser = Trainer.add_argparse_args(parent_parser=parser)\n args = Trainer.parse_argparser(parser)\n\n for k, v in expected.items():\n assert getattr(args, k) == v\n assert Trainer.from_argparse_args(args)\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_tpu_reduce():\n \"\"\"Test tpu spawn reduce operation \"\"\"\n\n def test_reduce(rank):\n trainer = Trainer(tpu_cores=8)\n # faster this way\n reduce_ops = [\"mean\", \"AVG\", \"undefined\", \"sum\", ReduceOp.SUM, ReduceOp.MAX]\n for reduce_op in reduce_ops:\n if reduce_op == \"undefined\" or reduce_op == ReduceOp.MAX:\n with pytest.raises(MisconfigurationException, match=\"TPUSpawn TrainingTypePlugin only support\"):\n result = trainer.training_type_plugin.reduce(1, reduce_op)\n else:\n result = trainer.training_type_plugin.reduce(1, reduce_op)\n if isinstance(reduce_op, str) and reduce_op.lower() in (\"mean\", \"avg\"):\n assert result.item() == 1\n else:\n assert result.item() == 8\n\n xmp.spawn(test_reduce, nprocs=8, start_method='fork')\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\[email protected](\"clip_val\", [10])\[email protected](\"torch.nn.utils.clip_grad_norm_\")\ndef test_tpu_precision_16_clip_gradients(mock_clip_grad_norm, clip_val, tmpdir):\n \"\"\"\n Ensure that clip gradients is only called if the value is greater than 0.\n TODO: Fix (test fails with parametrize)\n \"\"\"\n tutils.reset_seed()\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=1,\n tpu_cores=1,\n precision=16,\n limit_train_batches=4,\n limit_val_batches=4,\n gradient_clip_val=clip_val,\n )\n model = BoringModel()\n tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)\n\n if clip_val > 0:\n mock_clip_grad_norm.assert_called()\n else:\n mock_clip_grad_norm.assert_not_called()\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_if_test_works_with_checkpoint_false(tmpdir):\n \"\"\"Ensure that model trains properly when `checkpoint_callback` is set to False.\"\"\"\n\n # Train a model on TPU\n model = BoringModel()\n trainer = Trainer(max_epochs=1, tpu_cores=8, default_root_dir=tmpdir, fast_dev_run=True, checkpoint_callback=False)\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_tpu_sync_dist():\n \"\"\"Test tpu spawn sync dist operation \"\"\"\n\n def test_sync_dist(rank):\n tensor = torch.tensor([1.0])\n training_type_plugin = TPUSpawnPlugin()\n\n res = Result()\n res.log(\n \"test_tensor\",\n tensor,\n sync_fn=training_type_plugin.reduce,\n sync_dist=True,\n sync_dist_op=torch.distributed.ReduceOp.SUM\n )\n\n assert res[\"test_tensor\"].item() == 8, \"Result-Log does not work properly with TPU Spawn and Tensors\"\n\n xmp.spawn(test_sync_dist, nprocs=8, start_method='fork')\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_tpu_debug_mode(tmpdir):\n \"\"\"Test if debug mode works on 
TPU.\"\"\"\n\n class DebugModel(BoringModel):\n\n def on_train_start(self):\n assert os.environ.get(\"PT_XLA_DEBUG\") == str(1), \"PT_XLA_DEBUG was not set in environment variables\"\n\n def teardown(self, stage):\n assert \"PT_XLA_DEBUG\" not in os.environ\n\n tutils.reset_seed()\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=4,\n tpu_cores=8,\n limit_train_batches=0.4,\n limit_val_batches=0.4,\n plugins=TPUSpawnPlugin(debug=True),\n )\n\n model = DebugModel()\n tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)\n\n\n@RunIf(tpu=True)\n@pl_multi_process_test\ndef test_tpu_host_world_size(tmpdir):\n \"\"\"Test Host World size env setup on TPU.\"\"\"\n\n class DebugModel(BoringModel):\n\n def on_train_start(self):\n assert os.environ.get(\"XRT_HOST_WORLD_SIZE\") == str(1)\n\n def teardown(self, stage):\n assert \"XRT_HOST_WORLD_SIZE\" not in os.environ\n\n tutils.reset_seed()\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=4,\n tpu_cores=8,\n limit_train_batches=0.4,\n limit_val_batches=0.4,\n )\n\n model = DebugModel()\n tpipes.run_model_test(trainer_options, model, on_gpu=False, with_hpc=False)\n" ]
[ [ "torch.abs", "torch.norm", "torch.no_grad" ], [ "torch.utils.data.DataLoader", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
johnh2o2/pylabeler
[ "afabd72f899efeced0ed488b0addbf15fe101938" ]
[ "test/test.py" ]
[ "# Test script that demonstrates the capabilities\n# of the pylabeler library\n\nfrom pylabeler.labeler import Labeler\nimport matplotlib.pyplot as plt\n\n# Locations of images\nimage_folder = 'img'\nimage_filename = lambda ID : \"%s/%s.jpg\"%(image_folder, ID)\n\n# Where to load/save the labels for each image\nlabel_file = 'labels.txt'\n\n# Allows for keyboard shortcuts (not required)\nkey_mapping = { '1' : 'Human', '2' : 'Lizard' }\n\n# List of all ID's\nids = [ '001', '002', '003', '004', '005', '006' ]\n\n# The image file(s) to show for each ID; must be a dict of lists since \n# more than one image can be used for the same ID\nimage_files = { ID : [ image_filename(ID) ] for ID in ids }\n\n# Starts labeler\nlabeler = Labeler(image_files, label_file, sorted(key_mapping.values()), key_mapping)\nlabeler.connect()\nplt.show(block=True)\n\n" ]
[ [ "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Aaron-Jin-Xu/probabilistic-semantic-image-inpainting
[ "8ce630eaf7e8f9ef5fc5ad19d5474d050d71807d" ]
[ "learners/learner.py" ]
[ "import os\nimport sys\nimport json\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom blocks.helpers import Monitor\nfrom blocks.helpers import visualize_samples, get_nonlinearity, int_shape, get_trainable_variables, broadcast_masks_np\nfrom blocks.optimizers import adam_updates\nimport data.load_data as load_data\nfrom masks import get_generator\n\nclass Learner(object):\n\n def __init__(self, nr_gpu, save_dir, img_size, exp_name='default'):\n self.nr_gpu = nr_gpu\n self.save_dir = save_dir\n self.img_size = img_size\n self.exp_name = exp_name\n self.train_set = None\n self.eval_set = None\n self.test_set = None\n self.data_set = None\n self.batch_size = None\n self.monitor = None\n if not os.path.exists(\"results/\"+self.exp_name):\n os.makedirs(\"results/\"+self.exp_name)\n\n def load_data(self, dataset_name, batch_size, use_debug_mode=False):\n assert dataset_name in ['celeba', 'binarized-mnist', 'church_outdoor'], \"cannot find the dataset\"\n self.data_set = dataset_name\n self.batch_size = batch_size\n assert self.batch_size % self.nr_gpu == 0, \"Batch of data cannot be evenly distributed to {0} GPUs\".format(self.nr_gpu)\n if dataset_name == 'celeba':\n data_dir = \"/data/ziz/not-backed-up/datasets-ziz-all/processed_data/CelebA\"\n data_set = load_data.CelebA(data_dir=data_dir, batch_size=batch_size, img_size=self.img_size)\n self.num_channels = 3\n self.vrange = [-1., 1.]\n elif dataset_name == 'binarized-mnist':\n # data_dir = \"/data/ziz/not-backed-up/datasets-ziz-all/processed_data/mnist\"\n data_dir = \"/data/ziz/not-backed-up/jxu/mnist\"\n data_set = load_data.BinarizedMNIST(data_dir=data_dir, batch_size=batch_size, img_size=self.img_size)\n self.num_channels = 1\n self.vrange = [0, 1]\n elif dataset_name == 'church_outdoor':\n #data_dir = \"/data/ziz/not-backed-up/datasets-ziz-all/raw_data/lsun/church_outdoor\"\n data_dir = \"/data/ziz/not-backed-up/jxu/church_outdoor\"\n data_set = load_data.ChurchOutdoor(data_dir=data_dir, batch_size=batch_size, img_size=self.img_size)\n self.num_channels = 3\n self.vrange = [-1., 1.]\n\n if use_debug_mode:\n self.train_set = data_set.train(shuffle=True, limit=batch_size*2)\n self.eval_set = data_set.train(shuffle=True, limit=batch_size*2)\n self.test_set = data_set.test(shuffle=False, limit=-1)\n else:\n self.train_set = data_set.train(shuffle=True, limit=-1)\n self.eval_set = data_set.train(shuffle=True, limit=batch_size*10)\n self.test_set = data_set.test(shuffle=False, limit=-1)\n\n def construct_models(self, model_cls, model_opt, learning_rate, trainable_params=None, eval_keys=['total loss']):\n # models\n self.models = [model_cls(counters={}) for i in range(self.nr_gpu)]\n template = tf.make_template('model', model_cls.build_graph)\n for i in range(self.nr_gpu):\n with tf.device('/gpu:%d' % i):\n template(self.models[i], **model_opt)\n if trainable_params is None:\n self.params = tf.trainable_variables()\n else:\n self.params = get_trainable_variables(trainable_params)\n # gradients\n grads = []\n for i in range(self.nr_gpu):\n with tf.device('/gpu:%d' % i):\n grads.append(tf.gradients(self.models[i].loss, self.params, colocate_gradients_with_ops=True))\n with tf.device('/gpu:0'):\n for i in range(1, self.nr_gpu):\n for j in range(len(grads[0])):\n grads[0][j] += grads[i][j]\n\n mdict = {}\n if 'total loss' in eval_keys:\n mdict['total loss'] = tf.add_n([model.loss for model in self.models]) / self.nr_gpu\n if 'nll loss' in eval_keys:\n mdict['nll loss'] = tf.add_n([model.loss_nll for model in self.models]) / 
self.nr_gpu\n if 'reg loss' in eval_keys:\n mdict['reg loss'] = tf.add_n([model.loss_reg for model in self.models]) / self.nr_gpu\n if 'bits per dim' in eval_keys:\n mdict['bits per dim'] = tf.add_n([model.bits_per_dim for model in self.models]) / self.nr_gpu\n if 'mi' in eval_keys:\n mdict['mi'] = tf.add_n([model.mi for model in self.models]) / self.nr_gpu\n\n self.monitor = Monitor(dict=mdict, config_str=\"\", log_file_path=self.save_dir+\"/logfile\")\n self.train_step = adam_updates(self.params, grads[0], lr=learning_rate)\n #\n self.saver = tf.train.Saver()\n\n def train_epoch(self, mgen, which_set='train'):\n raise NotImplementedError(\"Must override methodB\")\n\n def eval_epoch(self, mgen, which_set='eval'):\n raise NotImplementedError(\"Must override\")\n\n def sample(self, data, mgen):\n raise NotImplementedError(\"Must override\")\n\n def preload(self, from_dir, var_list):\n preload_saver = tf.train.Saver(var_list=var_list)\n ckpt_file = from_dir + '/params_' + self.data_set + '.ckpt'\n print('restoring parameters from', ckpt_file)\n preload_saver.restore(self.sess, ckpt_file)\n\n def set_session(self, sess):\n self.sess = sess\n\n def save(self):\n self.saver.save(self.sess, self.save_dir + '/params_' + self.data_set + '.ckpt')\n\n def restore(self, saver=None, dir=None):\n if saver is None:\n saver = self.saver\n if dir is None:\n dir = self.save_dir\n ckpt_file = dir + '/params_' + self.data_set + '.ckpt'\n print('restoring parameters from', ckpt_file)\n saver.restore(self.sess, ckpt_file)\n\n\n def train(self, train_mgen, sample_mgen, max_num_epoch=100, save_interval=None, restore=False):\n if restore:\n self.restore()\n for epoch in range(max_num_epoch+1):\n tt = time.time()\n self.train_epoch(train_mgen, which_set='train')\n self.eval_epoch(train_mgen, which_set='eval')\n self.monitor.summarise_epoch(time=time.time()-tt, log=True)\n\n if save_interval is not None and epoch % save_interval == 0:\n self.save()\n data = next(self.test_set) # note that test set is used here\n self.test_set.reset()\n ori_x, masked_x, sample_x = self.sample(data, sample_mgen)\n visualize_samples(ori_x, os.path.join(\"results\", self.exp_name, 'train_%s_gt_%d.png' % (self.data_set, epoch)), layout=(5, 5), vrange=self.vrange)\n visualize_samples(masked_x, os.path.join(\"results\", self.exp_name, 'train_%s_masked_%d.png' % (self.data_set, epoch)), layout=(5, 5), vrange=self.vrange)\n visualize_samples(sample_x, os.path.join(\"results\", self.exp_name, 'train_%s_sample_%d.png' % (self.data_set, epoch)), layout=(5, 5), vrange=self.vrange)\n print(\"------------ saved\")\n sys.stdout.flush()\n\n def eval(self, which_set, mgen=None, generate_samples=False, restore=True, layout=(5,5), same_inputs=False, use_mask_at=None):\n if restore:\n self.restore()\n self.eval_epoch(mgen, which_set=which_set)\n self.monitor.summarise_epoch(time=0., log=False)\n if which_set == 'train':\n data_set = self.train_set\n elif which_set == 'eval':\n data_set = self.eval_set\n elif which_set == 'test':\n data_set = self.test_set\n if generate_samples:\n data = next(data_set)\n data_set.reset()\n ori_x, masked_x, sample_x = self.sample(data, mgen, same_inputs=same_inputs, use_mask_at=use_mask_at)\n visualize_samples(ori_x, os.path.join(\"results\", self.exp_name, 'gen_%s_gt_%s.png' % (self.data_set, which_set)), layout=layout, vrange=self.vrange)\n visualize_samples(masked_x, os.path.join(\"results\", self.exp_name, 'gen_%s_masked_%s.png' % (self.data_set, which_set)), layout=layout, vrange=self.vrange)\n 
visualize_samples(sample_x, os.path.join(\"results\", self.exp_name, 'gen_%s_sample_%s.png' % (self.data_set, which_set)), layout=layout, vrange=self.vrange)\n" ]
[ [ "tensorflow.device", "tensorflow.gradients", "tensorflow.trainable_variables", "tensorflow.make_template", "tensorflow.train.Saver", "tensorflow.add_n" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
lixiny/CPF
[ "69129a3a2ec76347752241850da5ced09d795b1d" ]
[ "hocontact/models/manobranch.py" ]
[ "import pickle\n\nimport numpy as np\nimport torch\nfrom manopth.manolayer import ManoLayer\nfrom torch import nn\n\n\nclass ManoAdaptor(torch.nn.Module):\n def __init__(self, mano_layer, load_path=None):\n super().__init__()\n self.adaptor = torch.nn.Linear(778, 21, bias=False)\n if load_path is not None:\n with open(load_path, \"rb\") as p_f:\n exp_data = pickle.load(p_f)\n weights = exp_data[\"adaptor\"]\n regressor = torch.from_numpy(weights)\n self.register_buffer(\"J_regressor\", regressor)\n else:\n regressor = mano_layer._buffers[\"th_J_regressor\"]\n tip_reg = regressor.new_zeros(5, regressor.shape[1])\n tip_reg[0, 745] = 1\n tip_reg[1, 317] = 1\n tip_reg[2, 444] = 1\n tip_reg[3, 556] = 1\n tip_reg[4, 673] = 1\n reordered_reg = torch.cat([regressor, tip_reg])[\n [0, 13, 14, 15, 16, 1, 2, 3, 17, 4, 5, 6, 18, 10, 11, 12, 19, 7, 8, 9, 20]\n ]\n self.register_buffer(\"J_regressor\", reordered_reg)\n self.adaptor.weight.data = self.J_regressor\n\n def forward(self, inp):\n fix_idxs = [0, 4, 8, 12, 16, 20]\n for idx in fix_idxs:\n self.adaptor.weight.data[idx] = self.J_regressor[idx]\n return self.adaptor(inp.transpose(2, 1)), self.adaptor.weight - self.J_regressor\n\n\nclass ManoBranch(nn.Module):\n def __init__(\n self,\n ncomps, # ncomps = 15 in all expermients\n base_neurons=[512, 512],\n center_idx: int = 9,\n use_pca=True,\n use_shape=True,\n mano_root=\"assets/mano\",\n mano_pose_coeff=1,\n mano_side=\"right\",\n dropout=0,\n ):\n \"\"\"\n Args:\n mano_root (path): dir containing mano pickle files\n center_idx: Joint idx on which to hand is centered (given joint has position\n [0, 0, 0]\n ncomps: Number of pose principal components that are predicted\n \"\"\"\n super(ManoBranch, self).__init__()\n\n self.use_shape = use_shape\n self.use_pca = use_pca\n self.mano_pose_coeff = mano_pose_coeff\n self.mano_side = mano_side\n\n if self.use_pca:\n # Final number of coefficients to predict for pose\n # is sum of PCA components and 3 global axis-angle params\n # for the global rotation\n mano_pose_size = ncomps + 3\n else:\n # 15 joints + 1 global rotations, 9 components per joint\n # rotation\n mano_pose_size = 16 * 9\n # Initial base layers of MANO decoder\n base_layers = []\n for inp_neurons, out_neurons in zip(base_neurons[:-1], base_neurons[1:]):\n if dropout:\n base_layers.append(nn.Dropout(p=dropout))\n base_layers.append(nn.Linear(inp_neurons, out_neurons))\n base_layers.append(nn.ReLU())\n self.base_layer = nn.Sequential(*base_layers)\n\n # Pose layers to predict pose parameters\n self.pose_reg = nn.Linear(base_neurons[-1], mano_pose_size)\n if not self.use_pca:\n # Initialize all nondiagonal items on rotation matrix weights to 0\n self.pose_reg.bias.data.fill_(0)\n weight_mask = self.pose_reg.weight.data.new(np.identity(3)).view(9).repeat(16)\n self.pose_reg.weight.data = torch.abs(\n weight_mask.unsqueeze(1).repeat(1, 256).float() * self.pose_reg.weight.data\n )\n\n # Shape layers to predict MANO shape parameters\n if self.use_shape:\n self.shape_reg = torch.nn.Sequential(nn.Linear(base_neurons[-1], 10))\n\n # Mano layer which outputs the hand mesh given the hand pose and shape\n # paramters\n self.mano_layer = ManoLayer(\n ncomps=ncomps,\n center_idx=center_idx,\n side=mano_side,\n mano_root=mano_root,\n use_pca=use_pca,\n flat_hand_mean=False,\n return_full_pose=True, # * @Xinyu, here I will return the axisang.\n )\n self.faces = self.mano_layer.th_faces\n\n def forward(self, inp):\n base_features = self.base_layer(inp)\n pose = self.pose_reg(base_features) # TENSOR (B, 
N_PCA)\n\n if self.mano_pose_coeff != 1:\n pose = torch.cat([pose[:, :3], self.mano_pose_coeff * pose[:, 3:]], 1)\n if not self.use_pca:\n # Reshape to rotation matrixes\n mano_pose = pose.reshape(pose.shape[0], 16, 3, 3)\n else:\n mano_pose = pose\n\n # Get shape\n if self.use_shape:\n shape = self.shape_reg(base_features)\n else:\n shape = None\n\n # Get MANO vertices and joints for left and right hands given\n # predicted mano parameters\n verts, joints, full_pose = self.mano_layer(mano_pose, th_betas=shape)\n\n # Gather results in metric space (vs MANO millimeter outputs)\n # pose: the 18 ncomps (3 global rot + 15 pca hand pose)\n # full_pose: the 48 (16 * 3) full relative axis-angles of all 16 joints rotations (from root to finger)\n results = {\"verts3d\": verts, \"joints3d\": joints, \"shape\": shape, \"pose\": pose, \"full_pose\": full_pose}\n\n return results\n\n\ndef test_full_pose():\n mano_branch = ManoBranch(ncomps=15, base_neurons=[512, 512, 512],)\n test_mano_layer = ManoLayer(\n ncomps=15, center_idx=9, side=\"right\", mano_root=\"assets/mano\", use_pca=False, flat_hand_mean=True,\n )\n\n rand_pose = torch.rand((1, 18))\n rand_shape = torch.rand((1, 10))\n verts3d, joints3d, full_pose = mano_branch.mano_layer(rand_pose, rand_shape)\n test_verts3d, test_joints3d = test_mano_layer(full_pose, rand_shape)\n\n verts_dist = ((test_verts3d - verts3d) * 1000.0).norm(2, -1).mean().item()\n joints_dist = ((test_joints3d - joints3d) * 1000.0).norm(2, -1).mean().item()\n print(verts_dist, joints_dist) # should be 0.\n return\n\n\nif __name__ == \"__main__\":\n test_full_pose()\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.cat", "torch.from_numpy", "torch.nn.Linear", "numpy.identity", "torch.rand", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
isotrforever/R-BERT
[ "99e986cab12f2d91f2445c651908c8a18c8c9efe" ]
[ "model.py" ]
[ "import torch\nimport torch.nn as nn\nfrom transformers import BertModel, BertPreTrainedModel\n\n\nclass FCLayer(nn.Module):\n def __init__(self, input_dim, output_dim, dropout_rate=0.0, use_activation=True):\n super(FCLayer, self).__init__()\n self.use_activation = use_activation\n self.dropout = nn.Dropout(dropout_rate)\n self.linear = nn.Linear(input_dim, output_dim)\n self.tanh = nn.Tanh()\n\n def forward(self, x):\n x = self.dropout(x)\n if self.use_activation:\n x = self.tanh(x)\n return self.linear(x)\n\n\nclass RBERT(BertPreTrainedModel):\n def __init__(self, config, args):\n super(RBERT, self).__init__(config)\n self.bert = BertModel(config=config) # Load pretrained bert\n\n self.num_labels = config.num_labels\n\n self.cls_fc_layer = FCLayer(config.hidden_size, config.hidden_size, args.dropout_rate)\n self.entity_fc_layer = FCLayer(config.hidden_size, config.hidden_size, args.dropout_rate)\n self.label_classifier = FCLayer(\n config.hidden_size * 3,\n config.num_labels,\n args.dropout_rate,\n use_activation=False,\n )\n\n @staticmethod\n def entity_average(hidden_output, e_mask):\n \"\"\"\n Average the entity hidden state vectors (H_i ~ H_j)\n :param hidden_output: [batch_size, j-i+1, dim]\n :param e_mask: [batch_size, max_seq_len]\n e.g. e_mask[0] == [0, 0, 0, 1, 1, 1, 0, 0, ... 0]\n :return: [batch_size, dim]\n \"\"\"\n e_mask_unsqueeze = e_mask.unsqueeze(1) # [b, 1, j-i+1]\n length_tensor = (e_mask != 0).sum(dim=1).unsqueeze(1) # [batch_size, 1]\n\n # [b, 1, j-i+1] * [b, j-i+1, dim] = [b, 1, dim] -> [b, dim]\n sum_vector = torch.bmm(e_mask_unsqueeze.float(), hidden_output).squeeze(1)\n avg_vector = sum_vector.float() / length_tensor.float() # broadcasting\n return avg_vector\n\n def forward(self, input_ids, attention_mask, token_type_ids, labels, e1_mask, e2_mask):\n outputs = self.bert(\n input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids\n ) # sequence_output, pooled_output, (hidden_states), (attentions)\n sequence_output = outputs[0]\n pooled_output = outputs[1] # [CLS]\n\n # Average\n e1_h = self.entity_average(sequence_output, e1_mask)\n e2_h = self.entity_average(sequence_output, e2_mask)\n\n # Dropout -> tanh -> fc_layer (Share FC layer for e1 and e2)\n pooled_output = self.cls_fc_layer(pooled_output)\n e1_h = self.entity_fc_layer(e1_h)\n e2_h = self.entity_fc_layer(e2_h)\n\n # Concat -> fc_layer\n concat_h = torch.cat([pooled_output, e1_h, e2_h], dim=-1)\n logits = self.label_classifier(concat_h)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n\n # Softmax\n if labels is not None:\n if self.num_labels == 1:\n loss_fct = nn.MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = nn.CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n outputs = (loss,) + outputs\n\n return outputs # (loss), logits, (hidden_states), (attentions)\n" ]
[ [ "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.cat", "torch.nn.Tanh", "torch.nn.Linear", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
finagle29/linetools
[ "8da5c232d8744864f8f5d7ff8c31e4165f03c5ac" ]
[ "linetools/analysis/interactive_plot.py" ]
[ "\"\"\" Classes for making interactive plots.\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals, absolute_import\n\nimport os\nimport numpy as np\nfrom ..utils import between, loadjson, savejson\nfrom ..spectra.convolve import convolve_psf\nfrom ..spectra.plotting import get_flux_plotrange\nfrom ..guis import utils as ltgu\nfrom .interp import AkimaSpline\n\nfrom astropy.modeling import models\nimport astropy.units as u\n\nimport matplotlib.transforms as mtran\nimport matplotlib.pyplot as plt\n\ntry: # Python 3\n input = raw_input\nexcept NameError:\n pass\n\n\ndef local_median(wa, fl, er, x, npix=10, default=None):\n \"\"\" find the median flux value at x using +/- npix pixels.\n \"\"\"\n if (x > np.max(wa)) or (x < np.min(wa)):\n # out or range\n return default\n i = np.searchsorted(wa, x)\n i0,i1 = i - npix, i + npix\n good = (er[i0:i1] > 0) & ~np.isnan(fl[i0:i1])\n if good.sum():\n return np.median(fl[i0:i1][good])\n else:\n return default\n\n\nclass PlotWrapBase(object):\n \"\"\" A base class that has all the navigation and smoothing\n keypress events.\n\n Notes\n -----\n These attributes must be defined in a subclass:\n\n * self.wa, self.fl Spectrum wavelength and flux\n * self.nsmooth integer > 0 that determines the smoothing\n * self.ax Axes where spectrum is plotted\n * self.fig Figure which holds the axes.\n * self.artists['fl'] The Matplotlib line artist that represents the flux.\n\n The keypress events need to be connected to the figure with\n something like::\n\n def connect(self, fig):\n cids = dict(key=[])\n # the top two are methods of PlotWrapBase\n cids['key'].append(fig.canvas.mpl_connect(\n 'key_press_event', self.on_keypress_navigate))\n cids['key'].append(fig.canvas.mpl_connect(\n 'key_press_event', self.on_keypress_smooth))\n self.cids.update(cids)\n \"\"\"\n _help_string = \"\"\"\ni,o Zoom in/out x limits\ny Zoom out y limits\nY Guess y limits\nt,b Set y top/bottom limit\nl,r Set left/right x limit\n[,] Pan left/right\nw Plot the whole spectrum\n\nS,U Smooth/unsmooth spectrum\n\"\"\"\n\n def __init__(self):\n \"\"\" Initialize parameters for plotting the spectrum\n \"\"\"\n self.nav_dict = dict(nav=ltgu.navigate(0, 0, init=True))\n\n def on_keypress_navigate(self, event):\n \"\"\" Process a keypress event. Requires attributes self.ax,\n self.fl, self.wa, self.fig\n \"\"\"\n # Requiring inaxes for all of these now\n if (event.key in self.nav_dict['nav']) and event.inaxes:\n try:\n ltgu.navigate(self.nav_dict, event, flux=self.fl, wave=self.wa)\n except KeyError:\n pass\n else:\n self.ax.set_xlim(self.nav_dict['x_minmax'])\n self.ax.set_ylim(self.nav_dict['y_minmax'])\n self.fig.canvas.draw()\n\n def on_keypress_smooth(self, event):\n \"\"\" Smooth the flux with a gaussian. 
Requires attributes\n self.fl and self.nsmooth, self.artists['fl'] and self.fig.\"\"\"\n\n # maybe should use boxcar smoothing?\n if event.key == 'S':\n if self.nsmooth > 0:\n self.nsmooth += 0.5\n else:\n self.nsmooth += 1\n sfl = convolve_psf(self.fl, self.nsmooth)\n self.artists['fl'].set_ydata(sfl)\n self.fig.canvas.draw()\n elif event.key == 'U':\n self.nsmooth = 0\n self.artists['fl'].set_ydata(self.fl)\n self.fig.canvas.draw()\n\nclass PlotWrapNav(PlotWrapBase):\n \"\"\" Enable simple XIDL-style navigation for plotting a spectrum.\n\n For example, i and o for zooming in y direction, [ and ] for\n panning, S and U for smoothing and unsmoothing.\n\n Parameters\n ----------\n fig : matplotlib Figure\n ax : matplotlib axes\n The Axes where the spectrum is plotted.\n wa, fl : array\n Wavelength and flux arrays\n artists : dict\n A dictionary which must contain a key 'fl', which is the\n matplotlib artist corresponding to the flux line.\n printhelp : bool, optional\n Whether to print a help message when first called.\n \"\"\"\n def __init__(self, fig, ax, wa, fl, artists, printhelp=True,\n xlim=None):\n\n super(PlotWrapNav, self).__init__()\n\n if isinstance(wa, u.Quantity):\n wa = wa.value\n self.wa = wa\n if isinstance(fl, u.Quantity):\n fl = fl.value\n self.fl = fl\n\n if xlim is None:\n xmin = np.min(self.wa)\n xmax = np.max(self.wa)\n else:\n xmin, xmax = xlim\n\n ymin, ymax = get_flux_plotrange(self.fl)\n self.nav_dict['x_minmax'] = np.array([xmin, xmax])\n self.nav_dict['y_minmax'] = [ymin, ymax]\n\n self.artists = artists\n self.fig = fig\n self.ax = ax\n\n self.nsmooth = 0\n self.last_keypress = None\n # disable existing keypress events (like 's' for save).\n try:\n cids = list(fig.canvas.callbacks.callbacks['key_press_event'])\n except KeyError:\n pass\n else:\n for cid in cids:\n fig.canvas.callbacks.disconnect(cid)\n self.cids = {}\n self.connect()\n if printhelp:\n print(self._help_string)\n\n def on_keypress(self, event):\n \"\"\" Print a help message\"\"\"\n\n # store the last key pressed\n self.last_keypress = event.key\n if event.key == '?':\n print(self._help_string)\n\n def connect(self):\n cids = dict(key=[])\n # the top two are methods of PlotWrapBase\n cids['key'].append(self.fig.canvas.mpl_connect(\n 'key_press_event', self.on_keypress))\n cids['key'].append(self.fig.canvas.mpl_connect(\n 'key_press_event', self.on_keypress_navigate))\n cids['key'].append(self.fig.canvas.mpl_connect(\n 'key_press_event', self.on_keypress_smooth))\n self.cids.update(cids)\n\nclass InteractiveCoFit(PlotWrapNav):\n \"\"\" Class for interactively fitting a continuum\n\n Parameters\n ----------\n wa : Wavelengths\n fl : Fluxes\n er : One sigma errors\n contpoints : list of x,y tuple pairs (None)\n The points through which a cubic spline is passed,\n defining the continuum.\n co : Continuum, optional\n The existing continuum, if one is already defined.\n anchor : bool\n Whether to prevent modification of the first and last\n spline knots. 
Default is None, which means anchor only if\n co is given.\n\n Notes\n -----\n Updates the following attributes:\n\n * self.wa, self.fl, self.er : wa, fl, er\n * self.contpoints : Points used to define the continuum.\n * self.artists : Dictionary of matplotlib plotting artists.\n * self.connections : Callback connections.\n * self.fig : The plotting figure instance.\n\n \"\"\"\n help_message = PlotWrapNav._help_string + \"\"\"\na : add a new spline knot\nA : add a new spline knot, and use a flux median to guess y position\n+ : double the number of spline knots\n_ : halve the number of spline knots\nd : delete the nearest knot\nm : move the nearest knot\nM : move the nearest knot, and use a flux median to guess y position\nc : toggle initial continuum display\n\nq : quit\n\"\"\"\n def __init__(self, wa, fl, er, contpoints, co=None,\n fig=None, anchor=None, numguesspix=None):\n \"\"\" Initialise figure, plots and variables.\n \"\"\"\n\n self.artists = {}\n if fig is None:\n self.fig = plt.figure()\n else:\n self.fig = fig\n\n self.nsmooth = 0\n self.wa = wa\n self.fl = fl\n self.er = er\n self.anchor = anchor\n\n if os.path.lexists('./_knots.jsn'):\n c = input('knots file exists, use this? (y) ')\n if c.lower() != 'n':\n contpoints = loadjson('./_knots.jsn')\n # need the float call here to make sure values are all float64\n # and thus json serializable.\n contpoints = sorted(tuple(float(val) for val in cp) for\n cp in contpoints)\n\n #import pdb; pdb.set_trace()\n if co is not None:\n self.continuum = np.array(co, copy=True)\n if self.anchor is None:\n self.anchor = True\n else:\n xpts, ypts = zip(*contpoints)\n if len(contpoints) >= 5:\n # need 5 points to define an Akima Spline\n spline = AkimaSpline(xpts, ypts)\n self.continuum = spline(wa)\n else:\n self.continuum = np.interp(wa, xpts, ypts)\n co = self.continuum\n if self.anchor is None:\n self.anchor = False\n\n if self.anchor:\n wmin = contpoints[0][0]\n wmax = contpoints[-1][0]\n else:\n wmin = wa[0]\n wmax = wa[-1]\n\n if numguesspix is not None:\n self.numguesspix = numguesspix\n\n # add extra anchor points so the slopes match at each end of\n # the fitting region.\n\n i1, i2 = wa.searchsorted([wmin, wmax])\n if i1 == 0:\n i1 = 1\n if i2 == len(wa) or i2 < 0:\n i2 = len(wa) - 1\n x,y = contpoints[0]\n contpoints[0] = wa[i1], y\n x,y = contpoints[-1]\n contpoints[-1] = wa[i2], y\n self.indices = i1, i2\n if self.anchor:\n self.anchor_start = wa[i1 - 1], float(co[i1 - 1])\n self.anchor_end = wa[i2], float(co[i2])\n self.contpoints = contpoints\n self.wmin = wmin\n self.wmax = wmax\n\n # disable any existing key press callbacks\n cids = list(self.fig.canvas.callbacks.callbacks['key_press_event'])\n for cid in cids:\n self.fig.canvas.callbacks.disconnect(cid)\n\n self.connections = []\n self.finished = False\n self.plotinit()\n\n #setup\n super(InteractiveCoFit, self).__init__(\n self.fig, self.ax, wa, fl, self.artists, printhelp=False)\n\n self.update()\n self.modifypoints()\n plt.draw()\n\n def plotinit(self):\n \"\"\" Set up the figure and do initial plots.\n\n Updates the following attributes:\n * self.artists\n \"\"\"\n wa,fl,er = self.wa, self.fl, self.er\n\n # axis for spectrum & continuum\n a0 = self.fig.add_axes((0.05,0.1,0.9,0.6))\n self.ax = a0\n a0.set_autoscale_on(0)\n # axis for residuals\n a1 = self.fig.add_axes((0.05,0.75,0.9,0.2),sharex=a0)\n a1.set_autoscale_on(0)\n a1.axhline(0, color='k', alpha=0.7, zorder=99)\n a1.axhline(1, color='k', alpha=0.7, zorder=99)\n a1.axhline(-1, color='k', alpha=0.7, zorder=99)\n 
a1.axhline(2, color='k', linestyle='dashed', zorder=99)\n a1.axhline(-2, color='k', linestyle='dashed', zorder=99)\n m0, = a1.plot([0],[0],'.r',marker='.', mec='none', lw=0, mew=0,\n ms=6, alpha=0.5)\n a1.set_ylim(-4, 4)\n a0.axhline(0, color='0.7')\n\n # Initial plot limits\n i0,i1 = self.indices\n xmin = wa[i0]\n xmax = wa[i1]\n self.nav_dict = dict(nav=ltgu.navigate(0, 0, init=True))\n self.nav_dict['xmnx'] = [xmin, xmax]\n ymin,ymax = get_flux_plotrange(self.fl[between(wa, xmin, xmax)])\n #\n art = []\n art.append(a0.axvline(wa[i0], color='r', ls='--', lw=2, zorder=10))\n art.append(a0.axvline(wa[i1], color='r', ls='--', lw=2, zorder=10))\n self.artists['indices'] = art\n self.artists['initcont'], = a0.plot(wa, self.continuum, color='k', lw=2, ls='dashed', zorder=3)\n self.artists['fl'], = a0.plot(wa, fl, lw=1, color='0.7',\n drawstyle='steps-mid')\n a0.plot(wa, er, lw=0.5, color='orange')\n m1, = a0.plot([0], [0], 'r', zorder=4, lw=2)\n m2, = a0.plot([0], [0], 'o', mfc='None', mew=2, ms=12, mec='r',\n alpha=0.7)\n\n a0.set_xlim(self.nav_dict['xmnx'])\n good = (er[i0:i1] > 0) & ~np.isnan(fl[i0:i1]) & ~np.isinf(fl[i0:i1])\n ymax = 2 * np.abs(np.percentile(fl[i0:i1][good], 95))\n ymin = -0.1 * ymax\n self.nav_dict['ymnx'] = [ymin, ymax]\n a0.set_ylim(self.nav_dict['ymnx'])\n\n self.nav_dict['sv_xy'] = [[xmin, xmax], [ymin, ymax]]\n self.nav_dict['tmp_xy'] = None\n\n # for histogram\n trans = mtran.blended_transform_factory(a1.transAxes, a1.transData)\n hist, = a1.plot([], [], color='k', transform=trans)\n x = np.linspace(-3,3)\n\n g = models.Gaussian1D(amplitude=0.05, mean=0, stddev=1)\n a1.plot(g(x), x, color='k', transform=trans, lw=0.5)\n\n self.fig.canvas.draw()\n self.artists.update(contpoints=m2, cont=m1, resid=m0, hist_left=hist)\n\n self.finished = False\n\n\n def update(self):\n \"\"\" Calculates the new continuum, residuals and updates the plots.\n\n Updates the following attributes:\n * self.artists\n * self.continuum\n \"\"\"\n wa,fl,er = self.wa, self.fl, self.er\n co = self.continuum\n if self.anchor:\n cpts = [self.anchor_start] + self.contpoints + [self.anchor_end]\n else:\n cpts = self.contpoints\n i,j = self.indices\n xpts, ypts = zip(*cpts)\n if len(cpts) >= 5:\n # need 5 points to define an Akima Spline\n spline = AkimaSpline(xpts, ypts)\n co[i:j] = spline(wa[i:j])\n else:\n co[i:j] = np.interp(wa[i:j], xpts, ypts)\n\n resid = (fl[i:j] - co[i:j]) / er[i:j]\n # histogram\n bins = np.arange(0, 5 + 0.1, 0.2)\n w0,w1 = self.fig.axes[1].get_xlim()\n i,j = self.indices\n x,_ = np.histogram(resid[between(wa[i:j], w0, w1)],\n range=(bins[0],bins[-1]), # For NaNs\n bins=bins)\n b = np.repeat(bins, 2)\n X = np.concatenate([[0], np.repeat(x,2), [0]])\n Xmax = X.max()\n X = 0.05 * X / Xmax\n self.artists['hist_left'].set_data(X, b)\n\n if self.anchor:\n xpts, ypts = zip(*self.contpoints[1:-1])\n else:\n xpts, ypts = zip(*self.contpoints)\n\n self.artists['contpoints'].set_data((xpts, ypts))\n self.artists['cont'].set_data(wa[i:j], co[i:j])\n self.artists['resid'].set_data(wa[i:j], resid)\n self.continuum = co\n savejson('_knots.jsn', self.contpoints, overwrite=True)\n self.fig.canvas.draw()\n\n def on_keypress(self, event):\n \"\"\" Interactive fiddling via the keyboard\n\n Updates:\n * self.contpoints\n \"\"\"\n if event.key == 'q':\n self.finished = True\n plt.close()\n return\n if event.key == '+':\n # double the number of knots\n xc, yc = zip(*self.contpoints)\n xa0, ya0 = self.contpoints[0]\n xnew = []\n xnew.extend(np.array(xc[:-1]) + 0.5*np.diff(xc))\n ynew = 
np.interp(xnew, xc, yc)\n ynew = [float(local_median(self.wa, self.fl, self.er, xnew[i], npix=self.numguesspix,\n default=ynew[i]))\n for i in range(len(xnew))]\n # add to contpoints\n self.contpoints.extend(zip(xnew, ynew))\n self.contpoints.sort()\n self.update()\n if event.key == '_':\n # remove (roughly) half the number of knots\n cp = self.contpoints\n if len(cp) < 2:\n print(\"Too few spline knots.\")\n return\n self.contpoints = [cp[0]] + cp[1:-1][1::2] + [cp[-1]]\n self.update()\n if event.inaxes != self.fig.axes[0]:\n return\n\n if event.key in ('a', '3'):\n if not (self.wmin < event.xdata < self.wmax):\n print('Outside fitting region')\n return\n # add a point to contpoints\n x, y = event.xdata, event.ydata\n if not self.contpoints or x not in list(zip(*self.contpoints))[0]:\n self.contpoints.append((x, float(y)))\n self.contpoints.sort()\n self.update()\n if event.key == 'A':\n # add a point to contpoints, estimating via median\n if not (self.wmin < event.xdata < self.wmax):\n print('Outside fitting region')\n return\n x = event.xdata\n if not self.contpoints or x not in list(zip(*self.contpoints))[0]:\n y = local_median(self.wa, self.fl, self.er, x, npix=self.numguesspix,\n default=event.ydata)\n self.contpoints.append((x, float(y)))\n self.contpoints.sort()\n self.update()\n elif event.key in ('d', '4'):\n # remove a point from contpoints\n if len(self.contpoints) < 2:\n print('Need at least 1 spline knot')\n return\n\n contx,conty = zip(*self.ax.transData.transform(self.contpoints))\n sep = np.hypot(event.x - np.array(contx),\n event.y - np.array(conty))\n ind = sep.argmin()\n if ind in (0, len(sep) - 1) and self.anchor:\n print('Cannot remove anchor knots')\n return\n self.contpoints.remove(self.contpoints[ind])\n self.update()\n elif event.key in ('m', 'M'):\n # Move a point\n if not between(event.xdata, self.wmin, self.wmax):\n print('Outside fitting region')\n return\n\n contx,conty = zip(*self.ax.transData.transform(self.contpoints))\n sep = np.hypot(event.x - np.array(contx),\n event.y - np.array(conty))\n ind = np.argmin(sep)\n if self.anchor and ind == 0:\n if len(self.contpoints) > 2:\n ind = 1\n else:\n print('Cannot move anchor knots')\n return\n elif self.anchor and ind == len(sep) - 1:\n if len(self.contpoints) > 2:\n ind = len(sep) - 2\n else:\n print('Cannot move anchor knots')\n return\n\n x, y = event.xdata, event.ydata\n # if M, get y value from a local_median\n if event.key == 'M' and \\\n (not self.contpoints or\n x not in list(zip(*self.contpoints))[0]):\n y = local_median(self.wa, self.fl, self.er, x, npix=self.numguesspix,\n default=event.ydata)\n # Check for duplication\n xpts, ypts = zip(*self.contpoints)\n xpts = np.array(xpts)\n xpts[ind] = x\n uni = np.unique(xpts)\n if len(self.contpoints) != len(uni):\n print(\"Duplicate x value! 
Try another spot\")\n return\n # Finish\n self.contpoints[ind] = x, float(y)\n self.contpoints.sort()\n self.update()\n elif event.key == 'c':\n # Toggle initial continuum display\n if self.artists['initcont'].get_visible() is True:\n self.artists['initcont'].set_visible(False)\n else:\n self.artists['initcont'].set_visible(True)\n self.update()\n\n elif event.key == '?':\n print(self.help_message)\n\n def on_button_release(self, event):\n self.update()\n\n def modifypoints(self):\n \"\"\" Add/remove continuum points.\"\"\"\n print(self.help_message)\n id1 = self.fig.canvas.mpl_connect('key_press_event',self.on_keypress)\n id2 = self.fig.canvas.mpl_connect('key_press_event',self.on_keypress_smooth)\n id3 = self.fig.canvas.mpl_connect('key_press_event',self.on_keypress_navigate)\n id4 = self.fig.canvas.mpl_connect('button_release_event',self.on_button_release)\n self.connections.extend([id1, id2, id3, id4])\n" ]
[ [ "numpy.linspace", "numpy.max", "numpy.argmin", "numpy.searchsorted", "numpy.unique", "numpy.arange", "numpy.diff", "numpy.interp", "matplotlib.pyplot.close", "numpy.repeat", "matplotlib.pyplot.figure", "numpy.min", "numpy.isnan", "numpy.median", "numpy.array", "numpy.percentile", "matplotlib.pyplot.draw", "matplotlib.transforms.blended_transform_factory", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jrueb/uproot-methods
[ "8b38c49acc233228d4fed32a6d4f77f057f8eb2f" ]
[ "uproot_methods/classes/TGraphAsymmErrors.py" ]
[ "#!/usr/bin/env python\n\n# BSD 3-Clause License; see https://github.com/scikit-hep/uproot-methods/blob/master/LICENSE\n\nimport uproot_methods.base\n\nclass Methods(uproot_methods.base.ROOTMethods):\n\t\n\t@property\n\tdef xerrorshigh(self):\n\t\treturn self._fEXhigh\n\t\t\n\t@property\n\tdef xerrorslow(self):\n\t\treturn self._fEXlow\n\t\t\n\t@property\n\tdef yerrorshigh(self):\n\t\treturn self._fEYhigh\n\t\t\n\t@property\n\tdef yerrorslow(self):\n\t\treturn self._fEYlow\n\t\t\n\tdef matplotlib(self, showtitle=True, show=False, **kwargs):\n\t\timport matplotlib.pyplot as pyplot\n\t\t\n\t\t_xerrs = [self.xerrorslow, self.xerrorshigh]\n\t\t_yerrs = [self.yerrorslow, self.yerrorshigh]\n\n\t\t_xlabel = _decode(self.xlabel if self.xlabel is not None else \"\")\n\t\t_ylabel = _decode(self.ylabel if self.ylabel is not None else \"\")\n\t\t\n\t\tpyplot.errorbar(self.xvalues, self.yvalues, xerr=_xerrs, yerr=_yerrs, **kwargs)\n\t\tpyplot.xlabel(_xlabel)\n\t\tpyplot.ylabel(_ylabel)\n\t\tif showtitle:\n\t\t\t_title = _decode(self.title)\n\t\t\tpyplot.title(_title)\n\t\t\t\n\t\tif show:\n\t\t\tpyplot.show()\n\t\t\t\ndef _decode(sequence):\n\treturn sequence.decode() if isinstance(sequence, bytes) else sequence\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hanyas/mimo
[ "6f9b327a1a202a88b33a419520474ef4f10749e8", "6f9b327a1a202a88b33a419520474ef4f10749e8" ]
[ "examples/ilr/toy/evaluate_cmb.py", "examples/ilr/toy/evaluate_ard.py" ]
[ "import os\nimport argparse\n\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\nimport numpy as np\nimport numpy.random as npr\n\nimport mimo\nfrom mimo.distributions import NormalGamma\nfrom mimo.distributions import MatrixNormalWishart\nfrom mimo.distributions import GaussianWithNormalGamma\nfrom mimo.distributions import LinearGaussianWithMatrixNormalWishart\n\nfrom mimo.distributions import TruncatedStickBreaking\nfrom mimo.distributions import Dirichlet\nfrom mimo.distributions import CategoricalWithDirichlet\nfrom mimo.distributions import CategoricalWithStickBreaking\n\nfrom mimo.mixtures import BayesianMixtureOfLinearGaussians\n\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nimport pathos\nfrom pathos.pools import _ProcessPool as Pool\n\nnb_cores = pathos.multiprocessing.cpu_count()\n\n\ndef _job(kwargs):\n args = kwargs.pop('arguments')\n seed = kwargs.pop('seed')\n\n input = kwargs.pop('train_input')\n target = kwargs.pop('train_target')\n\n input_dim = input.shape[-1]\n target_dim = target.shape[-1]\n\n # set random seed\n np.random.seed(seed)\n\n nb_params = input_dim\n if args.affine:\n nb_params += 1\n\n basis_prior = []\n models_prior = []\n\n # initialize Normal\n alpha_ng = 1.\n beta_ng = 1. / (2. * 1e2)\n kappas = 1e-2\n\n # initialize Matrix-Normal\n psi_mnw = 1e0\n K = 1e-1\n\n for n in range(args.nb_models):\n basis_hypparams = dict(mu=np.zeros((input_dim,)),\n alphas=np.ones(input_dim) * alpha_ng,\n betas=np.ones(input_dim) * beta_ng,\n kappas=np.ones(input_dim) * kappas)\n\n aux = NormalGamma(**basis_hypparams)\n basis_prior.append(aux)\n\n models_hypparams = dict(M=np.zeros((target_dim, nb_params)),\n K=np.eye(nb_params) * K, nu=target_dim + 1,\n psi=np.eye(target_dim) * psi_mnw)\n\n aux = MatrixNormalWishart(**models_hypparams)\n models_prior.append(aux)\n\n # define gating\n if args.prior == 'stick-breaking':\n gating_hypparams = dict(K=args.nb_models, gammas=np.ones((args.nb_models,)),\n deltas=np.ones((args.nb_models,)) * args.alpha)\n gating_prior = TruncatedStickBreaking(**gating_hypparams)\n\n ilr = BayesianMixtureOfLinearGaussians(gating=CategoricalWithStickBreaking(gating_prior),\n basis=[GaussianWithNormalGamma(basis_prior[i])\n for i in range(args.nb_models)],\n models=[LinearGaussianWithMatrixNormalWishart(models_prior[i], affine=args.affine)\n for i in range(args.nb_models)])\n\n else:\n gating_hypparams = dict(K=args.nb_models, alphas=np.ones((args.nb_models,)) * args.alpha)\n gating_prior = Dirichlet(**gating_hypparams)\n\n ilr = BayesianMixtureOfLinearGaussians(gating=CategoricalWithDirichlet(gating_prior),\n basis=[GaussianWithNormalGamma(basis_prior[i])\n for i in range(args.nb_models)],\n models=[LinearGaussianWithMatrixNormalWishart(models_prior[i], affine=args.affine)\n for i in range(args.nb_models)])\n\n ilr.add_data(target, input, whiten=True,\n labels_from_prior=True)\n\n # Gibbs sampling\n ilr.resample(maxiter=args.gibbs_iters,\n progprint=args.verbose)\n\n for _ in range(args.super_iters):\n if args.stochastic:\n # Stochastic meanfield VI\n ilr.meanfield_stochastic_descent(maxiter=args.svi_iters,\n stepsize=args.svi_stepsize,\n batchsize=args.svi_batchsize)\n if args.deterministic:\n # Meanfield VI\n ilr.meanfield_coordinate_descent(tol=args.earlystop,\n maxiter=args.meanfield_iters,\n progprint=args.verbose)\n\n ilr.gating.prior = ilr.gating.posterior\n for i in range(ilr.likelihood.size):\n ilr.basis[i].prior = ilr.basis[i].posterior\n ilr.models[i].prior = ilr.models[i].posterior\n\n return ilr\n\n\ndef 
parallel_ilr_inference(nb_jobs=50, **kwargs):\n kwargs_list = []\n for n in range(nb_jobs):\n kwargs['seed'] = n\n kwargs_list.append(kwargs.copy())\n\n with Pool(processes=min(nb_jobs, nb_cores),\n initializer=tqdm.set_lock,\n initargs=(tqdm.get_lock(),)) as p:\n res = p.map(_job, kwargs_list)\n\n return res\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Evaluate ilr with a Stick-breaking prior')\n parser.add_argument('--datapath', help='path to dataset', default=os.path.abspath(mimo.__file__ + '/../../datasets'))\n parser.add_argument('--evalpath', help='path to evaluation', default=os.path.abspath(mimo.__file__ + '/../../evaluation/toy'))\n parser.add_argument('--nb_seeds', help='number of seeds', default=1, type=int)\n parser.add_argument('--prior', help='prior type', default='stick-breaking')\n parser.add_argument('--alpha', help='concentration parameter', default=25, type=float)\n parser.add_argument('--nb_models', help='max number of models', default=50, type=int)\n parser.add_argument('--affine', help='affine functions', action='store_true', default=True)\n parser.add_argument('--no_affine', help='non-affine functions', dest='affine', action='store_false')\n parser.add_argument('--super_iters', help='interleaving Gibbs/VI iterations', default=3, type=int)\n parser.add_argument('--gibbs_iters', help='Gibbs iterations', default=5, type=int)\n parser.add_argument('--stochastic', help='use stochastic VI', action='store_true', default=False)\n parser.add_argument('--no_stochastic', help='do not use stochastic VI', dest='stochastic', action='store_false')\n parser.add_argument('--deterministic', help='use deterministic VI', action='store_true', default=True)\n parser.add_argument('--no_deterministic', help='do not use deterministic VI', dest='deterministic', action='store_false')\n parser.add_argument('--meanfield_iters', help='max VI iterations', default=250, type=int)\n parser.add_argument('--svi_iters', help='SVI iterations', default=500, type=int)\n parser.add_argument('--svi_stepsize', help='SVI step size', default=5e-4, type=float)\n parser.add_argument('--svi_batchsize', help='SVI batch size', default=256, type=int)\n parser.add_argument('--prediction', help='prediction w/ mode or average', default='average')\n parser.add_argument('--earlystop', help='stopping criterion for VI', default=1e-2, type=float)\n parser.add_argument('--verbose', help='show learning progress', action='store_true', default=True)\n parser.add_argument('--mute', help='show no output', dest='verbose', action='store_false')\n parser.add_argument('--nb_train', help='size of train dataset', default=2000, type=int)\n parser.add_argument('--seed', help='choose seed', default=1337, type=int)\n\n args = parser.parse_args()\n\n np.random.seed(args.seed)\n\n # load Cosmic Microwave Background (CMB) training_data from Hannah (2011)\n data = np.loadtxt(args.datapath + '/cmb.csv', delimiter=\",\", skiprows=1)\n\n # shuffle data\n from sklearn.utils import shuffle\n\n data = shuffle(data)\n\n # training data\n nb_train = args.nb_train\n input, target = data[:nb_train, :1], data[:nb_train, 1:]\n\n ilr = parallel_ilr_inference(nb_jobs=args.nb_seeds,\n train_input=input,\n train_target=target,\n arguments=args)[0]\n\n # predict on training\n mu, var, std, nlpd = \\\n ilr.meanfield_prediction(input, target, prediction=args.prediction)\n\n # metrics\n from sklearn.metrics import explained_variance_score, mean_squared_error, r2_score\n\n mse = mean_squared_error(target, mu)\n evar = 
explained_variance_score(target, mu, multioutput='variance_weighted')\n smse = 1. - r2_score(target, mu, multioutput='variance_weighted')\n\n print('TRAIN - EVAR:', evar, 'MSE:', mse, 'SMSE:', smse, 'NLPD:',\n nlpd.mean(), 'Compnents:', len(ilr.used_labels))\n\n fig, axes = plt.subplots(2, 1)\n\n # # plot prediction\n sorter = np.argsort(input[:, 0], axis=0).flatten()\n sorted_input, sorted_target = input[sorter, 0], target[sorter, 0]\n sorted_mu, sorted_std = mu[sorter, 0], std[sorter, 0]\n\n axes[0].scatter(sorted_input, sorted_target, s=0.75, color='k')\n axes[0].plot(sorted_input, sorted_mu, color='crimson')\n for c in [1., 2., 3.]:\n axes[0].fill_between(sorted_input,\n sorted_mu - c * sorted_std,\n sorted_mu + c * sorted_std,\n edgecolor=(0, 0, 1, 0.1), facecolor=(0, 0, 1, 0.1))\n\n axes[0].set_ylabel('y')\n\n # plot gaussian activations\n axes[1].set_xlabel('x')\n axes[1].set_ylabel('p(x)')\n\n activations = ilr.meanfield_predictive_activation(sorted_input)\n axes[1].plot(sorted_input, activations)\n\n plt.show()\n", "import os\nimport argparse\n\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\nimport numpy as np\nimport numpy.random as npr\n\nimport mimo\nfrom mimo.distributions import NormalGamma\nfrom mimo.distributions import MatrixNormalWishart\nfrom mimo.distributions import GaussianWithNormalGamma\nfrom mimo.distributions import LinearGaussianWithMatrixNormalWishartAndAutomaticRelevance\n\nfrom mimo.distributions import Gamma\n\nfrom mimo.distributions import TruncatedStickBreaking\nfrom mimo.distributions import Dirichlet\nfrom mimo.distributions import CategoricalWithDirichlet\nfrom mimo.distributions import CategoricalWithStickBreaking\n\nfrom mimo.mixtures import BayesianMixtureOfLinearGaussians\n\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nimport pathos\nfrom pathos.pools import _ProcessPool as Pool\n\nnb_cores = pathos.multiprocessing.cpu_count()\n\n\ndef _job(kwargs):\n args = kwargs.pop('arguments')\n seed = kwargs.pop('seed')\n\n input = kwargs.pop('train_input')\n target = kwargs.pop('train_target')\n\n input_dim = input.shape[-1]\n target_dim = target.shape[-1]\n\n # set random seed\n np.random.seed(seed)\n\n nb_params = input_dim\n if args.affine:\n nb_params += 1\n\n basis_prior = []\n models_prior = []\n models_hypprior = []\n\n # initialize Normal\n alpha_ng = 1.\n beta_ng = 1. / (2. * 1e2)\n kappas = 1e-2\n\n # initialize Matrix-Normal\n psi_mnw = 1e0\n K = 1e0\n\n # initialize ard-Gamma\n alphas_ard = 1.\n betas_ard = 1. / (2. 
* 1e2)\n\n for n in range(args.nb_models):\n basis_hypparams = dict(mu=np.zeros((input_dim,)),\n alphas=np.ones(input_dim) * alpha_ng,\n betas=np.ones(input_dim) * beta_ng,\n kappas=np.ones(input_dim) * kappas)\n\n aux = NormalGamma(**basis_hypparams)\n basis_prior.append(aux)\n\n models_hypparams = dict(M=np.zeros((target_dim, nb_params)),\n K=np.eye(nb_params) * K, nu=target_dim + 1,\n psi=np.eye(target_dim) * psi_mnw)\n\n aux = MatrixNormalWishart(**models_hypparams)\n models_prior.append(aux)\n\n models_hyphypparams = dict(alphas=alphas_ard * np.ones(nb_params),\n betas=betas_ard * np.ones(nb_params))\n\n aux = Gamma(**models_hyphypparams)\n models_hypprior.append(aux)\n\n # define gating\n if args.prior == 'stick-breaking':\n gating_hypparams = dict(K=args.nb_models, gammas=np.ones((args.nb_models,)),\n deltas=np.ones((args.nb_models,)) * args.alpha)\n gating_prior = TruncatedStickBreaking(**gating_hypparams)\n\n ilr = BayesianMixtureOfLinearGaussians(gating=CategoricalWithStickBreaking(gating_prior),\n basis=[GaussianWithNormalGamma(basis_prior[i])\n for i in range(args.nb_models)],\n models=[LinearGaussianWithMatrixNormalWishartAndAutomaticRelevance(models_prior[i],\n models_hypprior[i],\n affine=args.affine)\n for i in range(args.nb_models)])\n else:\n gating_hypparams = dict(K=args.nb_models, alphas=np.ones((args.nb_models,)) * args.alpha)\n gating_prior = Dirichlet(**gating_hypparams)\n\n ilr = BayesianMixtureOfLinearGaussians(gating=CategoricalWithDirichlet(gating_prior),\n basis=[GaussianWithNormalGamma(basis_prior[i])\n for i in range(args.nb_models)],\n models=[LinearGaussianWithMatrixNormalWishartAndAutomaticRelevance(models_prior[i],\n models_hypprior[i],\n affine=args.affine)\n for i in range(args.nb_models)])\n ilr.add_data(target, input, whiten=False,\n labels_from_prior=True)\n\n # Gibbs sampling\n ilr.resample(maxiter=args.gibbs_iters,\n progprint=args.verbose)\n\n for _ in range(args.super_iters):\n if args.stochastic:\n # Stochastic meanfield VI\n ilr.meanfield_stochastic_descent(maxiter=args.svi_iters,\n stepsize=args.svi_stepsize,\n batchsize=args.svi_batchsize)\n if args.deterministic:\n # Meanfield VI\n ilr.meanfield_coordinate_descent(tol=args.earlystop,\n maxiter=args.meanfield_iters,\n progprint=args.verbose)\n\n ilr.gating.prior = ilr.gating.posterior\n for i in range(ilr.likelihood.size):\n ilr.basis[i].prior = ilr.basis[i].posterior\n ilr.models[i].prior = ilr.models[i].posterior\n\n return ilr\n\n\ndef parallel_ilr_inference(nb_jobs=50, **kwargs):\n kwargs_list = []\n for n in range(nb_jobs):\n kwargs['seed'] = n\n kwargs_list.append(kwargs.copy())\n\n with Pool(processes=min(nb_jobs, nb_cores),\n initializer=tqdm.set_lock,\n initargs=(tqdm.get_lock(),)) as p:\n res = p.map(_job, kwargs_list)\n\n return res\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Evaluate ilr with a Stick-breaking prior')\n parser.add_argument('--datapath', help='path to dataset', default=os.path.abspath(mimo.__file__ + '/../../datasets'))\n parser.add_argument('--evalpath', help='path to evaluation', default=os.path.abspath(mimo.__file__ + '/../../evaluation/toy'))\n parser.add_argument('--nb_seeds', help='number of seeds', default=1, type=int)\n parser.add_argument('--prior', help='prior type', default='stick-breaking')\n parser.add_argument('--alpha', help='concentration parameter', default=25, type=float)\n parser.add_argument('--nb_models', help='max number of models', default=50, type=int)\n parser.add_argument('--affine', help='affine 
functions', action='store_true', default=True)\n parser.add_argument('--no_affine', help='non-affine functions', dest='affine', action='store_false')\n parser.add_argument('--super_iters', help='interleaving Gibbs/VI iterations', default=1, type=int)\n parser.add_argument('--gibbs_iters', help='Gibbs iterations', default=1, type=int)\n parser.add_argument('--stochastic', help='use stochastic VI', action='store_true', default=False)\n parser.add_argument('--no_stochastic', help='do not use stochastic VI', dest='stochastic', action='store_false')\n parser.add_argument('--deterministic', help='use deterministic VI', action='store_true', default=True)\n parser.add_argument('--no_deterministic', help='do not use deterministic VI', dest='deterministic', action='store_false')\n parser.add_argument('--meanfield_iters', help='max VI iterations', default=100, type=int)\n parser.add_argument('--svi_iters', help='SVI iterations', default=500, type=int)\n parser.add_argument('--svi_stepsize', help='SVI step size', default=5e-4, type=float)\n parser.add_argument('--svi_batchsize', help='SVI batch size', default=256, type=int)\n parser.add_argument('--prediction', help='prediction w/ mode or average', default='average')\n parser.add_argument('--earlystop', help='stopping criterion for VI', default=1e-2, type=float)\n parser.add_argument('--verbose', help='show learning progress', action='store_true', default=True)\n parser.add_argument('--mute', help='show no output', dest='verbose', action='store_false')\n parser.add_argument('--nb_train', help='size of train dataset', default=2000, type=int)\n parser.add_argument('--seed', help='choose seed', default=1337, type=int)\n\n args = parser.parse_args()\n\n # np.random.seed(args.seed)\n\n # load Cosmic Microwave Background (CMB) training_data from Hannah (2011)\n data = np.loadtxt(args.datapath + '/cmb.csv', delimiter=\",\", skiprows=1)\n\n # shuffle data\n from sklearn.utils import shuffle\n\n data = shuffle(data)\n\n # training data\n nb_train = args.nb_train\n input, target = data[:nb_train, :1], data[:nb_train, 1:]\n noise = npr.randn(len(input), 2) * 1e3\n input = np.hstack((input, noise))\n\n ilr = parallel_ilr_inference(nb_jobs=args.nb_seeds,\n train_input=input,\n train_target=target,\n arguments=args)[0]\n\n # predict on training\n mu, var, std, nlpd = \\\n ilr.meanfield_prediction(input, target, prediction=args.prediction)\n\n # metrics\n from sklearn.metrics import explained_variance_score, mean_squared_error, r2_score\n\n mse = mean_squared_error(target, mu)\n evar = explained_variance_score(target, mu, multioutput='variance_weighted')\n smse = 1. - r2_score(target, mu, multioutput='variance_weighted')\n\n print('TRAIN - EVAR:', evar, 'MSE:', mse, 'SMSE:', smse, 'NLPD:',\n nlpd.mean(), 'Compnents:', len(ilr.used_labels))\n\n fig, axes = plt.subplots(1, 1)\n\n # # plot prediction\n sorter = np.argsort(input[:, 0], axis=0).flatten()\n sorted_input, sorted_target = input[sorter, 0], target[sorter, 0]\n sorted_mu, sorted_std = mu[sorter, 0], std[sorter, 0]\n\n axes.scatter(sorted_input, sorted_target, s=0.75, color='k')\n axes.plot(sorted_input, sorted_mu, color='crimson')\n for c in [1., 2., 3.]:\n axes.fill_between(sorted_input,\n sorted_mu - c * sorted_std,\n sorted_mu + c * sorted_std,\n edgecolor=(0, 0, 1, 0.1), facecolor=(0, 0, 1, 0.1))\n\n axes.set_ylabel('y')\n plt.show()\n" ]
[ [ "sklearn.metrics.explained_variance_score", "sklearn.metrics.r2_score", "numpy.random.seed", "sklearn.utils.shuffle", "numpy.eye", "matplotlib.pyplot.subplots", "sklearn.metrics.mean_squared_error", "numpy.ones", "numpy.argsort", "matplotlib.pyplot.show", "numpy.zeros", "numpy.loadtxt" ], [ "sklearn.metrics.explained_variance_score", "numpy.hstack", "sklearn.metrics.r2_score", "numpy.random.seed", "sklearn.utils.shuffle", "numpy.eye", "matplotlib.pyplot.subplots", "sklearn.metrics.mean_squared_error", "numpy.ones", "numpy.argsort", "matplotlib.pyplot.show", "numpy.zeros", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
norveclibalikci/easyreg-mirror
[ "a16254733fe957cc4024923f8dce91412966a189", "a16254733fe957cc4024923f8dce91412966a189" ]
[ "tools/draw_deformation.py", "easyreg/seg_unet.py" ]
[ "import numpy as np\nimport sys,os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = ''\nfrom easyreg.viewers_tomv import *\nfrom mermaid.utils import *\nfrom mermaid.data_utils import *\nimport SimpleITK as sitk\nfrom glob import glob\nimport os\n\n\n\nsz = [160,200,200]\ndef get_image_list_to_draw(refer_folder,momentum_folder,img_type,source_target_folder,t_list):\n \"\"\"\n we first need to get a dict, where we can get the {\"pair_name\": \"pair\":[source,target], \"fluid_warped\":[warped],\"phi\"[phi for t=1],\"t\":[],\"linear_warped\"[]}\n :param refer_folder:\n :param img_type:\n :param source_txt:\n :return:\n \"\"\"\n pair_path_list = glob(os.path.join(refer_folder,\"*\"+img_type))\n #pair_path_list = glob(os.path.join(refer_folder,\"*9069761_image_9074437_image_9069761_image_9397988_image_0d0000_1d0000_t_1d00_image.nii.gz\"))\n pair_name_list = [get_file_name(path).replace(img_type.split(\".\")[0],\"\") for path in pair_path_list]\n source_name_list = [name.split(\"_\")[0]+\"_image\" for name in pair_name_list]\n target_name_list = [name.split(\"_\")[6]+\"_image\" for name in pair_name_list]\n momentum_list = [source_name+'_'+target_name+\"_0000Momentum.nii.gz\" for source_name, target_name in zip(source_name_list,target_name_list)]\n momentum_list = [os.path.join(momentum_folder,fname) for fname in momentum_list]\n source_path_list = [os.path.join(source_target_folder,source_name+'.nii.gz') for source_name in source_name_list]\n target_path_list = [os.path.join(source_target_folder,target_name+'.nii.gz') for target_name in target_name_list]\n lsource_path_list = [path.replace(\"image.nii.gz\",\"masks.nii.gz\") for path in source_path_list]\n ltarget_path_list = [path.replace(\"image.nii.gz\",\"masks.nii.gz\") for path in target_path_list]\n warped_path_list = [[os.path.join(refer_folder,pair_name+\"_0d0000_1d0000_t_{}_image.nii.gz\".format(str(\"{:.2f}\".format(t)).replace(\".\",\"d\")))for t in t_list] for pair_name in pair_name_list ]\n phi_path_list =[[path.replace(\"_image.nii.gz\",\"_phi_map.nii.gz\") for path in paths] for paths in warped_path_list]\n inv_phi_path_list =[[path.replace(\"_image.nii.gz\",\"_inv_map.nii.gz\") for path in paths] for paths in warped_path_list]\n lwarped_path_list = [[warped_path.replace(\"image.nii.gz\",\"label.nii.gz\") for warped_path in pair_warped_path] for pair_warped_path in warped_path_list]\n phi1_path = [path.replace(\"_image.nii.gz\",\"_phi_map.nii.gz\") for path in pair_path_list]\n dict_to_draw = {}\n for i, pair_name in enumerate(pair_name_list):\n dict_to_draw[pair_name] = {\"pair_name\": pair_name, \"pair_path\":[source_path_list[i],target_path_list[i],lsource_path_list[i],ltarget_path_list[i]]\n ,\"fluid_path\":warped_path_list[i], \"lfluid_path\":lwarped_path_list[i],\"phi_path\":phi_path_list[i],\"phi1\":phi1_path[i],\"t\":t_list,\"momentum_path\":momentum_list[i],\"inv_phi_path\":inv_phi_path_list[i]}\n\n return dict_to_draw\n\n\n\n\n\ndef draw_images(dict_to_draw):\n for pair_name in dict_to_draw:\n try:\n draw_image(dict_to_draw[pair_name])\n except:\n pass\n\ndef draw_image(single_image_dict):\n source_path = single_image_dict['pair_path'][0]\n target_path = single_image_dict['pair_path'][1]\n lsource_path = single_image_dict['pair_path'][2]\n ltarget_path = single_image_dict['pair_path'][3]\n fluid_path_list = single_image_dict['fluid_path']\n lfluid_path_list = single_image_dict['lfluid_path']\n phi_path_list = single_image_dict['phi_path']\n phi1_path =single_image_dict[\"phi1\"]\n t_list =single_image_dict[\"t\"]\n fr_sitk = 
lambda x: sitk.GetArrayFromImage(sitk.ReadImage(x))\n source = fr_sitk(source_path)\n lsource = fr_sitk(lsource_path)\n target = fr_sitk(target_path)\n ltarget = fr_sitk(ltarget_path)\n fluid_images = [fr_sitk(path) for path in fluid_path_list]\n lfluid_images = [fr_sitk(path) for path in lfluid_path_list]\n phis = [np.transpose(fr_sitk(path),[3,2,1,0]) for path in phi_path_list]\n phi1 = np.transpose(fr_sitk(phi1_path),[3,2,1,0])\n phi1_tensor = torch.Tensor(phi1[None])\n spacing = 1./(np.array(source.shape)-1)\n identity_map_np = identity_map_multiN([1,1]+sz,spacing)\n identity_map = torch.Tensor(identity_map_np)\n source_tensor = torch.Tensor(source)[None][None]\n lsource_tensor = torch.Tensor(lsource)[None][None]\n if list(phi1_tensor.shape[2:])!=list(source.shape[2:]):\n fres = lambda x:resample_image(x, spacing, [1, 3] + list(lsource_tensor.shape[2:]))\n phi1_tensor, _ = fres(phi1_tensor)\n phis = [fres(torch.Tensor(phi[None]))[0] for phi in phis]\n phis =[phi[0].numpy() for phi in phis]\n disp = phi1_tensor - identity_map\n linear_images = []\n llinear_images = []\n linear_phis = []\n for t in t_list:\n phi = identity_map + disp*t\n linear = compute_warped_image_multiNC(source_tensor,phi,spacing,spline_order=1,zero_boundary=True)\n llinear = compute_warped_image_multiNC(lsource_tensor,phi,spacing,spline_order=0,zero_boundary=True)\n linear_images.append(linear.numpy()[0,0])\n llinear_images.append(llinear.numpy()[0,0])\n linear_phis.append(phi.numpy()[0])\n\n draw_defomation(fluid_images, phis, linear_images, linear_phis,source,identity_map_np[0])\n\n\ndef draw_defomation(fluid_images,phis,linear_images,linear_phis,source,identity_map):\n\n fig, ax = plt.subplots(2, 6, figsize=(45, 16))\n # img = np.zeros_like(img)\n # plt.setp(plt.gcf(), 'facecolor', 'white')\n # plt.style.use('grayscale')\n plt.style.use(\"bmh\")\n\n ivx = ImageViewer3D_Sliced_Contour(ax[0][0], linear_images[0], linear_phis[0], 0, '', showColorbar=False)\n ivy = ImageViewer3D_Sliced_Contour(ax[0][1], linear_images[1], linear_phis[1], 0, '', showColorbar=False)\n #ivz = ImageViewer3D_Sliced_Contour(ax[0][2], source, identity_map, 0, '', showColorbar=False)\n ivz = ImageViewer3D_Sliced_Contour(ax[0][2], linear_images[2], linear_phis[2], 0, '', showColorbar=False)\n\n ivz = ImageViewer3D_Sliced_Contour(ax[0][3], linear_images[3], linear_phis[3], 0, '', showColorbar=False)\n ivz = ImageViewer3D_Sliced_Contour(ax[0][4], linear_images[4], linear_phis[4], 0, '', showColorbar=False)\n ivz = ImageViewer3D_Sliced_Contour(ax[0][5], linear_images[5], linear_phis[5], 0, '', showColorbar=False)\n #ivz = ImageViewer3D_Sliced_Contour(ax[0][7], linear_images[6], linear_phis[6], 0, '', showColorbar=False)\n\n ivx = ImageViewer3D_Sliced_Contour(ax[1][0], fluid_images[0], phis[0], 0, '', showColorbar=False)\n ivy = ImageViewer3D_Sliced_Contour(ax[1][1], fluid_images[1], phis[1], 0, '', showColorbar=False)\n ivz = ImageViewer3D_Sliced_Contour(ax[1][2], fluid_images[2], phis[2], 0, '', showColorbar=False)\n #ivz = ImageViewer3D_Sliced_Contour(ax[1][3], source, identity_map, 0, '', showColorbar=False)\n\n ivz = ImageViewer3D_Sliced_Contour(ax[1][3], fluid_images[3], phis[3], 0, '', showColorbar=False)\n ivz = ImageViewer3D_Sliced_Contour(ax[1][4], fluid_images[4], phis[4], 0, '', showColorbar=False)\n ivz = ImageViewer3D_Sliced_Contour(ax[1][5], fluid_images[5], phis[5], 0, '', showColorbar=False)\n #ivz = ImageViewer3D_Sliced_Contour(ax[1][7], fluid_images[6], phis[6], 0, '', showColorbar=False)\n plt.axis('off')\n\n\n 
plt.clim(vmin=-1., vmax=1.)\n plt.show()\n\n\n\ndef view_2d_from_3d(img=None, phi=None,fpth=None,color=True):\n fig, ax = plt.subplots(1,1)\n #plt.setp(plt.gcf(), 'facecolor', 'white')\n if not color:\n plt.style.use('grayscale')\n else:\n plt.style.use(\"bmh\")\n ax.set_axis_off()\n if img is None:\n img = np.zeros_like(phi[0])\n ImageViewer3D_Sliced(ax, img, 0, '', False)\n if phi is not None:\n ImageViewer3D_Sliced_Contour(ax, img, phi, 0, '', showColorbar=False)\n if fpth is not None:\n plt.savefig(fpth, dpi=100, bbox_inches='tight')\n plt.close('all')\n else:\n plt.show()\n plt.clf()\n\n#\n# img_type = \"_0d0000_1d0000_t_1d00_image.nii.gz\"\n# t_list = [-3,-1,0.5,1,3,4]\n# source_target_folder = \"/playpen-raid/olut/Nifti_resampled_rescaled_2Left_Affine2atlas\"\n# #\n# refer_folder = \"/playpen-raid/zyshen/data/oai_reg/draw4\"\n# dict_to_draw = get_image_list_to_draw(refer_folder,\"\",img_type,source_target_folder,t_list)\n# draw_images(dict_to_draw)\n#\n\n\ndef read_img_phi(img_path_list, phi_path_list=None):\n f = lambda pth: sitk.GetArrayFromImage(sitk.ReadImage(pth))\n img_list = [f(pth) for pth in img_path_list]\n phi_list = None\n if phi_path_list is not None:\n phi_list = [f(pth) for pth in phi_path_list]\n phi_list = [np.transpose(phi, (3, 2, 1,0)) for phi in phi_list]\n return img_list, phi_list\n\n\nfrom tools.visual_tools import *\nimg_type = \"_0d0000_1d0000_t_1d00_image.nii.gz\"\nt_list = [-1, -0.5, 0.5,1, 1.5, 2.0]\nsource_target_folder = \"/playpen-raid/olut/Nifti_resampled_rescaled_2Left_Affine2atlas\"\n#/playpen-raid/zyshen/data/oai_reg/train_with_10/momentum_lresol/9397988_image_9074437_image_0000Momentum.nii.gz\nmomentum_folder =\"/playpen-raid/zyshen/data/oai_reg/train_with_10/momentum_lresol\"\nmomentum_ftype = \"_0000Momentum.nii.gz\"\nrefer_folder = \"/playpen-raid/zyshen/data/oai_reg/draw4\"\ndict_to_draw = get_image_list_to_draw(refer_folder,momentum_folder,img_type,source_target_folder,t_list)\noutput_folder = \"/playpen-raid1/zyshen/data/oai_reg/draw_output4\"\n\"\"\"\ndict_to_draw[pair_name] = {\"pair_name\": pair_name, \"pair_path\":[source_path_list[i],target_path_list[i],lsource_path_list[i],ltarget_path_list[i]]\n ,\"fluid_path\":warped_path_list[i], \"lfluid_path\":lwarped_path_list[i],\"phi_path\":phi_path_list[i],\"phi1\":phi1_path[i],\"t\":t_list,\"momentum_path\":momentum_list[i]}\nfor each pair name, we have source.png, target.png, momentum.png, phi_name.png, warped_name.png, l_warped_name.png \"\"\"\nfor pair_name, pair_detail in dict_to_draw.items():\n output_path = os.path.join(output_folder,pair_name)\n os.makedirs(output_path,exist_ok=True)\n source_path = pair_detail[\"pair_path\"][0]\n target_path = pair_detail[\"pair_path\"][1]\n lsource_path = pair_detail[\"pair_path\"][2]\n momentum_path = pair_detail[\"momentum_path\"]\n phi_path_list = pair_detail[\"phi_path\"]\n inv_phi_path_list = pair_detail[\"inv_phi_path\"]\n warped_path_list = pair_detail[\"fluid_path\"]\n l_warped_path_list = pair_detail[\"lfluid_path\"]\n source_save_path = os.path.join(output_path,\"source.png\")\n lsource_save_path = os.path.join(output_path,\"lsource.png\")\n target_save_path = os.path.join(output_path,\"target.png\")\n momentum_save_path = os.path.join(output_path,\"momentum.png\")\n warped_name_list = [get_file_name(pth) for pth in warped_path_list]\n warped_save_path_list = [os.path.join(output_path,fname) +\"_warped.png\" for fname in warped_name_list]\n lwarped_save_path_list = [os.path.join(output_path,fname) + \"_lwarped.png\" for fname in 
warped_name_list]\n lwarped_phi_save_path_list = [os.path.join(output_path,fname) + \"_lwarpedphi.png\" for fname in warped_name_list]\n lwarped_invphi_save_path_list = [os.path.join(output_path,fname) + \"_lwarpedinvphi.png\" for fname in warped_name_list]\n phi_save_path_list = [os.path.join(output_path,fname) + \"_phi.png\" for fname in warped_name_list]\n inv_phi_save_path_list = [os.path.join(output_path,fname) + \"_inv_phi.png\" for fname in warped_name_list]\n img_phi_save_path_list = [os.path.join(output_path,fname) + \"_imgphi.png\" for fname in warped_name_list]\n f = lambda x: sitk.GetArrayFromImage(sitk.ReadImage(x))\n f_v = lambda x: np.transpose(f(x),[3,2,1,0])\n view_2d_from_3d(img=f(source_path),fpth=source_save_path)\n view_2d_from_3d(img=f(target_path),fpth=target_save_path)\n view_2d_from_3d(img=f(lsource_path),fpth=lsource_save_path)\n momentum = f_v(momentum_path)\n momentum = np.sum(momentum ** 2, 1)\n view_2d_from_3d(img=momentum, fpth=momentum_save_path,color=True)\n l = f(lsource_path)\n for i in range(len(warped_name_list)):\n warped = f(warped_path_list[i])\n view_2d_from_3d(img=warped, fpth=warped_save_path_list[i])\n view_2d_from_3d(img=f(l_warped_path_list[i]), fpth=lwarped_save_path_list[i])\n view_2d_from_3d(img=f(l_warped_path_list[i]),phi=f_v(phi_path_list[i]), fpth=lwarped_phi_save_path_list[i])\n view_2d_from_3d(phi=f_v(phi_path_list[i]), fpth=phi_save_path_list[i])\n try:\n view_2d_from_3d(img=l, phi=f_v(inv_phi_path_list[i]), fpth=lwarped_invphi_save_path_list[i])\n view_2d_from_3d(phi=f_v(inv_phi_path_list[i]), fpth=inv_phi_save_path_list[i])\n except:\n pass\n view_2d_from_3d(img =warped ,phi=f_v(phi_path_list[i]), fpth=img_phi_save_path_list[i])\n\n\n\n\n\n\n\n\n\n\n\n\n\n#\n# disp_pth = '/playpen-raid/zyshen/data/reg_debug_labeled_oai_reg_inter/visualize_affine/records/3D/9003406_20060322_SAG_3D_DESS_LEFT_016610899303_image_9357383_20040927_SAG_3D_DESS_LEFT_016610250606_imagemap.nii.gz'\n# img_pth = '/playpen-raid/zyshen/data/reg_debug_labeled_oai_reg_inter/visualize_affine/records/3D/9003406_20060322_SAG_3D_DESS_LEFT_016610899303_image_9357383_20040927_SAG_3D_DESS_LEFT_016610250606_image_reproduce.nii.gz'\n# disp = sitk.ReadImage(disp_pth)\n# disp = sitk.GetArrayFromImage(disp)\n# img = sitk.GetArrayFromImage(sitk.ReadImage(img_pth))\n# #disp = np.transpose(disp,(3,2,1,0))\n#\n#\n# spacing = 1. 
/ (sz - 1)\n# identity_map = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]\n# grid = identity_map+ disp\n# grid[0] = grid[0]*spacing[0]\n# grid[1] = grid[1]*spacing[1]\n# grid[2] = grid[2]*spacing[2]\n# grid = grid*2-1\n# print(np.max(grid), np.min(grid))\n#\n#\n# fig,ax = plt.subplots(2,7,figsize=(50, 30))\n# # img = np.zeros_like(img)\n# img[1,:,1]=1\n# plt.setp(plt.gcf(), 'facecolor', 'white')\n# plt.style.use('grayscale')\n#\n# ivx = ImageViewer3D_Sliced_Contour( ax[0][0], img,grid, 0, '',showColorbar=True)\n# ivy = ImageViewer3D_Sliced_Contour( ax[0][1], img,grid, 0, '',showColorbar=True)\n# ivz = ImageViewer3D_Sliced_Contour( ax[0][2], img,grid, 0, '',showColorbar=True)\n# ivz = ImageViewer3D_Sliced_Contour( ax[0][3], img,grid, 0, '',showColorbar=True)\n# ivz = ImageViewer3D_Sliced_Contour( ax[0][4], img,grid, 0, '',showColorbar=True)\n# ivz = ImageViewer3D_Sliced_Contour( ax[0][5], img,grid, 0, '',showColorbar=True)\n# ivz = ImageViewer3D_Sliced_Contour( ax[0][6], img,grid, 0, '',showColorbar=True)\n#\n# ivx = ImageViewer3D_Sliced_Contour( ax[1][0], img,grid, 0, '',showColorbar=True)\n# ivy = ImageViewer3D_Sliced_Contour( ax[1][1], img,grid, 0, '',showColorbar=True)\n# ivz = ImageViewer3D_Sliced_Contour( ax[1][2], img,grid, 0, '',showColorbar=True)\n# ivz = ImageViewer3D_Sliced_Contour( ax[1][3], img,grid, 0, '',showColorbar=True)\n# ivz = ImageViewer3D_Sliced_Contour( ax[1][4], img,grid, 0, '',showColorbar=True)\n# ivz = ImageViewer3D_Sliced_Contour( ax[1][5], img,grid, 0, '',showColorbar=True)\n# ivz = ImageViewer3D_Sliced_Contour( ax[1][6], img,grid, 0, '',showColorbar=True)\n#\n# # feh = FigureEventHandler(fig)\n# #\n# # feh.add_axes_event('button_press_event', ax[0], ivx.on_mouse_press)\n# # feh.add_axes_event('button_press_event', ax[1], ivy.on_mouse_press)\n# # feh.add_axes_event('button_press_event', ax[2], ivz.on_mouse_press)\n# #\n# # feh.synchronize([ax[0], ax[1], ax[2]])\n# plt.clim(vmin=-1., vmax=1.)\n# plt.show()\n#\n", "from .modules import Seg_resid\nfrom .utils import *\nimport torch.nn as nn\nfrom data_pre.partition import partition\n\nclass SegUnet(nn.Module):\n def __init__(self, opt=None):\n super(SegUnet, self).__init__()\n self.opt = opt\n seg_opt = opt['tsk_set'][('seg',{},\"settings for seg task\")]\n self.is_train = opt['tsk_set'][\"train\"]\n self.num_class = seg_opt['class_num',-1,\"the num of class\"]\n use_bn = seg_opt[\"use_bn\", True, \"use the batch normalization\"]\n patch_sz = opt['dataset']['seg']['patch_size',[-1,-1,-1],\"the size of input patch\"]\n overlap_sz = opt['dataset']['seg']['partition']['overlap_size',[-1,-1,-1],\"the size of input patch\"]\n patch_sz_itk = list(np.flipud(np.array(patch_sz)))\n overlap_sz_itk = list(np.flipud(np.array(overlap_sz)))\n self.img_sz = None\n self.unet = Seg_resid(self.num_class,bn=use_bn)\n self.print_count = 0\n self.partition = partition(opt['dataset']['seg']['partition'],patch_sz_itk,overlap_sz_itk)\n self.ensemble_during_the_test = opt['tsk_set']['seg'][(\"ensemble_during_the_test\",False,\"do test phase ensemble, which needs the test phase data augmentation already done\")]\n\n def set_loss_fn(self, loss_fn):\n \"\"\" set loss function\"\"\"\n self.loss_fn = loss_fn\n\n def get_loss(self, output, gt):\n loss = self.loss_fn.get_loss(output,gt)\n return loss\n\n def check_if_update_lr(self):\n return False, None\n\n def set_img_sz(self, img_sz):\n self.img_sz = img_sz\n\n\n\n\n\n\n def forward(self, input, is_train=True):\n if is_train:\n output = self.unet(input)\n else:\n with torch.no_grad():\n if not 
self.is_train and self.ensemble_during_the_test:\n output = self.get_assemble_ensemble(input)\n else:\n output = self.get_assemble_pred(input)\n self.print_count += 1\n return output\n\n def get_assemble_pred(self, input, split_size=8):\n output = []\n input_split = torch.split(input, split_size)\n for input_sub in input_split:\n res = self.forward(input_sub)\n if isinstance(res, list):\n res = res[-1]\n output.append(res.detach().cpu())\n pred_patched = torch.cat(output, dim=0)\n pred_patched = torch.max(pred_patched.data, 1)[1]\n output_np = self.partition.assemble(pred_patched,image_size=self.img_sz)\n return output_np\n\n\n def set_file_path(self, file_path, fname):\n self.file_path =file_path\n self.fname = fname\n\n\n def get_assemble_pred_for_ensemble(self, input, split_size=8):\n output = []\n input_split = torch.split(input, split_size)\n for input_sub in input_split:\n res = self.forward(input_sub)\n if isinstance(res, list):\n res = res[-1]\n output.append(res.detach().cpu())\n pred_patched = torch.cat(output, dim=0)\n\n return pred_patched\n\n\n def get_assemble_ensemble(self, input):\n import os\n from .reg_data_utils import read_txt_into_list, get_file_name\n from tools.image_rescale import save_image_with_given_reference\n import SimpleITK as sitk\n import torch\n import numpy as np\n from glob import glob\n from copy import deepcopy\n from mermaid.utils import compute_warped_image_multiNC\n patch_sz = self.opt['dataset']['seg']['patch_size', [-1, -1, -1], \"the size of input patch\"]\n overlap_sz = self.opt['dataset']['seg']['partition']['overlap_size', [-1, -1, -1], \"the size of input patch\"]\n option_p = self.opt['dataset']['seg'][('partition', {}, \"settings for the partition\")]\n patch_sz_itk = list(np.flipud(np.array(patch_sz)))\n overlap_sz_itk = list(np.flipud(np.array(overlap_sz)))\n corr_partition_pool = deepcopy(partition(option_p, patch_sz_itk, overlap_sz_itk))\n\n def compute_warped_image_label(input, warped_pth, warped_type,inv_phi_pth,inv_switcher,num_max=50,weight_for_orig_img=0):\n warped_pth_list = glob(os.path.join(warped_pth, warped_type))\n num_max = min(len(warped_pth_list),num_max)\n inv_phi_pth_list = [pth.replace(warped_pth,inv_phi_pth).replace(*inv_switcher) for pth in warped_pth_list]\n f = lambda pth: sitk.GetArrayFromImage(sitk.ReadImage(pth))\n fname = get_file_name(self.fname[0])\n f_warped = lambda x: get_file_name(x).find(fname+'_') == 0\n warped_sub_list = list(filter(f_warped, warped_pth_list))\n inv_phi_sub_list = list(filter(f_warped, inv_phi_pth_list))\n warped_sub_list = warped_sub_list[:num_max]\n inv_phi_sub_list = inv_phi_sub_list[:num_max]\n num_aug = len(warped_sub_list)\n warped_list = [f(pth) for pth in warped_sub_list]\n inv_phi_list = [f(pth) for pth in inv_phi_sub_list]\n warped_img = np.stack(warped_list, 0)[:,None]\n #warped_img = torch.Tensor(warped_img)*2-1.\n warped_img = self.normalize_input(warped_img,None)#self.file_path[0][0])\n warped_img = torch.Tensor(warped_img)\n inv_phi = np.stack(inv_phi_list, 0)\n inv_phi = np.transpose(inv_phi, (0, 4, 3, 2, 1))\n inv_phi = torch.Tensor(inv_phi)\n img_input_sz = self.opt[\"dataset\"][\"img_after_resize\"]\n differ_sz = any(np.array(warped_img.shape[2:]) != np.array(img_input_sz))\n\n\n\n sz = np.array(self.img_sz)\n spacing = 1. 
/ (sz - 1)\n output_np = np.zeros([1, self.num_class] + self.img_sz)\n if weight_for_orig_img!=0:\n tzero_img = self.get_assemble_pred_for_ensemble(input)\n tzero_pred = self.partition.assemble_multi_torch(tzero_img, image_size=self.img_sz)\n output_np = tzero_pred.cpu().numpy() * float(round(weight_for_orig_img*num_aug))\n\n for i in range(num_aug):\n if differ_sz:\n warped_img_cur, _ = resample_image(warped_img[i:i+1].cuda(), [1, 1, 1], [1, 3] + self.img_sz)\n inv_phi_cur, _ = resample_image(inv_phi[i:i+1].cuda(), [1, 1, 1], [1, 1] + self.img_sz)\n warped_img_cur = warped_img_cur.detach().cpu()\n inv_phi_cur = inv_phi_cur.detach().cpu()\n else:\n warped_img_cur = warped_img[i:i+1]\n inv_phi_cur = inv_phi[i:i+1]\n sample = {\"image\":[warped_img_cur[0,0].numpy()]}\n sample_p =corr_partition_pool(sample)\n pred_patched = self.get_assemble_pred_for_ensemble(torch.Tensor(sample_p[\"image\"]).cuda())\n pred_patched = self.partition.assemble_multi_torch(pred_patched, image_size=self.img_sz)\n pred_patched = torch.nn.functional.softmax(pred_patched,1)\n pred_patched = compute_warped_image_multiNC(pred_patched.cuda(), inv_phi_cur.cuda(),spacing, spline_order=1, zero_boundary=True)\n output_np += pred_patched.cpu().numpy()\n res = torch.max(torch.Tensor(output_np), 1)[1]\n return res[None]\n seg_ensemble_opt = self.opt['tsk_set']['seg'][(\"seg_ensemble\",{},\"settings of test phase data ensemble\")]\n warped_pth = seg_ensemble_opt[(\"warped_pth\", None,\"the folder path containing the warped image from the original image\")]\n inv_phi_pth = seg_ensemble_opt[(\"inv_phi_pth\",None,\"the folder path containing the inverse transformation\")]\n warped_type = seg_ensemble_opt[(\"warped_type\",\"*_warped.nii.gz\",\"the suffix of the augmented data\")]\n inv_switcher = seg_ensemble_opt[(\"inv_switcher\",[\"_warped.nii.gz\",\"_inv_phi.nii.gz\"],\"the fname switcher from warped image to inverse transformation map\")]\n num_max = seg_ensemble_opt[(\"num_max\",20,\"max num of augmentation for per test image\")]\n weight_for_orig_img = seg_ensemble_opt[(\"weight_for_orig_img\",0.0,\"the weight of original image\")]\n\n output_np = compute_warped_image_label(input, warped_pth, warped_type,inv_phi_pth,inv_switcher,num_max=num_max,weight_for_orig_img=weight_for_orig_img)\n return output_np\n\n\n\n\n\n\n def normalize_input(self,img,refer_img_path):\n import SimpleITK as sitk\n if refer_img_path is not None:\n refer_img = sitk.GetArrayFromImage(sitk.ReadImage(refer_img_path))\n else:\n refer_img = img\n min_intensity = refer_img.min()\n max_intensity = refer_img.max()\n normalized_img = (img - refer_img.min()) / (max_intensity - min_intensity)\n normalized_img = normalized_img * 2 - 1\n return normalized_img\n\n\n\n" ]
[ [ "numpy.array", "numpy.zeros_like", "numpy.sum", "numpy.transpose" ], [ "torch.nn.functional.softmax", "torch.max", "torch.Tensor", "torch.cat", "numpy.stack", "numpy.transpose", "torch.no_grad", "torch.split", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
stuarteberg/schist
[ "330476567cf061478aff5ce862c741095b8795a3" ]
[ "schist/tools/_affinity_tools.py" ]
[ "from typing import Optional#, Tuple, Sequence, Type, Union, Dict\n\nimport numpy as np\nfrom anndata import AnnData\nimport scipy.stats\n\nfrom scanpy import logging as logg\n\n\ndef cluster_consistency(\n adata: AnnData,\n level: int = 1,\n group: Optional[str] = None,\n key: Optional[str] = 'nsbm',\n copy: bool = False\n) -> Optional[AnnData]:\n \"\"\"\\\n Calculate cluster consistency at a given level\n Parameters\n ----------\n adata\n Annotated data matrix. \n level\n The NSBM level, as an alternative of full group name\n group\n The name of the NSBM level for which consistency should be calculated\n \n key\n The key used to store NSBM groupings\n copy\n Return a copy instead of writing to adata.\n\n Returns\n -------\n Depending on `copy`, returns or updates `adata` with consistency values \n in adata.uns['cluster_consistency'] and adata.obs['cluster_consistency']\n\"\"\" \n\n if group:\n level = group.split('_')[-1]\n else:\n group = f'{key}_level_{level}'\n\n if not group and not level:\n raise ValueError(\"You should specify at least one of group or level\")\n\n if not key in adata.uns.keys():\n raise KeyError(\n f\"Your dataset does not contain {key}, did you run nSBM?\"\n )\n elif not 'cell_affinity' in adata.uns[key]:\n raise KeyError(\n f\"Your dataset does not contain cell affinities, did you run nSBM?\"\n )\n elif not f'{level}' in adata.uns['nsbm']['cell_affinity'].keys():\n raise ValueError(\n f\"Affinitity for the specfified level {level} do not exist\"\n )\n \n\n affinity = adata.uns[key]['cell_affinity'][f'{level}']\n entropy = scipy.stats.entropy(affinity, axis=0) / np.log(adata.shape[0]) #normalized entropy\n\n adata.uns['cluster_consistency'] = entropy\n\n # now assign consistency to each cell, according to their group\n e_dict = dict(zip(adata.obs[group].cat.categories, entropy))\n g = adata.obs[group].values\n adata.obs['cluster_consistency'] = [e_dict[g[x]] for x in range(adata.shape[0])]\n \n return adata if copy else None\n\n\ndef cell_stability(\n adata: AnnData,\n key: Optional[str] = 'nsbm',\n copy: bool = False\n) -> Optional[AnnData]:\n \"\"\"\\\n Calculate cell stability given cell affinity\n Parameters\n ----------\n adata\n Annotated data matrix. \n key\n The key used to store NSBM groupings\n copy\n Return a copy instead of writing to adata.\n\n Returns\n -------\n Depending on `copy`, returns or updates `adata` with stability values \n in adata.obs['cell_stability']\n\"\"\" \n\n if not key in adata.uns.keys():\n raise KeyError(\n f\"Your dataset does not contain {key}, did you run nSBM?\"\n )\n elif not 'cell_affinity' in adata.uns[key]:\n raise KeyError(\n f\"Your dataset does not contain cell affinities, did you run nSBM?\"\n )\n\n aff_dict = adata.uns[key]['cell_affinity']\n \n _S = np.array([scipy.stats.entropy(aff_dict[x], axis=1) /np.log(aff_dict[x].shape[1]) for x in aff_dict.keys()]).T\n adata.obs['cell_stability'] = 1-np.nanmax(_S, axis=1) #/ np.nanmean(EE, axis=1)\n\n return adata if copy else None\n" ]
[ [ "numpy.nanmax", "numpy.log" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thoughtmachines/Stochastic-AlgoTrader
[ "62c9a5f9b2778fec2e964401159497da3580cd5b" ]
[ "trader/pairsTradingLstm.py" ]
[ "import sys, os\nmyPath = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, myPath + '/../')\n\nimport torch\nfrom torch import nn\nfrom torch.optim import Adam\nfrom torch.nn.init import xavier_normal as xavier\nimport matplotlib.pyplot as plt\n\nfrom data.loader import cryptoData\nfrom models.model import SeqRegressor\n\nDEVICE = torch.device(\"cpu\")\nCOIN1 = \"eth\"\nCOIN2 = \"btc\"\nMODEL = \"norm\"\n\nclass Residual(object):\n\n def __init__(self,dataloader_coin1,dataloader_coin2):\n self.dataloader_coin1 = dataloader_coin1\n self.dataloader_coin2 = dataloader_coin2\n\n def zScore(self,upperIndex,out_coin1,out_coin2):\n coin1_30 = self.dataloader_coin1.getDataFrame(upperIndex,20)\n coin2_30 = self.dataloader_coin2.getDataFrame(upperIndex,20)\n coin1_30 = torch.cat((coin1_30,out_coin1))\n coin2_30 = torch.cat((coin2_30,out_coin2))\n \n meanDiffernce30 = torch.mean(coin1_30-coin2_30)\n standardDev30 = torch.std(coin1_30-coin2_30)\n\n coin1_5 = self.dataloader_coin1.getDataFrame(upperIndex,5)\n coin2_5 = self.dataloader_coin2.getDataFrame(upperIndex,5)\n coin1_5 = torch.cat((coin1_5,out_coin1))\n coin2_5 = torch.cat((coin2_5,out_coin2))\n\n meanDiffernce5 = torch.mean(coin1_5-coin2_5)\n\n if standardDev30 > 0:\n return (meanDiffernce5 - meanDiffernce30)/standardDev30, self.riskModel(coin1_30,coin2_30)\n else:\n return 0, self.riskModel(coin1_30,coin2_30)\n\n def riskModel(self,coin1_30,coin2_30):\n c1 = coin1_30 - coin1_30.mean()\n c2 = coin2_30 - coin2_30.mean()\n\n corr = torch.sum(c1*c2) / (torch.sqrt(torch.sum(c1 ** 2)) * torch.sqrt(torch.sum(c2 ** 2)))\n if corr > 0.8:\n risk = False\n else:\n risk = True\n return risk\n\ndef getGeneralTrends(dataloader,upperIndex):\n upper = dataloader.getDataFrame(upperIndex,10).mean()\n lower = dataloader.getDataFrame(upperIndex,30).mean()\n return upper/lower\n\ndef main(COIN1,COIN2):\n model_coin1 = SeqRegressor(coin=COIN1,model= MODEL)\n model_coin1.to(DEVICE)\n model_coin2 = SeqRegressor(coin=COIN2,model= MODEL)\n model_coin2.to(DEVICE)\n\n dataloader_coin1 = cryptoData(COIN1,DEVICE=DEVICE,model=MODEL)\n DAYS_coin1 = len(dataloader_coin1)\n dataloader_coin2 = cryptoData(COIN2,DEVICE=DEVICE,model=MODEL)\n DAYS_coin2 = len(dataloader_coin2)\n\n model_coin1.eval(dataloader_coin1[0][0].unsqueeze(1))\n model_coin2.eval(dataloader_coin2[0][0].unsqueeze(1))\n\n residualModel = Residual(dataloader_coin1,dataloader_coin2)\n\n coin1_amt = 0\n coin2_amt = 0\n cash = 0\n\n startDay = 30\n trendThreshold = 1\n shorts = longs = holds = 0\n for i in range(startDay,min(DAYS_coin1,DAYS_coin2)):\n\n x_coin1,target_coin1 = dataloader_coin1[i]\n x_coin2,target_coin2 = dataloader_coin2[i]\n price_coin1 = dataloader_coin1.getDataFrame(i,1)\n price_coin2 = dataloader_coin2.getDataFrame(i,1)\n\n if i == startDay:\n coin1_amt = 5000/ price_coin1\n coin2_amt = 5000/ price_coin2\n\n x_coin1 = x_coin1.unsqueeze(1)\n x_coin2 = x_coin2.unsqueeze(1)\n out_coin1 = model_coin1(x_coin1).view(1,1)\n out_coin2 = model_coin2(x_coin2).view(1,1)\n \n zScore, risk = residualModel.zScore(i,out_coin1,out_coin2)\n trend_coin1 = getGeneralTrends(dataloader_coin1,i)\n trend_coin2 = getGeneralTrends(dataloader_coin2,i)\n\n if not risk:\n if zScore > 1:\n shorts+=1\n if coin1_amt > 0:\n if trend_coin2 > trendThreshold:\n temp = coin1_amt* price_coin1\n coin1_amt = 0\n coin2_amt += (temp / price_coin2)\n # print(\"\\t\",i,\"Transaction: short at \",price_coin1.item(),price_coin2.item())\n elif zScore < -1:\n longs+=1\n if coin2_amt > 0:\n if trend_coin1 > trendThreshold:\n 
temp = coin2_amt* price_coin2\n coin2_amt = 0\n coin1_amt += (temp / price_coin1)\n # print(\"\\t\",i,\"Transaction: long at \",price_coin1.item(),price_coin2.item())\n else:\n holds+=1\n\n \n \n\n out_coin1 = out_coin1.item()*dataloader_coin1.pmax.item()\n out_coin2 = out_coin2.item()*dataloader_coin2.pmax.item()\n print(COIN1,COIN2,\"\\n\\t\",(coin1_amt * price_coin1) + (coin2_amt * price_coin2) + cash)\n print('\\n')\nif __name__ == \"__main__\":\n \n main(\"eth\",\"btc\") \n main(\"eth\",\"ltc\") \n main(\"ltc\",\"btc\")" ]
[ [ "torch.mean", "torch.cat", "torch.sum", "torch.std", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Blitzdude/RealTimeGraphics-engine
[ "0a92467f7ab9710aabe14f8d4bb25c51f277ed26" ]
[ "RTG_proj/Vendor/bullet/examples/pybullet/gym/pybullet_envs/agents/visualize_ppo.py" ]
[ "# Copyright 2017 The TensorFlow Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Script to render videos of the Proximal Policy Gradient algorithm.\n\nCommand line:\n\n python3 -m agents.scripts.visualize \\\n --logdir=/path/to/logdir/<time>-<config> --outdir=/path/to/outdir/\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport os\n\nimport gym\nimport tensorflow as tf\n\nfrom agents import tools\nfrom agents.scripts import utility\n\n\ndef _create_environment(config, outdir):\n \"\"\"Constructor for an instance of the environment.\n\n Args:\n config: Object providing configurations via attributes.\n outdir: Directory to store videos in.\n\n Returns:\n Wrapped OpenAI Gym environment.\n \"\"\"\n if isinstance(config.env, str):\n env = gym.make(config.env)\n else:\n env = config.env()\n # Ensure that the environment has the specification attribute set as expected\n # by the monitor wrapper.\n if not hasattr(env, 'spec'):\n setattr(env, 'spec', getattr(env, 'spec', None))\n if config.max_length:\n env = tools.wrappers.LimitDuration(env, config.max_length)\n env = gym.wrappers.Monitor(\n env, outdir, lambda unused_episode_number: True)\n env = tools.wrappers.RangeNormalize(env)\n env = tools.wrappers.ClipAction(env)\n env = tools.wrappers.ConvertTo32Bit(env)\n return env\n\n\ndef _define_loop(graph, eval_steps):\n \"\"\"Create and configure an evaluation loop.\n\n Args:\n graph: Object providing graph elements via attributes.\n eval_steps: Number of evaluation steps per epoch.\n\n Returns:\n Loop object.\n \"\"\"\n loop = tools.Loop(\n None, graph.step, graph.should_log, graph.do_report, graph.force_reset)\n loop.add_phase(\n 'eval', graph.done, graph.score, graph.summary, eval_steps,\n report_every=eval_steps,\n log_every=None,\n checkpoint_every=None,\n feed={graph.is_training: False})\n return loop\n\n\ndef visualize(\n logdir, outdir, num_agents, num_episodes, checkpoint=None,\n env_processes=True):\n \"\"\"Recover checkpoint and render videos from it.\n\n Args:\n logdir: Logging directory of the trained algorithm.\n outdir: Directory to store rendered videos in.\n num_agents: Number of environments to simulate in parallel.\n num_episodes: Total number of episodes to simulate.\n checkpoint: Checkpoint name to load; defaults to most recent.\n env_processes: Whether to step environments in separate processes.\n \"\"\"\n config = utility.load_config(logdir)\n with config.unlocked:\n config.policy_optimizer = getattr(tf.train, config.policy_optimizer)\n config.value_optimizer = getattr(tf.train, config.value_optimizer)\n with tf.device('/cpu:0'):\n batch_env = utility.define_batch_env(\n lambda: _create_environment(config, outdir),\n num_agents, env_processes)\n graph = utility.define_simulation_graph(\n batch_env, config.algorithm, config)\n total_steps = num_episodes * config.max_length\n loop = _define_loop(graph, total_steps)\n saver = utility.define_saver(\n 
exclude=(r'.*_temporary/.*', r'global_step'))\n sess_config = tf.ConfigProto(allow_soft_placement=True)\n sess_config.gpu_options.allow_growth = True\n with tf.Session(config=sess_config) as sess:\n utility.initialize_variables(\n sess, saver, config.logdir, checkpoint, resume=True)\n for unused_score in loop.run(sess, saver, total_steps):\n pass\n batch_env.close()\n\n\ndef main(_):\n \"\"\"Load a trained algorithm and render videos.\"\"\"\n utility.set_up_logging()\n if not FLAGS.logdir or not FLAGS.outdir:\n raise KeyError('You must specify logging and outdirs directories.')\n FLAGS.logdir = os.path.expanduser(FLAGS.logdir)\n FLAGS.outdir = os.path.expanduser(FLAGS.outdir)\n visualize(\n FLAGS.logdir, FLAGS.outdir, FLAGS.num_agents, FLAGS.num_episodes,\n FLAGS.checkpoint, FLAGS.env_processes)\n\n\nif __name__ == '__main__':\n FLAGS = tf.app.flags.FLAGS\n tf.app.flags.DEFINE_string(\n 'logdir', None,\n 'Directory to the checkpoint of a training run.')\n tf.app.flags.DEFINE_string(\n 'outdir', None,\n 'Local directory for storing the monitoring outdir.')\n tf.app.flags.DEFINE_string(\n 'checkpoint', None,\n 'Checkpoint name to load; defaults to most recent.')\n tf.app.flags.DEFINE_integer(\n 'num_agents', 1,\n 'How many environments to step in parallel.')\n tf.app.flags.DEFINE_integer(\n 'num_episodes', 5,\n 'Minimum number of episodes to render.')\n tf.app.flags.DEFINE_boolean(\n 'env_processes', True,\n 'Step environments in separate processes to circumvent the GIL.')\n tf.app.run()\n" ]
[ [ "tensorflow.device", "tensorflow.app.flags.DEFINE_integer", "tensorflow.ConfigProto", "tensorflow.app.flags.DEFINE_string", "tensorflow.Session", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.app.run" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
johnbomidi/data-explorer-dash-app
[ "80df9928aa41a20d8098870662f444ba1fb1aa74" ]
[ "_app/callback.py" ]
[ "\nfrom dash_extensions.enrich import Output, Input, State, ServersideOutput\n\nfrom _app.gen import parse_contents, find_closest\nfrom _app.update import update_dropdown, update_marks, update_table, update_graph\n\nfrom dash import callback_context\nfrom dash.exceptions import PreventUpdate\nimport pandas as pd\n\n\ndef register_callbacks(app):\n @app.callback(ServersideOutput(\"store\", \"data\"), Input('upload-data', 'contents'), \n [State('upload-data', 'filename'),\n State('upload-data', 'last_modified'),],\n memoize=True)\n def query_data(contents, filename, date): \n print('query_data')\n df = parse_contents(contents, filename, date)\n return df\n\n @app.callback([Output('plot-index-selection', 'options'),\n Output('plot-index-selection', 'value'),\n Output('after-upload-children', 'hidden')],\n [Input(\"store\", \"data\"),],)\n def update_index_selector(df):\n print('update_index_selector')\n options, value = update_dropdown(df)\n return options, value[0], False\n\n\n @app.callback([Output('plot-selection', 'options'),\n Output('plot-selection', 'value'),\n Output('range-slider', 'min'),\n Output('range-slider', 'max'),\n Output('range-slider', 'marks')\n ],\n [Input('plot-index-selection', 'value'),\n # Input(\"store\", \"data\"),\n ], [State(\"store\", \"data\"),\n State('plot-selection', 'value'),\n State('range-slider', 'marks')])\n def update_plot_selector(index, df, columns, marks):\n print('update_plot_selector')\n # options, value = update_dropdown(df, index=index)\n options, value = update_dropdown(df.set_index(index))\n if (columns is not None) and (set(columns) <= set(df.columns.to_list())):\n value = [col for col in columns if col != index]\n # (int(len(df[index])/4))\n marks = update_marks(df[index])\n print (marks) \n return options, value, 0, len(df[index])-1, marks\n\n @app.callback([Output('range-slider', 'value'),\n Output('start-index', 'value'),\n Output('end-index', 'value')],\n [Input('range-slider', 'value'),\n Input('plot-index-selection', 'value'),\n Input('start-index', 'value'),\n Input('end-index', 'value'),\n # Input(\"store\", \"data\"),\n ], State(\"store\", \"data\"),\n # prevent_initial_call=True\n )\n def update_range_selector(slider_range, index, start_range, end_range, df):\n print('update_range_selector')\n ctx = callback_context\n trigger_id = ctx.triggered[0][\"prop_id\"].split(\".\")[0]\n # (slider_range is not None) and \n if (trigger_id == 'range-slider'): \n start_range_index, end_range_index = max(0, slider_range[0]), min(len(df[index])-1, slider_range[1]) \n start_range, end_range = df[index][start_range_index], df[index][end_range_index]\n # ((start_range is not None) or (end_range is not None)) and \n elif ((trigger_id == 'start-index') or (trigger_id == 'end-index')): \n print(index) \n if index in df.select_dtypes(include = ['datetime']).columns.to_list():\n print('its a datetime index')\n start_range, end_range = pd.to_datetime(start_range), pd.to_datetime(end_range)\n else:\n start_range, end_range = float(start_range), float(end_range)\n start_range, end_range = max(df[index].min(), start_range), min(df[index].max(), end_range)\n start_range_index, end_range_index = find_closest(start_range, df, index), find_closest(end_range, df, index, return_lower=False)\n # start_range, end_range = int(start_range), int(end_range)\n # elif (not start_range) or (not end_range):\n else:\n # print('setting initial ranges')\n # intial_index_iloc = min(len(df[index])-1, 10)\n intial_index_iloc = int(len(df[index])/2.5)\n start_range, end_range = 
df[index][0], df[index][intial_index_iloc]\n start_range_index, end_range_index = 0, intial_index_iloc\n print(start_range_index, end_range_index)\n if end_range_index < start_range_index:\n raise PreventUpdate\n slider_range = [start_range_index, end_range_index]\n # print(trigger_id)\n # print(slider_range, start_range, end_range)\n\n return slider_range, start_range, end_range\n\n\n @app.callback(Output('output-data-upload', 'children'),\n [Input('range-slider', 'value'),],\n [State('plot-index-selection', 'value'),\n State(\"store\", \"data\"),\n State('upload-data', 'filename'),\n ])\n def update_output(slider_range, index, df, filename):\n print('update_output')\n # df = df.loc[(df[index]>=slider_range[0]) & (df[index]<=slider_range[1])]\n df = df.loc[slider_range[0]:slider_range[1]]\n children = update_table(df, filename)\n return children\n\n\n @app.callback(Output('exploration-plot', 'figure'),\n [Input('plot-selection', 'value'),\n Input('range-slider', 'value'),\n Input('plot-index-selection', 'value'),\n # Input(\"store\", \"data\"),\n ], [State(\"store\", \"data\"),\n ],)\n def update_plot(columns, slider_range, index, df):\n # if (not index) or (not columns) or (not slider_range):\n # raise PreventUpdate\n print('update_plot')\n print(columns, index)\n if (index is not None) and (columns is not None) and (slider_range is not None):\n # df = df.loc[(df[index]>=slider_range[0]) & (df[index]<=slider_range[1])]\n df = df.loc[slider_range[0]:slider_range[1]]\n figure = update_graph(df, index, columns)\n return figure" ]
[ [ "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
berak/opencv_smallfry
[ "fd8f64980dff0527523791984d6cb3dfcd2bc9bc" ]
[ "audio_landmarks3d/lm.py" ]
[ "import numpy as np\nimport cv2\nimport wave\nimport subprocess\nimport os, base64\n\n# landmarks connections\ncons = [[48, 49], [49, 50], [50, 51], [51, 52], [52, 53], [53, 54], [54, 55], [55, 56], [56, 57],\n [57, 58], [58, 59], [59, 48], [60, 61], [61, 62], [62, 63], [63, 64], [64, 65], [65, 66],\n [66, 67], [67, 60], [27, 28], [28, 29], [29, 30], [30, 31], [30, 35], [31, 32], [32, 33],\n [33, 34], [34, 35], [27, 31], [27, 35], [17, 18], [18, 19], [19, 20], [20, 21],\n [22, 23], [23, 24], [24, 25], [25, 26], [36, 37], [37, 38], [38, 39], [39, 40], [40, 41],\n [36, 41], [43, 44], [44, 45], [45, 46], [46, 47], [42, 47], [0, 1], [1, 2], [2, 3], [3, 4],\n [4, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 10], [10, 11], [11, 12],\n [12, 13], [13, 14], [14, 15], [15, 16]]\n\nsr = 8000 # assumes: 8khz mono\nnum_frames = 7\nincrement = sr * 0.04 # 25 fps\nW,H = 400,400; # drawing\n\nnet = cv2.dnn.readNet(\"model.onnx\")\nmean_shape = np.load(\"mean_shape.npy\")\neigen_vectors = np.load(\"eigen_vectors.npy\").T\n\ndef animate(wfile):\n\tw = wave.open(wfile,\"rb\")\n\tn = w.getnframes()\n\tb = w.readframes(n)\n\ta = np.frombuffer(b,np.int16)\n\ta = np.array(a,np.float32)\n\ta /= 0x7ffff\n\ta /= a.max()\n\n\tsample_len = int(num_frames * increment)\n\tsample_pos = int(0)\n\n\tvid = cv2.VideoWriter(\"my.avi\",cv2.VideoWriter_fourcc(*'MJPG'), 25.0, (W,H))\n\twhile (sample_pos < n - sample_len):\n\t\tdata = a[int(sample_pos):int(sample_pos+sample_len)].reshape(1,1,sample_len)\n\t\tsample_pos += increment;\n\t\tnet.setInput(data)\n\t\tres = net.forward()\n\t\tpts = mean_shape.copy()\n\t\tfor i in range(eigen_vectors.shape[0]):\n\t\t\tpts[0,i] += res.dot(eigen_vectors[i,:])\n\t\tpts = pts.reshape(68,3) # 204==68*3\n\t\timg = np.ones((H,W,3),np.uint8)\n\t\timg[:,:] = (127,127,127)\n\t\tfor i in range(pts.shape[0]):\n\t\t\tx = int(pts[i,0] * W*2 + W/2)\n\t\t\ty = int(pts[i,1] * H*2 + H/2)\n\t\t\tcv2.circle(img, (x,y), 3, (50,50,255), -1)\n\t\tfor c in cons:\n\t\t\tx1 = int(pts[c[0],0] * W*2 + W/2)\n\t\t\ty1 = int(pts[c[0],1] * H*2 + H/2)\n\t\t\tx2 = int(pts[c[1],0] * W*2 + W/2)\n\t\t\ty2 = int(pts[c[1],1] * H*2 + H/2)\n\t\t\tcv2.line(img,(x1,y1),(x2,y2),(20,20,180),1)\n\t\tvid.write(img)\n\t\tcv2.imshow(\"draw\", img)\n\t\tcv2.waitKey(6)\n\n\tvid.release()\n\t#cv2.waitKey();\n\n\tcmd = 'ffmpeg -y -i my.avi -i '+wfile+' -c:v h264 -c:a aac -strict experimental res_.mp4'\n\tsubprocess.call(cmd, shell=True)\n\nanimate(\"S2.wav\")\n\"\"\"\ndef application(environ, start_response):\n request_body=None\n retcode = '200 OK'\n resp = \"dummy\\r\\n\"\n ct =\"text/html\"\n try:\n request_body_size = int(environ.get('CONTENT_LENGTH', 0))\n request_body = environ['wsgi.input'].read(request_body_size)\n except (ValueError):\n resp = \"no response\"\n url = environ['PATH_INFO'];\n if url == \"/\":\n resp = _read(\"up.html\")\n elif url == \"/dn\":\n ct = 'image/png'\n resp = _read(\"my.png\")\n elif url == \"/up\" and request_body:\n ct = 'image/png'\n resp = request_body.replace('data:' + ct + ';base64,', \"\")\n data = base64.b64decode(resp)\n buf = np.frombuffer(data, dtype=np.uint8)\n img = cv2.imdecode(buf, 1)\n img = process(img)\n cv2.imwrite(\"my.png\", img)\n ok, enc = cv2.imencode(\".png\", img)\n resp = base64.b64encode(enc.tostring())\n resp = 'data:' + ct + ';base64,' + resp\n start_response(retcode, [('Content-Type', ct), ('Content-Length', str(len(resp)))])\n return [resp]\n\nif __name__ == '__main__':\n from wsgiref.simple_server import make_server\n httpd = make_server('0.0.0.0', 
int(os.environ.get(\"PORT\", 9000)), application)\n while True: httpd.handle_request()\n\"\"\"" ]
[ [ "numpy.load", "numpy.ones", "numpy.array", "numpy.frombuffer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ArthurWuTW/openpilot
[ "920d8bf9d292f1ff076dd75c130a31805c357fb4" ]
[ "selfdrive/controls/lib/pid.py" ]
[ "import numpy as np\nfrom common.numpy_fast import clip, interp\n\ndef apply_deadzone(error, deadzone):\n if error > deadzone:\n error -= deadzone\n elif error < - deadzone:\n error += deadzone\n else:\n error = 0.\n return error\n\nclass PIController(object):\n def __init__(self, k_p, k_i, k_f=1., pos_limit=None, neg_limit=None, rate=100, sat_limit=0.8, convert=None):\n self._k_p = k_p # proportional gain\n self._k_i = k_i # integral gain\n self.k_f = k_f # feedforward gain\n\n self.pos_limit = pos_limit\n self.neg_limit = neg_limit\n\n self.sat_count_rate = 1.0 / rate\n self.i_unwind_rate = 0.3 / rate\n self.i_rate = 1.0 / rate\n self.sat_limit = sat_limit\n self.convert = convert\n\n self.reset()\n\n @property\n def k_p(self):\n return interp(self.speed, self._k_p[0], self._k_p[1])\n\n @property\n def k_i(self):\n return interp(self.speed, self._k_i[0], self._k_i[1])\n\n def _check_saturation(self, control, override, error):\n saturated = (control < self.neg_limit) or (control > self.pos_limit)\n\n if saturated and not override and abs(error) > 0.1:\n self.sat_count += self.sat_count_rate\n else:\n self.sat_count -= self.sat_count_rate\n\n self.sat_count = clip(self.sat_count, 0.0, 1.0)\n\n return self.sat_count > self.sat_limit\n\n def reset(self):\n self.p = 0.0\n self.i = 0.0\n self.f = 0.0\n self.sat_count = 0.0\n self.saturated = False\n self.control = 0\n\n def update(self, setpoint, measurement, speed=0.0, check_saturation=True, override=False, feedforward=0., deadzone=0., freeze_integrator=False):\n self.speed = speed\n\n error = float(apply_deadzone(setpoint - measurement, deadzone))\n self.p = error * self.k_p\n self.f = feedforward * self.k_f\n\n #if override:\n if False:\n self.i -= self.i_unwind_rate * float(np.sign(self.i))\n else:\n i = self.i + error * self.k_i * self.i_rate\n control = self.p + self.f + i\n\n if self.convert is not None:\n control = self.convert(control, speed=self.speed)\n\n # Update when changing i will move the control away from the limits\n # or when i will move towards the sign of the error\n if ((error >= 0 and (control <= self.pos_limit or i < 0.0)) or \\\n (error <= 0 and (control >= self.neg_limit or i > 0.0))) and \\\n not freeze_integrator:\n self.i = i\n\n control = self.p + self.f + self.i\n if self.convert is not None:\n control = self.convert(control, speed=self.speed)\n\n if check_saturation:\n self.saturated = self._check_saturation(control, override, error)\n else:\n self.saturated = False\n\n self.control = clip(control, self.neg_limit, self.pos_limit)\n return self.control\n" ]
[ [ "numpy.sign" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RBrearton/local_stats
[ "d38a10d296d3b1f0a83f81b3ebc95e17779d394a" ]
[ "tests/test_cluster.py" ]
[ "\"\"\"\nThis file contains tests for the cluster module's Cluster class.\n\"\"\"\n\n# Obviously we want to test 'private' attributes.\n# pylint: disable=protected-access\n\nimport numpy as np\n\nfrom local_stats.cluster import Cluster\n\n\ndef test_init():\n \"\"\"\n Classic test to blow up if attribute names change.\n \"\"\"\n cluster = Cluster([])\n assert len(cluster._arr) == 0\n\n\ndef test_mean(simple_cluster: Cluster):\n \"\"\"\n Make sure that the mean is being calculated correctly.\n \"\"\"\n assert simple_cluster.mean[0] == 1\n assert simple_cluster.mean[1] == 2\n\n\ndef test_size(simple_cluster: Cluster):\n \"\"\"\n Make sure we're calculating the size of a cluster properly.\n \"\"\"\n assert simple_cluster.size == 3\n\n\ndef test_pixel_indices(simple_cluster: Cluster):\n \"\"\"\n Make sure that the pixel indices are being returned correctly.\n \"\"\"\n assert isinstance(simple_cluster.pixel_indices, tuple)\n assert (simple_cluster.pixel_indices[0] == np.array([0, 1, 2])).all()\n assert (simple_cluster.pixel_indices[1] == np.array([1, 2, 3])).all()\n\n\ndef test_intensity(simple_cluster: Cluster):\n \"\"\"\n Make sure that we can properly calculate the area under a cluster.\n \"\"\"\n test_img = np.ones((5, 5))\n\n assert simple_cluster.intensity(test_img) == 3\n" ]
[ [ "numpy.array", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
karhankaan/CausalGAN
[ "2cf4d2038f5a2522cb60a18d1e2b5d67b82ab19f" ]
[ "figure_scripts/utils.py" ]
[ "from __future__ import print_function,division\nimport tensorflow as tf\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nimport shutil\nimport sys\nimport math\nimport json\nimport logging\nimport numpy as np\nfrom PIL import Image\nfrom datetime import datetime\n\nimport tensorflow as tf\nfrom PIL import Image\n\nimport math\nimport random\nimport pprint\nimport scipy.misc\nimport numpy as np\nfrom time import gmtime, strftime\nfrom six.moves import xrange\n\npp = pprint.PrettyPrinter()\n\ndef nhwc_to_nchw(x):\n return tf.transpose(x, [0, 3, 1, 2])\ndef to_nchw_numpy(image):\n if image.shape[3] in [1, 3]:\n new_image = image.transpose([0, 3, 1, 2])\n else:\n new_image = image\n return new_image\n\ndef norm_img(image, data_format=None):\n #image = tf.cast(image,tf.float32)/127.5 - 1.\n image = image/127.5 - 1.\n #if data_format:\n #image = to_nhwc(image, data_format)\n if data_format=='NCHW':\n image = to_nchw_numpy(image)\n\n image=tf.cast(image,tf.float32)\n return image\n\n\n#Denorming\ndef nchw_to_nhwc(x):\n return tf.transpose(x, [0, 2, 3, 1])\ndef to_nhwc(image, data_format):\n if data_format == 'NCHW':\n new_image = nchw_to_nhwc(image)\n else:\n new_image = image\n return new_image\ndef denorm_img(norm, data_format):\n return tf.clip_by_value(to_nhwc((norm + 1)*127.5, data_format), 0, 255)\n\n\ndef read_prepared_uint8_image(img_path):\n '''\n img_path should point to a uint8 image that is\n already cropped and resized\n '''\n cropped_image=scipy.misc.imread(img_path)\n if not np.all( np.array([64,64,3])==cropped_image.shape):\n raise ValueError('image must already be cropped and resized:',img_path)\n #TODO: warn if wrong dtype\n return cropped_image\n\ndef make_encode_dir(model,image_name):\n #Terminology\n if model.model_type=='began':\n result_dir=model.model_dir\n elif model.model_type=='dcgan':\n print('DCGAN')\n result_dir=model.checkpoint_dir\n encode_dir=os.path.join(result_dir,'encode_'+str(image_name))\n if not os.path.exists(encode_dir):\n os.mkdir(encode_dir)\n return encode_dir\n\ndef make_sample_dir(model):\n #Terminology\n if model.model_type=='began':\n result_dir=model.model_dir\n elif model.model_type=='dcgan':\n print('DCGAN')\n result_dir=model.checkpoint_dir\n\n sample_dir=os.path.join(result_dir,'sample_figures')\n if not os.path.exists(sample_dir):\n os.mkdir(sample_dir)\n return sample_dir\n\ndef guess_model_step(model):\n if model.model_type=='began':\n str_step=str( model.sess.run(model.step) )+'_'\n elif model.model_type=='dcgan':\n result_dir=model.checkpoint_dir\n ckpt = tf.train.get_checkpoint_state(result_dir)\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n str_step=ckpt_name[-5:]+'_'\n return str_step\n\ndef infer_grid_image_shape(N):\n if N%8==0:\n size=[8,N//8]\n else:\n size=[8,8]\n return size\n\n\ndef save_figure_images(model_type, tensor, filename, size, padding=2, normalize=False, scale_each=False):\n\n print('[*] saving:',filename)\n\n #nrow=size[0]\n nrow=size[1]#Was this number per row and now number of rows?\n\n if model_type=='began':\n began_save_image(tensor,filename,nrow,padding,normalize,scale_each)\n elif model_type=='dcgan':\n #images = np.split(tensor,len(tensor))\n images=tensor\n dcgan_save_images(images,size,filename)\n\n\n#Began originally\ndef make_grid(tensor, nrow=8, padding=2,\n normalize=False, scale_each=False):\n \"\"\"Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py\"\"\"\n nmaps = tensor.shape[0]\n xmaps = min(nrow, nmaps)\n ymaps = 
int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.shape[1] + padding), int(tensor.shape[2] + padding)\n grid = np.zeros([height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding // 2, 3], dtype=np.uint8)\n k = 0\n for y in range(ymaps):\n for x in range(xmaps):\n if k >= nmaps:\n break\n h, h_width = y * height + 1 + padding // 2, height - padding\n w, w_width = x * width + 1 + padding // 2, width - padding\n\n grid[h:h+h_width, w:w+w_width] = tensor[k]\n k = k + 1\n return grid\n\ndef began_save_image(tensor, filename, nrow=8, padding=2,\n normalize=False, scale_each=False):\n ndarr = make_grid(tensor, nrow=nrow, padding=padding,\n normalize=normalize, scale_each=scale_each)\n im = Image.fromarray(ndarr)\n im.save(filename)\n\n\n\n#Dcgan originally\nget_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])\n\ndef get_image(image_path, input_height, input_width,\n resize_height=64, resize_width=64,\n is_crop=True, is_grayscale=False):\n image = imread(image_path, is_grayscale)\n return transform(image, input_height, input_width,\n resize_height, resize_width, is_crop)\n\ndef dcgan_save_images(images, size, image_path):\n return imsave(inverse_transform(images), size, image_path)\n\ndef imread(path, is_grayscale = False):\n if (is_grayscale):\n return scipy.misc.imread(path, flatten = True).astype(np.float)\n else:\n return scipy.misc.imread(path).astype(np.float)\n\ndef merge_images(images, size):\n return inverse_transform(images)\n\ndef merge(images, size):\n h, w = images.shape[1], images.shape[2]\n img = np.zeros((h * size[0], w * size[1], 3))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j*h:j*h+h, i*w:i*w+w, :] = image\n return img\n\ndef imsave(images, size, path):\n return scipy.misc.imsave(path, merge(images, size))\n\ndef center_crop(x, crop_h, crop_w,\n resize_h=64, resize_w=64):\n if crop_w is None:\n crop_w = crop_h\n h, w = x.shape[:2]\n j = int(round((h - crop_h)/2.))\n i = int(round((w - crop_w)/2.))\n return scipy.misc.imresize(\n x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])\n\ndef transform(image, input_height, input_width, \n resize_height=64, resize_width=64, is_crop=True):\n if is_crop:\n cropped_image = center_crop(\n image, input_height, input_width, \n resize_height, resize_width)\n else:\n cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])\n return np.array(cropped_image)/127.5 - 1.\n\ndef inverse_transform(images):\n return (images+1.)/2.\n\n\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "tensorflow.transpose", "tensorflow.cast", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
reinforcementdriving/rlkit
[ "01319447037f3fb3b48ba0779dab2a48af357fa7" ]
[ "rlkit/data_management/obs_dict_replay_buffer.py" ]
[ "import numpy as np\nfrom gym.spaces import Dict, Discrete\n\nfrom rlkit.data_management.replay_buffer import ReplayBuffer\n\n\nclass ObsDictRelabelingBuffer(ReplayBuffer):\n \"\"\"\n Replay buffer for environments whose observations are dictionaries, such as\n - OpenAI Gym GoalEnv environments. https://blog.openai.com/ingredients-for-robotics-research/\n - multiworld MultitaskEnv. https://github.com/vitchyr/multiworld/\n\n\n Implementation details:\n - Only add_path is implemented.\n - Image observations are presumed to start with the 'image_' prefix\n - Every sample from [0, self._size] will be valid.\n - Observation and next observation are saved separately. It's a memory\n inefficient to save the observations twice, but it makes the code\n *much* easier since you no longer have to worry about termination\n conditions.\n \"\"\"\n\n def __init__(\n self,\n max_size,\n env,\n fraction_goals_rollout_goals=1.0,\n fraction_goals_env_goals=0.0,\n internal_keys=None,\n observation_key='observation',\n achieved_goal_key='achieved_goal',\n desired_goal_key='desired_goal',\n ):\n \"\"\"\n\n :param max_size:\n :param env:\n :param fraction_goals_rollout_goals: Default, no her.\n :param fraction_goals_env_goals: What fraction of goals are sampled\n \"from the environment\" assuming that the environment has a \"sample\n goal\" method. The remaining resampled goals are resampled using the\n \"future\" strategy, described in Hindsight Experience Replay.\n :param internal_keys: Extra keys in the observation dictoary to save.\n Mostly for debugging.\n :param observation_key:\n :param desired_goal_key:\n :param achieved_goal_key:\n \"\"\"\n if internal_keys is None:\n internal_keys = []\n self.internal_keys = internal_keys\n assert isinstance(env.observation_space, Dict)\n assert fraction_goals_env_goals >= 0\n assert fraction_goals_rollout_goals >= 0\n assert fraction_goals_env_goals + fraction_goals_rollout_goals <= 1.0\n self.max_size = max_size\n self.env = env\n self.fraction_goals_rollout_goals = fraction_goals_rollout_goals\n self.fraction_goals_env_goals = fraction_goals_env_goals\n self.ob_keys_to_save = [\n observation_key,\n desired_goal_key,\n achieved_goal_key,\n ]\n self.observation_key = observation_key\n self.desired_goal_key = desired_goal_key\n self.achieved_goal_key = achieved_goal_key\n\n self._action_dim = env.action_space.low.size\n self._actions = np.zeros((max_size, self._action_dim))\n # self._terminals[i] = a terminal was received at time i\n self._terminals = np.zeros((max_size, 1), dtype='uint8')\n # self._obs[key][i] is the value of observation[key] at time i\n self._obs = {}\n self._next_obs = {}\n self.ob_spaces = self.env.observation_space.spaces\n for key in self.ob_keys_to_save + internal_keys:\n assert key in self.ob_spaces, \\\n \"Key not found in the observation space: %s\" % key\n type = np.float64\n if key.startswith('image'):\n type = np.uint8\n self._obs[key] = np.zeros(\n (max_size, self.ob_spaces[key].low.size), dtype=type)\n self._next_obs[key] = np.zeros(\n (max_size, self.ob_spaces[key].low.size), dtype=type)\n\n self._top = 0\n self._size = 0\n\n # Let j be any index in self._idx_to_future_obs_idx[i]\n # Then self._next_obs[j] is a valid next observation for observation i\n self._idx_to_future_obs_idx = [None] * max_size\n\n if isinstance(self.env.action_space, Discrete):\n raise NotImplementedError(\"TODO. 
See issue 28.\")\n\n def add_sample(self, observation, action, reward, terminal,\n next_observation, **kwargs):\n raise NotImplementedError(\"Only use add_path\")\n\n def terminate_episode(self):\n pass\n\n def num_steps_can_sample(self):\n return self._size\n\n def add_path(self, path):\n obs = path[\"observations\"]\n actions = path[\"actions\"]\n rewards = path[\"rewards\"]\n next_obs = path[\"next_observations\"]\n terminals = path[\"terminals\"]\n path_len = len(rewards)\n\n actions = flatten_n(actions)\n obs = flatten_dict(obs, self.ob_keys_to_save + self.internal_keys)\n next_obs = flatten_dict(next_obs,\n self.ob_keys_to_save + self.internal_keys)\n obs = preprocess_obs_dict(obs)\n next_obs = preprocess_obs_dict(next_obs)\n\n if self._top + path_len >= self.max_size:\n \"\"\"\n All of this logic is to handle wrapping the pointer when the\n replay buffer gets full.\n \"\"\"\n num_pre_wrap_steps = self.max_size - self._top\n # numpy slice\n pre_wrap_buffer_slice = np.s_[\n self._top:self._top + num_pre_wrap_steps, :\n ]\n pre_wrap_path_slice = np.s_[0:num_pre_wrap_steps, :]\n\n num_post_wrap_steps = path_len - num_pre_wrap_steps\n post_wrap_buffer_slice = slice(0, num_post_wrap_steps)\n post_wrap_path_slice = slice(num_pre_wrap_steps, path_len)\n for buffer_slice, path_slice in [\n (pre_wrap_buffer_slice, pre_wrap_path_slice),\n (post_wrap_buffer_slice, post_wrap_path_slice),\n ]:\n self._actions[buffer_slice] = actions[path_slice]\n self._terminals[buffer_slice] = terminals[path_slice]\n for key in self.ob_keys_to_save + self.internal_keys:\n self._obs[key][buffer_slice] = obs[key][path_slice]\n self._next_obs[key][buffer_slice] = next_obs[key][\n path_slice]\n # Pointers from before the wrap\n for i in range(self._top, self.max_size):\n self._idx_to_future_obs_idx[i] = np.hstack((\n # Pre-wrap indices\n np.arange(i, self.max_size),\n # Post-wrap indices\n np.arange(0, num_post_wrap_steps)\n ))\n # Pointers after the wrap\n for i in range(0, num_post_wrap_steps):\n self._idx_to_future_obs_idx[i] = np.arange(\n i,\n num_post_wrap_steps,\n )\n else:\n slc = np.s_[self._top:self._top + path_len, :]\n self._actions[slc] = actions\n self._terminals[slc] = terminals\n for key in self.ob_keys_to_save + self.internal_keys:\n self._obs[key][slc] = obs[key]\n self._next_obs[key][slc] = next_obs[key]\n for i in range(self._top, self._top + path_len):\n self._idx_to_future_obs_idx[i] = np.arange(\n i, self._top + path_len\n )\n self._top = (self._top + path_len) % self.max_size\n self._size = min(self._size + path_len, self.max_size)\n\n def _sample_indices(self, batch_size):\n return np.random.randint(0, self._size, batch_size)\n\n def random_batch(self, batch_size):\n indices = self._sample_indices(batch_size)\n resampled_goals = self._next_obs[self.desired_goal_key][indices]\n\n num_rollout_goals = int(batch_size * self.fraction_goals_rollout_goals)\n num_env_goals = int(batch_size * self.fraction_goals_env_goals)\n num_future_goals = batch_size - (num_env_goals + num_rollout_goals)\n new_obs_dict = self._batch_obs_dict(indices)\n new_next_obs_dict = self._batch_next_obs_dict(indices)\n\n if num_env_goals > 0:\n env_goals = self.env.sample_goals(num_env_goals)\n env_goals = preprocess_obs_dict(env_goals)\n last_env_goal_idx = num_rollout_goals + num_env_goals\n resampled_goals[num_rollout_goals:last_env_goal_idx] = (\n env_goals[self.desired_goal_key]\n )\n if num_future_goals > 0:\n future_obs_idxs = []\n for i in indices[-num_future_goals:]:\n possible_future_obs_idxs = 
self._idx_to_future_obs_idx[i]\n # This is generally faster than random.choice.\n # Makes you wonder what random.choice is doing...\n num_options = len(possible_future_obs_idxs)\n next_obs_i = int(np.random.randint(0, num_options))\n future_obs_idxs.append(possible_future_obs_idxs[next_obs_i])\n future_obs_idxs = np.array(future_obs_idxs)\n resampled_goals[-num_future_goals:] = (\n self._next_obs[self.achieved_goal_key][future_obs_idxs]\n )\n\n new_obs_dict[self.desired_goal_key] = resampled_goals\n new_next_obs_dict[self.desired_goal_key] = resampled_goals\n new_obs_dict = postprocess_obs_dict(new_obs_dict)\n new_next_obs_dict = postprocess_obs_dict(new_next_obs_dict)\n # resampled_goals must be postprocessed as well\n resampled_goals = new_next_obs_dict[self.desired_goal_key]\n\n new_actions = self._actions[indices]\n \"\"\"\n For example, the environments in this repo have batch-wise\n implementations of computing rewards:\n\n https://github.com/vitchyr/multiworld\n \"\"\"\n if hasattr(self.env, 'compute_rewards'):\n new_rewards = self.env.compute_rewards(\n new_actions,\n new_next_obs_dict,\n )\n else: # Assuming it's a (possibly wrapped) gym GoalEnv\n new_rewards = np.ones((batch_size, 1))\n for i in range(batch_size):\n new_rewards[i] = self.env.compute_reward(\n new_next_obs_dict[self.achieved_goal_key][i],\n new_next_obs_dict[self.desired_goal_key][i],\n None\n )\n new_rewards = new_rewards.reshape(-1, 1)\n\n new_obs = new_obs_dict[self.observation_key]\n new_next_obs = new_next_obs_dict[self.observation_key]\n batch = {\n 'observations': new_obs,\n 'actions': new_actions,\n 'rewards': new_rewards,\n 'terminals': self._terminals[indices],\n 'next_observations': new_next_obs,\n 'resampled_goals': resampled_goals,\n 'indices': np.array(indices).reshape(-1, 1),\n }\n return batch\n\n def _batch_obs_dict(self, indices):\n return {\n key: self._obs[key][indices]\n for key in self.ob_keys_to_save\n }\n\n def _batch_next_obs_dict(self, indices):\n return {\n key: self._next_obs[key][indices]\n for key in self.ob_keys_to_save\n }\n\n\ndef flatten_n(xs):\n xs = np.asarray(xs)\n return xs.reshape((xs.shape[0], -1))\n\n\ndef flatten_dict(dicts, keys):\n \"\"\"\n Turns list of dicts into dict of np arrays\n \"\"\"\n return {\n key: flatten_n([d[key] for d in dicts])\n for key in keys\n }\n\n\ndef preprocess_obs_dict(obs_dict):\n \"\"\"\n Apply internal replay buffer representation changes: save images as bytes\n \"\"\"\n for obs_key, obs in obs_dict.items():\n if 'image' in obs_key and obs is not None:\n obs_dict[obs_key] = unnormalize_image(obs)\n return obs_dict\n\n\ndef postprocess_obs_dict(obs_dict):\n \"\"\"\n Undo internal replay buffer representation changes: save images as bytes\n \"\"\"\n for obs_key, obs in obs_dict.items():\n if 'image' in obs_key and obs is not None:\n obs_dict[obs_key] = normalize_image(obs)\n return obs_dict\n\n\ndef normalize_image(image):\n assert image.dtype == np.uint8\n return np.float64(image) / 255.0\n\n\ndef unnormalize_image(image):\n assert image.dtype != np.uint8\n return np.uint8(image * 255.0)\n" ]
[ [ "numpy.asarray", "numpy.uint8", "numpy.arange", "numpy.ones", "numpy.float64", "numpy.array", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shawnvosburg/IFT712-Projet
[ "bf7fe023f4cec02291dd1198d65824bc74ee05e4" ]
[ "src/DataManagement/Preprocessing/PolynomialFeatures.py" ]
[ "#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\nimport pandas as pd\nfrom src.DataManagement.Preprocessing import PreprocessingStrategy\nfrom sklearn.preprocessing import PolynomialFeatures as _PolynomialFeatures\n\nclass PolynomialFeatures(PreprocessingStrategy):\n def __init__(self,**kwargs):\n super().__init__()\n self._method = _PolynomialFeatures(**kwargs)\n self.hyperparams = self._method.get_params()\n \n def preprocess(self, data):\n \"\"\" Return the transformed data \"\"\"\n return pd.DataFrame(self._method.fit_transform(data), columns = self._method.get_feature_names(data.columns), index = data.index)\n\n def jsonify(self):\n out = super().jsonify()\n out.update(**{'hyperparams':self.hyperparams})\n return out\n" ]
[ [ "sklearn.preprocessing.PolynomialFeatures" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Cubbee/apex
[ "0a991543846966d5f586540dc2441e512139e9fc" ]
[ "examples/deprecated_api/word_language_model/generate.py" ]
[ "###############################################################################\n# Language Modeling on Penn Tree Bank\n#\n# This file generates new sentences sampled from the language model\n#\n###############################################################################\n\nimport argparse\n\nimport torch\n\nimport data\n\nparser = argparse.ArgumentParser(description='PyTorch Wikitext-2 Language Model')\n\n# Model parameters.\nparser.add_argument('--data', type=str, default='./data/wikitext-2',\n help='location of the data corpus')\nparser.add_argument('--checkpoint', type=str, default='./model.pt',\n help='model checkpoint to use')\nparser.add_argument('--outf', type=str, default='generated.txt',\n help='output file for generated text')\nparser.add_argument('--words', type=int, default='1000',\n help='number of words to generate')\nparser.add_argument('--seed', type=int, default=1111,\n help='random seed')\nparser.add_argument('--cuda', action='store_true',\n help='use CUDA')\nparser.add_argument('--temperature', type=float, default=1.0,\n help='temperature - higher will increase diversity')\nparser.add_argument('--log-interval', type=int, default=100,\n help='reporting interval')\nargs = parser.parse_args()\n\n# Set the random seed manually for reproducibility.\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\nif args.temperature < 1e-3:\n parser.error(\"--temperature has to be greater or equal 1e-3\")\n\nwith open(args.checkpoint, 'rb') as f:\n model = torch.load(f)\nmodel.eval()\n\nif args.cuda:\n model.cuda()\nelse:\n model.cpu()\n\ncorpus = data.Corpus(args.data)\nntokens = len(corpus.dictionary)\nhidden = model.init_hidden(1)\nwith torch.no_grad():\n input = torch.rand(1, 1).mul(ntokens).long()\n if args.cuda:\n input = input.cuda()\n\n with open(args.outf, 'w') as outf:\n for i in range(args.words):\n output, hidden = model(input, hidden)\n word_weights = output.squeeze().float().data.div(args.temperature).exp().cpu()\n word_idx = torch.multinomial(word_weights, 1)[0]\n input.data.fill_(word_idx)\n word = corpus.dictionary.idx2word[word_idx]\n\n outf.write(word + ('\\n' if i % 20 == 19 else ' '))\n\n if i % args.log_interval == 0:\n print('| Generated {}/{} words'.format(i, args.words))\n" ]
[ [ "torch.load", "torch.manual_seed", "torch.multinomial", "torch.no_grad", "torch.rand", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AllenZYJ/torchcv
[ "8432507a910cf0b42366ed7e5a526f454956e9f1", "8432507a910cf0b42366ed7e5a526f454956e9f1", "8432507a910cf0b42366ed7e5a526f454956e9f1" ]
[ "methods/gan/image_translator_test.py", "models/seg/model_manager.py", "models/seg/nets/denseassp.py" ]
[ "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Donny You ([email protected])\n# Class Definition for GAN.\n\n\nimport os\nimport torch\n\nfrom methods.tools.blob_helper import BlobHelper\nfrom methods.tools.runner_helper import RunnerHelper\nfrom models.gan.model_manager import ModelManager\nfrom datasets.test.test_data_loader import TestDataLoader\nfrom utils.helpers.dc_helper import DCHelper\nfrom utils.helpers.image_helper import ImageHelper\nfrom utils.tools.logger import Logger as Log\n\n\nclass ImageTranslatorTest(object):\n def __init__(self, configer):\n self.configer = configer\n self.blob_helper = BlobHelper(configer)\n self.model_manager = ModelManager(configer)\n self.test_loader = TestDataLoader(configer)\n self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')\n self.gan_net = None\n\n self._init_model()\n\n def _init_model(self):\n self.gan_net = self.model_manager.gan_model()\n self.gan_net = RunnerHelper.load_net(self, self.gan_net)\n self.gan_net.eval()\n\n def test(self, test_dir, out_dir):\n if self.configer.exists('test', 'mode') and self.configer.get('test', 'mode') == 'nir2vis':\n jsonA_path = os.path.join(test_dir, 'val_label{}A.json'.format(self.configer.get('data', 'tag')))\n test_loader_A = self.test_loader.get_testloader(json_path=jsonA_path) if os.path.exists(jsonA_path) else None\n jsonB_path = os.path.join(test_dir, 'val_label{}B.json'.format(self.configer.get('data', 'tag')))\n test_loader_B = self.test_loader.get_testloader(json_path=jsonB_path) if os.path.exists(jsonB_path) else None\n elif self.configer.exists('test', 'mode') and self.configer.get('test', 'mode') == 'pix2pix':\n imgA_dir = os.path.join(test_dir, 'imageA')\n test_loader_A = self.test_loader.get_testloader(test_dir=imgA_dir) if os.path.exists(imgA_dir) else None\n imgB_dir = os.path.join(test_dir, 'imageB')\n test_loader_B = self.test_loader.get_testloader(test_dir=imgB_dir) if os.path.exists(imgB_dir) else None\n else:\n imgA_dir = os.path.join(test_dir, 'imageA')\n test_loader_A = self.test_loader.get_testloader(test_dir=imgA_dir) if os.path.exists(imgA_dir) else None\n imgB_dir = os.path.join(test_dir, 'imageB')\n test_loader_B = self.test_loader.get_testloader(test_dir=imgB_dir) if os.path.exists(imgB_dir) else None\n\n if test_loader_A is not None:\n for data_dict in test_loader_A:\n new_data_dict = dict(imgA=data_dict['img'], testing=True)\n with torch.no_grad():\n out_dict = self.gan_net(new_data_dict)\n\n meta_list = DCHelper.tolist(data_dict['meta'])\n for key, value in out_dict.items():\n for i in range(len(value)):\n img_bgr = self.blob_helper.tensor2bgr(value[i])\n img_path = meta_list[i]['img_path']\n Log.info('Image Path: {}'.format(img_path))\n ImageHelper.save(img_bgr, os.path.join(out_dir, '{}_{}.jpg'.format(meta_list[i]['filename'], key)))\n\n if test_loader_B is not None:\n for data_dict in test_loader_B:\n new_data_dict = dict(imgB=data_dict['img'], testing=True)\n with torch.no_grad():\n out_dict = self.gan_net(new_data_dict)\n meta_list = DCHelper.tolist(data_dict['meta'])\n for key, value in out_dict.items():\n for i in range(len(value)):\n img_bgr = self.blob_helper.tensor2bgr(value[i])\n img_path = meta_list[i]['img_path']\n Log.info('Image Path: {}'.format(img_path))\n ImageHelper.save(img_bgr, os.path.join(out_dir, '{}_{}.jpg'.format(meta_list[i]['filename'], key)))\n", "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Donny You([email protected]), Xiangtai([email protected])\n# Select Seg Model for semantic 
segmentation.\n\n\nimport torch\n\nfrom models.seg.nets.denseassp import DenseASPP\nfrom models.seg.nets.deeplabv3 import DeepLabV3\nfrom models.seg.nets.pspnet import PSPNet\nfrom models.seg.loss.seg_modules import SegCELoss, SegOhemCELoss, SegAuxCELoss, SegAuxEncCELoss, SegAuxOhemCELoss\nfrom utils.tools.logger import Logger as Log\n\n\nSEG_MODEL_DICT = {\n 'deeplabv3': DeepLabV3,\n 'pspnet': PSPNet,\n 'denseaspp': DenseASPP\n}\n\nSEG_LOSS_DICT = {\n 'seg_ce_loss': SegCELoss,\n 'seg_ohemce_loss': SegOhemCELoss,\n 'seg_auxce_loss': SegAuxCELoss,\n 'seg_auxencce_loss': SegAuxEncCELoss,\n 'seg_auxohemce_loss': SegAuxOhemCELoss\n}\n\n\nclass ModelManager(object):\n\n def __init__(self, configer):\n self.configer = configer\n\n def get_seg_model(self):\n model_name = self.configer.get('network', 'model_name')\n\n if model_name not in SEG_MODEL_DICT:\n Log.error('Model: {} not valid!'.format(model_name))\n exit(1)\n\n model = SEG_MODEL_DICT[model_name](self.configer)\n\n return model\n\n def get_seg_loss(self, loss_type=None):\n key = self.configer.get('loss', 'loss_type') if loss_type is None else loss_type\n if key not in SEG_LOSS_DICT:\n Log.error('Loss: {} not valid!'.format(key))\n exit(1)\n\n loss = SEG_LOSS_DICT[key](self.configer)\n if self.configer.get('network', 'loss_balance') and torch.cuda.device_count() > 1:\n from extensions.tools.parallel.data_parallel import DataParallelCriterion\n loss = DataParallelCriterion(loss)\n\n return loss\n", "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Donny You([email protected])\n# SynBN_version of DenseAspp\n\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom models.backbones.backbone_selector import BackboneSelector\nfrom models.tools.module_helper import ModuleHelper\n\n\nMODEL_CONFIG = {\n 'dropout0': 0.1,\n 'dropout1': 0.1,\n 'd_feature0': 256,\n 'd_feature1': 128\n}\n\n\nclass DenseASPP(nn.Module):\n \"\"\"\n * output_scale can only be set to 8 or 16\n \"\"\"\n def __init__(self, configer):\n super(DenseASPP, self).__init__()\n self.configer = configer\n\n dropout0 = MODEL_CONFIG['dropout0']\n dropout1 = MODEL_CONFIG['dropout1']\n d_feature0 = MODEL_CONFIG['d_feature0']\n d_feature1 = MODEL_CONFIG['d_feature1']\n\n self.backbone = BackboneSelector(configer).get_backbone()\n\n num_features = self.backbone.get_num_features()\n\n # _Transition halves the backbone channels; track that in the local num_features\n # (the original referenced an unset self.num_features here)\n self.trans = _Transition(num_input_features=num_features,\n num_output_features=num_features // 2,\n norm_type=self.configer.get('network', 'norm_type'))\n\n num_features = num_features // 2\n\n self.ASPP_3 = _DenseAsppBlock(input_num=num_features, num1=d_feature0, num2=d_feature1,\n dilation_rate=3, drop_out=dropout0,\n norm_type=self.configer.get('network', 'norm_type'))\n\n self.ASPP_6 = _DenseAsppBlock(input_num=num_features + d_feature1 * 1, num1=d_feature0, num2=d_feature1,\n dilation_rate=6, drop_out=dropout0,\n norm_type=self.configer.get('network', 'norm_type'))\n\n self.ASPP_12 = _DenseAsppBlock(input_num=num_features + d_feature1 * 2, num1=d_feature0, num2=d_feature1,\n dilation_rate=12, drop_out=dropout0,\n norm_type=self.configer.get('network', 'norm_type'))\n\n self.ASPP_18 = _DenseAsppBlock(input_num=num_features + d_feature1 * 3, num1=d_feature0, num2=d_feature1,\n dilation_rate=18, drop_out=dropout0,\n norm_type=self.configer.get('network', 'norm_type'))\n\n self.ASPP_24 = _DenseAsppBlock(input_num=num_features + d_feature1 * 4, num1=d_feature0, num2=d_feature1,\n dilation_rate=24, drop_out=dropout0,\n norm_type=self.configer.get('network', 'norm_type'))\n\n num_features = num_features + 5 * d_feature1\n\n self.classification = nn.Sequential(\n nn.Dropout2d(p=dropout1),\n nn.Conv2d(num_features, self.configer.get('data', 'num_classes'), kernel_size=1, padding=0)\n )\n\n def forward(self, data_dict):\n x = self.backbone(data_dict['img'])\n feature = self.trans(x)\n\n aspp3 = self.ASPP_3(feature)\n feature = torch.cat((aspp3, feature), dim=1)\n\n aspp6 = self.ASPP_6(feature)\n feature = torch.cat((aspp6, feature), dim=1)\n\n aspp12 = self.ASPP_12(feature)\n feature = torch.cat((aspp12, feature), dim=1)\n\n aspp18 = self.ASPP_18(feature)\n feature = torch.cat((aspp18, feature), dim=1)\n\n aspp24 = self.ASPP_24(feature)\n feature = torch.cat((aspp24, feature), dim=1)\n\n x = self.classification(feature)\n\n x = F.interpolate(x, size=(data_dict['img'].size(2), data_dict['img'].size(3)),\n mode=\"bilinear\", align_corners=True)\n return dict(out=x)\n\n\nclass _DenseAsppBlock(nn.Sequential):\n \"\"\" ConvNet block for building DenseASPP. \"\"\"\n\n def __init__(self, input_num, num1, num2, dilation_rate, drop_out, norm_type):\n super(_DenseAsppBlock, self).__init__()\n self.add_module('conv1', nn.Conv2d(in_channels=input_num, out_channels=num1, kernel_size=1)),\n\n self.add_module('norm1', ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_features=num1)),\n self.add_module('relu1', nn.ReLU(inplace=False)),\n self.add_module('conv2', nn.Conv2d(in_channels=num1, out_channels=num2, kernel_size=3,\n dilation=dilation_rate, padding=dilation_rate)),\n # norm2 follows conv2, whose output has num2 channels (input_num was a bug)\n self.add_module('norm2', ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_features=num2)),\n self.add_module('relu2', nn.ReLU(inplace=False)),\n\n self.drop_rate = drop_out\n\n def forward(self, _input):\n feature = super(_DenseAsppBlock, self).forward(_input)\n if self.drop_rate > 0:\n feature = F.dropout2d(feature, p=self.drop_rate, training=self.training)\n\n return feature\n\n\nclass _Transition(nn.Sequential):\n def __init__(self, num_input_features, num_output_features, norm_type):\n super(_Transition, self).__init__()\n self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))\n self.add_module('norm', ModuleHelper.BatchNorm2d(norm_type=norm_type)(num_features=num_output_features)),\n self.add_module('relu', nn.ReLU(inplace=False))\n\n\nif __name__ == \"__main__\":\n # NOTE: DenseASPP expects a configer object, so this smoke test is illustrative only.\n model = DenseASPP(12)\n image = torch.randn(1, 3, 512, 512)\n out = model(dict(img=image))\n print(out['out'].size())\n" ]
[ [ "torch.no_grad" ], [ "torch.cuda.device_count" ], [ "torch.nn.functional.dropout2d", "torch.nn.Dropout2d", "torch.cat", "torch.randn", "torch.nn.Conv2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
carlgogo/dl4ds
[ "2675fe772b7e165ab8726a51c75dd3d9d0a7a465" ]
[ "dl4ds/models/spt_preups.py" ]
[ "import tensorflow as tf\nfrom tensorflow.keras.layers import (Add, Conv2D, Input, Concatenate, \n TimeDistributed)\nfrom tensorflow.keras.models import Model\n\nfrom .blocks import (RecurrentConvBlock, ResidualBlock, ConvBlock, \n DenseBlock, TransitionBlock, LocalizedConvBlock,\n get_dropout_layer)\nfrom ..utils import checkarg_backbone, checkarg_dropout_variant\n\n\ndef recnet_pin(\n backbone_block,\n n_channels, \n n_aux_channels,\n hr_size,\n time_window,\n # ----- below are parameters that shall be tweaked by the user -----\n n_channels_out=1, \n n_filters=8, \n n_blocks=6, \n normalization=None,\n dropout_rate=0,\n dropout_variant=None,\n attention=False,\n activation='relu',\n output_activation=None,\n localcon_layer=False):\n \"\"\"\n Recurrent deep neural network with different backbone architectures \n (according to the ``backbone_block``) and pre-upsampling via interpolation\n (the samples are expected to be interpolated to the HR grid). This model is \n capable of exploiting spatio-temporal samples.\n\n The interpolation method depends on the ``interpolation`` argument used in\n the training procedure (which is passed to the DataGenerator).\n\n Parameters\n ----------\n backbone_block : str\n Backbone type. One of dl4ds.BACKBONE_BLOCKS. WARNING: this parameter is\n not supposed to be set by the user. It's set internallly through\n dl4ds.Trainers. \n n_channels : int\n Number of channels/variables in each sample. WARNING: this parameter is\n not supposed to be set by the user. It's set internallly through\n dl4ds.Trainers. \n n_aux_channels : int\n Number of auxiliary channels. WARNING: this parameter is not supposed to \n be set by the user. It's set internallly through dl4ds.Trainers. \n hr_size : tuple\n Height and width of the HR grid. WARNING: this parameter is not supposed \n to be set by the user. It's set internallly through dl4ds.Trainers.\n time_window : int\n Temporal window or number of time steps in each sample. WARNING: this \n parameter is not supposed to be set by the user. It's set internallly \n through dl4ds.Trainers.\n n_filters : int, optional\n Number of convolutional filters in RecurrentConvBlock. `n_filters` sets \n the number of output filters in the convolution inside the ConvLSTM unit. \n n_blocks : int, optional\n Number of recurrent convolutional blocks (RecurrentConvBlock). \n Sets the depth of the network. \n normalization : str or None, optional\n Normalization method in the residual or dense block. Can be either 'bn'\n for BatchNormalization or 'ln' for LayerNormalization. If None, then no\n normalization is performed (eg., for the 'resnet' backbone this results \n in the EDSR-style residual block).\n dropout_rate : float, optional\n Float between 0 and 1. Fraction of the input units to drop. If 0 then no\n dropout is applied. \n dropout_variant : str or None, optional\n Type of dropout. Defined in dl4ds.DROPOUT_VARIANTS variable. \n attention : bool, optional\n If True, dl4ds.ChannelAttention2D is used in convolutional blocks. \n activation : str, optional\n Activation function to use, as supported by tf.keras. E.g., 'relu' or \n 'gelu'.\n output_activation : str, optional\n Activation function to use in the last ConvBlock. Useful to constraint \n the values distribution of the output grid.\n localcon_layer : bool, optional\n If True, the LocalizedConvBlock is activated in the output module. 
\n \"\"\"\n backbone_block = checkarg_backbone(backbone_block)\n dropout_variant = checkarg_dropout_variant(dropout_variant)\n\n auxvar_array_is_given = True if n_aux_channels > 0 else False\n h_hr, w_hr = hr_size\n if not localcon_layer: \n x_in = Input(shape=(None, None, None, n_channels))\n else:\n x_in = Input(shape=(None, h_hr, w_hr, n_channels))\n \n init_n_filters = n_filters\n\n x = b = RecurrentConvBlock(n_filters, activation=activation, \n normalization=normalization)(x_in)\n\n for i in range(n_blocks):\n b = RecurrentConvBlock(n_filters, activation=activation, \n normalization=normalization, dropout_rate=dropout_rate,\n dropout_variant=dropout_variant, name_suffix=str(i + 2))(b)\n\n b = get_dropout_layer(dropout_rate, dropout_variant, dim=3)(b)\n\n if backbone_block == 'convnet':\n x = b\n elif backbone_block == 'resnet':\n x = Add()([x, b])\n elif backbone_block == 'densenet':\n x = Concatenate()([x, b])\n\n #---------------------------------------------------------------------------\n # HR aux channels are processed\n if auxvar_array_is_given:\n s_in = Input(shape=(None, None, n_aux_channels))\n s = ConvBlock(n_filters, activation=activation, dropout_rate=0, \n normalization=None, attention=attention)(s_in)\n s = tf.expand_dims(s, 1)\n s = tf.repeat(s, time_window, axis=1)\n x = Concatenate()([x, s])\n\n #---------------------------------------------------------------------------\n # Localized convolutional layer\n if localcon_layer:\n lcb = LocalizedConvBlock(filters=2, use_bias=True)\n lws = TimeDistributed(lcb, name='localized_conv_block')(x)\n x = Concatenate()([x, lws])\n\n #---------------------------------------------------------------------------\n # Last conv layers\n x = TransitionBlock(init_n_filters, name='TransitionLast')(x)\n x = ConvBlock(init_n_filters, activation=None, dropout_rate=dropout_rate, \n normalization=normalization, attention=True)(x) \n\n x = ConvBlock(n_channels_out, activation=output_activation, dropout_rate=0, \n normalization=normalization, attention=False)(x) \n \n model_name = 'rec' + backbone_block + '_pin' \n if auxvar_array_is_given:\n return Model(inputs=[x_in, s_in], outputs=x, name=model_name)\n else:\n return Model(inputs=[x_in], outputs=x, name=model_name)\n" ]
[ [ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.models.Model", "tensorflow.keras.layers.TimeDistributed", "tensorflow.expand_dims", "tensorflow.repeat", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
navinsingh1977/ga-learner-dsmp-repo
[ "8eebf96b1c7289b9c325737251ec0042b8151be0", "8eebf96b1c7289b9c325737251ec0042b8151be0" ]
[ "Gradient-Boosting-Machine/code.py", "Linear-Regression/code.py" ]
[ "# --------------\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n#path - Path of file \ndf = pd.read_csv(path)\n# Code starts here\nX = df.drop(['customerID','Churn'], axis = 1)\ny = df['Churn'].copy()\nX_train,X_test,y_train,y_test = train_test_split(X,y, test_size = 0.3, random_state = 0)\n\n\n\n\n\n# --------------\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\n\n# Code starts here\nX_train['TotalCharges'] = X_train['TotalCharges'].replace(' ', np.NaN)\nX_test['TotalCharges'] = X_test['TotalCharges'].replace(' ', np.NaN)\nX_train['TotalCharges'] = X_train['TotalCharges'].astype(float)\nX_test['TotalCharges'] = X_test['TotalCharges'].astype(float)\nX_train['TotalCharges'] = X_train['TotalCharges'].fillna(X_train['TotalCharges'].mean())\nX_test['TotalCharges'] = X_test['TotalCharges'].fillna(X_test['TotalCharges'].mean())\nprint(X_test['TotalCharges'].dtypes)\nprint(X_train['TotalCharges'].dtypes)\nX_train.isnull().sum()\ncat_col = X_train.select_dtypes(include=['object']).columns.values.tolist()\n\nfor i in range(0, len(cat_col)):\n le = LabelEncoder()\n X_train[cat_col[i]] = le.fit_transform(X_train[cat_col[i]])\n X_test[cat_col[i]] = le.fit_transform(X_test[cat_col[i]])\n\n#X_train = le.fit_transform(X_train)\n#X_test = le.fit_transform(X_test)\n\ny_train = y_train.replace({'No':0, 'Yes':1})\ny_test = y_test.replace({'No':0, 'Yes':1})\n\n\n\n\n\n\n\n# --------------\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.metrics import accuracy_score,classification_report,confusion_matrix\n\n# Code starts here\n\nada_model = AdaBoostClassifier(random_state=0)\nada_model.fit(X_train, y_train)\ny_pred = ada_model.predict(X_test)\nada_score = accuracy_score(y_test, y_pred)\nada_cm = confusion_matrix(y_test, y_pred)\nada_cr = classification_report(y_test, y_pred)\nprint('ada_score', ada_score)\nprint('ada_cm',ada_cm)\nprint('ada_cr',ada_cr)\n\n\n\n# --------------\nfrom xgboost import XGBClassifier\nfrom sklearn.model_selection import GridSearchCV\n\n#Parameter list\nparameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3],\n 'max_depth':range(1,3)}\n\n# Code starts here\nxgb_model = XGBClassifier(random_state=0)\nxgb_model.fit(X_train, y_train)\ny_pred = xgb_model.predict(X_test)\nxgb_score = accuracy_score(y_test, y_pred)\nxgb_cm = confusion_matrix(y_test, y_pred)\nxgb_cr = classification_report(y_test, y_pred)\nprint('xgb_score', xgb_score)\nprint('xgb_cm', xgb_cm)\nprint('xgb_cr', xgb_cr)\n\nclf_model = GridSearchCV(estimator=xgb_model, param_grid=parameters)\nclf_model.fit(X_train, y_train)\ny_pred = clf_model.predict(X_test)\nclf_score = accuracy_score(y_test, y_pred)\nclf_cm = confusion_matrix(y_test, y_pred)\nclf_cr = classification_report(y_test, y_pred)\n\nprint('clf_score', clf_score)\nprint('clf_cm', clf_cm)\nprint('clf_cr', clf_cr)\n\n\n\n", "# --------------\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\n# code starts here\ndf = pd.read_csv(path)\n#print(df.head())\nX = df.drop('list_price', axis = 1)\ny = df['list_price']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state=6)\nprint(X_train)\nprint(X_test)\n\n# code ends here\n\n\n\n# --------------\nimport matplotlib.pyplot as plt\n\n# code starts here \ncols = X_train.columns\n\nfig, axes = plt.subplots(nrows=3, ncols=3, figsize=(20,20))\n\nfor i in range(0,3):\n for j in range(0,3): \n col = cols[i*3 + j]\n axes[i,j].set_title(col)\n axes[i,j].scatter(X_train[col],y_train)\n 
axes[i,j].set_xlabel(col)\n axes[i,j].set_ylabel('list_price')\n\n\n\n# code ends here\n\n\n\n# --------------\n# Code starts here\ncorr = X_train.corr()\nprint(corr)\nX_train = X_train.drop('play_star_rating',axis = 1)\nX_train = X_train.drop('val_star_rating',axis = 1)\nX_test = X_test.drop('play_star_rating', axis = 1)\nX_test = X_test.drop('val_star_rating', axis = 1)\n\n# Code ends here\n\n\n# --------------\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n# Code starts here\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\ny_pred = regressor.predict(X_test)\nprint(y_pred[0])\nmse = mean_squared_error(y_test, y_pred)\nprint(mse)\nr2 = r2_score(y_test, y_pred)\nprint(r2)\n# Code ends here\n\n\n# --------------\n# Code starts here\nresidual = y_test-y_pred\nprint(residual)\nplt.hist(residual)\n\n\n# Code ends here\n\n\n" ]
[ [ "sklearn.model_selection.GridSearchCV", "pandas.read_csv", "sklearn.metrics.confusion_matrix", "sklearn.model_selection.train_test_split", "sklearn.ensemble.AdaBoostClassifier", "sklearn.preprocessing.LabelEncoder", "sklearn.metrics.classification_report", "sklearn.metrics.accuracy_score" ], [ "sklearn.cross_validation.train_test_split", "pandas.read_csv", "sklearn.metrics.r2_score", "matplotlib.pyplot.subplots", "sklearn.metrics.mean_squared_error", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.hist" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Harsha-Musunuri/stylegan2-pytorch
[ "bd9b42f7031aa1d16245ac64bc562baf0fc0945f", "bd9b42f7031aa1d16245ac64bc562baf0fc0945f" ]
[ "train_styleGAN2_expts.py", "dev/train_swap_cond.py" ]
[ "import argparse\nimport math\nimport random\nimport os\nfrom PIL import Image\nimport numpy as np\nimport torch\nfrom torch import nn, autograd, optim\nfrom torch.nn import functional as F\nfrom torch.utils import data\nimport torch.distributed as dist\nfrom torchvision import datasets, transforms, utils\nfrom tqdm import tqdm\nimport util\n\nfrom fid import extract_feature_from_samples, calc_fid\nimport pickle\n\ntry:\n import wandb\n\nexcept ImportError:\n wandb = None\n\n\nfrom dataset import MultiResolutionDataset\nfrom distributed import (\n get_rank,\n synchronize,\n reduce_loss_dict,\n reduce_sum,\n get_world_size,\n)\n\nfrom non_leaking import augment, AdaptiveAugment\n\n\ndef data_sampler(dataset, shuffle, distributed):\n if distributed:\n return data.distributed.DistributedSampler(dataset, shuffle=shuffle)\n\n if shuffle:\n return data.RandomSampler(dataset)\n\n else:\n return data.SequentialSampler(dataset)\n\n\ndef requires_grad(model, flag=True):\n for p in model.parameters():\n p.requires_grad = flag\n\n\ndef accumulate(model1, model2, decay=0.999):\n par1 = dict(model1.named_parameters())\n par2 = dict(model2.named_parameters())\n\n for k in par1.keys():\n par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)\n\n\ndef sample_data(loader, datatype=\"imagefolder\"):\n if datatype == \"imagefolder\":\n while True:\n for batch, _ in loader:\n yield batch\n elif datatype == \"other\":\n while True:\n for batch in loader:\n yield batch\n\n\ndef d_logistic_loss(real_pred, fake_pred):\n real_loss = F.softplus(-real_pred)\n fake_loss = F.softplus(fake_pred)\n\n return real_loss.mean() + fake_loss.mean()\n\n\ndef d_r1_loss(real_pred, real_img, args):\n if args.useConvdFix==True:\n print(\"I entered\")\n from op import conv2d_gradfix\n with conv2d_gradfix.no_weight_gradients():\n grad_real, = autograd.grad(\n outputs=real_pred.sum(), inputs=real_img, create_graph=True\n )\n else:\n grad_real, = autograd.grad(\n outputs=real_pred.sum(), inputs=real_img, create_graph=True\n )\n grad_penalty = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean()\n\n return grad_penalty\n\n\ndef g_nonsaturating_loss(fake_pred):\n loss = F.softplus(-fake_pred).mean()\n\n return loss\n\n\ndef g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):\n noise = torch.randn_like(fake_img) / math.sqrt(\n fake_img.shape[2] * fake_img.shape[3]\n )\n grad, = autograd.grad(\n outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True\n )\n path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))\n\n path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)\n\n path_penalty = (path_lengths - path_mean).pow(2).mean()\n\n return path_penalty, path_mean.detach(), path_lengths\n\n\ndef make_noise(batch, latent_dim, n_noise, device):\n if n_noise == 1:\n return torch.randn(batch, latent_dim, device=device)\n\n noises = torch.randn(n_noise, batch, latent_dim, device=device).unbind(0)\n\n return noises\n\n\ndef mixing_noise(batch, latent_dim, prob, device):\n if prob > 0 and random.random() < prob:\n return make_noise(batch, latent_dim, 2, device)\n\n else:\n return [make_noise(batch, latent_dim, 1, device)]\n\n\ndef set_grad_none(model, targets):\n for n, p in model.named_parameters():\n if n in targets:\n p.grad = None\n\n\ndef train(args, loader, generator, discriminator, g_optim, d_optim, g_ema, device):\n loader = sample_data(loader, datatype=\"imagefolder\")\n # inception related:\n if (get_rank() == 0):\n from calc_inception import load_patched_inception_v3\n 
inception = load_patched_inception_v3().to(device)\n inception.eval()\n if args.eval_every > 0:\n with open(os.path.join(args.log_dir, 'log_fid.txt'), 'a+') as f:\n f.write(f\"Name: {getattr(args, 'name', 'NA')}\\n{'-' * 50}\\n\")\n if args.log_every > 0:\n with open(os.path.join(args.log_dir, 'log.txt'), 'a+') as f:\n f.write(f\"Name: {getattr(args, 'name', 'NA')}\\n{'-' * 50}\\n\")\n\n pbar = range(args.iter)\n\n if get_rank() == 0:\n pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)\n\n mean_path_length = 0\n\n d_loss_val = 0\n r1_loss = torch.tensor(0.0, device=device)\n g_loss_val = 0\n path_loss = torch.tensor(0.0, device=device)\n path_lengths = torch.tensor(0.0, device=device)\n mean_path_length_avg = 0\n loss_dict = {}\n\n if args.distributed:\n g_module = generator.module\n d_module = discriminator.module\n\n else:\n g_module = generator\n d_module = discriminator\n\n accum = 0.5 ** (32 / (10 * 1000))\n ada_aug_p = args.augment_p if args.augment_p > 0 else 0.0\n r_t_stat = 0\n\n if args.augment and args.augment_p == 0:\n ada_augment = AdaptiveAugment(args.ada_target, args.ada_length, 8, device)\n\n sample_z = torch.randn(args.n_sample, args.latent, device=device)\n\n for idx in pbar:\n i = idx + args.start_iter\n\n if i > args.iter:\n print(\"Done!\")\n\n break\n\n real_img = next(loader)\n real_img = real_img.to(device)\n\n requires_grad(generator, False)\n requires_grad(discriminator, True)\n\n noise = mixing_noise(args.batch, args.latent, args.mixing, device)\n fake_img, _ = generator(noise)\n\n if args.augment:\n real_img_aug, _ = augment(real_img, ada_aug_p)\n fake_img, _ = augment(fake_img, ada_aug_p)\n\n else:\n real_img_aug = real_img\n\n fake_pred = discriminator(fake_img)\n real_pred = discriminator(real_img_aug)\n d_loss = d_logistic_loss(real_pred, fake_pred)\n\n loss_dict[\"d\"] = d_loss\n loss_dict[\"real_score\"] = real_pred.mean()\n loss_dict[\"fake_score\"] = fake_pred.mean()\n\n discriminator.zero_grad()\n d_loss.backward()\n d_optim.step()\n\n if args.augment and args.augment_p == 0:\n ada_aug_p = ada_augment.tune(real_pred)\n r_t_stat = ada_augment.r_t_stat\n\n d_regularize = i % args.d_reg_every == 0\n\n if d_regularize:\n real_img.requires_grad = True\n\n if args.augment:\n real_img_aug, _ = augment(real_img, ada_aug_p)\n\n else:\n real_img_aug = real_img\n\n real_pred = discriminator(real_img_aug)\n r1_loss = d_r1_loss(real_pred, real_img,args)\n\n discriminator.zero_grad()\n (args.r1 / 2 * r1_loss * args.d_reg_every + 0 * real_pred[0]).backward()\n\n d_optim.step()\n\n loss_dict[\"r1\"] = r1_loss\n\n requires_grad(generator, True)\n requires_grad(discriminator, False)\n\n noise = mixing_noise(args.batch, args.latent, args.mixing, device)\n fake_img, _ = generator(noise)\n\n if args.augment:\n fake_img, _ = augment(fake_img, ada_aug_p)\n\n fake_pred = discriminator(fake_img)\n g_loss = g_nonsaturating_loss(fake_pred)\n\n loss_dict[\"g\"] = g_loss\n\n generator.zero_grad()\n g_loss.backward()\n g_optim.step()\n\n #g_reg starts\n g_regularize = False\n if args.useG_reg==True:\n # print(\"I entered g_reg\")\n g_regularize = i % args.g_reg_every == 0\n if g_regularize:\n path_batch_size = max(1, args.batch // args.path_batch_shrink)\n noise = mixing_noise(path_batch_size, args.latent, args.mixing, device)\n fake_img, latents = generator(noise, return_latents=True)\n\n path_loss, mean_path_length, path_lengths = g_path_regularize(\n fake_img, latents, mean_path_length\n )\n\n generator.zero_grad()\n weighted_path_loss = 
args.path_regularize * args.g_reg_every * path_loss\n\n if args.path_batch_shrink:\n weighted_path_loss += 0 * fake_img[0, 0, 0, 0]\n\n weighted_path_loss.backward()\n\n g_optim.step()\n\n mean_path_length_avg = (\n reduce_sum(mean_path_length).item() / get_world_size()\n )\n\n loss_dict[\"path\"] = path_loss\n loss_dict[\"path_length\"] = path_lengths.mean()\n\n accumulate(g_ema, g_module, accum)\n\n loss_reduced = reduce_loss_dict(loss_dict)\n\n d_loss_val = loss_reduced[\"d\"].mean().item()\n g_loss_val = loss_reduced[\"g\"].mean().item()\n r1_val = loss_reduced[\"r1\"].mean().item()\n path_loss_val = loss_reduced[\"path\"].mean().item()\n real_score_val = loss_reduced[\"real_score\"].mean().item()\n fake_score_val = loss_reduced[\"fake_score\"].mean().item()\n path_length_val = loss_reduced[\"path_length\"].mean().item()\n\n if get_rank() == 0:\n pbar.set_description(\n (\n f\"d: {d_loss_val:.4f}; g: {g_loss_val:.4f}; r1: {r1_val:.4f}; \"\n f\"path: {path_loss_val:.4f}; mean path: {mean_path_length_avg:.4f}; \"\n f\"augment: {ada_aug_p:.4f}\"\n )\n )\n\n # inception related:\n if args.eval_every > 0 and i % args.eval_every == 0:\n real_mean = real_cov = mean_latent = None\n with open(args.inception, \"rb\") as f:\n embeds = pickle.load(f)\n real_mean = embeds[\"mean\"]\n real_cov = embeds[\"cov\"]\n # print(\"yahooo!\\n\")\n with torch.no_grad():\n g_ema.eval()\n if args.truncation < 1:\n mean_latent = g_ema.mean_latent(4096)\n # print(\"I am fine sir!\\n\")\n features = extract_feature_from_samples(\n g_ema, inception, args.truncation, mean_latent, 64, args.n_sample_fid, device\n ).numpy()\n # print(\"I am normal sir!\")\n sample_mean = np.mean(features, 0)\n sample_cov = np.cov(features, rowvar=False)\n fid = calc_fid(sample_mean, sample_cov, real_mean, real_cov)\n with open(os.path.join(args.log_dir, 'log_fid.txt'), 'a+') as f:\n f.write(f\"{i:07d}; fid: {float(fid):.4f};\\n\")\n # print(\"alright hurray \\n\")\n\n if i % args.log_every == 0:\n with open(os.path.join(args.log_dir, 'log.txt'), 'a+') as f:\n f.write(\n (\n f\"{i:07d}; \"\n f\"d: {d_loss_val:.4f}; g: {g_loss_val:.4f}; r1: {r1_val:.4f}; \"\n f\"path: {path_loss_val:.4f}; mean path: {mean_path_length_avg:.4f}; \"\n f\"augment: {ada_aug_p:.4f};\\n\"\n )\n )\n\n if i % args.log_every == 0:\n with torch.no_grad():\n g_ema.eval()\n sample, _ = g_ema([sample_z])\n utils.save_image(\n sample,\n os.path.join(args.log_dir, 'sample', f\"{str(i).zfill(6)}.png\"),\n nrow=int(args.n_sample ** 0.5),\n normalize=True,\n range=(-1, 1),\n )\n\n if i % args.save_every == 0:\n torch.save(\n {\n \"g\": g_module.state_dict(),\n \"d\": d_module.state_dict(),\n \"g_ema\": g_ema.state_dict(),\n \"g_optim\": g_optim.state_dict(),\n \"d_optim\": d_optim.state_dict(),\n \"args\": args,\n \"ada_aug_p\": ada_aug_p,\n \"iter\": i,\n },\n os.path.join(args.log_dir, 'weight', f\"{str(i).zfill(6)}.pt\"),\n )\n\n if i % args.save_latest_every == 0:\n torch.save(\n {\n \"g\": g_module.state_dict(),\n \"d\": d_module.state_dict(),\n \"g_ema\": g_ema.state_dict(),\n \"g_optim\": g_optim.state_dict(),\n \"d_optim\": d_optim.state_dict(),\n \"args\": args,\n \"ada_aug_p\": ada_aug_p,\n \"iter\": i,\n },\n os.path.join(args.log_dir, 'weight', f\"latest.pt\"),\n )\n\n\nif __name__ == \"__main__\":\n device = \"cuda\"\n\n parser = argparse.ArgumentParser(description=\"StyleGAN2 trainer\")\n parser.add_argument(\"--path\", type=str, help=\"path to the lmdb dataset\",\n default=\"/common/users/sm2322/MS-Thesis/AllDatasets/cifar10Train\")\n 
parser.add_argument(\"--resume\", action='store_true')\n parser.add_argument('--arch', type=str, default='stylegan2', help='model architectures (stylegan2 | swagan)')\n parser.add_argument(\"--name\", type=str, help=\"experiment name\", default='expt3-withoutGreg-newRosan-cifar10')\n parser.add_argument(\"--iter\", type=int, default=10000, help=\"total training iterations\")\n # inception args:\n parser.add_argument(\"--truncation\", type=float, default=1, help=\"truncation factor\")\n parser.add_argument(\"--inception\", type=str,\n default=\"/common/users/sm2322/MS-Thesis/GAN-Thesis-Work-Remote/styleGAN2-AE-Ligong-Remote/inception_cifar10Train10000.pkl\",\n help=\"path to precomputed inception embedding\")\n parser.add_argument(\"--n_sample_fid\", type=int, default=10000, help=\"number of the samples for calculating FID\")\n parser.add_argument(\"--useConvdFix\", action='store_true', help=\"should I use ConvdFix\") # args for when to eval ?\n parser.add_argument(\"--useG_reg\", action='store_true', help=\"should I use G_Reg\") # args for when to eval ?\n parser.add_argument(\"--log_root\", type=str, help=\"where to save training logs\", default='/common/users/sm2322/MS-Thesis/GAN-Thesis-Work-Remote/styleGAN2-AE-Ligong-Remote/logs')\n parser.add_argument(\"--eval_every\", type=int, default=1000, help=\"interval of metric evaluation\")\n parser.add_argument(\"--log_every\", type=int, default=100, help=\"save samples every # iters\")\n parser.add_argument(\"--save_every\", type=int, default=1000, help=\"save checkpoints every # iters\")\n parser.add_argument(\"--save_latest_every\", type=int, default=100, help=\"save latest checkpoints every # iters\")\n\n parser.add_argument(\n \"--batch\", type=int, default=16, help=\"batch sizes for each gpus\"\n )\n parser.add_argument(\n \"--n_sample\",\n type=int,\n default=20,\n help=\"number of the samples generated during training\",\n )\n parser.add_argument(\n \"--size\", type=int, default=32, help=\"image sizes for the model\"\n )\n parser.add_argument(\n \"--r1\", type=float, default=10, help=\"weight of the r1 regularization\"\n )\n parser.add_argument(\n \"--path_regularize\",\n type=float,\n default=2,\n help=\"weight of the path length regularization\",\n )\n parser.add_argument(\n \"--path_batch_shrink\",\n type=int,\n default=2,\n help=\"batch size reducing factor for the path length regularization (reduce memory consumption)\",\n )\n parser.add_argument(\n \"--d_reg_every\",\n type=int,\n default=16,\n help=\"interval of the applying r1 regularization\",\n )\n parser.add_argument(\n \"--g_reg_every\",\n type=int,\n default=4,\n help=\"interval of the applying path length regularization\",\n )\n parser.add_argument(\n \"--mixing\", type=float, default=0.9, help=\"probability of latent code mixing\"\n )\n parser.add_argument(\n \"--ckpt\",\n type=str,\n default=None,\n help=\"path to the checkpoints to resume training\",\n )\n parser.add_argument(\"--lr\", type=float, default=0.002, help=\"learning rate\")\n parser.add_argument(\n \"--channel_multiplier\",\n type=int,\n default=2,\n help=\"channel multiplier factor for the model. 
config-f = 2, else = 1\",\n )\n parser.add_argument(\n \"--wandb\", action=\"store_true\", help=\"use weights and biases logging\"\n )\n parser.add_argument(\n \"--local_rank\", type=int, default=0, help=\"local rank for distributed training\"\n )\n parser.add_argument(\n \"--augment\", action=\"store_true\", help=\"apply non leaking augmentation\"\n )\n parser.add_argument(\n \"--augment_p\",\n type=float,\n default=0,\n help=\"probability of applying augmentation. 0 = use adaptive augmentation\",\n )\n parser.add_argument(\n \"--ada_target\",\n type=float,\n default=0.6,\n help=\"target augmentation probability for adaptive augmentation\",\n )\n parser.add_argument(\n \"--ada_length\",\n type=int,\n default=500 * 1000,\n help=\"target duration to reach the augmentation probability for adaptive augmentation\",\n )\n parser.add_argument(\n \"--ada_every\",\n type=int,\n default=256,\n help=\"probability update interval of the adaptive augmentation\",\n )\n\n args = parser.parse_args()\n\n print(\"okay man\",args.useConvdFix,\"\\n\",args.useG_reg)\n\n n_gpu = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n args.distributed = n_gpu > 1\n\n if args.distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\", init_method=\"env://\")\n synchronize()\n\n args.latent = 512\n args.n_mlp = 8\n\n args.start_iter = 0\n if get_rank() == 0:\n util.set_log_dir(args)\n util.print_args(parser, args)\n\n if args.arch == 'stylegan2':\n from model import Generator, Discriminator\n\n # elif args.arch == 'swagan':\n # from swagan import Generator, Discriminator\n\n generator = Generator(\n args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier\n ).to(device)\n discriminator = Discriminator(\n args.size, channel_multiplier=args.channel_multiplier\n ).to(device)\n g_ema = Generator(\n args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier\n ).to(device)\n g_ema.eval()\n accumulate(g_ema, generator, 0)\n\n g_reg_ratio = 1\n if args.useG_reg==True:\n g_reg_ratio = args.g_reg_every / (args.g_reg_every + 1)\n d_reg_ratio = args.d_reg_every / (args.d_reg_every + 1)\n\n g_optim = optim.Adam(\n generator.parameters(),\n lr=args.lr * g_reg_ratio,\n betas=(0 ** g_reg_ratio, 0.99 ** g_reg_ratio),\n )\n d_optim = optim.Adam(\n discriminator.parameters(),\n lr=args.lr * d_reg_ratio,\n betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio),\n )\n\n if args.resume:\n if args.ckpt is None:\n args.ckpt = os.path.join(args.log_dir, 'weight', f\"latest.pt\")\n print(\"load model:\", args.ckpt)\n\n ckpt = torch.load(args.ckpt, map_location=lambda storage, loc: storage)\n\n try:\n ckpt_name = os.path.basename(args.ckpt)\n if 'iter' in ckpt:\n args.start_iter = ckpt[\"iter\"]\n else:\n args.start_iter = int(os.path.splitext(ckpt_name)[0])\n\n except ValueError:\n pass\n\n generator.load_state_dict(ckpt[\"g\"])\n discriminator.load_state_dict(ckpt[\"d\"])\n g_ema.load_state_dict(ckpt[\"g_ema\"])\n\n g_optim.load_state_dict(ckpt[\"g_optim\"])\n d_optim.load_state_dict(ckpt[\"d_optim\"])\n\n if args.distributed:\n generator = nn.parallel.DistributedDataParallel(\n generator,\n device_ids=[args.local_rank],\n output_device=args.local_rank,\n broadcast_buffers=False,\n )\n\n discriminator = nn.parallel.DistributedDataParallel(\n discriminator,\n device_ids=[args.local_rank],\n output_device=args.local_rank,\n broadcast_buffers=False,\n )\n\n transform = transforms.Compose(\n [\n transforms.RandomHorizontalFlip(),\n 
transforms.Resize(args.size, Image.LANCZOS),\n transforms.CenterCrop(args.size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\n ]\n )\n dataset = datasets.ImageFolder(args.path, transform=transform)\n loader = data.DataLoader(\n dataset,\n batch_size=args.batch,\n sampler=data_sampler(dataset, shuffle=True, distributed=args.distributed),\n drop_last=True,\n )\n\n # if get_rank() == 0 and wandb is not None and args.wandb:\n # wandb.init(project=\"stylegan 2\")\n\n train(args, loader, generator, discriminator, g_optim, d_optim, g_ema, device)\n", "import argparse\r\nimport math\r\nimport random\r\nimport os\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torch import nn, autograd, optim\r\nfrom torch.nn import functional as F\r\nfrom torch.utils import data\r\nimport torch.distributed as dist\r\nfrom torchvision import transforms, utils\r\nfrom PIL import Image\r\nfrom tqdm import tqdm\r\nimport util\r\nimport pdb\r\nst = pdb.set_trace\r\n\r\ntry:\r\n import wandb\r\n\r\nexcept ImportError:\r\n wandb = None\r\n\r\nfrom model import Generator, Discriminator\r\nfrom idinvert_pytorch.models.perceptual_model import VGG16\r\nfrom dataset import MultiResolutionDataset, VideoFolderDataset\r\nfrom distributed import (\r\n get_rank,\r\n synchronize,\r\n reduce_loss_dict,\r\n reduce_sum,\r\n get_world_size,\r\n)\r\nfrom non_leaking import augment, AdaptiveAugment\r\n\r\n\r\ndef data_sampler(dataset, shuffle, distributed):\r\n if distributed:\r\n return data.distributed.DistributedSampler(dataset, shuffle=shuffle)\r\n\r\n if shuffle:\r\n return data.RandomSampler(dataset)\r\n\r\n else:\r\n return data.SequentialSampler(dataset)\r\n\r\n\r\ndef requires_grad(model, flag=True):\r\n if model is not None:\r\n for p in model.parameters():\r\n p.requires_grad = flag\r\n\r\n\r\ndef accumulate(model1, model2, decay=0.999):\r\n par1 = dict(model1.named_parameters())\r\n par2 = dict(model2.named_parameters())\r\n\r\n for k in par1.keys():\r\n par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)\r\n\r\n\r\ndef sample_data(loader):\r\n # Endless iterator\r\n while True:\r\n for batch in loader:\r\n yield batch\r\n\r\n\r\ndef d_logistic_loss(real_pred, fake_pred):\r\n real_loss = F.softplus(-real_pred)\r\n fake_loss = F.softplus(fake_pred)\r\n\r\n return real_loss.mean() + fake_loss.mean()\r\n\r\n\r\ndef d_r1_loss(real_pred, real_img):\r\n grad_real, = autograd.grad(\r\n outputs=real_pred.sum(), inputs=real_img, create_graph=True\r\n )\r\n grad_penalty = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean()\r\n\r\n return grad_penalty\r\n\r\n\r\ndef g_nonsaturating_loss(fake_pred):\r\n loss = F.softplus(-fake_pred).mean()\r\n\r\n return loss\r\n\r\n\r\ndef g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):\r\n noise = torch.randn_like(fake_img) / math.sqrt(\r\n fake_img.shape[2] * fake_img.shape[3]\r\n )\r\n grad, = autograd.grad(\r\n outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True\r\n )\r\n path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))\r\n\r\n path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)\r\n\r\n path_penalty = (path_lengths - path_mean).pow(2).mean()\r\n\r\n return path_penalty, path_mean.detach(), path_lengths\r\n\r\n\r\ndef make_noise(batch, latent_dim, n_noise, device):\r\n if n_noise == 1:\r\n return torch.randn(batch, latent_dim, device=device)\r\n\r\n noises = torch.randn(n_noise, batch, latent_dim, device=device).unbind(0)\r\n\r\n return noises\r\n\r\n\r\ndef 
mixing_noise(batch, latent_dim, prob, device):\r\n if prob > 0 and random.random() < prob:\r\n return make_noise(batch, latent_dim, 2, device)\r\n\r\n else:\r\n return [make_noise(batch, latent_dim, 1, device)]\r\n\r\n\r\ndef set_grad_none(model, targets):\r\n for n, p in model.named_parameters():\r\n if n in targets:\r\n p.grad = None\r\n\r\n\r\ndef accumulate_batches(data_iter, num):\r\n samples = []\r\n while num > 0:\r\n imgs = next(data_iter)\r\n samples.append(imgs)\r\n num -= imgs.size(0)\r\n samples = torch.cat(samples, dim=0)\r\n if num < 0:\r\n samples = samples[:num, ...]\r\n return samples\r\n\r\n\r\ndef load_real_samples(args, data_iter):\r\n if args.cache is not None:\r\n npy_path = os.path.splitext(args.cache)[0] + f\"_real_{args.n_sample}.npy\"\r\n else:\r\n npy_path = None\r\n # guard against npy_path being None before touching the filesystem\r\n if npy_path is not None and os.path.exists(npy_path):\r\n sample_x = torch.from_numpy(np.load(npy_path)).to(args.device)\r\n else:\r\n sample_x = accumulate_batches(data_iter, args.n_sample).to(args.device)\r\n if npy_path is not None:\r\n np.save(npy_path, sample_x.cpu().numpy())\r\n return sample_x\r\n\r\n\r\ndef cross_reconstruction(encoder, generator, frames1, frames2, frames3, cond='cond1'):\r\n # Conditional Discriminator 1:\r\n # recon pair: [frame1, recon2]\r\n # cross pair: [frame1, cross2]\r\n # real pair: [[frame1, frame2], [frame1, frame3]]\r\n # fake pair: [[frame1, recon2], [frame1, cross2]]\r\n # ---\r\n # Conditional Discriminator 2:\r\n # recon pair: [frame1, recon2]\r\n # cross pair: [frame2, cross3]\r\n # real pair: [[frame1, frame2], [frame2, frame3]]\r\n # fake pair: [[frame1, recon2], [frame2, cross3]]\r\n # ---\r\n # Pac Discriminator:\r\n # real pair: [frame1, frame2]\r\n # fake pair: [recon1, cross2]\r\n batch = frames1.shape[0]\r\n if cond == 'cond1':\r\n w1, _ = encoder(frames1)\r\n w2, _ = encoder(frames2)\r\n delta_w = w2 - w1\r\n delta_w = delta_w[torch.randperm(batch),...]\r\n x_recon, _ = generator([w2], input_is_latent=True, return_latents=False)\r\n x_real = frames2\r\n x_cross, _ = generator([w1 + delta_w], input_is_latent=True, return_latents=False)\r\n recon_pair = torch.cat((frames1, x_recon), 1)\r\n cross_pair = torch.cat((frames1, x_cross), 1)\r\n real_pair12 = torch.cat((frames1, frames2), 1)\r\n real_pair13 = torch.cat((frames1, frames3), 1)\r\n fake_pair = torch.cat((recon_pair, cross_pair), 0)\r\n real_pair = torch.cat((real_pair12, real_pair13), 0)\r\n elif cond == 'cond2':\r\n w1, _ = encoder(frames1)\r\n w2, _ = encoder(frames2)\r\n w3, _ = encoder(frames3)\r\n delta_w = w3 - w2\r\n delta_w = delta_w[torch.randperm(batch),...]\r\n x_recon, _ = generator([w2], input_is_latent=True, return_latents=False)\r\n x_real = frames2\r\n x_cross, _ = generator([w2 + delta_w], input_is_latent=True, return_latents=False)\r\n recon_pair = torch.cat((frames1, x_recon), 1)\r\n cross_pair = torch.cat((frames2, x_cross), 1)\r\n real_pair12 = torch.cat((frames1, frames2), 1)\r\n real_pair23 = torch.cat((frames2, frames3), 1)\r\n fake_pair = torch.cat((recon_pair, cross_pair), 0)\r\n real_pair = torch.cat((real_pair12, real_pair23), 0)\r\n elif cond == 'pac':\r\n w1, _ = encoder(frames1)\r\n w2, _ = encoder(frames2)\r\n delta_w = w2 - w1\r\n delta_w = delta_w[torch.randperm(batch),...]\r\n x_recon, _ = generator([w1], input_is_latent=True, return_latents=False)\r\n x_real = frames1\r\n x_cross, _ = generator([w1 + delta_w], input_is_latent=True, return_latents=False)\r\n fake_pair = torch.cat((x_recon, x_cross), 1)\r\n real_pair = torch.cat((frames1, frames2), 1)\r\n # return real_img, fake_img, 
x_real, x_recon, x_cross\r\n return real_pair, fake_pair, x_real, x_recon, x_cross\r\n\r\n\r\ndef train(args, loader, encoder, generator, discriminator, discriminator_w,\r\n vggnet, pwcnet, e_optim, g_optim, g1_optim, d_optim, dw_optim,\r\n e_ema, g_ema, device):\r\n loader = sample_data(loader)\r\n args.toggle_grads = True\r\n args.augment = False\r\n\r\n pbar = range(args.iter)\r\n\r\n if get_rank() == 0:\r\n pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)\r\n mean_path_length = 0\r\n d_loss_val = 0\r\n e_loss_val = 0\r\n rec_loss_val = 0\r\n vgg_loss_val = 0\r\n adv_loss_val = 0\r\n path_loss = torch.tensor(0.0, device=device)\r\n path_lengths = torch.tensor(0.0, device=device)\r\n loss_dict = {\"d\": torch.tensor(0., device=device),\r\n \"real_score\": torch.tensor(0., device=device),\r\n \"fake_score\": torch.tensor(0., device=device),\r\n \"hybrid_score\": torch.tensor(0., device=device),\r\n \"r1_d\": torch.tensor(0., device=device),\r\n \"rec\": torch.tensor(0., device=device),}\r\n avg_pix_loss = util.AverageMeter()\r\n avg_vgg_loss = util.AverageMeter()\r\n\r\n if args.distributed:\r\n e_module = encoder.module\r\n d_module = discriminator.module\r\n g_module = generator.module\r\n else:\r\n e_module = encoder\r\n d_module = discriminator\r\n g_module = generator\r\n\r\n accum = 0.5 ** (32 / (10 * 1000))\r\n ada_aug_p = args.augment_p if args.augment_p > 0 else 0.0\r\n r_t_stat = 0\r\n\r\n if args.augment and args.augment_p == 0:\r\n ada_augment = AdaptiveAugment(args.ada_target, args.ada_length, 256, device)\r\n\r\n # sample_x = accumulate_batches(loader, args.n_sample).to(device)\r\n sample_x = load_real_samples(args, loader)\r\n sample_x1 = sample_x[:,0,...]\r\n sample_x2 = sample_x[:,-1,...]\r\n sample_idx = torch.randperm(args.n_sample)\r\n sample_z = torch.randn(args.n_sample, args.latent, device=device)\r\n\r\n for idx in pbar:\r\n i = idx + args.start_iter\r\n\r\n if get_rank() == 0:\r\n if i % args.log_every == 0:\r\n with torch.no_grad():\r\n e_eval = e_ema\r\n e_eval.eval()\r\n g_ema.eval()\r\n nrow = int(args.n_sample ** 0.5)\r\n nchw = list(sample_x1.shape)[1:]\r\n # Recon\r\n latent_real, _ = e_eval(sample_x1)\r\n fake_img, _ = g_ema([latent_real], input_is_latent=True, return_latents=False)\r\n sample = torch.cat((sample_x1.reshape(args.n_sample//nrow, nrow, *nchw), \r\n fake_img.reshape(args.n_sample//nrow, nrow, *nchw)), 1)\r\n utils.save_image(\r\n sample.reshape(2*args.n_sample, *nchw),\r\n os.path.join(args.log_dir, 'sample', f\"{str(i).zfill(6)}-recon.png\"),\r\n nrow=nrow,\r\n normalize=True,\r\n value_range=(-1, 1),\r\n )\r\n # Cross\r\n w1, _ = e_eval(sample_x1)\r\n w2, _ = e_eval(sample_x2)\r\n delta_w = w2 - w1\r\n delta_w = delta_w[sample_idx,...]\r\n fake_img, _ = g_ema([w1 + delta_w], input_is_latent=True, return_latents=False)\r\n sample = torch.cat((sample_x2.reshape(args.n_sample//nrow, nrow, *nchw), \r\n fake_img.reshape(args.n_sample//nrow, nrow, *nchw)), 1)\r\n utils.save_image(\r\n sample.reshape(2*args.n_sample, *nchw),\r\n os.path.join(args.log_dir, 'sample', f\"{str(i).zfill(6)}-cross.png\"),\r\n nrow=nrow,\r\n normalize=True,\r\n value_range=(-1, 1),\r\n )\r\n # Sample\r\n sample, _ = g_ema([sample_z])\r\n utils.save_image(\r\n sample,\r\n os.path.join(args.log_dir, 'sample', f\"{str(i).zfill(6)}-sample.png\"),\r\n nrow=nrow,\r\n normalize=True,\r\n value_range=(-1, 1),\r\n )\r\n e_eval.train()\r\n\r\n if i > args.iter:\r\n print(\"Done!\")\r\n break\r\n\r\n frames = next(loader) # [N, T, C, H, W]\r\n batch = 
frames.shape[0]\r\n frames1 = frames[:,0,...]\r\n selected_indices = torch.sort(torch.multinomial(torch.ones(batch, args.nframe_num-1), 2)+1, 1)[0]\r\n frames2 = frames[range(batch),selected_indices[:,0],...]\r\n frames3 = frames[range(batch),selected_indices[:,1],...]\r\n frames1 = frames1.to(device)\r\n frames2 = frames2.to(device)\r\n frames3 = frames3.to(device)\r\n\r\n # Train Discriminator\r\n if args.toggle_grads:\r\n requires_grad(encoder, False)\r\n requires_grad(generator, False)\r\n requires_grad(discriminator, True)\r\n requires_grad(discriminator_w, True)\r\n\r\n real_img, fake_img, _, _, _ = cross_reconstruction(encoder, generator, frames1, frames2, frames3, args.cond_disc)\r\n\r\n if args.augment:\r\n real_img_aug, _ = augment(real_img, ada_aug_p)\r\n fake_img_aug, _ = augment(fake_img, ada_aug_p)\r\n else:\r\n real_img_aug = real_img\r\n fake_img_aug = fake_img\r\n \r\n fake_pred = discriminator(fake_img_aug)\r\n real_pred = discriminator(real_img_aug)\r\n d_loss = d_logistic_loss(real_pred, fake_pred)\r\n\r\n if args.lambda_gan > 0:\r\n noise = mixing_noise(args.batch, args.latent, args.mixing, device)\r\n fake_img, _ = generator(noise)\r\n if args.augment:\r\n fake_img, _ = augment(fake_img, ada_aug_p)\r\n fake_pred = discriminator(fake_img)\r\n fake_loss = F.softplus(fake_pred)\r\n d_loss += fake_loss.mean() * args.lambda_gan\r\n\r\n loss_dict[\"d\"] = d_loss\r\n loss_dict[\"real_score\"] = real_pred.mean()\r\n # loss_dict[\"fake_score\"] = fake_pred.mean()\r\n fake_pred1, fake_pred2 = fake_pred.chunk(2, dim=0)\r\n loss_dict[\"fake_score\"] = fake_pred1.mean()\r\n loss_dict[\"hybrid_score\"] = fake_pred2.mean()\r\n\r\n discriminator.zero_grad()\r\n d_loss.backward()\r\n d_optim.step()\r\n\r\n if args.augment and args.augment_p == 0:\r\n ada_aug_p = ada_augment.tune(real_pred)\r\n r_t_stat = ada_augment.r_t_stat\r\n \r\n d_regularize = args.d_reg_every > 0 and i % args.d_reg_every == 0\r\n if d_regularize:\r\n # why not regularize on augmented real?\r\n real_img.requires_grad = True\r\n real_pred = discriminator(real_img)\r\n r1_loss_d = d_r1_loss(real_pred, real_img)\r\n\r\n d_optim.zero_grad()\r\n (args.r1 / 2 * r1_loss_d * args.d_reg_every + 0 * real_pred.view(-1)[0]).backward()\r\n # Why 0* ? 
Answer is here https://github.com/rosinality/stylegan2-pytorch/issues/76\r\n d_optim.step()\r\n\r\n loss_dict[\"r1_d\"] = r1_loss_d\r\n \r\n # Train Discriminator_W\r\n if args.learned_prior and args.lambda_gan_w > 0:\r\n noise = mixing_noise(args.batch, args.latent, 0, device)\r\n fake_w = generator.get_latent(noise[0])\r\n real_w, _ = encoder(frames1)\r\n fake_pred = discriminator_w(fake_w)\r\n real_pred = discriminator_w(real_w)\r\n d_loss_w = d_logistic_loss(real_pred, fake_pred)\r\n dw_optim.zero_grad()\r\n d_loss_w.backward()\r\n dw_optim.step()\r\n\r\n # Train Encoder and Generator\r\n if args.toggle_grads:\r\n requires_grad(encoder, True)\r\n requires_grad(generator, True)\r\n requires_grad(discriminator, False)\r\n requires_grad(discriminator_w, False)\r\n pix_loss = vgg_loss = adv_loss = rec_loss = torch.tensor(0., device=device)\r\n\r\n _, fake_img, x_real, x_recon, x_cross = cross_reconstruction(encoder, generator, frames1, frames2, frames3, args.cond_disc)\r\n\r\n if args.lambda_adv > 0:\r\n if args.augment:\r\n fake_img_aug, _ = augment(fake_img, ada_aug_p)\r\n else:\r\n fake_img_aug = fake_img\r\n fake_pred = discriminator(fake_img_aug)\r\n adv_loss = g_nonsaturating_loss(fake_pred)\r\n\r\n if args.lambda_pix > 0:\r\n if args.pix_loss == 'l2':\r\n pix_loss = torch.mean((x_recon - x_real) ** 2)\r\n else:\r\n pix_loss = F.l1_loss(x_recon, x_real)\r\n\r\n if args.lambda_vgg > 0:\r\n real_feat = vggnet(x_real)\r\n fake_feat = vggnet(x_recon) if not args.vgg_on_cross else vggnet(x_cross)\r\n vgg_loss = torch.mean((fake_feat - real_feat) ** 2)\r\n\r\n e_loss = pix_loss * args.lambda_pix + vgg_loss * args.lambda_vgg + adv_loss * args.lambda_adv\r\n\r\n if args.lambda_gan > 0 and not args.no_sim_opt:\r\n noise = mixing_noise(args.batch, args.latent, args.mixing, device)\r\n fake_img, _ = generator(noise)\r\n if args.augment:\r\n fake_img, _ = augment(fake_img, ada_aug_p)\r\n fake_pred = discriminator(fake_img)\r\n g_loss = g_nonsaturating_loss(fake_pred)\r\n e_loss += g_loss * args.lambda_gan\r\n\r\n loss_dict[\"e\"] = e_loss\r\n loss_dict[\"pix\"] = pix_loss\r\n loss_dict[\"vgg\"] = vgg_loss\r\n loss_dict[\"adv\"] = adv_loss\r\n\r\n e_optim.zero_grad()\r\n g_optim.zero_grad()\r\n e_loss.backward()\r\n e_optim.step()\r\n g_optim.step()\r\n\r\n if args.learned_prior:\r\n g_loss_w = 0.\r\n if args.lambda_gan_w > 0:\r\n noise = mixing_noise(args.batch, args.latent, 0, device)\r\n fake_w = generator.get_latent(noise[0])\r\n fake_pred = discriminator_w(fake_w)\r\n g_loss_w += g_nonsaturating_loss(fake_pred) * args.lambda_gan_w\r\n if args.lambda_adv_w > 0:\r\n noise = mixing_noise(args.batch, args.latent, args.mixing, device)\r\n fake_img, _ = generator(noise)\r\n fake_pred = discriminator(fake_img)\r\n g_loss_w += g_nonsaturating_loss(fake_pred) * args.lambda_adv_w\r\n g1_optim.zero_grad()\r\n g_loss_w.backward()\r\n g1_optim.step()\r\n \r\n if args.lambda_gan > 0 and args.no_sim_opt:\r\n noise = mixing_noise(args.batch, args.latent, args.mixing, device)\r\n fake_img, _ = generator(noise)\r\n if args.augment:\r\n fake_img, _ = augment(fake_img, ada_aug_p)\r\n fake_pred = discriminator(fake_img)\r\n g_loss = g_nonsaturating_loss(fake_pred) * args.lambda_gan\r\n generator.zero_grad()\r\n g_loss.backward()\r\n g_optim.step()\r\n \r\n g_regularize = args.lambda_gan > 0 and args.g_reg_every > 0 and i % args.g_reg_every == 0\r\n if g_regularize:\r\n path_batch_size = max(1, args.batch // args.path_batch_shrink)\r\n noise = mixing_noise(path_batch_size, args.latent, args.mixing, device)\r\n 
fake_img, latents = generator(noise, return_latents=True)\r\n path_loss, mean_path_length, path_lengths = g_path_regularize(\r\n fake_img, latents, mean_path_length\r\n )\r\n generator.zero_grad()\r\n weighted_path_loss = args.path_regularize * args.g_reg_every * path_loss\r\n if args.path_batch_shrink:\r\n weighted_path_loss += 0 * fake_img[0, 0, 0, 0]\r\n weighted_path_loss.backward()\r\n g_optim.step()\r\n # mean_path_length_avg = (\r\n # reduce_sum(mean_path_length).item() / get_world_size()\r\n # )\r\n loss_dict[\"path\"] = path_loss\r\n loss_dict[\"path_length\"] = path_lengths.mean()\r\n\r\n accumulate(e_ema, e_module, accum)\r\n accumulate(g_ema, g_module, accum)\r\n\r\n loss_reduced = reduce_loss_dict(loss_dict)\r\n\r\n d_loss_val = loss_reduced[\"d\"].mean().item()\r\n e_loss_val = loss_reduced[\"e\"].mean().item()\r\n r1_d_val = loss_reduced[\"r1_d\"].mean().item()\r\n pix_loss_val = loss_reduced[\"pix\"].mean().item()\r\n vgg_loss_val = loss_reduced[\"vgg\"].mean().item()\r\n adv_loss_val = loss_reduced[\"adv\"].mean().item()\r\n rec_loss_val = loss_reduced[\"rec\"].mean().item()\r\n real_score_val = loss_reduced[\"real_score\"].mean().item()\r\n fake_score_val = loss_reduced[\"fake_score\"].mean().item()\r\n hybrid_score_val = loss_reduced[\"hybrid_score\"].mean().item()\r\n path_loss_val = loss_reduced[\"path\"].mean().item()\r\n # path_length_val = loss_reduced[\"path_length\"].mean().item()\r\n avg_pix_loss.update(pix_loss_val, real_img.shape[0])\r\n avg_vgg_loss.update(vgg_loss_val, real_img.shape[0])\r\n\r\n if get_rank() == 0:\r\n pbar.set_description(\r\n (\r\n f\"d: {d_loss_val:.4f}; e: {e_loss_val:.4f}; r1_d: {r1_d_val:.4f}; \"\r\n f\"pix: {pix_loss_val:.4f}; vgg: {vgg_loss_val:.4f}; adv: {adv_loss_val:.4f}; \"\r\n f\"path: {path_loss_val:.4f}; augment: {ada_aug_p:.4f}\"\r\n )\r\n )\r\n\r\n if i % args.log_every == 0:\r\n with torch.no_grad():\r\n latent_x, _ = e_ema(sample_x1)\r\n fake_x, _ = generator([latent_x], input_is_latent=True, return_latents=False)\r\n sample_pix_loss = torch.sum((sample_x1 - fake_x) ** 2)\r\n with open(os.path.join(args.log_dir, 'log.txt'), 'a+') as f:\r\n f.write(f\"{i:07d}; pix: {avg_pix_loss.avg}; vgg: {avg_vgg_loss.avg}; \"\r\n f\"ref: {sample_pix_loss.item()};\\n\")\r\n\r\n if wandb and args.wandb:\r\n wandb.log(\r\n {\r\n \"Encoder\": e_loss_val,\r\n \"Discriminator\": d_loss_val,\r\n \"Augment\": ada_aug_p,\r\n \"Rt\": r_t_stat,\r\n \"R1 D\": r1_d_val,\r\n \"Pix Loss\": pix_loss_val,\r\n \"VGG Loss\": vgg_loss_val,\r\n \"Adv Loss\": adv_loss_val,\r\n \"Rec Loss\": rec_loss_val,\r\n \"Real Score\": real_score_val,\r\n \"Fake Score\": fake_score_val,\r\n \"Hybrid Score\": hybrid_score_val,\r\n }\r\n )\r\n\r\n if i % args.save_every == 0:\r\n e_eval = e_ema\r\n torch.save(\r\n {\r\n \"e\": e_module.state_dict(),\r\n \"d\": d_module.state_dict(),\r\n \"g\": g_module.state_dict(),\r\n \"g_ema\": g_module.state_dict(),\r\n \"e_ema\": e_eval.state_dict(),\r\n \"e_optim\": e_optim.state_dict(),\r\n \"d_optim\": d_optim.state_dict(),\r\n \"g_optim\": g_optim.state_dict(),\r\n \"args\": args,\r\n \"ada_aug_p\": ada_aug_p,\r\n \"iter\": i,\r\n },\r\n os.path.join(args.log_dir, 'weight', f\"{str(i).zfill(6)}.pt\"),\r\n )\r\n \r\n if i % args.save_latest_every == 0:\r\n torch.save(\r\n {\r\n \"e\": e_module.state_dict(),\r\n \"d\": d_module.state_dict(),\r\n \"g\": g_module.state_dict(),\r\n \"g_ema\": g_module.state_dict(),\r\n \"e_ema\": e_eval.state_dict(),\r\n \"e_optim\": e_optim.state_dict(),\r\n \"d_optim\": d_optim.state_dict(),\r\n 
\"g_optim\": g_optim.state_dict(),\r\n \"args\": args,\r\n \"ada_aug_p\": ada_aug_p,\r\n \"iter\": i,\r\n },\r\n os.path.join(args.log_dir, 'weight', f\"latest.pt\"),\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n device = \"cuda\"\r\n\r\n parser = argparse.ArgumentParser(description=\"StyleGAN2 encoder trainer\")\r\n\r\n parser.add_argument(\"--path\", type=str, help=\"path to the lmdb dataset\")\r\n parser.add_argument(\"--dataset\", type=str, default='videofolder')\r\n parser.add_argument(\"--cache\", type=str, default='local.db')\r\n parser.add_argument(\"--name\", type=str, help=\"experiment name\", default='default_exp')\r\n parser.add_argument(\"--log_root\", type=str, help=\"where to save training logs\", default='logs')\r\n parser.add_argument(\"--log_every\", type=int, default=100, help=\"save samples every # iters\")\r\n parser.add_argument(\"--save_every\", type=int, default=1000, help=\"save checkpoints every # iters\")\r\n parser.add_argument(\"--save_latest_every\", type=int, default=100, help=\"save latest checkpoints every # iters\")\r\n parser.add_argument(\"--resume\", action='store_true')\r\n parser.add_argument(\"--toggle_grads\", action='store_true')\r\n parser.add_argument(\"--use_optical_flow\", action='store_true')\r\n parser.add_argument(\"--use_wscale\", action='store_true', help=\"whether to use `wscale` layer in idinvert encoder\")\r\n parser.add_argument(\"--no_ema\", action='store_true', help=\"do not use ema if enabled\")\r\n parser.add_argument(\"--train_on_fake\", action='store_true', help=\"train encoder on fake?\")\r\n parser.add_argument(\"--e_rec_every\", type=int, default=1, help=\"interval of minimizing recon loss on w\")\r\n parser.add_argument(\"--pix_loss\", type=str, default='l2')\r\n parser.add_argument(\"--lambda_pix\", type=float, default=1.0, help=\"recon loss on pixel (x)\")\r\n parser.add_argument(\"--lambda_vgg\", type=float, default=5e-5)\r\n parser.add_argument(\"--lambda_adv\", type=float, default=0.1)\r\n parser.add_argument(\"--lambda_gan\", type=float, default=0., help=\"train a gan branch?\")\r\n parser.add_argument(\"--lambda_rec\", type=float, default=1.0, help=\"recon loss on style (w)\")\r\n parser.add_argument(\"--lambda_adv_w\", type=float, default=0., help=\"adversarial loss from image discriminator\")\r\n parser.add_argument(\"--lambda_gan_w\", type=float, default=0., help=\"adversarial loss from latent discriminator\")\r\n parser.add_argument(\"--lambda_mmd_w\", type=float, default=0.)\r\n parser.add_argument(\"--output_layer_idx\", type=int, default=23)\r\n parser.add_argument(\"--vgg_ckpt\", type=str, default=\"pretrained/vgg16.pth\")\r\n parser.add_argument(\"--which_encoder\", type=str, default='style')\r\n parser.add_argument(\"--which_latent\", type=str, default='w_tied')\r\n parser.add_argument(\"--stddev_group\", type=int, default=4)\r\n parser.add_argument(\"--nframe_num\", type=int, default=5)\r\n parser.add_argument(\"--shuffle\", action='store_true')\r\n parser.add_argument(\"--learned_prior\", action='store_true', help=\"learned latent prior (w)?\")\r\n parser.add_argument(\"--no_sim_opt\", action='store_true')\r\n parser.add_argument(\"--cond_disc\", type=str, default='cond1', choices=['cond1', 'cond2', 'pac'])\r\n parser.add_argument(\"--train_from_scratch\", action='store_true')\r\n parser.add_argument(\"--vgg_on_cross\", action='store_true')\r\n parser.add_argument(\r\n \"--iter\", type=int, default=800000, help=\"total training iterations\"\r\n )\r\n parser.add_argument(\r\n \"--batch\", type=int, 
default=16, help=\"batch sizes for each gpus\"\r\n )\r\n parser.add_argument(\r\n \"--n_sample\",\r\n type=int,\r\n default=64,\r\n help=\"number of the samples generated during training\",\r\n )\r\n parser.add_argument(\r\n \"--size\", type=int, default=256, help=\"image sizes for the model\"\r\n )\r\n parser.add_argument(\r\n \"--r1\", type=float, default=10, help=\"weight of the r1 regularization\"\r\n )\r\n parser.add_argument(\r\n \"--path_regularize\",\r\n type=float,\r\n default=2,\r\n help=\"weight of the path length regularization\",\r\n )\r\n parser.add_argument(\r\n \"--path_batch_shrink\",\r\n type=int,\r\n default=2,\r\n help=\"batch size reducing factor for the path length regularization (reduce memory consumption)\",\r\n )\r\n parser.add_argument(\r\n \"--d_reg_every\",\r\n type=int,\r\n default=16,\r\n help=\"interval of the applying r1 regularization, no if 0\",\r\n )\r\n parser.add_argument(\r\n \"--g_reg_every\",\r\n type=int,\r\n default=0,\r\n help=\"interval of the applying path length regularization\",\r\n )\r\n parser.add_argument(\r\n \"--e_reg_every\",\r\n type=int,\r\n default=0,\r\n help=\"interval of the applying r1 regularization, no if 0\",\r\n )\r\n parser.add_argument(\r\n \"--mixing\", type=float, default=0.9, help=\"probability of latent code mixing\"\r\n )\r\n parser.add_argument(\r\n \"--ckpt\",\r\n type=str,\r\n default=None,\r\n help=\"path to the checkpoints to resume training\",\r\n )\r\n parser.add_argument(\r\n \"--d_ckpt\",\r\n type=str,\r\n default=None,\r\n help=\"path to the checkpoints to resume training\",\r\n )\r\n parser.add_argument(\r\n \"--e_ckpt\",\r\n type=str,\r\n default=None,\r\n help=\"path to the checkpoints to resume training\",\r\n )\r\n parser.add_argument(\r\n \"--g_ckpt\",\r\n type=str,\r\n default=None,\r\n help=\"path to the checkpoints to resume training\",\r\n )\r\n parser.add_argument(\"--lr\", type=float, default=0.002, help=\"learning rate\")\r\n parser.add_argument(\r\n \"--channel_multiplier\",\r\n type=int,\r\n default=2,\r\n help=\"channel multiplier factor for the model. config-f = 2, else = 1\",\r\n )\r\n parser.add_argument(\r\n \"--wandb\", action=\"store_true\", help=\"use weights and biases logging\"\r\n )\r\n parser.add_argument(\r\n \"--local_rank\", type=int, default=0, help=\"local rank for distributed training\"\r\n )\r\n parser.add_argument(\r\n \"--augment\", action=\"store_true\", help=\"apply non leaking augmentation\"\r\n )\r\n parser.add_argument(\r\n \"--augment_p\",\r\n type=float,\r\n default=0,\r\n help=\"probability of applying augmentation. 
0 = use adaptive augmentation\",\r\n )\r\n parser.add_argument(\r\n \"--ada_target\",\r\n type=float,\r\n default=0.6,\r\n help=\"target augmentation probability for adaptive augmentation\",\r\n )\r\n parser.add_argument(\r\n \"--ada_length\",\r\n type=int,\r\n default=500 * 1000,\r\n help=\"target duraing to reach augmentation probability for adaptive augmentation\",\r\n )\r\n parser.add_argument(\r\n \"--ada_every\",\r\n type=int,\r\n default=256,\r\n help=\"probability update interval of the adaptive augmentation\",\r\n )\r\n\r\n args = parser.parse_args()\r\n util.seed_everything(0)\r\n args.device = device\r\n\r\n n_gpu = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\r\n args.distributed = n_gpu > 1\r\n\r\n if args.distributed:\r\n torch.cuda.set_device(args.local_rank)\r\n torch.distributed.init_process_group(backend=\"nccl\", init_method=\"env://\")\r\n synchronize()\r\n\r\n args.n_latent = int(np.log2(args.size)) * 2 - 2 # used in Generator\r\n args.latent = 512 # fixed, dim of w or z (same size)\r\n if args.which_latent == 'w_plus':\r\n args.latent_full = args.latent * args.n_latent\r\n elif args.which_latent == 'w_tied':\r\n args.latent_full = args.latent\r\n else:\r\n raise NotImplementedError\r\n args.n_mlp = 8\r\n args.use_latent_discriminator = args.learned_prior and args.lambda_gan_w > 0\r\n args.nframe_num = max(3, args.nframe_num)\r\n\r\n args.start_iter = 0\r\n util.set_log_dir(args)\r\n util.print_args(parser, args)\r\n \r\n # Auxiliary models (VGG and PWC)\r\n vggnet = VGG16(output_layer_idx=args.output_layer_idx).to(device)\r\n vgg_ckpt = torch.load(args.vgg_ckpt, map_location=lambda storage, loc: storage)\r\n vggnet.load_state_dict(vgg_ckpt)\r\n\r\n pwcnet = None\r\n # if args.use_optical_flow:\r\n # pwc = __import__('pytorch-pwc.run', globals(), locals(), ['Network'], 0)\r\n # pwcnet = pwc.Network().to(device) # state_dict loaded in init\r\n # pwcnet.eval()\r\n\r\n in_channel = 6\r\n discriminator = Discriminator(\r\n args.size, channel_multiplier=args.channel_multiplier, in_channel=in_channel,\r\n ).to(device)\r\n generator = Generator(\r\n args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier\r\n ).to(device)\r\n g_ema = Generator(\r\n args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier\r\n ).to(device)\r\n g_ema.eval()\r\n accumulate(g_ema, generator, 0)\r\n\r\n e_ema = None\r\n if args.which_encoder == 'idinvert':\r\n from idinvert_pytorch.models.stylegan_encoder_network import StyleGANEncoderNet\r\n encoder = StyleGANEncoderNet(resolution=args.size, w_space_dim=args.latent,\r\n which_latent=args.which_latent, reshape_latent=False,\r\n use_wscale=args.use_wscale).to(device)\r\n e_ema = StyleGANEncoderNet(resolution=args.size, w_space_dim=args.latent,\r\n which_latent=args.which_latent, reshape_latent=False,\r\n use_wscale=args.use_wscale).to(device)\r\n else:\r\n from model import Encoder\r\n encoder = Encoder(args.size, args.latent, channel_multiplier=args.channel_multiplier,\r\n which_latent=args.which_latent, reshape_latent=False, stddev_group=args.stddev_group).to(device)\r\n e_ema = Encoder(args.size, args.latent, channel_multiplier=args.channel_multiplier,\r\n which_latent=args.which_latent, reshape_latent=False, stddev_group=args.stddev_group).to(device)\r\n e_ema.eval()\r\n accumulate(e_ema, encoder, 0)\r\n\r\n # For lazy regularization (see paper appendix page 11)\r\n g_reg_ratio = args.g_reg_every / (args.g_reg_every + 1) if args.g_reg_every > 0 else 1.\r\n d_reg_ratio = 
args.d_reg_every / (args.d_reg_every + 1) if args.d_reg_every > 0 else 1.\r\n e_reg_ratio = 1.\r\n \r\n g_optim = optim.Adam(\r\n generator.parameters(),\r\n lr=args.lr * g_reg_ratio,\r\n betas=(0 ** g_reg_ratio, 0.99 ** g_reg_ratio),\r\n )\r\n g1_optim = optim.Adam( # rmsprop, sgd w mom\r\n generator.style.parameters(),\r\n lr=args.lr * g_reg_ratio,\r\n betas=(0 ** g_reg_ratio, 0.99 ** g_reg_ratio),\r\n )\r\n e_optim = optim.Adam(\r\n encoder.parameters(),\r\n lr=args.lr * e_reg_ratio,\r\n betas=(0 ** e_reg_ratio, 0.99 ** e_reg_ratio),\r\n )\r\n d_optim = optim.Adam(\r\n discriminator.parameters(),\r\n lr=args.lr * d_reg_ratio,\r\n betas=(0 ** d_reg_ratio, 0.99 ** d_reg_ratio),\r\n )\r\n\r\n discriminator_w = dw_optim = None\r\n if args.use_latent_discriminator:\r\n from model import LatentDiscriminator\r\n discriminator_w = LatentDiscriminator(args.latent, args.n_mlp).to(device)\r\n dw_optim = optim.Adam(\r\n discriminator_w.parameters(),\r\n lr=args.lr * 1,\r\n betas=(0 ** 1, 0.99 ** 1),\r\n )\r\n\r\n if args.resume and args.ckpt is not None:\r\n print(\"load model:\", args.ckpt)\r\n ckpt = torch.load(args.ckpt, map_location=lambda storage, loc: storage)\r\n try:\r\n ckpt_name = os.path.basename(args.ckpt)\r\n if 'iter' in ckpt:\r\n args.start_iter = ckpt[\"iter\"]\r\n else:\r\n args.start_iter = int(os.path.splitext(ckpt_name)[0])\r\n except ValueError:\r\n pass\r\n encoder.load_state_dict(ckpt[\"e\"])\r\n generator.load_state_dict(ckpt[\"g\"])\r\n discriminator.load_state_dict(ckpt[\"d\"])\r\n e_ema.load_state_dict(ckpt[\"e_ema\"])\r\n g_ema.load_state_dict(ckpt[\"g_ema\"])\r\n e_optim.load_state_dict(ckpt[\"e_optim\"])\r\n g_optim.load_state_dict(ckpt[\"g_optim\"])\r\n d_optim.load_state_dict(ckpt[\"d_optim\"])\r\n elif not args.train_from_scratch:\r\n # if e_ckpt is provided, load encoder as warm start, else train encoder from scratch\r\n # if g_ckpt is provided, load generator as warm start, else train generator from scratch\r\n if args.e_ckpt is not None:\r\n print(\"load e model:\", args.e_ckpt)\r\n e_ckpt = torch.load(args.e_ckpt, map_location=lambda storage, loc: storage)\r\n encoder.load_state_dict(e_ckpt[\"e\"])\r\n e_ema.load_state_dict(e_ckpt[\"e_ema\"])\r\n e_optim.load_state_dict(e_ckpt[\"e_optim\"])\r\n if args.g_ckpt is not None:\r\n print(\"load g model:\", args.g_ckpt)\r\n g_ckpt = torch.load(args.g_ckpt, map_location=lambda storage, loc: storage)\r\n generator.load_state_dict(g_ckpt[\"g\"])\r\n g_ema.load_state_dict(g_ckpt[\"g_ema\"])\r\n g_optim.load_state_dict(g_ckpt[\"g_optim\"])\r\n if args.d_ckpt is not None:\r\n print(\"load d model:\", args.d_ckpt)\r\n d_ckpt = torch.load(args.d_ckpt, map_location=lambda storage, loc: storage)\r\n discriminator.load_state_dict(d_ckpt[\"d\"])\r\n d_optim.load_state_dict(d_ckpt[\"d_optim\"])\r\n\r\n if args.resume:\r\n try:\r\n ckpt_name = os.path.basename(args.ckpt)\r\n if 'iter' in ckpt:\r\n args.start_iter = ckpt[\"iter\"]\r\n else:\r\n args.start_iter = int(os.path.splitext(ckpt_name)[0])\r\n except ValueError:\r\n pass\r\n encoder.load_state_dict(ckpt[\"e\"])\r\n e_optim.load_state_dict(ckpt[\"e_optim\"])\r\n\r\n if args.distributed:\r\n generator = nn.parallel.DistributedDataParallel(\r\n generator,\r\n device_ids=[args.local_rank],\r\n output_device=args.local_rank,\r\n broadcast_buffers=False,\r\n )\r\n\r\n encoder = nn.parallel.DistributedDataParallel(\r\n encoder,\r\n device_ids=[args.local_rank],\r\n output_device=args.local_rank,\r\n broadcast_buffers=False,\r\n )\r\n\r\n discriminator = 
nn.parallel.DistributedDataParallel(\r\n discriminator,\r\n device_ids=[args.local_rank],\r\n output_device=args.local_rank,\r\n broadcast_buffers=False,\r\n )\r\n if args.use_latent_discriminator:\r\n discriminator_w = nn.parallel.DistributedDataParallel(\r\n discriminator_w,\r\n device_ids=[args.local_rank],\r\n output_device=args.local_rank,\r\n broadcast_buffers=False,\r\n )\r\n\r\n if args.dataset == 'multires':\r\n # TODO: force G(w+Dy) to be real\r\n transform = transforms.Compose(\r\n [\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\r\n ]\r\n )\r\n dataset = MultiResolutionDataset(args.path, transform, args.size)\r\n elif args.dataset == 'videofolder':\r\n # [Note] Potentially, same transforms will be applied to a batch of images,\r\n # either a sequence or a pair (optical flow), so we should apply ToTensor first.\r\n transform = transforms.Compose(\r\n [\r\n # transforms.ToTensor(), # this should be done in loader\r\n transforms.RandomHorizontalFlip(),\r\n transforms.Resize(args.size), # Image.LANCZOS\r\n transforms.CenterCrop(args.size),\r\n # transforms.ToTensor(), # normally placed here\r\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\r\n ]\r\n )\r\n dataset = VideoFolderDataset(args.path, transform, cache=args.cache, unbind=False,\r\n mode='nframe', nframe_num=args.nframe_num)\r\n if len(dataset) == 0:\r\n raise ValueError\r\n loader = data.DataLoader(\r\n dataset,\r\n batch_size=args.batch,\r\n sampler=data_sampler(dataset, shuffle=True, distributed=args.distributed),\r\n drop_last=True,\r\n )\r\n\r\n if get_rank() == 0 and wandb is not None and args.wandb:\r\n wandb.init(project=args.name)\r\n\r\n train(args, loader, encoder, generator, discriminator, discriminator_w, \r\n vggnet, pwcnet, e_optim, g_optim, g1_optim, d_optim, dw_optim,\r\n e_ema, g_ema, device)\r\n" ]
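Aside: the training script above regularizes its discriminator with an R1 gradient penalty (d_r1_loss) under lazy regularization, applying the penalty only every d_reg_every steps and rescaling it accordingly, plus a `0 * real_pred` term to keep the discriminator output in the autograd graph (see the rosinality issue linked in the code). Below is a minimal, self-contained sketch of that computation; the tiny discriminator and tensor shapes are assumptions for illustration, not the repository's real networks.

import torch
from torch import autograd, nn

disc = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 1))  # toy stand-in for D
real = torch.randn(4, 3, 8, 8, requires_grad=True)           # dummy "real" batch

real_pred = disc(real)
grad_real, = autograd.grad(outputs=real_pred.sum(), inputs=real, create_graph=True)
r1 = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean()

r1_weight, d_reg_every = 10.0, 16  # mirrors the --r1 and --d_reg_every defaults
# scale by d_reg_every because the penalty is only applied on every 16th step;
# the 0 * real_pred term ties D's output into the graph without changing the loss
d_reg_loss = r1_weight / 2 * r1 * d_reg_every + 0 * real_pred.view(-1)[0]
d_reg_loss.backward()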
[ [ "torch.randn_like", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.cuda.set_device", "torch.load", "torch.randn", "torch.utils.data.SequentialSampler", "torch.utils.data.RandomSampler", "torch.tensor", "numpy.cov", "torch.no_grad", "numpy.mean", "torch.nn.functional.softplus", "torch.nn.parallel.DistributedDataParallel" ], [ "torch.randn_like", "torch.mean", "torch.nn.functional.l1_loss", "torch.cat", "torch.randperm", "torch.load", "torch.sum", "torch.no_grad", "torch.ones", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.randn", "torch.tensor", "numpy.load", "torch.nn.functional.softplus", "torch.nn.parallel.DistributedDataParallel", "numpy.log2", "torch.cuda.set_device", "torch.utils.data.SequentialSampler", "torch.utils.data.RandomSampler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vhwzIs/mmgeneration
[ "f4c950fa4cb81b8151cf9baf71abd6d8cb204f9b" ]
[ "tools/utils/inception_stat.py" ]
[ "import argparse\nimport os.path as osp\nimport pickle\nimport sys\n\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv import Config, print_log\n\n# yapf: disable\nsys.path.append(osp.abspath(osp.join(__file__, '../../..'))) # isort:skip # noqa\n\nfrom mmgen.core.evaluation.metric_utils import extract_inception_features # isort:skip # noqa\nfrom mmgen.datasets import (UnconditionalImageDataset, build_dataloader, # isort:skip # noqa\n build_dataset) # isort:skip # noqa\nfrom mmgen.models.architectures import InceptionV3 # isort:skip # noqa\n# yapf: enable\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Pre-calculate inception data and save it in pkl file')\n parser.add_argument(\n '--imgsdir', type=str, default=None, help='the dir containing images.')\n parser.add_argument(\n '--data-cfg',\n type=str,\n default=None,\n help='the config file for test data pipeline')\n parser.add_argument(\n '--pklname', type=str, help='the name of inception pkl')\n parser.add_argument(\n '--pkl-dir',\n type=str,\n default='work_dirs/inception_pkl',\n help='path to save pkl file')\n parser.add_argument(\n '--pipeline-cfg',\n type=str,\n default=None,\n help=('config file containing dataset pipeline. If None, the default'\n ' pipeline will be adopted'))\n parser.add_argument(\n '--flip', action='store_true', help='whether to flip real images')\n parser.add_argument(\n '--size',\n type=int,\n nargs='+',\n default=(299, 299),\n help='image size in the data pipeline')\n parser.add_argument(\n '--batch-size',\n type=int,\n default=25,\n help='batch size used in extracted features')\n parser.add_argument(\n '--num-samples',\n type=int,\n default=50000,\n help='the number of total samples')\n parser.add_argument(\n '--no-shuffle',\n action='store_true',\n help='not use shuffle in data loader')\n parser.add_argument(\n '--inception-style',\n choices=['stylegan', 'pytorch'],\n default='pytorch',\n help='which inception network to use')\n parser.add_argument(\n '--inception-pth',\n type=str,\n default='work_dirs/cache/inception-2015-12-05.pt')\n args = parser.parse_args()\n\n # dataset pipeline (only be used when args.imgsdir is not None)\n if args.pipeline_cfg is not None:\n pipeline = Config.fromfile(args.pipeline_cfg)['inception_pipeline']\n elif args.imgsdir is not None:\n if isinstance(args.size, list) and len(args.size) == 2:\n size = args.size\n elif isinstance(args.size, list) and len(args.size) == 1:\n size = (args.size[0], args.size[0])\n elif isinstance(args.size, int):\n size = (args.size, args.size)\n else:\n raise TypeError(\n f'args.size mush be int or tuple but got {args.size}')\n\n pipeline = [\n dict(type='LoadImageFromFile', key='real_img'),\n dict(\n type='Resize', keys=['real_img'], scale=size,\n keep_ratio=False),\n dict(\n type='Normalize',\n keys=['real_img'],\n mean=[127.5] * 3,\n std=[127.5] * 3,\n to_rgb=True), # default to RGB images\n dict(type='Collect', keys=['real_img'], meta_keys=[]),\n dict(type='ImageToTensor', keys=['real_img'])\n ]\n # insert flip aug\n if args.flip:\n pipeline.insert(\n 1,\n dict(type='Flip', keys=['real_img'], direction='horizontal'))\n\n # build dataloader\n if args.imgsdir is not None:\n dataset = UnconditionalImageDataset(args.imgsdir, pipeline)\n elif args.data_cfg is not None:\n # Please make sure the dataset will sample images in `RGB` order.\n data_config = Config.fromfile(args.data_cfg)\n dataset = build_dataset(data_config.data.test)\n else:\n raise RuntimeError('Please provide imgsdir or 
data_cfg')\n\n data_loader = build_dataloader(\n dataset, args.batch_size, 4, dist=False, shuffle=(not args.no_shuffle))\n\n mmcv.mkdir_or_exist(args.pkl_dir)\n\n # build inception network\n if args.inception_style == 'stylegan':\n inception = torch.jit.load(args.inception_pth).eval().cuda()\n inception = nn.DataParallel(inception)\n mmcv.print_log('Adopt Inception network in StyleGAN', 'mmgen')\n else:\n inception = nn.DataParallel(\n InceptionV3([3], resize_input=True, normalize_input=False).cuda())\n inception.eval()\n\n features = extract_inception_features(data_loader, inception,\n args.num_samples,\n args.inception_style).numpy()\n\n # sanity check for the number of features\n assert features.shape[\n 0] == args.num_samples, 'the number of features != num_samples'\n print_log(f'Extract {args.num_samples} features', 'mmgen')\n\n mean = np.mean(features, 0)\n cov = np.cov(features, rowvar=False)\n\n with open(osp.join(args.pkl_dir, args.pklname), 'wb') as f:\n pickle.dump(\n {\n 'mean': mean,\n 'cov': cov,\n 'size': args.num_samples,\n 'name': args.pklname\n }, f)\n" ]
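For reference, the statistics this script pickles are simply the feature mean and covariance used by FID. A minimal sketch with random stand-in features (the 2048-dim shape matches the usual Inception pool3 features, an assumption here; the script's actual feature width depends on the chosen inception network):

import numpy as np

feats = np.random.randn(1000, 2048).astype(np.float32)  # dummy Inception features
mean = np.mean(feats, 0)            # same call as in the script
cov = np.cov(feats, rowvar=False)   # same call as in the script
assert mean.shape == (2048,) and cov.shape == (2048, 2048)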
[ [ "torch.nn.DataParallel", "numpy.cov", "numpy.mean", "torch.jit.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NSLS-II-SMI/pygix
[ "2bcdb881366e061c20c6bcc6a5abf463c643f28a", "2bcdb881366e061c20c6bcc6a5abf463c643f28a" ]
[ "pygix/tools.py", "pygix/process.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" A collection of useful tools for GIXS and fibre diffraction.\n\nThere are two main tools:\n\n1. Four quadrant averaging:\n Fibre diffraction patterns are related through four quadrant symmetry\n (assuming ideal fibre texture). This means that the data in each of the\n four quadrants are equivalent.[1] This allows us to cut out each quadrant,\n centre, rotate and overlay, then average the sum of all four. This has two\n advantages. Firstly, statistics are improved. Secondly, regions of missing\n data (detector module gaps, asymmetric splitting due to large incident\n angle) are filled from other quadrants - prettier images!\n\n Example usage:\n averaged, x, y = quadrant_average(data, x, y)\n\n N.B. The span of the image will change and will be determined by\n -absmax(q)) to +absmax(q) such that new x and y scales are returned.\n\n2. Integration ROIs:\n Arrays defining the regions-of-interest (ROIs) for 1D data reduction methods\n can be calculated. These are useful for overlaying on diffraction patterns\n in figures to show the regions of data that have been integrated. Four\n methods exist for each of the four 1D reduction methods:\n sector_roi\n chi_roi\n op_box_roi\n ip_box_roi\n\n These functions take the same parameters as the integration methods.\n\n N.B. These are all calculated in the output coordinate space, so must be\n overlayed onto transformed images, NOT raw images.\n\nReferences:\n [1] Stribeck and Nöchel, J. Appl. Crystallogr., (2009), 42, 295.\n\"\"\"\n\nimport numpy as np\nfrom . import io\n\n\ndef quadrant_average(data, x=None, y=None, dummy=0, filename=None):\n \"\"\"\n Function to perform four quadrant averaging of fiber diffraction\n patterns. If only the data array is given, the function \n assumes the center of reciprocal space is the central pixel\n and the returned averaged array will have the same shape as \n the input data. In general this should be avoided, it is safest if\n x and y scales are provided!\n\n If x and y scaling are given, it deduces the\n center from these arrays. One quadrant will have the size of \n the largest quadrant and the resulting array will be larger \n than the input array. 
In this case a new x and y will be\n calculated and returned.\n \n Args:\n data (ndarray): Input reciprocal space map array.\n x (ndarray): x scaling of the image.\n y (ndarray): y scaling of the image.\n dummy (int): Value of masked invalid regions.\n filename (string): Name of file to be saved.\n\n Returns:\n out_full (ndarray): The four quadrant averaged array\n \"\"\"\n if (x is not None) and (y is not None):\n cen_x = np.argmin(abs(x))\n cen_y = np.argmin(abs(y))\n elif (x is None) and (y is None):\n # integer division: these are used as slice indices below\n cen_y = data.shape[0] // 2\n cen_x = data.shape[1] // 2\n else:\n raise RuntimeError('Must pass both x and y scales or neither')\n\n quad1 = np.flipud(np.fliplr(data[0:cen_y, 0:cen_x]))\n quad2 = np.fliplr(data[cen_y:, 0:cen_x])\n quad3 = data[cen_y:, cen_x:]\n quad4 = np.flipud(data[0:cen_y, cen_x:])\n\n quad_shape_y = max(data.shape[0] - cen_y,\n data.shape[0] - (data.shape[0] - cen_y))\n quad_shape_x = max(data.shape[1] - cen_x,\n data.shape[1] - (data.shape[1] - cen_x))\n mask = np.zeros((quad_shape_y, quad_shape_x))\n out = np.zeros((quad_shape_y, quad_shape_x))\n\n out[np.where(quad1 > dummy)] += quad1[np.where(quad1 > dummy)]\n out[np.where(quad2 > dummy)] += quad2[np.where(quad2 > dummy)]\n out[np.where(quad3 > dummy)] += quad3[np.where(quad3 > dummy)]\n out[np.where(quad4 > dummy)] += quad4[np.where(quad4 > dummy)]\n\n mask[np.where(quad1 > dummy)] += 1\n mask[np.where(quad2 > dummy)] += 1\n mask[np.where(quad3 > dummy)] += 1\n mask[np.where(quad4 > dummy)] += 1\n\n out[np.where(mask > 0)] /= mask[np.where(mask > 0)]\n out[np.where(mask == 0)] = dummy\n\n out_full = np.zeros((out.shape[0] * 2, out.shape[1] * 2))\n cen_x = out_full.shape[1] // 2\n cen_y = out_full.shape[0] // 2\n\n out_full[0:cen_y, 0:cen_x] = np.flipud(np.fliplr(out))\n out_full[0:cen_y, cen_x:] = np.flipud(out)\n out_full[cen_y:, cen_x:] = out\n out_full[cen_y:, 0:cen_x] = np.fliplr(out)\n\n if (x is not None) and (y is not None):\n out_x = np.linspace(-abs(x).max(), abs(x).max(), out_full.shape[1])\n out_y = np.linspace(-abs(y).max(), abs(y).max(), out_full.shape[0])\n else:\n out_x = None\n out_y = None\n\n if filename is not None:\n writer = io.Writer(None, None)\n writer.save2D(filename, out_full, out_x, out_y)\n\n if (out_x is not None) and (out_y is not None):\n return out_full, out_x, out_y\n else:\n return out_full\n\n\ndef sector_roi(chi_pos=None, chi_width=None, radial_range=None, filename=None):\n \"\"\"Generate array defining region of interest for sector integration.\n\n Args:\n chi_pos (float): chi angle (deg) defining the centre of the sector.\n chi_width (float): width (deg) of sector.\n radial_range (tuple): integration range (min, max).\n filename (string): filename to save the arrays.\n\n Returns:\n qr, qz (tuple of ndarrays): arrays defining the region of interest.\n \"\"\"\n param = locals() # passed only to io to write key, val in the header\n param.pop('filename')\n\n if (len([x for x in [chi_pos, chi_width, radial_range] if\n x is not None]) == 0) \\\n or (radial_range is None):\n raise RuntimeError('Integration over whole image, no ROI to display.')\n roi_x, roi_y = _calc_sector(radial_range, chi_pos, chi_width)\n\n if filename:\n io.save_roi(roi_x, roi_y, filename, **param)\n return roi_x, roi_y\n\n\ndef chi_roi(radial_pos, radial_width, chi_range=None, filename=None):\n \"\"\"Generate array defining region of interest for chi integration.\n\n Args:\n radial_pos (float): position defining the radius of the sector.\n radial_width (float): width (q or 2th) of sector.\n chi_range 
(tuple): azimuthal range (min, max).\n filename (string): filename to save the arrays.\n\n Returns:\n qr, qz (tuple of ndarrays): arrays defining the region of interest.\n \"\"\"\n param = locals() # passed only to io to write key, val in the header\n param.pop('filename')\n\n if (chi_range is None) or (chi_range[0] + chi_range[1] == 360):\n chi_width = None\n chi_pos = None\n else:\n chi_width = chi_range[1] - chi_range[0]\n chi_pos = chi_range[0] + chi_width / 2.0\n\n radial_min = radial_pos - radial_width / 2.0\n radial_max = radial_pos + radial_width / 2.0\n roi_x, roi_y = _calc_sector((radial_min, radial_max), chi_pos, chi_width)\n\n if filename:\n io.save_roi(roi_x, roi_y, filename, **param)\n return roi_x, roi_y\n\n\ndef op_box_roi(ip_pos, ip_width, op_range, filename=None):\n \"\"\"Generate array defining region of interest for out-of-plane box integration.\n\n Args:\n ip_pos (float): in-plane centre of integration box.\n ip_width (float): in-plane width of integration box.\n op_range (tuple): out-of-plane range (min, max).\n filename (string): filename to save the arrays.\n\n Returns:\n qr, qz (tuple of ndarrays): arrays defining the region of interest.\n \"\"\"\n param = locals() # passed only to io to write key, val in the header\n param.pop('filename')\n\n ip_min = ip_pos - ip_width / 2.0\n ip_max = ip_pos + ip_width / 2.0\n roi_x, roi_y = _calc_box((ip_min, ip_max), op_range)\n\n if filename:\n io.save_roi(roi_x, roi_y, filename, **param)\n return roi_x, roi_y\n\n\ndef ip_box_roi(op_pos, op_width, ip_range, filename=None):\n \"\"\"Generate array defining region of interest for in-plane box integration.\n\n Args:\n op_pos (float): out-of-plane centre of integration box.\n op_width (float): out-of-plane width of integration box.\n ip_range (tuple): in-plane range (min, max).\n filename (string): filename to save the arrays.\n\n Returns:\n qr, qz (tuple of ndarrays): arrays defining the region of interest.\n \"\"\"\n param = locals() # passed only to io to write key, val in the header\n param.pop('filename')\n\n op_min = op_pos - op_width / 2.0\n op_max = op_pos + op_width / 2.0\n roi_x, roi_y = _calc_box(ip_range, (op_min, op_max))\n\n if filename:\n io.save_roi(roi_x, roi_y, filename, **param)\n return roi_x, roi_y\n\n\ndef _calc_sector(radial_range, chi_pos, chi_width):\n \"\"\"Main function for calculating sector region of interest.\n Called by sector_roi and chi_roi.\n\n Args:\n radial_range (tuple): integration range (min, max).\n chi_pos (float): chi angle (deg) defining the centre of the sector.\n chi_width (float): width (deg) of sector.\n\n Returns:\n qr, qz (tuple of ndarrays): arrays defining the region of interest.\n \"\"\"\n if len([x for x in [chi_pos, chi_width] if x is not None]) == 1:\n raise RuntimeError('both chi_pos and chi_width must be supplied or '\n 'neither')\n\n if (chi_pos is None) and (chi_width is None):\n chi_min = 0\n chi_max = 359\n npts = 360\n else:\n chi_min = -(chi_pos - chi_width / 2.0 - 90.0)\n chi_max = -(chi_pos + chi_width / 2.0 - 90.0)\n npts = abs(int(chi_max - chi_min))\n\n chi = np.radians(np.linspace(chi_min, chi_max, npts))\n\n # lower part of arc\n if radial_range[0] == 0:\n lo_qr = np.array(0)\n lo_qz = np.array(0)\n else:\n lo_qr = radial_range[0] * np.cos(chi)\n lo_qz = radial_range[0] * np.sin(chi)\n\n # upper part of arc\n hi_qr = (radial_range[1] * np.cos(chi))[::-1]\n hi_qz = (radial_range[1] * np.sin(chi))[::-1]\n\n qr = np.hstack((lo_qr, hi_qr))\n qz = np.hstack((lo_qz, hi_qz))\n\n if (chi_pos is not None) and (chi_width 
is not None):\n qr = np.append(qr, qr[0])\n qz = np.append(qz, qz[0])\n return qr, qz\n\n\ndef _calc_box(ip_range, op_range):\n \"\"\"Main function for calculating box regions of interest.\n Called by op_box_roi and ip_box_roi.\n\n Args:\n ip_range (tuple): in-plane (min, max).\n op_range (tuple): out-of-plane (min, max).\n\n Returns:\n qr, qz (tuple of ndarrays): arrays defining the region of interest.\n \"\"\"\n qr = [ip_range[0], ip_range[0], ip_range[1], ip_range[1], ip_range[0]]\n qz = [op_range[0], op_range[1], op_range[1], op_range[0], op_range[0]]\n return qr, qz\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" Processor module for batch processing data reduction.\n\nThe user creates a yaml file[1] containing all of the detector and sample\ngeometry parameters, the input files, output files and the data reduction\nparameters. Passing this file to the processor, all input files will be\nprocessed and output data saved if requested. If batch 1D is used, an array\ncontaining the stacked 1D profiles will be returned from the processor.\n\nUsage:\n\nimport pygix.process as ppr\n\nprocessor = ppr.Processor('process_recipe.yaml')\nout = processor.process()\n\n\"\"\"\n\nimport sys\nimport os\nfrom glob import glob\nimport yaml\nimport fabio\nimport pygix\nimport numpy as np\nimport time\n\n\nclass Processor(object):\n \"\"\"\n Class for batch processing data with pygix module. Takes a\n yaml file, which lists geometry, correction files, input data,\n output file names and integration parameters and performs the\n data reduction.\n \"\"\"\n\n def __init__(self, yaml_file):\n \"\"\"\n Initialization. Takes the yaml_file and reads in all parameters, which\n are stored as class attributes. Instantiates pygix.transform.Transform.\n\n Args:\n yaml_file (string): path to recipe yaml file.\n \"\"\"\n self.recipe = yaml_file\n with open(yaml_file, 'r') as f:\n self.pars = yaml.load(f, Loader=yaml.SafeLoader)\n\n try:\n calibration = self.pars['calibration']\n except KeyError:\n raise RuntimeError('calibration data not present in yaml file')\n self._pg = init_pygix(calibration)\n\n try:\n data = self.pars['data']\n except KeyError:\n raise RuntimeError('data information not present in yaml file')\n self.file_list = get_file_list(data['infiles']['dname'],\n data['infiles']['fname'],\n data['infiles']['numbers'])\n\n if 'outfiles' in data.keys():\n raise NotImplementedError('Saving data not yet implemented')\n\n # self.bkg_dname = data['backfiles']['dname']\n # self.bkg_fname = data['backfiles']['fname']\n # self.bkg_numbers = data['backfiles']['numbers']\n # self.out_dname = data['outfiles']['dname']\n # self.out_fname = data['outfiles']['fname']\n\n # set basename and extn checks for compressed formats like file.edf.bz2\n # basename, extn = os.path.splitext(self.in_fname)\n # compressed_formats = ['bz2', 'gz']\n # if extn in compressed_formats:\n # self.basename = os.path.splitext(basename)[0]\n # self.extension = os.path.splitext(basename)[1] + '.' 
+ extn\n # else:\n # self.basename = basename\n # self.extension = extn\n\n red_methods = ['transform_reciprocal',\n 'transform_polar',\n 'transform_angular',\n 'profile_sector',\n 'profile_chi',\n 'profile_op_box',\n 'profile_ip_box']\n\n try:\n reduction = self.pars['data_reduction']\n except KeyError:\n raise RuntimeError('reduction parameters not in yaml file')\n\n self.reduction = list(reduction.keys())[0]\n if self.reduction not in red_methods:\n raise RuntimeError(('Invalid reduction method: %s \\nValid reduction'\n ' methods: %s') % (self.reduction, red_methods))\n\n red_kwargs = reduction[self.reduction]\n\n # set as class attribute and remove if value is None otherwise\n # can interfere with integration if kwarg = None\n self.red_kwargs = {}\n if isinstance(reduction[self.reduction], dict):\n for key in red_kwargs:\n if red_kwargs[key] is not None:\n self.red_kwargs.update({key: red_kwargs[key]})\n\n for key in self.red_kwargs:\n if 'range' in key:\n rng = ''.join(\n c for c in self.red_kwargs[key] if c not in '()')\n rng = rng.strip().split(',')\n self.red_kwargs[key] = tuple(float(x) for x in rng)\n\n self._reducer = self.set_reducer()\n\n def set_reducer(self):\n \"\"\"\n Returns the reduction function (called in __init__ and set as\n class attribute).\n \"\"\"\n return getattr(self._pg, self.reduction)\n\n def do_reduction(self, filename):\n \"\"\"\n Loads the data from filename and returns the reduced data.\n\n Args:\n filename (string): path to data.\n\n Returns:\n data (ndarray): reduced data. This can be 1D or 2D depending on\n what was specified in the yaml file.\n \"\"\"\n data = fabio.open(filename).data\n if os.path.splitext(filename)[1] == '.tif':\n data = np.flipud(data)\n return self._reducer(data, **self.red_kwargs)\n\n def _process1d(self):\n \"\"\" Core function for batch processing data (1D reduction).\n\n All information is stored as class attributes\n \"\"\"\n file_list = self.file_list\n n_files = len(file_list)\n out_array = np.zeros((n_files, self.red_kwargs['npt']))\n\n t_start = time.time()\n\n for i, f in enumerate(file_list):\n print('processing file: {}/{}\\n\\t{}'.format(i + 1, n_files, f))\n\n # THIS WORKS BUT NEEDS TESTING\n # if self.out_fname is not None:\n # out_fname = rootname+str(i).zfill(nz)+'_p'+self.extension\n # self.red_kwargs['filename'] = os.path.join(self.out_dname,\n # out_fname)\n\n out_array[i, ], x_scale = self.do_reduction(f)\n\n t_end = time.time() - t_start\n msg = '{} files processed in {} seconds\\n'\n msg += '{} seconds per file'\n print(msg.format(n_files, t_end, t_end/n_files))\n\n y_scale = np.arange(0, n_files)\n out = (out_array, x_scale, y_scale)\n\n # sv_array = fabio.edfimage.edfimage(out_array)\n # sv_array.write('test.edf')\n return out\n\n def _process2d(self):\n \"\"\" Core function for batch processing data (2D transformation).\n\n All information is stored as class attributes\n \"\"\"\n file_list = self.file_list\n n_files = len(file_list)\n\n t_start = time.time()\n\n for i, f in enumerate(file_list):\n print('processing file: {}/{}\\n\\t{}'.format(i + 1, n_files, f))\n\n # THIS WORKS BUT NEEDS TESTING\n # if self.out_fname is not None:\n # out_fname = rootname+str(i).zfill(nz)+'_p'+self.extension\n # self.red_kwargs['filename'] = os.path.join(self.out_dname,\n # out_fname)\n self.do_reduction(f)\n\n t_end = time.time() - t_start\n msg = '{} files processed in {} seconds\\n'\n msg += '{} seconds per file'\n print(msg.format(n_files, t_end, t_end/n_files))\n\n return True\n\n def process(self):\n \"\"\"\n Main batch processor. Will get file list then iterate over \n each performing reduction. 
Will save the data if outfiles \n have been specified. Will plot all if live_view is True,\n requests user input if interactive is True.\n \"\"\"\n # CHECKS FOR WRITING DATA\n # if (self.out_fname is not None) and (self.out_dname is None):\n # raise RuntimeError(('Cannot write reduced data as no reduced '\n # 'directory specified.'))\n # if (self.out_dname is not None) and (self.out_fname is None):\n # self.out_fname = self.basename.rstrip('0')\n # elif (self.out_fname is not None) and (self.out_dname is not None):\n # rootname = self.basename.rstrip('0')\n # nz = len(self.basename)-len(rootname)\n\n if 'profile' in self.reduction:\n out = self._process1d()\n elif 'transform' in self.reduction:\n out = self._process2d()\n return out\n\n\ndef list_to_indices(index_string):\n \"\"\"\n Return an integer list from a string representing indices.\n e.g. index_string = '1-3, 5-6, 8-13, 15, 20'\n indices = [1, 2, 3, 5, 6, 8, 9, 10, 11, 12, 13, 15, 20]\n\n Args:\n index_string (string): condensed string representation of integer list.\n\n Returns:\n indices (list):\n \"\"\"\n indices = []\n for s in index_string.split(','):\n if '-' in s:\n first, last = s.split('-')\n for i in range(int(first), int(last) + 1):\n indices.append(i)\n else:\n indices.append(int(s))\n return indices\n\n\ndef indices_to_list(indices):\n \"\"\"\n Return an abbreviated string representing indices.\n e.g. indices = [1, 2, 3, 5, 6, 8, 9, 10, 11, 12, 13, 15, 20]\n index_string = '1-3, 5-6, 8-13, 15, 20'\n\n Args:\n indices (list):\n\n Returns:\n index_string (string): condensed string representation of integer list.\n \"\"\"\n index_string = \"\"\n end = start = indices[0]\n for i in range(1, len(indices)):\n if indices[i] == (indices[i - 1] + 1):\n end = indices[i]\n else:\n if start == end:\n index_string += str(start) + \",\"\n else:\n index_string += str(start) + \"-\" + str(end) + \",\"\n start = end = indices[i]\n if start == end:\n index_string += str(start)\n else:\n index_string += str(start) + \"-\" + str(end)\n return index_string\n\n\ndef get_file_list(dname, fname, numbers=None):\n \"\"\"\n Takes a directory path, filename format and (optionally) frame numbers and\n returns a list of full file paths.\n\n Args:\n dname (string): directory path.\n fname (string): basename for images. Can be full filename or can contain\n wildcard ('*', or '0000').\n numbers: (string, list or None): contains information on images with\n fname to be used. If None ('*' or '00...' must be in fname), will\n take all images. 
Can be an integer list of file numbers or can be a\n string with hyphens representing ranges, e.g., '1, 3, 5, 9-15'.\n\n Returns:\n file_list (list): list of full paths to data.\n \"\"\"\n if not os.path.isdir(dname):\n raise IOError('Directory does not exist!\\n{}'.format(dname))\n elif (numbers is None) and ('*' not in fname):\n raise IOError(\n 'No file numbers provided and no wildcard (*) in filename')\n\n compressed_formats = ['bz2', 'gz']\n\n if (numbers is None) and ('*' in fname):\n file_list = sorted(glob(os.path.join(dname, fname)))\n else:\n if '*' in fname:\n fname = fname.replace('*', '{:04d}')\n else:\n basename, extn = os.path.splitext(fname)\n if extn in compressed_formats:\n basename, tmp_extn = os.path.splitext(basename)\n extn = '{}{}'.format(tmp_extn, extn)\n\n zero_stripped = basename.rstrip('0')\n n_zeros = len(basename) - len(zero_stripped)\n if n_zeros > 0:\n fname = '{}{{:0{}d}}{}'.format(zero_stripped, n_zeros, extn)\n elif '{:0' not in fname:\n raise IOError('bad filename specifier')\n\n if isinstance(numbers, str):\n numbers = list_to_indices(numbers)\n\n file_list = []\n for i in numbers:\n in_file = fname.format(i)\n file_list.append(os.path.join(dname, in_file))\n return file_list\n\n\ndef init_pygix(calibration_dict):\n \"\"\"\n Instantiate a pygix.Transform() class instance.\n All parameters are set from the yaml file.\n\n Args:\n calibration_dict (dict): dictionary containing all parameters\n for instantiating pygix.Transform().\n\n Returns:\n pg (object): class instance of pygix.Transform().\n \"\"\"\n pg = pygix.transform.Transform()\n pg.load(calibration_dict['ponifile'])\n\n if 'splinefile' in calibration_dict.keys():\n pg.splinefile = calibration_dict['splinefile']\n if 'flatfile' in calibration_dict.keys():\n pg.flatfiles = calibration_dict['flatfile']\n if 'darkfile' in calibration_dict.keys():\n pg.darkfiles = calibration_dict['darkfile']\n if 'maskfile' in calibration_dict.keys():\n pg.maskfile = calibration_dict['maskfile']\n\n grazing = calibration_dict['grazing_parameters']\n if 'sample_orientation' in grazing.keys():\n pg.sample_orientation = grazing['sample_orientation']\n else:\n pg.sample_orientation = 1\n\n pg.incident_angle = grazing['incident_angle']\n if 'tilt_angle' in grazing.keys():\n pg.tilt_angle = grazing['tilt_angle']\n return pg\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print('usage: process.py recipe.yaml')\n sys.exit(0)\n else:\n rp = Processor(sys.argv[1])\n rp.process()\n" ]
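A usage sketch for pygix.tools.quadrant_average on synthetic data, following the docstring's `averaged, x, y = quadrant_average(data, x, y)` pattern; it assumes pygix and its io module are importable, and the array sizes here are arbitrary:

import numpy as np
from pygix import tools

ny, nx = 201, 151
y = np.linspace(-2.0, 2.0, ny)   # qz scale, zero near the centre
x = np.linspace(-1.5, 1.5, nx)   # qr scale, zero near the centre
data = np.random.rand(ny, nx)    # stand-in reciprocal-space map

# x and y are supplied, so new (symmetric) scales are returned as well
averaged, out_x, out_y = tools.quadrant_average(data, x, y)
print(averaged.shape, out_x.min(), out_x.max())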
[ [ "numpy.hstack", "numpy.linspace", "numpy.fliplr", "numpy.flipud", "numpy.cos", "numpy.sin", "numpy.append", "numpy.array", "numpy.zeros", "numpy.where" ], [ "numpy.arange", "numpy.flipud", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shvetsiya/mask-rcnn
[ "dbe1ae7a7bf457ae53aa88f2d1dee3aef1a32936" ]
[ "net/lib/box/nms/gpu_nms/setup.py" ]
[ "from distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\nfrom Cython.Build import cythonize\n\nimport os\nfrom os.path import join as pjoin\nimport numpy as np\n\n# /opt/anaconda3/bin/python3 setup.py build_ext --inplace\n# http://martinsosic.com/development/2016/02/08/wrapping-c-library-as-python-module.html\n\ntry:\n numpy_include = np.get_include()\nexcept AttributeError:\n numpy_include = np.get_numpy_include()\n\n\ndef find_in_path(name, path):\n \"Find a file in a search path\"\n #adapted fom http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/\n for dir in path.split(os.pathsep):\n binpath = pjoin(dir, name)\n if os.path.exists(binpath):\n return os.path.abspath(binpath)\n return None\n\n\ndef locate_cuda():\n \"\"\"Locate the CUDA environment on the system\n\n Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'\n and values giving the absolute path to each directory.\n\n Starts by looking for the CUDAHOME env variable. If not found, everything\n is based on finding 'nvcc' in the PATH.\n \"\"\"\n\n # first check if the CUDAHOME env variable is in use\n if 'CUDAHOME' in os.environ:\n home = os.environ['CUDAHOME']\n nvcc = pjoin(home, 'bin', 'nvcc')\n else:\n # otherwise, search the PATH for NVCC\n default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')\n nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)\n if nvcc is None:\n return None\n home = os.path.dirname(os.path.dirname(nvcc))\n\n cudaconfig = {\n 'home': home,\n 'nvcc': nvcc,\n 'include': pjoin(home, 'include'),\n 'lib64': pjoin(home, 'lib64')\n }\n for k, v in cudaconfig.items():\n if not os.path.exists(v):\n return None\n\n return cudaconfig\n\n\nCUDA = locate_cuda()\nprint(\"CUDA found:\", CUDA)\n\n##----------------------------------------------------------------------------------------\n\n\ndef customize_compiler_for_nvcc(self):\n \"\"\"inject deep into distutils to customize how the dispatch\n to gcc/nvcc works.\n If you subclass UnixCCompiler, it's not trivial to get your subclass\n injected in, and still have the right customizations (i.e.\n distutils.sysconfig.customize_compiler) run on it. So instead of going\n the OO route, I have this. Note, it's kindof like a wierd functional\n subclassing going on.\"\"\"\n\n # tell the compiler it can processes .cu\n self.src_extensions.append('.cu')\n\n # save references to the default compiler_so and _comple methods\n default_compiler_so = self.compiler_so\n super = self._compile\n\n # now redefine the _compile method. 
This gets executed for each\n # object but distutils doesn't have the ability to change compilers\n # based on source extension: we add it.\n def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):\n if os.path.splitext(src)[1] == '.cu':\n # use the cuda for .cu files\n self.set_executable('compiler_so', CUDA['nvcc'])\n # use only a subset of the extra_postargs, which are 1-1 translated\n # from the extra_compile_args in the Extension class\n postargs = extra_postargs['nvcc']\n else:\n postargs = extra_postargs['gcc']\n\n super(obj, src, ext, cc_args, postargs, pp_opts)\n # reset the default compiler_so, which we might have changed for cuda\n self.compiler_so = default_compiler_so\n\n # inject our redefined _compile method into the class\n self._compile = _compile\n\n\n# run the customize_compiler\nclass custom_build_ext(build_ext):\n\n def build_extensions(self):\n customize_compiler_for_nvcc(self.compiler)\n build_ext.build_extensions(self)\n\n\n#/usr/local/cuda-9.1/bin/nvcc -c -o gpu_nms_kernel.cu.o gpu_nms_kernel.cu -x cu -Xcompiler -fPIC -arch=sm_52\next_modules = [\n Extension(\n \"gpu_nms\",\n sources=[\"gpu_nms.pyx\", \"src/gpu_nms_kernel.cu\"],\n library_dirs=[CUDA['lib64']],\n libraries=['cudart'],\n language='c++',\n runtime_library_dirs=[CUDA['lib64']],\n # this syntax is specific to this build system\n # we're only going to use certain compiler args with nvcc and not with gcc\n # the implementation of this trick is in customize_compiler() below\n extra_compile_args={\n 'gcc': [],\n 'nvcc': ['-arch=sm_52', '--ptxas-options=-v', '-c', '--compiler-options', \"'-fPIC'\"],\n },\n include_dirs=[numpy_include, CUDA['include'], 'src']),\n]\n\nsetup(name='mask_rcnn', cmdclass={'build_ext': custom_build_ext}, ext_modules=ext_modules)\n" ]
[ [ "numpy.get_numpy_include", "numpy.get_include" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
unAlpha/AgManim
[ "c34390af41d943c8785beee52015b22eb9df8bde" ]
[ "manimlib/mobject/value_tracker.py" ]
[ "import numpy as np\n\nfrom manimlib.mobject.mobject import Mobject\n\n\nclass ValueTracker(Mobject):\n \"\"\"\n Note meant to be displayed. Instead the position encodes some\n number, often one which another animation or continual_animation\n uses for its update function, and by treating it as a mobject it can\n still be animated and manipulated just like anything else.\n \"\"\"\n\n def __init__(self, value=0, **kwargs):\n Mobject.__init__(self, **kwargs)\n self.points = np.zeros((1, 3))\n self.set_value(value)\n\n def get_value(self):\n return self.points[0, 0]\n\n def set_value(self, value):\n self.points[0, 0] = value\n return self\n\n # 增值\n def increment_value(self, d_value):\n self.set_value(self.get_value() + d_value)\n\n\nclass ExponentialValueTracker(ValueTracker):\n \"\"\"\n Operates just like ValueTracker, except it encodes the value as the\n exponential of a position coordinate, which changes how interpolation\n behaves\n \"\"\"\n\n def get_value(self):\n return np.exp(ValueTracker.get_value(self))\n\n def set_value(self, value):\n return ValueTracker.set_value(self, np.log(value))\n\n\nclass ComplexValueTracker(ValueTracker):\n def get_value(self):\n return complex(*self.points[0, :2])\n\n def set_value(self, z):\n z = complex(z)\n self.points[0, :2] = (z.real, z.imag)\n return self\n" ]
[ [ "numpy.log", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fwgg8547/deeplean_mc
[ "1b858e59caf082df0cd4b1ca12dc21875fb00b26" ]
[ "trainmine.py" ]
[ "from absl import app, flags, logging\nfrom absl.flags import FLAGS\n\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nfrom tensorflow.keras.callbacks import (\n ReduceLROnPlateau,\n EarlyStopping,\n ModelCheckpoint,\n TensorBoard\n)\nfrom yolov3_tf2.models import (\n YoloV3, YoloV3Tiny, YoloLoss,\n yolo_anchors, yolo_anchor_masks,\n yolo_tiny_anchors, yolo_tiny_anchor_masks\n)\nfrom yolov3_tf2.utils import freeze_all\nimport yolov3_tf2.dataset as dataset\n\nflags.DEFINE_string('dataset', '', 'path to dataset')\nflags.DEFINE_string('val_dataset', '', 'path to validation dataset')\nflags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')\nflags.DEFINE_string('weights', './checkpoints/mine.tf',\n 'path to weights file')\nflags.DEFINE_string('classes', './data/vocmine.names', 'path to classes file')\nflags.DEFINE_enum('mode', 'fit', ['fit', 'eager_fit', 'eager_tf'],\n 'fit: model.fit, '\n 'eager_fit: model.fit(run_eagerly=True), '\n 'eager_tf: custom GradientTape')\nflags.DEFINE_enum('transfer', 'none',\n ['none', 'darknet', 'no_output', 'frozen', 'fine_tune'],\n 'none: Training from scratch, '\n 'darknet: Transfer darknet, '\n 'no_output: Transfer all but output, '\n 'frozen: Transfer and freeze all, '\n 'fine_tune: Transfer all and freeze darknet only')\nflags.DEFINE_integer('size', 416, 'image size')\nflags.DEFINE_integer('epochs', 2, 'number of epochs')\nflags.DEFINE_integer('batch_size', 8, 'batch size')\nflags.DEFINE_float('learning_rate', 1e-3, 'learning rate')\nflags.DEFINE_integer('num_classes', 80, 'number of classes in the model')\nflags.DEFINE_integer('weights_num_classes', None, 'specify num class for `weights` file if different, '\n 'useful in transfer learning with different number of classes')\n\n\ndef main(_argv):\n physical_devices = tf.config.experimental.list_physical_devices('GPU')\n for physical_device in physical_devices:\n tf.config.experimental.set_memory_growth(physical_device, True)\n\n if FLAGS.tiny:\n model = YoloV3Tiny(FLAGS.size, training=True,\n classes=FLAGS.num_classes)\n anchors = yolo_tiny_anchors\n anchor_masks = yolo_tiny_anchor_masks\n else:\n model = YoloV3(FLAGS.size, training=True, classes=FLAGS.num_classes)\n anchors = yolo_anchors\n anchor_masks = yolo_anchor_masks\n\n if FLAGS.dataset:\n train_dataset = dataset.load_tfrecord_dataset(\n FLAGS.dataset, FLAGS.classes, FLAGS.size)\n else:\n train_dataset = dataset.load_fake_dataset()\n train_dataset = train_dataset.shuffle(buffer_size=512)\n train_dataset = train_dataset.batch(FLAGS.batch_size)\n train_dataset = train_dataset.map(lambda x, y: (\n dataset.transform_images(x, FLAGS.size),\n dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))\n train_dataset = train_dataset.prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n\n if FLAGS.val_dataset:\n val_dataset = dataset.load_tfrecord_dataset(\n FLAGS.val_dataset, FLAGS.classes, FLAGS.size)\n else:\n val_dataset = dataset.load_fake_dataset()\n val_dataset = val_dataset.batch(FLAGS.batch_size)\n val_dataset = val_dataset.map(lambda x, y: (\n dataset.transform_images(x, FLAGS.size),\n dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))\n\n # Configure the model for transfer learning\n if FLAGS.transfer == 'none':\n pass # Nothing to do\n elif FLAGS.transfer in ['darknet', 'no_output']:\n # Darknet transfer is a special case that works\n # with incompatible number of classes\n\n # reset top layers\n if FLAGS.tiny:\n model_pretrained = YoloV3Tiny(\n FLAGS.size, training=True, classes=FLAGS.weights_num_classes 
or FLAGS.num_classes)\n else:\n model_pretrained = YoloV3(\n FLAGS.size, training=True, classes=FLAGS.weights_num_classes or FLAGS.num_classes)\n model_pretrained.load_weights(FLAGS.weights)\n\n if FLAGS.transfer == 'darknet':\n model.get_layer('yolo_darknet').set_weights(\n model_pretrained.get_layer('yolo_darknet').get_weights())\n freeze_all(model.get_layer('yolo_darknet'))\n\n elif FLAGS.transfer == 'no_output':\n for l in model.layers:\n if not l.name.startswith('yolo_output'):\n l.set_weights(model_pretrained.get_layer(\n l.name).get_weights())\n freeze_all(l)\n\n else:\n # All other transfer require matching classes\n model.load_weights(FLAGS.weights)\n if FLAGS.transfer == 'fine_tune':\n # freeze darknet and fine tune other layers\n darknet = model.get_layer('yolo_darknet')\n freeze_all(darknet)\n elif FLAGS.transfer == 'frozen':\n # freeze everything\n freeze_all(model)\n\n optimizer = tf.keras.optimizers.Adam(lr=FLAGS.learning_rate)\n loss = [YoloLoss(anchors[mask], classes=FLAGS.num_classes)\n for mask in anchor_masks]\n\n if FLAGS.mode == 'eager_tf':\n # Eager mode is great for debugging\n # Non eager graph mode is recommended for real training\n avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)\n avg_val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)\n\n for epoch in range(1, FLAGS.epochs + 1):\n for batch, (images, labels) in enumerate(train_dataset):\n with tf.GradientTape() as tape:\n outputs = model(images, training=True)\n regularization_loss = tf.reduce_sum(model.losses)\n pred_loss = []\n for output, label, loss_fn in zip(outputs, labels, loss):\n pred_loss.append(loss_fn(label, output))\n total_loss = tf.reduce_sum(pred_loss) + regularization_loss\n\n grads = tape.gradient(total_loss, model.trainable_variables)\n optimizer.apply_gradients(\n zip(grads, model.trainable_variables))\n\n logging.info(\"{}_train_{}, {}, {}\".format(\n epoch, batch, total_loss.numpy(),\n list(map(lambda x: np.sum(x.numpy()), pred_loss))))\n avg_loss.update_state(total_loss)\n\n for batch, (images, labels) in enumerate(val_dataset):\n outputs = model(images)\n regularization_loss = tf.reduce_sum(model.losses)\n pred_loss = []\n for output, label, loss_fn in zip(outputs, labels, loss):\n pred_loss.append(loss_fn(label, output))\n total_loss = tf.reduce_sum(pred_loss) + regularization_loss\n\n logging.info(\"{}_val_{}, {}, {}\".format(\n epoch, batch, total_loss.numpy(),\n list(map(lambda x: np.sum(x.numpy()), pred_loss))))\n avg_val_loss.update_state(total_loss)\n\n logging.info(\"{}, train: {}, val: {}\".format(\n epoch,\n avg_loss.result().numpy(),\n avg_val_loss.result().numpy()))\n\n avg_loss.reset_states()\n avg_val_loss.reset_states()\n model.save_weights(\n 'checkpoints/yolov3_train_{}.tf'.format(epoch))\n else:\n model.compile(optimizer=optimizer, loss=loss,\n run_eagerly=(FLAGS.mode == 'eager_fit'))\n\n callbacks = [\n ReduceLROnPlateau(verbose=1),\n EarlyStopping(patience=3, verbose=1),\n ModelCheckpoint('checkpoints/yolov3_train_{epoch}.tf',\n verbose=1, save_weights_only=True),\n TensorBoard(log_dir='logs')\n ]\n\n history = model.fit(train_dataset,\n epochs=FLAGS.epochs,\n callbacks=callbacks,\n validation_data=val_dataset)\n\n\nif __name__ == '__main__':\n try:\n app.run(main)\n except SystemExit:\n pass\n" ]
[ [ "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.config.experimental.set_memory_growth", "tensorflow.reduce_sum", "tensorflow.config.experimental.list_physical_devices", "tensorflow.keras.callbacks.ReduceLROnPlateau", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.callbacks.TensorBoard", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.metrics.Mean", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
bugface/NeMo
[ "431c561380a120e9e164a4c9deed8f1ca9acace5", "431c561380a120e9e164a4c9deed8f1ca9acace5" ]
[ "nemo/collections/nlp/modules/common/megatron/token_level_encoder_decoder.py", "nemo/collections/tts/models/talknet.py" ]
[ "# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nfrom nemo.collections.nlp.modules.common.megatron.fused_bias_dropout_add import bias_dropout_add_fused_inference\nfrom nemo.collections.nlp.modules.common.megatron.language_model import Embedding\nfrom nemo.collections.nlp.modules.common.megatron.megatron_decoders import get_decoder_model\nfrom nemo.collections.nlp.modules.common.megatron.megatron_encoder_decoder import (\n MegatronTransformerEncoderDecoderModule,\n)\nfrom nemo.collections.nlp.modules.common.megatron.megatron_encoders import get_encoder_model\nfrom nemo.collections.nlp.modules.common.megatron.module import MegatronModule\nfrom nemo.collections.nlp.modules.common.megatron.utils import (\n ApexGuardDefaults,\n build_position_ids,\n init_method_normal,\n parallel_lm_logits,\n scaled_init_method_normal,\n)\n\ntry:\n from apex.transformer import tensor_parallel\n from apex.transformer.enums import AttnMaskType, ModelType\n\n HAVE_APEX = True\nexcept (ImportError, ModuleNotFoundError):\n HAVE_APEX = False\n # fake missing classes with None attributes\n AttnMaskType = ApexGuardDefaults()\n ModelType = ApexGuardDefaults()\n\n__all__ = [\"MegatronTokenLevelHead\", \"MegatronTokenLevelEncoderDecoderModule\"]\n\n\nclass MegatronTokenLevelHead(MegatronModule):\n \"\"\"Masked LM head for token-based encoder-decoder models (e.g., T5)\n\n Arguments:\n mpu_vocab_size: model parallel size of vocabulary.\n parallel_output: wether output logits being distributed or not.\n \"\"\"\n\n def __init__(self, mpu_vocab_size, parallel_output):\n super(MegatronTokenLevelHead, self).__init__()\n\n self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))\n self.bias.model_parallel = True\n self.bias.partition_dim = 0\n self.bias.stride = 1\n self.parallel_output = parallel_output\n\n def forward(self, hidden_states, word_embeddings_weight):\n output = parallel_lm_logits(hidden_states, word_embeddings_weight, self.parallel_output, bias=self.bias)\n return output\n\n\n# TODO: add soft prompts as an Embedding sub-class\n\n\nclass MegatronTokenLevelEncoderDecoderModule(MegatronModule):\n \"\"\"Token-based (input/output is tokens) encoder-decoder model (e.g. 
T5 Language model.)\"\"\"\n\n def __init__(\n self,\n encoder_arch,\n decoder_arch,\n vocab_size,\n hidden_size,\n max_position_embeddings,\n num_layers,\n num_attention_heads,\n ffn_hidden_size,\n apply_query_key_layer_scaling=True,\n kv_channels=None,\n num_tokentypes=0,\n parallel_output=True,\n pre_process=True,\n post_process=True,\n init_method_std=0.02,\n fp16_cross_entropy=False,\n use_cpu_initialization=False,\n hidden_dropout=0.1,\n attention_dropout=0.1,\n precision=16,\n fp32_residual_connection=False,\n activations_checkpoint_method=None,\n activations_checkpoint_num_layers=1,\n layernorm_epsilon=1e-5,\n persist_layer_norm=False,\n bias_gelu_fusion=True,\n bias_dropout_add_fusion=True,\n masked_softmax_fusion=True,\n openai_gelu=False,\n activation='gelu',\n onnx_safe=False,\n bias=True,\n hidden_steps=-1,\n hidden_blocks=1,\n add_encoder=True,\n add_decoder=True,\n ):\n super(MegatronTokenLevelEncoderDecoderModule, self).__init__()\n\n self.parallel_output = parallel_output\n self.pre_process = pre_process\n self.post_process = post_process\n self.fp16_cross_entropy = fp16_cross_entropy\n self.precision = precision\n self.add_encoder = add_encoder\n self.add_decoder = add_decoder\n\n if kv_channels is None:\n assert (\n hidden_size % num_attention_heads == 0\n ), 'hidden_size must be divisible by num_attention_heads if kv_channels is None'\n kv_channels = hidden_size // num_attention_heads\n\n encoder, decoder = None, None\n if add_encoder:\n if pre_process:\n self.encoder_embedding = Embedding(\n hidden_size=hidden_size,\n vocab_size=vocab_size,\n max_sequence_length=max_position_embeddings,\n init_method=init_method_normal(init_method_std),\n num_tokentypes=num_tokentypes,\n use_cpu_initialization=use_cpu_initialization,\n embedding_dropout_prob=hidden_dropout,\n )\n self._encoder_embedding_key = \"encoder_embedding\"\n\n encoder = get_encoder_model(\n arch=encoder_arch,\n hidden_size=hidden_size,\n ffn_hidden_size=ffn_hidden_size,\n num_layers=num_layers,\n num_attention_heads=num_attention_heads,\n apply_query_key_layer_scaling=apply_query_key_layer_scaling,\n kv_channels=kv_channels,\n init_method=init_method_normal(init_method_std),\n scaled_init_method=scaled_init_method_normal(init_method_std, num_layers),\n encoder_attn_mask_type=AttnMaskType.padding,\n pre_process=pre_process,\n post_process=post_process,\n init_method_std=init_method_std,\n use_cpu_initialization=use_cpu_initialization,\n hidden_dropout=hidden_dropout,\n attention_dropout=attention_dropout,\n precision=precision,\n fp32_residual_connection=fp32_residual_connection,\n activations_checkpoint_method=activations_checkpoint_method,\n activations_checkpoint_num_layers=activations_checkpoint_num_layers,\n layernorm_epsilon=layernorm_epsilon,\n bias_gelu_fusion=bias_gelu_fusion,\n bias_dropout_add_fusion=bias_dropout_add_fusion,\n masked_softmax_fusion=masked_softmax_fusion,\n persist_layer_norm=persist_layer_norm,\n openai_gelu=openai_gelu,\n onnx_safe=onnx_safe,\n hidden_steps=hidden_steps,\n hidden_blocks=hidden_blocks,\n activation=activation,\n bias=bias,\n parent_model_type=ModelType.encoder_and_decoder,\n )\n\n if add_decoder:\n # If this is the decoder first stage\n if pre_process:\n # If the encoder also lies on this rank (PP = 1), then just assign embeddings directly.\n if hasattr(self, 'encoder_embedding'):\n self.decoder_embedding = self.encoder_embedding\n else:\n # This is the case where PP > 1 and first decoder first stage.\n # We initialize decoder embeddings, but set them to zero since we 
they're tied with the encoder embeddings.\n # A later initialize_embedding call will synchronize the embeddings.\n self.decoder_embedding = Embedding(\n hidden_size=hidden_size,\n vocab_size=vocab_size,\n max_sequence_length=max_position_embeddings,\n init_method=init_method_normal(init_method_std),\n num_tokentypes=num_tokentypes,\n use_cpu_initialization=use_cpu_initialization,\n embedding_dropout_prob=hidden_dropout,\n )\n self.decoder_embedding.zero_parameters()\n\n self._decoder_embedding_key = \"decoder_embedding\"\n\n decoder = get_decoder_model(\n arch=decoder_arch,\n hidden_size=hidden_size,\n ffn_hidden_size=ffn_hidden_size,\n num_layers=num_layers,\n num_attention_heads=num_attention_heads,\n apply_query_key_layer_scaling=apply_query_key_layer_scaling,\n kv_channels=kv_channels,\n init_method=init_method_normal(init_method_std),\n scaled_init_method=scaled_init_method_normal(init_method_std, num_layers),\n decoder_attn_mask_type=AttnMaskType.causal,\n pre_process=pre_process,\n post_process=post_process,\n init_method_std=init_method_std,\n use_cpu_initialization=use_cpu_initialization,\n hidden_dropout=hidden_dropout,\n attention_dropout=attention_dropout,\n precision=precision,\n fp32_residual_connection=fp32_residual_connection,\n activations_checkpoint_method=activations_checkpoint_method,\n activations_checkpoint_num_layers=activations_checkpoint_num_layers,\n layernorm_epsilon=layernorm_epsilon,\n bias_gelu_fusion=bias_gelu_fusion,\n bias_dropout_add_fusion=bias_dropout_add_fusion,\n masked_softmax_fusion=masked_softmax_fusion,\n persist_layer_norm=persist_layer_norm,\n openai_gelu=openai_gelu,\n onnx_safe=onnx_safe,\n hidden_steps=hidden_steps,\n hidden_blocks=hidden_blocks,\n activation=activation,\n bias=bias,\n parent_model_type=ModelType.encoder_and_decoder,\n )\n\n self.enc_dec_model = MegatronTransformerEncoderDecoderModule(encoder=encoder, decoder=decoder)\n self._enc_dec_model_key = \"enc_dec_model\"\n\n self.initialize_word_embeddings(\n init_method=init_method_normal(init_method_std), vocab_size=vocab_size, hidden_size=hidden_size\n )\n\n if add_decoder and post_process:\n self.tokens_head = MegatronTokenLevelHead(self.word_embeddings_weight().size(0), parallel_output)\n self._tokens_head_key = 'tokens_head'\n\n def set_input_tensor(self, input_tensor):\n \"\"\" See megatron.model.transformer.set_input_tensor()\"\"\"\n # This is usually handled in schedules.py but some inference code still\n # gives us non-lists or None\n\n if not isinstance(input_tensor, list):\n input_tensor = [input_tensor]\n\n if self.add_encoder and self.add_decoder:\n assert (\n len(input_tensor) == 1\n ), 'input_tensor should only be length 1 for stage with both encoder and decoder'\n self.enc_dec_model.encoder.set_input_tensor(input_tensor[0])\n elif self.add_encoder:\n assert len(input_tensor) == 1, 'input_tensor should only be length 1 for stage with only encoder'\n self.enc_dec_model.encoder.set_input_tensor(input_tensor[0])\n elif self.add_decoder:\n if len(input_tensor) == 2:\n self.enc_dec_model.decoder.set_input_tensor(input_tensor[0])\n self.enc_dec_model.encoder_hidden_state = input_tensor[1]\n elif len(input_tensor) == 1:\n self.enc_dec_model.decoder.set_input_tensor(None)\n self.enc_dec_model.encoder_hidden_state = input_tensor[0]\n else:\n raise Exception('input_tensor must have either length 1 or 2')\n else:\n raise Exception('Stage must have at least either encoder or decoder')\n\n def forward(\n self,\n enc_input_ids,\n enc_attn_mask,\n dec_input_ids,\n dec_attn_mask,\n 
token_type_ids=None,\n labels=None,\n enc_hidden_states=None,\n enc_output_mask=None,\n output_enc_hidden_only=False,\n enc_input=None,\n ):\n \"\"\"\n Return value is per token / per dimension (i.e., non collapsed loss value)\n \"\"\"\n if enc_input is None:\n if self.pre_process and self.add_encoder:\n # encoder embeddings\n enc_position_ids = build_position_ids(enc_input_ids)\n enc_input = self.encoder_embedding(enc_input_ids, enc_position_ids, token_type_ids=token_type_ids)\n else:\n enc_input = None\n\n if output_enc_hidden_only:\n enc_output = self.enc_dec_model.encode(\n enc_input=enc_input, enc_attn_mask=enc_attn_mask, enc_layer_past=None, enc_get_key_value=False,\n )\n return enc_output\n else:\n if self.pre_process and self.add_decoder:\n dec_position_ids = build_position_ids(dec_input_ids)\n dec_input = self.decoder_embedding(dec_input_ids, dec_position_ids, token_type_ids=token_type_ids)\n else:\n # Note: This is when the decoder itself is split across PP ranks.\n dec_input = None\n\n output = self.enc_dec_model(\n enc_input=enc_input,\n enc_attn_mask=enc_attn_mask,\n dec_input=dec_input,\n dec_attn_mask=dec_attn_mask,\n enc_layer_past=None,\n enc_get_key_value=False,\n enc_output=None,\n dec_layer_past=None,\n dec_get_key_value=False,\n )\n\n if self.post_process and self.add_decoder:\n dec_output, enc_output = output\n # project decoder output to vocabulary-size dimensions\n token_logits = self.tokens_head(dec_output, self.word_embeddings_weight())\n\n if labels is not None:\n # tensor_parallel.vocab_parallel_cross_entropy performs log_softmax and returns log p(x_i|z) per token i\n if self.fp16_cross_entropy:\n assert token_logits.dtype == torch.half\n tokens_loss = tensor_parallel.vocab_parallel_cross_entropy(token_logits, labels)\n else:\n tokens_loss = tensor_parallel.vocab_parallel_cross_entropy(token_logits.float(), labels)\n return tokens_loss\n else:\n return token_logits\n\n elif self.add_decoder and not self.add_encoder:\n decoder_output, _ = output\n return decoder_output\n else:\n encoder_output = output\n return encoder_output\n\n def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):\n \"\"\"For easy load when model is combined with other heads,\n add an extra key.\"\"\"\n\n state_dict_ = {}\n\n state_dict_[self._encoder_embedding_key] = self.encoder_embedding.state_dict_for_save_checkpoint(\n destination, prefix, keep_vars\n )\n state_dict_[self._decoder_embedding_key] = self.decoder_embedding.state_dict_for_save_checkpoint(\n destination, prefix, keep_vars\n )\n state_dict_[self._enc_dec_model_key] = self.enc_dec_model.state_dict_for_save_checkpoint(\n destination, prefix, keep_vars\n )\n state_dict_[self._tokens_head_key] = self.tokens_head.state_dict_for_save_checkpoint(\n destination, prefix, keep_vars\n )\n return state_dict_\n\n def load_state_dict(self, state_dict, strict=True):\n \"\"\"Customized load.\"\"\"\n\n self.encoder_embedding.load_state_dict(state_dict[self._encoder_embedding_key], strict=strict)\n self.decoder_embedding.load_state_dict(state_dict[self._decoder_embedding_key], strict=strict)\n self.enc_dec_model.load_state_dict(state_dict[self._enc_dec_model_key], strict=strict)\n self.tokens_head.load_state_dict(state_dict[self._tokens_head_key], strict=strict)\n", "# Copyright (c) 2020, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\nfrom typing import List\n\nimport torch\nfrom hydra.utils import instantiate\nfrom omegaconf import DictConfig\nfrom pytorch_lightning import Trainer\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom nemo.collections.asr.data.audio_to_text import AudioToCharWithDursF0Dataset\nfrom nemo.collections.tts.helpers.helpers import get_mask_from_lengths\nfrom nemo.collections.tts.models.base import SpectrogramGenerator\nfrom nemo.collections.tts.modules.talknet import GaussianEmbedding, MaskedInstanceNorm1d, StyleResidual\nfrom nemo.core import Exportable\nfrom nemo.core.classes import ModelPT, PretrainedModelInfo, typecheck\nfrom nemo.core.neural_types import MelSpectrogramType, NeuralType\nfrom nemo.core.neural_types.elements import LengthsType, MelSpectrogramType, RegressionValuesType, TokenIndex\nfrom nemo.utils.decorators import deprecated\n\n\n@deprecated(version=\"1.9\", explanation=\"TalkNetDursModel will be removed. Please, use MixerTTSModel instead.\")\nclass TalkNetDursModel(ModelPT):\n \"\"\"TalkNet's durations prediction pipeline.\"\"\"\n\n def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):\n super().__init__(cfg=cfg, trainer=trainer)\n\n cfg = self._cfg\n self.vocab = AudioToCharWithDursF0Dataset.make_vocab(**cfg.train_ds.dataset.vocab)\n self.embed = nn.Embedding(len(self.vocab.labels), cfg.d_char)\n self.encoder = instantiate(cfg.encoder)\n d_out = cfg.encoder.jasper[-1].filters\n self.proj = nn.Conv1d(d_out, 1, kernel_size=1)\n\n @typecheck(\n input_types={\"text\": NeuralType(('B', 'T'), TokenIndex()), \"text_len\": NeuralType(('B'), LengthsType()),}\n )\n def forward(self, text, text_len):\n x, x_len = self.embed(text).transpose(1, 2), text_len\n y, _ = self.encoder(audio_signal=x, length=x_len)\n durs = self.proj(y).squeeze(1)\n return durs\n\n @staticmethod\n def _metrics(true_durs, true_text_len, pred_durs):\n loss = F.mse_loss(pred_durs, (true_durs + 1).float().log(), reduction='none')\n mask = get_mask_from_lengths(true_text_len)\n loss *= mask.float()\n loss = loss.sum() / mask.sum()\n\n durs_pred = pred_durs.exp() - 1\n durs_pred[durs_pred < 0.0] = 0.0\n durs_pred = durs_pred.round().long()\n\n acc = ((true_durs == durs_pred) * mask).sum().float() / mask.sum() * 100\n acc_dist_1 = (((true_durs - durs_pred).abs() <= 1) * mask).sum().float() / mask.sum() * 100\n acc_dist_3 = (((true_durs - durs_pred).abs() <= 3) * mask).sum().float() / mask.sum() * 100\n\n return loss, acc, acc_dist_1, acc_dist_3\n\n def training_step(self, batch, batch_idx):\n _, _, text, text_len, durs, *_ = batch\n pred_durs = self(text=text, text_len=text_len)\n loss, acc, acc_dist_1, acc_dist_3 = self._metrics(true_durs=durs, true_text_len=text_len, pred_durs=pred_durs,)\n train_log = {\n 'train_loss': loss,\n 'train_acc': acc,\n 'train_acc_dist_1': acc_dist_1,\n 'train_acc_dist_3': acc_dist_3,\n }\n return {'loss': loss, 'progress_bar': train_log, 'log': 
train_log}\n\n def validation_step(self, batch, batch_idx):\n _, _, text, text_len, durs, *_ = batch\n pred_durs = self(text=text, text_len=text_len)\n loss, acc, acc_dist_1, acc_dist_3 = self._metrics(true_durs=durs, true_text_len=text_len, pred_durs=pred_durs,)\n val_log = {'val_loss': loss, 'val_acc': acc, 'val_acc_dist_1': acc_dist_1, 'val_acc_dist_3': acc_dist_3}\n self.log_dict(val_log, prog_bar=False, on_epoch=True, logger=True, sync_dist=True)\n\n @staticmethod\n def _loader(cfg):\n dataset = instantiate(cfg.dataset)\n return torch.utils.data.DataLoader( # noqa\n dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params,\n )\n\n def setup_training_data(self, cfg):\n self._train_dl = self._loader(cfg)\n\n def setup_validation_data(self, cfg):\n self._validation_dl = self._loader(cfg)\n\n def setup_test_data(self, cfg):\n \"\"\"Omitted.\"\"\"\n pass\n\n @classmethod\n def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n \"\"\"\n This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.\n Returns:\n List of available pre-trained models.\n \"\"\"\n list_of_models = []\n model = PretrainedModelInfo(\n pretrained_model_name=\"tts_en_talknet\",\n location=(\n \"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_talknet/versions/1.0.0rc1/files\"\n \"/talknet_durs.nemo\"\n ),\n description=(\n \"This model is trained on LJSpeech sampled at 22050Hz, and can be used to generate durations \"\n \"values for English voice with an American accent.\"\n ),\n class_=cls, # noqa\n aliases=[\"TalkNet-22050Hz\"],\n )\n list_of_models.append(model)\n return list_of_models\n\n\n@deprecated(version=\"1.9\", explanation=\"TalkNetPitchModel will be removed. Please, use MixerTTSModel instead.\")\nclass TalkNetPitchModel(ModelPT):\n \"\"\"TalkNet's pitch prediction pipeline.\"\"\"\n\n def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):\n super().__init__(cfg=cfg, trainer=trainer)\n\n cfg = self._cfg\n self.vocab = AudioToCharWithDursF0Dataset.make_vocab(**cfg.train_ds.dataset.vocab)\n self.embed = GaussianEmbedding(self.vocab, cfg.d_char)\n self.encoder = instantiate(cfg.encoder)\n d_out = cfg.encoder.jasper[-1].filters\n self.sil_proj = nn.Conv1d(d_out, 1, kernel_size=1)\n self.body_proj = nn.Conv1d(d_out, 1, kernel_size=1)\n self.f0_mean, self.f0_std = cfg.f0_mean, cfg.f0_std\n\n @typecheck(\n input_types={\n \"text\": NeuralType(('B', 'T'), TokenIndex()),\n \"text_len\": NeuralType(('B'), LengthsType()),\n \"durs\": NeuralType(('B', 'T'), LengthsType()),\n }\n )\n def forward(self, text, text_len, durs):\n x, x_len = self.embed(text, durs).transpose(1, 2), durs.sum(-1)\n y, _ = self.encoder(audio_signal=x, length=x_len)\n f0_sil = self.sil_proj(y).squeeze(1)\n f0_body = self.body_proj(y).squeeze(1)\n return f0_sil, f0_body\n\n def _metrics(self, true_f0, true_f0_mask, pred_f0_sil, pred_f0_body):\n sil_mask = true_f0 < 1e-5\n sil_gt = sil_mask.long()\n sil_loss = F.binary_cross_entropy_with_logits(input=pred_f0_sil, target=sil_gt.float(), reduction='none',)\n sil_loss *= true_f0_mask.type_as(sil_loss)\n sil_loss = sil_loss.sum() / true_f0_mask.sum()\n sil_acc = ((torch.sigmoid(pred_f0_sil) > 0.5).long() == sil_gt).float() # noqa\n sil_acc *= true_f0_mask.type_as(sil_acc)\n sil_acc = sil_acc.sum() / true_f0_mask.sum()\n\n body_mse = F.mse_loss(pred_f0_body, (true_f0 - self.f0_mean) / self.f0_std, reduction='none')\n body_mask = ~sil_mask\n body_mse *= body_mask.type_as(body_mse) # noqa\n body_mse = body_mse.sum() / 
body_mask.sum() # noqa\n body_mae = ((pred_f0_body * self.f0_std + self.f0_mean) - true_f0).abs()\n body_mae *= body_mask.type_as(body_mae) # noqa\n body_mae = body_mae.sum() / body_mask.sum() # noqa\n\n loss = sil_loss + body_mse\n\n return loss, sil_acc, body_mae\n\n def training_step(self, batch, batch_idx):\n _, audio_len, text, text_len, durs, f0, f0_mask = batch\n pred_f0_sil, pred_f0_body = self(text=text, text_len=text_len, durs=durs)\n loss, sil_acc, body_mae = self._metrics(\n true_f0=f0, true_f0_mask=f0_mask, pred_f0_sil=pred_f0_sil, pred_f0_body=pred_f0_body,\n )\n train_log = {'train_loss': loss, 'train_sil_acc': sil_acc, 'train_body_mae': body_mae}\n return {'loss': loss, 'progress_bar': train_log, 'log': train_log}\n\n def validation_step(self, batch, batch_idx):\n _, _, text, text_len, durs, f0, f0_mask = batch\n pred_f0_sil, pred_f0_body = self(text=text, text_len=text_len, durs=durs)\n loss, sil_acc, body_mae = self._metrics(\n true_f0=f0, true_f0_mask=f0_mask, pred_f0_sil=pred_f0_sil, pred_f0_body=pred_f0_body,\n )\n\n val_log = {'val_loss': loss, 'val_sil_acc': sil_acc, 'val_body_mae': body_mae}\n self.log_dict(val_log, prog_bar=False, on_epoch=True, logger=True, sync_dist=True)\n\n @staticmethod\n def _loader(cfg):\n dataset = instantiate(cfg.dataset)\n return torch.utils.data.DataLoader( # noqa\n dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params,\n )\n\n def setup_training_data(self, cfg):\n self._train_dl = self._loader(cfg)\n\n def setup_validation_data(self, cfg):\n self._validation_dl = self._loader(cfg)\n\n def setup_test_data(self, cfg):\n \"\"\"Omitted.\"\"\"\n pass\n\n @classmethod\n def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n \"\"\"\n This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.\n Returns:\n List of available pre-trained models.\n \"\"\"\n list_of_models = []\n model = PretrainedModelInfo(\n pretrained_model_name=\"tts_en_talknet\",\n location=(\n \"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_talknet/versions/1.0.0rc1/files\"\n \"/talknet_pitch.nemo\"\n ),\n description=(\n \"This model is trained on LJSpeech sampled at 22050Hz, and can be used to generate pitch \"\n \"values for English voice with an American accent.\"\n ),\n class_=cls, # noqa\n aliases=[\"TalkNet-22050Hz\"],\n )\n list_of_models.append(model)\n return list_of_models\n\n\n@deprecated(version=\"1.9\", explanation=\"TalkNetSpectModel will be removed. 
Please, use MixerTTSModel instead.\")\nclass TalkNetSpectModel(SpectrogramGenerator, Exportable):\n \"\"\"TalkNet's mel spectrogram prediction pipeline.\"\"\"\n\n @property\n def output_types(self):\n return OrderedDict({\"mel-spectrogram\": NeuralType(('B', 'D', 'T'), MelSpectrogramType())})\n\n def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):\n super().__init__(cfg=cfg, trainer=trainer)\n\n cfg = self._cfg\n self.vocab = AudioToCharWithDursF0Dataset.make_vocab(**cfg.train_ds.dataset.vocab)\n self.blanking = cfg.train_ds.dataset.blanking\n self.preprocessor = instantiate(cfg.preprocessor)\n self.embed = GaussianEmbedding(self.vocab, cfg.d_char)\n self.norm_f0 = MaskedInstanceNorm1d(1)\n self.res_f0 = StyleResidual(cfg.d_char, 1, kernel_size=3)\n self.encoder = instantiate(cfg.encoder)\n d_out = cfg.encoder.jasper[-1].filters\n self.proj = nn.Conv1d(d_out, cfg.n_mels, kernel_size=1)\n\n @typecheck(\n input_types={\n \"text\": NeuralType(('B', 'T'), TokenIndex()),\n \"text_len\": NeuralType(('B'), LengthsType()),\n \"durs\": NeuralType(('B', 'T'), LengthsType()),\n \"f0\": NeuralType(('B', 'T'), RegressionValuesType()),\n }\n )\n def forward(self, text, text_len, durs, f0):\n x, x_len = self.embed(text, durs).transpose(1, 2), durs.sum(-1)\n f0, f0_mask = f0.clone(), f0 > 0.0\n f0 = self.norm_f0(f0.unsqueeze(1), f0_mask)\n f0[~f0_mask.unsqueeze(1)] = 0.0\n x = self.res_f0(x, f0)\n y, _ = self.encoder(audio_signal=x, length=x_len)\n mel = self.proj(y)\n return mel\n\n @staticmethod\n def _metrics(true_mel, true_mel_len, pred_mel):\n loss = F.mse_loss(pred_mel, true_mel, reduction='none').mean(dim=-2)\n mask = get_mask_from_lengths(true_mel_len)\n loss *= mask.float()\n loss = loss.sum() / mask.sum()\n return loss\n\n def training_step(self, batch, batch_idx):\n audio, audio_len, text, text_len, durs, f0, f0_mask = batch\n mel, mel_len = self.preprocessor(input_signal=audio, length=audio_len)\n pred_mel = self(text=text, text_len=text_len, durs=durs, f0=f0)\n loss = self._metrics(true_mel=mel, true_mel_len=mel_len, pred_mel=pred_mel)\n train_log = {'train_loss': loss}\n return {'loss': loss, 'progress_bar': train_log, 'log': train_log}\n\n def validation_step(self, batch, batch_idx):\n audio, audio_len, text, text_len, durs, f0, f0_mask = batch\n mel, mel_len = self.preprocessor(input_signal=audio, length=audio_len)\n pred_mel = self(text=text, text_len=text_len, durs=durs, f0=f0)\n loss = self._metrics(true_mel=mel, true_mel_len=mel_len, pred_mel=pred_mel)\n val_log = {'val_loss': loss}\n self.log_dict(val_log, prog_bar=False, on_epoch=True, logger=True, sync_dist=True)\n\n @staticmethod\n def _loader(cfg):\n dataset = instantiate(cfg.dataset)\n return torch.utils.data.DataLoader( # noqa\n dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params,\n )\n\n def setup_training_data(self, cfg):\n self._train_dl = self._loader(cfg)\n\n def setup_validation_data(self, cfg):\n self._validation_dl = self._loader(cfg)\n\n def setup_test_data(self, cfg):\n \"\"\"Omitted.\"\"\"\n pass\n\n def parse(self, text: str, **kwargs) -> torch.Tensor:\n return torch.tensor(self.vocab.encode(text)).long().unsqueeze(0).to(self.device)\n\n def generate_spectrogram(self, tokens: torch.Tensor, **kwargs) -> torch.Tensor:\n assert hasattr(self, '_durs_model') and hasattr(self, '_pitch_model')\n\n if self.blanking:\n tokens = [\n AudioToCharWithDursF0Dataset.interleave(\n x=torch.empty(len(t) + 1, dtype=torch.long, device=t.device).fill_(self.vocab.blank), y=t,\n )\n for t in tokens\n ]\n 
tokens = AudioToCharWithDursF0Dataset.merge(tokens, value=self.vocab.pad, dtype=torch.long)\n\n text_len = torch.tensor(tokens.shape[-1], dtype=torch.long).unsqueeze(0)\n durs = self._durs_model(text=tokens, text_len=text_len)\n durs = durs.exp() - 1\n durs[durs < 0.0] = 0.0\n durs = durs.round().long()\n\n # Pitch\n f0_sil, f0_body = self._pitch_model(text=tokens, text_len=text_len, durs=durs)\n sil_mask = f0_sil.sigmoid() > 0.5\n f0 = f0_body * self._pitch_model.f0_std + self._pitch_model.f0_mean\n f0 = (~sil_mask * f0).float()\n\n # Spect\n mel = self(text=tokens, text_len=text_len, durs=durs, f0=f0)\n\n return mel\n\n def forward_for_export(self, tokens: torch.Tensor, text_len: torch.Tensor):\n durs = self._durs_model(text=tokens, text_len=text_len)\n durs = durs.exp() - 1\n durs[durs < 0.0] = 0.0\n durs = durs.round().long()\n\n # Pitch\n f0_sil, f0_body = self._pitch_model(text=tokens, text_len=text_len, durs=durs)\n sil_mask = f0_sil.sigmoid() > 0.5\n f0 = f0_body * self._pitch_model.f0_std + self._pitch_model.f0_mean\n f0 = (~sil_mask * f0).float()\n\n # Spect\n x, x_len = self.embed(tokens, durs).transpose(1, 2), durs.sum(-1)\n f0, f0_mask = f0.clone(), f0 > 0.0\n f0 = self.norm_f0(f0.unsqueeze(1), f0_mask)\n f0[~f0_mask.unsqueeze(1)] = 0.0\n x = self.res_f0(x, f0)\n y, _ = self.encoder(audio_signal=x, length=x_len)\n mel = self.proj(y)\n\n return mel\n\n @classmethod\n def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n \"\"\"\n This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.\n Returns:\n List of available pre-trained models.\n \"\"\"\n list_of_models = []\n model = PretrainedModelInfo(\n pretrained_model_name=\"tts_en_talknet\",\n location=(\n \"https://api.ngc.nvidia.com/v2/models/nvidia/nemo/tts_en_talknet/versions/1.0.0rc1/files\"\n \"/talknet_spect.nemo\"\n ),\n description=(\n \"This model is trained on LJSpeech sampled at 22050Hz, and can be used to generate female \"\n \"English voices with an American accent.\"\n ),\n class_=cls, # noqa\n aliases=[\"TalkNet-22050Hz\"],\n )\n list_of_models.append(model)\n return list_of_models\n\n @classmethod\n def from_pretrained(cls, model_name: str, *args, **kwargs):\n \"\"\"Custom TalkNet's three-part load logic.\"\"\"\n model = super().from_pretrained(model_name, *args, **kwargs)\n model.add_module('_pitch_model', TalkNetPitchModel.from_pretrained(model_name, *args, **kwargs))\n model.add_module('_durs_model', TalkNetDursModel.from_pretrained(model_name, *args, **kwargs))\n return model\n" ]
[ [ "torch.zeros" ], [ "torch.sigmoid", "torch.utils.data.DataLoader", "torch.tensor", "torch.nn.functional.mse_loss", "torch.nn.Conv1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
davidpvilaca/TEP
[ "decbf61a96863d76e1b84dc097aa37b12038aa75", "decbf61a96863d76e1b84dc097aa37b12038aa75" ]
[ "aula13/tarefa.py", "aula8/tarefa3.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 16 00:14:59 2017\n\n@author: davidpvilaca\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nimport scipy\nfrom scipy import ndimage\nfrom skimage import measure\n\n\ndef showImg(img, gray=False):\n plt.figure()\n cmap = None\n if (gray):\n cmap = plt.cm.Greys_r\n plt.imshow(img, cmap=cmap)\n return\n\ndef loadImg(path):\n img = cv2.imread(path)\n return (img, cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))\n\ndef bgr2Rgb(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\ndef binOtsu(img_gray):\n img_bin = cv2.threshold(img_gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]\n #img_bin = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, np.ones((3, 3), dtype=int))\n return img_bin\n\n\nmaze = loadImg('maze20_1.png')[1]\nbinMaze = binOtsu(maze)\n\nscipy.sparse.csgraph.connected_components(binMaze)\n\nshowImg(binMaze, True)", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 21 09:40:38 2017\n\n@author: davidpvilaca\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\n\nDIGITS_LOOKUP = {\n (1, 1, 1, 0, 1, 1, 1): 0,\n (0, 0, 1, 0, 0, 1, 0): 1,\n (0, 1, 0, 0, 1, 0, 0): 1,\n (1, 0, 1, 1, 1, 1, 0): 2,\n (1, 0, 1, 1, 0, 1, 1): 3,\n (0, 1, 1, 1, 0, 1, 0): 4,\n (1, 1, 0, 1, 0, 1, 1): 5,\n (1, 0, 1, 0, 1, 0, 0): 5,\n (1, 1, 0, 1, 1, 1, 1): 6,\n (1, 0, 1, 0, 0, 1, 0): 7,\n (1, 1, 1, 1, 1, 1, 1): 8,\n (1, 1, 1, 1, 0, 1, 1): 9\n}\n\n\ndef main():\n \n img = cv2.imread('ex2.png', cv2.IMREAD_GRAYSCALE)\n \n thresh1 = cv2.threshold(img, 0, 255,\tcv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n \n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 5))\n thresh2 = cv2.morphologyEx(thresh1, cv2.MORPH_OPEN, kernel) # abertura\n closing = cv2.morphologyEx(thresh2, cv2.MORPH_CLOSE, kernel) # fechamento\n \n #plt.imshow(thresh2, cmap=plt.cm.Greys_r)\n \n \n cnts1 = cv2.findContours(closing.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts1[1]\n digitCnts = []\n \n # loop over the digit area candidates\n for c in cnts:\n # compute the bounding box of the contour\n (x, y, w, h) = cv2.boundingRect(c)\n # if the contour is sufficiently large, it must be a digit\n if w >= 10 and (h >= 3):\n digitCnts.append(c)\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)\n \n #plt.imshow(img, cmap=plt.cm.Greys_r)\n \n digits = []\n \n # loop over each of the digits\n for c in digitCnts:\n # extract the digit ROI\n (x, y, w, h) = cv2.boundingRect(c)\n roi = thresh2[y:y + h, x:x + w]\n # compute the width and height of each of the 7 segments\n (roiH, roiW) = roi.shape\n (dW, dH) = (int(roiW * 0.25), int(roiH * 0.15))\n dHC = int(roiH * 0.05)\n digits.append(roi)\n segments = [((0, 0), (w, dH)),\t# top\n ((0, 0), (dW, h // 2)),\t# top-left\n ((w - dW, 0), (w, h // 2)),\t# top-right\n ((0, (h // 2) - dHC) , (w, (h // 2) + dHC)), # center\n ((0, h // 2), (dW, h)),\t# bottom-left\n ((w - dW, h // 2), (w, h)),\t# bottom-right\n ((0, h - dH), (w, h))\t# bottom\n ]\n on = [0] * len(segments)\n # loop over the segments\n for (i, ((xA, yA), (xB, yB))) in enumerate(segments):\n segROI = roi[yA:yB, xA:xB]\n total = cv2.countNonZero(segROI)\n area = (xB - xA) * (yB - yA)\n # if the total number of non-zero pixels is greater than\n # 50% of the area, mark the segment as \"on\"\n if total / float(area) > 0.5:\n on[i]= 1\n # lookup the digit and draw it on the image\n digit = DIGITS_LOOKUP[tuple(on)]\n digits.append(digit)\n print(tuple(on))\n print(digit)\n \n cv2.rectangle(img, (x, y), 
(x + w, y + h), (0, 255, 0), 1)\n cv2.putText(img, str(digit), (x, y +20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)\n \n plt.imshow(img, cmap=plt.cm.Greys_r)\n \n return 0\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.imshow", "scipy.sparse.csgraph.connected_components", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.imshow" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
psobolewskiPhD/SEM_fiber_analysis
[ "a378e842d32b7cf2f5d7e69d28a423d928c2fafc" ]
[ "Analyze_fibers.py" ]
[ "# %%\n# Import needed libraries and modules\nimport os\n\nimport dask\nimport numpy as np\nfrom dask import delayed\nfrom dask_image.imread import imread\nfrom quanfima import morphology as mrph\nfrom skimage import filters, morphology\n\n# %%\n# Use dask to read the image files, which permits for lazy loading.\nall_samples = imread(\"./fiber_data/*.jpg\")\n# set the scale in micron per pixel of the images\nscale = [1 / 35.5, 1 / 35.5]\n# get list of filenames to match the imported images, ignore dot files\nfile_list = []\nfor item in os.listdir(\"fiber_data\"):\n if not item.startswith(\".\") and os.path.isfile(os.path.join(\"fiber_data\", item)):\n file_list.append(item)\nfile_list = sorted(file_list)\n\n# %%\n# Ensure images are grayscale\ndef grayscale(rgb):\n gray = (rgb[..., 0] * 0.2125) + (rgb[..., 1] * 0.7154) + (rgb[..., 2] * 0.0721)\n return gray\n\n\nall_samples = grayscale(all_samples)\n\n# %%\n# Define segmentation function. The input is an image stack\ndef segment_img(img_stack):\n # ensure lead dimension is dropped, so just one slice, a 2D image is passed\n slice = img_stack[0, ...]\n # crop out the bottom, where the SEM info is located\n crop = slice[\n 0:890,\n ]\n # Use skimage Niblack thresholding algorithm\n # Can change this to preview a different one method.\n thresh = filters.threshold_niblack(crop)\n seg = (crop > thresh).astype(np.uint8)\n # restore the missing dimension to make a stack again\n return seg[None, ...]\n\n\n# Using dask `map_block` the segmentation\n# The function `segment_img` is applied blockwise to `all_samples`\nseg = all_samples.map_blocks(segment_img, dtype=np.uint8)\n# Do the actual computations\nseg_stack = seg.compute()\n\n# %%\n# Define function to use quanfima to calculate porosity\n# The input is a segmented image\ndef porosity(seg_slice):\n pr = mrph.calc_porosity(seg_slice)\n porosity_val = pr[\"Material 1\"]\n return porosity_val\n\n\n# %%\n# Using dask delayed the porosity function will be run on each img\n# The values will be appended to a list `porosity_out`\nporosity_out = []\n\nfor sample in seg_stack:\n por_vals = delayed(porosity)(sample)\n porosity_out.append(por_vals)\n\n# %%\n# Compute the porosity values\n# Note you may wish to use additional workers\n# or a different scheduler. See:\n# https://docs.dask.org/en/latest/scheduler-overview.html\nporosity_c = dask.compute(*porosity_out)\n\n# Make a dict pairing up the names of the files with the values\nporosity_dict = dict(zip(file_list, porosity_c))\n\n# %%\n# Define function to use quanfima to calculate porosity\n# The input is a segmented image\ndef fiber_analysis(seg_slice):\n # first the image is skeletonized using skimage\n skel = morphology.skeletonize(seg_slice)\n # quanfima function to get fiber parameters\n cskel, fskel, omap, dmap, ovals, dvals = mrph.estimate_fiber_properties(\n seg_slice, skel, window_radius=3\n )\n # return a list of the orientation and diameter arrays\n fvals = [ovals, dvals]\n return fvals\n\n\n# %%\n# Using dask delayed the analysis function will be run on each img\n# The values will be appended to a list `fvals_out`\nfvals_out = []\n\nfor sample in seg_stack:\n fvals = delayed(fiber_analysis)(sample)\n fvals_out.append(fvals)\n\n\n# %%\n# Compute the fiber parameters.\n# Note: this can take ~1 min per image\n# You may wish to use additional workers\n# or a different scheduler. 
See:\n# https://docs.dask.org/en/latest/scheduler-overview.html\nfvals_c = dask.compute(*fvals_out)\n# %%\n# Iterate over samples to compute means and st_devs\nf_out = []\n\nfor sample in fvals_c:\n ovals = sample[0]\n o_m_s = [np.mean(ovals), np.std(ovals)]\n dvals = sample[1]\n d_m_s = [np.mean(dvals), np.std(dvals)]\n f_o = [o_m_s, d_m_s]\n f_out.append(f_o)\n\n# Make a dict pairing up the names of the files with the values\n# File name: [orientation_mean, std], [diameter_mean, std]\n# Note the orientation values are radians, diameters in pixels\nf_out_dict = dict(zip(file_list, f_out))\n\n# %%\n" ]
[ [ "numpy.std", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PUT-II/Computer-Vision-Project-2
[ "14da786e0beb94eb8df1d3d06fdd1fde718dad05" ]
[ "src/pipeline_digit_recognition.py" ]
[ "from typing import List\r\n\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\ndef segment_to_digits(segment: np.array, verbose: int = 0) -> List[np.array]:\r\n from sklearn.cluster import OPTICS\r\n from statistics import mean\r\n\r\n nonzero_y, nonzero_x = np.where(segment > 0)\r\n nonzero_points = np.array(list(zip(nonzero_x, nonzero_y)))\r\n\r\n clt = OPTICS(min_samples=15,\r\n max_eps=6,\r\n xi=0.8,\r\n min_cluster_size=50)\r\n\r\n try:\r\n clt.fit(nonzero_points)\r\n except Exception:\r\n return []\r\n\r\n labels = np.unique(clt.labels_)\r\n\r\n if labels.shape[0] < 6:\r\n clt.max_eps = 4\r\n clt.fit(nonzero_points)\r\n labels = np.unique(clt.labels_)\r\n elif labels.shape[0] > 6:\r\n clt.max_eps = 8\r\n clt.fit(nonzero_points)\r\n labels = np.unique(clt.labels_)\r\n\r\n outliers = []\r\n digits_coord = []\r\n digits_points = []\r\n for label in labels:\r\n points = nonzero_points[np.where(clt.labels_ == label)]\r\n\r\n if label == -1:\r\n outliers = points\r\n continue\r\n\r\n if verbose >= 1:\r\n digits_points.append(points)\r\n\r\n digits_coord.append((min(points[:, 1]), max(points[:, 1] + 1), min(points[:, 0]), max(points[:, 0]) + 1))\r\n\r\n if verbose >= 1:\r\n if len(outliers) > 2:\r\n plt.scatter(outliers[:, 0], outliers[:, 1], c='black')\r\n\r\n for points in digits_points:\r\n plt.scatter(points[:, 0], points[:, 1])\r\n plt.gca().invert_yaxis()\r\n plt.show()\r\n\r\n if verbose >= 2:\r\n print(\"Groups count:\", len(digits_points))\r\n print(\"Min points in group:\", min(g.shape[0] for g in digits_points))\r\n print(\"Mean points in group:\", mean(g.shape[0] for g in digits_points))\r\n\r\n digits_coord.sort(key=lambda x: x[2])\r\n\r\n digits = []\r\n for d in digits_coord:\r\n digits.append(segment[d[0]:d[1], d[2]:d[3]])\r\n\r\n return digits\r\n\r\n\r\ndef digits_to_mnist_format(digits: List[np.array]) -> None:\r\n from cv2 import resize, INTER_AREA, copyMakeBorder, BORDER_CONSTANT, dilate\r\n import math\r\n\r\n for i in range(len(digits)):\r\n d = digits[i]\r\n\r\n d_height, d_width = d.shape\r\n\r\n sample_size = 28\r\n border_size = 4\r\n max_size = sample_size - (border_size * 2)\r\n\r\n if d_width > d_height:\r\n d_proportion = d_height / d_width\r\n scaled_dim = max(1, int(max_size * d_proportion))\r\n d = resize(d, (max_size, scaled_dim), interpolation=INTER_AREA)\r\n else:\r\n d_proportion = d_width / d_height\r\n scaled_dim = max(1, int(max_size * d_proportion))\r\n d = resize(d, (scaled_dim, max_size), interpolation=INTER_AREA)\r\n\r\n border_v = (sample_size - d.shape[0]) / 2\r\n border_v_T = math.ceil(border_v)\r\n border_v_B = math.floor(border_v)\r\n\r\n border_h = (sample_size - d.shape[1]) / 2\r\n border_h_L = math.ceil(border_h)\r\n border_h_R = math.floor(border_h)\r\n\r\n d = copyMakeBorder(d, border_v_T, border_v_B, border_h_L, border_h_R, BORDER_CONSTANT)\r\n\r\n kernel = np.ones((2, 2), np.uint8)\r\n d = dilate(d, kernel, iterations=1)\r\n\r\n digits[i] = d\r\n\r\n\r\ndef load_clf_and_dataset(clf_pickle_path: str):\r\n import os\r\n import pickle\r\n from sklearn.datasets import fetch_openml\r\n from sklearn.svm import SVC\r\n\r\n if not os.path.isfile(clf_pickle_path):\r\n X, y = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame=False)\r\n\r\n clf = SVC().fit(X, y)\r\n\r\n with open(clf_pickle_path, 'wb') as f:\r\n pickle.dump(clf, f)\r\n else:\r\n with open(clf_pickle_path, 'rb') as f:\r\n clf = pickle.load(f)\r\n\r\n return clf\r\n\r\n\r\ndef predict_digits(clf, digits: List[np.array]) -> np.array:\r\n if not 
digits:\r\n return []\r\n reshaped = np.array([d.reshape(784) for d in digits])\r\n return clf.predict(reshaped)\r\n\r\n\r\ndef predict_segment(clf, segment: np.array):\r\n digits = segment_to_digits(segment)\r\n digits_to_mnist_format(digits)\r\n\r\n return predict_digits(clf, digits)\r\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.scatter", "numpy.unique", "numpy.ones", "sklearn.cluster.OPTICS", "sklearn.svm.SVC", "matplotlib.pyplot.show", "numpy.where", "sklearn.datasets.fetch_openml" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
abdulhaim/d4rl
[ "fe9e711ad7335be492759af563c6fa2f80c8e906" ]
[ "d4rl/pointmaze/maze_model.py" ]
[ "\"\"\" A pointmass maze env.\"\"\"\nfrom gym.envs.mujoco import mujoco_env\nfrom gym import utils\nfrom d4rl import offline_env\nfrom d4rl.pointmaze.dynamic_mjc import MJCModel\nimport numpy as np\nimport random\n\n\nWALL = 10\nEMPTY = 11\nGOAL = 12\n\n\ndef parse_maze(maze_str):\n lines = maze_str.strip().split('\\\\')\n width, height = len(lines), len(lines[0])\n maze_arr = np.zeros((width, height), dtype=np.int32)\n for w in range(width):\n for h in range(height):\n tile = lines[w][h]\n if tile == '#':\n maze_arr[w][h] = WALL\n elif tile == 'G':\n maze_arr[w][h] = GOAL\n elif tile == ' ' or tile == 'O' or tile == '0':\n maze_arr[w][h] = EMPTY\n else:\n raise ValueError('Unknown tile type: %s' % tile)\n return maze_arr\n\n\ndef point_maze(maze_str):\n maze_arr = parse_maze(maze_str)\n\n mjcmodel = MJCModel('point_maze')\n mjcmodel.root.compiler(inertiafromgeom=\"true\", angle=\"radian\", coordinate=\"local\")\n mjcmodel.root.option(timestep=\"0.01\", gravity=\"0 0 0\", iterations=\"20\", integrator=\"Euler\")\n default = mjcmodel.root.default()\n default.joint(damping=1, limited='false')\n default.geom(friction=\".5 .1 .1\", density=\"1000\", margin=\"0.002\", condim=\"1\", contype=\"2\", conaffinity=\"1\")\n\n asset = mjcmodel.root.asset()\n asset.texture(type=\"2d\",name=\"groundplane\",builtin=\"checker\",rgb1=\"0.2 0.3 0.4\",rgb2=\"0.1 0.2 0.3\",width=100,height=100)\n asset.texture(name=\"skybox\",type=\"skybox\",builtin=\"gradient\",rgb1=\".4 .6 .8\",rgb2=\"0 0 0\",\n width=\"800\",height=\"800\",mark=\"random\",markrgb=\"1 1 1\")\n asset.material(name=\"groundplane\",texture=\"groundplane\",texrepeat=\"20 20\")\n asset.material(name=\"wall\",rgba=\".7 .5 .3 1\")\n asset.material(name=\"target\",rgba=\".6 .3 .3 1\")\n\n visual = mjcmodel.root.visual()\n visual.headlight(ambient=\".4 .4 .4\",diffuse=\".8 .8 .8\",specular=\"0.1 0.1 0.1\")\n visual.map(znear=.01)\n visual.quality(shadowsize=2048)\n\n worldbody = mjcmodel.root.worldbody()\n worldbody.geom(name='ground',size=\"40 40 0.25\",pos=\"0 0 -0.1\",type=\"plane\",contype=1,conaffinity=0,material=\"groundplane\")\n\n particle = worldbody.body(name='particle', pos=[1.2,1.2,0])\n particle.geom(name='particle_geom', type='sphere', size=0.1, rgba='0.0 0.0 1.0 0.0', contype=1)\n particle.site(name='particle_site', pos=[0.0,0.0,0], size=0.2, rgba='0.3 0.6 0.3 1')\n particle.joint(name='ball_x', type='slide', pos=[0,0,0], axis=[1,0,0])\n particle.joint(name='ball_y', type='slide', pos=[0,0,0], axis=[0,1,0])\n\n worldbody.site(name='target_site', pos=[0.0,0.0,0], size=0.2, material='target')\n\n width, height = maze_arr.shape\n for w in range(width):\n for h in range(height):\n if maze_arr[w,h] == WALL:\n worldbody.geom(conaffinity=1,\n type='box',\n name='wall_%d_%d'%(w,h),\n material='wall',\n pos=[w+1.0,h+1.0,0],\n size=[0.5,0.5,0.2])\n\n actuator = mjcmodel.root.actuator()\n actuator.motor(joint=\"ball_x\", ctrlrange=[-1.0, 1.0], ctrllimited=True, gear=100)\n actuator.motor(joint=\"ball_y\", ctrlrange=[-1.0, 1.0], ctrllimited=True, gear=100)\n\n return mjcmodel\n\n\nLARGE_MAZE = \\\n \"############\\\\\"+\\\n \"#OOOO#OOOOO#\\\\\"+\\\n \"#O##O#O#O#O#\\\\\"+\\\n \"#OOOOOO#OOO#\\\\\"+\\\n \"#O####O###O#\\\\\"+\\\n \"#OO#O#OOOOO#\\\\\"+\\\n \"##O#O#O#O###\\\\\"+\\\n \"#OO#OOO#OGO#\\\\\"+\\\n \"############\"\n\nLARGE_MAZE_EVAL = \\\n \"############\\\\\"+\\\n \"#OO#OOO#OGO#\\\\\"+\\\n \"##O###O#O#O#\\\\\"+\\\n \"#OO#O#OOOOO#\\\\\"+\\\n \"#O##O#OO##O#\\\\\"+\\\n \"#OOOOOO#OOO#\\\\\"+\\\n \"#O##O#O#O###\\\\\"+\\\n 
\"#OOOO#OOOOO#\\\\\"+\\\n \"############\"\n\nMEDIUM_MAZE = \\\n '########\\\\'+\\\n '#OO##OO#\\\\'+\\\n '#OO#OOO#\\\\'+\\\n '##OOO###\\\\'+\\\n '#OO#OOO#\\\\'+\\\n '#O#OO#O#\\\\'+\\\n '#OOO#OG#\\\\'+\\\n \"########\"\n\nMEDIUM_MAZE_EVAL = \\\n '########\\\\'+\\\n '#OOOOOG#\\\\'+\\\n '#O#O##O#\\\\'+\\\n '#OOOO#O#\\\\'+\\\n '###OO###\\\\'+\\\n '#OOOOOO#\\\\'+\\\n '#OO##OO#\\\\'+\\\n \"########\"\n\nSMALL_MAZE = \\\n \"######\\\\\"+\\\n \"#OOOO#\\\\\"+\\\n \"#O##O#\\\\\"+\\\n \"#OOOO#\\\\\"+\\\n \"######\"\n\nU_MAZE = \\\n \"#####\\\\\"+\\\n \"#GOO#\\\\\"+\\\n \"###O#\\\\\"+\\\n \"#OOO#\\\\\"+\\\n \"#####\"\n\nU_MAZE_EVAL = \\\n \"#####\\\\\"+\\\n \"#OOG#\\\\\"+\\\n \"#O###\\\\\"+\\\n \"#OOO#\\\\\"+\\\n \"#####\"\n\nOPEN = \\\n \"#######\\\\\"+\\\n \"#OOOOO#\\\\\"+\\\n \"#OOGOO#\\\\\"+\\\n \"#OOOOO#\\\\\"+\\\n \"#######\"\n\n\nclass MazeEnv(mujoco_env.MujocoEnv, utils.EzPickle, offline_env.OfflineEnv):\n def __init__(self,\n maze_spec=U_MAZE,\n reward_type='dense',\n reset_target=False,\n **kwargs):\n offline_env.OfflineEnv.__init__(self, **kwargs)\n\n self.reset_target = reset_target\n self.str_maze_spec = maze_spec\n self.maze_arr = parse_maze(maze_spec)\n self.maze_obs = np.zeros((len(self.maze_arr), len(self.maze_arr[0])), dtype=np.float32)\n for w in range(len(self.maze_obs)):\n for h in range(len(self.maze_obs[w])):\n tile = self.maze_arr[w][h]\n if tile == WALL:\n self.maze_obs[w][h] = 0.25\n elif tile == EMPTY:\n self.maze_obs[w][h] = 0.75\n elif tile == GOAL:\n self.maze_obs[w][h] = 1 \n\n self.reward_type = reward_type\n self.reset_locations = list(zip(*np.where(self.maze_arr == EMPTY)))\n self.reset_locations.sort()\n #self.reward_type = \"dense\"\n self._target = np.array([0.0,0.0])\n\n model = point_maze(maze_spec)\n with model.asfile() as f:\n mujoco_env.MujocoEnv.__init__(self, model_path=f.name, frame_skip=1)\n utils.EzPickle.__init__(self)\n # Set the default goal (overriden by a call to set_target)\n # Try to find a goal if it exists\n self.goal_locations = list(zip(*np.where(self.maze_arr == GOAL)))\n if len(self.goal_locations) == 1:\n self.set_target(self.goal_locations[0])\n elif len(self.goal_locations) > 1:\n raise ValueError(\"More than 1 goal specified!\")\n else:\n # If no goal, use the first empty tile\n self.set_target(np.array(self.reset_locations[0]).astype(self.observation_space.dtype))\n self.empty_and_goal_locations = self.reset_locations + self.goal_locations\n\n def step(self, action):\n done = False\n action = np.clip(action, -1.0, 1.0)\n self.clip_velocity()\n self.maze_obs[int(self.sim.data.qpos[0]), int(self.sim.data.qpos[1])] = 0.75\n self.do_simulation(action, self.frame_skip)\n self.set_marker() \n self.maze_obs[int(self.sim.data.qpos[0]), int(self.sim.data.qpos[1])] = 0\n ob = self._get_obs()\n\n if self.reward_type == 'sparse': \n reward = 1.0 if np.linalg.norm(self.sim.data.qpos - self._target) <= 0.5 else 0.0\n elif self.reward_type == 'dense':\n reward = np.exp(-np.linalg.norm(self.sim.data.qpos - self._target))\n else:\n raise ValueError('Unknown reward type %s' % self.reward_type)\n return ob, reward, done, {}\n\n def _get_obs(self):\n return np.concatenate([np.array(self.maze_obs).flatten().tolist(), self.sim.data.qpos, self.sim.data.qvel]).ravel()\n\n def get_target(self):\n return self._target\n\n def set_target(self, target_location=None):\n if target_location is None:\n idx = self.np_random.choice(len(self.empty_and_goal_locations))\n reset_location = np.array(self.empty_and_goal_locations[idx]).astype(self.observation_space.dtype)\n 
target_location = reset_location + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)\n \n self.maze_obs[target_location[0], target_location[1]] = 1\n self._target = target_location\n\n def set_marker(self):\n self.data.site_xpos[self.model.site_name2id('target_site')] = np.array([self._target[0]+1, self._target[1]+1, 0.0])\n\n def clip_velocity(self):\n qvel = np.clip(self.sim.data.qvel, -5.0, 5.0)\n self.set_state(self.sim.data.qpos, qvel)\n\n def reset_model(self):\n idx = self.np_random.choice(len(self.empty_and_goal_locations))\n reset_location = np.array(self.empty_and_goal_locations[idx]).astype(self.observation_space.dtype)\n qpos = reset_location + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)\n qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n self.set_state(qpos, qvel)\n if self.reset_target:\n self.set_target()\n return self._get_obs()\n\n def reset_to_location(self, location):\n self.sim.reset()\n reset_location = np.array(location).astype(self.observation_space.dtype)\n qpos = reset_location + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)\n qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n self.set_state(qpos, qvel)\n return self._get_obs()\n\n def viewer_setup(self):\n pass\n\n" ]
[ [ "numpy.clip", "numpy.linalg.norm", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
willsmithorg/autogluon
[ "1e8c6a2f30fcc473411bf393c9827eb4713dcef6", "1e8c6a2f30fcc473411bf393c9827eb4713dcef6" ]
[ "text/src/autogluon/text/automm/optimization/utils.py", "tabular/src/autogluon/tabular/learner/abstract_learner.py" ]
[ "from typing import Optional, Union, Tuple, List, Dict\nimport functools\nfrom torch import nn\nfrom torch import optim\nfrom torch.nn import functional as F\nfrom transformers.trainer_pt_utils import get_parameter_names\nimport torchmetrics\nfrom .lr_scheduler import (\n get_cosine_schedule_with_warmup,\n get_polynomial_decay_schedule_with_warmup,\n get_linear_schedule_with_warmup,\n)\nfrom ..constants import BINARY, MULTICLASS, REGRESSION, MAX, MIN, NORM_FIT, BIT_FIT\n\n\ndef get_loss_func(problem_type: str):\n \"\"\"\n Choose a suitable Pytorch loss module based on the provided problem type.\n\n Parameters\n ----------\n problem_type\n Type of problem.\n\n Returns\n -------\n A Pytorch loss module.\n \"\"\"\n if problem_type in [BINARY, MULTICLASS]:\n loss_func = nn.CrossEntropyLoss()\n elif problem_type == REGRESSION:\n loss_func = nn.MSELoss()\n else:\n raise NotImplementedError\n\n return loss_func\n\n\ndef get_metric(\n metric_name: str,\n problem_type: str,\n num_classes: Optional[int] = None,\n):\n \"\"\"\n Obtain a torchmerics.Metric from its name.\n Define a customized metric function in case that torchmetrics doesn't support some metric.\n\n Parameters\n ----------\n metric_name\n Name of metric\n problem_type\n The type of the problem.\n num_classes\n Number of classes, used in the quadratic_kappa metric for binary classification.\n\n Returns\n -------\n torchmetrics.Metric\n A torchmetrics.Metric object.\n mode\n The min/max mode used in selecting model checkpoints.\n - min\n Its means that smaller metric is better.\n - max\n It means that larger metric is better.\n custom_metric_func\n A customized metric function.\n \"\"\"\n metric_name = metric_name.lower()\n if metric_name in [\"acc\", \"accuracy\"]:\n return torchmetrics.Accuracy(), MAX, None\n elif metric_name in [\"rmse\", \"root_mean_squared_error\"]:\n return torchmetrics.MeanSquaredError(squared=False), MIN, None\n elif metric_name == \"r2\":\n return torchmetrics.R2Score(), MAX, None\n elif metric_name == \"quadratic_kappa\":\n return torchmetrics.CohenKappa(num_classes=num_classes,\n weights=\"quadratic\"), MAX, None\n elif metric_name == \"roc_auc\":\n return torchmetrics.AUROC(), MAX, None\n elif metric_name == 'average_precision':\n return torchmetrics.AveragePrecision(), MAX, None\n elif metric_name in [\"log_loss\", \"cross_entropy\"]:\n return torchmetrics.MeanMetric(), MIN, \\\n functools.partial(F.cross_entropy, reduction=\"none\")\n else:\n warnings.warn(f\"Currently, we cannot convert the metric: {metric_name} to a metric supported in torchmetrics. 
\"\n f\"Thus, we will fall-back to use accuracy for multi-class classification problems \"\n f\", ROC-AUC for binary classification problem, and MSE for regression problems.\", UserWarning)\n if problem_type == REGRESSION:\n return torchmetrics.MeanSquaredError(squared=False), MIN, None\n elif problem_type == MULTICLASS:\n return torchmetrics.Accuracy(), MAX, None\n elif problem_type == BINARY:\n return torchmetrics.AUROC(), MAX, None\n else:\n raise ValueError(f'The problem_type={problem_type} is currently not supported')\n\n\ndef get_optimizer(\n optim_type: str,\n optimizer_grouped_parameters,\n lr: float,\n weight_decay: float,\n eps: Optional[float] = 1e-6,\n betas: Optional[Tuple[float, float]] = (0.9, 0.999),\n momentum: Optional[float] = 0.9,\n):\n \"\"\"\n Choose a Pytorch optimizer based on its name.\n\n Parameters\n ----------\n optim_type\n Name of optimizer.\n optimizer_grouped_parameters\n The model parameters to be optimized.\n lr\n Learning rate.\n weight_decay\n Optimizer weight decay.\n eps\n Optimizer eps.\n betas\n Optimizer betas.\n momentum\n Momentum used in the SGD optimizer.\n\n Returns\n -------\n A Pytorch optimizer.\n \"\"\"\n if optim_type == \"adamw\":\n optimizer = optim.AdamW(\n optimizer_grouped_parameters,\n lr=lr,\n weight_decay=weight_decay,\n eps=eps,\n betas=betas,\n )\n elif optim_type == \"adam\":\n optimizer = optim.Adam(\n optimizer_grouped_parameters,\n lr=lr,\n weight_decay=weight_decay,\n )\n elif optim_type == \"sgd\":\n optimizer = optim.SGD(\n optimizer_grouped_parameters,\n lr=lr,\n weight_decay=weight_decay,\n momentum=momentum,\n )\n else:\n raise ValueError(f\"unknown optimizer: {optim_type}\")\n\n return optimizer\n\n\ndef get_lr_scheduler(\n optimizer: optim.Optimizer,\n num_max_steps: int,\n num_warmup_steps: int,\n lr_schedule: str,\n end_lr: Union[float, int],\n):\n \"\"\"\n Get the learning rate scheduler from its name. 
Here we use our defined learning rate\n scheduler instead of those imported from \"transformers\" because we want to support\n Pytorch lightning's \"ddp_spawn\" training strategy.\n\n Parameters\n ----------\n optimizer\n A Pytorch optimizer.\n num_max_steps\n Number of maximum training steps.\n num_warmup_steps\n Number of steps to do learning rate warmup.\n lr_schedule\n Name of the learning rate scheduler.\n end_lr\n The final learning rate after decay.\n\n Returns\n -------\n A learning rate scheduler.\n \"\"\"\n if lr_schedule == \"cosine_decay\":\n scheduler = get_cosine_schedule_with_warmup(\n optimizer=optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_max_steps,\n )\n elif lr_schedule == \"polynomial_decay\":\n scheduler = get_polynomial_decay_schedule_with_warmup(\n optimizer=optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_max_steps,\n lr_end=end_lr,\n power=1,\n )\n elif lr_schedule == \"linear_decay\":\n scheduler = get_linear_schedule_with_warmup(\n optimizer=optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_max_steps\n )\n else:\n raise ValueError(f\"unknown lr schedule: {lr_schedule}\")\n\n return scheduler\n\n\ndef get_weight_decay_param_names(model: nn.Module):\n \"\"\"\n Set the layer normalization parameters and other layers' bias parameters not to use weight decay.\n\n Parameters\n ----------\n model\n A Pytorch model.\n\n Returns\n -------\n A list of parameter names not using weight decay.\n \"\"\"\n # By default, we should not apply weight decay for all the norm layers\n decay_param_names = get_parameter_names(model,\n [nn.LayerNorm, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,\n nn.GroupNorm])\n decay_param_names = [name for name in decay_param_names if \"bias\" not in name]\n return decay_param_names\n\n\ndef get_norm_layer_param_names(model: nn.Module):\n \"\"\"\n Get parameters associated with the normalization layers\n\n Parameters\n ----------\n model\n A Pytorch model\n\n Returns\n -------\n norm_param_names\n A list of normalization parameter names\n \"\"\"\n all_param_names = [name for name, _ in model.named_parameters()]\n all_param_names_except_norm_names = get_parameter_names(\n model, [nn.LayerNorm, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm])\n norm_param_names = [name for name in all_param_names if name not in all_param_names_except_norm_names]\n return norm_param_names\n\n\ndef apply_single_lr(\n model: nn.Module,\n lr: float,\n weight_decay: float,\n return_params: Optional[bool] = True,\n):\n \"\"\"\n Set to use a single learning rate for all parameters. Layer normalization parameters and other\n layers' bias parameters don't use weight decay.\n\n Parameters\n ----------\n model\n A Pytorch model.\n lr\n Learning rate.\n weight_decay\n Weight decay.\n return_params\n Whether to return parameters or their names. 
If you want to double-check\n whether the learning rate setup is as expected, you can set \"return_params=False\",\n and print the layer names along with their learning rates through\n \"print(\"Param groups = %s\" % json.dumps(optimizer_grouped_parameters, indent=2))\".\n\n Returns\n -------\n The grouped parameters or their names.\n \"\"\"\n decay_param_names = get_weight_decay_param_names(model)\n optimizer_grouped_parameters = [\n {\n \"params\": [p if return_params else n for n, p in model.named_parameters() if n in decay_param_names],\n \"weight_decay\": weight_decay,\n \"lr\": lr,\n },\n {\n \"params\": [p if return_params else n for n, p in model.named_parameters() if n not in decay_param_names],\n \"weight_decay\": 0.0,\n \"lr\": lr,\n },\n ]\n return optimizer_grouped_parameters\n\n\ndef apply_two_stages_lr(\n model: nn.Module,\n lr: float,\n lr_mult: Union[float, int],\n weight_decay: float,\n return_params: Optional[bool] = True,\n):\n \"\"\"\n Set up the pretrained backbone to use a smaller learning rate (lr * lr_mult).\n The newly added head layers use the normal learning rate (lr).\n Layer normalization parameters and other layers' bias parameters don't use weight decay.\n\n Parameters\n ----------\n model\n A Pytorch model.\n lr\n The learning rate.\n lr_mult\n The multiplier (0, 1) to scale down the learning rate.\n weight_decay\n Weight decay.\n return_params\n return_params\n Whether to return parameters or their names. If you want to double-check\n whether the learning rate setup is as expected, you can set \"return_params=False\",\n and print the layer names along with their learning rates through\n \"print(\"Param groups = %s\" % json.dumps(optimizer_grouped_parameters, indent=2))\".\n\n Returns\n -------\n The grouped parameters or their names.\n \"\"\"\n decay_param_names = get_weight_decay_param_names(model)\n\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p if return_params else n\n for n, p in model.named_parameters()\n if n in decay_param_names\n and not any(bb in n for bb in model.head_layer_names)\n ],\n \"weight_decay\": weight_decay,\n \"lr\": lr,\n },\n {\n \"params\": [\n p if return_params else n\n for n, p in model.named_parameters()\n if n not in decay_param_names\n and not any(bb in n for bb in model.head_layer_names)\n ],\n \"weight_decay\": 0.0,\n \"lr\": lr,\n },\n {\n \"params\": [\n p if return_params else n\n for n, p in model.named_parameters()\n if n in decay_param_names\n and any(bb in n for bb in model.head_layer_names)\n ],\n \"weight_decay\": weight_decay,\n \"lr\": lr * lr_mult,\n },\n {\n \"params\": [\n p if return_params else n\n for n, p in model.named_parameters()\n if n not in decay_param_names\n and any(bb in n for bb in model.head_layer_names)\n ],\n \"weight_decay\": 0.0,\n \"lr\": lr * lr_mult,\n },\n ]\n\n return optimizer_grouped_parameters\n\n\ndef apply_layerwise_lr_decay(\n model: nn.Module,\n lr: float,\n lr_decay: float,\n weight_decay: float,\n efficient_finetune: Optional[str] = None,\n):\n \"\"\"\n Assign monotonically decreasing learning rates for layers from the output end to the input end.\n The intuition behind is that later layers are more task-related compared to the early layers.\n Layer normalization parameters and other layers' bias parameters don't use weight decay.\n If you want to double-check whether the learning rate setup is as expected,\n you can print the layer names along with their learning rates through\n \"print(\"Param groups = %s\" % json.dumps(parameter_group_names, indent=2))\".\n\n 
Parameters\n ----------\n model\n A Pytorch model.\n lr\n The learning rate.\n lr_decay\n The learning rate decay factor (0, 1).\n weight_decay\n Weight decay.\n efficient_finetune\n Efficient finetuning strategy. Can be \"bit_fit\", \"norm_fit\". It will only finetune part of the parameters\n\n Returns\n -------\n The grouped parameters based on their layer ids and whether using weight decay.\n \"\"\"\n parameter_group_names = {}\n parameter_group_vars = {}\n decay_param_names = get_weight_decay_param_names(model)\n norm_param_names = get_norm_layer_param_names(model)\n for name, param in model.named_parameters():\n if efficient_finetune == BIT_FIT:\n # For bit_fit, we disable tuning everything except the bias terms\n if 'bias' not in name:\n param.requires_grad = False\n elif efficient_finetune == NORM_FIT:\n # For norm-fit, we finetune all the normalization layers and bias layers\n if name not in norm_param_names and 'bias' not in name:\n param.requires_grad = False\n\n if not param.requires_grad:\n continue # frozen weights\n\n if name in decay_param_names:\n group_name = \"decay\"\n this_weight_decay = weight_decay\n else:\n group_name = \"no_decay\"\n this_weight_decay = 0.\n\n layer_id = model.name_to_id[name]\n group_name = \"layer_%d_%s\" % (layer_id, group_name)\n\n if group_name not in parameter_group_names:\n scale = lr_decay ** layer_id\n\n parameter_group_names[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr\": scale * lr\n }\n parameter_group_vars[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr\": scale * lr\n }\n\n parameter_group_vars[group_name][\"params\"].append(param)\n parameter_group_names[group_name][\"params\"].append(name)\n\n return list(parameter_group_vars.values())\n", "import copy\nimport json\nimport logging\nimport os\nimport random\nimport sys\nimport time\nfrom collections.abc import Iterable\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series\nfrom sklearn.metrics import classification_report\n\nfrom autogluon.core.constants import BINARY, MULTICLASS, REGRESSION, QUANTILE, AUTO_WEIGHT, BALANCE_WEIGHT\nfrom autogluon.core.data.label_cleaner import LabelCleaner, LabelCleanerMulticlassToBinary\nfrom autogluon.core.metrics import confusion_matrix, get_metric\nfrom autogluon.core.models.greedy_ensemble.ensemble_selection import EnsembleSelection\nfrom autogluon.core.trainer.abstract_trainer import AbstractTrainer\nfrom autogluon.core.utils import get_leaderboard_pareto_frontier, augment_rare_classes, extract_column, compute_weighted_metric\nfrom autogluon.core.utils.loaders import load_pkl\nfrom autogluon.core.utils.savers import save_json, save_pkl\nfrom autogluon.core.utils import get_pred_from_proba, get_pred_from_proba_df, infer_problem_type\nfrom autogluon.features.generators import PipelineFeatureGenerator\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO: - Semi-supervised learning\n# TODO: - Minimize memory usage of DataFrames (convert int64 -> uint8 when possible etc.)\n# Learner encompasses full problem, loading initial data, feature generation, model training, model prediction\n# TODO: Loading learner from S3 on Windows may cause issues due to os.path.sep\nclass AbstractLearner:\n learner_file_name = 'learner.pkl'\n learner_info_name = 'info.pkl'\n learner_info_json_name = 'info.json'\n\n def __init__(self, path_context: str, label: str, feature_generator: PipelineFeatureGenerator, ignored_columns: list = None, label_count_threshold=10,\n 
problem_type=None, quantile_levels=None, eval_metric=None, positive_class=None, cache_data=True, is_trainer_present=False,\n random_state=0, sample_weight=None, weight_evaluation=False, groups=None):\n self.path, self.model_context, self.save_path = self.create_contexts(path_context)\n self.label = label\n self.ignored_columns = ignored_columns\n if self.ignored_columns is None:\n self.ignored_columns = []\n self.threshold = label_count_threshold\n self.problem_type = problem_type\n self.eval_metric = get_metric(eval_metric, self.problem_type, 'eval_metric')\n\n if self.problem_type == QUANTILE and quantile_levels is None:\n raise ValueError(\"if `problem_type='quantile'`, `quantile_levels` has to be specified\")\n if isinstance(quantile_levels, float):\n quantile_levels = [quantile_levels]\n if isinstance(quantile_levels, Iterable):\n for quantile in quantile_levels:\n if quantile <= 0.0 or quantile >= 1.0:\n raise ValueError(\"quantile values have to be non-negative and less than 1.0 (0.0 < q < 1.0). \"\n \"For example, 0.95 quantile = 95 percentile\")\n quantile_levels = np.sort(np.array(quantile_levels))\n self.quantile_levels = quantile_levels\n\n self.cache_data = cache_data\n if not self.cache_data:\n logger.log(30, 'Warning: `cache_data=False` will disable or limit advanced functionality after training such as feature importance calculations. It is recommended to set `cache_data=True` unless you explicitly wish to not have the data saved to disk.')\n self.is_trainer_present = is_trainer_present\n if random_state is None:\n random_state = random.randint(0, 1000000)\n self.random_state = random_state\n self.cleaner = None\n self.label_cleaner: LabelCleaner = None\n self.feature_generator: PipelineFeatureGenerator = feature_generator\n\n self.trainer: AbstractTrainer = None\n self.trainer_type = None\n self.trainer_path = None\n self.reset_paths = False\n\n self._pre_X_rows = None\n self._post_X_rows = None\n self._positive_class = positive_class\n self.sample_weight = sample_weight\n self.weight_evaluation = weight_evaluation\n self.groups = groups\n if sample_weight is not None and not isinstance(sample_weight, str):\n raise ValueError(\"sample_weight must be a string indicating the name of the column that contains sample weights. If you have a vector of sample weights, first add these as an extra column to your data.\")\n if weight_evaluation and sample_weight is None:\n raise ValueError(\"Must specify sample_weight column if you specify weight_evaluation=True\")\n if groups is not None and not isinstance(groups, str):\n raise ValueError('groups must be a string indicating the name of the column that contains the split groups. 
If you have a vector of split groups, first add these as an extra column to your data.')\n try:\n from ..version import __version__\n self.version = __version__\n except:\n self.version = None\n self._python_version = f'{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}'\n\n # TODO: Possibly rename to features_in or consider refactoring all feature_generators features_in -> features\n @property\n def features(self):\n return self.feature_generator.features_in\n\n @property\n def feature_metadata_in(self):\n return self.feature_generator.feature_metadata_in\n\n @property\n def feature_generators(self):\n return [self.feature_generator]\n\n @property\n def class_labels(self):\n return self.label_cleaner.ordered_class_labels\n\n @property\n def is_fit(self):\n return self.trainer_path is not None or self.trainer is not None\n\n @property\n def positive_class(self):\n \"\"\"\n Returns the positive class name in binary classification. Useful for computing metrics such as F1 which require a positive and negative class.\n In binary classification, :class:`TabularPredictor.predict_proba()` returns the estimated probability that each row belongs to the positive class.\n Will print a warning and return None if called when `predictor.problem_type != 'binary'`.\n\n Returns\n -------\n The positive class name in binary classification or None if the problem is not binary classification.\n \"\"\"\n if not self.is_fit:\n if self._positive_class is not None:\n return self._positive_class\n raise AssertionError('Predictor must be fit to return positive_class.')\n if self.problem_type != BINARY:\n logger.warning(f\"Warning: Attempted to retrieve positive class label in a non-binary problem. Positive class labels only exist in binary classification. Returning None instead. 
self.problem_type is '{self.problem_type}' but positive_class only exists for '{BINARY}'.\")\n return None\n return self.label_cleaner.cat_mappings_dependent_var[1]\n\n def set_contexts(self, path_context):\n self.path, self.model_context, self.save_path = self.create_contexts(path_context)\n\n def create_contexts(self, path_context):\n model_context = path_context + 'models' + os.path.sep\n save_path = path_context + self.learner_file_name\n return path_context, model_context, save_path\n\n def fit(self, X: DataFrame, X_val: DataFrame = None, **kwargs):\n if self.is_fit:\n raise AssertionError('Learner is already fit.')\n self._validate_fit_input(X=X, X_val=X_val, **kwargs)\n return self._fit(X=X, X_val=X_val, **kwargs)\n\n def _fit(self, X: DataFrame, X_val: DataFrame = None, scheduler_options=None, hyperparameter_tune=False,\n feature_prune=False, holdout_frac=0.1, hyperparameters=None, verbosity=2):\n raise NotImplementedError\n\n def predict_proba(self, X: DataFrame, model=None, as_pandas=True, as_multiclass=True, inverse_transform=True):\n if as_pandas:\n X_index = copy.deepcopy(X.index)\n else:\n X_index = None\n if X.empty:\n y_pred_proba = np.array([])\n else:\n y_pred_proba = self.load_trainer().predict_proba(self.transform_features(X), model=model)\n if inverse_transform:\n y_pred_proba = self.label_cleaner.inverse_transform_proba(y_pred_proba)\n if as_multiclass and (self.problem_type == BINARY):\n y_pred_proba = LabelCleanerMulticlassToBinary.convert_binary_proba_to_multiclass_proba(y_pred_proba)\n if as_pandas:\n if self.problem_type == MULTICLASS or (as_multiclass and self.problem_type == BINARY):\n y_pred_proba = pd.DataFrame(data=y_pred_proba, columns=self.class_labels, index=X_index)\n elif self.problem_type == QUANTILE:\n y_pred_proba = pd.DataFrame(data=y_pred_proba, columns=self.quantile_levels, index=X_index)\n else:\n y_pred_proba = pd.Series(data=y_pred_proba, name=self.label, index=X_index)\n return y_pred_proba\n\n def predict(self, X: DataFrame, model=None, as_pandas=True):\n if as_pandas:\n X_index = copy.deepcopy(X.index)\n else:\n X_index = None\n y_pred_proba = self.predict_proba(X=X, model=model, as_pandas=False, as_multiclass=False, inverse_transform=False)\n problem_type = self.label_cleaner.problem_type_transform or self.problem_type\n y_pred = get_pred_from_proba(y_pred_proba=y_pred_proba, problem_type=problem_type)\n if problem_type != QUANTILE:\n y_pred = self.label_cleaner.inverse_transform(pd.Series(y_pred))\n if as_pandas:\n y_pred.index = X_index\n y_pred.name = self.label\n else:\n y_pred = y_pred.values\n else:\n if as_pandas:\n y_pred = pd.DataFrame(data=y_pred, columns=self.quantile_levels, index=X_index)\n return y_pred\n\n def _validate_fit_input(self, X: DataFrame, **kwargs):\n if self.label not in X.columns:\n raise KeyError(f\"Label column '{self.label}' is missing from training data. Training data columns: {list(X.columns)}\")\n X_val = kwargs.get('X_val', None)\n self._validate_sample_weight(X, X_val)\n self._validate_groups(X, X_val)\n\n def _validate_sample_weight(self, X, X_val):\n if self.sample_weight is not None:\n if self.sample_weight in [AUTO_WEIGHT, BALANCE_WEIGHT]:\n prefix = f\"Using predefined sample weighting strategy: {self.sample_weight}.\"\n if self.weight_evaluation:\n prefix += \" Warning: We do not recommend weight_evaluation=True with predefined sample weighting.\"\n else:\n if self.sample_weight not in X.columns:\n raise KeyError(f\"sample_weight column '{self.sample_weight}' is missing from training data. 
Training data columns: {list(X.columns)}\")\n weight_vals = X[self.sample_weight]\n if weight_vals.isna().sum() > 0:\n raise ValueError(f\"Sample weights in column '{self.sample_weight}' cannot be nan\")\n if weight_vals.dtype.kind not in 'biuf':\n raise ValueError(f\"Sample weights in column '{self.sample_weight}' must be numeric values\")\n if weight_vals.min() < 0:\n raise ValueError(f\"Sample weights in column '{self.sample_weight}' must be nonnegative\")\n if self.weight_evaluation and X_val is not None and self.sample_weight not in X_val.columns:\n raise KeyError(f\"sample_weight column '{self.sample_weight}' cannot be missing from validation data if weight_evaluation=True\")\n prefix = f\"Values in column '{self.sample_weight}' used as sample weights instead of predictive features.\"\n if self.weight_evaluation:\n suffix = \" Evaluation will report weighted metrics, so ensure same column exists in test data.\"\n else:\n suffix = \" Evaluation metrics will ignore sample weights, specify weight_evaluation=True to instead report weighted metrics.\"\n logger.log(20, prefix+suffix)\n\n def _validate_groups(self, X, X_val):\n if self.groups is not None:\n if self.groups not in X.columns:\n raise KeyError(f\"groups column '{self.groups}' is missing from training data. Training data columns: {list(X.columns)}\")\n groups_vals = X[self.groups]\n if len(groups_vals.unique()) < 2:\n raise ValueError(f\"Groups in column '{self.groups}' cannot have fewer than 2 unique values. Values: {list(groups_vals.unique())}\")\n if X_val is not None and self.groups in X_val.columns:\n raise KeyError(f\"groups column '{self.groups}' cannot be in validation data. Validation data columns: {list(X_val.columns)}\")\n logger.log(20, f\"Values in column '{self.groups}' used as split folds instead of being automatically set. 
Bagged models will have {len(groups_vals.unique())} splits.\")\n\n def get_inputs_to_stacker(self, dataset=None, model=None, base_models: list = None, use_orig_features=True):\n if model is not None or base_models is not None:\n if model is not None and base_models is not None:\n raise AssertionError('Only one of `model`, `base_models` is allowed to be set.')\n\n trainer = self.load_trainer()\n if dataset is None:\n if trainer.bagged_mode:\n dataset_preprocessed = trainer.load_X()\n fit = True\n else:\n dataset_preprocessed = trainer.load_X_val()\n fit = False\n else:\n dataset_preprocessed = self.transform_features(dataset)\n fit = False\n if base_models is not None:\n dataset_preprocessed = trainer.get_inputs_to_stacker(X=dataset_preprocessed, base_models=base_models, fit=fit, use_orig_features=use_orig_features)\n elif model is not None:\n base_models = list(trainer.model_graph.predecessors(model))\n dataset_preprocessed = trainer.get_inputs_to_stacker(X=dataset_preprocessed, base_models=base_models, fit=fit, use_orig_features=use_orig_features)\n # Note: Below doesn't quite work here because weighted_ensemble has unique input format returned that isn't a DataFrame.\n # dataset_preprocessed = trainer.get_inputs_to_model(model=model_to_get_inputs_for, X=dataset_preprocessed, fit=fit)\n\n return dataset_preprocessed\n\n # Fits _FULL models and links them in the stack so _FULL models only use other _FULL models as input during stacking\n # If model is specified, will fit all _FULL models that are ancestors of the provided model, automatically linking them.\n # If no model is specified, all models are refit and linked appropriately.\n def refit_ensemble_full(self, model='all'):\n return self.load_trainer().refit_ensemble_full(model=model)\n\n def fit_transform_features(self, X, y=None, **kwargs):\n if self.label in X:\n X = X.drop(columns=[self.label])\n if self.ignored_columns:\n logger.log(20, f'Dropping user-specified ignored columns: {self.ignored_columns}')\n X = X.drop(columns=self.ignored_columns, errors='ignore')\n for feature_generator in self.feature_generators:\n X = feature_generator.fit_transform(X, y, **kwargs)\n return X\n\n def transform_features(self, X):\n for feature_generator in self.feature_generators:\n X = feature_generator.transform(X)\n return X\n\n def score(self, X: DataFrame, y=None, model=None):\n if y is None:\n X, y = self.extract_label(X)\n self._validate_class_labels(y)\n w = None\n if self.weight_evaluation:\n X, w = extract_column(X, self.sample_weight)\n if self.eval_metric.needs_pred:\n y_pred = self.predict(X=X, model=model, as_pandas=False)\n if self.problem_type == BINARY:\n # Use 1 and 0, otherwise f1 can crash due to unknown pos_label.\n y_pred = self.label_cleaner.transform(y_pred)\n y = self.label_cleaner.transform(y)\n elif self.eval_metric.needs_quantile:\n y_pred = self.predict(X=X, model=model, as_pandas=False)\n else:\n y_pred = self.predict_proba(X=X, model=model, as_pandas=False, as_multiclass=False)\n y = self.label_cleaner.transform(y)\n return compute_weighted_metric(y, y_pred, self.eval_metric, w, weight_evaluation=self.weight_evaluation, quantile_levels=self.quantile_levels)\n\n # Scores both learner and all individual models, along with computing the optimal ensemble score + weights (oracle)\n def score_debug(self, X: DataFrame, y=None, extra_info=False, compute_oracle=False, extra_metrics=None, silent=False):\n leaderboard_df = self.leaderboard(extra_info=extra_info, silent=silent)\n if y is None:\n X, y = self.extract_label(X)\n if 
extra_metrics is None:\n extra_metrics = []\n self._validate_class_labels(y)\n w = None\n if self.weight_evaluation:\n X, w = extract_column(X, self.sample_weight)\n\n X = self.transform_features(X)\n y_internal = self.label_cleaner.transform(y)\n y_internal = y_internal.fillna(-1)\n\n trainer = self.load_trainer()\n scores = {}\n all_trained_models = trainer.get_model_names()\n all_trained_models_can_infer = trainer.get_model_names(can_infer=True)\n all_trained_models_original = all_trained_models.copy()\n model_pred_proba_dict, pred_time_test_marginal = trainer.get_model_pred_proba_dict(X=X, models=all_trained_models_can_infer, fit=False, record_pred_time=True)\n\n if compute_oracle:\n pred_probas = list(model_pred_proba_dict.values())\n ensemble_selection = EnsembleSelection(ensemble_size=100, problem_type=trainer.problem_type, metric=self.eval_metric, quantile_levels=self.quantile_levels)\n ensemble_selection.fit(predictions=pred_probas, labels=y_internal, identifiers=None, sample_weight=w) # TODO: Only fit non-nan\n\n oracle_weights = ensemble_selection.weights_\n oracle_pred_time_start = time.time()\n oracle_pred_proba_norm = [pred * weight for pred, weight in zip(pred_probas, oracle_weights)]\n oracle_pred_proba_ensemble = np.sum(oracle_pred_proba_norm, axis=0)\n oracle_pred_time = time.time() - oracle_pred_time_start\n model_pred_proba_dict['OracleEnsemble'] = oracle_pred_proba_ensemble\n pred_time_test_marginal['OracleEnsemble'] = oracle_pred_time\n all_trained_models.append('OracleEnsemble')\n\n scoring_args = dict(\n y=y,\n y_internal=y_internal,\n sample_weight=w\n )\n\n extra_scores = {}\n for model_name, y_pred_proba_internal in model_pred_proba_dict.items():\n scores[model_name] = self._score_with_pred_proba(\n y_pred_proba_internal=y_pred_proba_internal,\n metric=self.eval_metric,\n **scoring_args\n )\n for metric in extra_metrics:\n metric = get_metric(metric, self.problem_type, 'leaderboard_metric')\n if metric.name not in extra_scores:\n extra_scores[metric.name] = {}\n extra_scores[metric.name][model_name] = self._score_with_pred_proba(\n y_pred_proba_internal=y_pred_proba_internal,\n metric=metric,\n **scoring_args\n )\n\n if extra_scores:\n series = []\n for metric in extra_scores:\n series.append(pd.Series(extra_scores[metric], name=metric))\n df_extra_scores = pd.concat(series, axis=1)\n extra_metrics_names = list(df_extra_scores.columns)\n df_extra_scores['model'] = df_extra_scores.index\n df_extra_scores = df_extra_scores.reset_index(drop=True)\n else:\n df_extra_scores = None\n extra_metrics_names = None\n\n pred_time_test = {}\n # TODO: Add support for calculating pred_time_test_full for oracle_ensemble, need to copy graph from trainer and add oracle_ensemble to it with proper edges.\n for model in model_pred_proba_dict.keys():\n if model in all_trained_models_original:\n base_model_set = trainer.get_minimum_model_set(model)\n if len(base_model_set) == 1:\n pred_time_test[model] = pred_time_test_marginal[base_model_set[0]]\n else:\n pred_time_test_full_num = 0\n for base_model in base_model_set:\n pred_time_test_full_num += pred_time_test_marginal[base_model]\n pred_time_test[model] = pred_time_test_full_num\n else:\n pred_time_test[model] = None\n\n scored_models = list(scores.keys())\n for model in all_trained_models:\n if model not in scored_models:\n scores[model] = None\n pred_time_test[model] = None\n pred_time_test_marginal[model] = None\n\n logger.debug('Model scores:')\n logger.debug(str(scores))\n model_names_final = list(scores.keys())\n df = 
pd.DataFrame(\n data={\n 'model': model_names_final,\n 'score_test': list(scores.values()),\n 'pred_time_test': [pred_time_test[model] for model in model_names_final],\n 'pred_time_test_marginal': [pred_time_test_marginal[model] for model in model_names_final],\n }\n )\n if df_extra_scores is not None:\n df = pd.merge(df, df_extra_scores, on='model', how='left')\n\n df_merged = pd.merge(df, leaderboard_df, on='model', how='left')\n df_merged = df_merged.sort_values(by=['score_test', 'pred_time_test', 'score_val', 'pred_time_val', 'model'], ascending=[False, True, False, True, False]).reset_index(drop=True)\n df_columns_lst = df_merged.columns.tolist()\n explicit_order = [\n 'model',\n 'score_test',\n ]\n if extra_metrics_names is not None:\n explicit_order += extra_metrics_names\n explicit_order += [\n 'score_val',\n 'pred_time_test',\n 'pred_time_val',\n 'fit_time',\n 'pred_time_test_marginal',\n 'pred_time_val_marginal',\n 'fit_time_marginal',\n 'stack_level',\n 'can_infer',\n 'fit_order',\n ]\n df_columns_other = [column for column in df_columns_lst if column not in explicit_order]\n df_columns_new = explicit_order + df_columns_other\n df_merged = df_merged[df_columns_new]\n\n return df_merged\n\n def _score_with_pred_proba(self,\n y,\n y_internal,\n y_pred_proba_internal,\n metric,\n sample_weight=None,\n weight_evaluation=None):\n metric = get_metric(metric, self.problem_type, 'leaderboard_metric')\n if weight_evaluation is None:\n weight_evaluation = self.weight_evaluation\n if metric.needs_pred:\n if self.problem_type == BINARY:\n # Use 1 and 0, otherwise f1 can crash due to unknown pos_label.\n y_pred = get_pred_from_proba(y_pred_proba_internal, problem_type=self.problem_type)\n y_tmp = y_internal\n else:\n y_pred = self.label_cleaner.inverse_transform_proba(y_pred_proba_internal, as_pred=True)\n y_tmp = y\n elif metric.needs_quantile:\n y_pred = self.label_cleaner.inverse_transform_proba(y_pred_proba_internal, as_pred=True)\n y_tmp = y\n else:\n y_pred = self.label_cleaner.inverse_transform_proba(y_pred_proba_internal, as_pred=False)\n y_tmp = y_internal\n return compute_weighted_metric(y_tmp, y_pred, metric, weights=sample_weight, weight_evaluation=weight_evaluation, quantile_levels=self.quantile_levels)\n\n def _score_with_pred(self,\n y,\n y_internal,\n y_pred_internal,\n metric,\n sample_weight=None,\n weight_evaluation=None):\n metric = get_metric(metric, self.problem_type, 'leaderboard_metric')\n if weight_evaluation is None:\n weight_evaluation = self.weight_evaluation\n if self.problem_type == BINARY:\n # Use 1 and 0, otherwise f1 can crash due to unknown pos_label.\n y_pred = y_pred_internal\n y_tmp = y_internal\n else:\n y_pred = self.label_cleaner.inverse_transform(y_pred_internal)\n y_tmp = y\n return compute_weighted_metric(y_tmp, y_pred, metric, weights=sample_weight, weight_evaluation=weight_evaluation, quantile_levels=self.quantile_levels)\n\n def _validate_class_labels(self, y: Series):\n null_count = y.isnull().sum()\n if null_count:\n raise ValueError(f'Labels cannot contain missing (nan) values. Found {null_count} missing label values.')\n if self.problem_type == MULTICLASS and not self.eval_metric.needs_pred:\n y_unique = np.unique(y)\n valid_class_set = set(self.class_labels)\n unknown_classes = []\n for cls in y_unique:\n if cls not in valid_class_set:\n unknown_classes.append(cls)\n if unknown_classes:\n # log_loss / pac_score\n raise ValueError(f'Multiclass scoring with eval_metric=\\'{self.eval_metric.name}\\' does not support unknown classes. 
Unknown classes: {unknown_classes}')\n\n def evaluate_predictions(self, y_true, y_pred, sample_weight=None, silent=False, auxiliary_metrics=True, detailed_report=False):\n \"\"\" Evaluate predictions. Does not support sample weights since this method reports a variety of metrics.\n Args:\n silent (bool): Should we print which metric is being used as well as performance.\n auxiliary_metrics (bool): Should we compute other (problem_type specific) metrics in addition to the default metric?\n detailed_report (bool): Should we computed more-detailed versions of the auxiliary_metrics? (requires auxiliary_metrics=True).\n\n Returns single performance-value if auxiliary_metrics=False.\n Otherwise returns dict where keys = metrics, values = performance along each metric.\n \"\"\"\n\n is_proba = False\n assert isinstance(y_true, (np.ndarray, pd.Series))\n assert isinstance(y_pred, (np.ndarray, pd.Series, pd.DataFrame))\n self._validate_class_labels(y_true)\n if isinstance(y_pred, np.ndarray):\n if self.problem_type == QUANTILE:\n y_pred = pd.DataFrame(data=y_pred, columns=self.quantile_levels)\n elif len(y_pred.shape) > 1:\n y_pred = pd.DataFrame(data=y_pred, columns=self.class_labels)\n\n if isinstance(y_pred, pd.DataFrame):\n is_proba = True\n elif not self.eval_metric.needs_pred:\n raise AssertionError(f'`evaluate_predictions` requires y_pred_proba input '\n f'when evaluating \"{self.eval_metric.name}\"... Please generate valid input via `predictor.predict_proba(data)`.\\n'\n f'This may have occurred if you passed in predict input instead of predict_proba input, '\n f'or if you specified `as_multiclass=False` to `predictor.predict_proba(data, as_multiclass=False)`, '\n f'which is not supported by `evaluate_predictions`.')\n if is_proba:\n y_pred_proba = y_pred\n y_pred = get_pred_from_proba_df(y_pred_proba, problem_type=self.problem_type)\n if self.problem_type == BINARY:\n # roc_auc crashes if this isn't done\n y_pred_proba = y_pred_proba[self.positive_class]\n else:\n y_pred_proba = None\n y_pred = pd.Series(y_pred)\n if y_pred_proba is not None:\n y_pred_proba_internal = self.label_cleaner.transform_proba(y_pred_proba, as_pandas=True)\n else:\n y_pred_proba_internal = None\n y_true_internal = self.label_cleaner.transform(y_true) # Get labels in numeric order\n y_true_internal = y_true_internal.fillna(-1)\n y_pred_internal = self.label_cleaner.transform(y_pred) # Get labels in numeric order\n\n # Compute auxiliary metrics:\n auxiliary_metrics_lst = [self.eval_metric]\n performance_dict = {}\n\n if auxiliary_metrics:\n if self.problem_type == REGRESSION: # Adding regression metrics\n auxiliary_metrics_lst += [\n 'root_mean_squared_error',\n 'mean_squared_error',\n 'mean_absolute_error',\n 'r2',\n 'pearsonr',\n 'median_absolute_error',\n ]\n if self.problem_type in [BINARY, MULTICLASS]: # Adding classification metrics\n auxiliary_metrics_lst += [\n 'accuracy',\n 'balanced_accuracy',\n # 'log_loss', # Don't include as it probably adds more confusion to novice users (can be infinite)\n 'mcc',\n ]\n if self.problem_type == BINARY: # binary-specific metrics\n auxiliary_metrics_lst += [\n 'roc_auc',\n 'f1',\n 'precision',\n 'recall',\n ]\n\n scoring_args = dict(\n y=y_true,\n y_internal=y_true_internal,\n weight_evaluation=False,\n )\n\n if sample_weight is not None:\n scoring_args['sample_weight'] = sample_weight\n scoring_args['weight_evaluation'] = True\n\n for aux_metric in auxiliary_metrics_lst:\n if isinstance(aux_metric, str):\n aux_metric = get_metric(metric=aux_metric, 
problem_type=self.problem_type, metric_type='aux_metric')\n if not aux_metric.needs_pred and y_pred_proba_internal is None:\n logger.log(15, f'Skipping {aux_metric.name} because no prediction probabilities are available to score.')\n continue\n\n if aux_metric.name not in performance_dict:\n if y_pred_proba_internal is not None:\n score = self._score_with_pred_proba(\n y_pred_proba_internal=y_pred_proba_internal,\n metric=aux_metric,\n **scoring_args\n )\n else:\n score = self._score_with_pred(\n y_pred_internal=y_pred_internal,\n metric=aux_metric,\n **scoring_args\n )\n performance_dict[aux_metric.name] = score\n\n if self.eval_metric.name in performance_dict:\n score_eval = performance_dict[self.eval_metric.name]\n score_eval_flipped = self.eval_metric.convert_score_to_sklearn_val(score_eval) # flip negative once again back to positive (so higher is no longer necessarily better)\n if score_eval_flipped != score_eval:\n flipped = True\n else:\n flipped = False\n if not silent:\n logger.log(20, f\"Evaluation: {self.eval_metric.name} on test data: {score_eval}\")\n if flipped:\n logger.log(20, f\"\\tNote: Scores are always higher_is_better. This metric score can be multiplied by -1 to get the metric value.\")\n\n if not silent:\n logger.log(20, \"Evaluations on test data:\")\n logger.log(20, json.dumps(performance_dict, indent=4))\n\n if detailed_report and (self.problem_type != REGRESSION):\n # Construct confusion matrix\n try:\n performance_dict['confusion_matrix'] = confusion_matrix(y_true, y_pred, labels=self.label_cleaner.ordered_class_labels, output_format='pandas_dataframe')\n except ValueError:\n pass\n # One final set of metrics to report\n cl_metric = lambda y_true, y_pred: classification_report(y_true, y_pred, output_dict=True)\n metric_name = 'classification_report'\n if metric_name not in performance_dict:\n try: # only compute auxiliary metrics which do not error (y_pred = class-probabilities may cause some metrics to error)\n performance_dict[metric_name] = cl_metric(y_true, y_pred)\n except ValueError:\n pass\n if not silent and metric_name in performance_dict:\n logger.log(20, \"Detailed (per-class) classification report:\")\n logger.log(20, json.dumps(performance_dict[metric_name], indent=4))\n return performance_dict\n\n def extract_label(self, X):\n if self.label not in list(X.columns):\n raise ValueError(f\"Provided DataFrame does not contain label column: {self.label}\")\n y = X[self.label].copy()\n X = X.drop(self.label, axis=1)\n return X, y\n\n def leaderboard(self, X=None, y=None, extra_info=False, extra_metrics=None, only_pareto_frontier=False, silent=False):\n if X is not None:\n leaderboard = self.score_debug(X=X, y=y, extra_info=extra_info, extra_metrics=extra_metrics, silent=True)\n else:\n if extra_metrics:\n raise AssertionError('`extra_metrics` is only valid when data is specified.')\n trainer = self.load_trainer()\n leaderboard = trainer.leaderboard(extra_info=extra_info)\n if only_pareto_frontier:\n if 'score_test' in leaderboard.columns and 'pred_time_test' in leaderboard.columns:\n score_col = 'score_test'\n inference_time_col = 'pred_time_test'\n else:\n score_col = 'score_val'\n inference_time_col = 'pred_time_val'\n leaderboard = get_leaderboard_pareto_frontier(leaderboard=leaderboard, score_col=score_col, inference_time_col=inference_time_col)\n if not silent:\n with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 1000):\n print(leaderboard)\n return leaderboard\n\n # TODO: cache_data must be set to True to 
be able to pass X and y as None in this function, otherwise it will error.\n # Warning: This can take a very, very long time to compute if the data is large and the model is complex.\n # A value of 0.01 means that the objective metric error would be expected to increase by 0.01 if the feature were removed.\n # Negative values mean the feature is likely harmful.\n # model: model (str) to get feature importances for, if None will choose best model.\n # features: list of feature names that feature importances are calculated for and returned, specify None to get all feature importances.\n # feature_stage: Whether to compute feature importance on raw original features ('original'), transformed features ('transformed') or on the features used by the particular model ('transformed_model').\n def get_feature_importance(self, model=None, X=None, y=None, features: list = None, feature_stage='original', subsample_size=5000, silent=False, **kwargs) -> DataFrame:\n valid_feature_stages = ['original', 'transformed', 'transformed_model']\n if feature_stage not in valid_feature_stages:\n raise ValueError(f'feature_stage must be one of: {valid_feature_stages}, but was {feature_stage}.')\n trainer = self.load_trainer()\n if X is not None:\n if y is None:\n X, y = self.extract_label(X)\n y = self.label_cleaner.transform(y)\n X, y = self._remove_nan_label_rows(X, y)\n if self.ignored_columns:\n X = X.drop(columns=self.ignored_columns, errors='ignore')\n unused_features = [f for f in list(X.columns) if f not in self.features]\n if len(unused_features) > 0:\n logger.log(30, f'These features in provided data are not utilized by the predictor and will be ignored: {unused_features}')\n X = X.drop(columns=unused_features)\n \n if feature_stage == 'original':\n return trainer._get_feature_importance_raw(model=model, X=X, y=y, features=features, subsample_size=subsample_size, transform_func=self.transform_features, silent=silent, **kwargs)\n X = self.transform_features(X)\n else:\n if feature_stage == 'original':\n raise AssertionError('Feature importance `dataset` cannot be None if `feature_stage==\\'original\\'`. 
A test dataset must be specified.')\n y = None\n raw = feature_stage == 'transformed'\n return trainer.get_feature_importance(X=X, y=y, model=model, features=features, raw=raw, subsample_size=subsample_size, silent=silent, **kwargs)\n\n @staticmethod\n def _remove_nan_label_rows(X, y):\n if y.isnull().any():\n y = y.dropna()\n X = X.loc[y.index]\n return X, y\n\n @staticmethod\n def infer_problem_type(y: Series):\n return infer_problem_type(y=y)\n\n def save(self):\n trainer = None\n if self.trainer is not None:\n if not self.is_trainer_present:\n self.trainer.save()\n trainer = self.trainer\n self.trainer = None\n save_pkl.save(path=self.save_path, object=self)\n self.trainer = trainer\n\n # reset_paths=True if the learner files have changed location since fitting.\n # TODO: Potentially set reset_paths=False inside load function if it is the same path to avoid re-computing paths on all models\n # TODO: path_context -> path for v0.1\n @classmethod\n def load(cls, path_context, reset_paths=True):\n load_path = path_context + cls.learner_file_name\n obj = load_pkl.load(path=load_path)\n if reset_paths:\n obj.set_contexts(path_context)\n if obj.trainer_path is not None:\n obj.trainer_path = obj.model_context\n obj.reset_paths = reset_paths\n # TODO: Still have to change paths of models in trainer + trainer object path variables\n return obj\n else:\n obj.set_contexts(obj.path_context)\n return obj\n\n def save_trainer(self, trainer):\n if self.is_trainer_present:\n self.trainer = trainer\n self.save()\n else:\n self.trainer_path = trainer.path\n trainer.save()\n\n def load_trainer(self) -> AbstractTrainer:\n if self.trainer is not None:\n return self.trainer\n else:\n if self.trainer_path is None:\n raise AssertionError('Trainer does not exist.')\n return self.trainer_type.load(path=self.trainer_path, reset_paths=self.reset_paths)\n\n # Loads models in memory so that they don't have to be loaded during predictions\n def persist_trainer(self, low_memory=False, models='all', with_ancestors=False, max_memory=None) -> list:\n self.trainer = self.load_trainer()\n if not low_memory:\n return self.trainer.persist_models(models, with_ancestors=with_ancestors, max_memory=max_memory)\n # Warning: After calling this, it is not necessarily safe to save learner or trainer anymore\n # If neural network is persisted and then trainer or learner is saved, there will be an exception thrown\n else:\n return []\n\n def distill(self, X=None, y=None, X_val=None, y_val=None, time_limit=None, hyperparameters=None, holdout_frac=None,\n verbosity=None, models_name_suffix=None, teacher_preds='soft',\n augmentation_data=None, augment_method='spunge', augment_args={'size_factor': 5, 'max_size': int(1e5)}):\n \"\"\" See abstract_trainer.distill() for details. 
\"\"\"\n if X is not None:\n if (self.eval_metric is not None) and (self.eval_metric.name == 'log_loss') and (self.problem_type == MULTICLASS):\n X = augment_rare_classes(X, self.label, self.threshold)\n if y is None:\n X, y = self.extract_label(X)\n X = self.transform_features(X)\n y = self.label_cleaner.transform(y)\n if self.problem_type == MULTICLASS:\n y = y.fillna(-1)\n else:\n y = None\n\n if X_val is not None:\n if X is None:\n raise ValueError(\"Cannot specify X_val without specifying X\")\n if y_val is None:\n X_val, y_val = self.extract_label(X_val)\n X_val = self.transform_features(X_val)\n y_val = self.label_cleaner.transform(y_val)\n\n if augmentation_data is not None:\n augmentation_data = self.transform_features(augmentation_data)\n\n trainer = self.load_trainer()\n distilled_model_names = trainer.distill(X=X, y=y, X_val=X_val, y_val=y_val, time_limit=time_limit, hyperparameters=hyperparameters,\n holdout_frac=holdout_frac, verbosity=verbosity, teacher_preds=teacher_preds, models_name_suffix=models_name_suffix,\n augmentation_data=augmentation_data, augment_method=augment_method, augment_args=augment_args)\n self.save_trainer(trainer=trainer)\n return distilled_model_names\n\n @classmethod\n def load_info(cls, path, reset_paths=True, load_model_if_required=True):\n load_path = path + cls.learner_info_name\n try:\n return load_pkl.load(path=load_path)\n except Exception as e:\n if load_model_if_required:\n learner = cls.load(path_context=path, reset_paths=reset_paths)\n return learner.get_info()\n else:\n raise e\n\n def save_info(self, include_model_info=False):\n info = self.get_info(include_model_info=include_model_info)\n\n save_pkl.save(path=self.path + self.learner_info_name, object=info)\n save_json.save(path=self.path + self.learner_info_json_name, obj=info)\n return info\n\n # TODO: Add data info gathering at beginning of .fit() that is used by all learners to add to get_info output\n # TODO: Add feature inference / feature engineering info to get_info output\n def get_info(self, **kwargs):\n learner_info = {\n 'path': self.path,\n 'label': self.label,\n 'random_state': self.random_state,\n 'version': self.version,\n 'features': self.features,\n 'feature_metadata_in': self.feature_metadata_in,\n }\n\n return learner_info\n" ]
[ [ "torch.optim.Adam", "torch.nn.CrossEntropyLoss", "torch.optim.AdamW", "torch.optim.SGD", "torch.nn.MSELoss" ], [ "pandas.merge", "pandas.concat", "pandas.Series", "numpy.unique", "pandas.option_context", "pandas.DataFrame", "numpy.array", "sklearn.metrics.classification_report", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
cffbots/swan
[ "ea8f1a0943a7ffd3cd20d8adee6f8b7a96a117f1" ]
[ "scripts/run_torch_models.py" ]
[ "#!/usr/bin/env python\n\nimport logging\nfrom pathlib import Path\nimport torch\nfrom swan.dataset import TorchGeometricGraphData, FingerprintsData, DGLGraphData\nfrom swan.modeller import TorchModeller\nfrom swan.modeller.models import FingerprintFullyConnected, MPNN, SE3Transformer\nfrom swan.utils.log_config import configure_logger\nfrom swan.utils.plot import create_scatter_plot\n\nconfigure_logger(Path(\".\"))\n\n# Starting logger\nLOGGER = logging.getLogger(__name__)\n\n\n# Path to the DATASET\npath_files = Path(\"tests/files\")\npath_data = path_files / \"cdft_properties.csv\"\npath_geometries = path_files / \"cdft_geometries.json\"\n\n\n# Training variables\nnepoch = 150\nbatch_size = 32\nproperties = [\n # \"Dissocation energy (nucleofuge)\",\n # \"Dissociation energy (electrofuge)\",\n # \"Electroaccepting power(w+)\",\n # \"Electrodonating power (w-)\",\n # \"Electronegativity (chi=-mu)\",\n \"Electronic chemical potential (mu)\",\n # \"Electronic chemical potential (mu+)\",\n # \"Electronic chemical potential (mu-)\",\n # \"Electrophilicity index (w=omega)\",\n # \"Global Dual Descriptor Deltaf+\",\n # \"Global Dual Descriptor Deltaf-\",\n # \"Hardness (eta)\",\n # \"Hyperhardness (gamma)\",\n # \"Net Electrophilicity\",\n # \"Softness (S)\"\n]\nnum_labels = len(properties)\n\n# Datasets\ndata = FingerprintsData(\n path_data, properties=properties, sanitize=False)\n# data = DGLGraphData(\n# path_data, properties=properties, file_geometries=path_geometries, sanitize=False)\n# data = TorchGeometricGraphData(path_data, properties=properties, file_geometries=path_geometries, sanitize=False)\n# FullyConnected NN\nnet = FingerprintFullyConnected(hidden_cells=100, num_labels=num_labels)\n\n# # Graph NN configuration\n# net = MPNN(batch_size=batch_size, output_channels=40, num_labels=num_labels)\n\n# # se3 transformers\n# num_layers = 2 # Number of equivariant layers\n# num_channels = 8 # Number of channels in middle layers\n# num_nlayers = 0 # Number of layers for nonlinearity\n# num_degrees = 2 # Number of irreps {0,1,...,num_degrees-1}\n# div = 4 # Low dimensional embedding fraction\n# pooling = 'avg' # Choose from avg or max\n# n_heads = 1 # Number of attention heads\n\n# net = SE3Transformer(\n# num_layers, num_channels, num_nlayers=num_nlayers, num_degrees=num_degrees, div=div,\n# pooling=pooling, n_heads=n_heads)\n\n# training and validation\ntorch.set_default_dtype(torch.float32)\nresearcher = TorchModeller(net, data, use_cuda=False)\nresearcher.set_optimizer(\"Adam\", lr=0.0005)\nresearcher.set_scheduler(\"StepLR\", 0.1)\nresearcher.data.scale_labels()\ntrained_data = researcher.train_model(nepoch=nepoch, batch_size=batch_size)\npredicted_train, expected_train = [x for x in trained_data]\nprint(\"train regression\")\ncreate_scatter_plot(predicted_train, expected_train, properties, \"trained_scatterplot\")\n\n# Print validation scatterplot\nprint(\"validation regression\")\npredicted_validation, expected_validation = [x for x in researcher.validate_model()]\ncreate_scatter_plot(predicted_validation, expected_validation, properties, \"validation_scatterplot\")\n\nprint(\"properties stored in the HDF5\")\nresearcher.state.show()\n" ]
[ [ "torch.set_default_dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anuragbms/Sales-forecasting-with-RNNs
[ "22b4639ecbb48381af53326ace94a3538201b586", "22b4639ecbb48381af53326ace94a3538201b586", "22b4639ecbb48381af53326ace94a3538201b586", "22b4639ecbb48381af53326ace94a3538201b586", "22b4639ecbb48381af53326ace94a3538201b586", "22b4639ecbb48381af53326ace94a3538201b586", "22b4639ecbb48381af53326ace94a3538201b586", "22b4639ecbb48381af53326ace94a3538201b586", "22b4639ecbb48381af53326ace94a3538201b586", "22b4639ecbb48381af53326ace94a3538201b586", "22b4639ecbb48381af53326ace94a3538201b586", "22b4639ecbb48381af53326ace94a3538201b586", "22b4639ecbb48381af53326ace94a3538201b586", "22b4639ecbb48381af53326ace94a3538201b586" ]
[ "MetamorphicTests/all_mutants/sales_forecasting_file/228.py", "MetamorphicTests/all_mutants/sales_forecasting_file/230.py", "MetamorphicTests/all_mutants/sales_forecasting_file/273.py", "MetamorphicTests/mutants_of_interest/sales_forecasting_file/257_bug.py", "MetamorphicTests/all_mutants/sales_forecasting_file/208.py", "MetamorphicTests/all_mutants/sales_forecasting_file/249.py", "MetamorphicTests/all_mutants/sales_forecasting_file/191.py", "MetamorphicTests/all_mutants/sales_forecasting_file/56.py", "MetamorphicTests/all_mutants/load_model_forecast_file/42.py", "MetamorphicTests/all_mutants/sales_forecasting_file/0.py", "MetamorphicTests/all_mutants/sales_forecasting_file/279.py", "MetamorphicTests/all_mutants/sales_forecasting_file/159.py", "MetamorphicTests/all_mutants/sales_forecasting_file/256.py", "MetamorphicTests/all_mutants/sales_forecasting_file/153.py" ]
[ "def gen_mutants():\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n import tensorflow as tf\n import pandas\n import numpy as np\n \n \n \n \n \n \n DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'\n \n DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'\n \n \n \n \n \n TRAINED_MODEL_PATH = 'savedModel'\n \n TIME_STEPS = 10\n NUMBER_OF_DAYS_TO_FORECAST = 1\n \n BATCH_SIZE = 100\n \n NUM_EPOCHS = 100\n \n LSTM_UNITS = 250\n \n TENSORBOARD_LOGDIR = 'tensorboard_log'\n \n \n \n \n \n \n data_train = pandas.read_csv(DATAFILE_TRAIN)\n data_validate = pandas.read_csv(DATAFILE_VALIDATE)\n \n \n \n \n \n \n data_train.head()\n \n \n \n \n \n \n \n numTrainingData = len(data_train)\n numValidationData = len(data_validate)\n \n trainingData_date = data_train['date'][0:numTrainingData]\n trainingData_sales = data_train['sales'][0:numTrainingData]\n trainindData_price = data_train['price'][0:numTrainingData]\n \n validationData_date = data_validate['date'][0:numValidationData]\n validationData_sales = data_validate['sales'][0:numValidationData]\n validationData_price = data_validate['price'][0:numValidationData]\n \n \n \n \n \n trainingData_sales.head()\n \n \n \n \n \n print(len(trainingData_sales))\n print(len(validationData_sales))\n \n \n \n \n \n \n \n \n trainingData_sales_min = min(trainingData_sales)\n trainingData_sales_max = max(trainingData_sales)\n trainingData_sales_range = trainingData_sales_max - trainingData_sales_min\n trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]\n \n validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]\n \n \n \n \n \n \n print('Min:', trainingData_sales_min)\n print('Range:', trainingData_sales_max - trainingData_sales_min)\n \n \n \n \n \n \n trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n start = 0\n for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]\n targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start = start + 1\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n [trainingDataSequence_sales[i,:,0] for i in range(3)]\n \n \n \n \n \n \n [targetDataSequence_sales[i] for i in range(3)]\n \n \n \n \n \n \n \n \n \n \n \n \n a = np.arange(len(targetDataSequence_sales))\n np.random.shuffle(a)\n trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n loc = 0\n for i in a:\n trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]\n targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]\n loc += 1\n \n trainingDataSequence_sales = trainingDataSequence_sales_shuffle\n targetDataSequence_sales = targetDataSequence_sales_shuffle\n \n \n \n \n \n \n validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n validationDataSequence_sales_target = 
np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n start = 0\n for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]\n validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start += 1\n \n \n \n \n \n \n tf.reset_default_graph()\n \n inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')\n targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')\n \n \n cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')\n \n \n (output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)\n \n \n lastCellOutput = output[:,-1,:]\n \n \n \n \n \n print('output:', output)\n print('state:', state)\n print('lastCellOutput:', lastCellOutput)\n \n \n \n \n \n \n \n \n \n \n \n \n \n weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))\n bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))\n \n forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')\n \n \n \n \n forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')\n \n \n \n \n \n print(forecast)\n print(forecast_originalScale)\n \n \n \n \n \n \n \n loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')\n \n tf.summary.scalar(tensor=loss, name='loss')\n \n \n \n \n \n optimizer = tf.train.AdamOptimizer(learning_rate=0.1)\n minimize_step = optimizer.minimize(loss)\n \n \n \n \n \n \n \n \n \n \n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n \n \n tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)\n \n \n all_summary_ops = tf.summary.merge_all()\n \n \n numSteps = 0\n for e in range(NUM_EPOCHS):\n print('starting training for epoch:', e + 1)\n \n startLocation = 0\n iteration = 0\n for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n tensorboard_writer.add_summary(summary_values, numSteps)\n numSteps += 1\n \n if (iteration + 1) % 1 == 0:\n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n startLocation += BATCH_SIZE\n \n \n if len(targetDataSequence_sales) > startLocation:\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n 
trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n \n totalValidationLoss = 0\n startLocation = 0\n print('starting validation')\n for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):\n validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]\n \n (validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n \n startLocation += BATCH_SIZE\n totalValidationLoss += validationLsBatch\n \n print('first five predictions:', validationForecastBatch[0:5])\n print('first five actuals :', validationBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[1:5])\n print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n if startLocation < len(validationDataSequence_sales):\n validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]\n \n (validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n totalValidationLoss += validationLsBatch\n \n \n print('Validation completed after epoch:', e + 1, '. 
Total validation loss:', totalValidationLoss)\n \n \n print('----------- Saving Model')\n tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\\\n {'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\\\n {'loss': loss, 'forecast_originalScale': forecast_originalScale})\n print('saved model to:', TRAINED_MODEL_PATH)\n \n print('----------- Finis')", "def gen_mutants():\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n import tensorflow as tf\n import pandas\n import numpy as np\n \n \n \n \n \n \n DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'\n \n DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'\n \n \n \n \n \n TRAINED_MODEL_PATH = 'savedModel'\n \n TIME_STEPS = 10\n NUMBER_OF_DAYS_TO_FORECAST = 1\n \n BATCH_SIZE = 100\n \n NUM_EPOCHS = 100\n \n LSTM_UNITS = 250\n \n TENSORBOARD_LOGDIR = 'tensorboard_log'\n \n \n \n \n \n \n data_train = pandas.read_csv(DATAFILE_TRAIN)\n data_validate = pandas.read_csv(DATAFILE_VALIDATE)\n \n \n \n \n \n \n data_train.head()\n \n \n \n \n \n \n \n numTrainingData = len(data_train)\n numValidationData = len(data_validate)\n \n trainingData_date = data_train['date'][0:numTrainingData]\n trainingData_sales = data_train['sales'][0:numTrainingData]\n trainindData_price = data_train['price'][0:numTrainingData]\n \n validationData_date = data_validate['date'][0:numValidationData]\n validationData_sales = data_validate['sales'][0:numValidationData]\n validationData_price = data_validate['price'][0:numValidationData]\n \n \n \n \n \n trainingData_sales.head()\n \n \n \n \n \n print(len(trainingData_sales))\n print(len(validationData_sales))\n \n \n \n \n \n \n \n \n trainingData_sales_min = min(trainingData_sales)\n trainingData_sales_max = max(trainingData_sales)\n trainingData_sales_range = trainingData_sales_max - trainingData_sales_min\n trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]\n \n validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]\n \n \n \n \n \n \n print('Min:', trainingData_sales_min)\n print('Range:', trainingData_sales_max - trainingData_sales_min)\n \n \n \n \n \n \n trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n start = 0\n for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]\n targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start = start + 1\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n [trainingDataSequence_sales[i,:,0] for i in range(3)]\n \n \n \n \n \n \n [targetDataSequence_sales[i] for i in range(3)]\n \n \n \n \n \n \n \n \n \n \n \n \n a = np.arange(len(targetDataSequence_sales))\n np.random.shuffle(a)\n trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n loc = 0\n for i in a:\n trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]\n 
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]\n loc += 1\n \n trainingDataSequence_sales = trainingDataSequence_sales_shuffle\n targetDataSequence_sales = targetDataSequence_sales_shuffle\n \n \n \n \n \n \n validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n start = 0\n for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]\n validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start += 1\n \n \n \n \n \n \n tf.reset_default_graph()\n \n inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')\n targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')\n \n \n cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')\n \n \n (output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)\n \n \n lastCellOutput = output[:,-1,:]\n \n \n \n \n \n print('output:', output)\n print('state:', state)\n print('lastCellOutput:', lastCellOutput)\n \n \n \n \n \n \n \n \n \n \n \n \n \n weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))\n bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))\n \n forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')\n \n \n \n \n forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')\n \n \n \n \n \n print(forecast)\n print(forecast_originalScale)\n \n \n \n \n \n \n \n loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')\n \n tf.summary.scalar(tensor=loss, name='loss')\n \n \n \n \n \n optimizer = tf.train.AdamOptimizer(learning_rate=0.1)\n minimize_step = optimizer.minimize(loss)\n \n \n \n \n \n \n \n \n \n \n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n \n \n tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)\n \n \n all_summary_ops = tf.summary.merge_all()\n \n \n numSteps = 0\n for e in range(NUM_EPOCHS):\n print('starting training for epoch:', e + 1)\n \n startLocation = 0\n iteration = 0\n for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n tensorboard_writer.add_summary(summary_values, numSteps)\n numSteps += 1\n \n if (iteration + 1) % 1 == 0:\n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals 
were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n startLocation += BATCH_SIZE\n \n \n if len(targetDataSequence_sales) > startLocation:\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n \n totalValidationLoss = 0\n startLocation = 0\n print('starting validation')\n for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):\n validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]\n \n (validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n \n startLocation += BATCH_SIZE\n totalValidationLoss += validationLsBatch\n \n print('first five predictions:', validationForecastBatch[0:5])\n print('first five actuals :', validationBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])\n print('mutpy', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n if startLocation < len(validationDataSequence_sales):\n validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]\n \n (validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n totalValidationLoss += validationLsBatch\n \n \n print('Validation completed after epoch:', e + 1, '. 
Total validation loss:', totalValidationLoss)\n \n \n print('----------- Saving Model')\n tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\\\n {'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\\\n {'loss': loss, 'forecast_originalScale': forecast_originalScale})\n print('saved model to:', TRAINED_MODEL_PATH)\n \n print('----------- Finis')", "def gen_mutants():\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n import tensorflow as tf\n import pandas\n import numpy as np\n \n \n \n \n \n \n DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'\n \n DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'\n \n \n \n \n \n TRAINED_MODEL_PATH = 'savedModel'\n \n TIME_STEPS = 10\n NUMBER_OF_DAYS_TO_FORECAST = 1\n \n BATCH_SIZE = 100\n \n NUM_EPOCHS = 100\n \n LSTM_UNITS = 250\n \n TENSORBOARD_LOGDIR = 'tensorboard_log'\n \n \n \n \n \n \n data_train = pandas.read_csv(DATAFILE_TRAIN)\n data_validate = pandas.read_csv(DATAFILE_VALIDATE)\n \n \n \n \n \n \n data_train.head()\n \n \n \n \n \n \n \n numTrainingData = len(data_train)\n numValidationData = len(data_validate)\n \n trainingData_date = data_train['date'][0:numTrainingData]\n trainingData_sales = data_train['sales'][0:numTrainingData]\n trainindData_price = data_train['price'][0:numTrainingData]\n \n validationData_date = data_validate['date'][0:numValidationData]\n validationData_sales = data_validate['sales'][0:numValidationData]\n validationData_price = data_validate['price'][0:numValidationData]\n \n \n \n \n \n trainingData_sales.head()\n \n \n \n \n \n print(len(trainingData_sales))\n print(len(validationData_sales))\n \n \n \n \n \n \n \n \n trainingData_sales_min = min(trainingData_sales)\n trainingData_sales_max = max(trainingData_sales)\n trainingData_sales_range = trainingData_sales_max - trainingData_sales_min\n trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]\n \n validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]\n \n \n \n \n \n \n print('Min:', trainingData_sales_min)\n print('Range:', trainingData_sales_max - trainingData_sales_min)\n \n \n \n \n \n \n trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n start = 0\n for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]\n targetDataSequence_sales[start] = trainingData_sales_normalised[i:]\n start = start + 1\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n [trainingDataSequence_sales[i,:,0] for i in range(3)]\n \n \n \n \n \n \n [targetDataSequence_sales[i] for i in range(3)]\n \n \n \n \n \n \n \n \n \n \n \n \n a = np.arange(len(targetDataSequence_sales))\n np.random.shuffle(a)\n trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n loc = 0\n for i in a:\n trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]\n 
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]\n loc += 1\n \n trainingDataSequence_sales = trainingDataSequence_sales_shuffle\n targetDataSequence_sales = targetDataSequence_sales_shuffle\n \n \n \n \n \n \n validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n start = 0\n for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]\n validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start += 1\n \n \n \n \n \n \n tf.reset_default_graph()\n \n inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')\n targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')\n \n \n cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')\n \n \n (output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)\n \n \n lastCellOutput = output[:,-1,:]\n \n \n \n \n \n print('output:', output)\n print('state:', state)\n print('lastCellOutput:', lastCellOutput)\n \n \n \n \n \n \n \n \n \n \n \n \n \n weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))\n bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))\n \n forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')\n \n \n \n \n forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')\n \n \n \n \n \n print(forecast)\n print(forecast_originalScale)\n \n \n \n \n \n \n \n loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')\n \n tf.summary.scalar(tensor=loss, name='loss')\n \n \n \n \n \n optimizer = tf.train.AdamOptimizer(learning_rate=0.1)\n minimize_step = optimizer.minimize(loss)\n \n \n \n \n \n \n \n \n \n \n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n \n \n tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)\n \n \n all_summary_ops = tf.summary.merge_all()\n \n \n numSteps = 0\n for e in range(NUM_EPOCHS):\n print('starting training for epoch:', e + 1)\n \n startLocation = 0\n iteration = 0\n for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n tensorboard_writer.add_summary(summary_values, numSteps)\n numSteps += 1\n \n if (iteration + 1) % 1 == 0:\n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals 
were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n startLocation += BATCH_SIZE\n \n \n if len(targetDataSequence_sales) > startLocation:\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n \n totalValidationLoss = 0\n startLocation = 0\n print('starting validation')\n for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):\n validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]\n \n (validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n \n startLocation += BATCH_SIZE\n totalValidationLoss += validationLsBatch\n \n print('first five predictions:', validationForecastBatch[0:5])\n print('first five actuals :', validationBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n if startLocation < len(validationDataSequence_sales):\n validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]\n \n (validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n totalValidationLoss += validationLsBatch\n \n \n print('Validation completed after epoch:', e + 1, '. 
Total validation loss:', totalValidationLoss)\n \n \n print('----------- Saving Model')\n tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\\\n {'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\\\n {'loss': loss, 'forecast_originalScale': forecast_originalScale})\n print('saved model to:', TRAINED_MODEL_PATH)\n \n print('----------- Finis')", "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nimport tensorflow as tf\nimport pandas\nimport numpy as np\n\n\n\n\n\n\nDATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'\n\nDATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'\n\n\n\n\n\nTRAINED_MODEL_PATH = 'savedModel'\n\nTIME_STEPS = 10\nNUMBER_OF_DAYS_TO_FORECAST = 1\n\nBATCH_SIZE = 100\n\nNUM_EPOCHS = 100\n\nLSTM_UNITS = 250\n\nTENSORBOARD_LOGDIR = 'tensorboard_log'\n\n\n\n\n\n\ndata_train = pandas.read_csv(DATAFILE_TRAIN)\ndata_validate = pandas.read_csv(DATAFILE_VALIDATE)\n\n\n\n\n\n\ndata_train.head()\n\n\n\n\n\n\n\nnumTrainingData = len(data_train)\nnumValidationData = len(data_validate)\n\ntrainingData_date = data_train['date'][0:numTrainingData]\ntrainingData_sales = data_train['sales'][0:numTrainingData]\ntrainindData_price = data_train['price'][0:numTrainingData]\n\nvalidationData_date = data_validate['date'][0:numValidationData]\nvalidationData_sales = data_validate['sales'][0:numValidationData]\nvalidationData_price = data_validate['price'][0:numValidationData]\n\n\n\n\n\ntrainingData_sales.head()\n\n\n\n\n\nprint(len(trainingData_sales))\nprint(len(validationData_sales))\n\n\n\n\n\n\n\n\ntrainingData_sales_min = min(trainingData_sales)\ntrainingData_sales_max = max(trainingData_sales)\ntrainingData_sales_range = trainingData_sales_max - trainingData_sales_min\ntrainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]\n\nvalidationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]\n\n\n\n\n\n\nprint('Min:', trainingData_sales_min)\nprint('Range:', trainingData_sales_max - trainingData_sales_min)\n\n\n\n\n\n\ntrainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\ntargetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\nstart = 0\nfor i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]\n targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start = start + 1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n[trainingDataSequence_sales[i,:,0] for i in range(3)]\n\n\n\n\n\n\n[targetDataSequence_sales[i] for i in range(3)]\n\n\n\n\n\n\n\n\n\n\n\n\na = np.arange(len(targetDataSequence_sales))\nnp.random.shuffle(a)\ntrainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\ntargetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n\nloc = 0\nfor i in a:\n trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]\n targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]\n loc += 1\n\ntrainingDataSequence_sales = trainingDataSequence_sales_shuffle\ntargetDataSequence_sales = 
targetDataSequence_sales_shuffle\n\n\n\n\n\n\nvalidationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\nvalidationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n\nstart = 0\nfor i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]\n validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start += 1\n\n\n\n\n\n\ntf.reset_default_graph()\n\ninputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')\ntargetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')\n\n\ncell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')\n\n\n(output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)\n\n\nlastCellOutput = output[:,-1,:]\n\n\n\n\n\nprint('output:', output)\nprint('state:', state)\nprint('lastCellOutput:', lastCellOutput)\n\n\n\n\n\n\n\n\n\n\n\n\n\nweights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))\nbias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))\n\nforecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')\n\n\n\n\nforecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')\n\n\n\n\n\nprint(forecast)\nprint(forecast_originalScale)\n\n\n\n\n\n\n\nloss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')\n\ntf.summary.scalar(tensor=loss, name='loss')\n\n\n\n\n\noptimizer = tf.train.AdamOptimizer(learning_rate=0.1)\nminimize_step = optimizer.minimize(loss)\n\n\n\n\n\n\n\n\n\n\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n \n \n tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)\n \n \n all_summary_ops = tf.summary.merge_all()\n \n \n numSteps = 0\n for e in range(NUM_EPOCHS):\n print('starting training for epoch:', e + 1)\n \n startLocation = 0\n iteration = 0\n for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n tensorboard_writer.add_summary(summary_values, numSteps)\n numSteps += 1\n \n if (iteration + 1) % 1 == 0:\n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n 
startLocation += BATCH_SIZE\n \n \n if len(targetDataSequence_sales) > startLocation:\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n \n totalValidationLoss = 0\n startLocation = 0\n print('starting validation')\n for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):\n validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]\n \n (validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n \n startLocation += BATCH_SIZE\n totalValidationLoss += validationLsBatch\n \n print('first five predictions:', validationForecastBatch[0:5])\n print('first five actuals :', validationBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n if startLocation <= len(validationDataSequence_sales):\n validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]\n \n (validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n totalValidationLoss += validationLsBatch\n \n \n print('Validation completed after epoch:', e + 1, '. 
Total validation loss:', totalValidationLoss)\n \n \n print('----------- Saving Model')\n tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\\\n {'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\\\n {'loss': loss, 'forecast_originalScale': forecast_originalScale})\n print('saved model to:', TRAINED_MODEL_PATH)\n\nprint('----------- Finis')", "def gen_mutants():\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n import tensorflow as tf\n import pandas\n import numpy as np\n \n \n \n \n \n \n DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'\n \n DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'\n \n \n \n \n \n TRAINED_MODEL_PATH = 'savedModel'\n \n TIME_STEPS = 10\n NUMBER_OF_DAYS_TO_FORECAST = 1\n \n BATCH_SIZE = 100\n \n NUM_EPOCHS = 100\n \n LSTM_UNITS = 250\n \n TENSORBOARD_LOGDIR = 'tensorboard_log'\n \n \n \n \n \n \n data_train = pandas.read_csv(DATAFILE_TRAIN)\n data_validate = pandas.read_csv(DATAFILE_VALIDATE)\n \n \n \n \n \n \n data_train.head()\n \n \n \n \n \n \n \n numTrainingData = len(data_train)\n numValidationData = len(data_validate)\n \n trainingData_date = data_train['date'][0:numTrainingData]\n trainingData_sales = data_train['sales'][0:numTrainingData]\n trainindData_price = data_train['price'][0:numTrainingData]\n \n validationData_date = data_validate['date'][0:numValidationData]\n validationData_sales = data_validate['sales'][0:numValidationData]\n validationData_price = data_validate['price'][0:numValidationData]\n \n \n \n \n \n trainingData_sales.head()\n \n \n \n \n \n print(len(trainingData_sales))\n print(len(validationData_sales))\n \n \n \n \n \n \n \n \n trainingData_sales_min = min(trainingData_sales)\n trainingData_sales_max = max(trainingData_sales)\n trainingData_sales_range = trainingData_sales_max - trainingData_sales_min\n trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]\n \n validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]\n \n \n \n \n \n \n print('Min:', trainingData_sales_min)\n print('Range:', trainingData_sales_max - trainingData_sales_min)\n \n \n \n \n \n \n trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n start = 0\n for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]\n targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start = start + 1\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n [trainingDataSequence_sales[i,:,0] for i in range(3)]\n \n \n \n \n \n \n [targetDataSequence_sales[i] for i in range(3)]\n \n \n \n \n \n \n \n \n \n \n \n \n a = np.arange(len(targetDataSequence_sales))\n np.random.shuffle(a)\n trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n loc = 0\n for i in a:\n trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]\n 
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]\n loc += 1\n \n trainingDataSequence_sales = trainingDataSequence_sales_shuffle\n targetDataSequence_sales = targetDataSequence_sales_shuffle\n \n \n \n \n \n \n validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n start = 0\n for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]\n validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start += 1\n \n \n \n \n \n \n tf.reset_default_graph()\n \n inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')\n targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')\n \n \n cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')\n \n \n (output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)\n \n \n lastCellOutput = output[:,-1,:]\n \n \n \n \n \n print('output:', output)\n print('state:', state)\n print('lastCellOutput:', lastCellOutput)\n \n \n \n \n \n \n \n \n \n \n \n \n \n weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))\n bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))\n \n forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')\n \n \n \n \n forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')\n \n \n \n \n \n print(forecast)\n print(forecast_originalScale)\n \n \n \n \n \n \n \n loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')\n \n tf.summary.scalar(tensor=loss, name='loss')\n \n \n \n \n \n optimizer = tf.train.AdamOptimizer(learning_rate=0.1)\n minimize_step = optimizer.minimize(loss)\n \n \n \n \n \n \n \n \n \n \n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n \n \n tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)\n \n \n all_summary_ops = tf.summary.merge_all()\n \n \n numSteps = 0\n for e in range(NUM_EPOCHS):\n print('starting training for epoch:', e + 1)\n \n startLocation = 0\n iteration = 0\n for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n tensorboard_writer.add_summary(summary_values, numSteps)\n numSteps += 1\n \n if (iteration + 1) % 1 == 0:\n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals 
were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n startLocation += BATCH_SIZE\n \n \n if len(targetDataSequence_sales) > startLocation:\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[1:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n \n totalValidationLoss = 0\n startLocation = 0\n print('starting validation')\n for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):\n validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]\n \n (validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n \n startLocation += BATCH_SIZE\n totalValidationLoss += validationLsBatch\n \n print('first five predictions:', validationForecastBatch[0:5])\n print('first five actuals :', validationBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n if startLocation < len(validationDataSequence_sales):\n validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]\n \n (validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n totalValidationLoss += validationLsBatch\n \n \n print('Validation completed after epoch:', e + 1, '. 
Total validation loss:', totalValidationLoss)\n \n \n print('----------- Saving Model')\n tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\\\n {'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\\\n {'loss': loss, 'forecast_originalScale': forecast_originalScale})\n print('saved model to:', TRAINED_MODEL_PATH)\n \n print('----------- Finis')", "def gen_mutants():\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n import tensorflow as tf\n import pandas\n import numpy as np\n \n \n \n \n \n \n DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'\n \n DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'\n \n \n \n \n \n TRAINED_MODEL_PATH = 'savedModel'\n \n TIME_STEPS = 10\n NUMBER_OF_DAYS_TO_FORECAST = 1\n \n BATCH_SIZE = 100\n \n NUM_EPOCHS = 100\n \n LSTM_UNITS = 250\n \n TENSORBOARD_LOGDIR = 'tensorboard_log'\n \n \n \n \n \n \n data_train = pandas.read_csv(DATAFILE_TRAIN)\n data_validate = pandas.read_csv(DATAFILE_VALIDATE)\n \n \n \n \n \n \n data_train.head()\n \n \n \n \n \n \n \n numTrainingData = len(data_train)\n numValidationData = len(data_validate)\n \n trainingData_date = data_train['date'][0:numTrainingData]\n trainingData_sales = data_train['sales'][0:numTrainingData]\n trainindData_price = data_train['price'][0:numTrainingData]\n \n validationData_date = data_validate['date'][0:numValidationData]\n validationData_sales = data_validate['sales'][0:numValidationData]\n validationData_price = data_validate['price'][0:numValidationData]\n \n \n \n \n \n trainingData_sales.head()\n \n \n \n \n \n print(len(trainingData_sales))\n print(len(validationData_sales))\n \n \n \n \n \n \n \n \n trainingData_sales_min = min(trainingData_sales)\n trainingData_sales_max = max(trainingData_sales)\n trainingData_sales_range = trainingData_sales_max - trainingData_sales_min\n trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]\n \n validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]\n \n \n \n \n \n \n print('Min:', trainingData_sales_min)\n print('Range:', trainingData_sales_max - trainingData_sales_min)\n \n \n \n \n \n \n trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n start = 0\n for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]\n targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start = start + 1\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n [trainingDataSequence_sales[i,:,0] for i in range(3)]\n \n \n \n \n \n \n [targetDataSequence_sales[i] for i in range(3)]\n \n \n \n \n \n \n \n \n \n \n \n \n a = np.arange(len(targetDataSequence_sales))\n np.random.shuffle(a)\n trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n loc = 0\n for i in a:\n trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]\n 
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]\n loc += 1\n \n trainingDataSequence_sales = trainingDataSequence_sales_shuffle\n targetDataSequence_sales = targetDataSequence_sales_shuffle\n \n \n \n \n \n \n validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n start = 0\n for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]\n validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start += 1\n \n \n \n \n \n \n tf.reset_default_graph()\n \n inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')\n targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')\n \n \n cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')\n \n \n (output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)\n \n \n lastCellOutput = output[:,-1,:]\n \n \n \n \n \n print('output:', output)\n print('state:', state)\n print('lastCellOutput:', lastCellOutput)\n \n \n \n \n \n \n \n \n \n \n \n \n \n weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))\n bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))\n \n forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')\n \n \n \n \n forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')\n \n \n \n \n \n print(forecast)\n print(forecast_originalScale)\n \n \n \n \n \n \n \n loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')\n \n tf.summary.scalar(tensor=loss, name='loss')\n \n \n \n \n \n optimizer = tf.train.AdamOptimizer(learning_rate=0.1)\n minimize_step = optimizer.minimize(loss)\n \n \n \n \n \n \n \n \n \n \n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n \n \n tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)\n \n \n all_summary_ops = tf.summary.merge_all()\n \n \n numSteps = 0\n for e in range(NUM_EPOCHS):\n print('starting training for epoch:', e + 1)\n \n startLocation = 0\n iteration = 0\n for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n tensorboard_writer.add_summary(summary_values, numSteps)\n numSteps += 1\n \n if (iteration + 1) % 1 == 0:\n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals 
were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n startLocation += BATCH_SIZE\n \n \n if len(targetDataSequence_sales) > startLocation:\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n \n totalValidationLoss = 0\n startLocation = 0\n print('starting validation')\n for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):\n validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]\n \n (validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n \n startLocation += BATCH_SIZE\n totalValidationLoss += validationLsBatch\n \n print('first five predictions:', validationForecastBatch[0:5])\n print('first five actuals :', validationBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n if startLocation < len(validationDataSequence_sales):\n validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]\n \n (validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n totalValidationLoss += validationLsBatch\n \n \n print('Validation completed after epoch:', e + 1, '. 
Total validation loss:', totalValidationLoss)\n \n \n print('----------- Saving Model')\n tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\\\n {'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\\\n {'loss': loss, 'forecast_originalScale': forecast_originalScale})\n print('mutpy', TRAINED_MODEL_PATH)\n \n print('----------- Finis')", "def gen_mutants():\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n import tensorflow as tf\n import pandas\n import numpy as np\n \n \n \n \n \n \n DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'\n \n DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'\n \n \n \n \n \n TRAINED_MODEL_PATH = 'savedModel'\n \n TIME_STEPS = 10\n NUMBER_OF_DAYS_TO_FORECAST = 1\n \n BATCH_SIZE = 100\n \n NUM_EPOCHS = 100\n \n LSTM_UNITS = 250\n \n TENSORBOARD_LOGDIR = 'tensorboard_log'\n \n \n \n \n \n \n data_train = pandas.read_csv(DATAFILE_TRAIN)\n data_validate = pandas.read_csv(DATAFILE_VALIDATE)\n \n \n \n \n \n \n data_train.head()\n \n \n \n \n \n \n \n numTrainingData = len(data_train)\n numValidationData = len(data_validate)\n \n trainingData_date = data_train['date'][0:numTrainingData]\n trainingData_sales = data_train['sales'][0:numTrainingData]\n trainindData_price = data_train['price'][0:numTrainingData]\n \n validationData_date = data_validate['date'][0:numValidationData]\n validationData_sales = data_validate['sales'][0:numValidationData]\n validationData_price = data_validate['price'][0:numValidationData]\n \n \n \n \n \n trainingData_sales.head()\n \n \n \n \n \n print(len(trainingData_sales))\n print(len(validationData_sales))\n \n \n \n \n \n \n \n \n trainingData_sales_min = min(trainingData_sales)\n trainingData_sales_max = max(trainingData_sales)\n trainingData_sales_range = trainingData_sales_max - trainingData_sales_min\n trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]\n \n validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]\n \n \n \n \n \n \n print('Min:', trainingData_sales_min)\n print('Range:', trainingData_sales_max - trainingData_sales_min)\n \n \n \n \n \n \n trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n start = 0\n for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]\n targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start = start + 1\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n [trainingDataSequence_sales[i,:,0] for i in range(3)]\n \n \n \n \n \n \n [targetDataSequence_sales[i] for i in range(3)]\n \n \n \n \n \n \n \n \n \n \n \n \n a = np.arange(len(targetDataSequence_sales))\n np.random.shuffle(a)\n trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n loc = 0\n for i in a:\n trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]\n 
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]\n loc += 1\n \n trainingDataSequence_sales = trainingDataSequence_sales_shuffle\n targetDataSequence_sales = targetDataSequence_sales_shuffle\n \n \n \n \n \n \n validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n start = 0\n for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]\n validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start += 1\n \n \n \n \n \n \n tf.reset_default_graph()\n \n inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')\n targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')\n \n \n cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')\n \n \n (output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)\n \n \n lastCellOutput = output[:,-1,:]\n \n \n \n \n \n print('output:', output)\n print('state:', state)\n print('lastCellOutput:', lastCellOutput)\n \n \n \n \n \n \n \n \n \n \n \n \n \n weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))\n bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))\n \n forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')\n \n \n \n \n forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')\n \n \n \n \n \n print(forecast)\n print(forecast_originalScale)\n \n \n \n \n \n \n \n loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')\n \n tf.summary.scalar(tensor=loss, name='loss')\n \n \n \n \n \n optimizer = tf.train.AdamOptimizer(learning_rate=0.1)\n minimize_step = optimizer.minimize(loss)\n \n \n \n \n \n \n \n \n \n \n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n \n \n tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)\n \n \n all_summary_ops = tf.summary.merge_all()\n \n \n numSteps = 0\n for e in range(NUM_EPOCHS):\n print('starting training for epoch:', e + 1)\n \n startLocation = 0\n iteration = 0\n for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n tensorboard_writer.add_summary(summary_values, numSteps)\n numSteps += 1\n \n if (iteration + 1) % 1 == 0:\n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals 
were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n startLocation += BATCH_SIZE\n \n \n if len(targetDataSequence_sales) > startLocation:\n print('', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n \n totalValidationLoss = 0\n startLocation = 0\n print('starting validation')\n for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):\n validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]\n \n (validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n \n startLocation += BATCH_SIZE\n totalValidationLoss += validationLsBatch\n \n print('first five predictions:', validationForecastBatch[0:5])\n print('first five actuals :', validationBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n if startLocation < len(validationDataSequence_sales):\n validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]\n \n (validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n totalValidationLoss += validationLsBatch\n \n \n print('Validation completed after epoch:', e + 1, '. 
Total validation loss:', totalValidationLoss)\n \n \n print('----------- Saving Model')\n tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\\\n {'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\\\n {'loss': loss, 'forecast_originalScale': forecast_originalScale})\n print('saved model to:', TRAINED_MODEL_PATH)\n \n print('----------- Finis')", "def gen_mutants():\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n import tensorflow as tf\n import pandas\n import numpy as np\n \n \n \n \n \n \n DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'\n \n DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'\n \n \n \n \n \n TRAINED_MODEL_PATH = 'savedModel'\n \n TIME_STEPS = 10\n NUMBER_OF_DAYS_TO_FORECAST = 1\n \n BATCH_SIZE = 100\n \n NUM_EPOCHS = 100\n \n LSTM_UNITS = 250\n \n TENSORBOARD_LOGDIR = 'tensorboard_log'\n \n \n \n \n \n \n data_train = pandas.read_csv(DATAFILE_TRAIN)\n data_validate = pandas.read_csv(DATAFILE_VALIDATE)\n \n \n \n \n \n \n data_train.head()\n \n \n \n \n \n \n \n numTrainingData = len(data_train)\n numValidationData = len(data_validate)\n \n trainingData_date = data_train['date'][0:numTrainingData]\n trainingData_sales = data_train['sales'][0:numTrainingData]\n trainindData_price = data_train['price'][0:numTrainingData]\n \n validationData_date = data_validate['date'][0:numValidationData]\n validationData_sales = data_validate['sales'][0:numValidationData]\n validationData_price = data_validate['price'][0:numValidationData]\n \n \n \n \n \n trainingData_sales.head()\n \n \n \n \n \n print(len(trainingData_sales))\n print(len(validationData_sales))\n \n \n \n \n \n \n \n \n trainingData_sales_min = min(trainingData_sales)\n trainingData_sales_max = max(trainingData_sales)\n trainingData_sales_range = trainingData_sales_max - trainingData_sales_min\n trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]\n \n validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]\n \n \n \n \n \n \n print('Min:', trainingData_sales_min)\n print('Range:', trainingData_sales_max - trainingData_sales_min)\n \n \n \n \n \n \n trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n start = 0\n for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]\n targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start = start + 1\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n [trainingDataSequence_sales[i,:,0] for i in range(3)]\n \n \n \n \n \n \n [targetDataSequence_sales[i] for i in range(3)]\n \n \n \n \n \n \n \n \n \n \n \n \n a = np.arange(len(targetDataSequence_sales))\n np.random.shuffle(a)\n trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n loc = 0\n for i in a:\n trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]\n 
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]\n loc += 1\n \n trainingDataSequence_sales = trainingDataSequence_sales_shuffle\n targetDataSequence_sales = targetDataSequence_sales_shuffle\n \n \n \n \n \n \n validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n start = 0\n for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]\n validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start += 1\n \n \n \n \n \n \n tf.reset_default_graph()\n \n inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')\n targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')\n \n \n cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')\n \n \n (output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)\n \n \n lastCellOutput = output[:,-1,:]\n \n \n \n \n \n print('output:', output)\n print('state:', state)\n print('lastCellOutput:', lastCellOutput)\n \n \n \n \n \n \n \n \n \n \n \n \n \n weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))\n bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))\n \n forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')\n \n \n \n \n forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')\n \n \n \n \n \n print(forecast)\n print(forecast_originalScale)\n \n \n \n \n \n \n \n loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')\n \n tf.summary.scalar(tensor=loss, name='loss')\n \n \n \n \n \n optimizer = tf.train.AdamOptimizer(learning_rate=0.1)\n minimize_step = optimizer.minimize(loss)\n \n \n \n \n \n \n \n \n \n \n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n \n \n tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)\n \n \n all_summary_ops = tf.summary.merge_all()\n \n \n numSteps = 0\n for e in range(NUM_EPOCHS):\n print('starting training for epoch:', e + 1)\n \n startLocation = 0\n iteration = 0\n for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n tensorboard_writer.add_summary(summary_values, numSteps)\n numSteps += 1\n \n if (iteration + 1) % 1 == 0:\n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals 
were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n startLocation += BATCH_SIZE\n \n \n if len(targetDataSequence_sales) > startLocation:\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) - trainingData_sales_min)\n \n \n \n totalValidationLoss = 0\n startLocation = 0\n print('starting validation')\n for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):\n validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]\n \n (validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n \n startLocation += BATCH_SIZE\n totalValidationLoss += validationLsBatch\n \n print('first five predictions:', validationForecastBatch[0:5])\n print('first five actuals :', validationBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n if startLocation < len(validationDataSequence_sales):\n validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]\n \n (validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n totalValidationLoss += validationLsBatch\n \n \n print('Validation completed after epoch:', e + 1, '. 
Total validation loss:', totalValidationLoss)\n \n \n print('----------- Saving Model')\n tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\\\n {'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\\\n {'loss': loss, 'forecast_originalScale': forecast_originalScale})\n print('saved model to:', TRAINED_MODEL_PATH)\n \n print('----------- Finis')", "def gen_mutant():\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n import tensorflow as tf\n from tensorflow.python.saved_model import tag_constants\n import pandas as pd\n import numpy as np\n \n \n \n \n \n \n DATAFILE_VALIDATE = '/home/ubuntu/anurag/rnn/data_for_MRs/mock_kaggle_edit_validate_normalise.csv'\n \n TRAINED_MODEL_PATH = '/home/ubuntu/anurag/rnn/savedModel'\n \n TIME_STEPS = 10\n NUMBER_OF_DAYS_TO_FORECAST = 1\n \n BATCH_SIZE = 100\n \n \n \n \n \n \n \n MIN = 0\n RANGE = 542\n \n \n \n \n \n \n data_validate = pd.read_csv(DATAFILE_VALIDATE)\n \n numValidationData = len(data_validate)\n \n validationData_sales = data_validate['sales_add_309'][0:numValidationData]\n \n \n \n \n \n print(len(validationData_sales))\n \n \n \n \n \n validationData_sales_normalised = [(i - MIN) / RANGE for i in validationData_sales]\n \n \n \n \n \n \n validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n start = 0\n for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n validationDataSequence_sales[start,:,1] = validationData_sales_normalised[start:i]\n validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start += 1\n \n \n \n \n \n validationDataSequence_sales_target.shape\n \n \n \n \n \n with tf.Session() as sess:\n print('Loading the model from:', TRAINED_MODEL_PATH)\n tf.saved_model.loader.load(sess=sess, export_dir=TRAINED_MODEL_PATH, tags=[tag_constants.SERVING])\n \n \n \n inputSequence = tf.get_default_graph().get_tensor_by_name('inputSequencePlaceholder:0')\n targetForecast = tf.get_default_graph().get_tensor_by_name('targetPlaceholder:0')\n \n loss = tf.get_default_graph().get_tensor_by_name('loss_comp:0')\n forecast_originalScale = tf.get_default_graph().get_tensor_by_name('forecast_original_scale:0')\n \n startLoc = 0\n totalLoss = 0\n for i in range(0, len(validationDataSequence_sales) // BATCH_SIZE):\n sequence = validationDataSequence_sales[startLoc:startLoc + BATCH_SIZE,:,:]\n target = validationDataSequence_sales_target[startLoc:startLoc + BATCH_SIZE]\n (fcast, ls) = sess.run([forecast_originalScale, loss], feed_dict={inputSequence: sequence, targetForecast: target})\n \n print('first five predictions (original scale):', fcast[0:5])\n print('first five actuals (original scale) :', (target[0:5] * RANGE) + MIN)\n totalLoss += ls\n startLoc += BATCH_SIZE\n \n if startLoc < len(validationDataSequence_sales):\n sequence = validationDataSequence_sales[startLoc:]\n target = validationDataSequence_sales_target[startLoc:]\n (fcast, ls) = sess.run([forecast_originalScale, loss], feed_dict={inputSequence: sequence, targetForecast: target})\n totalLoss += ls\n \n print('Validation complete. 
Total loss:', totalLoss)", "def gen_mutants():\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n import tensorflow as tf\n import pandas\n import numpy as np\n \n \n \n \n \n \n DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'\n \n DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'\n \n \n \n \n \n TRAINED_MODEL_PATH = 'savedModel'\n \n TIME_STEPS = 10\n NUMBER_OF_DAYS_TO_FORECAST = 1\n \n BATCH_SIZE = 100\n \n NUM_EPOCHS = 100\n \n LSTM_UNITS = 250\n \n TENSORBOARD_LOGDIR = 'tensorboard_log'\n \n \n \n \n \n \n data_train = pandas.read_csv(DATAFILE_TRAIN)\n data_validate = pandas.read_csv(DATAFILE_VALIDATE)\n \n \n \n \n \n \n data_train.head()\n \n \n \n \n \n \n \n numTrainingData = len(data_train)\n numValidationData = len(data_validate)\n \n trainingData_date = data_train['date'][0:numTrainingData]\n trainingData_sales = data_train['sales'][0:numTrainingData]\n trainindData_price = data_train['price'][0:numTrainingData]\n \n validationData_date = data_validate['date'][0:numValidationData]\n validationData_sales = data_validate['sales'][0:numValidationData]\n validationData_price = data_validate['price'][0:numValidationData]\n \n \n \n \n \n trainingData_sales.head()\n \n \n \n \n \n print(len(trainingData_sales))\n print(len(validationData_sales))\n \n \n \n \n \n \n \n \n trainingData_sales_min = min(trainingData_sales)\n trainingData_sales_max = max(trainingData_sales)\n trainingData_sales_range = trainingData_sales_max - trainingData_sales_min\n trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]\n \n validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]\n \n \n \n \n \n \n print('Min:', trainingData_sales_min)\n print('Range:', trainingData_sales_max - trainingData_sales_min)\n \n \n \n \n \n \n trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n start = 0\n for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]\n targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start = start + 1\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n [trainingDataSequence_sales[i,:,0] for i in range(3)]\n \n \n \n \n \n \n [targetDataSequence_sales[i] for i in range(3)]\n \n \n \n \n \n \n \n \n \n \n \n \n a = np.arange(len(targetDataSequence_sales))\n np.random.shuffle(a)\n trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n loc = 0\n for i in a:\n trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]\n targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]\n loc += 1\n \n trainingDataSequence_sales = trainingDataSequence_sales_shuffle\n targetDataSequence_sales = targetDataSequence_sales_shuffle\n \n \n \n \n \n \n validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n 
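# matching target array: one row of NUMBER_OF_DAYS_TO_FORECAST values per validation window\n 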
validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n start = 0\n for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]\n validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start += 1\n \n \n \n \n \n \n tf.reset_default_graph()\n \n inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')\n targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')\n \n \n cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')\n \n \n (output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)\n \n \n lastCellOutput = output[:,1,:]\n \n \n \n \n \n print('output:', output)\n print('state:', state)\n print('lastCellOutput:', lastCellOutput)\n \n \n \n \n \n \n \n \n \n \n \n \n \n weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))\n bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))\n \n forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')\n \n \n \n \n forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')\n \n \n \n \n \n print(forecast)\n print(forecast_originalScale)\n \n \n \n \n \n \n \n loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')\n \n tf.summary.scalar(tensor=loss, name='loss')\n \n \n \n \n \n optimizer = tf.train.AdamOptimizer(learning_rate=0.1)\n minimize_step = optimizer.minimize(loss)\n \n \n \n \n \n \n \n \n \n \n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n \n \n tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)\n \n \n all_summary_ops = tf.summary.merge_all()\n \n \n numSteps = 0\n for e in range(NUM_EPOCHS):\n print('starting training for epoch:', e + 1)\n \n startLocation = 0\n iteration = 0\n for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n tensorboard_writer.add_summary(summary_values, numSteps)\n numSteps += 1\n \n if (iteration + 1) % 1 == 0:\n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n startLocation += BATCH_SIZE\n \n \n if len(targetDataSequence_sales) > startLocation:\n print('epoch:', e 
+ 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n \n totalValidationLoss = 0\n startLocation = 0\n print('starting validation')\n for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):\n validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]\n \n (validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n \n startLocation += BATCH_SIZE\n totalValidationLoss += validationLsBatch\n \n print('first five predictions:', validationForecastBatch[0:5])\n print('first five actuals :', validationBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n if startLocation < len(validationDataSequence_sales):\n validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]\n \n (validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n totalValidationLoss += validationLsBatch\n \n \n print('Validation completed after epoch:', e + 1, '. 
Total validation loss:', totalValidationLoss)\n \n \n print('----------- Saving Model')\n tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\\\n {'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\\\n {'loss': loss, 'forecast_originalScale': forecast_originalScale})\n print('saved model to:', TRAINED_MODEL_PATH)\n \n print('----------- Finis')", "def gen_mutants():\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n import tensorflow as tf\n import pandas\n import numpy as np\n \n \n \n \n \n \n DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'\n \n DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'\n \n \n \n \n \n TRAINED_MODEL_PATH = 'savedModel'\n \n TIME_STEPS = 10\n NUMBER_OF_DAYS_TO_FORECAST = 1\n \n BATCH_SIZE = 100\n \n NUM_EPOCHS = 100\n \n LSTM_UNITS = 250\n \n TENSORBOARD_LOGDIR = 'tensorboard_log'\n \n \n \n \n \n \n data_train = pandas.read_csv(DATAFILE_TRAIN)\n data_validate = pandas.read_csv(DATAFILE_VALIDATE)\n \n \n \n \n \n \n data_train.head()\n \n \n \n \n \n \n \n numTrainingData = len(data_train)\n numValidationData = len(data_validate)\n \n trainingData_date = data_train['date'][0:numTrainingData]\n trainingData_sales = data_train['sales'][0:numTrainingData]\n trainindData_price = data_train['price'][0:numTrainingData]\n \n validationData_date = data_validate['date'][0:numValidationData]\n validationData_sales = data_validate['sales'][0:numValidationData]\n validationData_price = data_validate['price'][0:numValidationData]\n \n \n \n \n \n trainingData_sales.head()\n \n \n \n \n \n print(len(trainingData_sales))\n print(len(validationData_sales))\n \n \n \n \n \n \n \n \n trainingData_sales_min = min(trainingData_sales)\n trainingData_sales_max = max(trainingData_sales)\n trainingData_sales_range = trainingData_sales_max - trainingData_sales_min\n trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]\n \n validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]\n \n \n \n \n \n \n print('Min:', trainingData_sales_min)\n print('Range:', trainingData_sales_max - trainingData_sales_min)\n \n \n \n \n \n \n trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n start = 0\n for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]\n targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start = start + 1\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n [trainingDataSequence_sales[i,:,0] for i in range(3)]\n \n \n \n \n \n \n [targetDataSequence_sales[i] for i in range(3)]\n \n \n \n \n \n \n \n \n \n \n \n \n a = np.arange(len(targetDataSequence_sales))\n np.random.shuffle(a)\n trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n loc = 0\n for i in a:\n trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]\n 
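# apply the identical permutation to the target rows, keeping input/target pairs aligned\n 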
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]\n loc += 1\n \n trainingDataSequence_sales = trainingDataSequence_sales_shuffle\n targetDataSequence_sales = targetDataSequence_sales_shuffle\n \n \n \n \n \n \n validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n start = 0\n for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]\n validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start += 1\n \n \n \n \n \n \n tf.reset_default_graph()\n \n inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')\n targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')\n \n \n cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')\n \n \n (output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)\n \n \n lastCellOutput = output[:,-1,:]\n \n \n \n \n \n print('output:', output)\n print('state:', state)\n print('lastCellOutput:', lastCellOutput)\n \n \n \n \n \n \n \n \n \n \n \n \n \n weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))\n bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))\n \n forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')\n \n \n \n \n forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')\n \n \n \n \n \n print(forecast)\n print(forecast_originalScale)\n \n \n \n \n \n \n \n loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')\n \n tf.summary.scalar(tensor=loss, name='loss')\n \n \n \n \n \n optimizer = tf.train.AdamOptimizer(learning_rate=0.1)\n minimize_step = optimizer.minimize(loss)\n \n \n \n \n \n \n \n \n \n \n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n \n \n tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)\n \n \n all_summary_ops = tf.summary.merge_all()\n \n \n numSteps = 0\n for e in range(NUM_EPOCHS):\n print('starting training for epoch:', e + 1)\n \n startLocation = 0\n iteration = 0\n for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:,:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n tensorboard_writer.add_summary(summary_values, numSteps)\n numSteps += 1\n \n if (iteration + 1) % 1 == 0:\n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', 
trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n startLocation += BATCH_SIZE\n \n \n if len(targetDataSequence_sales) > startLocation:\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n \n totalValidationLoss = 0\n startLocation = 0\n print('starting validation')\n for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):\n validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]\n \n (validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n \n startLocation += BATCH_SIZE\n totalValidationLoss += validationLsBatch\n \n print('first five predictions:', validationForecastBatch[0:5])\n print('first five actuals :', validationBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n if startLocation < len(validationDataSequence_sales):\n validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]\n \n (validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n totalValidationLoss += validationLsBatch\n \n \n print('Validation completed after epoch:', e + 1, '. 
Total validation loss:', totalValidationLoss)\n \n \n print('----------- Saving Model')\n tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\\\n {'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\\\n {'loss': loss, 'forecast_originalScale': forecast_originalScale})\n print('saved model to:', TRAINED_MODEL_PATH)\n \n print('----------- Finis')", "def gen_mutants():\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n import tensorflow as tf\n import pandas\n import numpy as np\n \n \n \n \n \n \n DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'\n \n DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'\n \n \n \n \n \n TRAINED_MODEL_PATH = 'savedModel'\n \n TIME_STEPS = 10\n NUMBER_OF_DAYS_TO_FORECAST = 1\n \n BATCH_SIZE = 100\n \n NUM_EPOCHS = 100\n \n LSTM_UNITS = 250\n \n TENSORBOARD_LOGDIR = 'tensorboard_log'\n \n \n \n \n \n \n data_train = pandas.read_csv(DATAFILE_TRAIN)\n data_validate = pandas.read_csv(DATAFILE_VALIDATE)\n \n \n \n \n \n \n data_train.head()\n \n \n \n \n \n \n \n numTrainingData = len(data_train)\n numValidationData = len(data_validate)\n \n trainingData_date = data_train['date'][0:numTrainingData]\n trainingData_sales = data_train['sales'][0:numTrainingData]\n trainindData_price = data_train['price'][0:numTrainingData]\n \n validationData_date = data_validate['date'][0:numValidationData]\n validationData_sales = data_validate['sales'][0:numValidationData]\n validationData_price = data_validate['price'][0:numValidationData]\n \n \n \n \n \n trainingData_sales.head()\n \n \n \n \n \n print(len(trainingData_sales))\n print(len(validationData_sales))\n \n \n \n \n \n \n \n \n trainingData_sales_min = min(trainingData_sales)\n trainingData_sales_max = max(trainingData_sales)\n trainingData_sales_range = trainingData_sales_max - trainingData_sales_min\n trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]\n \n validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]\n \n \n \n \n \n \n print('Min:', trainingData_sales_min)\n print('Range:', trainingData_sales_max - trainingData_sales_min)\n \n \n \n \n \n \n trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n start = 0\n for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]\n targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start = start + 1\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n [trainingDataSequence_sales[i,:,0] for i in range(3)]\n \n \n \n \n \n \n [targetDataSequence_sales[i] for i in range(3)]\n \n \n \n \n \n \n \n \n \n \n \n \n a = np.arange(len(targetDataSequence_sales))\n np.random.shuffle(a)\n trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n loc = 0\n for i in a:\n trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]\n 
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]\n loc += 1\n \n trainingDataSequence_sales = trainingDataSequence_sales_shuffle\n targetDataSequence_sales = targetDataSequence_sales_shuffle\n \n \n \n \n \n \n validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n start = 0\n for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]\n validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start += 1\n \n \n \n \n \n \n tf.reset_default_graph()\n \n inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')\n targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')\n \n \n cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')\n \n \n (output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)\n \n \n lastCellOutput = output[:,-1,:]\n \n \n \n \n \n print('output:', output)\n print('state:', state)\n print('lastCellOutput:', lastCellOutput)\n \n \n \n \n \n \n \n \n \n \n \n \n \n weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))\n bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))\n \n forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')\n \n \n \n \n forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')\n \n \n \n \n \n print(forecast)\n print(forecast_originalScale)\n \n \n \n \n \n \n \n loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')\n \n tf.summary.scalar(tensor=loss, name='loss')\n \n \n \n \n \n optimizer = tf.train.AdamOptimizer(learning_rate=0.1)\n minimize_step = optimizer.minimize(loss)\n \n \n \n \n \n \n \n \n \n \n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n \n \n tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)\n \n \n all_summary_ops = tf.summary.merge_all()\n \n \n numSteps = 0\n for e in range(NUM_EPOCHS):\n print('starting training for epoch:', e + 2)\n \n startLocation = 0\n iteration = 0\n for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n tensorboard_writer.add_summary(summary_values, numSteps)\n numSteps += 1\n \n if (iteration + 1) % 1 == 0:\n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals 
were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n startLocation += BATCH_SIZE\n \n \n if len(targetDataSequence_sales) > startLocation:\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n \n totalValidationLoss = 0\n startLocation = 0\n print('starting validation')\n for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):\n validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]\n \n (validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n \n startLocation += BATCH_SIZE\n totalValidationLoss += validationLsBatch\n \n print('first five predictions:', validationForecastBatch[0:5])\n print('first five actuals :', validationBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n if startLocation < len(validationDataSequence_sales):\n validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]\n \n (validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n totalValidationLoss += validationLsBatch\n \n \n print('Validation completed after epoch:', e + 1, '. 
Total validation loss:', totalValidationLoss)\n \n \n print('----------- Saving Model')\n tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\\\n {'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\\\n {'loss': loss, 'forecast_originalScale': forecast_originalScale})\n print('saved model to:', TRAINED_MODEL_PATH)\n \n print('----------- Finis')", "def gen_mutants():\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n import tensorflow as tf\n import pandas\n import numpy as np\n \n \n \n \n \n \n DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'\n \n DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'\n \n \n \n \n \n TRAINED_MODEL_PATH = 'savedModel'\n \n TIME_STEPS = 10\n NUMBER_OF_DAYS_TO_FORECAST = 1\n \n BATCH_SIZE = 100\n \n NUM_EPOCHS = 100\n \n LSTM_UNITS = 250\n \n TENSORBOARD_LOGDIR = 'tensorboard_log'\n \n \n \n \n \n \n data_train = pandas.read_csv(DATAFILE_TRAIN)\n data_validate = pandas.read_csv(DATAFILE_VALIDATE)\n \n \n \n \n \n \n data_train.head()\n \n \n \n \n \n \n \n numTrainingData = len(data_train)\n numValidationData = len(data_validate)\n \n trainingData_date = data_train['date'][0:numTrainingData]\n trainingData_sales = data_train['sales'][0:numTrainingData]\n trainindData_price = data_train['price'][0:numTrainingData]\n \n validationData_date = data_validate['date'][0:numValidationData]\n validationData_sales = data_validate['sales'][0:numValidationData]\n validationData_price = data_validate['price'][0:numValidationData]\n \n \n \n \n \n trainingData_sales.head()\n \n \n \n \n \n print(len(trainingData_sales))\n print(len(validationData_sales))\n \n \n \n \n \n \n \n \n trainingData_sales_min = min(trainingData_sales)\n trainingData_sales_max = max(trainingData_sales)\n trainingData_sales_range = trainingData_sales_max - trainingData_sales_min\n trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]\n \n validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]\n \n \n \n \n \n \n print('Min:', trainingData_sales_min)\n print('Range:', trainingData_sales_max - trainingData_sales_min)\n \n \n \n \n \n \n trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n start = 0\n for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]\n targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start = start + 1\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n [trainingDataSequence_sales[i,:,0] for i in range(3)]\n \n \n \n \n \n \n [targetDataSequence_sales[i] for i in range(3)]\n \n \n \n \n \n \n \n \n \n \n \n \n a = np.arange(len(targetDataSequence_sales))\n np.random.shuffle(a)\n trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n loc = 0\n for i in a:\n trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]\n 
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]\n loc += 1\n \n trainingDataSequence_sales = trainingDataSequence_sales_shuffle\n targetDataSequence_sales = targetDataSequence_sales_shuffle\n \n \n \n \n \n \n validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n start = 0\n for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]\n validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start += 1\n \n \n \n \n \n \n tf.reset_default_graph()\n \n inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')\n targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')\n \n \n cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')\n \n \n (output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)\n \n \n lastCellOutput = output[:,-1,:]\n \n \n \n \n \n print('output:', output)\n print('state:', state)\n print('lastCellOutput:', lastCellOutput)\n \n \n \n \n \n \n \n \n \n \n \n \n \n weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))\n bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))\n \n forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')\n \n \n \n \n forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')\n \n \n \n \n \n print(forecast)\n print(forecast_originalScale)\n \n \n \n \n \n \n \n loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')\n \n tf.summary.scalar(tensor=loss, name='loss')\n \n \n \n \n \n optimizer = tf.train.AdamOptimizer(learning_rate=0.1)\n minimize_step = optimizer.minimize(loss)\n \n \n \n \n \n \n \n \n \n \n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n \n \n tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)\n \n \n all_summary_ops = tf.summary.merge_all()\n \n \n numSteps = 0\n for e in range(NUM_EPOCHS):\n print('starting training for epoch:', e + 1)\n \n startLocation = 0\n iteration = 0\n for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n tensorboard_writer.add_summary(summary_values, numSteps)\n numSteps += 1\n \n if (iteration + 1) % 1 == 0:\n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals 
were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n startLocation += BATCH_SIZE\n \n \n if len(targetDataSequence_sales) > startLocation:\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n \n totalValidationLoss = 0\n startLocation = 0\n print('starting validation')\n for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):\n validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]\n \n (validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n \n startLocation += BATCH_SIZE\n totalValidationLoss += validationLsBatch\n \n print('first five predictions:', validationForecastBatch[0:5])\n print('first five actuals :', validationBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n if startLocation > len(validationDataSequence_sales):\n validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]\n \n (validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n totalValidationLoss += validationLsBatch\n \n \n print('Validation completed after epoch:', e + 1, '. 
Total validation loss:', totalValidationLoss)\n \n \n print('----------- Saving Model')\n tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\\\n {'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\\\n {'loss': loss, 'forecast_originalScale': forecast_originalScale})\n print('saved model to:', TRAINED_MODEL_PATH)\n \n print('----------- Finis')", "def gen_mutants():\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n import tensorflow as tf\n import pandas\n import numpy as np\n \n \n \n \n \n \n DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'\n \n DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'\n \n \n \n \n \n TRAINED_MODEL_PATH = 'savedModel'\n \n TIME_STEPS = 10\n NUMBER_OF_DAYS_TO_FORECAST = 1\n \n BATCH_SIZE = 100\n \n NUM_EPOCHS = 100\n \n LSTM_UNITS = 250\n \n TENSORBOARD_LOGDIR = 'tensorboard_log'\n \n \n \n \n \n \n data_train = pandas.read_csv(DATAFILE_TRAIN)\n data_validate = pandas.read_csv(DATAFILE_VALIDATE)\n \n \n \n \n \n \n data_train.head()\n \n \n \n \n \n \n \n numTrainingData = len(data_train)\n numValidationData = len(data_validate)\n \n trainingData_date = data_train['date'][0:numTrainingData]\n trainingData_sales = data_train['sales'][0:numTrainingData]\n trainindData_price = data_train['price'][0:numTrainingData]\n \n validationData_date = data_validate['date'][0:numValidationData]\n validationData_sales = data_validate['sales'][0:numValidationData]\n validationData_price = data_validate['price'][0:numValidationData]\n \n \n \n \n \n trainingData_sales.head()\n \n \n \n \n \n print(len(trainingData_sales))\n print(len(validationData_sales))\n \n \n \n \n \n \n \n \n trainingData_sales_min = min(trainingData_sales)\n trainingData_sales_max = max(trainingData_sales)\n trainingData_sales_range = trainingData_sales_max - trainingData_sales_min\n trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]\n \n validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]\n \n \n \n \n \n \n print('Min:', trainingData_sales_min)\n print('Range:', trainingData_sales_max - trainingData_sales_min)\n \n \n \n \n \n \n trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n start = 0\n for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]\n targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start = start + 1\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n [trainingDataSequence_sales[i,:,0] for i in range(3)]\n \n \n \n \n \n \n [targetDataSequence_sales[i] for i in range(3)]\n \n \n \n \n \n \n \n \n \n \n \n \n a = np.arange(len(targetDataSequence_sales))\n np.random.shuffle(a)\n trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n loc = 0\n for i in a:\n trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]\n 
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]\n loc += 1\n \n trainingDataSequence_sales = trainingDataSequence_sales_shuffle\n targetDataSequence_sales = targetDataSequence_sales_shuffle\n \n \n \n \n \n \n validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))\n validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))\n \n start = 0\n for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):\n validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]\n validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]\n start += 1\n \n \n \n \n \n \n tf.reset_default_graph()\n \n inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')\n targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')\n \n \n cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')\n \n \n (output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)\n \n \n lastCellOutput = output[:,-1,:]\n \n \n \n \n \n print('output:', output)\n print('state:', state)\n print('lastCellOutput:', lastCellOutput)\n \n \n \n \n \n \n \n \n \n \n \n \n \n weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))\n bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))\n \n forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')\n \n \n \n \n forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')\n \n \n \n \n \n print(forecast)\n print(forecast_originalScale)\n \n \n \n \n \n \n \n loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')\n \n tf.summary.scalar(tensor=loss, name='mutpy')\n \n \n \n \n \n optimizer = tf.train.AdamOptimizer(learning_rate=0.1)\n minimize_step = optimizer.minimize(loss)\n \n \n \n \n \n \n \n \n \n \n \n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n \n \n tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)\n \n \n all_summary_ops = tf.summary.merge_all()\n \n \n numSteps = 0\n for e in range(NUM_EPOCHS):\n print('starting training for epoch:', e + 1)\n \n startLocation = 0\n iteration = 0\n for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n tensorboard_writer.add_summary(summary_values, numSteps)\n numSteps += 1\n \n if (iteration + 1) % 1 == 0:\n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals 
were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n startLocation += BATCH_SIZE\n \n \n if len(targetDataSequence_sales) > startLocation:\n print('epoch:', e + 1, ' iteration:', iteration + 1)\n trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]\n trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]\n \n (_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \\\n targetPlaceholder: trainingBatchTarget})\n \n print('got a loss of:', lsBatch)\n print('the forecast of first 5 normalised are:', forecastBatch[0:5])\n print('while the actuals were normalised :', trainingBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', forecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n \n totalValidationLoss = 0\n startLocation = 0\n print('starting validation')\n for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):\n validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]\n \n (validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n \n startLocation += BATCH_SIZE\n totalValidationLoss += validationLsBatch\n \n print('first five predictions:', validationForecastBatch[0:5])\n print('first five actuals :', validationBatchTarget[0:5])\n print('the forecast of first 5 orignal scale are:', validationForecastBatch_originalScale[0:5])\n print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)\n \n \n if startLocation < len(validationDataSequence_sales):\n validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]\n validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]\n \n (validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \\\n targetPlaceholder: validationBatchTarget})\n \n totalValidationLoss += validationLsBatch\n \n \n print('Validation completed after epoch:', e + 1, '. Total validation loss:', totalValidationLoss)\n \n \n print('----------- Saving Model')\n tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\\\n {'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\\\n {'loss': loss, 'forecast_originalScale': forecast_originalScale})\n print('saved model to:', TRAINED_MODEL_PATH)\n \n print('----------- Finis')" ]
[ [ "tensorflow.nn.dynamic_rnn", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "pandas.read_csv", "tensorflow.reset_default_graph", "tensorflow.add", "tensorflow.Session", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.saved_model.simple_save", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.ones", "numpy.random.shuffle", "tensorflow.squared_difference" ], [ "tensorflow.nn.dynamic_rnn", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "pandas.read_csv", "tensorflow.reset_default_graph", "tensorflow.add", "tensorflow.Session", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.saved_model.simple_save", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.ones", "numpy.random.shuffle", "tensorflow.squared_difference" ], [ "tensorflow.nn.dynamic_rnn", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "pandas.read_csv", "tensorflow.reset_default_graph", "tensorflow.add", "tensorflow.Session", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.saved_model.simple_save", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.ones", "numpy.random.shuffle", "tensorflow.squared_difference" ], [ "tensorflow.nn.dynamic_rnn", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "pandas.read_csv", "tensorflow.reset_default_graph", "tensorflow.add", "tensorflow.Session", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.saved_model.simple_save", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.ones", "numpy.random.shuffle", "tensorflow.squared_difference" ], [ "tensorflow.nn.dynamic_rnn", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "pandas.read_csv", "tensorflow.reset_default_graph", "tensorflow.add", "tensorflow.Session", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.saved_model.simple_save", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.ones", "numpy.random.shuffle", "tensorflow.squared_difference" ], [ "tensorflow.nn.dynamic_rnn", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "pandas.read_csv", "tensorflow.reset_default_graph", "tensorflow.add", "tensorflow.Session", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.saved_model.simple_save", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.ones", "numpy.random.shuffle", "tensorflow.squared_difference" ], [ "tensorflow.nn.dynamic_rnn", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "pandas.read_csv", "tensorflow.reset_default_graph", 
"tensorflow.add", "tensorflow.Session", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.saved_model.simple_save", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.ones", "numpy.random.shuffle", "tensorflow.squared_difference" ], [ "tensorflow.nn.dynamic_rnn", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "pandas.read_csv", "tensorflow.reset_default_graph", "tensorflow.add", "tensorflow.Session", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.saved_model.simple_save", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.ones", "numpy.random.shuffle", "tensorflow.squared_difference" ], [ "tensorflow.get_default_graph", "pandas.read_csv", "tensorflow.saved_model.loader.load", "tensorflow.Session" ], [ "tensorflow.nn.dynamic_rnn", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "pandas.read_csv", "tensorflow.reset_default_graph", "tensorflow.add", "tensorflow.Session", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.saved_model.simple_save", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.ones", "numpy.random.shuffle", "tensorflow.squared_difference" ], [ "tensorflow.nn.dynamic_rnn", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "pandas.read_csv", "tensorflow.reset_default_graph", "tensorflow.add", "tensorflow.Session", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.saved_model.simple_save", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.ones", "numpy.random.shuffle", "tensorflow.squared_difference" ], [ "tensorflow.nn.dynamic_rnn", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "pandas.read_csv", "tensorflow.reset_default_graph", "tensorflow.add", "tensorflow.Session", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.saved_model.simple_save", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.ones", "numpy.random.shuffle", "tensorflow.squared_difference" ], [ "tensorflow.nn.dynamic_rnn", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "pandas.read_csv", "tensorflow.reset_default_graph", "tensorflow.add", "tensorflow.Session", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.saved_model.simple_save", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.ones", "numpy.random.shuffle", "tensorflow.squared_difference" ], [ "tensorflow.nn.dynamic_rnn", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "pandas.read_csv", "tensorflow.reset_default_graph", "tensorflow.add", "tensorflow.Session", 
"tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.saved_model.simple_save", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.ones", "numpy.random.shuffle", "tensorflow.squared_difference" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] } ]
hyunwoo18/decisionengine_modules
[ "a67462628c2074e768d0825edee4ee5d570030e0", "a67462628c2074e768d0825edee4ee5d570030e0" ]
[ "src/decisionengine_modules/tests/test_GceFigureOfMerit.py", "src/decisionengine_modules/tests/test_GCEResourceLimits.py" ]
[ "import os\n\nimport numpy as np\nimport pandas as pd\nimport tabulate\n\nfrom decisionengine_modules.GCE.transforms import GceFigureOfMerit\n\nDATA_DIR = os.path.join(os.path.dirname(__file__), \"data\")\nCSV_FILE = os.path.join(DATA_DIR, \"GceOccupancy.output.fixture.csv\")\n\n_PRODUCES = [\"GCE_Price_Performance\", \"GCE_Figure_Of_Merit\"]\n_PRODUCES_DICT = dict.fromkeys(_PRODUCES, pd.DataFrame)\nCONFIG = {\n}\n\nGCE_OCCUPANCY_DF = pd.read_csv(CSV_FILE)\n\ngce_instance_performance_df = pd.DataFrame([\n {\"EntryName\": \"FNAL_HEPCLOUD_GOOGLE_us-central1-a_n1-standard-1\",\n \"InstanceType\": \"n1-standard-1\",\n \"AvailabilityZone\": \"us-central1-a\",\n \"OnDemandPrice\": 0.0475,\n \"PerfTtbarTotal\": 0.0317}, ])\n\ngce_instance_performance_df.reindex(columns=(\"EnryName\",\n \"InstanceType\",\n \"AvailabilityZone\",\n \"OnDemandPrice\",\n \"PerfTtbarTotal\"))\n\n# expected datablock\ndata_block = {\n \"GCE_Instance_Performance\": gce_instance_performance_df.reindex(columns=(\"EntryName\",\n \"InstanceType\",\n \"AvailabilityZone\",\n \"OnDemandPrice\",\n \"PerfTtbarTotal\")),\n \"Factory_Entries_GCE\": pd.DataFrame([\n {\"EntryName\": \"FNAL_HEPCLOUD_GOOGLE_us-central1-a_n1-standard-1\",\n \"GlideinConfigPerEntryMaxIdle\": 100,\n \"GlideinMonitorTotalStatusIdle\": 10,\n \"GlideinConfigPerEntryMaxGlideins\": 200,\n \"GlideinMonitorTotalStatusRunning\": 100}]),\n\n \"GCE_Occupancy\": GCE_OCCUPANCY_DF,\n}\n\ngce_price_performance_df = pd.DataFrame([\n {\"EntryName\": \"FNAL_HEPCLOUD_GOOGLE_us-central1-a_n1-standard-1\",\n \"PricePerformance\": 1.498423}, ])\n\nexpected_transform_output = {\n _PRODUCES[0]: gce_price_performance_df.reindex(columns=(\"EntryName\",\n \"PricePerformance\")),\n _PRODUCES[1]: pd.DataFrame([\n {\"EntryName\": \"FNAL_HEPCLOUD_GOOGLE_us-central1-a_n1-standard-1\",\n \"FigureOfMerit\": 0.08241324921135648\n }, ]),\n}\n\nfor k, value in data_block.items():\n print(tabulate.tabulate(value, headers='keys', tablefmt='psql'))\n\n\nclass TestGceFigureOfMerit:\n\n def test_produces(self):\n gce_figure_of_merit = GceFigureOfMerit.GceFigureOfMerit(CONFIG)\n assert gce_figure_of_merit._produces == _PRODUCES_DICT\n\n def test_transform(self):\n gce_figure_of_merit = GceFigureOfMerit.GceFigureOfMerit(CONFIG)\n res = gce_figure_of_merit.transform(data_block)\n assert _PRODUCES.sort() == list(res.keys()).sort()\n\n expected_df = expected_transform_output[_PRODUCES[0]]\n res_df = res[_PRODUCES[0]]\n assert np.isclose(expected_df[\"FigureOfMerit\"],\n res_df[\"FigureOfMerit\"])\n\n expected_df = expected_transform_output[_PRODUCES[1]]\n res_df = res[_PRODUCES[1]]\n assert np.isclose(expected_df[\"PricePerformance\"],\n res_df[\"PricePerformance\"])\n", "import os\n\nimport pandas\nimport pytest\nimport typing\n\nfrom unittest import mock\n\nimport decisionengine.framework.config.ChannelConfigHandler as configmanager\nimport decisionengine.framework.dataspace.dataspace as dataspace\nfrom decisionengine.framework.modules import SourceProxy\nfrom decisionengine_modules.GCE.sources import GCEResourceLimits\nfrom decisionengine_modules.util import testutils as utils\n\nDATA_DIR = os.path.join(os.path.dirname(__file__), \"data\")\nFIXTURE_FILE = os.path.join(DATA_DIR, \"gce_limits_factory_entries.test\")\n\nCONFIG = {\n \"channel_name\": \"factory_data_channel\",\n \"Dataproducts\": [\"Factory_Entries_GCE\"],\n \"retries\": 3,\n \"retry_timeout\": 20,\n \"entry_limit_attrs\": [\"EntryName\",\n \"GlideinConfigDefaultPerFrontendMaxGlideins\",\n 
\"GlideinConfigDefaultPerFrontendMaxHeld\",\n \"GlideinConfigDefaultPerFrontendMaxIdle\",\n \"GlideinConfigPerEntryMaxGlideins\",\n \"GlideinConfigPerEntryMaxHeld\",\n \"GlideinConfigPerEntryMaxIdle\",\n \"GlideinConfigPerFrontendMaxGlideins\",\n \"GlideinConfigPerFrontendMaxHeld\",\n \"GlideinConfigPerFrontendMaxIdle\"]\n}\n\n_PRODUCES = {\"GCE_Resource_Limits\": typing.Any}\n\ndef test_produces():\n with mock.patch.object(configmanager, \"ChannelConfigHandler\"), \\\n mock.patch.object(dataspace, \"DataSpace\"):\n gce_resource_limits = GCEResourceLimits.GCEResourceLimits(CONFIG)\n assert gce_resource_limits._produces == _PRODUCES\n\ndef test_acquire():\n with mock.patch.object(configmanager, \"ChannelConfigHandler\"), \\\n mock.patch.object(dataspace, \"DataSpace\"), \\\n mock.patch.object(SourceProxy.SourceProxy, \"acquire\") as factory_data:\n gce_resource_limits = GCEResourceLimits.GCEResourceLimits(CONFIG)\n factory_entries = utils.input_from_file(FIXTURE_FILE)\n factory_data.return_value = {\"Factory_Entries_GCE\": pandas.DataFrame(factory_entries)}\n gce_limits = gce_resource_limits.acquire()\n assert _PRODUCES.keys() == gce_limits.keys()\n assert CONFIG.get(\"entry_limit_attrs\").sort() == list(gce_limits.get('GCE_Resource_Limits')).sort()\n\ndef test_config():\n CONFIG['Dataproducts'].insert(0, \"bad\")\n with pytest.raises(RuntimeError, match=\"Only one element may be specified\"):\n GCEResourceLimits.GCEResourceLimits(CONFIG)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "numpy.isclose" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
WangHelin1997/MaskSpec
[ "d4acf1343c780ba481abecbfe426ff657857b8f1", "d4acf1343c780ba481abecbfe426ff657857b8f1" ]
[ "audioset/dataset.py", "trainer/engine_pretrain.py" ]
[ "import io\nimport os\nimport random\n\nimport av\nfrom torch.utils.data import Dataset as TorchDataset, ConcatDataset, DistributedSampler, WeightedRandomSampler, RandomSampler\nimport torch\nimport numpy as np\nimport sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\nfrom audioset.audiodatasets import PreprocessDataset\nimport h5py\nimport augly.audio as audaugs\n\nLMODE = os.environ.get(\"LMODE\", False)\nif LMODE:\n def LMODE_default_config():\n cache_root_path = \"/system/user/publicdata/CP/DCASE/cached_datasets/\"\n\n\ndef decode_mp3(mp3_arr):\n \"\"\"\n decodes an array if uint8 representing an mp3 file\n :rtype: np.array\n \"\"\"\n container = av.open(io.BytesIO(mp3_arr.tobytes()))\n stream = next(s for s in container.streams if s.type == 'audio')\n # print(stream)\n a = []\n for i, packet in enumerate(container.demux(stream)):\n for frame in packet.decode():\n a.append(frame.to_ndarray().reshape(-1))\n waveform = np.concatenate(a)\n if waveform.dtype != 'float32':\n raise RuntimeError(\"Unexpected wave type\")\n return waveform\n\n\ndef pad_or_truncate(x, audio_length):\n \"\"\"Pad all audio to specific length.\"\"\"\n if len(x) <= audio_length:\n return np.concatenate((x, np.zeros(audio_length - len(x), dtype=np.float32)), axis=0)\n else:\n return x[0: audio_length]\n\n\n\ndef pydub_augment(waveform, gain_augment=7):\n if gain_augment:\n gain = torch.randint(gain_augment * 2, (1,)).item() - gain_augment\n amp = 10 ** (gain / 20)\n waveform = waveform * amp\n return waveform\n\n\nclass MixupDataset(TorchDataset):\n \"\"\" Mixing Up wave forms\n \"\"\"\n\n def __init__(self, dataset, beta=2, rate=0.5):\n self.beta = beta\n self.rate = rate\n self.dataset = dataset\n print(f\"Mixing up waveforms from dataset of len {len(dataset)}\")\n\n def __getitem__(self, index):\n if torch.rand(1) < self.rate:\n x1, y1 = self.dataset[index]\n idx2 = torch.randint(len(self.dataset), (1,)).item()\n x2, y2 = self.dataset[idx2]\n l = np.random.beta(self.beta, self.beta)\n l = max(l, 1. - l)\n x1 = x1-x1.mean()\n x2 = x2-x2.mean()\n x = (x1 * l + x2 * (1. - l))\n x = x - x.mean()\n return x, (y1 * l + y2 * (1. 
- l))\n return self.dataset[index]\n\n def __len__(self):\n return len(self.dataset)\n\n\nclass AudioSetDataset(TorchDataset):\n def __init__(self, hdf5_file, sample_rate=32000, classes_num=527, clip_length=10, augment=False, in_mem=False, extra_augment=False):\n \"\"\"\n Reads the mp3 bytes from HDF file decodes using av and returns a fixed length audio wav\n \"\"\"\n self.sample_rate = sample_rate\n self.hdf5_file = hdf5_file\n if in_mem:\n print(\"\\nPreloading in memory\\n\")\n with open(hdf5_file, 'rb') as f:\n self.hdf5_file = io.BytesIO(f.read())\n with h5py.File(hdf5_file, 'r') as f:\n self.length = len(f['audio_name'])\n print(f\"Dataset from {hdf5_file} with length {self.length}.\")\n self.dataset_file = None # lazy init\n self.clip_length = clip_length * sample_rate\n self.classes_num = classes_num\n self.augment = augment\n self.extra_augment = extra_augment\n if augment:\n print(f\"Will agument data from {hdf5_file}\")\n\n def open_hdf5(self):\n self.dataset_file = h5py.File(self.hdf5_file, 'r')\n\n def __len__(self):\n return self.length\n\n def __del__(self):\n if self.dataset_file is not None:\n self.dataset_file.close()\n self.dataset_file = None\n\n def __getitem__(self, index):\n \"\"\"Load waveform and target of an audio clip.\n\n Args:\n meta: {\n 'hdf5_path': str,\n 'index_in_hdf5': int}\n Returns:\n data_dict: {\n 'audio_name': str,\n 'waveform': (clip_samples,),\n 'target': (classes_num,)}\n \"\"\"\n if self.dataset_file is None:\n self.open_hdf5()\n\n audio_name = self.dataset_file['audio_name'][index].decode()\n try:\n waveform = decode_mp3(self.dataset_file['mp3'][index])\n except:\n print(\"Read Error:\" + audio_name)\n index = random.randint(1,self.length-1)\n audio_name = self.dataset_file['audio_name'][index].decode()\n waveform = decode_mp3(self.dataset_file['mp3'][index])\n #else:\n # waveform = decode_mp3(self.dataset_file['mp3'][index])\n #waveform = decode_mp3(self.dataset_file['mp3'][index])\n if self.augment:\n waveform = pydub_augment(waveform)\n \n waveform = self.resample(waveform)\n if self.extra_augment:\n Transforms = audaugs.Compose([\n audaugs.AddBackgroundNoise(snr_level_db=random.uniform(0.0, 15.0), p=random.random()),\n audaugs.ChangeVolume(volume_db=random.uniform(-2.0, 2.0), p=random.random()),\n audaugs.HighPassFilter(cutoff_hz=random.sample([5000.0, 6000.0, 7000.0, 8000.0, 9000.0, 10000.0, 11000.0, 12000.0], 1)[0], p=random.random()),\n audaugs.LowPassFilter(cutoff_hz=random.sample([1000.0, 2000.0, 3000.0, 4000.0, 5000.0], 1)[0], p=random.random()),\n audaugs.Speed(factor=random.uniform(0.8, 1.2), p=random.random()),\n ])\n waveform, _ = Transforms(waveform, self.sample_rate)\n if waveform.ndim > 1:\n waveform = waveform[0, :]\n waveform = pad_or_truncate(waveform, self.clip_length)\n if 'target' in self.dataset_file.keys():\n target = self.dataset_file['target'][index]\n target = np.unpackbits(target, axis=-1,\n count=self.classes_num).astype(np.float32)\n else:\n target = None\n return waveform.reshape(1, -1), target\n\n def resample(self, waveform):\n \"\"\"Resample.\n Args:\n waveform: (clip_samples,)\n Returns:\n (resampled_clip_samples,)\n \"\"\"\n if self.sample_rate == 32000:\n return waveform\n elif self.sample_rate == 16000:\n return waveform[0:: 2]\n elif self.sample_rate == 8000:\n return waveform[0:: 4]\n else:\n raise Exception('Incorrect sample rate!')\n\n\n\ndef preload_mp3(balanced_train_hdf5, unbalanced_train_hdf5, num_of_classes):\n for hdf5_file in [balanced_train_hdf5, unbalanced_train_hdf5]:\n print(f\"\\n \\n will 
now preload {hdf5_file} \\n\\n \")\n with h5py.File(hdf5_file, 'r') as dataset_file:\n target = dataset_file['mp3'][:]\n print(len(target))\n print(f\"\\n \\n done with {hdf5_file} \\n\\n \")\n return target[1000]\n\n\ndef get_ft_cls_balanced_sample_weights(balanced_train_hdf5, unbalanced_train_hdf5, num_of_classes,\n sample_weight_offset=100, sample_weight_sum=True):\n \"\"\"\n :return: float tenosr of shape len(full_training_set) representing the weights of each sample.\n \"\"\"\n # the order of balanced_train_hdf5,unbalanced_train_hdf5 is important.\n # should match get_full_training_set\n all_y = []\n for hdf5_file in [balanced_train_hdf5, unbalanced_train_hdf5]:\n with h5py.File(hdf5_file, 'r') as dataset_file:\n target = dataset_file['target']\n target = np.unpackbits(target, axis=-1,\n count=num_of_classes)\n all_y.append(target)\n all_y = np.concatenate(all_y, axis=0)\n all_y = torch.as_tensor(all_y)\n per_class = all_y.long().sum(0).float().reshape(1, -1) # frequencies per class\n\n per_class = sample_weight_offset + per_class # offset low freq classes\n if sample_weight_offset > 0:\n print(f\"Warning: sample_weight_offset={sample_weight_offset} minnow={per_class.min()}\")\n per_class_weights = 1000. / per_class\n all_weight = all_y * per_class_weights\n if sample_weight_sum:\n print(\"\\nsample_weight_sum\\n\")\n all_weight = all_weight.sum(dim=1)\n else:\n all_weight, _ = all_weight.max(dim=1)\n return all_weight\n\n\ndef get_ft_weighted_sampler(balanced_train_hdf5, unbalanced_train_hdf5, num_of_classes,\n epoch_len=100000, sampler_replace=False):\n samples_weights=get_ft_cls_balanced_sample_weights(balanced_train_hdf5, unbalanced_train_hdf5, num_of_classes)\n num_nodes = int(os.environ.get('num_nodes', 1))\n ddp = int(os.environ.get('DDP', 1))\n num_nodes = max(ddp, num_nodes)\n print(\"num_nodes= \", num_nodes)\n rank = int(os.environ.get('NODE_RANK', 0))\n return DistributedSamplerWrapper(sampler=WeightedRandomSampler(samples_weights,\n num_samples=epoch_len, replacement=sampler_replace),\n dataset=range(epoch_len),\n num_replicas=num_nodes,\n rank=rank,\n )\n\ndef get_random_sampler(dataset, epoch_len=100000, sampler_replace=True):\n num_nodes = int(os.environ.get('num_nodes', 1))\n ddp = int(os.environ.get('DDP', 1))\n num_nodes = max(ddp, num_nodes)\n print(\"num_nodes= \", num_nodes)\n rank = int(os.environ.get('NODE_RANK', 0))\n return DistributedSamplerWrapper(sampler=RandomSampler(data_source=dataset, num_samples=epoch_len, replacement=sampler_replace),\n dataset=range(epoch_len),\n num_replicas=num_nodes,\n rank=rank,\n )\n\ndef get_roll_func(axis=1, shift=None, shift_range=50):\n print(\"rolling...\")\n\n def roll_func(b):\n x, y = b\n x = torch.as_tensor(x)\n sf = shift\n if shift is None:\n sf = int(np.random.random_integers(-shift_range, shift_range))\n global FirstTime\n\n return x.roll(sf, axis), y\n\n return roll_func\n\ndef get_base_training_set(balanced_train_hdf5, sample_rate=32000, classes_num=527, clip_length=10, augment=False, in_mem=False, extra_augment=True, roll=True, wavmix=True):\n ds = AudioSetDataset(\n hdf5_file=balanced_train_hdf5, sample_rate=sample_rate, classes_num=classes_num, \n clip_length=clip_length, \n augment=augment, in_mem=in_mem, extra_augment=extra_augment)\n if roll:\n ds = PreprocessDataset(ds, get_roll_func())\n if wavmix:\n ds = MixupDataset(ds)\n return ds\n\ndef get_full_training_set(balanced_train_hdf5, unbalanced_train_hdf5, sample_rate=32000, classes_num=527, clip_length=10, augment=False, in_mem=False, extra_augment=True, 
roll=True, wavmix=True):\n sets = [\n AudioSetDataset(\n hdf5_file=balanced_train_hdf5, sample_rate=sample_rate, classes_num=classes_num, \n clip_length=clip_length, \n augment=augment, in_mem=in_mem, extra_augment=extra_augment\n ), \n AudioSetDataset(\n hdf5_file=unbalanced_train_hdf5, sample_rate=sample_rate, classes_num=classes_num, \n clip_length=clip_length, \n augment=augment, in_mem=in_mem, extra_augment=extra_augment\n )]\n ds = ConcatDataset(sets)\n if roll:\n ds = PreprocessDataset(ds, get_roll_func())\n if wavmix:\n ds = MixupDataset(ds)\n return ds\n\n\ndef get_test_set(eval_hdf5, sample_rate=32000, classes_num=527, clip_length=10):\n ds = AudioSetDataset(\n hdf5_file=eval_hdf5, sample_rate=sample_rate, classes_num=classes_num, \n clip_length=clip_length, \n augment=False, in_mem=False, extra_augment=False)\n return ds\n\ndef get_other_sets(others_hdf5_path, use_audioset, balanced_train_hdf5, unbalanced_train_hdf5, sample_rate=32000, classes_num=527, clip_length=10, augment=False, in_mem=False, extra_augment=True, roll=True, wavmix=True):\n sets = []\n for root, dirs, files in os.walk(others_hdf5_path, topdown=False):\n for name in files:\n if name[-3:] == 'hdf':\n sets.append(AudioSetDataset(\n hdf5_file=os.path.join(root, name), sample_rate=sample_rate, classes_num=classes_num, clip_length=clip_length, \n augment=augment, in_mem=in_mem, extra_augment=extra_augment))\n if use_audioset:\n sets.append(AudioSetDataset(\n hdf5_file=balanced_train_hdf5, sample_rate=sample_rate, classes_num=classes_num, clip_length=clip_length, \n augment=augment, in_mem=in_mem, extra_augment=extra_augment))\n sets.append(AudioSetDataset(\n hdf5_file=unbalanced_train_hdf5, sample_rate=sample_rate, classes_num=classes_num, clip_length=clip_length, \n augment=augment, in_mem=in_mem, extra_augment=extra_augment))\n sets.append(AudioSetDataset(\n hdf5_file=eval_hdf5, sample_rate=sample_rate, classes_num=classes_num, clip_length=clip_length, \n augment=augment, in_mem=in_mem, extra_augment=extra_augment))\n\n ds = ConcatDataset(sets)\n if roll:\n ds = PreprocessDataset(ds, get_roll_func())\n if wavmix:\n ds = MixupDataset(ds)\n return ds\n\n\nclass DistributedSamplerWrapper(DistributedSampler):\n def __init__(\n self, sampler, dataset,\n num_replicas=None,\n rank=None,\n shuffle: bool = True):\n super(DistributedSamplerWrapper, self).__init__(\n dataset, num_replicas, rank, shuffle)\n # source: @awaelchli https://github.com/PyTorchLightning/pytorch-lightning/issues/3238\n self.sampler = sampler\n\n def __iter__(self):\n if self.sampler.generator is None:\n self.sampler.generator = torch.Generator()\n self.sampler.generator.manual_seed(self.seed + self.epoch)\n #print(self.sampler)\n indices = list(self.sampler)\n if self.epoch == 0:\n print(f\"\\n DistributedSamplerWrapper : {indices[:10]} \\n\\n\")\n indices = indices[self.rank:self.total_size:self.num_replicas]\n return iter(indices)\n\n\nif __name__ == \"__main__\":\n\n name = 'audioset' # dataset name\n roll = True # apply roll augmentation\n wavmix = True # apply wave-level mixup\n base_dir = \"/data/dean/whl/audioset_Kong/\" # base directory of the dataset, change it or make a link\n if LMODE:\n base_dir = \"/system/user/publicdata/CP/audioset/audioset_hdf5s/\"\n\n balanced_train_hdf5 = base_dir + \"mp3/balanced_train_segments_mp3.hdf\"\n eval_hdf5 = base_dir + \"mp3/eval_segments_mp3.hdf\"\n unbalanced_train_hdf5 = base_dir + \"mp3/unbalanced_train_segments_mp3.hdf\"\n\n if LMODE:\n balanced_train_hdf5 = balanced_train_hdf5.replace(base_dir, 
os.environ.get(\"TMPDIR\", base_dir)+\"/\")\n unbalanced_train_hdf5 = unbalanced_train_hdf5.replace(base_dir, os.environ.get(\"TMPDIR\", base_dir)+\"/\")\n eval_hdf5 = eval_hdf5.replace(base_dir, os.environ.get(\"TMPDIR\", base_dir)+\"/\")\n \n num_of_classes = 527\n\n print(\"get_base_test_set\", len(get_test_set(eval_hdf5)))\n print(\"get_full_training_set\", len(get_full_training_set(balanced_train_hdf5, unbalanced_train_hdf5)))\n", "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n# --------------------------------------------------------\n# References:\n# DeiT: https://github.com/facebookresearch/deit\n# BEiT: https://github.com/microsoft/unilm/tree/master/beit\n# --------------------------------------------------------\nimport math\nimport sys\nfrom typing import Iterable\n\nimport torch\nimport os\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\nimport utils.misc as misc\nimport utils.lr_sched as lr_sched\n\n\ndef train_one_epoch(model: torch.nn.Module,\n data_loader: Iterable, optimizer: torch.optim.Optimizer,\n device: torch.device, epoch: int, loss_scaler,\n log_writer=None,\n args=None):\n model.train(True)\n metric_logger = misc.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}]'.format(epoch)\n print_freq = args.print_freq\n\n accum_iter = args.accum_iter\n\n optimizer.zero_grad()\n\n if log_writer is not None:\n print('log_dir: {}'.format(log_writer.log_dir))\n\n for data_iter_step, (samples, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\n\n # we use a per iteration (instead of per epoch) lr scheduler\n if data_iter_step % accum_iter == 0:\n lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)\n\n samples = samples.to(device, non_blocking=True)\n\n with torch.cuda.amp.autocast():\n loss, _, _ = model(samples, mask_ratio=args.mask_ratio)\n\n loss_value = loss.item()\n\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training\".format(loss_value))\n sys.exit(1)\n\n loss /= accum_iter\n loss_scaler(loss, optimizer, parameters=model.parameters(),\n update_grad=(data_iter_step + 1) % accum_iter == 0)\n if (data_iter_step + 1) % accum_iter == 0:\n optimizer.zero_grad()\n\n torch.cuda.synchronize()\n\n metric_logger.update(loss=loss_value)\n\n lr = optimizer.param_groups[0][\"lr\"]\n metric_logger.update(lr=lr)\n\n loss_value_reduce = misc.all_reduce_mean(loss_value)\n if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:\n \"\"\" We use epoch_1000x as the x-axis in tensorboard.\n This calibrates different curves when batch size changes.\n \"\"\"\n epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)\n log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)\n log_writer.add_scalar('lr', lr, epoch_1000x)\n\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}\n" ]
[ [ "torch.Generator", "numpy.random.beta", "torch.randint", "torch.utils.data.RandomSampler", "torch.utils.data.WeightedRandomSampler", "numpy.concatenate", "torch.utils.data.ConcatDataset", "numpy.unpackbits", "numpy.random.random_integers", "torch.rand", "torch.as_tensor" ], [ "torch.cuda.synchronize", "torch.cuda.amp.autocast" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
uofuseismo/shakemap
[ "cbad8622bd520e1936447620edfb3a4feea1a8d9", "cbad8622bd520e1936447620edfb3a4feea1a8d9" ]
[ "tests/shakelib/conversions/imc/beyer_bommer_2006_test.py", "shakelib/conversions/imc/boore_kishida_2017.py" ]
[ "#!/usr/bin/env python\n\n# stdlib imports\nimport os.path\nimport sys\n\n# third party imports\nimport numpy as np\nfrom openquake.hazardlib import const\nfrom openquake.hazardlib.imt import PGA, PGV, SA\nimport pytest\n\n# local imports\nfrom shakelib.conversions.imc.beyer_bommer_2006 import BeyerBommer2006\n\n\n# important constants\nhomedir = os.path.dirname(os.path.abspath(__file__)) # where is this script?\nshakedir = os.path.abspath(os.path.join(homedir, '..', '..', '..', '..'))\nsys.path.insert(0, shakedir)\n\namps_in = np.log(np.array([0.05, 0.1, 0.2, 0.4, 0.8, 1.6]))\nsigmas_in = np.array([0.5, 0.55, 0.6, 0.65, 0.61, 0.7])\nimc_in = [const.IMC.MEDIAN_HORIZONTAL,\n const.IMC.MEDIAN_HORIZONTAL,\n const.IMC.GMRotI50,\n const.IMC.GREATER_OF_TWO_HORIZONTAL,\n const.IMC.GREATER_OF_TWO_HORIZONTAL,\n const.IMC.HORIZONTAL]\nimc_out = [const.IMC.GREATER_OF_TWO_HORIZONTAL,\n const.IMC.GMRotI50,\n const.IMC.GREATER_OF_TWO_HORIZONTAL,\n const.IMC.GMRotI50,\n const.IMC.MEDIAN_HORIZONTAL,\n const.IMC.MEDIAN_HORIZONTAL]\nimt_in = [PGA(), PGV(), SA(0.3), SA(1.0), SA(3.0)]\n\namps_target = np.array(\n [[-2.90042209, -2.20727491, -1.51412773, -0.82098055, -0.12783337,\n 0.56531381],\n [-2.99573227, -2.30258509, -1.60943791, -0.91629073, -0.22314355,\n 0.47000363],\n [-2.90042209, -2.20727491, -1.51412773, -0.82098055, -0.12783337,\n 0.56531381],\n [-3.09104245, -2.39789527, -1.70474809, -1.01160091, -0.31845373,\n 0.37469345],\n [-3.09104245, -2.39789527, -1.70474809, -1.01160091, -0.31845373,\n 0.37469345],\n [-2.99573227, -2.30258509, -1.60943791, -0.91629073, -0.22314355,\n 0.47000363],\n [-2.90042209, -2.20727491, -1.51412773, -0.82098055, -0.12783337,\n 0.56531381],\n [-2.99573227, -2.30258509, -1.60943791, -0.91629073, -0.22314355,\n 0.47000363],\n [-2.90042209, -2.20727491, -1.51412773, -0.82098055, -0.12783337,\n 0.56531381],\n [-3.09104245, -2.39789527, -1.70474809, -1.01160091, -0.31845373,\n 0.37469345],\n [-3.09104245, -2.39789527, -1.70474809, -1.01160091, -0.31845373,\n 0.37469345],\n [-2.99573227, -2.30258509, -1.60943791, -0.91629073, -0.22314355,\n 0.47000363],\n [-2.86347036, -2.17032318, -1.477176, -0.78402882, -0.09088164,\n 0.60226554],\n [-2.99573227, -2.30258509, -1.60943791, -0.91629073, -0.22314355,\n 0.47000363],\n [-2.86347036, -2.17032318, -1.477176, -0.78402882, -0.09088164,\n 0.60226554],\n [-3.12799418, -2.434847, -1.74169982, -1.04855264, -0.35540546,\n 0.33774172],\n [-3.12799418, -2.434847, -1.74169982, -1.04855264, -0.35540546,\n 0.33774172],\n [-2.99573227, -2.30258509, -1.60943791, -0.91629073, -0.22314355,\n 0.47000363],\n [-2.81341072, -2.12026354, -1.42711636, -0.73396918, -0.04082199,\n 0.65232519],\n [-2.99573227, -2.30258509, -1.60943791, -0.91629073, -0.22314355,\n 0.47000363],\n [-2.81341072, -2.12026354, -1.42711636, -0.73396918, -0.04082199,\n 0.65232519],\n [-3.17805383, -2.48490665, -1.79175947, -1.09861229, -0.40546511,\n 0.28768207],\n [-3.17805383, -2.48490665, -1.79175947, -1.09861229, -0.40546511,\n 0.28768207],\n [-2.99573227, -2.30258509, -1.60943791, -0.91629073, -0.22314355,\n 0.47000363],\n [-2.81341072, -2.12026354, -1.42711636, -0.73396918, -0.04082199,\n 0.65232519],\n [-2.99573227, -2.30258509, -1.60943791, -0.91629073, -0.22314355,\n 0.47000363],\n [-2.81341072, -2.12026354, -1.42711636, -0.73396918, -0.04082199,\n 0.65232519],\n [-3.17805383, -2.48490665, -1.79175947, -1.09861229, -0.40546511,\n 0.28768207],\n [-3.17805383, -2.48490665, -1.79175947, -1.09861229, -0.40546511,\n 0.28768207],\n [-2.99573227, -2.30258509, -1.60943791, 
-0.91629073, -0.22314355,\n 0.47000363]]\n)\n\nsigs_target = np.array(\n [[0.5123436, 0.56313139, 0.61395436, 0.66480445, 0.62412242,\n 0.71567588],\n [0.50029991, 0.55027266, 0.60024995, 0.65023073, 0.61024585,\n 0.70021425],\n [0.5120389, 0.56285419, 0.61370012, 0.66456967, 0.62387233,\n 0.71545778],\n [0.48814882, 0.53735522, 0.58653034, 0.63568144, 0.59636229,\n 0.68481368],\n [0.48784144, 0.537076, 0.58627454, 0.63544542, 0.59611071,\n 0.68459461],\n [0.48076008, 0.52973252, 0.57863271, 0.62747753, 0.58840566,\n 0.67627899],\n [0.5123436, 0.56313139, 0.61395436, 0.66480445, 0.62412242,\n 0.71567588],\n [0.50029991, 0.55027266, 0.60024995, 0.65023073, 0.61024585,\n 0.70021425],\n [0.5120389, 0.56285419, 0.61370012, 0.66456967, 0.62387233,\n 0.71545778],\n [0.48814882, 0.53735522, 0.58653034, 0.63568144, 0.59636229,\n 0.68481368],\n [0.48784144, 0.537076, 0.58627454, 0.63544542, 0.59611071,\n 0.68459461],\n [0.48076008, 0.52973252, 0.57863271, 0.62747753, 0.58840566,\n 0.67627899],\n [0.51248419, 0.56325931, 0.61407169, 0.66491281, 0.62423784,\n 0.71577653],\n [0.5009647, 0.55087715, 0.60080415, 0.65074237, 0.61079099,\n 0.7006894],\n [0.51150308, 0.56236679, 0.61325313, 0.66415691, 0.62343263,\n 0.71507441],\n [0.48868846, 0.53784549, 0.58697954, 0.63609593, 0.59680409,\n 0.68519845],\n [0.48769948, 0.53694706, 0.58615642, 0.63533644, 0.59599454,\n 0.68449346],\n [0.469213, 0.51747452, 0.56562716, 0.61369652, 0.57524702,\n 0.66170077],\n [0.51437714, 0.56498216, 0.61565237, 0.6663729, 0.62579284,\n 0.71713307],\n [0.50119856, 0.55108983, 0.60099917, 0.65092242, 0.61098281,\n 0.70085662],\n [0.51316212, 0.56387619, 0.61463758, 0.66543547, 0.62479453,\n 0.71626207],\n [0.48701383, 0.53632437, 0.58558606, 0.63481027, 0.5954336,\n 0.6840051],\n [0.48578027, 0.53520447, 0.58456055, 0.6338644, 0.59442508,\n 0.68312735],\n [0.4649541, 0.51361597, 0.56209924, 0.61044647, 0.57177846,\n 0.65868763],\n [0.51437714, 0.56498216, 0.61565237, 0.6663729, 0.62579284,\n 0.71713307],\n [0.50119856, 0.55108983, 0.60099917, 0.65092242, 0.61098281,\n 0.70085662],\n [0.51316212, 0.56387619, 0.61463758, 0.66543547, 0.62479453,\n 0.71626207],\n [0.48701383, 0.53632437, 0.58558606, 0.63481027, 0.5954336,\n 0.6840051],\n [0.48578027, 0.53520447, 0.58456055, 0.6338644, 0.59442508,\n 0.68312735],\n [0.4649541, 0.51361597, 0.56209924, 0.61044647, 0.57177846,\n 0.65868763]]\n)\n\n\ndef test_bb06():\n amps_out = np.empty([0, 6])\n sigs_out = np.empty([0, 6])\n for imt in imt_in:\n for i in range(len(imc_in)):\n bb06 = BeyerBommer2006(imc_in[i], imc_out[i])\n tmp = bb06.convertAmps(imt, amps_in)\n amps_out = np.vstack((amps_out, tmp))\n tmp = bb06.convertSigmas(imt, sigmas_in)\n sigs_out = np.vstack((sigs_out, tmp))\n np.testing.assert_allclose(amps_out, amps_target, atol=1e-5)\n np.testing.assert_allclose(sigs_out, sigs_target, atol=1e-5)\n\n # Test that an invalid/unknown parameter is changed to AVERAGE_HORIZONTAL\n bb06 = BeyerBommer2006('wrong', imc_out[0])\n assert bb06.imc_in == 'Average horizontal'\n assert bb06.imc_out == imc_out[0]\n bb06 = BeyerBommer2006(imc_out[0], 'wrong')\n assert bb06.imc_in == imc_out[0]\n assert bb06.imc_out == 'Average horizontal'\n bb06 = BeyerBommer2006('wrong', 'wrong')\n assert bb06.imc_in == 'Average horizontal'\n assert bb06.imc_out == 'Average horizontal'\n\n\n # Test that the correct input/output imc returns the right path\n bb06 = BeyerBommer2006('Median horizontal', 'Random horizontal')\n assert len(bb06.path) == 2\n assert bb06.path[0] == 'Median horizontal'\n assert 
bb06.path[-1] == 'Random horizontal'\n bb06 = BeyerBommer2006('Average Horizontal (RotD50)', 'Horizontal')\n assert len(bb06.path) == 2\n assert bb06.path[0] == 'Average Horizontal (RotD50)'\n assert bb06.path[-1] == 'Horizontal'\n\n # Test exception for unknown imt\n with pytest.raises(ValueError) as e:\n bb06.convertSigmasOnce('wrong', 0)\n with pytest.raises(ValueError) as e:\n bb06.convertAmpsOnce('wrong', [10.0], None, None)\n # Test exception for unknown imc\n with pytest.raises(ValueError) as e:\n bb06._verifyConversion('Wrong', imc_out=None)\n bb06.imc_out = 'wrong'\n with pytest.raises(ValueError) as e:\n bb06.convertAmpsOnce(PGA(), [10.0], None, None)\n\n # Test that AVERAGE_HORIZONTAL returns 1 regardless of imt\n bb06 = BeyerBommer2006('Average horizontal', 'Average Horizontal (RotD50)')\n denom = 1\n numer = 1\n result = np.asarray([10.0]) + np.log(numer / denom)\n returned1 = bb06.convertAmpsOnce(PGA(), [10.0], None, None)\n returned2 = bb06.convertAmpsOnce(PGV(), [10.0], None, None)\n returned3 = bb06.convertAmpsOnce(SA(0.3), [10.0], None, None)\n returned4 = bb06.convertAmpsOnce(SA(1.0), [10.0], None, None)\n assert result == returned1 == returned2 == returned3 == returned4\n ss = np.asarray([10])\n R1, C1 = 1, 0\n s1 = (ss**2 - C1**2) / R1**2\n R2 = np.asarray([1, 1, 1, 1])\n C2 = np.asarray([0.02, 0.02, 0.0241407224538, 0.03])\n imt_list = [PGA(), PGV(), SA(0.3), SA(1.0)]\n for idx, imt in enumerate(imt_list):\n target = np.sqrt(s1 * R2[idx]**2 + C2[idx]**2)\n returned = bb06.convertSigmasOnce(imt, ss)\n assert target == returned\n\n\nif __name__ == '__main__':\n test_bb06()\n", "\"\"\"\nModule implements BooreKishida2017 class to convert between various\nhorizontal intensity measure components.\n\"\"\"\n# Standard imports\nimport glob\nimport logging\nimport os.path\nimport pkg_resources\n\n# Third party imports\nimport numpy as np\nfrom openquake.hazardlib.const import IMC\nfrom openquake.hazardlib.imt import PGA, PGV\nimport pandas as pd\n\n# Local imports\nfrom shakelib.conversions.convert_imc import ComponentConverter\n\n\nclass BooreKishida2017(ComponentConverter):\n \"\"\"\n This class implements the Boore and Kishida (2017) conversions for\n horizontal intensity measure components.\n\n This class explicitly supports the following subset of the conversions\n provided by B&K (specified as OpenQuake IMCs):\n\n - RotD50 <=> GMRotI50\n - RotD50 <=> AVERAGE_HORIZONTAL (i.e., Geometric Mean \"as recorded\")\n - RotD100 <=> RotD50\n - RotD50 <=> GREATER_OF_TWO_HORIZONTAL\n - RotD100 <=> GREATER_OF_TWO_HORIZONTAL\n - GMRotI50 <=> GREATER_OF_TWO_HORIZONTAL\n - AVERAGE_HORIZONTAL <=> GREATER_OF_TWO_HORIZONTAL\n\n Chain conversions are supported when using `convertAmps`. Otherwise\n conversions must be done in two+ steps using `convertAmpsOnce`. For IMCs\n not explicitly supported by B&K, we assume the IMC is equivalent\n to the geometric mean (which B&K call GM_AR).\n\n Notes\n - Assumes ALL unknown IMC types are AVERAGE_HORIZONTAL.\n\n References\n\n Boore, D.M. and T. Kishida (2017). Relations between some\n horizontal-component ground-motion intensity measures used\n in practice. Bulletin of the Seismological Society of\n America, 107(1), 334-343, doi: 10.1785/0120160250.\n\n Beyer, K., & Bommer, J. J. (2006). Relationships between median values\n and between aleatory variabilities for different definitions of the\n horizontal component of motion. 
Bulletin of the Seismological Society\n of America, 96(4A), 1512-1522.\n \"\"\"\n def __init__(self, imc_in, imc_out):\n super().__init__()\n self.imc_in = imc_in\n self.imc_out = imc_out\n # Possible conversions\n self.conversion_graph = {\n 'Average Horizontal (RotD50)': set([\n 'Average Horizontal (GMRotI50)',\n 'Average horizontal',\n 'Horizontal Maximum Direction (RotD100)',\n 'Greater of two horizontal',\n 'Random horizontal',\n 'Horizontal',\n 'Median horizontal']),\n 'Average Horizontal (GMRotI50)': set([\n 'Average Horizontal (RotD50)',\n 'Greater of two horizontal']),\n 'Average horizontal': set([\n 'Average Horizontal (RotD50)',\n 'Greater of two horizontal']),\n 'Horizontal Maximum Direction (RotD100)': set([\n 'Average Horizontal (RotD50)',\n 'Greater of two horizontal']),\n 'Greater of two horizontal': set([\n 'Average Horizontal (RotD50)',\n 'Average Horizontal (GMRotI50)',\n 'Average horizontal',\n 'Horizontal Maximum Direction (RotD100)',\n 'Random horizontal',\n 'Horizontal',\n 'Median horizontal']),\n 'Horizontal': set([\n 'Greater of two horizontal',\n 'Average Horizontal (RotD50)']),\n 'Median horizontal': set([\n 'Greater of two horizontal',\n 'Average Horizontal (RotD50)']),\n 'Random horizontal': set([\n 'Greater of two horizontal',\n 'Average Horizontal (RotD50)'])\n }\n # Check if any imc values are unknown. If they are, convert\n # to AVERAGE_HORIZONTAL\n self.checkUnknown()\n # Get shortest conversion \"path\" between imc_in and imc_out\n self.path = self.getShortestPath(self.conversion_graph,\n self.imc_in, self.imc_out)\n\n def convertAmpsOnce(self, imt, amps, rrups=None, mag=None):\n \"\"\"\n Return an array of amps converted from one IMC to another.\n\n Args:\n imt (OpenQuake IMT): The intensity measure type of the input\n ground motions. Valid IMTs are PGA, PGV, and SA.\n amps (array): A numpy array of the (logged) ground motions\n to be converted.\n rrups (array): A numpy array of the same shape as amps,\n containing the rupture distances of the ground motions.\n mag (float): The earthquake magnitude.\n\n Returns:\n array: A numpy array of converted ground motions (logged).\n\n Raises:\n ValueError: If mag and rrup are none or the IMT\n is not an allowed type.\n \"\"\"\n # Check if mag and rrups are real values\n if mag is None or rrups is None:\n raise ValueError('No magnitude or rupture distances specified.')\n # Verify that the conversion is possible\n self._verifyConversion(self.imc_in, self.imc_out)\n # Return original amps if imc_in and imc_out are the same\n if self.pars is None:\n return amps.copy()\n # Get coeffecients\n (sigma, c0, r1, m1, m2) = self._getParamsFromIMT(imt)\n # Limit magnitude and rupture distances\n rrups_clipped = np.clip(rrups, 1e-2, 400)\n if mag < 2:\n mag = 2.0\n elif mag > 9:\n mag = 9.0\n # Calculate conversion variable\n ln_ratio = c0 + r1 * np.log(rrups_clipped / 50) + \\\n m1 * (mag - 5.5) + m2 * (mag - 5.5)**2\n #\n # The B&K file naming convention has things like D100D50, which\n # means the parameters give the (log) ratio of RotD100/RotD50,\n # but we use the convention that RotD100 would be the input IMC\n # and RotD50 would be the output IMC, so we reverse the sense\n # of the conversion here.\n #\n if self.forward:\n amps = amps - ln_ratio\n else:\n amps = amps + ln_ratio\n return amps\n\n def convertSigmasOnce(self, imt, sigmas):\n \"\"\"\n Return an array of standard deviations converted from one IMC\n to another.\n\n Note that the action of this method is to always increase the\n input standard deviations. 
Thus, while converting from one IMC\n to another and then back again will yield the original ground\n motions via convertAmps(), the standard deviations will be\n inflated by both conversions via this method.\n\n Args:\n imt (OpenQuake IMT): The intensity measure type of the input\n ground motions. Valid IMTs are PGA, PGV, and SA.\n sigmas (array): A numpy array of the standard deviations of\n the logged ground motions.\n\n Returns:\n array: A numpy array of converted standard deviations.\n\n Raises:\n ValueError: If the IMT\n is not an allowed type.\n \"\"\"\n # Verify that the conversion is possible\n self._verifyConversion(self.imc_in, self.imc_out)\n # Return original sigmas if imc_in and imc_out are the same\n if self.pars is None:\n return sigmas.copy()\n # Get coefficients\n (sigma, c0, r1, m1, m2) = self._getParamsFromIMT(imt)\n # Calculate conversion\n sigmas = np.sqrt(sigmas**2 + sigma**2)\n\n return sigmas\n\n def _getParamsFromIMT(self, imt):\n \"\"\"\n Helper function to return (possibly interpolated) conversion\n parameters for a given IMT.\n\n Args:\n imt (OpenQuake IMT): The intensity measure type of the input\n ground motions. Valid IMTs are PGA, PGV, and SA.\n\n Returns:\n (float, float, float, float, float): Coefficients for conversion.\n \"\"\"\n if imt == PGA():\n sigma = self.pars['sigma'][0]\n c0 = self.pars['c0smooth'][0]\n r1 = self.pars['r1smooth'][0]\n m1 = self.pars['m1smooth'][0]\n m2 = self.pars['m2smooth'][0]\n elif imt == PGV():\n sigma = self.pars['sigma'][1]\n c0 = self.pars['c0smooth'][1]\n r1 = self.pars['r1smooth'][1]\n m1 = self.pars['m1smooth'][1]\n m2 = self.pars['m2smooth'][1]\n elif 'SA' in imt:\n imt_per = imt.period\n pa = self.pars['per'][2:]\n sigma = np.interp(imt_per, pa, self.pars['sigma'][2:])\n c0 = np.interp(imt_per, pa, self.pars['c0smooth'][2:])\n r1 = np.interp(imt_per, pa, self.pars['r1smooth'][2:])\n m1 = np.interp(imt_per, pa, self.pars['m1smooth'][2:])\n m2 = np.interp(imt_per, pa, self.pars['m2smooth'][2:])\n else:\n raise ValueError(\"Unknown IMT: %s\" % str(imt))\n return (sigma, c0, r1, m1, m2)\n\n @staticmethod\n def _imcPairToFile(imc_in, imc_out):\n \"\"\"\n Helper function to find the name of the file representing\n the conversion.\n\n Returns:\n (str, bool): The filename and a boolean 'forward' indicating\n whether the conversion should be done in the forward (True)\n or inverse (False) direction. 
If filename is None, then no\n appropriate conversion file could be found; if it is the\n string 'Null', then imc_in and imc_out evaluate to be the\n same.\n \"\"\"\n datadir = pkg_resources.resource_filename('shakelib.conversions.imc',\n 'data')\n conv_files = glob.glob(os.path.join(datadir, '*.csv'))\n stub1 = BooreKishida2017._imcToFilestr(imc_in)\n stub2 = BooreKishida2017._imcToFilestr(imc_out)\n if stub1 == stub2:\n # No conversion necessary\n return ('Null', True)\n #\n # Look for the conversion from imc_in -> imc_out\n #\n stub = stub1 + stub2\n filelist = [name for name in conv_files if stub in name]\n if len(filelist) == 1:\n return (filelist[0], True)\n #\n # Now try the conversion from imc_out -> imc_in\n #\n stub = stub2 + stub1\n filelist = [name for name in conv_files if stub in name]\n if len(filelist) == 1:\n return (filelist[0], False)\n #\n # Can't find anything\n #\n return (None, None)\n\n @staticmethod\n def _imcToFilestr(oq_imc):\n \"\"\"\n Helper function to convert an OpenQuake IMC into part of the\n Boore & Kishida file name.\n \"\"\"\n\n if oq_imc == IMC.RotD50:\n return 'D50'\n elif oq_imc == IMC.RotD100:\n return 'D100'\n elif oq_imc == IMC.GMRotI50:\n return 'GM50'\n elif oq_imc == IMC.AVERAGE_HORIZONTAL or \\\n oq_imc == IMC.HORIZONTAL or \\\n oq_imc == IMC.RANDOM_HORIZONTAL or \\\n oq_imc == IMC.MEDIAN_HORIZONTAL:\n return 'GMAR'\n elif oq_imc == IMC.GREATER_OF_TWO_HORIZONTAL:\n return 'Larger'\n else:\n #\n # For less common IMCs, Beyer & Bommer (2006) found most\n # of them to be more or less equivalent to geometric mean\n #\n logging.warning(\"Can't handle IMC %s, using GMAR\" % oq_imc)\n return 'GMAR'\n\n def _verifyConversion(self, imc_in, imc_out=None):\n \"\"\"\n Helper method to ensure that the conversion is possible.\n\n Args:\n imc_in (IMC): OpenQuake IMC type of the input amp array.\n imc_out (IMC): Desired OpenQuake IMC type of the output amps.\n Default is None.\n\n Raises:\n ValueError if imc_in or imc_out are not valid.\n \"\"\"\n filename, forward = self._imcPairToFile(imc_in, imc_out)\n if filename is None:\n raise ValueError(\"Can't find a conversion file for %s and %s\" %\n (imc_in, imc_out))\n self.forward = forward\n if filename == 'Null':\n # Null conversion -- imc_in and imc_out are either identical\n # or at least functionally equivalent\n self.pars = None\n else:\n self.pars = pd.read_csv(filename)\n" ]
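The BooreKishida2017 class above is easiest to grasp from a short usage sketch. The snippet below is illustrative only and is not part of the stored file: the import path is a guess inferred from the pkg_resources.resource_filename('shakelib.conversions.imc', 'data') call in the source, the input values are invented, and it assumes the bundled conversion CSVs are installed. It uses only the constructor and the convertAmpsOnce/convertSigmasOnce signatures shown above, with amplitudes given as natural-log ground motions as the docstrings require.

import numpy as np
from openquake.hazardlib.imt import PGA
# Assumed module path; inferred from the package layout, not confirmed.
from shakelib.conversions.imc.boore_kishida_2017 import BooreKishida2017

# Direct edge in conversion_graph: RotD50 -> greater of two horizontal.
bk17 = BooreKishida2017('Average Horizontal (RotD50)',
                        'Greater of two horizontal')

amps = np.log(np.array([0.05, 0.10, 0.20]))  # natural-log ground motions
rrups = np.array([10.0, 50.0, 150.0])        # rupture distances in km
sigmas = np.array([0.6, 0.6, 0.6])           # sigmas of the logged motions

# Both rrups and mag are required; convertAmpsOnce raises ValueError otherwise.
converted_amps = bk17.convertAmpsOnce(PGA(), amps, rrups, mag=6.5)
# Sigma conversion only ever inflates the input standard deviations.
converted_sigmas = bk17.convertSigmasOnce(PGA(), sigmas)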
[ [ "numpy.log", "numpy.sqrt", "numpy.asarray", "numpy.vstack", "numpy.testing.assert_allclose", "numpy.array", "numpy.empty" ], [ "numpy.log", "pandas.read_csv", "numpy.sqrt", "numpy.clip", "numpy.interp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
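Both converter classes in this record obtain their conversion chain from getShortestPath, which is inherited from ComponentConverter and not included in this record. As a mental model only — an assumed breadth-first search consistent with the paths the tests assert on, not the library's actual implementation — the dict-of-sets conversion_graph can be searched like this:

from collections import deque
from typing import Dict, List, Optional, Set

def shortest_conversion_path(graph: Dict[str, Set[str]],
                             start: str, goal: str) -> Optional[List[str]]:
    # Illustrative stand-in for ComponentConverter.getShortestPath (assumed).
    if start == goal:
        return [start]
    seen = {start}
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        for nxt in graph.get(path[-1], set()):
            if nxt in seen:
                continue
            if nxt == goal:
                return path + [nxt]  # BFS dequeues shorter paths first
            seen.add(nxt)
            queue.append(path + [nxt])
    return None  # no chain of supported conversions exists

In the BooreKishida2017 graph above, for example, 'Median horizontal' and 'Random horizontal' are not adjacent, so such a search would route through an intermediate IMC like 'Greater of two horizontal', giving a three-element path and hence a chained two-step conversion.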
kongfanmiao/Qcodes
[ "de6b0d312cca2b252087885fc8812875e81c6a59" ]
[ "qcodes/dataset/sqlite/queries.py" ]
[ "\"\"\"\nThis module contains useful SQL queries and their combinations which are\nspecific to the domain of QCoDeS database.\n\"\"\"\nimport logging\nimport sqlite3\nimport time\nimport unicodedata\nimport warnings\nfrom typing import (Any, Callable, Dict, List, Mapping, Optional, Sequence,\n Tuple, Union, cast)\nfrom copy import copy\nimport numpy as np\nfrom numpy import VisibleDeprecationWarning\n\nimport qcodes as qc\nfrom qcodes.dataset.descriptions.dependencies import InterDependencies_\nfrom qcodes.dataset.descriptions.param_spec import ParamSpec, ParamSpecBase\nfrom qcodes.dataset.descriptions.rundescriber import RunDescriber\nfrom qcodes.dataset.descriptions.versioning import serialization as serial\nfrom qcodes.dataset.descriptions.versioning import v0\nfrom qcodes.dataset.descriptions.versioning.converters import old_to_new\nfrom qcodes.dataset.guids import generate_guid, parse_guid\nfrom qcodes.dataset.sqlite.connection import (ConnectionPlus, atomic,\n atomic_transaction, transaction)\nfrom qcodes.dataset.sqlite.query_helpers import (VALUES, insert_column,\n insert_values,\n is_column_in_table, many,\n many_many, one,\n select_many_where,\n select_one_where,\n sql_placeholder_string,\n update_where)\nfrom qcodes.utils.deprecate import deprecate\n\nlog = logging.getLogger(__name__)\n\n\n_unicode_categories = ('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nd', 'Pc', 'Pd', 'Zs')\n\n\n# in the current version, these are the standard columns of the \"runs\" table\n# Everything else is metadata\nRUNS_TABLE_COLUMNS = [\"run_id\", \"exp_id\", \"name\", \"result_table_name\",\n \"result_counter\", \"run_timestamp\", \"completed_timestamp\",\n \"is_completed\", \"parameters\", \"guid\",\n \"run_description\", \"snapshot\", \"parent_datasets\",\n \"captured_run_id\", \"captured_counter\"]\n\n\ndef is_run_id_in_database(conn: ConnectionPlus,\n *run_ids: int) -> Dict[int, bool]:\n \"\"\"\n Look up run_ids and return a dictionary with the answers to the question\n \"is this run_id in the database?\"\n\n Args:\n conn: the connection to the database\n run_ids: the run_ids to look up\n\n Returns:\n a dict with the run_ids as keys and bools as values. 
True means that\n the run_id DOES exist in the database\n \"\"\"\n run_ids = np.unique(run_ids)\n placeholders = sql_placeholder_string(len(run_ids))\n\n query = f\"\"\"\n SELECT run_id\n FROM runs\n WHERE run_id in {placeholders}\n \"\"\"\n\n cursor = conn.cursor()\n cursor.execute(query, run_ids)\n rows = cursor.fetchall()\n existing_ids = [row[0] for row in rows]\n return {run_id: (run_id in existing_ids) for run_id in run_ids}\n\n\ndef _build_data_query(table_name: str,\n columns: List[str],\n start: Optional[int] = None,\n end: Optional[int] = None,\n ) -> str:\n\n _columns = \",\".join(columns)\n query = f\"\"\"\n SELECT {_columns}\n FROM \"{table_name}\"\n \"\"\"\n\n start_specified = start is not None\n end_specified = end is not None\n\n where = ' WHERE' if start_specified or end_specified else ''\n start_condition = f' rowid >= {start}' if start_specified else ''\n end_condition = f' rowid <= {end}' if end_specified else ''\n and_ = ' AND' if start_specified and end_specified else ''\n\n query += where + start_condition + and_ + end_condition\n return query\n\n\n@deprecate('This method does not accurately represent the dataset.',\n 'Use `get_parameter_data` instead.')\ndef get_data(conn: ConnectionPlus,\n table_name: str,\n columns: List[str],\n start: Optional[int] = None,\n end: Optional[int] = None,\n ) -> List[List[Any]]:\n \"\"\"\n Get data from the columns of a table.\n Allows to specify a range of rows (1-based indexing, both ends are\n included).\n\n Args:\n conn: database connection\n table_name: name of the table\n columns: list of columns\n start: start of range; if None, then starts from the top of the table\n end: end of range; if None, then ends at the bottom of the table\n\n Returns:\n the data requested in the format of list of rows of values\n \"\"\"\n if len(columns) == 0:\n warnings.warn(\n 'get_data: requested data without specifying parameters/columns.'\n 'Returning empty list.'\n )\n return [[]]\n query = _build_data_query(table_name, columns, start, end)\n c = atomic_transaction(conn, query)\n res = many_many(c, *columns)\n\n return res\n\n\ndef get_parameter_data(conn: ConnectionPlus,\n table_name: str,\n columns: Sequence[str] = (),\n start: Optional[int] = None,\n end: Optional[int] = None) -> \\\n Dict[str, Dict[str, np.ndarray]]:\n \"\"\"\n Get data for one or more parameters and its dependencies. The data\n is returned as numpy arrays within 2 layers of nested dicts. The keys of\n the outermost dict are the requested parameters and the keys of the second\n level are the loaded parameters (requested parameter followed by its\n dependencies). Start and End allows one to specify a range of rows to\n be returned (1-based indexing, both ends are included). The range filter\n is applied AFTER the NULL values have been filtered out.\n Be aware that different parameters that are independent of each other\n may return a different number of rows.\n\n Note that this assumes that all array type parameters have the same length.\n This should always be the case for a parameter and its dependencies.\n\n Note that all numeric data will at the moment be returned as floating point\n values.\n\n Args:\n conn: database connection\n table_name: name of the table\n columns: list of columns. 
If no columns are provided, all parameters\n are returned.\n start: start of range; if None, then starts from the top of the table\n end: end of range; if None, then ends at the bottom of the table\n \"\"\"\n rundescriber = get_rundescriber_from_result_table_name(conn, table_name)\n\n output = {}\n if len(columns) == 0:\n columns = [ps.name for ps in rundescriber.interdeps.non_dependencies]\n\n # loop over all the requested parameters\n for output_param in columns:\n output[output_param] = get_shaped_parameter_data_for_one_paramtree(\n conn,\n table_name,\n rundescriber,\n output_param,\n start,\n end)\n return output\n\n\ndef get_shaped_parameter_data_for_one_paramtree(\n conn: ConnectionPlus,\n table_name: str,\n rundescriber: RunDescriber,\n output_param: str,\n start: Optional[int],\n end: Optional[int]\n) -> Dict[str, np.ndarray]:\n \"\"\"\n Get the data for a parameter tree and reshape it according to the\n metadata about the dataset. This will only reshape the loaded data if\n the number of points in the loaded data matches the expected number of\n points registered in the metadata.\n If there are more measured datapoints\n than expected a warning will be given.\n \"\"\"\n\n one_param_output, _ = get_parameter_data_for_one_paramtree(\n conn,\n table_name,\n rundescriber,\n output_param,\n start,\n end\n )\n if rundescriber.shapes is not None:\n shape = rundescriber.shapes.get(output_param)\n\n if shape is not None:\n total_len_shape = np.prod(shape)\n for name, paramdata in one_param_output.items():\n total_data_shape = np.prod(paramdata.shape)\n if total_data_shape == total_len_shape:\n one_param_output[name] = paramdata.reshape(shape)\n elif total_data_shape > total_len_shape:\n log.warning(f\"Tried to set data shape for {name} in \"\n f\"dataset {output_param} \"\n f\"from metadata when \"\n f\"loading but found inconsistent lengths \"\n f\"{total_data_shape} and {total_len_shape}\")\n return one_param_output\n\n\ndef get_rundescriber_from_result_table_name(\n conn: ConnectionPlus,\n result_table_name: str\n) -> RunDescriber:\n sql = \"\"\"\n SELECT run_id FROM runs WHERE result_table_name = ?\n \"\"\"\n c = atomic_transaction(conn, sql, result_table_name)\n run_id = one(c, 'run_id')\n rd = serial.from_json_to_current(get_run_description(conn, run_id))\n return rd\n\n\ndef get_interdeps_from_result_table_name(conn: ConnectionPlus, result_table_name: str) -> InterDependencies_:\n rd = get_rundescriber_from_result_table_name(conn, result_table_name)\n interdeps = rd.interdeps\n return interdeps\n\n\ndef get_parameter_data_for_one_paramtree(\n conn: ConnectionPlus,\n table_name: str,\n rundescriber: RunDescriber,\n output_param: str,\n start: Optional[int],\n end: Optional[int]\n) -> Tuple[Dict[str, np.ndarray], int]:\n interdeps = rundescriber.interdeps\n data, paramspecs, n_rows = _get_data_for_one_param_tree(\n conn, table_name, interdeps, output_param, start, end\n )\n if not paramspecs[0].name == output_param:\n raise ValueError(\"output_param should always be the first \"\n \"parameter in a parameter tree. 
It is not\")\n _expand_data_to_arrays(data, paramspecs)\n\n param_data = {}\n # Benchmarking shows that transposing the data with python types is\n # faster than transposing the data using np.array.transpose\n res_t = map(list, zip(*data))\n\n for paramspec, column_data in zip(paramspecs, res_t):\n try:\n if paramspec.type == \"numeric\":\n # there is no reliable way to\n # tell the difference between a float and an int loaded\n # from sqlite numeric columns so always fall back to float\n dtype: Optional[type] = np.float64\n else:\n dtype = None\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\",\n category=VisibleDeprecationWarning,\n message=\"Creating an ndarray from ragged nested sequences\"\n )\n # numpy warns here and coming versions\n # will eventually raise\n # for ragged arrays if you don't explicitly set\n # dtype=object\n # It is time consuming to detect ragged arrays here\n # and it is expected to be a relatively rare situation\n # so fall back to object if the regular dtype fails\n param_data[paramspec.name] = np.array(column_data, dtype=dtype)\n except Exception:\n # Not clear which error to catch here. This will only be clarified\n # once numpy actually starts to raise here.\n param_data[paramspec.name] = np.array(column_data, dtype=object)\n return param_data, n_rows\n\n\ndef _expand_data_to_arrays(data: List[List[Any]], paramspecs: Sequence[ParamSpecBase]) -> None:\n types = [param.type for param in paramspecs]\n # if we have array type parameters expand all other parameters\n # to arrays\n if 'array' in types and ('numeric' in types or 'text' in types\n or 'complex' in types):\n first_array_element = types.index('array')\n numeric_elms = [i for i, x in enumerate(types)\n if x == \"numeric\"]\n complex_elms = [i for i, x in enumerate(types)\n if x == 'complex']\n text_elms = [i for i, x in enumerate(types)\n if x == \"text\"]\n for row in data:\n for element in numeric_elms:\n row[element] = np.full_like(row[first_array_element],\n row[element],\n dtype=np.dtype(np.float64))\n # todo should we handle int/float types here\n # we would in practice have to perform another\n # loop to check that all elements of a given array can be cast to\n # int without losing precision before choosing an integer\n # representation of the array\n for element in complex_elms:\n row[element] = np.full_like(row[first_array_element],\n row[element],\n dtype=np.dtype(np.complex128))\n for element in text_elms:\n strlen = len(row[element])\n row[element] = np.full_like(row[first_array_element],\n row[element],\n dtype=np.dtype(f'U{strlen}'))\n\n\ndef _get_data_for_one_param_tree(conn: ConnectionPlus, table_name: str,\n interdeps: InterDependencies_, output_param: str,\n start: Optional[int], end: Optional[int]) \\\n -> Tuple[List[List[Any]], List[ParamSpecBase], int]:\n output_param_spec = interdeps._id_to_paramspec[output_param]\n # find all the dependencies of this param\n\n dependency_params = list(interdeps.dependencies.get(output_param_spec, ()))\n dependency_names = [param.name for param in dependency_params]\n paramspecs = [output_param_spec] + dependency_params\n res = get_parameter_tree_values(conn,\n table_name,\n output_param,\n *dependency_names,\n start=start,\n end=end)\n n_rows = len(res)\n return res, paramspecs, n_rows\n\n\n@deprecate('This method does not accurately represent the dataset.',\n 'Use `get_parameter_data` instead.')\ndef get_values(conn: ConnectionPlus,\n table_name: str,\n param_name: str) -> List[List[Any]]:\n \"\"\"\n Get the not-null values of a 
parameter\n\n Args:\n conn: Connection to the database\n table_name: Name of the table that holds the data\n param_name: Name of the parameter to get the setpoints of\n\n Returns:\n The values\n \"\"\"\n sql = f\"\"\"\n SELECT {param_name} FROM \"{table_name}\"\n WHERE {param_name} IS NOT NULL\n \"\"\"\n c = atomic_transaction(conn, sql)\n res = many_many(c, param_name)\n\n return res\n\n\ndef get_parameter_tree_values(conn: ConnectionPlus,\n result_table_name: str,\n toplevel_param_name: str,\n *other_param_names: str,\n start: Optional[int] = None,\n end: Optional[int] = None) -> List[List[Any]]:\n \"\"\"\n Get the values of one or more columns from a data table. The rows\n retrieved are the rows where the 'toplevel_param_name' column has\n non-NULL values, which is useful when retrieving a top level parameter\n and its setpoints (and inferred_from parameter values)\n\n Args:\n conn: Connection to the DB file\n result_table_name: The result table whence the values are to be\n retrieved\n toplevel_param_name: Name of the column that holds the top level\n parameter\n other_param_names: Names of additional columns to retrieve\n start: The (1-indexed) result to include as the first results to\n be returned. None is equivalent to 1. If start > end, nothing\n is returned.\n end: The (1-indexed) result to include as the last result to be\n returned. None is equivalent to \"all the rest\". If start > end,\n nothing is returned.\n\n Returns:\n A list of list. The outer list index is row number, the inner list\n index is parameter value (first toplevel_param, then other_param_names)\n \"\"\"\n\n offset = max((start - 1), 0) if start is not None else 0\n limit = max((end - offset), 0) if end is not None else -1\n\n if start is not None and end is not None and start > end:\n limit = 0\n\n # Note: if we use placeholders for the SELECT part, then we get rows\n # back that have \"?\" as all their keys, making further data extraction\n # impossible\n #\n # Also, placeholders seem to be ignored in the WHERE X IS NOT NULL line\n\n columns = [toplevel_param_name] + list(other_param_names)\n columns_for_select = ','.join(columns)\n\n sql_subquery = f\"\"\"\n (SELECT {columns_for_select}\n FROM \"{result_table_name}\"\n WHERE {toplevel_param_name} IS NOT NULL)\n \"\"\"\n sql = f\"\"\"\n SELECT {columns_for_select}\n FROM {sql_subquery}\n LIMIT {limit} OFFSET {offset}\n \"\"\"\n\n cursor = conn.cursor()\n cursor.execute(sql, ())\n res = many_many(cursor, *columns)\n\n return res\n\n\n@deprecate(alternative=\"get_parameter_data\")\ndef get_setpoints(conn: ConnectionPlus,\n table_name: str,\n param_name: str) -> Dict[str, List[List[Any]]]:\n \"\"\"\n Get the setpoints for a given dependent parameter\n\n Args:\n conn: Connection to the database\n table_name: Name of the table that holds the data\n param_name: Name of the parameter to get the setpoints of\n\n Returns:\n A list of returned setpoint values. Each setpoint return value\n is a list of lists of Any. 
The first list is a list of run points,\n the second list is a list of parameter values.\n \"\"\"\n # TODO: We do this in no less than 5 table lookups, surely\n # this number can be reduced\n\n # get run_id\n sql = \"\"\"\n SELECT run_id FROM runs WHERE result_table_name = ?\n \"\"\"\n c = atomic_transaction(conn, sql, table_name)\n run_id = one(c, 'run_id')\n\n # get the parameter layout id\n sql = \"\"\"\n SELECT layout_id FROM layouts\n WHERE parameter = ?\n and run_id = ?\n \"\"\"\n c = atomic_transaction(conn, sql, param_name, run_id)\n layout_id = one(c, 'layout_id')\n\n # get the setpoint layout ids\n sql = \"\"\"\n SELECT independent FROM dependencies\n WHERE dependent = ?\n \"\"\"\n c = atomic_transaction(conn, sql, layout_id)\n indeps = many_many(c, 'independent')\n indeps = [idp[0] for idp in indeps]\n\n # get the setpoint names\n sql = f\"\"\"\n SELECT parameter FROM layouts WHERE layout_id\n IN {str(indeps).replace('[', '(').replace(']', ')')}\n \"\"\"\n c = atomic_transaction(conn, sql)\n setpoint_names_temp = many_many(c, 'parameter')\n setpoint_names = [spn[0] for spn in setpoint_names_temp]\n setpoint_names = cast(List[str], setpoint_names)\n\n # get the actual setpoint data\n output: Dict[str, List[List[Any]]] = {}\n for sp_name in setpoint_names:\n sql = f\"\"\"\n SELECT {sp_name}\n FROM \"{table_name}\"\n WHERE {param_name} IS NOT NULL\n \"\"\"\n c = atomic_transaction(conn, sql)\n sps = many_many(c, sp_name)\n output[sp_name] = sps\n\n return output\n\n\ndef get_runid_from_expid_and_counter(conn: ConnectionPlus, exp_id: int,\n counter: int) -> int:\n \"\"\"\n Get the run_id of a run in the specified experiment with the specified\n counter\n\n Args:\n conn: connection to the database\n exp_id: the exp_id of the experiment containing the run\n counter: the intra-experiment run counter of that run\n \"\"\"\n sql = \"\"\"\n SELECT run_id\n FROM runs\n WHERE result_counter= ? AND\n exp_id = ?\n \"\"\"\n c = transaction(conn, sql, counter, exp_id)\n run_id = one(c, 'run_id')\n return run_id\n\n\ndef get_runid_from_guid(conn: ConnectionPlus, guid: str) -> Union[int, None]:\n \"\"\"\n Get the run_id of a run based on the guid\n\n Args:\n conn: connection to the database\n guid: the guid to look up\n\n Returns:\n The run_id if found, else -1.\n\n Raises:\n RuntimeError if more than one run with the given GUID exists\n \"\"\"\n query = \"\"\"\n SELECT run_id\n FROM runs\n WHERE guid = ?\n \"\"\"\n cursor = conn.cursor()\n cursor.execute(query, (guid,))\n rows = cursor.fetchall()\n if len(rows) == 0:\n run_id = -1\n elif len(rows) > 1:\n errormssg = ('Critical consistency error: multiple runs with'\n f' the same GUID found! {len(rows)} runs have GUID '\n f'{guid}')\n log.critical(errormssg)\n raise RuntimeError(errormssg)\n else:\n run_id = int(rows[0]['run_id'])\n\n return run_id\n\n\ndef get_guids_from_run_spec(conn: ConnectionPlus,\n captured_run_id: Optional[int] = None,\n captured_counter: Optional[int] = None,\n experiment_name: Optional[str] = None,\n sample_name: Optional[str] = None) -> List[str]:\n \"\"\"\n Get the GUIDs of runs matching the supplied run specifications.\n\n # Todo: do we need to select by start/end time too? 
Is result name useful?\n\n Args:\n conn: connection to the database.\n captured_run_id: the run_id that was assigned to this\n run at capture time.\n captured_counter: the counter that was assigned to this\n run at capture time.\n experiment_name: Name of the experiment that the runs should belong to.\n sample_name: Name of the sample that the query should be restricted to.\n\n Returns:\n A list of the GUIDs matching the supplied specifications.\n \"\"\"\n # first find all experiments that match the given sample\n # and experiment name\n exp_query = {}\n exp_ids: Optional[List[int]]\n if experiment_name is not None or sample_name is not None:\n if sample_name is not None:\n exp_query['sample_name'] = sample_name\n if experiment_name is not None:\n exp_query['name'] = experiment_name\n exp_ids = get_matching_exp_ids(conn,\n **exp_query)\n if exp_ids == []:\n return []\n else:\n exp_ids = None\n\n conds = []\n inputs = []\n\n if exp_ids is not None:\n exp_placeholder = sql_placeholder_string(len(exp_ids))\n conds.append(f\"exp_id in {exp_placeholder}\")\n inputs.extend(exp_ids)\n if captured_run_id is not None:\n conds.append(\"captured_run_id is ?\")\n inputs.append(captured_run_id)\n if captured_counter is not None:\n conds.append(\"captured_counter is ?\")\n inputs.append(captured_counter)\n\n if len(conds) >= 1:\n where_clause = \" WHERE \" + \" AND \".join(conds)\n else:\n where_clause = \"\"\n\n query = \"SELECT guid from runs\" + where_clause + \" ORDER BY run_id\"\n\n cursor = conn.cursor()\n if len(inputs) > 0:\n cursor.execute(query, inputs)\n else:\n cursor.execute(query)\n\n rows = cursor.fetchall()\n results = []\n for r in rows:\n results.append(r['guid'])\n return results\n\n\ndef _get_layout_id(conn: ConnectionPlus,\n parameter: Union[ParamSpec, str],\n run_id: int) -> int:\n \"\"\"\n Get the layout id of a parameter in a given run\n\n Args:\n conn: The database connection\n parameter: A ParamSpec or the name of the parameter\n run_id: The run_id of the run in question\n \"\"\"\n # get the parameter layout id\n sql = \"\"\"\n SELECT layout_id FROM layouts\n WHERE parameter = ?\n and run_id = ?\n \"\"\"\n\n if isinstance(parameter, ParamSpec):\n name = parameter.name\n elif isinstance(parameter, str):\n name = parameter\n else:\n raise ValueError('Wrong parameter type, must be ParamSpec or str, '\n f'received {type(parameter)}.')\n\n c = atomic_transaction(conn, sql, name, run_id)\n res = one(c, 'layout_id')\n\n return res\n\n\ndef _get_dependents(conn: ConnectionPlus,\n run_id: int) -> List[int]:\n \"\"\"\n Get dependent layout_ids for a certain run_id, i.e. the layout_ids of all\n the dependent variables\n \"\"\"\n sql = \"\"\"\n SELECT layout_id FROM layouts\n WHERE run_id=? 
and layout_id in (SELECT dependent FROM dependencies)\n \"\"\"\n c = atomic_transaction(conn, sql, run_id)\n res = [d[0] for d in many_many(c, 'layout_id')]\n return res\n\n\ndef _get_dependencies(conn: ConnectionPlus,\n layout_id: int) -> List[List[int]]:\n \"\"\"\n Get the dependencies of a certain dependent variable (indexed by its\n layout_id)\n\n Args:\n conn: connection to the database\n layout_id: the layout_id of the dependent variable\n \"\"\"\n sql = \"\"\"\n SELECT independent, axis_num FROM dependencies WHERE dependent=?\n \"\"\"\n c = atomic_transaction(conn, sql, layout_id)\n res = many_many(c, 'independent', 'axis_num')\n return res\n\n\n# Higher level Wrappers\n\n\ndef new_experiment(conn: ConnectionPlus,\n name: str,\n sample_name: str,\n format_string: Optional[str] = \"{}-{}-{}\",\n start_time: Optional[float] = None,\n end_time: Optional[float] = None,\n ) -> int:\n \"\"\"\n Add new experiment to container.\n\n Args:\n conn: database connection\n name: the name of the experiment\n sample_name: the name of the current sample\n format_string: basic format string for table-name\n must contain 3 placeholders.\n start_time: time when the experiment was started. Do not supply this\n unless you have a very good reason to do so.\n end_time: time when the experiment was completed. Do not supply this\n unless you have a VERY good reason to do so\n\n Returns:\n id: row-id of the created experiment\n \"\"\"\n query = \"\"\"\n INSERT INTO experiments\n (name, sample_name, format_string,\n run_counter, start_time, end_time)\n VALUES\n (?,?,?,?,?,?)\n \"\"\"\n\n start_time = start_time or time.time()\n values = (name, sample_name, format_string, 0, start_time, end_time)\n\n curr = atomic_transaction(conn, query, *values)\n return curr.lastrowid\n\n\n# TODO(WilliamHPNielsen): we should remove the redundant\n# is_completed\ndef mark_run_complete(conn: ConnectionPlus, run_id: int) -> None:\n \"\"\" Mark run complete\n\n Args:\n conn: database connection\n run_id: id of the run to mark complete\n \"\"\"\n query = \"\"\"\n UPDATE\n runs\n SET\n completed_timestamp=?,\n is_completed=?\n WHERE run_id=?;\n \"\"\"\n atomic_transaction(conn, query, time.time(), True, run_id)\n\n\ndef completed(conn: ConnectionPlus, run_id: int) -> bool:\n \"\"\" Check if the run is complete\n\n Args:\n conn: database connection\n run_id: id of the run to check\n \"\"\"\n return bool(select_one_where(conn, \"runs\", \"is_completed\",\n \"run_id\", run_id))\n\n\ndef get_completed_timestamp_from_run_id(\n conn: ConnectionPlus, run_id: int) -> float:\n \"\"\"\n Retrieve the timestamp when the given measurement run was completed\n\n If the measurement run has not been marked as completed, then the returned\n value is None.\n\n Args:\n conn: database connection\n run_id: id of the run\n\n Returns:\n timestamp in seconds since the Epoch, or None\n \"\"\"\n return select_one_where(conn, \"runs\", \"completed_timestamp\",\n \"run_id\", run_id)\n\n\ndef get_guid_from_run_id(conn: ConnectionPlus, run_id: int) -> str:\n \"\"\"\n Get the guid of the given run\n\n Args:\n conn: database connection\n run_id: id of the run\n \"\"\"\n return select_one_where(conn, \"runs\", \"guid\", \"run_id\", run_id)\n\n\ndef finish_experiment(conn: ConnectionPlus, exp_id: int) -> None:\n \"\"\" Finish experiment\n\n Args:\n conn: database connection\n exp_id: the id of the experiment\n \"\"\"\n query = \"\"\"\n UPDATE experiments SET end_time=? 
WHERE exp_id=?;\n \"\"\"\n atomic_transaction(conn, query, time.time(), exp_id)\n\n\ndef get_run_counter(conn: ConnectionPlus, exp_id: int) -> int:\n \"\"\" Get the experiment run counter\n\n Args:\n conn: the connection to the sqlite database\n exp_id: experiment identifier\n\n Returns:\n the experiment run counter\n\n \"\"\"\n return select_one_where(conn, \"experiments\", \"run_counter\",\n where_column=\"exp_id\",\n where_value=exp_id)\n\n\ndef get_experiments(conn: ConnectionPlus) -> List[sqlite3.Row]:\n \"\"\" Get a list of experiments\n Args:\n conn: database connection\n\n Returns:\n list of rows\n \"\"\"\n sql = \"\"\"\n SELECT * FROM experiments\n \"\"\"\n c = atomic_transaction(conn, sql)\n\n return c.fetchall()\n\n\ndef get_matching_exp_ids(conn: ConnectionPlus,\n **match_conditions: Any) -> List[int]:\n \"\"\"\n Get exp_ids for experiments matching the match_conditions\n\n Raises:\n ValueError if a match_condition that is not \"name\", \"sample_name\",\n \"format_string\", \"run_counter\", \"start_time\", or \"end_time\"\n \"\"\"\n valid_conditions = [\"name\", \"sample_name\", \"start_time\", \"end_time\",\n \"run_counter\", \"format_string\"]\n\n for mcond in match_conditions:\n if mcond not in valid_conditions:\n raise ValueError(f\"{mcond} is not a valid match condition.\")\n\n end_time = match_conditions.get('end_time', None)\n time_eq = \"=\" if end_time is not None else \"IS\"\n\n sample_name = match_conditions.get('sample_name', None)\n sample_name_eq = \"=\" if sample_name is not None else \"IS\"\n\n query = \"SELECT exp_id FROM experiments \"\n for n, mcond in enumerate(match_conditions):\n if n == 0:\n query += f\"WHERE {mcond} = ? \"\n else:\n query += f\"AND {mcond} = ? \"\n\n # now some syntax clean-up\n if \"format_string\" in match_conditions:\n format_string = match_conditions[\"format_string\"]\n query = query.replace(\"format_string = ?\",\n f'format_string = \"{format_string}\"')\n match_conditions.pop(\"format_string\")\n query = query.replace(\"end_time = ?\", f\"end_time {time_eq} ?\")\n query = query.replace(\"sample_name = ?\", f\"sample_name {sample_name_eq} ?\")\n\n cursor = conn.cursor()\n cursor.execute(query, tuple(match_conditions.values()))\n rows = cursor.fetchall()\n\n return [row[0] for row in rows]\n\n\ndef get_exp_ids_from_run_ids(conn: ConnectionPlus,\n run_ids: Sequence[int]) -> List[int]:\n \"\"\"\n Get the corresponding exp_id for a sequence of run_ids\n\n Args:\n conn: connection to the database\n run_ids: a sequence of the run_ids to get the exp_id of\n\n Returns:\n A list of exp_ids matching the run_ids\n \"\"\"\n sql_placeholders = sql_placeholder_string(len(run_ids))\n exp_id_query = f\"\"\"\n SELECT exp_id\n FROM runs\n WHERE run_id IN {sql_placeholders}\n \"\"\"\n cursor = conn.cursor()\n cursor.execute(exp_id_query, run_ids)\n rows = cursor.fetchall()\n\n return [exp_id for row in rows for exp_id in row]\n\n\ndef get_last_experiment(conn: ConnectionPlus) -> Optional[int]:\n \"\"\"\n Return last started experiment id\n\n Returns None if there are no experiments in the database\n \"\"\"\n query = \"SELECT MAX(exp_id) FROM experiments\"\n c = atomic_transaction(conn, query)\n return c.fetchall()[0][0]\n\n\ndef get_runs(conn: ConnectionPlus,\n exp_id: Optional[int] = None) -> List[sqlite3.Row]:\n \"\"\" Get a list of runs.\n\n Args:\n conn: database connection\n exp_id: id of the experiment to look inside.\n If None all experiments will be included\n\n Returns:\n list of rows\n \"\"\"\n with atomic(conn) as conn:\n if exp_id:\n sql 
= \"\"\"\n SELECT * FROM runs\n where exp_id = ?\n \"\"\"\n c = transaction(conn, sql, exp_id)\n else:\n sql = \"\"\"\n SELECT * FROM runs\n \"\"\"\n c = transaction(conn, sql)\n\n return c.fetchall()\n\n\ndef get_last_run(conn: ConnectionPlus,\n exp_id: Optional[int] = None) -> Optional[int]:\n \"\"\"\n Get run_id of the last run in experiment with exp_id\n\n Args:\n conn: connection to use for the query\n exp_id: id of the experiment to look inside.\n If None all experiments will be included\n\n Returns:\n the integer id of the last run or None if there are not runs in the\n experiment\n \"\"\"\n if exp_id is not None:\n query = \"\"\"\n SELECT run_id, max(run_timestamp), exp_id\n FROM runs\n WHERE exp_id = ?;\n \"\"\"\n c = atomic_transaction(conn, query, exp_id)\n else:\n query = \"\"\"\n SELECT run_id, max(run_timestamp)\n FROM runs\n \"\"\"\n c = atomic_transaction(conn, query)\n return one(c, 'run_id')\n\n\ndef run_exists(conn: ConnectionPlus, run_id: int) -> bool:\n # the following query always returns a single sqlite3.Row with an integer\n # value of `1` or `0` for existing and non-existing run_id in the database\n query = \"\"\"\n SELECT EXISTS(\n SELECT 1\n FROM runs\n WHERE run_id = ?\n LIMIT 1\n );\n \"\"\"\n res: sqlite3.Row = atomic_transaction(conn, query, run_id).fetchone()\n return bool(res[0])\n\n\ndef data_sets(conn: ConnectionPlus) -> List[sqlite3.Row]:\n \"\"\" Get a list of datasets\n Args:\n conn: database connection\n\n Returns:\n list of rows\n \"\"\"\n sql = \"\"\"\n SELECT * FROM runs\n \"\"\"\n c = atomic_transaction(conn, sql)\n return c.fetchall()\n\n\ndef format_table_name(fmt_str: str, name: str, exp_id: int,\n run_counter: int) -> str:\n \"\"\"\n Format the format_string into a table name\n\n Args:\n fmt_str: a valid format string\n name: the run name\n exp_id: the experiment ID\n run_counter: the intra-experiment runnumber of this run\n \"\"\"\n table_name = fmt_str.format(name, exp_id, run_counter)\n _validate_table_name(table_name) # raises if table_name not valid\n return table_name\n\n\ndef _insert_run(conn: ConnectionPlus, exp_id: int, name: str,\n guid: str,\n parameters: Optional[List[ParamSpec]] = None,\n captured_run_id: Optional[int] = None,\n captured_counter: Optional[int] = None,\n parent_dataset_links: str = \"[]\"\n ) -> Tuple[int, str, int]:\n\n # get run counter and formatter from experiments\n run_counter, format_string = select_many_where(conn,\n \"experiments\",\n \"run_counter\",\n \"format_string\",\n where_column=\"exp_id\",\n where_value=exp_id)\n run_counter += 1\n if captured_counter is None:\n with atomic(conn) as conn:\n query = \"\"\"\n SELECT\n max(captured_counter)\n FROM\n runs\n WHERE\n exp_id = ?\"\"\"\n curr = transaction(conn, query, exp_id)\n existing_captured_counter = one(curr, 0)\n if existing_captured_counter is not None:\n captured_counter = existing_captured_counter + 1\n else:\n captured_counter = run_counter\n formatted_name = format_table_name(format_string, name, exp_id,\n run_counter)\n table = \"runs\"\n\n parameters = parameters or []\n\n run_desc = RunDescriber(old_to_new(v0.InterDependencies(*parameters)))\n desc_str = serial.to_json_for_storage(run_desc)\n\n if captured_run_id is None:\n with atomic(conn) as conn:\n query = \"\"\"\n SELECT\n max(captured_run_id)\n FROM\n runs\"\"\"\n curr = transaction(conn, query)\n existing_captured_run_id = one(curr, 0)\n if existing_captured_run_id is not None:\n captured_run_id = existing_captured_run_id + 1\n else:\n captured_run_id = 1\n\n with atomic(conn) as 
conn:\n\n if parameters:\n query = f\"\"\"\n INSERT INTO {table}\n (name,\n exp_id,\n guid,\n result_table_name,\n result_counter,\n run_timestamp,\n parameters,\n is_completed,\n run_description,\n captured_run_id,\n captured_counter,\n parent_datasets)\n VALUES\n (?,?,?,?,?,?,?,?,?,?,?,?)\n \"\"\"\n curr = transaction(conn, query,\n name,\n exp_id,\n guid,\n formatted_name,\n run_counter,\n None,\n \",\".join([p.name for p in parameters]),\n False,\n desc_str,\n captured_run_id,\n captured_counter,\n parent_dataset_links)\n\n _add_parameters_to_layout_and_deps(conn, formatted_name,\n *parameters)\n\n else:\n query = f\"\"\"\n INSERT INTO {table}\n (name,\n exp_id,\n guid,\n result_table_name,\n result_counter,\n run_timestamp,\n is_completed,\n run_description,\n captured_run_id,\n captured_counter,\n parent_datasets)\n VALUES\n (?,?,?,?,?,?,?,?,?,?,?)\n \"\"\"\n curr = transaction(conn, query,\n name,\n exp_id,\n guid,\n formatted_name,\n run_counter,\n None,\n False,\n desc_str,\n captured_run_id,\n captured_counter,\n parent_dataset_links)\n\n run_id = curr.lastrowid\n\n return run_counter, formatted_name, run_id\n\n\ndef _update_experiment_run_counter(conn: ConnectionPlus, exp_id: int,\n run_counter: int) -> None:\n query = \"\"\"\n UPDATE experiments\n SET run_counter = ?\n WHERE exp_id = ?\n \"\"\"\n atomic_transaction(conn, query, run_counter, exp_id)\n\n\ndef _get_parameters(conn: ConnectionPlus,\n run_id: int) -> List[ParamSpec]:\n \"\"\"\n Get the list of param specs for run\n\n Args:\n conn: the connection to the sqlite database\n run_id: The id of the run\n\n Returns:\n A list of param specs for this run\n \"\"\"\n\n sql = f\"\"\"\n SELECT parameter FROM layouts WHERE run_id={run_id}\n \"\"\"\n c = conn.execute(sql)\n param_names_temp = many_many(c, 'parameter')\n param_names = [p[0] for p in param_names_temp]\n param_names = cast(List[str], param_names)\n\n parspecs = []\n\n for param_name in param_names:\n parspecs.append(_get_paramspec(conn, run_id, param_name))\n\n return parspecs\n\n\ndef _get_paramspec(conn: ConnectionPlus,\n run_id: int,\n param_name: str) -> ParamSpec:\n \"\"\"\n Get the ParamSpec object for the given parameter name\n in the given run\n\n Args:\n conn: Connection to the database\n run_id: The run id\n param_name: The name of the parameter\n \"\"\"\n\n # get table name\n sql = f\"\"\"\n SELECT result_table_name FROM runs WHERE run_id = {run_id}\n \"\"\"\n c = conn.execute(sql)\n result_table_name = one(c, 'result_table_name')\n\n # get the data type\n sql = f\"\"\"\n PRAGMA TABLE_INFO(\"{result_table_name}\")\n \"\"\"\n c = conn.execute(sql)\n for row in c.fetchall():\n if row['name'] == param_name:\n param_type = row['type']\n break\n\n # get everything else\n\n sql = f\"\"\"\n SELECT * FROM layouts\n WHERE parameter=\"{param_name}\" and run_id={run_id}\n \"\"\"\n c = conn.execute(sql)\n resp = many(c, 'layout_id', 'run_id', 'parameter', 'label', 'unit',\n 'inferred_from')\n (layout_id, _, _, label, unit, inferred_from_string) = resp\n\n if inferred_from_string:\n inferred_from = inferred_from_string.split(', ')\n else:\n inferred_from = []\n\n deps = _get_dependencies(conn, layout_id)\n depends_on: Optional[List[str]]\n if len(deps) == 0:\n depends_on = None\n else:\n dps: List[int] = [dp[0] for dp in deps]\n ax_nums: List[int] = [dp[1] for dp in deps]\n depends_on = []\n for _, dp in sorted(zip(ax_nums, dps)):\n sql = f\"\"\"\n SELECT parameter FROM layouts WHERE layout_id = {dp}\n \"\"\"\n c = conn.execute(sql)\n depends_on.append(one(c, 
'parameter'))\n\n parspec = ParamSpec(param_name, param_type, label, unit,\n inferred_from,\n depends_on)\n return parspec\n\n\ndef update_run_description(conn: ConnectionPlus, run_id: int,\n description: str) -> None:\n \"\"\"\n Update the run_description field for the given run_id. The description\n string must be a valid JSON string representation of a RunDescriber object\n \"\"\"\n try:\n serial.from_json_to_current(description)\n except Exception as e:\n raise ValueError(\"Invalid description string. Must be a JSON string \"\n \"representation of a RunDescriber object.\") from e\n\n _update_run_description(conn, run_id, description)\n\n\ndef _update_run_description(conn: ConnectionPlus, run_id: int,\n description: str) -> None:\n \"\"\"\n Update the run_description field for the given run_id. The description\n string is NOT validated.\n \"\"\"\n sql = \"\"\"\n UPDATE runs\n SET run_description = ?\n WHERE run_id = ?\n \"\"\"\n with atomic(conn) as conn:\n conn.cursor().execute(sql, (description, run_id))\n\n\ndef update_parent_datasets(conn: ConnectionPlus,\n run_id: int, links_str: str) -> None:\n \"\"\"\n Update (i.e. overwrite) the parent_datasets field for the given run_id\n \"\"\"\n if not is_column_in_table(conn, 'runs', 'parent_datasets'):\n insert_column(conn, 'runs', 'parent_datasets')\n\n sql = \"\"\"\n UPDATE runs\n SET parent_datasets = ?\n WHERE run_id = ?\n \"\"\"\n with atomic(conn) as conn:\n conn.cursor().execute(sql, (links_str, run_id))\n\n\ndef set_run_timestamp(conn: ConnectionPlus, run_id: int) -> None:\n \"\"\"\n Set the run_timestamp for the run with the given run_id. If the\n run_timestamp has already been set, a RuntimeError is raised.\n \"\"\"\n\n query = \"\"\"\n SELECT run_timestamp\n FROM runs\n WHERE run_id = ?\n \"\"\"\n cmd = \"\"\"\n UPDATE runs\n SET run_timestamp = ?\n WHERE run_id = ?\n \"\"\"\n\n with atomic(conn) as conn:\n c = conn.cursor()\n timestamp = one(c.execute(query, (run_id,)), 'run_timestamp')\n if timestamp is not None:\n raise RuntimeError('Can not set run_timestamp; it has already '\n f'been set to: {timestamp}')\n else:\n current_time = time.time()\n c.execute(cmd, (current_time, run_id))\n log.info(f\"Set the run_timestamp of run_id {run_id} to \"\n f\"{current_time}\")\n\n\ndef add_parameter(conn: ConnectionPlus,\n formatted_name: str,\n *parameter: ParamSpec) -> None:\n \"\"\"\n Add parameters to the dataset\n\n This will update the layouts and dependencies tables\n\n NOTE: two parameters with the same name are not allowed\n\n Args:\n conn: the connection to the sqlite database\n formatted_name: name of the table\n parameter: the list of ParamSpecs for parameters to add\n \"\"\"\n with atomic(conn) as conn:\n p_names = []\n for p in parameter:\n insert_column(conn, formatted_name, p.name, p.type)\n p_names.append(p.name)\n # get old parameters column from run table\n sql = f\"\"\"\n SELECT parameters FROM runs\n WHERE result_table_name=?\n \"\"\"\n with atomic(conn) as conn:\n c = transaction(conn, sql, formatted_name)\n old_parameters = one(c, 'parameters')\n if old_parameters:\n new_parameters = \",\".join([old_parameters] + p_names)\n else:\n new_parameters = \",\".join(p_names)\n sql = \"UPDATE runs SET parameters=? 
WHERE result_table_name=?\"\n with atomic(conn) as conn:\n transaction(conn, sql, new_parameters, formatted_name)\n\n # Update the layouts table\n c = _add_parameters_to_layout_and_deps(conn, formatted_name,\n *parameter)\n\n\ndef _add_parameters_to_layout_and_deps(conn: ConnectionPlus,\n formatted_name: str,\n *parameter: ParamSpec\n ) -> sqlite3.Cursor:\n # get the run_id\n sql = f\"\"\"\n SELECT run_id FROM runs WHERE result_table_name=\"{formatted_name}\";\n \"\"\"\n run_id = one(transaction(conn, sql), 'run_id')\n layout_args = []\n for p in parameter:\n layout_args.append(run_id)\n layout_args.append(p.name)\n layout_args.append(p.label)\n layout_args.append(p.unit)\n layout_args.append(p.inferred_from)\n rowplaceholder = '(?, ?, ?, ?, ?)'\n placeholder = ','.join([rowplaceholder] * len(parameter))\n sql = f\"\"\"\n INSERT INTO layouts (run_id, parameter, label, unit, inferred_from)\n VALUES {placeholder}\n \"\"\"\n\n with atomic(conn) as conn:\n c = transaction(conn, sql, *layout_args)\n\n for p in parameter:\n\n if p.depends_on != '':\n\n layout_id = _get_layout_id(conn, p, run_id)\n\n deps = p.depends_on.split(', ')\n for ax_num, dp in enumerate(deps):\n\n sql = \"\"\"\n SELECT layout_id FROM layouts\n WHERE run_id=? and parameter=?;\n \"\"\"\n\n c = transaction(conn, sql, run_id, dp)\n dep_ind = one(c, 'layout_id')\n\n sql = \"\"\"\n INSERT INTO dependencies (dependent, independent, axis_num)\n VALUES (?,?,?)\n \"\"\"\n\n c = transaction(conn, sql, layout_id, dep_ind, ax_num)\n return c\n\n\ndef _validate_table_name(table_name: str) -> bool:\n valid = True\n for i in table_name:\n if unicodedata.category(i) not in _unicode_categories:\n valid = False\n raise RuntimeError(\"Invalid table name \"\n \"{} starting at {}\".format(table_name, i))\n return valid\n\n\ndef _create_run_table(conn: ConnectionPlus,\n formatted_name: str,\n parameters: Optional[List[ParamSpec]] = None,\n values: Optional[VALUES] = None\n ) -> None:\n \"\"\"Create run table with formatted_name as name\n\n Args:\n conn: database connection\n formatted_name: the name of the table to create\n \"\"\"\n _validate_table_name(formatted_name)\n\n with atomic(conn) as conn:\n\n if parameters and values:\n _parameters = \",\".join([p.sql_repr() for p in parameters])\n query = f\"\"\"\n CREATE TABLE \"{formatted_name}\" (\n id INTEGER PRIMARY KEY,\n {_parameters}\n );\n \"\"\"\n transaction(conn, query)\n # now insert values\n insert_values(conn, formatted_name,\n [p.name for p in parameters], values)\n elif parameters:\n _parameters = \",\".join([p.sql_repr() for p in parameters])\n query = f\"\"\"\n CREATE TABLE \"{formatted_name}\" (\n id INTEGER PRIMARY KEY,\n {_parameters}\n );\n \"\"\"\n transaction(conn, query)\n else:\n query = f\"\"\"\n CREATE TABLE \"{formatted_name}\" (\n id INTEGER PRIMARY KEY\n );\n \"\"\"\n transaction(conn, query)\n\n\ndef create_run(conn: ConnectionPlus, exp_id: int, name: str,\n guid: str,\n parameters: Optional[List[ParamSpec]] = None,\n values: Optional[List[Any]] = None,\n metadata: Optional[Mapping[str, Any]] = None,\n captured_run_id: Optional[int] = None,\n captured_counter: Optional[int] = None,\n parent_dataset_links: str = \"[]\"\n ) -> Tuple[int, int, str]:\n \"\"\" Create a single run for the experiment.\n\n\n This will register the run in the runs table, the counter in the\n experiments table and create a new table with the formatted name.\n\n Args:\n - conn: the connection to the sqlite database\n - exp_id: the experiment id we want to create the run into\n - name: a 
friendly name for this run\n - guid: the guid adhering to our internal guid format\n - parameters: optional list of parameters this run has\n - values: optional list of values for the parameters\n - metadata: optional metadata dictionary\n - captured_run_id: The run_id this data was originally captured with.\n Should only be supplied when inserting an already completed run\n from another database into this database. Otherwise leave as None.\n - captured_counter: The counter this data was originally captured with.\n Should only be supplied when inserting an already completed run\n from another database into this database. Otherwise leave as None.\n\n Returns:\n - run_counter: the id of the newly created run (not unique)\n - run_id: the row id of the newly created run\n - formatted_name: the name of the newly created table\n \"\"\"\n\n with atomic(conn):\n run_counter, formatted_name, run_id = _insert_run(conn,\n exp_id,\n name,\n guid,\n parameters,\n captured_run_id,\n captured_counter,\n parent_dataset_links)\n if metadata:\n add_meta_data(conn, run_id, metadata)\n _update_experiment_run_counter(conn, exp_id, run_counter)\n _create_run_table(conn, formatted_name, parameters, values)\n\n return run_counter, run_id, formatted_name\n\n\ndef get_run_description(conn: ConnectionPlus, run_id: int) -> str:\n \"\"\"\n Return the (JSON string) run description of the specified run\n \"\"\"\n return select_one_where(conn, \"runs\", \"run_description\",\n \"run_id\", run_id)\n\n\ndef get_parent_dataset_links(conn: ConnectionPlus, run_id: int) -> str:\n \"\"\"\n Return the (JSON string) of the parent-child dataset links for the\n specified run\n \"\"\"\n\n # We cannot in general trust that NULLs will not appear in the column,\n # even if the column is present in the runs table.\n\n link_str: str\n maybe_link_str: Optional[str]\n\n if not is_column_in_table(conn, 'runs', 'parent_datasets'):\n maybe_link_str = None\n else:\n maybe_link_str = select_one_where(conn, \"runs\", \"parent_datasets\",\n \"run_id\", run_id)\n\n if maybe_link_str is None:\n link_str = \"[]\"\n else:\n link_str = str(maybe_link_str)\n\n return link_str\n\n\ndef get_metadata(conn: ConnectionPlus, tag: str, table_name: str) -> str:\n \"\"\" Get metadata under the tag from table\n \"\"\"\n return select_one_where(conn, \"runs\", tag,\n \"result_table_name\", table_name)\n\n\ndef get_metadata_from_run_id(\n conn: ConnectionPlus, run_id: int\n) -> Dict[str, Any]:\n \"\"\"\n Get all metadata associated with the specified run\n \"\"\"\n non_metadata = RUNS_TABLE_COLUMNS\n\n metadata = {}\n possible_tags = []\n\n # first fetch all columns of the runs table\n query = \"PRAGMA table_info(runs)\"\n cursor = conn.cursor()\n for row in cursor.execute(query):\n if row['name'] not in non_metadata:\n possible_tags.append(row['name'])\n\n # and then fetch whatever metadata the run might have\n for tag in possible_tags:\n query = f\"\"\"\n SELECT \"{tag}\"\n FROM runs\n WHERE run_id = ?\n AND \"{tag}\" IS NOT NULL\n \"\"\"\n cursor.execute(query, (run_id,))\n row = cursor.fetchall()\n if row != []:\n metadata[tag] = row[0][tag]\n\n return metadata\n\n\ndef insert_meta_data(conn: ConnectionPlus, row_id: int, table_name: str,\n metadata: Mapping[str, Any]) -> None:\n \"\"\"\n Insert new metadata column and add values. 
Note that None is not a valid\n metadata value\n\n Args:\n - conn: the connection to the sqlite database\n - row_id: the row to add the metadata at\n - table_name: the table to add to, defaults to runs\n - metadata: the metadata to add\n \"\"\"\n for tag, val in metadata.items():\n if val is None:\n raise ValueError(f'Tag {tag} has value None. '\n ' That is not a valid metadata value!')\n for key in metadata.keys():\n insert_column(conn, table_name, key)\n update_meta_data(conn, row_id, table_name, metadata)\n\n\ndef update_meta_data(conn: ConnectionPlus, row_id: int, table_name: str,\n metadata: Mapping[str, Any]) -> None:\n \"\"\"\n Updates metadata (they must exist already)\n\n Args:\n - conn: the connection to the sqlite database\n - row_id: the row to add the metadata at\n - table_name: the table to add to, defaults to runs\n - metadata: the metadata to add\n \"\"\"\n update_where(conn, table_name, 'rowid', row_id, **metadata)\n\n\ndef add_meta_data(conn: ConnectionPlus,\n row_id: int,\n metadata: Mapping[str, Any],\n table_name: str = \"runs\") -> None:\n \"\"\"\n Add metadata data (updates if exists, create otherwise).\n Note that None is not a valid metadata value.\n\n Args:\n - conn: the connection to the sqlite database\n - row_id: the row to add the metadata at\n - metadata: the metadata to add\n - table_name: the table to add to, defaults to runs\n \"\"\"\n try:\n insert_meta_data(conn, row_id, table_name, metadata)\n except sqlite3.OperationalError as e:\n # this means that the column already exists\n # so just insert the new value\n if str(e).startswith(\"duplicate\"):\n update_meta_data(conn, row_id, table_name, metadata)\n else:\n raise e\n\n\ndef get_experiment_name_from_experiment_id(\n conn: ConnectionPlus, exp_id: int) -> str:\n return select_one_where(\n conn, \"experiments\", \"name\", \"exp_id\", exp_id)\n\n\ndef get_sample_name_from_experiment_id(\n conn: ConnectionPlus, exp_id: int) -> str:\n return select_one_where(\n conn, \"experiments\", \"sample_name\", \"exp_id\", exp_id)\n\n\ndef get_run_timestamp_from_run_id(conn: ConnectionPlus,\n run_id: int) -> Optional[float]:\n return select_one_where(conn, \"runs\", \"run_timestamp\", \"run_id\", run_id)\n\n\ndef update_GUIDs(conn: ConnectionPlus) -> None:\n \"\"\"\n Update all GUIDs in this database where either the location code or the\n work_station code is zero to use the location and work_station code from\n the qcodesrc.json file in home. Runs where it is not true that both codes\n are zero are skipped.\n \"\"\"\n\n log.info('Commencing update of all GUIDs in database')\n\n cfg = qc.config\n\n location = cfg['GUID_components']['location']\n work_station = cfg['GUID_components']['work_station']\n\n if location == 0:\n log.warning('The location is still set to the default (0). Can not '\n 'proceed. Please configure the location before updating '\n 'the GUIDs.')\n return\n if work_station == 0:\n log.warning('The work_station is still set to the default (0). Can not'\n ' proceed. Please configure the location before updating '\n 'the GUIDs.')\n return\n\n query = f\"select MAX(run_id) from runs\"\n c = atomic_transaction(conn, query)\n no_of_runs = c.fetchall()[0][0]\n\n # now, there are four actions we can take\n\n def _both_nonzero(run_id: int, *args: Any) -> None:\n log.info(f'Run number {run_id} already has a valid GUID, skipping.')\n\n def _location_only_zero(run_id: int, *args: Any) -> None:\n log.warning(f'Run number {run_id} has a zero (default) location '\n 'code, but a non-zero work station code. 
Please manually '\n 'resolve this, skipping the run now.')\n\n def _workstation_only_zero(run_id: int, *args: Any) -> None:\n log.warning(f'Run number {run_id} has a zero (default) work station'\n ' code, but a non-zero location code. Please manually '\n 'resolve this, skipping the run now.')\n\n def _both_zero(run_id: int,\n conn: ConnectionPlus,\n guid_comps: Dict[str, Any]) -> None:\n guid_str = generate_guid(timeint=guid_comps['time'],\n sampleint=guid_comps['sample'])\n with atomic(conn) as conn:\n sql = f\"\"\"\n UPDATE runs\n SET guid = ?\n where run_id == {run_id}\n \"\"\"\n cur = conn.cursor()\n cur.execute(sql, (guid_str,))\n\n log.info(f'Successfully updated run number {run_id}.')\n\n actions: Dict[Tuple[bool, bool],\n Callable[[int, ConnectionPlus, Dict[str, Any]], None]]\n actions = {(True, True): _both_zero,\n (False, True): _workstation_only_zero,\n (True, False): _location_only_zero,\n (False, False): _both_nonzero}\n\n for run_id in range(1, no_of_runs+1):\n guid_str = get_guid_from_run_id(conn, run_id)\n guid_comps = parse_guid(guid_str)\n loc = guid_comps['location']\n ws = guid_comps['work_station']\n\n log.info(f'Updating run number {run_id}...')\n actions[(loc == 0, ws == 0)](run_id, conn, guid_comps)\n\n\ndef remove_trigger(conn: ConnectionPlus, trigger_id: str) -> None:\n \"\"\"\n Removes a trigger with a given id if it exists.\n\n Note that this transaction is not atomic!\n\n Args:\n conn: database connection object\n trigger_id: id of the trigger\n \"\"\"\n transaction(conn, f\"DROP TRIGGER IF EXISTS {trigger_id};\")\n\n\ndef append_shaped_parameter_data_to_existing_arrays(\n conn: ConnectionPlus,\n table_name: str,\n rundescriber: RunDescriber,\n write_status: Dict[str, Optional[int]],\n read_status: Dict[str, int],\n data: Dict[str, Dict[str, np.ndarray]],\n) -> Tuple[Dict[str, Optional[int]],\n Dict[str, int],\n Dict[str, Dict[str, np.ndarray]]]:\n \"\"\"\n Append newly loaded data to an already existing cache.\n\n Args:\n conn: The connection to the sqlite database\n table_name: The name of the table the data is stored in\n rundescriber: The rundescriber that describes the run\n write_status: Mapping from dependent parameter name to number of rows\n written to the cache previously.\n read_status: Mapping from dependent parameter name to number of rows\n read from the db previously.\n data: Mapping from dependent parameter name to mapping\n from parameter name to numpy arrays that the data should be\n inserted into.\n\n Returns:\n Updated write and read status, and the updated ``data``\n \"\"\"\n parameters = tuple(ps.name for ps in\n rundescriber.interdeps.non_dependencies)\n merged_data = {}\n\n updated_write_status = copy(write_status)\n updated_read_status = copy(read_status)\n\n for meas_parameter in parameters:\n\n shapes = rundescriber.shapes\n if shapes is not None:\n shape = shapes.get(meas_parameter, None)\n else:\n shape = None\n\n start = read_status.get(meas_parameter, 0) + 1\n\n new_data, n_rows_read = get_parameter_data_for_one_paramtree(\n conn,\n table_name,\n rundescriber=rundescriber,\n output_param=meas_parameter,\n start=start,\n end=None\n )\n\n existing_data = data.get(meas_parameter, {})\n\n subtree_merged_data = {}\n subtree_parameters = set(existing_data.keys()) | set(new_data.keys())\n new_write_status: Optional[int]\n\n for subtree_param in subtree_parameters:\n existing_values = existing_data.get(subtree_param)\n new_values = new_data.get(subtree_param)\n if existing_values is not None and new_values is not None:\n 
(subtree_merged_data[subtree_param],\n new_write_status) = _insert_into_data_dict(\n existing_values,\n new_values,\n write_status.get(meas_parameter),\n shape=shape\n )\n updated_write_status[meas_parameter] = new_write_status\n elif new_values is not None:\n (subtree_merged_data[subtree_param],\n new_write_status) = _create_new_data_dict(\n new_values,\n shape\n )\n updated_write_status[meas_parameter] = new_write_status\n elif existing_values is not None:\n subtree_merged_data[subtree_param] = existing_values\n merged_data[meas_parameter] = subtree_merged_data\n updated_read_status[meas_parameter] = read_status.get(meas_parameter, 0) + n_rows_read\n return updated_write_status, updated_read_status, merged_data\n\n\ndef _create_new_data_dict(new_values: np.ndarray,\n shape: Optional[Tuple[int, ...]]\n ) -> Tuple[np.ndarray, int]:\n if shape is None:\n return new_values, new_values.size\n else:\n n_values = new_values.size\n data = np.zeros(shape, dtype=new_values.dtype)\n\n if new_values.dtype.kind == \"f\" or new_values.dtype.kind == \"c\":\n data[:] = np.nan\n\n data.ravel()[0:n_values] = new_values\n return data, n_values\n\n\ndef _insert_into_data_dict(\n existing_values: np.ndarray,\n new_values: np.ndarray,\n write_status: Optional[int],\n shape: Optional[Tuple[int, ...]]\n) -> Tuple[np.ndarray, Optional[int]]:\n if shape is None or write_status is None:\n return np.append(existing_values, new_values, axis=0), None\n else:\n if existing_values.dtype.kind in ('U', 'S'):\n # string type arrays may be too small for the new data\n # read so rescale if needed.\n if new_values.dtype.itemsize > existing_values.dtype.itemsize:\n existing_values = existing_values.astype(new_values.dtype)\n n_values = new_values.size\n new_write_status = write_status+n_values\n if new_write_status > existing_values.size:\n log.warning(f\"Incorrect shape of dataset: Dataset is expected to \"\n f\"contain {existing_values.size} points but trying to \"\n f\"add an amount of data that makes it contain {new_write_status} points. Cache will \"\n f\"be flattened into a 1D array\")\n return (np.append(existing_values.flatten(),\n new_values.flatten(), axis=0),\n new_write_status)\n else:\n existing_values.ravel()[write_status:new_write_status] = new_values\n return existing_values, new_write_status\n" ]
[ [ "numpy.unique", "numpy.dtype", "numpy.append", "numpy.prod", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
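The add_meta_data routine in the QCoDeS snippet above is a small insert-or-update pattern: try to create the metadata column, and fall back to an UPDATE when sqlite reports a duplicate column name. Below is a minimal, self-contained sketch of that pattern using only the standard-library sqlite3 module; the runs table layout and the "operator" tag are made up for illustration:

import sqlite3

def add_tag(conn: sqlite3.Connection, row_id: int, tag: str, value) -> None:
    # Try to create the column first; sqlite raises an OperationalError whose
    # message starts with "duplicate column name" if it already exists.
    try:
        conn.execute(f'ALTER TABLE runs ADD COLUMN "{tag}"')
    except sqlite3.OperationalError as e:
        if not str(e).startswith("duplicate"):
            raise
    conn.execute(f'UPDATE runs SET "{tag}" = ? WHERE rowid = ?', (value, row_id))

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE runs (name TEXT)")
conn.execute("INSERT INTO runs (name) VALUES ('run_1')")
add_tag(conn, 1, "operator", "alice")  # first call creates the column
add_tag(conn, 1, "operator", "bob")    # second call takes the update path
print(conn.execute("SELECT name, operator FROM runs").fetchall())  # [('run_1', 'bob')]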
Wesley-Du/analytics-zoo
[ "e4ca11b219a43bceec99aba39cf30c8aa368e8b3", "439f2c99d657fb20a5ff4bf510869616402ba0cf", "439f2c99d657fb20a5ff4bf510869616402ba0cf" ]
[ "pyzoo/zoo/tfpark/utils.py", "pyzoo/zoo/models/recommendation/utils.py", "pyzoo/test/zoo/automl/model/test_Seq2Seq.py" ]
[ "#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom zoo.tfpark.tfnet import TFNet\nfrom zoo.tfpark.tf_optimizer import BigDLMetric, TFModel\nfrom zoo.pipeline.api.keras import metrics as zmetrics\n\nimport tensorflow as tf\n\n\ndef to_bigdl_metric(metric):\n metric = metric.lower()\n if metric == \"accuracy\" or metric == \"acc\":\n return zmetrics.Accuracy()\n elif metric == \"top5accuracy\" or metric == \"top5acc\":\n return zmetrics.Top5Accuracy()\n elif metric == \"mae\":\n from bigdl.optim.optimizer import MAE\n return MAE()\n elif metric == \"auc\":\n return zmetrics.AUC()\n elif metric == \"treennaccuracy\":\n from bigdl.optim.optimizer import TreeNNAccuracy\n return TreeNNAccuracy()\n else:\n raise TypeError(\"Unsupported metric: %s\" % metric)\n\n\ndef evaluate_string_metrics(*,\n sess,\n string_metrics,\n dataset,\n inputs,\n targets=None,\n outputs=None,\n loss=None,\n ):\n\n metrics = {}\n for i, metric in enumerate(string_metrics):\n if metric == \"loss\":\n assert loss is not None, \"loss tensor should not be None if one of the metrics is loss\"\n metrics[\"loss\"] = loss\n else:\n assert outputs is not None, \"outputs should not be None if non loss metrics exists\"\n assert targets is not None, \"targets should not be None if non loss metrics exists\"\n\n method = to_bigdl_metric(metric)\n metrics[metric] = BigDLMetric(method,\n outputs,\n targets)\n result = evaluate_metrics(inputs, sess, dataset, metrics)\n return result\n\n\ndef evaluate_metrics(inputs, sess, dataset, metrics):\n\n if dataset.batch_per_thread > 0:\n batch_size = dataset.batch_per_thread * dataset.get_num_partitions()\n else:\n batch_size = dataset.batch_size\n\n real_batch_size = tf.shape(inputs[0])[0]\n\n outputs, eval_methods = TFModel._process_metrics(inputs[0].graph,\n metrics=metrics,\n real_batch_size=real_batch_size)\n\n tfnet = TFNet.from_session(sess, inputs=inputs, outputs=outputs)\n\n results = tfnet.evaluate(dataset, batch_size, eval_methods)\n final_result = dict([(r.method, r.result) for r in results])\n return final_result\n", "#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\nfrom bigdl.util.common import JTensor, Sample\n\nfrom zoo.common.utils import callZooFunc\nfrom zoo.models.recommendation import UserItemFeature\n\n\ndef hash_bucket(content, bucket_size=1000, start=0):\n return (hash(str(content)) % bucket_size + bucket_size) % bucket_size + start\n\n\ndef 
categorical_from_vocab_list(sth, vocab_list, default=-1, start=0):\n if sth in vocab_list:\n return vocab_list.index(sth) + start\n else:\n return default + start\n\n\ndef get_boundaries(target, boundaries, default=-1, start=0):\n if target == '?':\n return default + start\n else:\n for i in range(len(boundaries)):\n if target < boundaries[i]:\n return i + start\n return len(boundaries) + start\n\n\ndef get_negative_samples(indexed):\n return callZooFunc(\"float\", \"getNegativeSamples\",\n indexed)\n\n\ndef get_wide_tensor(row, column_info):\n \"\"\"\n convert a row to tensor given column feature information of a WideAndDeep model\n\n :param row: Row of userId, itemId, features and label\n :param column_info: ColumnFeatureInfo specify information of different features\n :return: an array of tensors as input for wide part of a WideAndDeep model\n \"\"\"\n\n wide_columns = column_info.wide_base_cols + column_info.wide_cross_cols\n wide_dims = column_info.wide_base_dims + column_info.wide_cross_dims\n wide_length = len(wide_columns)\n acc = 0\n indices = []\n for i in range(0, wide_length):\n index = row[wide_columns[i]]\n if i == 0:\n res = index\n else:\n acc += wide_dims[i - 1]\n res = acc + index\n indices.append(res)\n values = np.array([i + 1 for i in indices])\n shape = np.array([sum(wide_dims)])\n return JTensor.sparse(values, np.array(indices), shape)\n\n\ndef get_deep_tensors(row, column_info):\n \"\"\"\n convert a row to tensors given column feature information of a WideAndDeep model\n\n :param row: Row of userId, itemId, features and label\n :param column_info: ColumnFeatureInfo specify information of different features\n :return: an array of tensors as input for deep part of a WideAndDeep model\n \"\"\"\n\n ind_col = column_info.indicator_cols\n emb_col = column_info.embed_cols\n cont_col = column_info.continuous_cols\n\n ind_tensor = np.zeros(sum(column_info.indicator_dims), )\n # setup indicators\n acc = 0\n for i in range(0, len(ind_col)):\n index = row[ind_col[i]]\n if i == 0:\n res = index\n else:\n acc += column_info.indicator_dims[i - 1]\n res = acc + index\n ind_tensor[res] = 1\n\n emb_tensor = np.zeros(len(emb_col), )\n for i in range(0, len(emb_col)):\n emb_tensor[i] = float(row[emb_col[i]])\n\n cont_tensor = np.zeros(len(cont_col), )\n for i in range(0, len(cont_col)):\n cont_tensor[i] = float(row[cont_col[i]])\n\n has_ind = len(ind_col) > 0\n has_emd = len(emb_col) > 0\n has_cont = len(cont_col) > 0\n if (has_ind and has_emd and has_cont):\n deep_tensor = [ind_tensor, emb_tensor, cont_tensor]\n elif ((not has_ind) and has_emd and has_cont):\n deep_tensor = [emb_tensor, cont_tensor]\n elif (has_ind and (not has_emd) and has_cont):\n deep_tensor = [ind_tensor, cont_tensor]\n elif (has_ind and has_emd and (not has_cont)):\n deep_tensor = [ind_tensor, emb_tensor]\n elif ((not has_ind) and (not has_emd) and has_cont):\n deep_tensor = [cont_tensor]\n elif ((not has_ind) and has_emd and (not has_cont)):\n deep_tensor = [emb_tensor]\n elif (has_ind and (not has_emd) and (not has_cont)):\n deep_tensor = [ind_tensor]\n else:\n raise TypeError(\"Empty deep tensors\")\n return deep_tensor\n\n\ndef row_to_sample(row, column_info, model_type=\"wide_n_deep\"):\n \"\"\"\n convert a row to sample given column feature information of a WideAndDeep model\n\n :param row: Row of userId, itemId, features and label\n :param column_info: ColumnFeatureInfo specify information of different features\n :return: TensorSample as input for WideAndDeep model\n \"\"\"\n\n wide_tensor = 
get_wide_tensor(row, column_info)\n deep_tensor = get_deep_tensors(row, column_info)\n deep_tensors = [JTensor.from_ndarray(ele) for ele in deep_tensor]\n label = row[column_info.label]\n model_type = model_type.lower()\n if model_type == \"wide_n_deep\":\n feature = [wide_tensor] + deep_tensors\n elif model_type == \"wide\":\n feature = wide_tensor\n elif model_type == \"deep\":\n feature = deep_tensors\n else:\n raise TypeError(\"Unsupported model_type: %s\" % model_type)\n return Sample.from_jtensor(feature, label)\n\n\ndef to_user_item_feature(row, column_info, model_type=\"wide_n_deep\"):\n \"\"\"\n convert a row to UserItemFeature given column feature information of a WideAndDeep model\n\n :param row: Row of userId, itemId, features and label\n :param column_info: ColumnFeatureInfo specify information of different features\n :return: UserItemFeature for recommender model\n \"\"\"\n return UserItemFeature(row[\"userId\"], row[\"itemId\"],\n row_to_sample(row, column_info, model_type))\n", "#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport shutil\nimport tempfile\n\nimport pytest\n\nfrom test.zoo.pipeline.utils.test_utils import ZooTestCase\nfrom zoo.automl.model.Seq2Seq import *\nfrom zoo.automl.feature.time_sequence import TimeSequenceFeatureTransformer\nfrom numpy.testing import assert_array_almost_equal\n\n\nclass TestSeq2Seq(ZooTestCase):\n\n def setup_method(self, method):\n # super().setup_method(method)\n self.train_data = pd.DataFrame(data=np.random.randn(64, 4))\n self.val_data = pd.DataFrame(data=np.random.randn(16, 4))\n self.test_data = pd.DataFrame(data=np.random.randn(16, 4))\n\n self.past_seq_len = 6\n self.future_seq_len_1 = 1\n self.future_seq_len_2 = 2\n\n # use roll method in time_sequence\n self.feat = TimeSequenceFeatureTransformer()\n\n self.config = {\n 'batch_size': 32,\n 'epochs': 1\n }\n\n self.model_1 = LSTMSeq2Seq(check_optional_config=False,\n future_seq_len=self.future_seq_len_1)\n self.model_2 = LSTMSeq2Seq(check_optional_config=False,\n future_seq_len=self.future_seq_len_2)\n\n self.fitted = False\n self.predict_1 = None\n self.predict_2 = None\n\n def teardown_method(self, method):\n pass\n\n def test_fit_eval_1(self):\n x_train_1, y_train_1 = self.feat._roll_train(self.train_data,\n past_seq_len=self.past_seq_len,\n future_seq_len=self.future_seq_len_1)\n print(\"fit_eval_future_seq_len_1:\",\n self.model_1.fit_eval(x_train_1, y_train_1, **self.config))\n assert self.model_1.past_seq_len == 6\n assert self.model_1.feature_num == 4\n assert self.model_1.future_seq_len == 1\n assert self.model_1.target_col_num == 1\n\n def test_fit_eval_2(self):\n x_train_2, y_train_2 = self.feat._roll_train(self.train_data,\n past_seq_len=self.past_seq_len,\n future_seq_len=self.future_seq_len_2)\n print(\"fit_eval_future_seq_len_2:\",\n self.model_2.fit_eval(x_train_2, y_train_2, **self.config))\n assert self.model_2.future_seq_len == 2\n\n self.fitted = True\n\n def test_evaluate_1(self):\n x_train_1, y_train_1 = 
self.feat._roll_train(self.train_data,\n past_seq_len=self.past_seq_len,\n future_seq_len=self.future_seq_len_1)\n x_val_1, y_val_1 = self.feat._roll_train(self.val_data,\n past_seq_len=self.past_seq_len,\n future_seq_len=self.future_seq_len_1)\n\n self.model_1.fit_eval(x_train_1, y_train_1, **self.config)\n\n print(\"evaluate_future_seq_len_1:\", self.model_1.evaluate(x_val_1,\n y_val_1,\n metric=['mse',\n 'r2']))\n\n def test_evaluate_2(self):\n x_train_2, y_train_2 = self.feat._roll_train(self.train_data,\n past_seq_len=self.past_seq_len,\n future_seq_len=self.future_seq_len_2)\n x_val_2, y_val_2 = self.feat._roll_train(self.val_data,\n past_seq_len=self.past_seq_len,\n future_seq_len=self.future_seq_len_2)\n\n self.model_2.fit_eval(x_train_2, y_train_2, **self.config)\n\n print(\"evaluate_future_seq_len_2:\", self.model_2.evaluate(x_val_2,\n y_val_2,\n metric=['mse',\n 'r2']))\n\n def test_predict_1(self):\n x_train_1, y_train_1 = self.feat._roll_train(self.train_data,\n past_seq_len=self.past_seq_len,\n future_seq_len=self.future_seq_len_1)\n x_test_1 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)\n self.model_1.fit_eval(x_train_1, y_train_1, **self.config)\n\n predict_1 = self.model_1.predict(x_test_1)\n assert predict_1.shape == (x_test_1.shape[0], self.future_seq_len_1)\n\n def test_predict_2(self):\n x_train_2, y_train_2 = self.feat._roll_train(self.train_data,\n past_seq_len=self.past_seq_len,\n future_seq_len=self.future_seq_len_2)\n x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)\n self.model_2.fit_eval(x_train_2, y_train_2, **self.config)\n\n predict_2 = self.model_2.predict(x_test_2)\n assert predict_2.shape == (x_test_2.shape[0], self.future_seq_len_2)\n\n def test_save_restore_1(self):\n x_train_1, y_train_1 = self.feat._roll_train(self.train_data,\n past_seq_len=self.past_seq_len,\n future_seq_len=self.future_seq_len_1)\n x_test_1 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)\n self.model_1.fit_eval(x_train_1, y_train_1, **self.config)\n\n predict_1_before = self.model_1.predict(x_test_1)\n new_model_1 = LSTMSeq2Seq(check_optional_config=False)\n\n dirname = tempfile.mkdtemp(prefix=\"automl_test_feature\")\n try:\n save(dirname, model=self.model_1)\n restore(dirname, model=new_model_1, config=self.config)\n predict_1_after = new_model_1.predict(x_test_1)\n assert_array_almost_equal(predict_1_before, predict_1_after, decimal=2), \\\n \"Prediction values are not the same after restore: \" \\\n \"predict before is {}, and predict after is {}\".format(predict_1_before,\n predict_1_after)\n new_config = {'epochs': 1}\n new_model_1.fit_eval(x_train_1, y_train_1, **new_config)\n finally:\n shutil.rmtree(dirname)\n\n def test_save_restore_2(self):\n x_train_2, y_train_2 = self.feat._roll_train(self.train_data,\n past_seq_len=self.past_seq_len,\n future_seq_len=self.future_seq_len_2)\n x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)\n self.model_2.fit_eval(x_train_2, y_train_2, **self.config)\n\n predict_2_before = self.model_2.predict(x_test_2)\n new_model_2 = LSTMSeq2Seq(check_optional_config=False)\n\n dirname = tempfile.mkdtemp(prefix=\"automl_test_feature\")\n try:\n save(dirname, model=self.model_2)\n restore(dirname, model=new_model_2, config=self.config)\n predict_2_after = new_model_2.predict(x_test_2)\n assert_array_almost_equal(predict_2_before, predict_2_after, decimal=2), \\\n \"Prediction values are not the same after restore: \" \\\n \"predict before is 
{}, and predict after is {}\".format(predict_2_before,\n predict_2_after)\n new_config = {'epochs': 2}\n new_model_2.fit_eval(x_train_2, y_train_2, **new_config)\n finally:\n shutil.rmtree(dirname)\n\n def test_predict_with_uncertainty(self,):\n x_train_2, y_train_2 = self.feat._roll_train(self.train_data,\n past_seq_len=self.past_seq_len,\n future_seq_len=self.future_seq_len_2)\n x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)\n self.model_2.fit_eval(x_train_2, y_train_2, mc=True, **self.config)\n prediction, uncertainty = self.model_2.predict_with_uncertainty(x_test_2, n_iter=2)\n assert prediction.shape == (x_test_2.shape[0], self.future_seq_len_2)\n assert uncertainty.shape == (x_test_2.shape[0], self.future_seq_len_2)\n assert np.any(uncertainty)\n\n new_model_2 = LSTMSeq2Seq(check_optional_config=False)\n dirname = tempfile.mkdtemp(prefix=\"automl_test_feature\")\n try:\n save(dirname, model=self.model_2)\n restore(dirname, model=new_model_2, config=self.config)\n prediction, uncertainty = new_model_2.predict_with_uncertainty(x_test_2, n_iter=2)\n assert prediction.shape == (x_test_2.shape[0], self.future_seq_len_2)\n assert uncertainty.shape == (x_test_2.shape[0], self.future_seq_len_2)\n assert np.any(uncertainty)\n finally:\n shutil.rmtree(dirname)\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n" ]
[ [ "tensorflow.shape" ], [ "numpy.array" ], [ "numpy.testing.assert_array_almost_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
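The recommendation utils above include a couple of pure-Python feature encoders (hash_bucket and get_boundaries) that are easy to sanity-check in isolation. A quick usage sketch, restating the two helpers verbatim so it runs standalone; the sample inputs are invented:

def hash_bucket(content, bucket_size=1000, start=0):
    # the double modulo keeps the result in [start, start + bucket_size) even when hash() is negative
    return (hash(str(content)) % bucket_size + bucket_size) % bucket_size + start

def get_boundaries(target, boundaries, default=-1, start=0):
    # bucketize a numeric feature; '?' marks a missing value
    if target == '?':
        return default + start
    for i in range(len(boundaries)):
        if target < boundaries[i]:
            return i + start
    return len(boundaries) + start

assert 0 <= hash_bucket("user_42", bucket_size=100) < 100
assert get_boundaries(25, [18, 30, 45, 60]) == 1    # 25 lands in the second bucket (index 1)
assert get_boundaries(99, [18, 30, 45, 60]) == 4    # above every boundary -> last bucket
assert get_boundaries('?', [18, 30, 45, 60]) == -1  # missing value -> default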
samgeen/Weltgeist
[ "c7d52e879bb3473cecbb06651b5e76dac3020da6" ]
[ "examples/example06_loading.py" ]
[ "\"\"\"\nExample 6 - Loading\nLoading a save file\n\n@author: samgeen\n\"\"\"\n\n# This piece of code basically adds the parent directory to PYTHONPATH\nimport os, sys\nparent = os.path.dirname(os.getcwd())\nsys.path.append(parent)\n\n# Import numpy, matplotlib and weltgeist\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport weltgeist\nimport weltgeist.units as wunits # make this easier to type\n\ndef run_example():\n # This is just example 5 in reverse, kinda\n integrator = weltgeist.integrator.Integrator()\n # If you haven't run example 5 yet, do it now so you have this file\n # You don't need the \".hdf5\", it'll add that for you\n print(\"Loading...\")\n integrator.Load(\"mycoolsave\")\n print(\"Loaded!\")\n # Okay! We loaded a file\n # AGAIN - this doesn't contain any information on sources\n # I swear this will happen sometime\n # Serialising nested objects is time-consuming, is all\n\n # What's the current simulation time?\n print(str(integrator.time/wunits.Myr)+\" Myr\")\n # Alright! It's not zero, which means something happened. But is the simulation state ok?\n \n # Let's try plotting the same thing as the end of example 5 when we saved it\n # I made this a function so I can do it again\n # Python does some wild stuff with nested functions and scope\n hydro = integrator.hydro\n ncells = hydro.ncells\n def plotstuff():\n # OK now plot something to show it's evolved\n plt.clf()\n plt.plot(hydro.x[0:ncells]/wunits.pc,hydro.nH[0:ncells],label=str(integrator.time/wunits.Myr)+\" Myr\",color=\"r\")\n plt.xlabel(\"radius / pc\")\n plt.ylabel(\"$n_{\\mathrm{H}}$ / cm$^{-3}$\")\n plt.yscale(\"log\")\n plt.legend(frameon=False)\n plt.show()\n plotstuff()\n \n # Run it a bunch\n niterations = 100\n for i in range(0,niterations):\n # Bootleg timer\n if i % 10 == 0:\n print(\".\")\n integrator.Step()\n\n # Plot again\n plotstuff()\n # Okay. We loaded a simulation and kept running it\n\n # And that's it. That's all the examples I could think of.\n # You can explore the code if you like to see how it all works\n # Gravity is also included, but I need to confirm it works with a\n # test problem, so that's for another time\n\n# This piece of code runs if you start this module versus importing it\nif __name__==\"__main__\":\n run_example()\n \n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.yscale", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
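The Weltgeist example above defines plotstuff() as a nested function so the same call can show the state both before and after stepping the integrator. Here is a dependency-light sketch of that closure pattern, with dummy arrays standing in for the integrator, to show why the second call automatically reflects the evolved state:

import numpy as np
import matplotlib.pyplot as plt

state = {"t": 0.0, "y": np.ones(100)}

def plotstuff():
    # the closure reads state at call time, not at definition time
    plt.clf()
    plt.plot(state["y"], label="{:.1f} Myr".format(state["t"]))
    plt.legend(frameon=False)
    plt.show()

plotstuff()  # initial state
state["t"] = 1.0
state["y"] = state["y"] * np.linspace(1.0, 10.0, 100)
plotstuff()  # same function, now shows the evolved state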
JayZhu0104/maskrcnn-benchmark
[ "0296cae312566b2fad79074736dc64760af86fe4" ]
[ "maskrcnn_benchmark/layers/roi_align.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\nfrom torch import nn\nfrom torch.autograd import Function\nfrom torch.autograd.function import once_differentiable\nfrom torch.nn.modules.utils import _pair\n\n# from maskrcnn_benchmark import _C\nfrom ._utils import _C\nfrom apex import amp\n\nclass _ROIAlign(Function):\n @staticmethod\n def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):\n ctx.save_for_backward(roi)\n ctx.output_size = _pair(output_size)\n ctx.spatial_scale = spatial_scale\n ctx.sampling_ratio = sampling_ratio\n ctx.input_shape = input.size()\n output = _C.roi_align_forward(\n input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio\n )\n return output\n\n @staticmethod\n @once_differentiable\n def backward(ctx, grad_output):\n rois, = ctx.saved_tensors\n output_size = ctx.output_size\n spatial_scale = ctx.spatial_scale\n sampling_ratio = ctx.sampling_ratio\n bs, ch, h, w = ctx.input_shape\n grad_input = _C.roi_align_backward(\n grad_output,\n rois,\n spatial_scale,\n output_size[0],\n output_size[1],\n bs,\n ch,\n h,\n w,\n sampling_ratio,\n )\n return grad_input, None, None, None, None\n\n\nroi_align = _ROIAlign.apply\n\nclass ROIAlign(nn.Module):\n def __init__(self, output_size, spatial_scale, sampling_ratio):\n super(ROIAlign, self).__init__()\n self.output_size = output_size\n self.spatial_scale = spatial_scale\n self.sampling_ratio = sampling_ratio\n\n @amp.float_function\n def forward(self, input, rois):\n return roi_align(\n input, rois, self.output_size, self.spatial_scale, self.sampling_ratio\n )\n\n def __repr__(self):\n tmpstr = self.__class__.__name__ + \"(\"\n tmpstr += \"output_size=\" + str(self.output_size)\n tmpstr += \", spatial_scale=\" + str(self.spatial_scale)\n tmpstr += \", sampling_ratio=\" + str(self.sampling_ratio)\n tmpstr += \")\"\n return tmpstr\n" ]
[ [ "torch.nn.modules.utils._pair" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
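The ROIAlign wrapper above depends on the repo's compiled _C extension, so it is hard to run in isolation. As a shape-level sketch, torchvision ships an operator with the same (output_size, spatial_scale, sampling_ratio) parameters; the feature map and boxes below are invented, and the numerical details of the two kernels may differ:

import torch
from torchvision.ops import RoIAlign

features = torch.randn(2, 256, 50, 50)               # (batch, channels, H, W), e.g. a stride-4 feature map
rois = torch.tensor([[0.0, 8.0, 8.0, 120.0, 96.0],   # (batch_index, x1, y1, x2, y2) in image coordinates
                     [1.0, 0.0, 0.0, 64.0, 64.0]])
pool = RoIAlign(output_size=(7, 7), spatial_scale=1 / 4, sampling_ratio=2)
out = pool(features, rois)
print(out.shape)  # torch.Size([2, 256, 7, 7]) -- one pooled map per RoI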
leelastar/leelastar-training
[ "b6b4a36c48c418fcc0bd3ccb7f9c2e95e29f26c9" ]
[ "polaris/tests/test_layers.py" ]
[ "\"\"\" Group all tests cases for layers\"\"\"\n\nimport pytest\nimport torch\n\nfrom polaris.network.layers import SqueezeExcitation, ResidualBlock2D\n\n\ndef test_squeeze_excitation():\n X = torch.tensor([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]])\n se = SqueezeExcitation(channels=1, ratio=1)\n se.dense_linear_1.weight.data = torch.tensor([[4.0]])\n se.dense_linear_1.bias.data = torch.tensor([[2.0]])\n se.dense_linear_2.weight.data = torch.tensor([[-0.1], [2.0]])\n se.dense_linear_2.bias.data = torch.tensor([0.1, -3])\n\n output = se(X)\n expected = torch.tensor([[[[41.109, 41.218, 41.327], [41.436, 41.545, 41.655], [41.764, 41.873, 41.982]]]])\n assert pytest.approx(expected.detach().numpy(), abs=1e-3) == output.detach().numpy()\n\n\ndef test_residual_block():\n X = torch.tensor([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]])\n rb = ResidualBlock2D(channels=1, kernel_size=3, se_ratio=1)\n rb.conv_layer_1.weight.data = torch.tensor([[[[0.0, 1, 0.0], [1, 2, 1], [0.0, 1, 0.0]]]])\n rb.conv_layer_2.weight.data = torch.tensor([[[[0.0, 1, 0.0], [1, 1, 1], [0.0, 1, 0.0]]]])\n rb.batch_norm_1.weight.data = torch.tensor([0.1])\n rb.batch_norm_2.weight.data = torch.tensor([1.0])\n rb.squeeze_ex.dense_linear_1.weight.data = torch.tensor([[0.0]])\n rb.squeeze_ex.dense_linear_1.bias.data = torch.tensor([[0.0]])\n rb.squeeze_ex.dense_linear_2.weight.data = torch.tensor([[1.0], [1.0]])\n rb.squeeze_ex.dense_linear_2.bias.data = torch.tensor([1.0, 0.0])\n\n output = rb(X)\n expected = torch.tensor([[[[0.000, 1.351, 2.282], [3.535, 5.685, 6.340], [7.018, 9.076, 9.823]]]])\n assert pytest.approx(expected.detach().numpy(), abs=1e-3) == output.detach().numpy()\n" ]
[ [ "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
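The tests above hand-set the weights of polaris' SqueezeExcitation layer without showing its definition. A minimal squeeze-excitation sketch consistent with the test: a global average pool, two dense layers, then a per-channel sigmoid scale plus an additive bias (the 2 * channels output splits into those two halves). With the weights from test_squeeze_excitation it reproduces the expected ~41.109 .. 41.982 values, though the actual polaris implementation may differ in details:

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinySE(nn.Module):
    def __init__(self, channels: int, ratio: int):
        super().__init__()
        self.dense_linear_1 = nn.Linear(channels, channels // ratio)
        self.dense_linear_2 = nn.Linear(channels // ratio, 2 * channels)  # scale half + bias half

    def forward(self, x):
        pooled = x.mean(dim=(2, 3))                                    # squeeze: (N, C)
        z = self.dense_linear_2(F.relu(self.dense_linear_1(pooled)))  # excite: (N, 2C)
        scale, bias = z.chunk(2, dim=1)
        scale = torch.sigmoid(scale)[:, :, None, None]
        return scale * x + bias[:, :, None, None]

se = TinySE(channels=1, ratio=1)
se.dense_linear_1.weight.data = torch.tensor([[4.0]])
se.dense_linear_1.bias.data = torch.tensor([2.0])
se.dense_linear_2.weight.data = torch.tensor([[-0.1], [2.0]])
se.dense_linear_2.bias.data = torch.tensor([0.1, -3.0])
x = torch.tensor([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]])
print(se(x))  # approx. 41.109 ... 41.982, matching the expected tensor in the test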
MrHuff/keops
[ "a7f44609ba444af8d9fcb11bc3a75f2024841dfa" ]
[ "pykeops/benchmarks/plot_benchmark_invkernel.py" ]
[ "\"\"\"\nSolving positive definite linear systems\n=========================================\n\nThis benchmark compares the performances of KeOps versus Numpy and Pytorch on a inverse matrix operation. It uses the functions :class:`torch.KernelSolve <pykeops.torch.KernelSolve>` (see also :doc:`here <../_auto_examples/pytorch/plot_test_invkernel_torch>`) and :class:`numpy.KernelSolve <pykeops.numpy.KernelSolve>` (see also :doc:`here <../_auto_examples/numpy/plot_test_invkernel_numpy>`).\n \nIn a nutshell, given :math:`x \\in\\mathbb R^{N\\\\times D}` and :math:`b \\in \\mathbb R^{N\\\\times D_v}`, we compute :math:`a \\in \\mathbb R^{N\\\\times D_v}` so that\n\n.. math::\n\n b = (\\\\alpha\\operatorname{Id} + K_{x,x}) a \\quad \\Leftrightarrow \\quad a = (\\\\alpha\\operatorname{Id}+ K_{x,x})^{-1} b\n \nwhere :math:`K_{x,x} = \\Big[\\exp(-\\|x_i -x_j\\|^2 / \\sigma^2)\\Big]_{i,j=1}^N`. The method is based on a conjugate gradient scheme. The benchmark tests various values of :math:`N \\in [10, \\cdots,10^6]`.\n\n \n\"\"\"\n\n#####################################################################\n# Setup\n# -----\n# Standard imports:\n\nimport importlib\nimport os\nimport time\n\nimport numpy as np\nimport torch\nfrom matplotlib import pyplot as plt\n\nfrom scipy.sparse import diags\nfrom scipy.sparse.linalg import aslinearoperator, cg\nfrom scipy.sparse.linalg.interface import IdentityOperator\n\nfrom pykeops.numpy import KernelSolve as KernelSolve_np, LazyTensor\nfrom pykeops.torch import KernelSolve\nfrom pykeops.torch.utils import squared_distances\n\nuse_cuda = torch.cuda.is_available()\n\n#####################################################################\n# Benchmark specifications:\n# \n\nD = 3 # Let's do this in 3D\nDv = 1 # Dimension of the vectors (= number of linear problems to solve)\nMAXTIME = 10 if use_cuda else 1 # Max number of seconds before we break the loop\nREDTIME = 5 if use_cuda else .2 # Decrease the number of runs if computations take longer than 2s...\n\n# Number of samples that we'll loop upon\nNS = [10, 20, 50,\n 100, 200, 500, \n 1000, 2000, 5000, \n 10000, 20000, 50000, \n 100000, 200000, 500000,\n 1000000\n ]\n\n#####################################################################\n# Create some random input data:\n#\n\ndef generate_samples(N, device, lang):\n \"\"\"Create point clouds sampled non-uniformly on a sphere of diameter 1.\"\"\"\n if lang == 'torch':\n if device == 'cuda':\n torch.cuda.manual_seed_all(1234)\n else:\n torch.manual_seed(1234)\n\n x = torch.rand(N, D, device=device)\n b = torch.randn(N, Dv, device=device)\n gamma = torch.ones(1, device=device) * .5 / .01 ** 2 # kernel bandwidth\n alpha = torch.ones(1, device=device) * 0.8 # regularization\n else:\n np.random.seed(1234)\n\n x = np.random.rand(N, D).astype('float32')\n b = np.random.randn(N, Dv).astype('float32')\n gamma = (np.ones(1) * 1 / .01 ** 2).astype('float32') # kernel bandwidth\n alpha = (np.ones(1) * 0.8).astype('float32') # regularization\n\n return x, b, gamma, alpha\n\n######################################################################\n# KeOps kernel\n# ---------------\n#\n# Define a Gaussian RBF kernel:\n#\nformula = 'Exp(- g * SqDist(x,y)) * a'\naliases = ['x = Vi(' + str(D) + ')', # First arg: i-variable of size D\n 'y = Vj(' + str(D) + ')', # Second arg: j-variable of size D\n 'a = Vj(' + str(Dv) + ')', # Third arg: j-variable of size Dv\n 'g = Pm(1)'] # Fourth arg: scalar parameter\n\n######################################################################\n# .. 
note::\n# This operator uses a conjugate gradient solver and assumes\n# that **formula** defines a **symmetric**, positive and definite\n# **linear** reduction with respect to the alias ``\"a\"``\n# specified through the third argument.\n\n######################################################################\n# Define the Kernel solver, with a ridge regularization **alpha**:\n# \n\ndef Kinv_keops(x, b, gamma, alpha):\n Kinv = KernelSolve(formula, aliases, \"a\", axis=1)\n res = Kinv(x, x, b, gamma, alpha=alpha)\n return res\n\ndef Kinv_keops_numpy(x, b, gamma, alpha):\n Kinv = KernelSolve_np(formula, aliases, \"a\", axis=1, dtype='float32')\n res = Kinv(x, x, b, gamma, alpha=alpha)\n return res\n\ndef Kinv_scipy(x, b, gamma, alpha):\n x_i, y_j = LazyTensor( gamma * x[:, None, :]), LazyTensor( gamma * x[None, :, :])\n K_ij = (- ((x_i - y_j) ** 2).sum(2)).exp()\n A = aslinearoperator(diags(alpha * np.ones(x.shape[0]))) + aslinearoperator(K_ij)\n A.dtype = np.dtype('float32')\n res = cg(A, b)\n return res\n\n\n######################################################################\n# Define the same Kernel solver, using a **tensorized** implementation:\n#\n\ndef Kinv_pytorch(x, b, gamma, alpha):\n K_xx = alpha * torch.eye(x.shape[0], device=x.get_device()) + torch.exp( - squared_distances(x, x) * gamma)\n res = torch.solve(b, K_xx)[0]\n return res\n\ndef Kinv_numpy(x, b, gamma, alpha):\n K_xx = alpha * np.eye(x.shape[0]) + np.exp( - gamma * np.sum( (x[:,None,:] - x[None,:,:]) **2, axis=2) )\n res = np.linalg.solve(K_xx, b)\n return res\n\n######################################################################\n# Benchmarking loops\n# -----------------------\n\ndef benchmark(Routine, dev, N, loops=10, lang='torch') :\n \"\"\"Times a routine on an N-by-N problem.\"\"\"\n\n importlib.reload(torch) # In case we had a memory overflow just before...\n device = torch.device(dev)\n x, b, gamma, alpha = generate_samples(N, device, lang)\n\n # We simply benchmark a kernel inversion\n code = \"a = Routine(x, b, gamma, alpha)\"\n exec( code, locals() ) # Warmup run, to compile and load everything\n if use_cuda: torch.cuda.synchronize()\n\n t_0 = time.perf_counter() # Actual benchmark --------------------\n for i in range(loops):\n exec( code, locals() )\n if use_cuda: torch.cuda.synchronize()\n elapsed = time.perf_counter() - t_0 # ---------------------------\n\n print(\"{:3} NxN kernel inversion, with N ={:7}: {:3}x{:3.6f}s\".format(loops, N, loops, elapsed / loops))\n return elapsed / loops\n\n\ndef bench_config(Routine, backend, dev, l) :\n \"\"\"Times a routine for an increasing number of samples.\"\"\"\n\n print(\"Backend : {}, Device : {} -------------\".format(backend, dev))\n\n times = []\n not_recorded_times = []\n try :\n Nloops = [100, 10, 1]\n nloops = Nloops.pop(0)\n for n in NS :\n elapsed = benchmark(Routine, dev, n, loops=nloops, lang=l)\n\n times.append( elapsed )\n if (nloops * elapsed > MAXTIME) or (nloops * elapsed > REDTIME/nloops and len(Nloops) > 0): \n nloops = Nloops.pop(0)\n\n except RuntimeError:\n print(\"**\\nMemory overflow !\")\n not_recorded_times = (len(NS)-len(times)) * [np.nan]\n except IndexError:\n print(\"**\\nToo slow !\")\n not_recorded_times = (len(NS)-len(times)) * [np.Infinity]\n \n return times + not_recorded_times\n\n\ndef full_bench(title, routines) :\n \"\"\"Benchmarks a collection of routines.\"\"\"\n\n backends = [ backend for (_, backend, _) in routines ]\n\n print(\"Benchmarking : {} ===============================\".format(title))\n \n lines = [ NS ]\n 
for routine, backend, lang in routines :\n lines.append(bench_config(routine, backend, \"cuda\" if use_cuda else \"cpu\", lang) )\n\n benches = np.array(lines).T\n\n # Creates a pyplot figure:\n plt.figure(figsize=(12,8))\n linestyles = [\"o-\", \"s-\", \"^-\", \"x-\", \"<-\"]\n for i, backend in enumerate(backends):\n plt.plot( benches[:,0], benches[:,i+1], linestyles[i], \n linewidth=2, label='backend = \"{}\"'.format(backend) )\n \n for (j, val) in enumerate( benches[:,i+1] ):\n if np.isnan(val) and j > 0:\n x, y = benches[j-1,0], benches[j-1,i+1]\n plt.annotate('Memory overflow!',\n xy=(x, 1.05*y),\n horizontalalignment='center',\n verticalalignment='bottom')\n break\n elif np.isinf(val) and j > 0:\n x, y = benches[j-1,0], benches[j-1,i+1]\n plt.annotate('Too slow!',\n xy=(x, 1.05*y),\n horizontalalignment='center',\n verticalalignment='bottom')\n break\n\n plt.title('Runtimes for {} in dimension {}'.format(title, D))\n plt.xlabel('Number of samples')\n plt.ylabel('Seconds')\n plt.yscale('log') ; plt.xscale('log')\n plt.legend(loc='upper left')\n plt.grid(True, which=\"major\", linestyle=\"-\")\n plt.grid(True, which=\"minor\", linestyle=\"dotted\")\n plt.tight_layout()\n\n # Save as a .csv to put a nice Tikz figure in the papers:\n header = \"Npoints \" + \" \".join(backends)\n os.makedirs(\"output\", exist_ok=True)\n np.savetxt(\"output/benchmark_kernelsolve.csv\", benches, \n fmt='%-9.5f', header=header, comments='')\n\n\n######################################################################\n# Run the benchmark\n# ---------------------\n\nroutines = [(Kinv_numpy, \"NumPy\", \"numpy\"), \n (Kinv_pytorch, \"PyTorch\", \"torch\"), \n (Kinv_keops_numpy, \"NumPy + KeOps\", \"numpy\"), \n (Kinv_keops, \"PyTorch + KeOps\", \"torch\"),\n (Kinv_scipy, \"Scipy + KeOps\", \"numpy\"),\n ]\nfull_bench( \"Inverse radial kernel matrix\", routines )\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.dtype", "numpy.random.randn", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "torch.device", "torch.cuda.synchronize", "matplotlib.pyplot.tight_layout", "torch.ones", "torch.solve", "torch.randn", "numpy.eye", "scipy.sparse.linalg.cg", "scipy.sparse.linalg.aslinearoperator", "torch.rand", "matplotlib.pyplot.figure", "numpy.isnan", "matplotlib.pyplot.annotate", "matplotlib.pyplot.xlabel", "numpy.random.rand", "numpy.savetxt", "matplotlib.pyplot.show", "numpy.array", "numpy.sum", "matplotlib.pyplot.ylabel", "numpy.linalg.solve", "numpy.random.seed", "torch.manual_seed", "matplotlib.pyplot.yscale", "numpy.ones", "matplotlib.pyplot.grid", "matplotlib.pyplot.xscale", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
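Kinv_scipy above wraps the KeOps LazyTensor as a LinearOperator and hands it to scipy's conjugate gradient. A dense, NumPy-only sketch of the same solve on a tiny made-up problem, to make explicit what the benchmark is timing:

import numpy as np
from scipy.sparse.linalg import aslinearoperator, cg

rng = np.random.default_rng(0)
x = rng.random((200, 3))
b = rng.standard_normal(200)
gamma, alpha = 1 / 0.01 ** 2, 0.8

sq_dists = ((x[:, None, :] - x[None, :, :]) ** 2).sum(-1)
K = np.exp(-gamma * sq_dists)                     # Gaussian kernel matrix (positive semi-definite)
A = aslinearoperator(alpha * np.eye(len(x)) + K)  # the ridge term makes the system positive definite
a, info = cg(A, b)                                # info == 0 signals convergence
print(info, np.abs(A.dot(a) - b).max())           # residual should be near zero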
ionicsolutions/ytterbium
[ "8cc6b4f942d7040e008ecf03f58b1a241800e74f", "8cc6b4f942d7040e008ecf03f58b1a241800e74f" ]
[ "simulations/examples/lineshape.py", "polarization.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nimport ytterbium as yb\nfrom ytterbium.Yb174 import FourLevelSystem\n\n# initialize the S-P transition in 174Yb+ as a four-level system\nFLS = FourLevelSystem(sat=0.5)\n\n# to measure the lineshape, we drive the system at different laser detunings,\n# which are defined in MHz across ytterbium\nlaser_detuning = np.linspace(-40.0, 40.0, num=41)\n\n# for each detuning, we generate a Hamiltonian\nhamiltonians, _ = yb.vary(FLS, delta=laser_detuning)\n\n# initially, all population is in the ground state\npsi0 = 1/np.sqrt(2) * (FLS.basis[0] + FLS.basis[1])\n\n# we prepare population operators |i><i| for all states\npopulation = [state * state.dag() for state in FLS.basis]\n\n# to use Python's multiprocessing module for parallel evaluation,\n# the call to yb.mesolve() must not be executed unless the script\n# is invoked directly\nif __name__ == \"__main__\":\n # solve the system for each Hamiltonian for 15 us\n results = yb.mesolve(hamiltonians, psi0,\n np.linspace(0, 15*10**-6, num=500),\n FLS.decay, population)\n\n # extract the steady-state excited-state population from the results\n excited_state_population = [result.expect[2][-1] + result.expect[3][-1]\n for result in results]\n\n plt.plot(laser_detuning, excited_state_population, \"o\")\n plt.xlabel(\"Laser detuning from resonance [MHz]\")\n plt.ylabel(\"Total excited-state population\")\n plt.show()\n", "# -*- coding: utf-8 -*-\n#\n# (c) 2017 Kilian Kluge\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__all__ = [\"normalize\"]\n\nimport numpy as np\n\n\ndef normalize(vector):\n \"\"\"Normalize polarization *vector*.\"\"\"\n if len(vector) != 3:\n raise ValueError(\n \"Polarization vector must have exactly 3 components.\")\n _vector = np.array(vector)\n length = np.sqrt(np.sum(np.power(_vector, 2)))\n if length <= 0.0 or not np.all(np.isreal(_vector)):\n raise ValueError(\n \"Polarization vector has to be real and of non-zero length.\")\n return tuple(_vector/length)\n" ]
[ [ "numpy.sqrt", "numpy.linspace", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "numpy.array", "numpy.isreal", "numpy.power" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
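Quick usage sketch for the normalize() helper defined in polarization.py above, restated verbatim so it runs standalone; the input vectors are arbitrary examples:

import numpy as np

def normalize(vector):
    # same logic as ytterbium's polarization.normalize
    if len(vector) != 3:
        raise ValueError("Polarization vector must have exactly 3 components.")
    _vector = np.array(vector)
    length = np.sqrt(np.sum(np.power(_vector, 2)))
    if length <= 0.0 or not np.all(np.isreal(_vector)):
        raise ValueError("Polarization vector has to be real and of non-zero length.")
    return tuple(_vector / length)

print(normalize((1, 1, 0)))  # (0.7071..., 0.7071..., 0.0)
print(normalize((0, 0, 5)))  # (0.0, 0.0, 1.0)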
L5vD5/PyElastica
[ "1fec949ff3dc292ae558cab8beebea7405f9d14b", "1fec949ff3dc292ae558cab8beebea7405f9d14b" ]
[ "elastica/joint.py", "examples/Visualization/AxialStretchingVisualization/axial_stretching_render.py" ]
[ "__doc__ = \"\"\" Module containing joint classes to connect multiple rods together. \"\"\"\n__all__ = [\"FreeJoint\", \"HingeJoint\", \"FixedJoint\", \"ExternalContact\", \"SelfContact\"]\nimport numpy as np\nimport numba\nfrom elastica.utils import Tolerance, MaxDimension\nfrom elastica._linalg import _batch_product_k_ik_to_ik\nfrom math import sqrt\n\n\nclass FreeJoint:\n \"\"\"\n This free joint class is the base class for all joints. Free or spherical\n joints constrains the relative movement between two nodes (chosen by the user)\n by applying restoring forces. For implementation details, refer to Zhang et al. Nature Communications (2019).\n\n Attributes\n ----------\n k: float\n Stiffness coefficient of the joint.\n nu: float\n Damping coefficient of the joint.\n\n Note\n ----\n Every new joint class must be derived from the FreeJoint class.\n\n\n \"\"\"\n\n # pass the k and nu for the forces\n # also the necessary rods for the joint\n # indices should be 0 or -1, we will provide wrappers for users later\n def __init__(self, k, nu):\n \"\"\"\n\n Parameters\n ----------\n k: float\n Stiffness coefficient of the joint.\n nu: float\n Damping coefficient of the joint.\n\n \"\"\"\n self.k = k\n self.nu = nu\n\n def apply_forces(self, rod_one, index_one, rod_two, index_two):\n \"\"\"\n Apply joint force to the connected rod objects.\n\n Parameters\n ----------\n rod_one : object\n Rod-like object\n index_one : int\n Index of first rod for joint.\n rod_two : object\n Rod-like object\n index_two : int\n Index of second rod for joint.\n\n Returns\n -------\n\n \"\"\"\n end_distance_vector = (\n rod_two.position_collection[..., index_two]\n - rod_one.position_collection[..., index_one]\n )\n # Calculate norm of end_distance_vector\n # this implementation timed: 2.48 µs ± 126 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)\n end_distance = np.sqrt(np.dot(end_distance_vector, end_distance_vector))\n\n # Below if check is not efficient find something else\n # We are checking if end of rod1 and start of rod2 are at the same point in space\n # If they are at the same point in space, it is a zero vector.\n if end_distance <= Tolerance.atol():\n normalized_end_distance_vector = np.array([0.0, 0.0, 0.0])\n else:\n normalized_end_distance_vector = end_distance_vector / end_distance\n\n elastic_force = self.k * end_distance_vector\n\n relative_velocity = (\n rod_two.velocity_collection[..., index_two]\n - rod_one.velocity_collection[..., index_one]\n )\n normal_relative_velocity = (\n np.dot(relative_velocity, normalized_end_distance_vector)\n * normalized_end_distance_vector\n )\n damping_force = -self.nu * normal_relative_velocity\n\n contact_force = elastic_force + damping_force\n\n rod_one.external_forces[..., index_one] += contact_force\n rod_two.external_forces[..., index_two] -= contact_force\n\n return\n\n def apply_torques(self, rod_one, index_one, rod_two, index_two):\n \"\"\"\n Apply restoring joint torques to the connected rod objects.\n\n In FreeJoint class, this routine simply passes.\n\n Parameters\n ----------\n rod_one : object\n Rod-like object\n index_one : int\n Index of first rod for joint.\n rod_two : object\n Rod-like object\n index_two : int\n Index of second rod for joint.\n\n Returns\n -------\n\n \"\"\"\n pass\n\n\nclass HingeJoint(FreeJoint):\n \"\"\"\n This hinge joint class constrains the relative movement and rotation\n (only one axis defined by the user) between two nodes and elements\n (chosen by the user) by applying restoring forces and torques. 
For\n implementation details, refer to Zhang et al. Nature\n Communications (2019).\n\n Attributes\n ----------\n k: float\n Stiffness coefficient of the joint.\n nu: float\n Damping coefficient of the joint.\n kt: float\n Rotational stiffness coefficient of the joint.\n normal_direction: numpy.ndarray\n 2D (dim, 1) array containing data with 'float' type. Constraint rotation direction.\n \"\"\"\n\n # TODO: IN WRAPPER COMPUTE THE NORMAL DIRECTION OR ASK USER TO GIVE INPUT, IF NOT THROW ERROR\n def __init__(self, k, nu, kt, normal_direction):\n \"\"\"\n\n Parameters\n ----------\n k: float\n Stiffness coefficient of the joint.\n nu: float\n Damping coefficient of the joint.\n kt: float\n Rotational stiffness coefficient of the joint.\n normal_direction: numpy.ndarray\n 2D (dim, 1) array containing data with 'float' type. Constraint rotation direction.\n \"\"\"\n super().__init__(k, nu)\n # normal direction of the constrain plane\n # for example for yz plane (1,0,0)\n # unitize the normal vector\n self.normal_direction = normal_direction / np.linalg.norm(normal_direction)\n # additional in-plane constraint through restoring torque\n # stiffness of the restoring constraint -- tuned empirically\n self.kt = kt\n\n # Apply force is same as free joint\n def apply_forces(self, rod_one, index_one, rod_two, index_two):\n return super().apply_forces(rod_one, index_one, rod_two, index_two)\n\n def apply_torques(self, rod_one, index_one, rod_two, index_two):\n # current direction of the first element of link two\n # also NOTE: - rod two is hinged at first element\n link_direction = (\n rod_two.position_collection[..., index_two + 1]\n - rod_two.position_collection[..., index_two]\n )\n\n # projection of the link direction onto the plane normal\n force_direction = (\n -np.dot(link_direction, self.normal_direction) * self.normal_direction\n )\n\n # compute the restoring torque\n torque = self.kt * np.cross(link_direction, force_direction)\n\n # The opposite torque will be applied on link one\n rod_one.external_torques[..., index_one] -= (\n rod_one.director_collection[..., index_one] @ torque\n )\n rod_two.external_torques[..., index_two] += (\n rod_two.director_collection[..., index_two] @ torque\n )\n\n\nclass FixedJoint(FreeJoint):\n \"\"\"\n The fixed joint class restricts the relative movement and rotation\n between two nodes and elements by applying restoring forces and torques.\n For implementation details, refer to Zhang et al. 
Nature\n Communications (2019).\n\n Attributes\n ----------\n k: float\n Stiffness coefficient of the joint.\n nu: float\n Damping coefficient of the joint.\n kt: float\n Rotational stiffness coefficient of the joint.\n \"\"\"\n\n def __init__(self, k, nu, kt):\n \"\"\"\n\n Parameters\n ----------\n k: float\n Stiffness coefficient of the joint.\n nu: float\n Damping coefficient of the joint.\n kt: float\n Rotational stiffness coefficient of the joint.\n \"\"\"\n super().__init__(k, nu)\n # additional in-plane constraint through restoring torque\n # stiffness of the restoring constraint -- tuned empirically\n self.kt = kt\n\n # Apply force is same as free joint\n def apply_forces(self, rod_one, index_one, rod_two, index_two):\n return super().apply_forces(rod_one, index_one, rod_two, index_two)\n\n def apply_torques(self, rod_one, index_one, rod_two, index_two):\n # current direction of the first element of link two\n # also NOTE: - rod two is fixed at first element\n link_direction = (\n rod_two.position_collection[..., index_two + 1]\n - rod_two.position_collection[..., index_two]\n )\n\n # To constrain the orientation of link two, the second node of link two should align with\n # the direction of link one. Thus, we compute the desired position of the second node of link two\n # as check1, and the current position of the second node of link two as check2. Check1 and check2\n # should overlap.\n\n tgt_destination = (\n rod_one.position_collection[..., index_one]\n + rod_two.rest_lengths[index_two] * rod_one.tangents[..., index_one]\n ) # dl of rod 2 can be different than rod 1 so use rest length of rod 2\n\n curr_destination = rod_two.position_collection[\n ..., index_two + 1\n ] # second element of rod2\n\n # Compute the restoring torque\n forcedirection = -self.kt * (\n curr_destination - tgt_destination\n ) # force direction is between rod2 2nd element and rod1\n torque = np.cross(link_direction, forcedirection)\n\n # The opposite torque will be applied on link one\n rod_one.external_torques[..., index_one] -= (\n rod_one.director_collection[..., index_one] @ torque\n )\n rod_two.external_torques[..., index_two] += (\n rod_two.director_collection[..., index_two] @ torque\n )\n\n\[email protected](cache=True)\ndef _dot_product(a, b):\n sum = 0.0\n for i in range(3):\n sum += a[i] * b[i]\n return sum\n\n\[email protected](cache=True)\ndef _norm(a):\n return sqrt(_dot_product(a, a))\n\n\[email protected](cache=True)\ndef _clip(x, low, high):\n return max(low, min(x, high))\n\n\n# Can this be made more efficient than 2 comp, 1 or?\[email protected](cache=True)\ndef _out_of_bounds(x, low, high):\n return (x < low) or (x > high)\n\n\[email protected](cache=True)\ndef _find_min_dist(x1, e1, x2, e2):\n e1e1 = _dot_product(e1, e1)\n e1e2 = _dot_product(e1, e2)\n e2e2 = _dot_product(e2, e2)\n\n x1e1 = _dot_product(x1, e1)\n x1e2 = _dot_product(x1, e2)\n x2e1 = _dot_product(e1, x2)\n x2e2 = _dot_product(x2, e2)\n\n s = 0.0\n t = 0.0\n\n parallel = abs(1.0 - e1e2 ** 2 / (e1e1 * e2e2)) < 1e-6\n if parallel:\n # Some are parallel, so do processing\n t = (x2e1 - x1e1) / e1e1 # Comes from taking dot of e1 with a normal\n t = _clip(t, 0.0, 1.0)\n s = (x1e2 + t * e1e2 - x2e2) / e2e2 # Same as before\n s = _clip(s, 0.0, 1.0)\n else:\n # Using the Cauchy-Binet formula on eq(7) in docstring reference\n s = (e1e1 * (x1e2 - x2e2) + e1e2 * (x2e1 - x1e1)) / (e1e1 * e2e2 - (e1e2) ** 2)\n t = (e1e2 * s + x2e1 - x1e1) / e1e1\n\n if _out_of_bounds(s, 0.0, 1.0) or _out_of_bounds(t, 0.0, 1.0):\n # potential_s = 
-100.0\n # potential_t = -100.0\n # potential_d = -100.0\n # overall_minimum_distance = 1e20\n\n # Fill in the possibilities\n potential_t = (x2e1 - x1e1) / e1e1\n s = 0.0\n t = _clip(potential_t, 0.0, 1.0)\n potential_d = _norm(x1 + e1 * t - x2)\n overall_minimum_distance = potential_d\n\n potential_t = (x2e1 + e1e2 - x1e1) / e1e1\n potential_t = _clip(potential_t, 0.0, 1.0)\n potential_d = _norm(x1 + e1 * potential_t - x2 - e2)\n if potential_d < overall_minimum_distance:\n s = 1.0\n t = potential_t\n overall_minimum_distance = potential_d\n\n potential_s = (x1e2 - x2e2) / e2e2\n potential_s = _clip(potential_s, 0.0, 1.0)\n potential_d = _norm(x2 + potential_s * e2 - x1)\n if potential_d < overall_minimum_distance:\n s = potential_s\n t = 0.0\n overall_minimum_distance = potential_d\n\n potential_s = (x1e2 + e1e2 - x2e2) / e2e2\n potential_s = _clip(potential_s, 0.0, 1.0)\n potential_d = _norm(x2 + potential_s * e2 - x1 - e1)\n if potential_d < overall_minimum_distance:\n s = potential_s\n t = 1.0\n\n return x2 + s * e2 - x1 - t * e1\n\n\[email protected](cache=True)\ndef _calculate_contact_forces_rod_rigid_body(\n x_collection_rod,\n edge_collection_rod,\n x_cylinder,\n edge_cylinder,\n radii_sum,\n length_sum,\n internal_forces_rod,\n external_forces_rod,\n external_forces_cylinder,\n velocity_rod,\n velocity_cylinder,\n contact_k,\n contact_nu,\n):\n # We already pass in only the first n_elem x\n n_points = x_collection_rod.shape[1]\n for i in range(n_points):\n # Element-wise bounding box\n x_selected = x_collection_rod[..., i]\n # x_cylinder is already a (,) array from outside\n del_x = x_selected - x_cylinder\n norm_del_x = _norm(del_x)\n\n # If outside then don't process\n if norm_del_x >= (radii_sum[i] + length_sum[i]):\n continue\n\n # find the shortest line segment between the two centerline\n # segments : differs from normal cylinder-cylinder intersection\n distance_vector = _find_min_dist(\n x_selected, edge_collection_rod[..., i], x_cylinder, edge_cylinder\n )\n distance_vector_length = _norm(distance_vector)\n distance_vector /= distance_vector_length\n\n gamma = radii_sum[i] - distance_vector_length\n\n # If distance is large, don't worry about it\n if gamma < -1e-5:\n continue\n\n rod_elemental_forces = 0.5 * (\n external_forces_rod[..., i]\n + external_forces_rod[..., i + 1]\n + internal_forces_rod[..., i]\n + internal_forces_rod[..., i + 1]\n )\n equilibrium_forces = -rod_elemental_forces + external_forces_cylinder[..., 0]\n\n normal_force = _dot_product(equilibrium_forces, distance_vector)\n # Following line same as np.where(normal_force < 0.0, -normal_force, 0.0)\n normal_force = abs(min(normal_force, 0.0))\n\n # CHECK FOR GAMMA > 0.0, heaviside but we need to overload it in numba\n # As a quick fix, use this instead\n mask = (gamma > 0.0) * 1.0\n\n contact_force = contact_k * gamma\n interpenetration_velocity = (\n 0.5 * (velocity_rod[..., i] + velocity_rod[..., i + 1])\n - velocity_cylinder[..., 0]\n )\n contact_damping_force = contact_nu * _dot_product(\n interpenetration_velocity, distance_vector\n )\n\n # magnitude* direction\n net_contact_force = (\n normal_force + 0.5 * mask * (contact_damping_force + contact_force)\n ) * distance_vector\n\n # Add it to the rods at the end of the day\n if i == 0:\n external_forces_rod[..., i] -= 0.5 * net_contact_force\n external_forces_rod[..., i + 1] -= net_contact_force\n external_forces_cylinder[..., 0] += 1.5 * net_contact_force\n elif i == n_points:\n external_forces_rod[..., i] -= net_contact_force\n 
external_forces_rod[..., i + 1] -= 0.5 * net_contact_force\n external_forces_cylinder[..., 0] += 1.5 * net_contact_force\n else:\n external_forces_rod[..., i] -= net_contact_force\n external_forces_rod[..., i + 1] -= net_contact_force\n external_forces_cylinder[..., 0] += 2.0 * net_contact_force\n\n\[email protected](cache=True)\ndef _calculate_contact_forces_rod_rod(\n x_collection_rod_one,\n radius_rod_one,\n length_rod_one,\n tangent_rod_one,\n velocity_rod_one,\n internal_forces_rod_one,\n external_forces_rod_one,\n x_collection_rod_two,\n radius_rod_two,\n length_rod_two,\n tangent_rod_two,\n velocity_rod_two,\n internal_forces_rod_two,\n external_forces_rod_two,\n contact_k,\n contact_nu,\n):\n # We already pass in only the first n_elem x\n n_points_rod_one = x_collection_rod_one.shape[1]\n n_points_rod_two = x_collection_rod_two.shape[1]\n edge_collection_rod_one = _batch_product_k_ik_to_ik(length_rod_one, tangent_rod_one)\n edge_collection_rod_two = _batch_product_k_ik_to_ik(length_rod_two, tangent_rod_two)\n\n for i in range(n_points_rod_one):\n for j in range(n_points_rod_two):\n radii_sum = radius_rod_one[i] + radius_rod_two[j]\n length_sum = length_rod_one[i] + length_rod_two[j]\n # Element-wise bounding box\n x_selected_rod_one = x_collection_rod_one[..., i]\n x_selected_rod_two = x_collection_rod_two[..., j]\n\n del_x = x_selected_rod_one - x_selected_rod_two\n norm_del_x = _norm(del_x)\n\n # If outside then don't process\n if norm_del_x >= (radii_sum + length_sum):\n continue\n\n # find the shortest line segment between the two centerline\n # segments : differs from normal cylinder-cylinder intersection\n distance_vector = _find_min_dist(\n x_selected_rod_one,\n edge_collection_rod_one[..., i],\n x_selected_rod_two,\n edge_collection_rod_two[..., j],\n )\n distance_vector_length = _norm(distance_vector)\n distance_vector /= distance_vector_length\n\n gamma = radii_sum - distance_vector_length\n\n # If distance is large, don't worry about it\n if gamma < -1e-5:\n continue\n\n rod_one_elemental_forces = 0.5 * (\n external_forces_rod_one[..., i]\n + external_forces_rod_one[..., i + 1]\n + internal_forces_rod_one[..., i]\n + internal_forces_rod_one[..., i + 1]\n )\n\n rod_two_elemental_forces = 0.5 * (\n external_forces_rod_two[..., j]\n + external_forces_rod_two[..., j + 1]\n + internal_forces_rod_two[..., j]\n + internal_forces_rod_two[..., j + 1]\n )\n\n equilibrium_forces = -rod_one_elemental_forces + rod_two_elemental_forces\n\n normal_force = _dot_product(equilibrium_forces, distance_vector)\n # Following line same as np.where(normal_force < 0.0, -normal_force, 0.0)\n normal_force = abs(min(normal_force, 0.0))\n\n # CHECK FOR GAMMA > 0.0, heaviside but we need to overload it in numba\n # As a quick fix, use this instead\n mask = (gamma > 0.0) * 1.0\n\n contact_force = contact_k * gamma\n interpenetration_velocity = 0.5 * (\n (velocity_rod_one[..., i] + velocity_rod_one[..., i + 1])\n - (velocity_rod_two[..., j] + velocity_rod_two[..., j + 1])\n )\n contact_damping_force = contact_nu * _dot_product(\n interpenetration_velocity, distance_vector\n )\n\n # magnitude* direction\n net_contact_force = (\n normal_force + 0.5 * mask * (contact_damping_force + contact_force)\n ) * distance_vector\n\n # Add it to the rods at the end of the day\n if i == 0:\n external_forces_rod_one[..., i] -= net_contact_force * 2 / 3\n external_forces_rod_one[..., i + 1] -= net_contact_force * 4 / 3\n elif i == n_points_rod_one:\n external_forces_rod_one[..., i] -= net_contact_force * 4 / 3\n 
external_forces_rod_one[..., i + 1] -= net_contact_force * 2 / 3\n else:\n external_forces_rod_one[..., i] -= net_contact_force\n external_forces_rod_one[..., i + 1] -= net_contact_force\n\n if j == 0:\n external_forces_rod_two[..., j] += net_contact_force * 2 / 3\n external_forces_rod_two[..., j + 1] += net_contact_force * 4 / 3\n elif j == n_points_rod_two - 1: # last element of rod two\n external_forces_rod_two[..., j] += net_contact_force * 4 / 3\n external_forces_rod_two[..., j + 1] += net_contact_force * 2 / 3\n else:\n external_forces_rod_two[..., j] += net_contact_force\n external_forces_rod_two[..., j + 1] += net_contact_force\n\n\n@numba.njit(cache=True)\ndef _calculate_contact_forces_self_rod(\n x_collection_rod,\n radius_rod,\n length_rod,\n tangent_rod,\n velocity_rod,\n external_forces_rod,\n contact_k,\n contact_nu,\n):\n # We already pass in only the first n_elem x\n n_points_rod = x_collection_rod.shape[1]\n edge_collection_rod_one = _batch_product_k_ik_to_ik(length_rod, tangent_rod)\n\n for i in range(n_points_rod):\n # range() needs an integer bound, so cast the ceil to int\n skip = 1 + int(np.ceil(0.8 * np.pi * radius_rod[i] / length_rod[i]))\n for j in range(i - skip, -1, -1):\n radii_sum = radius_rod[i] + radius_rod[j]\n length_sum = length_rod[i] + length_rod[j]\n # Element-wise bounding box\n x_selected_rod_index_i = x_collection_rod[..., i]\n x_selected_rod_index_j = x_collection_rod[..., j]\n\n del_x = x_selected_rod_index_i - x_selected_rod_index_j\n norm_del_x = _norm(del_x)\n\n # If outside then don't process\n if norm_del_x >= (radii_sum + length_sum):\n continue\n\n # find the shortest line segment between the two centerline\n # segments : differs from normal cylinder-cylinder intersection\n distance_vector = _find_min_dist(\n x_selected_rod_index_i,\n edge_collection_rod_one[..., i],\n x_selected_rod_index_j,\n edge_collection_rod_one[..., j],\n )\n distance_vector_length = _norm(distance_vector)\n distance_vector /= distance_vector_length\n\n gamma = radii_sum - distance_vector_length\n\n # If distance is large, don't worry about it\n if gamma < -1e-5:\n continue\n\n # CHECK FOR GAMMA > 0.0, heaviside but we need to overload it in numba\n # As a quick fix, use this instead\n mask = (gamma > 0.0) * 1.0\n\n contact_force = contact_k * gamma\n interpenetration_velocity = 0.5 * (\n (velocity_rod[..., i] + velocity_rod[..., i + 1])\n - (velocity_rod[..., j] + velocity_rod[..., j + 1])\n )\n contact_damping_force = contact_nu * _dot_product(\n interpenetration_velocity, distance_vector\n )\n\n # magnitude * direction\n net_contact_force = (\n 0.5 * mask * (contact_damping_force + contact_force)\n ) * distance_vector\n\n # Add it to the rods at the end of the day\n # if i == 0:\n # external_forces_rod[...,i] -= net_contact_force *2/3\n # external_forces_rod[...,i+1] -= net_contact_force * 4/3\n if i == n_points_rod - 1: # last element; i never reaches n_points_rod itself\n external_forces_rod[..., i] -= net_contact_force * 4 / 3\n external_forces_rod[..., i + 1] -= net_contact_force * 2 / 3\n else:\n external_forces_rod[..., i] -= net_contact_force\n external_forces_rod[..., i + 1] -= net_contact_force\n\n if j == 0:\n external_forces_rod[..., j] += net_contact_force * 2 / 3\n external_forces_rod[..., j + 1] += net_contact_force * 4 / 3\n # elif j == n_points_rod:\n # external_forces_rod[..., j] += net_contact_force * 4/3\n # external_forces_rod[..., j+1] += net_contact_force * 2/3\n else:\n external_forces_rod[..., j] += net_contact_force\n external_forces_rod[..., j + 1] += net_contact_force\n\n\n@numba.njit(cache=True)\ndef _aabbs_not_intersecting(aabb_one, aabb_two):\n \"\"\"Returns true if not 
intersecting else false\"\"\"\n if (aabb_one[0, 1] < aabb_two[0, 0]) | (aabb_one[0, 0] > aabb_two[0, 1]):\n return 1\n if (aabb_one[1, 1] < aabb_two[1, 0]) | (aabb_one[1, 0] > aabb_two[1, 1]):\n return 1\n if (aabb_one[2, 1] < aabb_two[2, 0]) | (aabb_one[2, 0] > aabb_two[2, 1]):\n return 1\n\n return 0\n\n\n@numba.njit(cache=True)\ndef _prune_using_aabbs_rod_rigid_body(\n rod_one_position_collection,\n rod_one_radius_collection,\n rod_one_length_collection,\n cylinder_position,\n cylinder_director,\n cylinder_radius,\n cylinder_length,\n):\n max_possible_dimension = np.zeros((3,))\n aabb_rod = np.empty((3, 2))\n aabb_cylinder = np.empty((3, 2))\n max_possible_dimension[...] = np.max(rod_one_radius_collection) + np.max(\n rod_one_length_collection\n )\n for i in range(3):\n aabb_rod[i, 0] = (\n np.min(rod_one_position_collection[i]) - max_possible_dimension[i]\n )\n aabb_rod[i, 1] = (\n np.max(rod_one_position_collection[i]) + max_possible_dimension[i]\n )\n\n # Is actually Q^T * d but numba complains about performance so we do\n # d^T @ Q\n cylinder_dimensions_in_local_FOR = np.array(\n [cylinder_radius, cylinder_radius, 0.5 * cylinder_length]\n )\n cylinder_dimensions_in_world_FOR = np.zeros_like(cylinder_dimensions_in_local_FOR)\n for i in range(3):\n for j in range(3):\n cylinder_dimensions_in_world_FOR[i] += (\n cylinder_director[j, i, 0] * cylinder_dimensions_in_local_FOR[j]\n )\n\n max_possible_dimension = np.abs(cylinder_dimensions_in_world_FOR)\n aabb_cylinder[..., 0] = cylinder_position[..., 0] - max_possible_dimension\n aabb_cylinder[..., 1] = cylinder_position[..., 0] + max_possible_dimension\n return _aabbs_not_intersecting(aabb_cylinder, aabb_rod)\n\n\n@numba.njit(cache=True)\ndef _prune_using_aabbs_rod_rod(\n rod_one_position_collection,\n rod_one_radius_collection,\n rod_one_length_collection,\n rod_two_position_collection,\n rod_two_radius_collection,\n rod_two_length_collection,\n):\n max_possible_dimension = np.zeros((3,))\n aabb_rod_one = np.empty((3, 2))\n aabb_rod_two = np.empty((3, 2))\n max_possible_dimension[...] = np.max(rod_one_radius_collection) + np.max(\n rod_one_length_collection\n )\n for i in range(3):\n aabb_rod_one[i, 0] = (\n np.min(rod_one_position_collection[i]) - max_possible_dimension[i]\n )\n aabb_rod_one[i, 1] = (\n np.max(rod_one_position_collection[i]) + max_possible_dimension[i]\n )\n\n max_possible_dimension[...] 
= np.max(rod_two_radius_collection) + np.max(\n rod_two_length_collection\n )\n\n for i in range(3):\n aabb_rod_two[i, 0] = (\n np.min(rod_two_position_collection[i]) - max_possible_dimension[i]\n )\n aabb_rod_two[i, 1] = (\n np.max(rod_two_position_collection[i]) + max_possible_dimension[i]\n )\n\n return _aabbs_not_intersecting(aabb_rod_two, aabb_rod_one)\n\n\nclass ExternalContact(FreeJoint):\n \"\"\"\n Assumes that the second entity is a rigid body for now, can be\n changed at a later time\n\n Most of the cylinder-cylinder contact SHOULD be implemented\n as given in this paper:\n http://larochelle.sdsmt.edu/publications/2005-2009/Collision%20Detection%20of%20Cylindrical%20Rigid%20Bodies%20Using%20Line%20Geometry.pdf\n\n but, it isn't (the elastica-cpp kernels are implemented)!\n This is maybe to speed up the kernel, but it's\n potentially dangerous as it does not deal with \"end\" conditions\n correctly.\n \"\"\"\n\n def __init__(self, k, nu):\n super().__init__(k, nu)\n\n def apply_forces(self, rod_one, index_one, rod_two, index_two):\n # del index_one, index_two\n\n # TODO: raise error during the initialization if rod one is rigid body.\n\n # If rod two has one element, then it is rigid body.\n if rod_two.n_elems == 1:\n cylinder_two = rod_two\n # First, check for a global AABB bounding box, and see whether that\n # intersects\n if _prune_using_aabbs_rod_rigid_body(\n rod_one.position_collection,\n rod_one.radius,\n rod_one.lengths,\n cylinder_two.position_collection,\n cylinder_two.director_collection,\n cylinder_two.radius,\n cylinder_two.length,\n ):\n return\n\n x_cyl = (\n cylinder_two.position_collection[..., 0]\n - 0.5 * cylinder_two.length * cylinder_two.director_collection[2, :, 0]\n )\n\n _calculate_contact_forces_rod_rigid_body(\n rod_one.position_collection[..., :-1],\n rod_one.lengths * rod_one.tangents,\n x_cyl,\n cylinder_two.length * cylinder_two.director_collection[2, :, 0],\n rod_one.radius + cylinder_two.radius,\n rod_one.lengths + cylinder_two.length,\n rod_one.internal_forces,\n rod_one.external_forces,\n cylinder_two.external_forces,\n rod_one.velocity_collection,\n cylinder_two.velocity_collection,\n self.k,\n self.nu,\n )\n\n else:\n # First, check for a global AABB bounding box, and see whether that\n # intersects\n\n if _prune_using_aabbs_rod_rod(\n rod_one.position_collection,\n rod_one.radius,\n rod_one.lengths,\n rod_two.position_collection,\n rod_two.radius,\n rod_two.lengths,\n ):\n return\n\n _calculate_contact_forces_rod_rod(\n rod_one.position_collection[\n ..., :-1\n ], # Discount last node, we want element start position\n rod_one.radius,\n rod_one.lengths,\n rod_one.tangents,\n rod_one.velocity_collection,\n rod_one.internal_forces,\n rod_one.external_forces,\n rod_two.position_collection[\n ..., :-1\n ], # Discount last node, we want element start position\n rod_two.radius,\n rod_two.lengths,\n rod_two.tangents,\n rod_two.velocity_collection,\n rod_two.internal_forces,\n rod_two.external_forces,\n self.k,\n self.nu,\n )\n\n\nclass SelfContact(FreeJoint):\n \"\"\"\n Assumes that the second entity is a rigid body for now, can be\n changed at a later time\n\n Most of the cylinder-cylinder contact SHOULD be implemented\n as given in this paper:\n http://larochelle.sdsmt.edu/publications/2005-2009/Collision%20Detection%20of%20Cylindrical%20Rigid%20Bodies%20Using%20Line%20Geometry.pdf\n\n but, it isn't (the elastica-cpp kernels are implemented)!\n This is maybe to speed up the kernel, but it's\n potentially dangerous as it does not deal with \"end\" 
conditions\n correctly.\n \"\"\"\n\n def __init__(self, k, nu):\n super().__init__(k, nu)\n\n def apply_forces(self, rod_one, index_one, rod_two, index_two):\n # del index_one, index_two\n\n _calculate_contact_forces_self_rod(\n rod_one.position_collection[\n ..., :-1\n ], # Discount last node, we want element start position\n rod_one.radius,\n rod_one.lengths,\n rod_one.tangents,\n rod_one.velocity_collection,\n rod_one.external_forces,\n self.k,\n self.nu,\n )\n", "\"\"\" Rendering Script using POVray\n\nThis script reads simulated data file to render POVray animation movie.\nThe data file should contain dictionary of positions vectors and times.\n\nThe script supports multiple camera position where a video is generated\nfor each camera view.\n\nNotes\n-----\n The module requires POVray installed.\n\"\"\"\n\nimport sys\n\nsys.path.append(\"/home/l5vd5/Dev/soro/PyElastica/\")\n\nimport multiprocessing\nimport os\nfrom functools import partial\nfrom multiprocessing import Pool\n\nimport numpy as np\nfrom moviepy.editor import ImageSequenceClip\nfrom scipy import interpolate\nfrom tqdm import tqdm\n\nfrom examples.Visualization._povmacros import Stages, pyelastica_rod, render\n\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n# Setup (USER DEFINE)\nDATA_PATH = \"axial_stretching_diag.dat\" # Path to the simulation data\nSAVE_PICKLE = True\n\n# Rendering Configuration (USER DEFINE)\nOUTPUT_FILENAME = \"axial_stretch_diag\"\nOUTPUT_IMAGES_DIR = \"frames_diag\"\nFPS = 20.0\nWIDTH = 1920 # 400\nHEIGHT = 1080 # 250\nDISPLAY_FRAMES = \"Off\" # Display povray images during the rendering. ['On', 'Off']\n\n# Camera/Light Configuration (USER DEFINE)\nstages = Stages()\nstages.add_camera(\n # Add diagonal viewpoint\n location=[15.0, 10.5, -15.0],\n angle=30,\n look_at=[4.0, 2.7, 2.0],\n name=\"diag\",\n)\n# stages.add_camera(\n# # Add top viewpoint\n# location=[0, 15, 3],\n# angle=30,\n# look_at=[0.0, 0, 3],\n# sky=[-1, 0, 0],\n# name=\"top\",\n# )\nstages.add_light(\n # Sun light\n position=[1500, 2500, -1000],\n color=\"White\",\n camera_id=-1,\n)\nstages.add_light(\n # Flash light for camera 0\n position=[15.0, 10.5, -15.0],\n color=[0.09, 0.09, 0.1],\n camera_id=0,\n)\nstages.add_light(\n # Flash light for camera 1\n position=[0.0, 8.0, 5.0],\n color=[0.09, 0.09, 0.1],\n camera_id=1,\n)\nstage_scripts = stages.generate_scripts()\n\n# Externally Including Files (USER DEFINE)\n# If user wants to include other POVray objects such as grid or coordinate axes,\n# objects can be defined externally and included separately.\nincluded = [\"../default.inc\"]\n\n# Multiprocessing Configuration (USER DEFINE)\nMULTIPROCESSING = True\nTHREAD_PER_AGENT = 4 # Number of thread use per rendering process.\nNUM_AGENT = multiprocessing.cpu_count() # number of parallel rendering.\n\n# Execute\nif __name__ == \"__main__\":\n # Load Data\n assert os.path.exists(DATA_PATH), \"File does not exists\"\n try:\n if SAVE_PICKLE:\n import pickle as pk\n\n with open(DATA_PATH, \"rb\") as fptr:\n data = pk.load(fptr)\n else:\n # (TODO) add importing npz file format\n raise NotImplementedError(\"Only pickled data is supported\")\n except OSError as err:\n print(\"Cannot open the datafile {}\".format(DATA_PATH))\n print(str(err))\n raise\n\n # Convert data to numpy array\n print(data)\n times = np.array(data[\"time\"]) # shape: (timelength)\n xs = np.array(data[\"position\"]) # shape: (timelength, 3, num_element)\n\n # Interpolate Data\n # Interpolation step serves two purposes. 
If simulated frame rate is lower than\n # the video frame rate, the intermediate frames are linearly interpolated to\n # produce smooth video. Otherwise if simulated frame rate is higher than\n # the video frame rate, interpolation reduces the number of frame to reduce\n # the rendering time.\n runtime = times.max() # Physical run time\n total_frame = int(runtime * FPS) # Number of frames for the video\n recorded_frame = times.shape[0] # Number of simulated frames\n times_true = np.linspace(0, runtime, total_frame) # Adjusted timescale\n \n print(xs.shape, times.shape)\n xs = interpolate.interp1d(times, xs, axis=0)(times_true)\n times = interpolate.interp1d(times, times, axis=0)(times_true)\n base_radius = np.ones_like(xs[:, 0, :]) * 0.050 # (TODO) radius could change\n\n # Rendering\n # For each frame, a 'pov' script file is generated in OUTPUT_IMAGE_DIR directory.\n batch = []\n for view_name in stage_scripts.keys(): # Make Directory\n output_path = os.path.join(OUTPUT_IMAGES_DIR, view_name)\n os.makedirs(output_path, exist_ok=True)\n for frame_number in tqdm(range(total_frame), desc=\"Scripting\"):\n for view_name, stage_script in stage_scripts.items():\n output_path = os.path.join(OUTPUT_IMAGES_DIR, view_name)\n\n # Colect povray scripts\n script = []\n script.extend(['#include \"{}\"'.format(s) for s in included])\n script.append(stage_script)\n\n # If the data contains multiple rod, this part can be modified to include\n # multiple rods.\n rod_object = pyelastica_rod(\n x=xs[frame_number],\n r=base_radius[frame_number],\n color=\"rgb<0.45,0.39,1>\",\n )\n script.append(rod_object)\n pov_script = \"\\n\".join(script)\n\n # Write .pov script file\n file_path = os.path.join(output_path, \"frame_{:04d}\".format(frame_number))\n with open(file_path + \".pov\", \"w+\") as f:\n f.write(pov_script)\n batch.append(file_path)\n\n # Process POVray\n # For each frames, a 'png' image file is generated in OUTPUT_IMAGE_DIR directory.\n pbar = tqdm(total=len(batch), desc=\"Rendering\") # Progress Bar\n if MULTIPROCESSING:\n func = partial(\n render,\n width=WIDTH,\n height=HEIGHT,\n display=DISPLAY_FRAMES,\n pov_thread=THREAD_PER_AGENT,\n )\n with Pool(NUM_AGENT) as p:\n for message in p.imap_unordered(func, batch):\n # (TODO) POVray error within child process could be an issue\n pbar.update()\n else:\n for filename in batch:\n render(\n filename,\n width=WIDTH,\n height=HEIGHT,\n display=DISPLAY_FRAMES,\n pov_thread=multiprocessing.cpu_count(),\n )\n pbar.update()\n\n # Create Video using moviepy\n for view_name in stage_scripts.keys():\n imageset_path = os.path.join(OUTPUT_IMAGES_DIR, view_name)\n imageset = [\n os.path.join(imageset_path, path)\n for path in os.listdir(imageset_path)\n if path[-3:] == \"png\"\n ]\n imageset.sort()\n filename = OUTPUT_FILENAME + \"_\" + view_name + \".mp4\"\n clip = ImageSequenceClip(imageset, fps=FPS)\n clip.write_videofile(filename, fps=FPS)\n" ]
[ [ "numpy.dot", "numpy.abs", "numpy.min", "numpy.linalg.norm", "numpy.max", "numpy.ceil", "numpy.zeros_like", "numpy.cross", "numpy.array", "numpy.zeros", "numpy.empty" ], [ "numpy.array", "numpy.ones_like", "scipy.interpolate.interp1d", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
cynerelee/SIRnet
[ "c03d7ace0c396095a32ff057b1b0e51fc8b5963b" ]
[ "Model/my_mlp.py" ]
[ "from torch import nn\r\nfrom functools import partial\r\nfrom einops.layers.torch import Rearrange, Reduce\r\nfrom torch.autograd import Variable\r\nimport torch\r\n\r\nclass PreNormResidual(nn.Module):\r\n def __init__(self, dim, fn):\r\n super().__init__()\r\n self.fn = fn\r\n self.norm = nn.LayerNorm(dim)\r\n\r\n def forward(self, x):\r\n \r\n return self.fn(self.norm(x)) + x\r\n\r\ndef FeedForward(dim, expansion_factor = 4, dropout = 0., dense = nn.Linear):\r\n return nn.Sequential(\r\n dense(dim, dim * expansion_factor),\r\n nn.GELU(),\r\n nn.Dropout(dropout),\r\n dense(dim * expansion_factor, dim),\r\n nn.Dropout(dropout)\r\n )\r\n\r\n# def MLPMixer(*, image_size, channels, patch_size, dim, depth, num_classes, expansion_factor = 4, dropout = 0.):\r\n# assert (image_size % patch_size) == 0, 'image must be divisible by patch size'\r\n# num_patches = (image_size // patch_size) ** 2\r\ndef MLPMixer(*, image_size_H, image_size_W, channels, patch_size, dim, depth, expansion_factor = 4, dropout = 0.):\r\n assert (image_size_H % patch_size) == 0 and (image_size_W % patch_size) == 0, 'image must be divisible by patch size'\r\n num_patches = (image_size_H // patch_size) ** 2 \r\n \r\n chan_first, chan_last = partial(nn.Conv1d, kernel_size = 1), nn.Linear\r\n \r\n return nn.Sequential(\r\n Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),\r\n nn.Linear((patch_size ** 2) * channels, dim),\r\n *[nn.Sequential(\r\n PreNormResidual(dim, FeedForward(num_patches, expansion_factor, dropout, chan_first)),\r\n PreNormResidual(dim, FeedForward(dim, expansion_factor, dropout, chan_last))\r\n ) for _ in range(depth)],\r\n nn.LayerNorm(dim),\r\n #Reduce('b n c -> b c', 'mean'),\r\n #nn.Linear(dim, num_classes)\r\n )\r\ndef test():\r\n image_size_H=256\r\n image_size_W=320\r\n channels=3\r\n patch_size=16\r\n dim = 512,\r\n depth = 12,\r\n num_classes = 32\r\n model = MLPMixer(\r\n image_size_H = image_size_H,\r\n image_size_W = image_size_W,\r\n channels = 3,\r\n patch_size = patch_size,\r\n dim = 512,\r\n depth = 12,\r\n num_classes = num_classes\r\n )\r\n IVT = model(Variable(torch.randn(2,3,256,256)))\r\n print(IVT.shape)\r\n \r\n\r\nif __name__ == '__main__':\r\n test()" ]
[ [ "torch.nn.Dropout", "torch.nn.GELU", "torch.randn", "torch.nn.LayerNorm", "torch.nn.Linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Nishant-codex/rnn_flip_flops
[ "4d91315ead94b87d53d9e3e403a3ea543cb7e308" ]
[ "rate_nets/distributed_r_kim/FixedPointSearch.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n#import os\nimport sys \n\nimport os\nsys.path.insert(0,os.getcwd())\nimport absl\nfrom tensorflow.python.ops import parallel_for as pfor\nfrom FixedPointStore import *\nimport tensorflow as tf\n\n# import horovod.tensorflow as hvd\n\n#import cProfile\n# %tensorflow_version 1.x magic\n#import matplotlib.pyplot as plt\n\nimport numpy.random as nrand\n\nnp.random.seed(0)\n# import numpy as np\nimport time\nfrom AdaptiveGradNormClip import AdaptiveGradNormClip\nfrom AdaptiveLearningRate import AdaptiveLearningRate\n\nclass FixedPointSearch:\n\n def __init__(self, \n ctype, \n states,\n savepath, \n cell=None,\n sess=None,\n max_iters = 5000,\n max_n_unique = np.inf,\n tol_q = 1e-12,\n tol_dq = 1e-20,\n adaptive_learning_rate_hps = {},\n grad_norm_clip_hps = {},\n adam_optimizer_hps = {'epsilon': 0.01},\n exclude_dis_outliers = True,\n outlier_distance_scale = 10.0,\n rerun_q_outliers = True,\n run_additional_iterations_on_outliers = True,\n outlier_q_scale = 10.0\n ):\n\n self.max_iters = max_iters \n self.ctype = ctype\n self.dtype = np.float32\n self.tol_q = tol_q\n self.savepath = savepath\n self.tol_dq = tol_dq\n self.adaptive_learning_rate_hps = adaptive_learning_rate_hps\n self.grad_norm_clip_hps =grad_norm_clip_hps\n self.adam_optimizer_hps = adam_optimizer_hps \n self.outlier_q_scale = outlier_q_scale\n self.outlier_distance_scale = outlier_distance_scale\n self.states = states\n self.bits = 3\n self.max_n_unique = max_n_unique\n self.rerun_q_outliers = rerun_q_outliers\n self.sampled_states = 0\n self.cell = cell\n self.is_root = False\n self.uniq_tol = 1e-3\n self.decompose_jacobians = True\n self.compute_jacobians = True\n self.sess = sess\n self.exclude_dis_outliers = exclude_dis_outliers\n self.run_additional_iterations_on_outliers = run_additional_iterations_on_outliers\n\n def convert_from_lstm_tuples(self, lstm):\n c = lstm.c\n h = lstm.h\n # print(c.shape)\n rank = len(lstm.c.shape)\n axis = rank -1 \n if(tf.is_numeric_tensor(c)):\n return tf.concat((c,h),axis=axis)\n else:\n return np.concatenate((c,h),axis=axis)\n\n def convert_to_lstm_tuples(self, lstm):\n\n array = lstm\n rank = len(array.shape)\n dim = array.shape[rank-1]\n if dim%2 ==0:\n conc_dim = dim//2\n else:\n raise ValueError(\"Dimentions are not even\")\n\n if rank == 3:\n c = array[:,:,:conc_dim]\n h = array[:,:,conc_dim:]\n elif rank == 2:\n c = array[:,:conc_dim]\n h = array[:,conc_dim:]\n\n return tf.nn.rnn_cell.LSTMStateTuple(c=c,h=h)\n\n def build_vars(self, init_states):\n if self.ctype == 'LSTM':\n c_h_init = self.convert_from_lstm_tuples(init_states)\n x = tf.Variable(c_h_init,dtype=tf.float32)\n x_rnn_cell = self.convert_to_lstm_tuples(x)\n else:\n x = tf.Variable(init_states,dtype=tf.float32)\n x_rnn_cell = x\n return x,x_rnn_cell\n\n def maybe_convert(self, x_init):\n if self.ctype=='LSTM':\n return self.convert_from_lstm_tuples(x_init)\n else:\n return x_init\n \n def get_rnn(self, init_states, inputs):\n # print('inside get rnn')\n x, x_rnn = self.build_vars(init_states)\n inputs = tf.constant(inputs,dtype=tf.float32)\n # print('before cell')\n output, F_rnn = self.cell(inputs,x_rnn)\n # print('before cell')\n if self.ctype == 'LSTM':\n F = self.convert_from_lstm_tuples(F_rnn)\n else:\n F = F_rnn\n print(x)\n print(F)\n init = tf.variables_initializer(var_list=[x])\n self.sess.run(init)\n return x, F\n\n def compute_input_jacobians(self, fps):\n def 
grab_RNN_for_dFdu(initial_states, inputs):\n \n x, x_rnn = self.build_vars(initial_states)\n \n inputs = tf.Variable(inputs,dtype=tf.float32)\n \n output, F_rnn = self.cell(inputs,x_rnn)\n\n if self.ctype == 'LSTM':\n F = self.convert_from_lstm_tuples(F_rnn)\n else:\n F = F_rnn\n \n init = tf.variables_initializer(var_list = [x, inputs])\n self.sess.run(init)\n\n return inputs, F\n\n inputs_np = fps.inputs\n\n if self.ctype == 'LSTM':\n states_np = self.convert_to_lstm_tuples(fps.xstar)\n else:\n states_np = fps.xstar\n\n inputs, F_tf = grab_RNN_for_dFdu(states_np, inputs_np)\n\n try: \n J_tf = pfor.batch_jacobian(F_tf, inputs)\n except absl.flags._exceptions.UnparsedFlagAccessError:\n # fall back to the non-pfor path; 'inputs' is the variable built above\n # (the original referenced an undefined name 'inputs_tf' here)\n J_tf = pfor.batch_jacobian(F_tf, inputs, use_pfor=False)\n\n J_np = self.sess.run(J_tf)\n\n return J_np, J_tf\n\n \n def compute_recurrent_jacobians(self, fps):\n\n inputs = fps.inputs\n if self.ctype == 'LSTM':\n states_np = self.convert_to_lstm_tuples(fps.xstar)\n else:\n states_np = fps.xstar\n\n x_tf,F_tf = self.get_rnn(states_np,inputs)\n\n try: \n if self.is_root:\n print('batch jacobians')\n J_tf = pfor.batch_jacobian(F_tf,x_tf)\n except absl.flags._exceptions.UnparsedFlagAccessError:\n J_tf = pfor.batch_jacobian(F_tf, x_tf, use_pfor=False)\n if self.is_root:\n print('running cells')\n J_np = self.sess.run(J_tf)\n if self.is_root:\n print('out of batch jacobians')\n return J_np, J_tf\n\n def sample_states(self, init_size, state_matrix,c_type, noise):\n\n matrix = state_matrix\n\n [n_time, n_batch, n_states] = matrix.shape\n \n valid_idx = np.ones((n_batch, n_time), dtype=bool)\n \n (trial_idx, time_idx) = np.nonzero(valid_idx)\n \n min_index = min(len(trial_idx),len(time_idx))\n \n sample_indices = nrand.RandomState(200).randint(0, high = min_index, size = [init_size])\n \n trial_idx = trial_idx[sample_indices]\n \n time_idx = time_idx[sample_indices]\n \n states = np.zeros([init_size, n_states])\n \n for i in range(init_size):\n init_idx = trial_idx[i]\n t_idx = time_idx[i]\n states[i,:] = matrix[t_idx,init_idx,:]\n \n if noise>0.0:\n states = states + noise*np.random.randn(*states.shape)\n\n self.sampled_states = states\n\n def identify_distance_non_outliers(self, fps, initial_states, dist_thresh):\n if self.ctype == 'LSTM':\n initial_states = self.convert_from_lstm_tuples(initial_states)\n\n num_inits = initial_states.shape[0]\n n_fps = fps.num_inits\n\n # Centroid of initial_states, shape (n_states,)\n centroid = np.mean(initial_states, axis=0)\n\n # Distance of each initial state from the centroid, shape (n,)\n init_dists = np.linalg.norm(initial_states - centroid, axis=1)\n avg_init_dist = np.mean(init_dists)\n\n # Normalized distances of initial states to the centroid, shape: (n,)\n scaled_init_dists = np.true_divide(init_dists, avg_init_dist)\n\n # Distance of each FP from the initial_states centroid\n fps_dists = np.linalg.norm(fps.xstar - centroid, axis=1)\n\n # Normalized\n scaled_fps_dists = np.true_divide(fps_dists, avg_init_dist)\n\n init_non_outlier_idx = np.where(scaled_init_dists < dist_thresh)[0]\n n_init_non_outliers = init_non_outlier_idx.size\n if self.is_root:\n print('\t\tinitial_states: %d outliers detected (of %d).'\n % (num_inits - n_init_non_outliers, num_inits))\n\n fps_non_outlier_idx = np.where(scaled_fps_dists < dist_thresh)[0]\n n_fps_non_outliers = fps_non_outlier_idx.size\n if self.is_root:\n print('\t\tfixed points: %d outliers detected (of %d).'\n % 
(n_fps - n_fps_non_outliers, n_fps))\n\n return fps_non_outlier_idx\n\n\n def exclude_dis_outliers_(self, fps, initial_states):\n idx_keep = self.identify_distance_non_outliers(fps, initial_states, self.outlier_distance_scale)\n return fps[idx_keep]\n\n def identify_q_outliers(self, fps, q_thresh):\n\n return np.where(fps.qstar > q_thresh)[0]\n\n def _get_rnncell_compatible_states(self, states):\n\n if self.ctype == 'LSTM':\n return self.convert_to_lstm_tuples(states)\n else:\n return states\n\n def run_additional_iterations_on_outliers_(self, fps):\n\n def perform_outlier_optimization(fps, method):\n\n idx_outliers = self.identify_q_outliers(fps, outlier_min_q)\n n_outliers = len(idx_outliers)\n\n outlier_fps = fps[idx_outliers]\n n_prev_iters = outlier_fps.n_iters\n inputs = outlier_fps.inputs\n initial_states = self._get_rnncell_compatible_states(\n outlier_fps.xstar)\n\n if method == 'sequential':\n\n updated_outlier_fps = self.run_sequential_optimization(\n initial_states, inputs, q_prior=outlier_fps.qstar)\n elif method == 'joint':\n updated_outlier_fps = self.run_joint_optimization(initial_states, inputs)\n else:\n raise ValueError('Unsupported method: %s.' % method)\n\n updated_outlier_fps.n_iters += n_prev_iters\n fps[idx_outliers] = updated_outlier_fps\n\n return fps\n\n def outlier_update(fps):\n\n idx_outliers = self.identify_q_outliers(fps, outlier_min_q)\n n_outliers = len(idx_outliers)\n\n # self._print_if_verbose('\\n\\tDetected %d putative outliers '\n # '(q>%.2e).' % (n_outliers, outlier_min_q))\n\n return idx_outliers\n\n outlier_min_q = np.median(fps.qstar)*self.outlier_q_scale\n idx_outliers = outlier_update(fps)\n\n if len(idx_outliers) == 0:\n return fps\n\n\n fps = perform_outlier_optimization(fps, 'sequential')\n outlier_update(fps) # For print output only\n\n return fps\n\n\n def run_iteration_loops(self, states, inputs, init_array):\n\n def print_update(iter_count, q, dq, lr, is_final=False):\n\n t = time.time()\n t_elapsed = t - t_start\n avg_iter_time = t_elapsed / iter_count\n\n if is_final:\n delimiter = '\\n\\t\\t'\n print('\\t\\t%d iters%s' % (iter_count, delimiter), end='')\n else:\n delimiter = ', '\n print('\\tIter: %d%s' % (iter_count, delimiter), end='')\n\n if q.size == 1:\n print('q = %.2e%sdq = %.2e%s' %\n (q, delimiter, dq, delimiter), end='')\n else:\n mean_q = np.mean(q)\n std_q = np.std(q)\n\n mean_dq = np.mean(dq)\n std_dq = np.std(dq)\n\n print('q = %.2e +/- %.2e%s'\n 'dq = %.2e +/- %.2e%s' %\n (mean_q, std_q, delimiter, mean_dq, std_dq, delimiter),\n end='')\n\n print('learning rate = %.2e%s' % (lr, delimiter), end='')\n\n print('avg iter time = %.2e sec' % avg_iter_time, end='')\n\n if is_final:\n print('') # Just for the endline\n else:\n print('.')\n\n\n\n x, F_cell = self.get_rnn(states, inputs)\n q = 0.5 * tf.reduce_sum(tf.square(F_cell - x ))\n\n q_scalar = tf.reduce_mean(q)\n grads = tf.gradients(q_scalar, [x])\n\n q_prev_tf = tf.placeholder(tf.float32, shape=list(q.shape), name='q_prev')\n\n # when (q-q_prev) is negative, optimization is making progress\n dq = tf.abs(q - q_prev_tf)\n hps={}\n\n # Optimizer\n adaptive_learning_rate = AdaptiveLearningRate(**self.adaptive_learning_rate_hps)\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n adaptive_grad_norm_clip = AdaptiveGradNormClip(**self.grad_norm_clip_hps)\n grad_norm_clip_val = tf.placeholder(tf.float32, name='grad_norm_clip_val')\n\n # Gradient clipping\n clipped_grads, grad_global_norm = tf.clip_by_global_norm(grads, grad_norm_clip_val)\n 
clipped_grad_global_norm = tf.global_norm(clipped_grads)\n clipped_grad_norm_diff = grad_global_norm - clipped_grad_global_norm\n grads_to_apply = clipped_grads\n\n # adam_hps = {'epsilon': 0.01}\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, **self.adam_optimizer_hps)\n # optimizer = hvd.DistributedOptimizer(optimizer)\n train = optimizer.apply_gradients(zip(grads_to_apply, [x]))\n\n # Initialize x and AdamOptimizer's auxiliary variables\n uninitialized_vars = optimizer.variables()\n init = tf.variables_initializer(var_list=uninitialized_vars)\n self.sess.run(init)\n\n ops_to_eval = [train,x, F_cell, q_scalar, q, dq, grad_global_norm]\n\n iter_count = 1\n t_start = time.time()\n q_prev = np.tile(np.nan, q.shape.as_list())\n rnn_cell_feed_dict = {}\n while True:\n # print('inside run iter loops')\n iter_learning_rate = adaptive_learning_rate()\n iter_clip_val = adaptive_grad_norm_clip()\n\n feed_dict = {learning_rate: iter_learning_rate,\n grad_norm_clip_val: iter_clip_val,\n q_prev_tf: q_prev}\n feed_dict.update(rnn_cell_feed_dict)\n\n (ev_train,\n ev_x,\n ev_F,\n ev_q_scalar,\n ev_q,\n ev_dq,\n ev_grad_norm) = self.sess.run(ops_to_eval, feed_dict)\n\n # print('doing iter count')\n if iter_count > 1 and \\\n np.all(np.logical_or(\n ev_dq < self.tol_dq*iter_learning_rate,\n ev_q < self.tol_q)):\n if self.is_root:\n print('\\tOptimization complete to desired tolerance.')\n break\n\n if iter_count + 1 > 5000:\n if self.is_root:\n print('\\tMaximum iteration count reached. '\n 'Terminating.')\n break\n\n q_prev = ev_q\n adaptive_learning_rate.update(ev_q_scalar)\n adaptive_grad_norm_clip.update(ev_grad_norm)\n iter_count += 1\n print('outside the loop')\n # print_update(iter_count,\n # ev_q, ev_dq,\n # iter_learning_rate,\n # is_final=True)\n\n iter_count = np.tile(iter_count, ev_q.shape)\n fixed_point = FixedPointStore(xstar = ev_x,\n inputs = inputs,\n dtype = self.dtype,\n alloc_zeros = False, \n x_init = self.maybe_convert(states),\n F_xstar=ev_F, \n qstar= ev_q,\n dq=ev_dq,\n n_iters = iter_count\n )\n return fixed_point\n\n def find_shape(self, states):\n if self.ctype == 'LSTM': \n return (states.c.shape[0], states.c.shape[1]*2)\n else:\n return states.shape[0],states.shape[1]\n\n def return_index(self, states, index):\n if self.ctype=='LSTM':\n c= states.c[index]\n h = states.h[index]\n return tf.nn.rnn_cell.LSTMStateTuple(c=c,h=h)\n else: \n return states[index]\n\n def run_joint_optimization(self, initial_states, inputs):\n def print_update(iter_count, q, dq, lr, is_final=False):\n\n t = time.time()\n t_elapsed = t - t_start\n avg_iter_time = t_elapsed / iter_count\n\n if is_final:\n delimiter = '\\n\\t\\t'\n print('\\t\\t%d iters%s' % (iter_count, delimiter), end='')\n else:\n delimiter = ', '\n print('\\tIter: %d%s' % (iter_count, delimiter), end='')\n\n if q.size == 1:\n print('q = %.2e%sdq = %.2e%s' %\n (q, delimiter, dq, delimiter), end='')\n else:\n mean_q = np.mean(q)\n std_q = np.std(q)\n\n mean_dq = np.mean(dq)\n std_dq = np.std(dq)\n\n print('q = %.2e +/- %.2e%s'\n 'dq = %.2e +/- %.2e%s' %\n (mean_q, std_q, delimiter, mean_dq, std_dq, delimiter),\n end='')\n\n print('learning rate = %.2e%s' % (lr, delimiter), end='')\n\n print('avg iter time = %.2e sec' % avg_iter_time, end='')\n\n if is_final:\n print('') # Just for the endline\n else:\n print('.') \n\n n, _ = self.find_shape(initial_states)\n\n x, F = self.get_rnn(initial_states, inputs)\n\n # A shape [n,] TF Tensor of objectives (one per initial state) to be\n # combined in _run_optimization_loop.\n q 
= 0.5 * tf.reduce_sum(tf.square(F - x), axis=1)\n \n q_scalar = tf.reduce_mean(q)\n\n grads = tf.gradients(q_scalar, [x])\n\n q_prev_tf = tf.placeholder(tf.float32, \n shape=list(q.shape), \n name='q_prev')\n\n # when (q-q_prev) is negative, optimization is making progress\n dq = tf.abs(q - q_prev_tf)\n hps={}\n\n\n # Optimizer\n adaptive_learning_rate = AdaptiveLearningRate(**self.adaptive_learning_rate_hps)\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n adaptive_grad_norm_clip = AdaptiveGradNormClip(**self.grad_norm_clip_hps)\n grad_norm_clip_val = tf.placeholder(tf.float32, name='grad_norm_clip_val')\n\n # Gradient clipping\n clipped_grads, grad_global_norm = tf.clip_by_global_norm(grads, grad_norm_clip_val)\n clipped_grad_global_norm = tf.global_norm(clipped_grads)\n clipped_grad_norm_diff = grad_global_norm - clipped_grad_global_norm\n grads_to_apply = clipped_grads\n\n # adam_hps = {'epsilon': 0.01}\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate , **self.adam_optimizer_hps)\n # optimizer = hvd.DistributedOptimizer(optimizer) * hvd.size()\n train = optimizer.apply_gradients(zip(grads_to_apply, [x]))\n\n uninitialized_vars = optimizer.variables()\n init = tf.variables_initializer(var_list=uninitialized_vars)\n self.sess.run(init)\n\n ops_to_eval = [train,x, F, q_scalar, q, dq, grad_global_norm]\n\n iter_count = 1\n t_start = time.time()\n q_prev = np.tile(np.nan, q.shape.as_list())\n rnn_cell_feed_dict = {}\n while True:\n # print('inside run iter loops')\n iter_learning_rate = adaptive_learning_rate()\n iter_clip_val = adaptive_grad_norm_clip()\n\n feed_dict = {learning_rate: iter_learning_rate,\n grad_norm_clip_val: iter_clip_val,\n q_prev_tf: q_prev}\n feed_dict.update(rnn_cell_feed_dict)\n\n (ev_train,\n ev_x,\n ev_F,\n ev_q_scalar,\n ev_q,\n ev_dq,\n ev_grad_norm) = self.sess.run(ops_to_eval, feed_dict)\n\n # if self.super_verbose and \\\n # np.mod(iter_count, self.n_iters_per_print_update)==0:\n # print_update(iter_count, ev_q, ev_dq, iter_learning_rate)\n # print('doing iter count')\n if iter_count > 1 and \\\n np.all(np.logical_or(\n ev_dq < self.tol_dq*iter_learning_rate,\n ev_q < self.tol_q)):\n '''Here dq is scaled by the learning rate. Otherwise very\n small steps due to very small learning rates would spuriously\n indicate convergence. This scaling is roughly equivalent to\n measuring the gradient norm.'''\n if self.is_root:\n print('\\tOptimization complete to desired tolerance.')\n break\n\n if iter_count + 1 > self.max_iters:\n if self.is_root: \n print('\\tMaximum iteration count reached. 
'\n 'Terminating.')\n break\n\n q_prev = ev_q\n adaptive_learning_rate.update(ev_q_scalar)\n adaptive_grad_norm_clip.update(ev_grad_norm)\n iter_count += 1\n # print('outside the loop')\n # print_update(iter_count,\n # ev_q, ev_dq,\n # iter_learning_rate,\n # is_final=True)\n # print(ev_x)\n iter_count = np.tile(iter_count, ev_q.shape)\n fixed_point = FixedPointStore(\n # num_states = init_array['num_states'],\n # num_inits = init_array['num_inits'], \n # num_inputs = init_array['num_inputs'], \n xstar = ev_x,\n alloc_zeros = False, \n dtype =self.dtype,\n x_init = self.maybe_convert(initial_states),\n inputs = inputs,\n F_xstar=ev_F, \n qstar= ev_q,\n dq=ev_dq,\n n_iters = iter_count\n )\n\n return fixed_point\n\n\n def run_sequential_optimization(self, states, inputs, q_prior = None):\n if self.is_root:\n print('running sequential optimization')\n num_inits, num_states = self.find_shape(states) \n num_inputs = inputs.shape[1]\n\n fresh_start = q_prior is None\n if self.is_root:\n\n print('fresh_start ', fresh_start)\n fps = FixedPointStore(num_inits=num_inits, num_states=num_states, num_inputs=num_inputs, alloc_zeros=True)\n\n init_dict = {'num_inits':num_inits,'num_states':num_states,'num_inputs':num_inputs}\n \n for i in range(num_inits):\n index = slice(i, i+1)\n state_inst_i = self.return_index(states, index)\n # print(type(state_inst_i))\n input_inst_i = inputs[index, :]\n if self.is_root : print('state number ',i)\n if fresh_start and i == 0 :\n if self.is_root:\n print('Starting to find the fixed points')\n\n\n elif fresh_start==False: \n if self.is_root:\n print('running iterations over q again')\n \n fps[index] = self.run_iteration_loops(state_inst_i, input_inst_i, init_dict)\n\n return fps\n\n def find_fixed_points(self, inputs, save=False):\n \n # hvd.init()\n\n self.is_root = True #hvd.rank() == 0\n if self.ctype == 'LSTM':\n n = (self.sampled_states.c.shape[0],self.sampled_states.c.shape[1]*2)[0]\n # _state = self.convert_from_lstm_tuples(self.sampled_states)\n _state = self.sampled_states\n else: \n n = self.sampled_states.shape[0]\n _state = self.sampled_states\n # print('here')\n \n sample = inputs\n sample_inputs = np.tile(sample,[n,1])\n # sample_inputs = inputs\n # all_fps = self.run_sequential_optimization(_state, sample_inputs)\n if self.is_root: \n print(\"running joint optimizer\")\n all_fps = self.run_joint_optimization(_state, sample_inputs)\n\n if self.is_root:\n print('All FPS shape ', all_fps.num_inits)\n # print(all_fps.xstar.shape)\n if self.is_root: \n print('Finding unique Fixedpoints')\n unique_fps = all_fps.get_unique()\n if self.is_root:\n print('Found unique Fixedpoints with size ',unique_fps.num_inits)\n\n if (self.exclude_dis_outliers):\n \n unique_fps = self.exclude_dis_outliers_(unique_fps,_state )\n if self.is_root:\n print('Distance outliers excluded, currently size',unique_fps.num_inits)\n \n if self.rerun_q_outliers:\n unique_fps = self.run_additional_iterations_on_outliers_(unique_fps)\n unique_fps = unique_fps.get_unique()\n \n if unique_fps.num_inits > self.max_n_unique:\n # self._print_if_verbose('\\tRandomly selecting %d unique '\n # 'fixed points to keep.' 
% self.max_n_unique)\n max_n_unique = int(self.max_n_unique)\n idx_keep = self.rng.choice(unique_fps.n, max_n_unique, replace=False)\n unique_fps = unique_fps[idx_keep]\n\n #can select fixed maximum number of points since all are not needed\n if self.compute_jacobians:\n\n if (unique_fps.num_inits > 0) :\n if self.is_root:\n\n print('computing recurrent jacobians')\n \n dFdx, dFdx_tf = self.compute_recurrent_jacobians(unique_fps)\n unique_fps.J_xstar = dFdx\n if self.is_root:\n\n print('Compute input Jacobians')\n dFdu, dFdu_tf = self.compute_input_jacobians(unique_fps)\n unique_fps.dFdu = dFdu\n else:\n num_states = unique_fps.num_states\n num_inputs = unique_fps.num_inputs\n\n shape_dFdx = (0, num_states, num_states)\n shape_dFdu = (0, num_states, num_inputs)\n \n unique_fps.J_xstar = unique_fps._alloc_zeros(shape_dFdx)\n unique_fps.dFdu = unique_fps._alloc_zeros(shape_dFdu)\n\n if self.decompose_jacobians:\n if self.is_root:\n print('decomposing Jacobians')\n unique_fps.decompose_jacobians() \n if self.is_root:\n print('decomposed Jacobians')\n \n if save == True and self.is_root:\n print('saving')\n all_fps.save(self.savepath, 'all')\n unique_fps.save(self.savepath, 'unique') \n if self.is_root:\n print('coming out')\n return unique_fps, all_fps\n" ]
[ [ "numpy.true_divide", "tensorflow.concat", "tensorflow.nn.rnn_cell.LSTMStateTuple", "tensorflow.variables_initializer", "numpy.concatenate", "numpy.mean", "numpy.random.randn", "tensorflow.train.AdamOptimizer", "numpy.where", "tensorflow.Variable", "tensorflow.gradients", "numpy.std", "tensorflow.square", "numpy.zeros", "numpy.nonzero", "numpy.median", "tensorflow.placeholder", "numpy.logical_or", "tensorflow.python.ops.parallel_for.batch_jacobian", "numpy.random.RandomState", "tensorflow.global_norm", "tensorflow.constant", "numpy.random.seed", "tensorflow.reduce_mean", "tensorflow.is_numeric_tensor", "numpy.linalg.norm", "numpy.tile", "numpy.ones", "tensorflow.clip_by_global_norm", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
mcx/kaolin
[ "abe006921b5d522ecd0f7c5e30abe760a4459dc7" ]
[ "kaolin/render/mesh/dibr.py" ]
[ "# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nfrom torch.autograd import Function\n\nfrom kaolin import _C\nfrom .rasterization import rasterize, _legacy_to_opengl, nvdiff, _get_nvdiff_glctx\n\n__all__ = [\n \"dibr_soft_mask\",\n \"dibr_rasterization\",\n]\n\nclass DibrSoftMaskCuda(Function):\n @staticmethod\n def forward(ctx, face_vertices_image, selected_face_idx,\n sigmainv, boxlen, knum, multiplier):\n face_vertices_image = face_vertices_image.contiguous()\n face_vertices_image = face_vertices_image * multiplier\n selected_face_idx = selected_face_idx.contiguous()\n points_min = torch.min(face_vertices_image, dim=-2)[0]\n points_max = torch.max(face_vertices_image, dim=-2)[0]\n face_large_bboxes = torch.cat([\n points_min - boxlen * multiplier,\n points_max + boxlen * multiplier\n ], dim=-1)\n soft_mask, close_face_prob, close_face_idx, close_face_dist_type = \\\n _C.render.mesh.dibr_soft_mask_forward_cuda(\n face_vertices_image,\n face_large_bboxes.contiguous(),\n selected_face_idx,\n sigmainv,\n knum,\n multiplier\n )\n ctx.multiplier = multiplier\n ctx.sigmainv = sigmainv\n ctx.save_for_backward(\n soft_mask, face_vertices_image, selected_face_idx,\n close_face_prob, close_face_idx, close_face_dist_type\n )\n return soft_mask\n\n @staticmethod\n def backward(ctx, grad_soft_mask):\n soft_mask, face_vertices_image, selected_face_idx, close_face_prob, \\\n close_face_idx, close_face_dist_type = ctx.saved_tensors\n multiplier = ctx.multiplier\n sigmainv = ctx.sigmainv\n grad_face_vertices_image = _C.render.mesh.dibr_soft_mask_backward_cuda(\n grad_soft_mask.contiguous(),\n soft_mask,\n selected_face_idx,\n close_face_prob,\n close_face_idx,\n close_face_dist_type,\n face_vertices_image,\n sigmainv,\n multiplier)\n return grad_face_vertices_image, None, None, None, None, None\n\ndef dibr_soft_mask(face_vertices_image, selected_face_idx,\n sigmainv=7000, boxlen=0.02, knum=30, multiplier=1000.):\n r\"\"\"Compute a soft mask generally used with :func:`kaolin.metrics.render.mask_iou`\n to compute a silhouette loss, as defined by *Chen, Wenzheng, et al.* in\n `Learning to Predict 3D Objects with an Interpolation-based Differentiable Renderer`_ Neurip 2019.\n\n Args:\n face_vertices_image (torch.Tensor):\n 2D positions of the face vertices on image plane,\n of shape :math:`(\\text{batch_size}, \\text{num_faces}, 3, 2)`,\n Note that ``face_vertices_camera`` is projected on image plane (z=-1)\n and forms ``face_vertices_image``.\n The coordinates of face_vertices_image are between :math:`[-1, 1]`,\n which corresponds to normalized image pixels.\n selected_face_idx (torch.LongTensor):\n Rendered face index,\n of shape :math:`(\\text{batch_size}, \\text{height}, \\text{width})`.\n See 2nd returned value from :func:`kaolin.render.mesh.rasterize`.\n sigmainv (float):\n Smoothness term for computing the softmask, the higher the sharper.\n The recommended range is :math:`[1/3e-4, 1/3e-5]`. 
Defaut: 7000.\n boxlen (float):\n Margin over bounding box of faces which will threshold which pixels\n will be influenced by the face. The value should be adapted to sigmainv,\n to threshold values close to 0. The recommended range is [0.05, 0.2].\n Default: 0.02.\n knum (int):\n Maximum number of faces that can influence one pixel.\n The value should be adapted to boxlen, to avoid missing faces.\n The recommended range is [20, 100]. Default: 30.\n multiplier (float):\n To avoid numerical issue,\n we internally enlarge the 2d coordinates by a multiplier.\n Default: 1000.\n Returns:\n (torch.FloatTensor):\n The soft mask, of shape :math:`(\\text{batch_size}, \\text{height}, \\text{width})`.\n\n .. _Learning to Predict 3D Objects with an Interpolation-based Differentiable Renderer:\n https://arxiv.org/abs/1908.01210\n \"\"\"\n return DibrSoftMaskCuda.apply(face_vertices_image, selected_face_idx,\n sigmainv, boxlen, knum, multiplier)\n\ndef dibr_rasterization(height, width, face_vertices_z, face_vertices_image,\n face_features, face_normals_z, sigmainv=7000,\n boxlen=0.02, knum=30, multiplier=None, eps=None,\n rast_backend='cuda'):\n r\"\"\"Fully differentiable DIB-R renderer implementation,\n that renders 3D triangle meshes with per-vertex per-face features to\n generalized feature \"images\", soft foreground masks, and face index maps.\n\n Args:\n height (int): the size of rendered images.\n width (int): the size of rendered images.\n face_vertices_z (torch.FloatTensor):\n 3D points depth (z) value of the face vertices in camera coordinate,\n of shape :math:`(\\text{batch_size}, \\text{num_faces}, 3)`.\n face_vertices_image (torch.FloatTensor):\n 2D positions of the face vertices on image plane,\n of shape :math:`(\\text{batch_size}, \\text{num_faces}, 3, 2)`,\n Note that ``face_vertices_camera`` is projected on image plane (z=-1)\n and forms ``face_vertices_image``.\n The coordinates of face_vertices_image are between :math:`[-1, 1]`,\n which corresponds to normalized image pixels.\n face_features (torch.FloatTensor or list of torch.FloatTensor):\n Features (per-vertex per-face) to be drawn,\n of shape :math:`(\\text{batch_size}, \\text{num_faces}, 3, \\text{feature_dim})`,\n feature is the features dimension,\n for instance with vertex colors num_features=3 (R, G, B),\n and texture coordinates num_features=2 (X, Y),\n or a list of num_features,\n of shapes :math:`(\\text{batch_size}, \\text{num_faces}, 3, \\text{feature_dim[i]})`\n face_normals_z (torch.FloatTensor):\n Normal directions in z axis, of shape :math:`(\\text{batch_size}, \\text{num_faces})`,\n only faces with normal z >= 0 will be drawn.\n sigmainv (float):\n Smoothness term for computing the softmask, the higher the sharper.\n The recommended range is :math:`[1/3e-4, 1/3e-5]`. Defaut: 7000.\n boxlen (float):\n Margin over bounding box of faces which will threshold which pixels\n will be influenced by the face. The value should be adapted to sigmainv,\n to threshold values close to 0. The recommended range is [0.05, 0.2].\n Default: 0.02.\n knum (int):\n Maximum number of faces that can influence one pixel.\n The value should be adapted to boxlen, to avoid missing faces.\n The recommended range is [20, 100]. 
Default: 30.\n multiplier (float):\n To avoid numerical issue,\n we internally enlarge the 2d coordinates by a multiplier.\n Default: 1000.\n eps (float):\n Epsilon value used to normalize barycentric weights in rasterization.\n Especially matter with small triangles,\n to increase or decrease in case of exploding or vanishing gradient.\n Ignored if ``backend`` is 'nvdiffrast'.\n Default: 1e-8.\n backend (string):\n Backend used for the rasterization, can be ['cuda', 'nvdiffrast', nvdiffrast_fwd'].\n 'nvdiffrast_fwd' is using `nvdiffrast library` for the forward pass only\n and kaolin's custom Op for backward pass.\n\n Returns:\n (torch.Tensor, torch.Tensor, torch.LongTensor):\n\n - The rendered features of shape\n :math:`(\\text{batch_size}, \\text{height}, \\text{width}, \\text{num_features})`,\n if `face_features` is a list of torch.FloatTensor, return of torch.FloatTensor,\n of shapes :math:`(\\text{batch_size}, \\text{height}, \\text{width}, \\text{num_features[i]})`.\n - The rendered soft mask, of shape :math:`(\\text{batch_size}, \\text{height}, \\text{width})`.\n It is generally used with :func:`kaolin.metrics.render.mask_iou` to compute the silhouette loss.\n - The rendered face index, -1 is None,\n of shape :math:`(\\text{batch_size}, \\text{height}, \\text{width})`.\n \"\"\"\n interpolated_features, face_idx = rasterize(\n height, width,\n face_vertices_z,\n face_vertices_image,\n face_features,\n face_normals_z >= 0.,\n multiplier,\n eps,\n rast_backend\n )\n _multiplier = 1000. if multiplier is None else multiplier\n soft_mask = dibr_soft_mask(\n face_vertices_image,\n face_idx,\n sigmainv,\n boxlen,\n knum,\n _multiplier\n )\n return interpolated_features, soft_mask, face_idx\n" ]
[ [ "torch.min", "torch.max", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tatikhonova/FEDOT.Algs
[ "aeb539f52bfbdb0ba8f4975e9ea7cb5a60859e25" ]
[ "estar/examples/ode_textbook.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 21 17:36:43 2021\n\n@author: mike_ubuntu\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom collections import OrderedDict\nimport pickle\n\nimport src.globals as global_var\n\nfrom src.moeadd.moeadd import *\nfrom src.moeadd.moeadd_supplementary import *\n\nimport src.sys_search_operators as operators\nfrom src.evo_optimizer import Operator_director, Operator_builder\nfrom src.evaluators import simple_function_evaluator, trigonometric_evaluator\nfrom src.supplementary import Define_Derivatives, factor_params_to_str\nfrom src.cache.cache import Cache, upload_simple_tokens, upload_grids, download_variable, prepare_var_tensor, np_ndarray_section\nfrom prep.derivatives import Preprocess_derivatives\nfrom src.structure import SoEq\nfrom src.token_family import TF_Pool, Token_family\n\nif __name__ == '__main__':\n '''\n \n В этой задаче мы ищем уравнение u sin(x) + u' cos(x) = 1 по его решению: u = sin(x) + C cos(x), \n где у частного решения C = 1.3.\n \n Задаём x - координатную ось по времени; ts - временной ряд условных измерений\n ff_filename - имя файла, куда сохраняется временной ряд; output_file_name - имя файла для производных\n step - шаг по времени\n '''\n x = np.linspace(0, 4*np.pi, 1000)\n ts = np.load('preprocessing/Fill366/fill366.npy')\n new_derivs = True\n \n ff_filename = 'preprocessing/Fill366/smoothed_ts.npy'\n output_file_name = 'preprocessing/Fill366/derivs.npy'\n step = x[1] - x[0]\n \n '''\n\n Рекомендуемый максимальный порядок производных в этой задаче - 1ый, т.к. в данном случае u = - u'', \n и в силу простоты структуры, алгоритм в больше случаев обнаруживает её, а не исходное уравнение.\n В следующем фрагменте - пример вычисления производных при помощи метода Preprocess_derivatives(...),\n который вызывается, если булева переменная new_derivs == True, т.е. указано пересчитать производные.\n \n '''\n \n max_order = 1 # presence of the 2nd order derivatives leads to equality u = d^2u/dx^2 on this data (elaborate)\n \n if new_derivs:\n derivs = Preprocess_derivatives(ts, ff_name = ff_filename, \n output_file_name = output_file_name,\n steps = (step,), smooth = True, sigma = 1, max_order = max_order)\n ts_smoothed = np.load(ff_filename) \n else:\n try:\n ts_smoothed = np.load(ff_filename)\n derivs = np.load(output_file_name)\n except FileNotFoundError:\n derivs = Preprocess_derivatives(ts, ff_name = ff_filename, \n output_file_name = output_file_name,\n steps = (step,), smooth = True, sigma = 1, max_order = max_order) \n ts_smoothed = np.load(ff_filename)\n print(derivs.shape)\n \n '''\n Инициализируем кэш для хранения вычисленных векторов слагаемых, чтобы не пересчитывать их каждый \n выпуск, и не хранить в отдельных слагаемых, создавая возможные повторные вычисления.\n \n global_var - модуль с глобальными переменными; у него метод init_caches() - создаёт кэши\n global_var.tensor_cache - кэш со значениями множителей и слагаемых;\n global_var.grid_cache - кэш, хранящий в себе тензоры значений координат в узлах.\n \n Метод .memory_usage_properties задаёт свойства использования кэшем памяти.\n \n '''\n \n global_var.init_caches(set_grids=True)\n global_var.tensor_cache.memory_usage_properties(obj_test_case=ts, mem_for_cache_frac = 25) \n global_var.grid_cache.memory_usage_properties(obj_test_case=x, mem_for_cache_frac = 5)\n\n '''\n Задаём пулл токенов, из которых будут создавать уравнения. 
Граница в 10 элементов позволяет \n избавиться от ошибок в значении производных, которые встречаются на границах исследумой области.\n Также, выполняем предварительную загрузку данных в кэш.\n '''\n\n boundary = 10\n upload_grids(x[boundary:-boundary], global_var.grid_cache) \n u_derivs_stacked = prepare_var_tensor(ts_smoothed, derivs, time_axis = 0, boundary = boundary, axes = [x,])\n u_names = ['t',] + Define_Derivatives('u', 1, 1) \n upload_simple_tokens(u_names, global_var.tensor_cache, u_derivs_stacked)\n global_var.tensor_cache.use_structural()\n\n '''\n Далее ряд операций для задания семейств токенов (коорд. ось, исх. функция и её производные в первом \n семействе, а во втором - тригонометрические функции): \n задание статуса использования токенов через метод .set_status(...)\n выбор параметров; названия индивидуальных токенов из семейства; параметры равенства двух множителей \n одного типа, но с разными параметрами (т.е. когда f(x, p1) == f(x, p2), где p1 и p2 - параметры\n вроде частоты, степени и т.д.), задание метода оценки значений токена на сетке через .set_evaluator(...)\n и т.д.\n \n \n\n '''\n u_tokens = Token_family('U')\n u_tokens.use_glob_cache()\n u_tokens.set_status(unique_specific_token=False, unique_token_type=False, s_and_d_merged = False, \n meaningful = True, unique_for_right_part = False)\n u_token_params = OrderedDict([('power', (1, 1))])\n u_equal_params = {'power' : 0}\n u_tokens.set_params(u_names, u_token_params, u_equal_params)\n u_tokens.set_evaluator(simple_function_evaluator, [])\n# \n# k1 = 0.5; k2 = 2\n# period_min = k1 * (x[1] - x[0])\n# period_max = k2 * (x[-1] - x[0]) \n# freq_max = 1./period_min; freq_min = 1./period_max; \n \n trig_tokens = Token_family('trig')\n trig_names = ['sin', 'cos']\n trig_tokens.use_glob_cache()\n trig_tokens.set_status(unique_specific_token=True, unique_token_type=True, \n meaningful = False, unique_for_right_part = False)\n trig_token_params = OrderedDict([('power', (1, 1)), ('freq', (0.95, 1.05)), ('dim', (0, 0))])\n trig_equal_params = {'power' : 0, 'freq' : 0.05, 'dim' : 0}\n trig_tokens.set_params(trig_names, trig_token_params, trig_equal_params)\n trig_tokens.set_evaluator(trigonometric_evaluator, [])\n \n '''\n Объединяем заданные семейства токенов в пулл, из которого будут строиться уравнения.\n '''\n pool = TF_Pool([u_tokens, trig_tokens])\n pool.families_cardinality()\n \n '''\n Используем базовый эволюционный оператор.\n '''\n director = Operator_director()\n director.operator_assembly() \n \n# test_system = SoEq(pool = pool, terms_number = 4, max_factors_in_term=2, sparcity = (0.1,))\n# test_system.set_eq_search_evolutionary(director.constructor.operator)\n# test_system.create_equations(population_size=16, eq_search_iters=300) \n \n# tokens=[h_tokens, trig_tokens]\n '''\n Настраиваем генератор новых уравнений, которые будут составлять популяцию для \n алгоритма многокритериальной оптимизации.\n '''\n pop_constructor = operators.systems_population_constructor(pool = pool, terms_number=6, \n max_factors_in_term=2, eq_search_evo=director.constructor.operator,\n sparcity_interval = (0.001, 1.2))\n \n '''\n Задаём объект многокритериального оптимизатора, эволюционный оператор и задаём лучшие возможные \n значения целевых функций.\n '''\n optimizer = moeadd_optimizer(pop_constructor, 4, 4, None, delta = 1/50., neighbors_number = 3)\n evo_operator = operators.sys_search_evolutionary_operator(operators.mixing_xover, \n operators.gaussian_mutation)\n\n optimizer.set_evolutionary(operator=evo_operator)\n best_obj = 
np.concatenate((np.ones([1,]), \n np.zeros(shape=len([1 for token_family in pool.families if token_family.status['meaningful']])))) \n optimizer.pass_best_objectives(*best_obj)\n \n def simple_selector(sorted_neighbors, number_of_neighbors = 4):\n return sorted_neighbors[:number_of_neighbors]\n\n '''\n Run the optimization.\n '''\n \n optimizer.optimize(simple_selector, 0.95, (4,), 100, 0.75) \n \n [print(solution.structure[0].text_form, solution.evaluate()) for solution in optimizer.pareto_levels.levels[0]]\n \n '''\n As a result we should obtain a Pareto front that includes one equation with \n \"0 terms in the left-hand side\", i.e. the equality of some token to a constant (most likely 0), as well as \n one equation with \"a single term besides the constant, and a right-hand side of one term\", which will be either the original \n (i.e. the sought) equation, or the equation u cos(x) - 1.3 = u' sin(x), which has \n a particular solution coinciding with the particular solution of the original equation considered here.\n '''\n " ]
[ [ "numpy.load", "numpy.linspace", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cesine/BrainHacks-mmMRI
[ "388cb978b355d5e1cd2b6e4aac99145b89f19237" ]
[ "scripts/Zscoring_masking.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 25 20:34:32 2014\n\n@author: Imane\n\"\"\"\nimport numpy as np\nfrom os import listdir\nfrom os.path import isfile, join\nfrom zscoring import zscoringNII\nfrom masking import maskdata\n\n#Applying Z-score and saving in NII files\nsourcedir = 'brainimages'\nfilenames = [ f for f in listdir(sourcedir) if isfile(join(sourcedir,f)) ]\nnewFn = []\nfor i in range (0, len(filenames)):\n fn = filenames[-1]\n zscoringNII(fn, sourcedir)\n newFn.extend(fn[:-7]+'_Z.nii.gz')\n \n#Applying a mask and gathering data \n#I am aware it is messily done but this was written for the sake of testing\n \nsourcedir = 'brainimages'\nfilenames = [ f for f in listdir(sourcedir) if isfile(join(sourcedir,f)) ] #a list of data file names\nmasksrc = 'masks\\submasks'\nmasknm = [ f for f in listdir(masksrc) if isfile(join(masksrc,f)) ] #a list of the masks' names\nfor k in range(0, len(masknm)):\n data = []\n for i in range (0, len(filenames)):\n fn = filenames[i]\n ms = maskdata(fn, sourcedir, masknm[k]) #harvox_heschls.nii\n ums = ms.compressed() \n ums=np.asarray(ums).reshape(-1)\n \n if (i==0):\n data = ums\n else: \n data = np.vstack([data, ums]) #The result in data is a matrix of dim. (156 x len(ms.compressed)\n \n #unrolling data -> putting the voxels of every subject in one vector (row) \n del(d)\n for i in range(0,26): \n x=np.asarray(data[i:i+5,:]).reshape(-1)\n if (i==0):\n d = x\n else: \n d = np.vstack([d, x]) #The result in d is a matrix of dim. (26 x len(ms.compressed)*6)\n i+=5\n \n np.save(\"dataMask\"+str(k), d)\n" ]
[ [ "numpy.asarray", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
eamontoyaa/pybimstab
[ "7844b5d29b4bb31f4b14f11ac4de63ad06fe83e6" ]
[ "examples/figuresScripts/polygon_example1.py" ]
[ "from numpy import array\nfrom pybimstab.polygon import Polygon\ncoords = array([[0, 1, 1, 0], [0, 0, 1.5, 1.5]])\nx, y = 0.5, 2\npolygon = Polygon(coordinates=coords)\npolygon.isinside(x=x, y=y, meshgrid=False, want2plot=True)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ajshajib/cosmopy
[ "f56eb76e8078e917a407cad3cbe3c3641e24809f" ]
[ "test/test_angular.py" ]
[ "from astropy.cosmology import wCDM\nfrom cosmopy import Cosmology\nimport numpy as np\n\nn_sample = 300 # number of test samples\nn_dim = 4\n\ncenter = np.array([72., .5, .5, -1.5]) # H_0, omega_m, omega_v, w\nscale = np.array([8., .5, .5, 1.]) # width to uniformly distribute cosmological parameters along one direction\nparams = center + np.random.uniform(low=-1., high=1., size=(n_sample, n_dim)) * scale\n\ndef isclose(a, b, rel_tol=1e-06, abs_tol=0.0):\n return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\n\nfor item in params:\n # reference distances from astropy\n cos = wCDM(H0=item[0], Om0=item[1], Ode0=item[2], w0=item[3], Tcmb0=0., Neff=0., m_nu=0.)\n D_A_ref = cos.angular_diameter_distance(0.5).value\n\n # distance computed from cosmopy\n param = {'H_0':cos.H0.value, 'omega_m':cos.Om0, 'omega_v':cos.Ode0, 'omega_gamma':cos.Ogamma0, 'omega_k':cos.Ok0, 'w_0':cos.w0, 'sum_m_nu':0., 'N_eff':0.}\n cosmo = Cosmology(param)\n D_A = cosmo.get_angular_diameter_distance(0.5)\n\n assert isclose(D_A, D_A_ref)" ]
[ [ "numpy.random.uniform", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lulzsec2012/tensorflow
[ "6c22bbdda41d839cb9e1f7803533c571596ea4ee", "d68d869e397515655e9f41570f4db463df770563" ]
[ "tensorflow/python/kernel_tests/init_ops_test.py", "tensorflow/python/training/saver.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.layers import convolutional\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import partitioned_variables\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\n# Returns true iff the two initializers produce the same tensor to\n# within a tiny tolerance.\ndef identicaltest(tc, init1, init2, shape=None):\n \"\"\"Tests if two initializations are identical to within tiny tolerances.\n\n Args:\n tc: An instance of TensorFlowTestCase.\n init1: An Initializer that generates a tensor of a given shape\n init2: An Initializer that generates a tensor of a given shape\n shape: Shape of the tensor to initialize or `None` to use a vector of length\n 100.\n Returns:\n True or False as determined by test.\n \"\"\"\n if shape is None:\n shape = [100]\n with tc.test_session(graph=ops.Graph()):\n t1 = init1(shape).eval()\n with tc.test_session(graph=ops.Graph()):\n t2 = init2(shape).eval()\n return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)\n\n\ndef duplicated_initializer(tc, init, graph_seed, shape=None):\n \"\"\"Tests duplicated random initializer within the same graph.\n\n This test generates two random kernels from the same initializer to the same\n graph, and checks if the results are close enough. 
Even given the same global\n seed, two different instances of random kernels should generate different\n results.\n\n Args:\n tc: An instance of TensorFlowTestCase.\n init: An Initializer that generates a tensor of a given shape\n graph_seed: A graph-level seed to use.\n shape: Shape of the tensor to initialize or `None` to use a vector of length\n 100.\n Returns:\n True or False as determined by test.\n \"\"\"\n if shape is None:\n shape = [100]\n with tc.test_session(graph=ops.Graph()):\n random_seed.set_random_seed(graph_seed)\n t1 = init(shape).eval()\n t2 = init(shape).eval()\n return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)\n\n\ndef _init_sampler(tc, init, num):\n \"\"\"Returns a func to generate a random tensor of shape [num].\n\n Args:\n tc: An instance of TensorFlowTestCase.\n init: An Initializer that generates a tensor of a given shape\n num: Size of 1D tensor to create.\n Returns:\n Function to generate a random tensor.\n \"\"\"\n\n def func():\n with tc.test_session(use_gpu=True):\n return init([num]).eval()\n\n return func\n\n\nclass ConstantInitializersTest(test.TestCase):\n\n def testZerosInitializer(self):\n with self.test_session(use_gpu=True):\n shape = [2, 3]\n x = variable_scope.get_variable(\n \"x\", shape=shape, initializer=init_ops.zeros_initializer())\n x.initializer.run()\n self.assertAllEqual(x.eval(), np.zeros(shape))\n\n def testOnesInitializer(self):\n with self.test_session(use_gpu=True):\n shape = [2, 3]\n x = variable_scope.get_variable(\n \"x\", shape=shape, initializer=init_ops.ones_initializer())\n x.initializer.run()\n self.assertAllEqual(x.eval(), np.ones(shape))\n\n def testConstantZeroInitializer(self):\n with self.test_session(use_gpu=True):\n shape = [2, 3]\n x = variable_scope.get_variable(\n \"x\", shape=shape, initializer=init_ops.constant_initializer(0.0))\n x.initializer.run()\n self.assertAllEqual(x.eval(), np.zeros(shape))\n\n def testConstantOneInitializer(self):\n with self.test_session(use_gpu=True):\n shape = [2, 3]\n x = variable_scope.get_variable(\n \"x\", shape=shape, initializer=init_ops.constant_initializer(1.0))\n x.initializer.run()\n self.assertAllEqual(x.eval(), np.ones(shape))\n\n def testConstantIntInitializer(self):\n with self.test_session(use_gpu=True):\n shape = [2, 3]\n x = variable_scope.get_variable(\n \"x\",\n shape=shape,\n dtype=dtypes.int32,\n initializer=init_ops.constant_initializer(7))\n x.initializer.run()\n self.assertEqual(x.dtype.base_dtype, dtypes.int32)\n self.assertAllEqual(x.eval(), 7 * np.ones(shape, dtype=np.int32))\n\n def testConstantTupleInitializer(self):\n with self.test_session(use_gpu=True):\n shape = [3]\n x = variable_scope.get_variable(\n \"x\",\n shape=shape,\n dtype=dtypes.int32,\n initializer=init_ops.constant_initializer((10, 20, 30)))\n x.initializer.run()\n self.assertEqual(x.dtype.base_dtype, dtypes.int32)\n self.assertAllEqual(x.eval(), [10, 20, 30])\n\n def _testNDimConstantInitializer(self, name, value, shape, expected):\n with self.test_session(use_gpu=True):\n init = init_ops.constant_initializer(value, dtype=dtypes.int32)\n x = variable_scope.get_variable(name, shape=shape, initializer=init)\n x.initializer.run()\n\n actual = array_ops.reshape(x, [-1]).eval()\n self.assertEqual(len(actual), len(expected))\n for a, e in zip(actual, expected):\n self.assertEqual(a, e)\n\n def testNDimConstantInitializer(self):\n value = [0, 1, 2, 3, 4, 5]\n shape = [2, 3]\n expected = list(value)\n\n self._testNDimConstantInitializer(\"list\", value, shape, expected)\n 
self._testNDimConstantInitializer(\"ndarray\",\n np.asarray(value), shape, expected)\n self._testNDimConstantInitializer(\"2D-ndarray\",\n np.asarray(value).reshape(tuple(shape)),\n shape, expected)\n\n def _testNDimConstantInitializerLessValues(self, name, value, shape,\n expected):\n with self.test_session(use_gpu=True):\n init = init_ops.constant_initializer(value, dtype=dtypes.int32)\n x = variable_scope.get_variable(name, shape=shape, initializer=init)\n x.initializer.run()\n\n actual = array_ops.reshape(x, [-1]).eval()\n self.assertGreater(len(actual), len(expected))\n for i in xrange(len(actual)):\n a = actual[i]\n e = expected[i] if i < len(expected) else expected[-1]\n self.assertEqual(a, e)\n\n def testNDimConstantInitializerLessValues(self):\n value = [0, 1, 2, 3, 4, 5]\n shape = [2, 4]\n expected = list(value)\n\n self._testNDimConstantInitializerLessValues(\"list\", value, shape, expected)\n self._testNDimConstantInitializerLessValues(\"ndarray\",\n np.asarray(value), shape,\n expected)\n self._testNDimConstantInitializerLessValues(\n \"2D-ndarray\", np.asarray(value).reshape(tuple([2, 3])), shape, expected)\n\n def _testNDimConstantInitializerMoreValues(self, value, shape):\n ops.reset_default_graph()\n with self.test_session(use_gpu=True):\n init = init_ops.constant_initializer(value, dtype=dtypes.int32)\n self.assertRaises(\n ValueError,\n variable_scope.get_variable,\n \"x\",\n shape=shape,\n initializer=init)\n\n def testNDimConstantInitializerMoreValues(self):\n value = [0, 1, 2, 3, 4, 5, 6, 7]\n shape = [2, 3]\n self._testNDimConstantInitializerMoreValues(value, shape)\n self._testNDimConstantInitializerMoreValues(np.asarray(value), shape)\n self._testNDimConstantInitializerMoreValues(\n np.asarray(value).reshape(tuple([2, 4])), shape)\n\n def testInvalidValueTypeForConstantInitializerCausesTypeError(self):\n c = constant_op.constant([1.0, 2.0, 3.0])\n with self.assertRaisesRegexp(\n TypeError, r\"Invalid type for initial value: .*Tensor.*\"):\n init_ops.constant_initializer(c, dtype=dtypes.float32)\n v = variables.Variable([3.0, 2.0, 1.0])\n with self.assertRaisesRegexp(\n TypeError, r\"Invalid type for initial value: .*Variable.*\"):\n init_ops.constant_initializer(v, dtype=dtypes.float32)\n\n\nclass RandomNormalInitializationTest(test.TestCase):\n\n def testInitializerIdentical(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)\n init2 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)\n self.assertTrue(identicaltest(self, init1, init2))\n\n def testInitializerDifferent(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)\n init2 = init_ops.random_normal_initializer(0.0, 1.0, seed=2, dtype=dtype)\n self.assertFalse(identicaltest(self, init1, init2))\n\n def testDuplicatedInitializer(self):\n init = init_ops.random_normal_initializer(0.0, 1.0)\n self.assertFalse(duplicated_initializer(self, init, 1))\n\n def testInvalidDataType(self):\n self.assertRaises(\n ValueError,\n init_ops.random_normal_initializer,\n 0.0,\n 1.0,\n dtype=dtypes.string)\n\n\nclass TruncatedNormalInitializationTest(test.TestCase):\n\n def testInitializerIdentical(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.truncated_normal_initializer(\n 0.0, 1.0, seed=1, dtype=dtype)\n init2 = init_ops.truncated_normal_initializer(\n 0.0, 1.0, seed=1, dtype=dtype)\n self.assertTrue(identicaltest(self, init1, 
init2))\n\n def testInitializerDifferent(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.truncated_normal_initializer(\n 0.0, 1.0, seed=1, dtype=dtype)\n init2 = init_ops.truncated_normal_initializer(\n 0.0, 1.0, seed=2, dtype=dtype)\n self.assertFalse(identicaltest(self, init1, init2))\n\n def testDuplicatedInitializer(self):\n init = init_ops.truncated_normal_initializer(0.0, 1.0)\n self.assertFalse(duplicated_initializer(self, init, 1))\n\n def testInvalidDataType(self):\n self.assertRaises(\n ValueError,\n init_ops.truncated_normal_initializer,\n 0.0,\n 1.0,\n dtype=dtypes.string)\n\n\nclass RandomUniformInitializationTest(test.TestCase):\n\n def testInitializerIdentical(self):\n for dtype in [dtypes.float32, dtypes.float64, dtypes.int64]:\n init1 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)\n init2 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)\n self.assertTrue(identicaltest(self, init1, init2))\n\n def testInitializerDifferent(self):\n for dtype in [dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64]:\n init1 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)\n init2 = init_ops.random_uniform_initializer(0, 7, seed=2, dtype=dtype)\n self.assertFalse(identicaltest(self, init1, init2))\n\n def testDuplicatedInitializer(self):\n init = init_ops.random_uniform_initializer(0.0, 1.0)\n self.assertFalse(duplicated_initializer(self, init, 1))\n\n\nclass UniformUnitScalingInitializationTest(test.TestCase):\n\n def testInitializerIdentical(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)\n init2 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)\n self.assertTrue(identicaltest(self, init1, init2))\n init3 = init_ops.uniform_unit_scaling_initializer(\n 1.5, seed=1, dtype=dtype)\n init4 = init_ops.uniform_unit_scaling_initializer(\n 1.5, seed=1, dtype=dtype)\n self.assertTrue(identicaltest(self, init3, init4))\n\n def testInitializerDifferent(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)\n init2 = init_ops.uniform_unit_scaling_initializer(seed=2, dtype=dtype)\n init3 = init_ops.uniform_unit_scaling_initializer(\n 1.5, seed=1, dtype=dtype)\n self.assertFalse(identicaltest(self, init1, init2))\n self.assertFalse(identicaltest(self, init1, init3))\n self.assertFalse(identicaltest(self, init2, init3))\n\n def testZeroSize(self):\n shape = [0, 2]\n with self.test_session():\n x = variable_scope.get_variable(\n \"x\",\n shape=shape,\n initializer=init_ops.uniform_unit_scaling_initializer())\n variables.global_variables_initializer().run()\n self.assertAllEqual(shape, x.eval().shape)\n\n def testDuplicatedInitializer(self):\n init = init_ops.uniform_unit_scaling_initializer()\n self.assertFalse(duplicated_initializer(self, init, 1))\n\n def testInvalidDataType(self):\n self.assertRaises(\n ValueError,\n init_ops.uniform_unit_scaling_initializer,\n dtype=dtypes.string)\n\n\n# TODO(vrv): move to sequence_ops_test?\nclass RangeTest(test.TestCase):\n\n def _Range(self, start, limit, delta):\n with self.test_session(use_gpu=True):\n tf_ans = math_ops.range(start, limit, delta, name=\"range\")\n self.assertEqual([len(np.arange(start, limit, delta))],\n tf_ans.get_shape())\n return tf_ans.eval()\n\n def testBasic(self):\n self.assertTrue(\n np.array_equal(self._Range(0, 5, 1), np.array([0, 1, 2, 3, 4])))\n 
self.assertTrue(np.array_equal(self._Range(0, 5, 2), np.array([0, 2, 4])))\n self.assertTrue(np.array_equal(self._Range(0, 6, 2), np.array([0, 2, 4])))\n self.assertTrue(\n np.array_equal(self._Range(13, 32, 7), np.array([13, 20, 27])))\n self.assertTrue(\n np.array_equal(\n self._Range(100, 500, 100), np.array([100, 200, 300, 400])))\n self.assertEqual(math_ops.range(0, 5, 1).dtype, dtypes.int32)\n\n def testLimitOnly(self):\n with self.test_session(use_gpu=True):\n self.assertAllEqual(np.arange(5), math_ops.range(5).eval())\n\n def testEmpty(self):\n for start in 0, 5:\n self.assertTrue(np.array_equal(self._Range(start, start, 1), []))\n\n def testNonInteger(self):\n self.assertTrue(\n np.allclose(self._Range(0, 2, 0.5), np.array([0, 0.5, 1, 1.5])))\n self.assertTrue(np.allclose(self._Range(0, 5, 2.5), np.array([0, 2.5])))\n self.assertTrue(\n np.allclose(self._Range(0, 3, 0.9), np.array([0, 0.9, 1.8, 2.7])))\n self.assertTrue(\n np.allclose(\n self._Range(100., 500., 100.), np.array([100, 200, 300, 400])))\n self.assertEqual(math_ops.range(0., 5., 1.).dtype, dtypes.float32)\n\n def testNegativeDelta(self):\n self.assertTrue(\n np.array_equal(self._Range(5, -1, -1), np.array([5, 4, 3, 2, 1, 0])))\n self.assertTrue(\n np.allclose(self._Range(2.5, 0, -0.5), np.array([2.5, 2, 1.5, 1, 0.5])))\n self.assertTrue(\n np.array_equal(self._Range(-5, -10, -3), np.array([-5, -8])))\n\n def testDType(self):\n zero_int32 = math_ops.cast(0, dtypes.int32)\n zero_int64 = math_ops.cast(0, dtypes.int64)\n zero_float32 = math_ops.cast(0, dtypes.float32)\n zero_float64 = math_ops.cast(0, dtypes.float64)\n\n self.assertEqual(math_ops.range(zero_int32, 0, 1).dtype, dtypes.int32)\n self.assertEqual(math_ops.range(zero_int64, 0, 1).dtype, dtypes.int64)\n self.assertEqual(math_ops.range(zero_float32, 0, 1).dtype, dtypes.float32)\n self.assertEqual(math_ops.range(zero_float64, 0, 1).dtype, dtypes.float64)\n\n self.assertEqual(\n math_ops.range(zero_int32, zero_int64, 1).dtype, dtypes.int64)\n self.assertEqual(\n math_ops.range(zero_int64, zero_float32, 1).dtype, dtypes.float32)\n self.assertEqual(\n math_ops.range(zero_float32, zero_float64, 1).dtype, dtypes.float64)\n self.assertEqual(\n math_ops.range(zero_float64, zero_int32, 1).dtype, dtypes.float64)\n\n self.assertEqual(\n math_ops.range(\n 0, 0, 1, dtype=dtypes.int32).dtype, dtypes.int32)\n self.assertEqual(\n math_ops.range(\n 0, 0, 1, dtype=dtypes.int64).dtype, dtypes.int64)\n self.assertEqual(\n math_ops.range(\n 0, 0, 1, dtype=dtypes.float32).dtype, dtypes.float32)\n self.assertEqual(\n math_ops.range(\n 0, 0, 1, dtype=dtypes.float64).dtype, dtypes.float64)\n\n\n# TODO(vrv): move to sequence_ops_test?\nclass LinSpaceTest(test.TestCase):\n\n def _gpu_modes(self):\n if test.is_gpu_available():\n return [False, True]\n else:\n return [False]\n\n def _LinSpace(self, start, stop, num):\n # NOTE(touts): Needs to pass a graph to get a new session each time.\n with ops.Graph().as_default() as graph:\n with self.test_session(graph=graph, force_gpu=self.force_gpu):\n tf_ans = math_ops.linspace(start, stop, num, name=\"linspace\")\n self.assertEqual([num], tf_ans.get_shape())\n return tf_ans.eval()\n\n def testPositive(self):\n for self.force_gpu in self._gpu_modes():\n self.assertArrayNear(self._LinSpace(1., 5., 1), np.array([1.]), 1e-5)\n self.assertArrayNear(self._LinSpace(1., 5., 2), np.array([1., 5.]), 1e-5)\n self.assertArrayNear(\n self._LinSpace(1., 5., 3), np.array([1., 3., 5.]), 1e-5)\n self.assertArrayNear(\n self._LinSpace(1., 5., 4), np.array([1., 7. 
/ 3., 11. / 3., 5.]),\n 1e-5)\n\n def testNegative(self):\n for self.force_gpu in self._gpu_modes():\n self.assertArrayNear(self._LinSpace(-1., -5., 1), np.array([-1.]), 1e-5)\n self.assertArrayNear(\n self._LinSpace(-1., -5., 2), np.array([-1., -5.]), 1e-5)\n self.assertArrayNear(\n self._LinSpace(-1., -5., 3), np.array([-1., -3., -5.]), 1e-5)\n self.assertArrayNear(\n self._LinSpace(-1., -5., 4),\n np.array([-1., -7. / 3., -11. / 3., -5.]), 1e-5)\n\n def testNegativeToPositive(self):\n for self.force_gpu in self._gpu_modes():\n self.assertArrayNear(self._LinSpace(-1., 5., 1), np.array([-1.]), 1e-5)\n self.assertArrayNear(\n self._LinSpace(-1., 5., 2), np.array([-1., 5.]), 1e-5)\n self.assertArrayNear(\n self._LinSpace(-1., 5., 3), np.array([-1., 2., 5.]), 1e-5)\n self.assertArrayNear(\n self._LinSpace(-1., 5., 4), np.array([-1., 1., 3., 5.]), 1e-5)\n\n def testPoint(self):\n for self.force_gpu in self._gpu_modes():\n self.assertArrayNear(self._LinSpace(5., 5., 1), np.array([5.]), 1e-5)\n self.assertArrayNear(self._LinSpace(5., 5., 2), np.array([5.] * 2), 1e-5)\n self.assertArrayNear(self._LinSpace(5., 5., 3), np.array([5.] * 3), 1e-5)\n self.assertArrayNear(self._LinSpace(5., 5., 4), np.array([5.] * 4), 1e-5)\n\n\nclass DeviceTest(test.TestCase):\n\n def testNoDevice(self):\n with ops.Graph().as_default():\n var = variables.Variable([[1.0, 1.0]])\n self.assertDeviceEqual(None, var.device)\n self.assertDeviceEqual(None, var.initializer.device)\n\n def testDevice(self):\n with ops.Graph().as_default():\n with ops.device(\"/job:ps\"):\n var = variables.Variable([[1.0, 1.0]])\n self.assertDeviceEqual(\"/job:ps\", var.device)\n self.assertDeviceEqual(\"/job:ps\", var.initializer.device)\n\n\nclass OrthogonalInitializerTest(test.TestCase):\n\n def testInitializerIdentical(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)\n init2 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)\n self.assertTrue(identicaltest(self, init1, init2, (10, 10)))\n\n def testInitializerDifferent(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)\n init2 = init_ops.orthogonal_initializer(seed=2, dtype=dtype)\n self.assertFalse(identicaltest(self, init1, init2, (10, 10)))\n\n def testDuplicatedInitializer(self):\n init = init_ops.orthogonal_initializer()\n self.assertFalse(duplicated_initializer(self, init, 1, (10, 10)))\n\n def testInvalidDataType(self):\n self.assertRaises(\n ValueError, init_ops.orthogonal_initializer, dtype=dtypes.string)\n\n def testInvalidShape(self):\n init1 = init_ops.orthogonal_initializer()\n with self.test_session(graph=ops.Graph(), use_gpu=True):\n self.assertRaises(ValueError, init1, shape=[5])\n\n def testGain(self):\n shape = (10, 10)\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)\n init2 = init_ops.orthogonal_initializer(gain=3.14, seed=1, dtype=dtype)\n with self.test_session(graph=ops.Graph(), use_gpu=True):\n t1 = init1(shape).eval()\n t2 = init2(shape).eval()\n return np.allclose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15)\n\n def testShapesValues(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]:\n init = init_ops.orthogonal_initializer(dtype=dtype)\n tol = 1e-5 if dtype == dtypes.float32 else 1e-12\n with self.test_session(graph=ops.Graph(), use_gpu=True):\n # Check the shape\n t = init(shape).eval()\n 
self.assertAllEqual(shape, t.shape)\n # Check orthogonality by computing the inner product\n t = t.reshape((np.prod(t.shape[:-1]), t.shape[-1]))\n if t.shape[0] > t.shape[1]:\n self.assertAllClose(\n np.dot(t.T, t), np.eye(t.shape[1]), rtol=tol, atol=tol)\n else:\n self.assertAllClose(\n np.dot(t, t.T), np.eye(t.shape[0]), rtol=tol, atol=tol)\n\n\nclass ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):\n\n def testInitializerIdentical(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)\n init2 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)\n self.assertTrue(identicaltest(self, init1, init2, (3, 3, 10, 10)))\n\n def testInitializerDifferent(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)\n init2 = init_ops.convolutional_delta_orthogonal(seed=2, dtype=dtype)\n self.assertFalse(identicaltest(self, init1, init2, (3, 3, 10, 10)))\n\n def testDuplicatedInitializer(self):\n init = init_ops.convolutional_delta_orthogonal()\n self.assertFalse(duplicated_initializer(self, init, 1, (3, 3, 10, 10)))\n\n def testInvalidDataType(self):\n self.assertRaises(\n ValueError, init_ops.convolutional_delta_orthogonal,\n dtype=dtypes.string)\n\n def testInvalidShape(self):\n init1 = init_ops.convolutional_delta_orthogonal()\n with self.test_session(graph=ops.Graph(), use_gpu=True):\n self.assertRaises(ValueError, init1, shape=[3, 3, 6, 5])\n\n def testGain(self):\n shape = (3, 3, 10, 10)\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.convolutional_delta_orthogonal(seed=1, dtype=dtype)\n init2 = init_ops.convolutional_delta_orthogonal(gain=3.14,\n seed=1, dtype=dtype)\n with self.test_session(graph=ops.Graph(), use_gpu=True):\n t1 = init1(shape).eval()\n t2 = init2(shape).eval()\n return np.allclose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15)\n\n def testShapesValues(self):\n for dtype in [dtypes.float32]:\n for kernel_size in [[3], [8], [3, 5], [2, 4], [3, 3, 3], [2, 2, 2]]:\n tol = 1e-2\n # Check orthogonality by computing the 2-norms of the inputs and outputs.\n if len(kernel_size) == 1:\n shape = [4, 32, 64]\n convolution = convolutional.conv1d\n elif len(kernel_size) == 2:\n convolution = convolutional.conv2d\n shape = [4, 32, 32, 64]\n else:\n shape = [4, 16, 16, 16, 64]\n convolution = convolutional.conv3d\n inputs = random_ops.random_normal(shape, dtype=dtype)\n inputs_2norm = linalg_ops.norm(inputs)\n outputs = convolution(\n inputs, padding=\"same\", filters=128,\n kernel_size=kernel_size, use_bias=False,\n kernel_initializer=init_ops.convolutional_delta_orthogonal(\n gain=3.14))\n outputs_shape = shape[0:-1] + [128]\n outputs_2norm = linalg_ops.norm(outputs)\n my_ops = variables.global_variables_initializer()\n with self.test_session(use_gpu=True) as sess:\n sess.run(my_ops)\n # Check the shape of the outputs\n t = outputs.eval()\n self.assertAllEqual(t.shape, outputs_shape)\n # Check isometry of the delta-orthogonal kernel.\n self.assertAllClose(\n sess.run(inputs_2norm)/np.sqrt(np.prod(shape)),\n sess.run(outputs_2norm)/(np.sqrt(np.prod(shape))*np.sqrt(3.14)),\n rtol=tol, atol=tol)\n\n def testNonuniformity(self):\n value = 0\n abs_value = 0\n shape = [3, 3, 10, 10]\n count = 70\n tol = 1e-5\n with self.test_session(use_gpu=True): # as sess:\n for i in range(count):\n x = variable_scope.get_variable(\"{}\".format(i), shape=shape,\n initializer=\n init_ops.convolutional_delta_orthogonal)\n x.initializer.run()\n 
y = x.eval()[1, 1, :, :]\n determinant = np.linalg.det(y)\n value += determinant\n abs_value += np.abs(determinant)\n\n # Check there is some variation in the signs of the determinants\n self.assertLess(value, count - tol)\n self.assertLess(-count + tol, value)\n # Check all determinants have absolute value 1\n # Compute the sum of the absolute values of 'count' determinants\n self.assertAllClose(abs_value, count, rtol=tol, atol=tol)\n\n\nclass ConvolutionOrthogonal2dInitializerTest(test.TestCase):\n\n def testInitializerIdentical(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype)\n init2 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype)\n self.assertTrue(identicaltest(self, init1, init2, (3, 3, 10, 10)))\n\n def testInitializerDifferent(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype)\n init2 = init_ops.convolutional_orthogonal_2d(seed=2, dtype=dtype)\n self.assertFalse(identicaltest(self, init1, init2, (3, 3, 10, 10)))\n\n def testDuplicatedInitializer(self):\n init = init_ops.convolutional_orthogonal_2d()\n self.assertFalse(duplicated_initializer(self, init, 1, (3, 3, 10, 10)))\n\n def testInvalidDataType(self):\n self.assertRaises(\n ValueError, init_ops.convolutional_orthogonal_2d,\n dtype=dtypes.string)\n\n def testInvalidShape(self):\n init1 = init_ops.convolutional_orthogonal_2d()\n with self.test_session(graph=ops.Graph(), use_gpu=True):\n self.assertRaises(ValueError, init1, shape=[3, 3, 6, 5])\n\n def testGain(self):\n shape = (3, 3, 10, 10)\n for dtype in [dtypes.float32, dtypes.float64]:\n init1 = init_ops.convolutional_orthogonal_2d(seed=1, dtype=dtype)\n init2 = init_ops.convolutional_orthogonal_2d(gain=3.14,\n seed=1, dtype=dtype)\n with self.test_session(graph=ops.Graph(), use_gpu=True):\n t1 = init1(shape).eval()\n t2 = init2(shape).eval()\n return np.allclose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15)\n\n def testShapesValues(self):\n def circular_pad(input_, width, kernel_size):\n \"\"\"Pad input_ for computing (circular) convolution.\n\n Args:\n input_: the input tensor\n width: the width of the tensor.\n kernel_size: the kernel size of the filter.\n Returns:\n a tensor whose width is (width + kernel_size - 1).\n \"\"\"\n beg = kernel_size // 2\n end = kernel_size - 1 - beg\n\n tmp_up = array_ops.slice(input_, [0, width - beg, 0, 0],\n [-1, beg, width, -1])\n tmp_down = array_ops.slice(input_, [0, 0, 0, 0], [-1, end, width, -1])\n tmp = array_ops.concat([tmp_up, input_, tmp_down], 1)\n\n new_width = width + kernel_size - 1\n tmp_left = array_ops.slice(tmp, [0, 0, width - beg, 0],\n [-1, new_width, beg, -1])\n tmp_right = array_ops.slice(tmp, [0, 0, 0, 0], [-1, new_width, end, -1])\n\n final = array_ops.concat([tmp_left, tmp, tmp_right], 2)\n return final\n\n cout = 45\n shape = [64, 28, 28, 32]\n outputs_shape = shape[0:-1] + [cout]\n dtype = dtypes.float32\n tol = 1e-3\n gain = 3.14\n # Check orthogonality/isometry by computing the ratio between\n # the 2-norms of the inputs and ouputs.\n for kernel_size in [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]:\n convolution = convolutional.conv2d\n inputs = random_ops.random_normal(shape, dtype=dtype)\n inputs_2norm = linalg_ops.norm(inputs)\n input_with_circular_pad = circular_pad(inputs, shape[1], kernel_size[0])\n outputs = convolution(\n input_with_circular_pad, padding=\"valid\", filters=cout,\n kernel_size=kernel_size, use_bias=False,\n 
kernel_initializer=init_ops.convolutional_orthogonal_2d(gain=gain))\n outputs_2norm = linalg_ops.norm(outputs)\n my_ops = variables.global_variables_initializer()\n with self.test_session(use_gpu=True) as sess:\n sess.run(my_ops)\n # Check the shape of the outputs\n t = outputs.eval()\n self.assertAllEqual(t.shape, outputs_shape)\n # Check isometry of the orthogonal kernel.\n self.assertAllClose(\n sess.run(inputs_2norm)/np.sqrt(np.prod(shape)),\n sess.run(outputs_2norm)/(np.sqrt(np.prod(shape))*np.sqrt(gain)),\n rtol=tol, atol=tol)\n\n\nclass IdentityInitializerTest(test.TestCase):\n\n def testInvalidDataType(self):\n self.assertRaises(\n ValueError, init_ops.orthogonal_initializer, dtype=dtypes.string)\n\n def testInvalidShape(self):\n init = init_ops.identity_initializer()\n with self.test_session(graph=ops.Graph(), use_gpu=True):\n self.assertRaises(ValueError, init, shape=[5, 7, 7])\n self.assertRaises(ValueError, init, shape=[5])\n self.assertRaises(ValueError, init, shape=[])\n\n def testNonSquare(self):\n init = init_ops.identity_initializer()\n shape = (10, 5)\n with self.test_session(graph=ops.Graph(), use_gpu=True):\n self.assertAllClose(init(shape).eval(), np.eye(*shape))\n\n def testGain(self):\n shape = (10, 10)\n for dtype in [dtypes.float32, dtypes.float64]:\n init_default = init_ops.identity_initializer(dtype=dtype)\n init_custom = init_ops.identity_initializer(gain=0.9, dtype=dtype)\n with self.test_session(graph=ops.Graph(), use_gpu=True):\n self.assertAllClose(init_default(shape).eval(), np.eye(*shape))\n with self.test_session(graph=ops.Graph(), use_gpu=True):\n self.assertAllClose(init_custom(shape).eval(), np.eye(*shape) * 0.9)\n\n def testPartitions(self):\n shape = (10, 10)\n init = init_ops.identity_initializer()\n partitioner = partitioned_variables.variable_axis_size_partitioner(1)\n with self.test_session(graph=ops.Graph(), use_gpu=True):\n with variable_scope.variable_scope(\n \"foo\", partitioner=partitioner, initializer=init):\n v = array_ops.identity(variable_scope.get_variable(\"bar\", shape=shape))\n variables.global_variables_initializer().run()\n self.assertAllClose(v.eval(), np.eye(*shape))\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# pylint: disable=invalid-name\n\"\"\"Save and restore variables.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os.path\nimport re\nimport sys\nimport time\nimport uuid\n\nimport numpy as np\nimport six\n\nfrom google.protobuf import text_format\n\nfrom tensorflow.core.protobuf import checkpointable_object_graph_pb2\nfrom tensorflow.core.protobuf import meta_graph_pb2\nfrom tensorflow.core.protobuf import saver_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.client import session\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import device as pydev\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import meta_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_io_ops\nfrom tensorflow.python.ops import io_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import checkpointable\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.training.checkpoint_state_pb2 import CheckpointState\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# Op names which identify variable reads which should be saved.\n_VARIABLE_OPS = set([\"Variable\",\n \"VariableV2\",\n \"AutoReloadVariable\",\n \"VarHandleOp\",\n \"ReadVariableOp\"])\n\n\ndef _set_cpu0(device_string):\n \"\"\"Creates a new device string based on `device_string` but using /CPU:0.\n\n If the device is already on /CPU:0, this is a no-op.\n\n Args:\n device_string: A device string.\n\n Returns:\n A device string.\n \"\"\"\n parsed_device = pydev.DeviceSpec.from_string(device_string)\n parsed_device.device_type = \"CPU\"\n parsed_device.device_index = 0\n return parsed_device.to_string()\n\n\nclass BaseSaverBuilder(object):\n \"\"\"Base class for Savers.\n\n Can be extended to create different Ops.\n \"\"\"\n\n class SaveSpec(object):\n \"\"\"Class used to describe tensor slices that need to be saved.\"\"\"\n\n def __init__(self, tensor, slice_spec, name, dtype=None):\n \"\"\"Creates a `SaveSpec` object.\n\n Args:\n tensor: the tensor to save or callable that produces a tensor to save.\n slice_spec: the slice to be saved. 
See `Variable.SaveSliceInfo`.\n name: the name to save the tensor under.\n dtype: The data type of the Tensor. Required if `tensor` is callable.\n Used for error checking in the restore op.\n \"\"\"\n self._tensor = tensor\n self.slice_spec = slice_spec\n self.name = name\n if callable(self._tensor):\n if dtype is None:\n raise AssertionError(\n \"When passing a callable `tensor` to a SaveSpec, an explicit \"\n \"dtype must be provided.\")\n self.dtype = dtype\n else:\n self.dtype = tensor.dtype\n\n @property\n def tensor(self):\n return self._tensor() if callable(self._tensor) else self._tensor\n\n class SaveableObject(object):\n \"\"\"Base class for saving and restoring saveable objects.\"\"\"\n\n def __init__(self, op, specs, name):\n \"\"\"Creates a `SaveableObject` object.\n\n Args:\n op: the \"producer\" object that this class wraps; it produces a list of\n tensors to save. E.g., a \"Variable\" object saving its backing tensor.\n specs: a list of SaveSpec, each element of which describes one tensor to\n save under this object. All Tensors must be on the same device.\n name: the name to save the object under.\n \"\"\"\n self.op = op\n self.specs = specs\n self.name = name\n self._device = None\n\n @property\n def device(self):\n \"\"\"The device for SaveSpec Tensors.\"\"\"\n # Note that SaveSpec.tensor runs Tensor-gathering ops when executing\n # eagerly, making this call potentially very expensive.\n #\n # TODO(allenl): Consider another way to gather device information. Lower\n # priority since this property isn't part of the normal save()/restore()\n # workflow, but does come up when some alternative builders are passed to\n # the Saver.\n if self._device is None:\n self._device = self.specs[0].tensor.device\n return self._device\n\n def restore(self, restored_tensors, restored_shapes):\n \"\"\"Restores this object from 'restored_tensors'.\n\n Args:\n restored_tensors: the tensors that were loaded from a checkpoint\n restored_shapes: the shapes this object should conform to after\n restore, or None.\n\n Returns:\n An operation that restores the state of the object.\n\n Raises:\n ValueError: If the object cannot be restored using the provided\n parameters.\n \"\"\"\n # pylint: disable=unused-argument\n raise ValueError(\"Calling an abstract method.\")\n\n class VariableSaveable(SaveableObject):\n \"\"\"SaveableObject implementation that handles Variables.\"\"\"\n\n def __init__(self, var, slice_spec, name):\n spec = BaseSaverBuilder.SaveSpec(var, slice_spec, name, dtype=var.dtype)\n super(BaseSaverBuilder.VariableSaveable, self).__init__(var, [spec], name)\n\n def restore(self, restored_tensors, restored_shapes):\n restored_tensor = restored_tensors[0]\n if restored_shapes is not None:\n restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])\n return state_ops.assign(\n self.op,\n restored_tensor,\n validate_shape=restored_shapes is None and\n self.op.get_shape().is_fully_defined())\n\n class ResourceVariableSaveable(SaveableObject):\n \"\"\"SaveableObject implementation that handles ResourceVariables.\"\"\"\n\n def __init__(self, var, slice_spec, name):\n self._var_device = var.device\n self._var_shape = var.shape\n if isinstance(var, ops.Tensor):\n self.handle_op = var.op.inputs[0]\n tensor = var\n elif isinstance(var, resource_variable_ops.ResourceVariable):\n\n def _read_variable_closure(v):\n def f():\n with ops.device(v.device):\n x = v.read_value()\n with ops.device(\"/device:CPU:0\"):\n return array_ops.identity(x)\n return f\n\n self.handle_op = 
var.handle\n tensor = _read_variable_closure(var)\n else:\n raise ValueError(\n \"Saveable is neither a resource variable nor a read operation.\"\n \" Got: %s\" % repr(var))\n spec = BaseSaverBuilder.SaveSpec(tensor, slice_spec, name,\n dtype=var.dtype)\n super(BaseSaverBuilder.ResourceVariableSaveable, self).__init__(\n var, [spec], name)\n\n def restore(self, restored_tensors, restored_shapes):\n restored_tensor = restored_tensors[0]\n if restored_shapes is not None:\n restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])\n # Copy the restored tensor to the variable's device.\n with ops.device(self._var_device):\n restored_tensor = array_ops.identity(restored_tensor)\n return resource_variable_ops.shape_safe_assign_variable_handle(\n self.handle_op, self._var_shape, restored_tensor)\n\n def __init__(self, write_version=saver_pb2.SaverDef.V2):\n self._write_version = write_version\n\n def save_op(self, filename_tensor, saveables):\n \"\"\"Create an Op to save 'saveables'.\n\n This is intended to be overridden by subclasses that want to generate\n different Ops.\n\n Args:\n filename_tensor: String Tensor.\n saveables: A list of BaseSaverBuilder.SaveableObject objects.\n\n Returns:\n An Operation that save the variables.\n\n Raises:\n RuntimeError: (implementation detail) if \"self._write_version\" is an\n unexpected value.\n \"\"\"\n # pylint: disable=protected-access\n tensor_names = []\n tensors = []\n tensor_slices = []\n for saveable in saveables:\n for spec in saveable.specs:\n tensor_names.append(spec.name)\n tensors.append(spec.tensor)\n tensor_slices.append(spec.slice_spec)\n if self._write_version == saver_pb2.SaverDef.V1:\n return io_ops._save(\n filename=filename_tensor,\n tensor_names=tensor_names,\n tensors=tensors,\n tensor_slices=tensor_slices)\n elif self._write_version == saver_pb2.SaverDef.V2:\n # \"filename_tensor\" is interpreted *NOT AS A FILENAME*, but as a prefix\n # of a V2 checkpoint: e.g. \"/fs/train/ckpt-<step>/tmp/worker<i>-<step>\".\n return io_ops.save_v2(filename_tensor, tensor_names, tensor_slices,\n tensors)\n else:\n raise RuntimeError(\"Unexpected write_version: \" + self._write_version)\n\n def bulk_restore(self, filename_tensor, saveables, preferred_shard,\n restore_sequentially):\n \"\"\"Restore all tensors contained in saveables.\n\n By default, this issues separate calls to `restore_op` for each saveable.\n Subclasses may override to load multiple saveables in a single call.\n\n Args:\n filename_tensor: String Tensor.\n saveables: List of BaseSaverBuilder.SaveableObject objects.\n preferred_shard: Int. Shard to open first when loading a sharded file.\n restore_sequentially: Bool. 
If true, each restore is sequential.\n\n Returns:\n A list of Tensors resulting from reading 'saveable' from\n 'filename'.\n\n \"\"\"\n all_tensors = []\n assign_ops = []\n for saveable in saveables:\n restore_control_inputs = assign_ops[-1:] if restore_sequentially else []\n with ops.device(_set_cpu0(saveable.device) if saveable.device else None):\n with ops.control_dependencies(restore_control_inputs):\n all_tensors.extend(\n self.restore_op(filename_tensor, saveable, preferred_shard))\n return all_tensors\n\n # pylint: disable=unused-argument\n def restore_op(self, filename_tensor, saveable, preferred_shard):\n \"\"\"Create ops to restore 'saveable'.\n\n This is intended to be overridden by subclasses that want to generate\n different Ops.\n\n Args:\n filename_tensor: String Tensor.\n saveable: A BaseSaverBuilder.SaveableObject object.\n preferred_shard: Int. Shard to open first when loading a sharded file.\n\n Returns:\n A list of Tensors resulting from reading 'saveable' from\n 'filename'.\n \"\"\"\n # pylint: disable=protected-access\n tensors = []\n for spec in saveable.specs:\n tensors.append(\n io_ops.restore_v2(\n filename_tensor,\n [spec.name],\n [spec.slice_spec],\n [spec.dtype])[0])\n\n return tensors\n # pylint: enable=unused-argument\n\n def sharded_filename(self, filename_tensor, shard, num_shards):\n \"\"\"Append sharding information to a filename.\n\n Args:\n filename_tensor: A string tensor.\n shard: Integer. The shard for the filename.\n num_shards: An int Tensor for the number of shards.\n\n Returns:\n A string tensor.\n \"\"\"\n return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)\n\n def _AddSaveOps(self, filename_tensor, saveables):\n \"\"\"Add ops to save variables that are on the same shard.\n\n Args:\n filename_tensor: String Tensor.\n saveables: A list of SaveableObject objects.\n\n Returns:\n A tensor with the filename used to save.\n \"\"\"\n save = self.save_op(filename_tensor, saveables)\n return control_flow_ops.with_dependencies([save], filename_tensor)\n\n def _AddShardedSaveOpsForV2(self, checkpoint_prefix, per_device):\n \"\"\"Add ops to save the params per shard, for the V2 format.\n\n Note that the sharded save procedure for the V2 format is different from\n V1: there is a special \"merge\" step that merges the small metadata produced\n from each device.\n\n Args:\n checkpoint_prefix: scalar String Tensor. Interpreted *NOT AS A\n FILENAME*, but as a prefix of a V2 checkpoint;\n per_device: A list of (device, BaseSaverBuilder.VarToSave) pairs, as\n returned by _GroupByDevices().\n\n Returns:\n An op to save the variables, which, when evaluated, returns the prefix\n \"<user-fed prefix>\" only and does not include the sharded spec suffix.\n \"\"\"\n # IMPLEMENTATION DETAILS: most clients should skip.\n #\n # Suffix for any well-formed \"checkpoint_prefix\", when sharded.\n # Transformations:\n # * Users pass in \"save_path\" in save() and restore(). Say \"myckpt\".\n # * checkpoint_prefix gets fed <save_path><_SHARDED_SUFFIX>.\n #\n # Example:\n # During runtime, a temporary directory is first created, which contains\n # files\n #\n # <train dir>/myckpt_temp/\n # part-?????-of-?????{.index, .data-00000-of-00001}\n #\n # Before .save() finishes, they will be (hopefully, atomically) renamed to\n #\n # <train dir>/\n # myckpt{.index, .data-?????-of-?????}\n #\n # Users only need to interact with the user-specified prefix, which is\n # \"<train dir>/myckpt\" in this case. 
Save() and Restore() work with the\n # prefix directly, instead of any physical pathname. (On failure and\n # subsequent restore, an outdated and orphaned temporary directory can be\n # safely removed.)\n _SHARDED_SUFFIX = \"_temp_%s/part\" % uuid.uuid4().hex\n tmp_checkpoint_prefix = string_ops.string_join(\n [checkpoint_prefix, _SHARDED_SUFFIX])\n\n num_shards = len(per_device)\n sharded_saves = []\n sharded_prefixes = []\n num_shards_tensor = constant_op.constant(num_shards, name=\"num_shards\")\n last_device = None\n for shard, (device, saveables) in enumerate(per_device):\n last_device = device\n with ops.device(_set_cpu0(device)):\n sharded_filename = self.sharded_filename(tmp_checkpoint_prefix, shard,\n num_shards_tensor)\n sharded_prefixes.append(sharded_filename)\n sharded_saves.append(self._AddSaveOps(sharded_filename, saveables))\n\n with ops.control_dependencies([x.op for x in sharded_saves]):\n # Co-locates the merge step with the last device.\n with ops.device(_set_cpu0(last_device)):\n # V2 format write path consists of a metadata merge step. Once merged,\n # attempts to delete the temporary directory, \"<user-fed prefix>_temp\".\n merge_step = gen_io_ops.merge_v2_checkpoints(\n sharded_prefixes, checkpoint_prefix, delete_old_dirs=True)\n with ops.control_dependencies([merge_step]):\n # Returns the prefix \"<user-fed prefix>\" only. DOES NOT include the\n # sharded spec suffix.\n return array_ops.identity(checkpoint_prefix)\n\n def _AddShardedSaveOps(self, filename_tensor, per_device):\n \"\"\"Add ops to save the params per shard.\n\n Args:\n filename_tensor: a scalar String Tensor.\n per_device: A list of (device, BaseSaverBuilder.SaveableObject) pairs, as\n returned by _GroupByDevices().\n\n Returns:\n An op to save the variables.\n \"\"\"\n if self._write_version == saver_pb2.SaverDef.V2:\n return self._AddShardedSaveOpsForV2(filename_tensor, per_device)\n\n num_shards = len(per_device)\n sharded_saves = []\n num_shards_tensor = constant_op.constant(num_shards, name=\"num_shards\")\n for shard, (device, saveables) in enumerate(per_device):\n with ops.device(device):\n sharded_filename = self.sharded_filename(filename_tensor, shard,\n num_shards_tensor)\n sharded_saves.append(self._AddSaveOps(sharded_filename, saveables))\n # Return the sharded name for the save path.\n with ops.control_dependencies([x.op for x in sharded_saves]):\n return gen_io_ops.sharded_filespec(filename_tensor, num_shards_tensor)\n\n def _AddRestoreOps(self,\n filename_tensor,\n saveables,\n restore_sequentially,\n reshape,\n preferred_shard=-1,\n name=\"restore_all\"):\n \"\"\"Add operations to restore saveables.\n\n Args:\n filename_tensor: Tensor for the path of the file to load.\n saveables: A list of SaveableObject objects.\n restore_sequentially: True if we want to restore variables sequentially\n within a shard.\n reshape: True if we want to reshape loaded tensors to the shape of\n the corresponding variable.\n preferred_shard: Shard to open first when loading a sharded file.\n name: Name for the returned op.\n\n Returns:\n An Operation that restores the variables.\n \"\"\"\n all_tensors = self.bulk_restore(filename_tensor, saveables, preferred_shard,\n restore_sequentially)\n\n assign_ops = []\n idx = 0\n # Load and optionally reshape on the CPU, as string tensors are not\n # available on the GPU.\n # TODO(touts): Re-enable restore on GPU when we can support annotating\n # string tensors as \"HostMemory\" inputs.\n for saveable in saveables:\n shapes = None\n if reshape:\n # Compute the 
shapes, let the restore op decide if and how to do\n # the reshape.\n shapes = []\n for spec in saveable.specs:\n v = spec.tensor\n shape = v.get_shape()\n if not shape.is_fully_defined():\n shape = array_ops.shape(v)\n shapes.append(shape)\n saveable_tensors = all_tensors[idx:idx + len(saveable.specs)]\n idx += len(saveable.specs)\n assign_ops.append(saveable.restore(saveable_tensors, shapes))\n\n # Create a Noop that has control dependencies from all the updates.\n return control_flow_ops.group(*assign_ops, name=name)\n\n def _AddShardedRestoreOps(self, filename_tensor, per_device,\n restore_sequentially, reshape):\n \"\"\"Add Ops to restore variables from multiple devices.\n\n Args:\n filename_tensor: Tensor for the path of the file to load.\n per_device: A list of (device, SaveableObject) pairs, as\n returned by _GroupByDevices().\n restore_sequentially: True if we want to restore variables sequentially\n within a shard.\n reshape: True if we want to reshape loaded tensors to the shape of\n the corresponding variable.\n\n Returns:\n An Operation that restores the variables.\n \"\"\"\n sharded_restores = []\n for shard, (device, saveables) in enumerate(per_device):\n with ops.device(device):\n sharded_restores.append(\n self._AddRestoreOps(\n filename_tensor,\n saveables,\n restore_sequentially,\n reshape,\n preferred_shard=shard,\n name=\"restore_shard\"))\n return control_flow_ops.group(*sharded_restores, name=\"restore_all\")\n\n @staticmethod\n def _IsVariable(v):\n return isinstance(v, ops.Tensor) and v.op.type in _VARIABLE_OPS\n\n def _GroupByDevices(self, saveables):\n \"\"\"Group Variable tensor slices per device.\n\n TODO(touts): Make sure that all the devices found are on different\n job/replica/task/cpu|gpu. It would be bad if 2 were on the same device.\n It can happen if the devices are unspecified.\n\n Args:\n saveables: A list of BaseSaverBuilder.SaveableObject objects.\n\n Returns:\n A list of tuples: (device_name, BaseSaverBuilder.SaveableObject) tuples.\n The list is sorted by ascending device_name.\n\n Raises:\n ValueError: If the tensors of a saveable are on different devices.\n \"\"\"\n per_device = collections.defaultdict(lambda: [])\n for saveable in saveables:\n canonical_device = set(\n pydev.canonical_name(spec.tensor.device) for spec in saveable.specs)\n if len(canonical_device) != 1:\n raise ValueError(\"All tensors of a saveable object must be \"\n \"on the same device: %s\" % saveable.name)\n per_device[canonical_device.pop()].append(saveable)\n return sorted(per_device.items(), key=lambda t: t[0])\n\n @staticmethod\n def OpListToDict(op_list, convert_variable_to_tensor=True):\n \"\"\"Create a dictionary of names to operation lists.\n\n Args:\n op_list: A list, tuple, or set of Variables or SaveableObjects.\n convert_variable_to_tensor: Whether or not to convert single Variables\n with no slice info into Tensors.\n\n Returns:\n A dictionary of names to the operations that must be saved under\n that name. Variables with save_slice_info are grouped together under the\n same key in no particular order.\n\n Raises:\n TypeError: If the type of op_list or its elements is not supported.\n ValueError: If at least two saveables share the same name.\n \"\"\"\n if not isinstance(op_list, (list, tuple, set)):\n raise TypeError(\"Variables to save should be passed in a dict or a \"\n \"list: %s\" % op_list)\n # When ResourceVariables are converted to Tensors, read ops are added to the\n # graph. 
Sorting the op_list ensures that the resulting graph is always\n # constructed in a deterministic way:\n op_list = sorted(op_list, key=lambda x: x.name)\n names_to_saveables = {}\n # pylint: disable=protected-access\n for var in op_list:\n if isinstance(var, BaseSaverBuilder.SaveableObject):\n names_to_saveables[var.name] = var\n elif isinstance(var, variables.PartitionedVariable):\n if var.name in names_to_saveables:\n raise ValueError(\"At least two variables have the same name: %s\" %\n var.name)\n names_to_saveables[var.name] = var\n elif isinstance(var, variables.Variable) and var._save_slice_info:\n name = var._save_slice_info.full_name\n if name in names_to_saveables:\n if not isinstance(names_to_saveables[name], list):\n raise ValueError(\"Mixing slices and non-slices with the same name: \"\n \"%s\" % name)\n names_to_saveables[name].append(var)\n else:\n names_to_saveables[name] = [var]\n elif (isinstance(var, checkpointable.CheckpointableBase)\n and not isinstance(var, variables.Variable)):\n checkpointable_saveables = [\n (factory() if callable(factory) else factory)\n for factory in var._gather_saveables_for_checkpoint().values()]\n names_to_saveables.update(\n BaseSaverBuilder.OpListToDict(checkpointable_saveables))\n else:\n if context.executing_eagerly():\n if not isinstance(var, resource_variable_ops.ResourceVariable):\n raise ValueError(\n \"Can only save/restore ResourceVariables when eager execution \"\n \"is enabled, type: %s.\" % type(var))\n set_var = names_to_saveables.setdefault(var._shared_name, var)\n if set_var is not var:\n raise ValueError(\n (\"Two different ResourceVariable objects with the same \"\n \"shared_name '%s' were passed to the Saver. This likely means \"\n \"that they were created in different Graphs or isolation \"\n \"contexts, and may not be checkpointed together.\") %\n (var._shared_name,))\n else:\n if convert_variable_to_tensor:\n if isinstance(var, resource_variable_ops.ResourceVariable):\n var = var._graph_element # pylint: disable=protected-access\n else:\n var = ops.internal_convert_to_tensor(var, as_ref=True)\n if not BaseSaverBuilder._IsVariable(var):\n raise TypeError(\"Variable to save is not a Variable: %s\" % var)\n if var.op.type == \"ReadVariableOp\":\n name = var.op.inputs[0].op.name\n else:\n name = var.op.name\n if name in names_to_saveables:\n raise ValueError(\"At least two variables have the same name: %s\" %\n name)\n names_to_saveables[name] = var\n\n # pylint: enable=protected-access\n return names_to_saveables\n\n def _ValidateAndSliceInputs(self, names_to_saveables):\n \"\"\"Returns the variables and names that will be used for a Saver.\n\n Args:\n names_to_saveables: A dict (k, v) where k is the name of an operation and\n v is an operation to save or a BaseSaverBuilder.Saver.\n\n Returns:\n A list of BaseSaverBuilder.SaveableObject objects.\n\n Raises:\n TypeError: If any of the keys are not strings or any of the\n values are not one of Tensor or Variable or a checkpointable operation.\n ValueError: If the same operation is given in more than one value\n (this also applies to slices of SlicedVariables).\n \"\"\"\n if not isinstance(names_to_saveables, dict):\n names_to_saveables = BaseSaverBuilder.OpListToDict(names_to_saveables)\n\n saveables = []\n seen_ops = set()\n for name in sorted(names_to_saveables.keys()):\n if not isinstance(name, six.string_types):\n raise TypeError(\n \"names_to_saveables must be a dict mapping string names to \"\n \"checkpointable operations. 
Name is not a string: %s\" % name)\n op = names_to_saveables[name]\n if isinstance(op, BaseSaverBuilder.SaveableObject):\n self._AddSaveable(saveables, seen_ops, op)\n elif isinstance(op, (list, tuple, variables.PartitionedVariable)):\n if isinstance(op, variables.PartitionedVariable):\n op = list(op)\n # A set of slices.\n slice_name = None\n # pylint: disable=protected-access\n for variable in op:\n if not isinstance(variable, variables.Variable):\n raise ValueError(\"Slices must all be Variables: %s\" % variable)\n if not variable._save_slice_info:\n raise ValueError(\"Slices must all be slices: %s\" % variable)\n if slice_name is None:\n slice_name = variable._save_slice_info.full_name\n elif slice_name != variable._save_slice_info.full_name:\n raise ValueError(\n \"Slices must all be from the same tensor: %s != %s\" %\n (slice_name, variable._save_slice_info.full_name))\n if variable.op.type in [\"Variable\", \"VariableV2\",\n \"AutoReloadVariable\"]:\n saveable = BaseSaverBuilder.VariableSaveable(\n variable, variable._save_slice_info.spec, name)\n else:\n saveable = BaseSaverBuilder.ResourceVariableSaveable(\n variable, variable._save_slice_info.spec, name)\n self._AddSaveable(saveables, seen_ops, saveable)\n # pylint: enable=protected-access\n else:\n # A variable or tensor.\n if context.executing_eagerly():\n if not isinstance(op, resource_variable_ops.ResourceVariable):\n raise ValueError(\"Can only save/restore ResourceVariables when \"\n \"eager execution is enabled, type: %s.\" % type(op))\n saveable = BaseSaverBuilder.ResourceVariableSaveable(op, \"\", name)\n else:\n if isinstance(op, resource_variable_ops.ResourceVariable):\n variable = op._graph_element # pylint: disable=protected-access\n else:\n variable = ops.internal_convert_to_tensor(op, as_ref=True)\n if not BaseSaverBuilder._IsVariable(variable):\n raise TypeError(\"names_to_saveables must be a dict mapping string \"\n \"names to Tensors/Variables. Not a variable: %s\" %\n variable)\n if variable.op.type in [\"Variable\", \"VariableV2\",\n \"AutoReloadVariable\"]:\n saveable = BaseSaverBuilder.VariableSaveable(variable, \"\", name)\n else:\n saveable = BaseSaverBuilder.ResourceVariableSaveable(\n variable, \"\", name)\n self._AddSaveable(saveables, seen_ops, saveable)\n return saveables\n\n def _AddSaveable(self, saveables, seen_ops, saveable):\n \"\"\"Adds the saveable to the saveables list.\n\n Args:\n saveables: List to append the SaveableObject to.\n seen_ops: Set of the ops of the saveables already processed. Used to\n check that each saveable is only saved once.\n saveable: The saveable.\n\n Raises:\n ValueError: If the saveable has already been processed.\n \"\"\"\n if saveable.op in seen_ops:\n raise ValueError(\"The same saveable will be restored with two names: %s\" %\n saveable.name)\n saveables.append(saveable)\n seen_ops.add(saveable.op)\n\n def build(self,\n names_to_saveables,\n reshape=False,\n sharded=False,\n max_to_keep=5,\n keep_checkpoint_every_n_hours=10000.0,\n name=None,\n restore_sequentially=False,\n filename=\"model\"):\n \"\"\"Builds save/restore graph nodes or runs save/restore in eager mode.\n\n Args:\n names_to_saveables: A dictionary mapping name to a Variable or\n SaveableObject. Each name will be associated with the\n corresponding variable in the checkpoint.\n reshape: If True, allow restoring parameters from a checkpoint\n where the parameters have a different shape. 
This is\n only needed when you try to restore from a Dist-Belief checkpoint,\n and only some times.\n sharded: If True, shard the checkpoints, one per device that has\n Variable nodes.\n max_to_keep: Maximum number of checkpoints to keep. As new checkpoints\n are created, old ones are deleted. If None or 0, no checkpoints are\n deleted from the filesystem but only the last one is kept in the\n `checkpoint` file. Presently the number is only roughly enforced. For\n example in case of restarts more than max_to_keep checkpoints may be\n kept.\n keep_checkpoint_every_n_hours: How often checkpoints should be kept.\n Defaults to 10,000 hours.\n name: String. Optional name to use as a prefix when adding operations.\n restore_sequentially: A Bool, which if true, causes restore of different\n variables to happen sequentially within each device.\n filename: If known at graph construction time, filename used for variable\n loading/saving. If None, then the default name \"model\" will be used.\n\n Returns:\n A SaverDef proto.\n\n Raises:\n TypeError: If 'names_to_saveables' is not a dictionary mapping string\n keys to variable Tensors.\n ValueError: If any of the keys or values in 'names_to_saveables' is not\n unique.\n \"\"\"\n return self._build_internal(\n names_to_saveables=names_to_saveables,\n reshape=reshape,\n sharded=sharded,\n max_to_keep=max_to_keep,\n keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,\n name=name,\n restore_sequentially=restore_sequentially,\n filename=filename)\n\n def _build_internal(self,\n names_to_saveables,\n reshape=False,\n sharded=False,\n max_to_keep=5,\n keep_checkpoint_every_n_hours=10000.0,\n name=None,\n restore_sequentially=False,\n filename=\"model\",\n build_save=True,\n build_restore=True):\n \"\"\"build() with option to only perform save and restore.\"\"\"\n if not context.executing_eagerly() and (not build_save or\n not build_restore):\n raise ValueError(\"save and restore operations need to be built together \"\n \" when eager execution is not enabled.\")\n\n saveables = self._ValidateAndSliceInputs(names_to_saveables)\n if max_to_keep is None:\n max_to_keep = 0\n\n with ops.name_scope(name, \"save\",\n [saveable.op for saveable in saveables]) as name:\n # Add the Constant string tensor for the filename.\n filename_tensor = constant_op.constant(filename or \"model\")\n\n # Add the save ops.\n if sharded:\n per_device = self._GroupByDevices(saveables)\n if build_save:\n save_tensor = self._AddShardedSaveOps(filename_tensor, per_device)\n if build_restore:\n restore_op = self._AddShardedRestoreOps(filename_tensor, per_device,\n restore_sequentially, reshape)\n else:\n if build_save:\n save_tensor = self._AddSaveOps(filename_tensor, saveables)\n if build_restore:\n restore_op = self._AddRestoreOps(filename_tensor, saveables,\n restore_sequentially, reshape)\n\n # In the following use case, it's possible to have restore_ops be called\n # something else:\n # - Build inference graph and export a meta_graph.\n # - Import the inference meta_graph\n # - Extend the inference graph to a train graph.\n # - Export a new meta_graph.\n # Now the second restore_op will be called \"restore_all_1\".\n # As such, comment out the assert for now until we know whether supporting\n # such usage model makes sense.\n #\n # assert restore_op.name.endswith(\"restore_all\"), restore_op.name\n if context.executing_eagerly():\n # Store the tensor values to the tensor_names.\n save_tensor_name = save_tensor.numpy() if build_save else \"\"\n return saver_pb2.SaverDef(\n 
filename_tensor_name=filename_tensor.numpy(),\n save_tensor_name=save_tensor_name,\n restore_op_name=\"\",\n max_to_keep=max_to_keep,\n sharded=sharded,\n keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,\n version=self._write_version)\n else:\n return saver_pb2.SaverDef(\n filename_tensor_name=filename_tensor.name,\n save_tensor_name=save_tensor.name,\n restore_op_name=restore_op.name,\n max_to_keep=max_to_keep,\n sharded=sharded,\n keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,\n version=self._write_version)\n\n\nclass BulkSaverBuilder(BaseSaverBuilder):\n \"\"\"SaverBuilder with support for bulk restoring multiple saveables.\"\"\"\n\n def bulk_restore(self, filename_tensor, saveables, preferred_shard,\n restore_sequentially):\n\n # Ignored: bulk restore is internally sequential.\n del restore_sequentially\n restore_specs = []\n for saveable in saveables:\n for spec in saveable.specs:\n restore_specs.append((spec.name, spec.slice_spec, spec.dtype))\n\n names, slices, dtypes = zip(*restore_specs)\n # Load all tensors onto CPU 0 for compatibility with existing code.\n with ops.device(\"cpu:0\"):\n return io_ops.restore_v2(filename_tensor, names, slices, dtypes)\n\n\ndef _get_saver_or_default():\n \"\"\"Returns the saver from SAVERS collection, or creates a default one.\n\n This method is used by other members of the training module, such as\n `Scaffold`, or `CheckpointSaverHook`.\n\n Returns:\n `Saver`.\n\n Raises:\n RuntimeError: If the SAVERS collection already has more than one items.\n \"\"\"\n collection_key = ops.GraphKeys.SAVERS\n savers = ops.get_collection(collection_key)\n if savers:\n if len(savers) > 1:\n raise RuntimeError(\n \"More than one item in collection {}. \"\n \"Please indicate which one to use by passing it to the constructor.\".\n format(collection_key))\n return savers[0]\n saver = Saver(sharded=True, allow_empty=True)\n if saver is not None:\n ops.add_to_collection(collection_key, saver)\n return saver\n\n\ndef _GetCheckpointFilename(save_dir, latest_filename):\n \"\"\"Returns a filename for storing the CheckpointState.\n\n Args:\n save_dir: The directory for saving and restoring checkpoints.\n latest_filename: Name of the file in 'save_dir' that is used\n to store the CheckpointState.\n\n Returns:\n The path of the file that contains the CheckpointState proto.\n \"\"\"\n if latest_filename is None:\n latest_filename = \"checkpoint\"\n return os.path.join(save_dir, latest_filename)\n\n\n@tf_export(\"train.generate_checkpoint_state_proto\")\ndef generate_checkpoint_state_proto(save_dir,\n model_checkpoint_path,\n all_model_checkpoint_paths=None):\n \"\"\"Generates a checkpoint state proto.\n\n Args:\n save_dir: Directory where the model was saved.\n model_checkpoint_path: The checkpoint file.\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\n the last element must be equal to model_checkpoint_path. These paths\n are also saved in the CheckpointState proto.\n\n Returns:\n CheckpointState proto with model_checkpoint_path and\n all_model_checkpoint_paths updated to either absolute paths or\n relative paths to the current save_dir.\n \"\"\"\n if all_model_checkpoint_paths is None:\n all_model_checkpoint_paths = []\n\n if (not all_model_checkpoint_paths or\n all_model_checkpoint_paths[-1] != model_checkpoint_path):\n logging.info(\"%s is not in all_model_checkpoint_paths. 
Manually adding it.\",\n model_checkpoint_path)\n all_model_checkpoint_paths.append(model_checkpoint_path)\n\n # Relative paths need to be rewritten to be relative to the \"save_dir\"\n # if model_checkpoint_path already contains \"save_dir\".\n if not os.path.isabs(save_dir):\n if not os.path.isabs(model_checkpoint_path):\n model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)\n for i in range(len(all_model_checkpoint_paths)):\n p = all_model_checkpoint_paths[i]\n if not os.path.isabs(p):\n all_model_checkpoint_paths[i] = os.path.relpath(p, save_dir)\n\n coord_checkpoint_proto = CheckpointState(\n model_checkpoint_path=model_checkpoint_path,\n all_model_checkpoint_paths=all_model_checkpoint_paths)\n\n return coord_checkpoint_proto\n\n\n@tf_export(\"train.update_checkpoint_state\")\ndef update_checkpoint_state(save_dir,\n model_checkpoint_path,\n all_model_checkpoint_paths=None,\n latest_filename=None):\n \"\"\"Updates the content of the 'checkpoint' file.\n\n This updates the checkpoint file containing a CheckpointState\n proto.\n\n Args:\n save_dir: Directory where the model was saved.\n model_checkpoint_path: The checkpoint file.\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\n the last element must be equal to model_checkpoint_path. These paths\n are also saved in the CheckpointState proto.\n latest_filename: Optional name of the checkpoint file. Defaults to\n 'checkpoint'.\n\n Raises:\n RuntimeError: If any of the model checkpoint paths conflict with the file\n containing CheckpointState.\n \"\"\"\n _update_checkpoint_state(\n save_dir=save_dir,\n model_checkpoint_path=model_checkpoint_path,\n all_model_checkpoint_paths=all_model_checkpoint_paths,\n latest_filename=latest_filename,\n save_relative_paths=False)\n\n\ndef _update_checkpoint_state(save_dir,\n model_checkpoint_path,\n all_model_checkpoint_paths=None,\n latest_filename=None,\n save_relative_paths=False):\n \"\"\"Updates the content of the 'checkpoint' file.\n\n This updates the checkpoint file containing a CheckpointState\n proto.\n\n Args:\n save_dir: Directory where the model was saved.\n model_checkpoint_path: The checkpoint file.\n all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\n checkpoints, sorted from oldest to newest. If this is a non-empty list,\n the last element must be equal to model_checkpoint_path. These paths\n are also saved in the CheckpointState proto.\n latest_filename: Optional name of the checkpoint file. 
Defaults to\n 'checkpoint'.\n save_relative_paths: If `True`, will write relative paths to the checkpoint\n state file.\n\n Raises:\n RuntimeError: If any of the model checkpoint paths conflict with the file\n containing CheckpointState.\n \"\"\"\n # Writes the \"checkpoint\" file for the coordinator for later restoration.\n coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)\n if save_relative_paths:\n if os.path.isabs(model_checkpoint_path):\n rel_model_checkpoint_path = os.path.relpath(\n model_checkpoint_path, save_dir)\n else:\n rel_model_checkpoint_path = model_checkpoint_path\n rel_all_model_checkpoint_paths = []\n for p in all_model_checkpoint_paths:\n if os.path.isabs(p):\n rel_all_model_checkpoint_paths.append(os.path.relpath(p, save_dir))\n else:\n rel_all_model_checkpoint_paths.append(p)\n ckpt = generate_checkpoint_state_proto(\n save_dir,\n rel_model_checkpoint_path,\n all_model_checkpoint_paths=rel_all_model_checkpoint_paths)\n else:\n ckpt = generate_checkpoint_state_proto(\n save_dir,\n model_checkpoint_path,\n all_model_checkpoint_paths=all_model_checkpoint_paths)\n\n if coord_checkpoint_filename == ckpt.model_checkpoint_path:\n raise RuntimeError(\"Save path '%s' conflicts with path used for \"\n \"checkpoint state. Please use a different save path.\" %\n model_checkpoint_path)\n\n # Preventing potential read/write race condition by *atomically* writing to a\n # file.\n file_io.atomic_write_string_to_file(coord_checkpoint_filename,\n text_format.MessageToString(ckpt))\n\n\n@tf_export(\"train.get_checkpoint_state\")\ndef get_checkpoint_state(checkpoint_dir, latest_filename=None):\n \"\"\"Returns CheckpointState proto from the \"checkpoint\" file.\n\n If the \"checkpoint\" file contains a valid CheckpointState\n proto, returns it.\n\n Args:\n checkpoint_dir: The directory of checkpoints.\n latest_filename: Optional name of the checkpoint file. 
Defaults to\n 'checkpoint'.\n\n Returns:\n A CheckpointState if the state was available, None\n otherwise.\n\n Raises:\n ValueError: if the checkpoint read doesn't have model_checkpoint_path set.\n \"\"\"\n ckpt = None\n coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir,\n latest_filename)\n f = None\n try:\n # Check that the file exists before opening it to avoid\n # many lines of errors from colossus in the logs.\n if file_io.file_exists(coord_checkpoint_filename):\n file_content = file_io.read_file_to_string(\n coord_checkpoint_filename)\n ckpt = CheckpointState()\n text_format.Merge(file_content, ckpt)\n if not ckpt.model_checkpoint_path:\n raise ValueError(\"Invalid checkpoint state loaded from %s\" %\n checkpoint_dir)\n # For relative model_checkpoint_path and all_model_checkpoint_paths,\n # prepend checkpoint_dir.\n if not os.path.isabs(ckpt.model_checkpoint_path):\n ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,\n ckpt.model_checkpoint_path)\n for i in range(len(ckpt.all_model_checkpoint_paths)):\n p = ckpt.all_model_checkpoint_paths[i]\n if not os.path.isabs(p):\n ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)\n except errors.OpError as e:\n # It's ok if the file cannot be read\n logging.warning(\"%s: %s\", type(e).__name__, e)\n logging.warning(\"%s: Checkpoint ignored\", coord_checkpoint_filename)\n return None\n except text_format.ParseError as e:\n logging.warning(\"%s: %s\", type(e).__name__, e)\n logging.warning(\"%s: Checkpoint ignored\", coord_checkpoint_filename)\n return None\n finally:\n if f:\n f.close()\n return ckpt\n\n\n@tf_export(\"train.Saver\")\nclass Saver(object):\n \"\"\"Saves and restores variables.\n\n See @{$variables$Variables}\n for an overview of variables, saving and restoring.\n\n The `Saver` class adds ops to save and restore variables to and from\n *checkpoints*. It also provides convenience methods to run these ops.\n\n Checkpoints are binary files in a proprietary format which map variable names\n to tensor values. The best way to examine the contents of a checkpoint is to\n load it using a `Saver`.\n\n Savers can automatically number checkpoint filenames with a provided counter.\n This lets you keep multiple checkpoints at different steps while training a\n model. For example you can number the checkpoint filenames with the training\n step number. To avoid filling up disks, savers manage checkpoint files\n automatically. For example, they can keep only the N most recent files, or\n one checkpoint for every N hours of training.\n\n You number checkpoint filenames by passing a value to the optional\n `global_step` argument to `save()`:\n\n ```python\n saver.save(sess, 'my-model', global_step=0) ==> filename: 'my-model-0'\n ...\n saver.save(sess, 'my-model', global_step=1000) ==> filename: 'my-model-1000'\n ```\n\n Additionally, optional arguments to the `Saver()` constructor let you control\n the proliferation of checkpoint files on disk:\n\n * `max_to_keep` indicates the maximum number of recent checkpoint files to\n keep. As new files are created, older files are deleted. If None or 0,\n no checkpoints are deleted from the filesystem but only the last one is\n kept in the `checkpoint` file. Defaults to 5 (that is, the 5 most recent\n checkpoint files are kept.)\n\n * `keep_checkpoint_every_n_hours`: In addition to keeping the most recent\n `max_to_keep` checkpoint files, you might want to keep one checkpoint file\n for every N hours of training. 
This can be useful if you want to later\n analyze how a model progressed during a long training session. For\n example, passing `keep_checkpoint_every_n_hours=2` ensures that you keep\n one checkpoint file for every 2 hours of training. The default value of\n 10,000 hours effectively disables the feature.\n\n Note that you still have to call the `save()` method to save the model.\n Passing these arguments to the constructor will not save variables\n automatically for you.\n\n A training program that saves regularly looks like:\n\n ```python\n ...\n # Create a saver.\n saver = tf.train.Saver(...variables...)\n # Launch the graph and train, saving the model every 1,000 steps.\n sess = tf.Session()\n for step in xrange(1000000):\n sess.run(..training_op..)\n if step % 1000 == 0:\n # Append the step number to the checkpoint name:\n saver.save(sess, 'my-model', global_step=step)\n ```\n\n In addition to checkpoint files, savers keep a protocol buffer on disk with\n the list of recent checkpoints. This is used to manage numbered checkpoint\n files and by `latest_checkpoint()`, which makes it easy to discover the path\n to the most recent checkpoint. That protocol buffer is stored in a file named\n 'checkpoint' next to the checkpoint files.\n\n If you create several savers, you can specify a different filename for the\n protocol buffer file in the call to `save()`.\n \"\"\"\n\n def __init__(self,\n var_list=None,\n reshape=False,\n sharded=False,\n max_to_keep=5,\n keep_checkpoint_every_n_hours=10000.0,\n name=None,\n restore_sequentially=False,\n saver_def=None,\n builder=None,\n defer_build=False,\n allow_empty=False,\n write_version=saver_pb2.SaverDef.V2,\n pad_step_number=False,\n save_relative_paths=False,\n filename=None):\n \"\"\"Creates a `Saver`.\n\n The constructor adds ops to save and restore variables.\n\n `var_list` specifies the variables that will be saved and restored. It can\n be passed as a `dict` or a list:\n\n * A `dict` of names to variables: The keys are the names that will be\n used to save or restore the variables in the checkpoint files.\n * A list of variables: The variables will be keyed with their op name in\n the checkpoint files.\n\n For example:\n\n ```python\n v1 = tf.Variable(..., name='v1')\n v2 = tf.Variable(..., name='v2')\n\n # Pass the variables as a dict:\n saver = tf.train.Saver({'v1': v1, 'v2': v2})\n\n # Or pass them as a list.\n saver = tf.train.Saver([v1, v2])\n # Passing a list is equivalent to passing a dict with the variable op names\n # as keys:\n saver = tf.train.Saver({v.op.name: v for v in [v1, v2]})\n ```\n\n The optional `reshape` argument, if `True`, allows restoring a variable from\n a save file where the variable had a different shape, but the same number\n of elements and type. This is useful if you have reshaped a variable and\n want to reload it from an older checkpoint.\n\n The optional `sharded` argument, if `True`, instructs the saver to shard\n checkpoints per device.\n\n Args:\n var_list: A list of `Variable`/`SaveableObject`, or a dictionary mapping\n names to `SaveableObject`s. If `None`, defaults to the list of all\n saveable objects.\n reshape: If `True`, allows restoring parameters from a checkpoint\n where the variables have a different shape.\n sharded: If `True`, shard the checkpoints, one per device.\n max_to_keep: Maximum number of recent checkpoints to keep.\n Defaults to 5.\n keep_checkpoint_every_n_hours: How often to keep checkpoints.\n Defaults to 10,000 hours.\n name: String. 
Optional name to use as a prefix when adding operations.\n restore_sequentially: A `Bool`, which if true, causes restore of different\n variables to happen sequentially within each device. This can lower\n memory usage when restoring very large models.\n saver_def: Optional `SaverDef` proto to use instead of running the\n builder. This is only useful for specialty code that wants to recreate\n a `Saver` object for a previously built `Graph` that had a `Saver`.\n The `saver_def` proto should be the one returned by the\n `as_saver_def()` call of the `Saver` that was created for that `Graph`.\n builder: Optional `SaverBuilder` to use if a `saver_def` was not provided.\n Defaults to `BulkSaverBuilder()`.\n defer_build: If `True`, defer adding the save and restore ops to the\n `build()` call. In that case `build()` should be called before\n finalizing the graph or using the saver.\n allow_empty: If `False` (default) raise an error if there are no\n variables in the graph. Otherwise, construct the saver anyway and make\n it a no-op.\n write_version: controls what format to use when saving checkpoints. It\n also affects certain filepath matching logic. The V2 format is the\n recommended choice: it is much more optimized than V1 in terms of\n memory required and latency incurred during restore. Regardless of\n this flag, the Saver is able to restore from both V2 and V1 checkpoints.\n pad_step_number: if True, pads the global step number in the checkpoint\n filepaths to some fixed width (8 by default). This is turned off by\n default.\n save_relative_paths: If `True`, will write relative paths to the\n checkpoint state file. This is needed if the user wants to copy the\n checkpoint directory and reload from the copied directory.\n filename: If known at graph construction time, filename used for variable\n loading/saving.\n\n Raises:\n TypeError: If `var_list` is invalid.\n ValueError: If any of the keys or values in `var_list` are not unique.\n RuntimeError: If eager execution is enabled and `var_list` does not specify\n a list of variables to save.\n\n @compatibility(eager)\n When eager execution is enabled, `var_list` must specify a `list` or `dict`\n of variables to save. Otherwise, a `RuntimeError` will be raised.\n @end_compatibility\n \"\"\"\n if defer_build and var_list:\n raise ValueError(\n \"If `var_list` is provided then build cannot be deferred. 
\"\n \"Either set defer_build=False or var_list=None.\")\n if context.executing_eagerly() and var_list is None:\n raise RuntimeError(\n \"When eager execution is enabled, `var_list` must specify a list or \"\n \"dict of variables to save\")\n self._var_list = var_list\n self._reshape = reshape\n self._sharded = sharded\n self._max_to_keep = max_to_keep\n self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours\n self._name = name\n self._restore_sequentially = restore_sequentially\n self.saver_def = saver_def\n self._builder = builder\n self._is_built = False\n self._allow_empty = allow_empty\n self._is_empty = None\n self._write_version = write_version\n self._pad_step_number = pad_step_number\n self._filename = filename\n self._last_checkpoints = []\n self._checkpoints_to_be_deleted = []\n if context.executing_eagerly():\n self._next_checkpoint_time = (\n time.time() + self._keep_checkpoint_every_n_hours * 3600)\n elif not defer_build:\n self.build()\n if self.saver_def:\n self._check_saver_def()\n self._write_version = self.saver_def.version\n self._save_relative_paths = save_relative_paths\n # For compatibility with object-based checkpoints, we may build a second\n # Saver to read the renamed keys.\n self._object_restore_saver = None\n\n def build(self):\n if context.executing_eagerly():\n raise RuntimeError(\"Use save/restore instead of build in eager mode.\")\n self._build(self._filename, build_save=True, build_restore=True)\n\n def _build_eager(self, checkpoint_path, build_save, build_restore):\n self._build(\n checkpoint_path, build_save=build_save, build_restore=build_restore)\n\n def _build(self, checkpoint_path, build_save, build_restore):\n \"\"\"Builds saver_def.\"\"\"\n if not context.executing_eagerly():\n if self._is_built:\n return\n self._is_built = True\n\n if not self.saver_def or context.executing_eagerly():\n if self._builder is None:\n self._builder = BulkSaverBuilder(self._write_version)\n\n if self._var_list is None:\n # pylint: disable=protected-access\n self._var_list = variables._all_saveable_objects()\n if not self._var_list:\n if self._allow_empty:\n self._is_empty = True\n return\n else:\n raise ValueError(\"No variables to save\")\n self._is_empty = False\n\n self.saver_def = self._builder._build_internal( # pylint: disable=protected-access\n self._var_list,\n reshape=self._reshape,\n sharded=self._sharded,\n max_to_keep=self._max_to_keep,\n keep_checkpoint_every_n_hours=self._keep_checkpoint_every_n_hours,\n name=self._name,\n restore_sequentially=self._restore_sequentially,\n filename=checkpoint_path,\n build_save=build_save, build_restore=build_restore)\n elif self.saver_def and self._name:\n # Since self._name is used as a name_scope by builder(), we are\n # overloading the use of this field to represent the \"import_scope\" as\n # well.\n self.saver_def.filename_tensor_name = ops.prepend_name_scope(\n self.saver_def.filename_tensor_name, self._name)\n self.saver_def.save_tensor_name = ops.prepend_name_scope(\n self.saver_def.save_tensor_name, self._name)\n self.saver_def.restore_op_name = ops.prepend_name_scope(\n self.saver_def.restore_op_name, self._name)\n\n self._check_saver_def()\n if not context.executing_eagerly():\n # Updates next checkpoint time.\n # Set in __init__ when executing eagerly.\n self._next_checkpoint_time = (\n time.time() + self.saver_def.keep_checkpoint_every_n_hours * 3600)\n\n def _check_saver_def(self):\n if not isinstance(self.saver_def, saver_pb2.SaverDef):\n raise ValueError(\"saver_def must be a 
saver_pb2.SaverDef: %s\" %\n self.saver_def)\n if not context.executing_eagerly():\n if not self.saver_def.save_tensor_name:\n raise ValueError(\"saver_def must specify the save_tensor_name: %s\" %\n str(self.saver_def))\n if not self.saver_def.restore_op_name:\n raise ValueError(\"saver_def must specify the restore_op_name: %s\" %\n str(self.saver_def))\n\n def _CheckpointFilename(self, p):\n \"\"\"Returns the checkpoint filename given a `(filename, time)` pair.\n\n Args:\n p: (filename, time) pair.\n\n Returns:\n Checkpoint file name.\n \"\"\"\n name, _ = p\n return name\n\n def _MetaGraphFilename(self, checkpoint_filename, meta_graph_suffix=\"meta\"):\n \"\"\"Returns the meta graph filename.\n\n Args:\n checkpoint_filename: Name of the checkpoint file.\n meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.\n\n Returns:\n MetaGraph file name.\n \"\"\"\n # If the checkpoint_filename is sharded, the checkpoint_filename could\n # be of format model.ckpt-step#-?????-of-shard#. For example,\n # model.ckpt-123456-?????-of-00005, or model.ckpt-123456-00001-of-00002.\n basename = re.sub(r\"-[\\d\\?]+-of-\\d+$\", \"\", checkpoint_filename)\n meta_graph_filename = \".\".join([basename, meta_graph_suffix])\n return meta_graph_filename\n\n def _RecordLastCheckpoint(self, latest_save_path):\n \"\"\"Manages the list of the latest checkpoints.\"\"\"\n if not self.saver_def.max_to_keep:\n return\n # Remove first from list if the same name was used before.\n for p in self._last_checkpoints:\n if latest_save_path == self._CheckpointFilename(p):\n self._last_checkpoints.remove(p)\n # Append new path to list\n self._last_checkpoints.append((latest_save_path, time.time()))\n\n # If more than max_to_keep, remove oldest.\n if len(self._last_checkpoints) > self.saver_def.max_to_keep:\n self._checkpoints_to_be_deleted.append(self._last_checkpoints.pop(0))\n\n def _MaybeDeleteOldCheckpoints(self, meta_graph_suffix=\"meta\"):\n \"\"\"Deletes old checkpoints if necessary.\n\n `self._checkpoints_to_be_deleted` is going to contain checkpoints that are\n over `max_to_keep`. They are going to be deleted. If\n `keep_checkpoint_every_n_hours` was specified, keep an additional checkpoint\n every `N` hours. For example, if `N` is 0.5, an additional checkpoint is\n kept for every 0.5 hours of training; if `N` is 10, an additional\n checkpoint is kept for every 10 hours of training.\n\n Args:\n meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.\n \"\"\"\n if self._checkpoints_to_be_deleted:\n p = self._checkpoints_to_be_deleted.pop(0)\n # Do not delete the file if we keep_checkpoint_every_n_hours is set and we\n # have reached N hours of training.\n should_keep = p[1] > self._next_checkpoint_time\n if should_keep:\n self._next_checkpoint_time += (\n self.saver_def.keep_checkpoint_every_n_hours * 3600)\n return\n\n # Otherwise delete the files.\n try:\n checkpoint_prefix = self._CheckpointFilename(p)\n self._delete_file_if_exists(\n self._MetaGraphFilename(checkpoint_prefix, meta_graph_suffix))\n if self.saver_def.version == saver_pb2.SaverDef.V2:\n # V2 has a metadata file and some data files.\n self._delete_file_if_exists(checkpoint_prefix + \".index\")\n self._delete_file_if_exists(checkpoint_prefix +\n \".data-?????-of-?????\")\n else:\n # V1, Legacy. 
Exact match on the data file.\n self._delete_file_if_exists(checkpoint_prefix)\n except Exception as e: # pylint: disable=broad-except\n logging.warning(\"Ignoring: %s\", str(e))\n\n def _delete_file_if_exists(self, filespec):\n for pathname in file_io.get_matching_files(filespec):\n file_io.delete_file(pathname)\n\n def as_saver_def(self):\n \"\"\"Generates a `SaverDef` representation of this saver.\n\n Returns:\n A `SaverDef` proto.\n \"\"\"\n return self.saver_def\n\n def to_proto(self, export_scope=None):\n \"\"\"Converts this `Saver` to a `SaverDef` protocol buffer.\n\n Args:\n export_scope: Optional `string`. Name scope to remove.\n\n Returns:\n A `SaverDef` protocol buffer.\n \"\"\"\n if export_scope is None:\n return self.saver_def\n\n if not (self.saver_def.filename_tensor_name.startswith(export_scope) and\n self.saver_def.save_tensor_name.startswith(export_scope) and\n self.saver_def.restore_op_name.startswith(export_scope)):\n return None\n\n saver_def = saver_pb2.SaverDef()\n saver_def.CopyFrom(self.saver_def)\n saver_def.filename_tensor_name = ops.strip_name_scope(\n saver_def.filename_tensor_name, export_scope)\n saver_def.save_tensor_name = ops.strip_name_scope(\n saver_def.save_tensor_name, export_scope)\n saver_def.restore_op_name = ops.strip_name_scope(\n saver_def.restore_op_name, export_scope)\n return saver_def\n\n @staticmethod\n def from_proto(saver_def, import_scope=None):\n \"\"\"Returns a `Saver` object created from `saver_def`.\n\n Args:\n saver_def: a `SaverDef` protocol buffer.\n import_scope: Optional `string`. Name scope to use.\n\n Returns:\n A `Saver` built from saver_def.\n \"\"\"\n return Saver(saver_def=saver_def, name=import_scope)\n\n @property\n def last_checkpoints(self):\n \"\"\"List of not-yet-deleted checkpoint filenames.\n\n You can pass any of the returned values to `restore()`.\n\n Returns:\n A list of checkpoint filenames, sorted from oldest to newest.\n \"\"\"\n return list(self._CheckpointFilename(p) for p in self._last_checkpoints)\n\n def set_last_checkpoints(self, last_checkpoints):\n \"\"\"DEPRECATED: Use set_last_checkpoints_with_time.\n\n Sets the list of old checkpoint filenames.\n\n Args:\n last_checkpoints: A list of checkpoint filenames.\n\n Raises:\n AssertionError: If last_checkpoints is not a list.\n \"\"\"\n assert isinstance(last_checkpoints, list)\n # We use a timestamp of +inf so that this checkpoint will never be\n # deleted. This is both safe and backwards compatible to a previous\n # version of the code which used s[1] as the \"timestamp\".\n self._last_checkpoints = [(s, np.inf) for s in last_checkpoints]\n\n def set_last_checkpoints_with_time(self, last_checkpoints_with_time):\n \"\"\"Sets the list of old checkpoint filenames and timestamps.\n\n Args:\n last_checkpoints_with_time: A list of tuples of checkpoint filenames and\n timestamps.\n\n Raises:\n AssertionError: If last_checkpoints_with_time is not a list.\n \"\"\"\n assert isinstance(last_checkpoints_with_time, list)\n self._last_checkpoints = last_checkpoints_with_time\n\n def recover_last_checkpoints(self, checkpoint_paths):\n \"\"\"Recovers the internal saver state after a crash.\n\n This method is useful for recovering the \"self._last_checkpoints\" state.\n\n Globs for the checkpoints pointed to by `checkpoint_paths`. 
If the files\n exist, use their mtime as the checkpoint timestamp.\n\n Args:\n checkpoint_paths: a list of checkpoint paths.\n \"\"\"\n mtimes = get_checkpoint_mtimes(checkpoint_paths)\n self.set_last_checkpoints_with_time(list(zip(checkpoint_paths, mtimes)))\n\n def save(self,\n sess,\n save_path,\n global_step=None,\n latest_filename=None,\n meta_graph_suffix=\"meta\",\n write_meta_graph=True,\n write_state=True,\n strip_default_attrs=False):\n # pylint: disable=line-too-long\n \"\"\"Saves variables.\n\n This method runs the ops added by the constructor for saving variables.\n It requires a session in which the graph was launched. The variables to\n save must also have been initialized.\n\n The method returns the path prefix of the newly created checkpoint files.\n This string can be passed directly to a call to `restore()`.\n\n Args:\n sess: A Session to use to save the variables.\n save_path: String. Prefix of filenames created for the checkpoint.\n global_step: If provided, the global step number is appended to\n `save_path` to create the checkpoint filenames. The optional argument\n can be a `Tensor`, a `Tensor` name or an integer.\n latest_filename: Optional name for the protocol buffer file that will\n contain the list of most recent checkpoints. That file,\n kept in the same directory as the checkpoint files, is automatically\n managed by the saver to keep track of recent checkpoints. Defaults to\n 'checkpoint'.\n meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.\n write_meta_graph: `Boolean` indicating whether or not to write the meta\n graph file.\n write_state: `Boolean` indicating whether or not to write the\n `CheckpointStateProto`.\n strip_default_attrs: Boolean. If `True`, default-valued attributes will be\n removed from the NodeDefs. For a detailed guide, see\n [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).\n\n Returns:\n A string: path prefix used for the checkpoint files. 
If the saver is\n sharded, this string ends with: '-?????-of-nnnnn' where 'nnnnn'\n is the number of shards created.\n If the saver is empty, returns None.\n\n Raises:\n TypeError: If `sess` is not a `Session`.\n ValueError: If `latest_filename` contains path components, or if it\n collides with `save_path`.\n RuntimeError: If save and restore ops weren't built.\n \"\"\"\n # pylint: enable=line-too-long\n if not self._is_built and not context.executing_eagerly():\n raise RuntimeError(\n \"`build()` should be called before save if defer_build==True\")\n if latest_filename is None:\n latest_filename = \"checkpoint\"\n if self._write_version != saver_pb2.SaverDef.V2:\n logging.warning(\"*******************************************************\")\n logging.warning(\"TensorFlow's V1 checkpoint format has been deprecated.\")\n logging.warning(\"Consider switching to the more efficient V2 format:\")\n logging.warning(\" `tf.train.Saver(write_version=tf.train.SaverDef.V2)`\")\n logging.warning(\"now on by default.\")\n logging.warning(\"*******************************************************\")\n\n if os.path.split(latest_filename)[0]:\n raise ValueError(\"'latest_filename' must not contain path components\")\n\n if global_step is not None:\n if not isinstance(global_step, compat.integral_types):\n global_step = training_util.global_step(sess, global_step)\n checkpoint_file = \"%s-%d\" % (save_path, global_step)\n if self._pad_step_number:\n # Zero-pads the step numbers, so that they are sorted when listed.\n checkpoint_file = \"%s-%s\" % (save_path, \"{:08d}\".format(global_step))\n else:\n checkpoint_file = save_path\n if os.path.basename(\n save_path) == latest_filename and not self._sharded:\n # Guard against collision between data file and checkpoint state file.\n raise ValueError(\n \"'latest_filename' collides with 'save_path': '%s' and '%s'\" %\n (latest_filename, save_path))\n\n if (not context.executing_eagerly() and\n not isinstance(sess, session.SessionInterface)):\n raise TypeError(\"'sess' must be a Session; %s\" % sess)\n\n save_path_parent = os.path.dirname(save_path)\n if not self._is_empty:\n try:\n if context.executing_eagerly():\n self._build_eager(\n checkpoint_file, build_save=True, build_restore=False)\n model_checkpoint_path = self.saver_def.save_tensor_name\n else:\n model_checkpoint_path = sess.run(\n self.saver_def.save_tensor_name,\n {self.saver_def.filename_tensor_name: checkpoint_file})\n\n model_checkpoint_path = compat.as_str(model_checkpoint_path)\n if write_state:\n self._RecordLastCheckpoint(model_checkpoint_path)\n _update_checkpoint_state(\n save_dir=save_path_parent,\n model_checkpoint_path=model_checkpoint_path,\n all_model_checkpoint_paths=self.last_checkpoints,\n latest_filename=latest_filename,\n save_relative_paths=self._save_relative_paths)\n self._MaybeDeleteOldCheckpoints(meta_graph_suffix=meta_graph_suffix)\n except (errors.FailedPreconditionError, errors.NotFoundError) as exc:\n if not gfile.IsDirectory(save_path_parent):\n exc = ValueError(\n \"Parent directory of {} doesn't exist, can't save.\".format(\n save_path))\n raise exc\n\n if write_meta_graph:\n meta_graph_filename = self._MetaGraphFilename(\n checkpoint_file, meta_graph_suffix=meta_graph_suffix)\n if not context.executing_eagerly():\n with sess.graph.as_default():\n self.export_meta_graph(\n meta_graph_filename, strip_default_attrs=strip_default_attrs)\n\n if self._is_empty:\n return None\n else:\n return model_checkpoint_path\n\n def export_meta_graph(self,\n filename=None,\n 
collection_list=None,\n as_text=False,\n export_scope=None,\n clear_devices=False,\n clear_extraneous_savers=False,\n strip_default_attrs=False):\n # pylint: disable=line-too-long\n \"\"\"Writes `MetaGraphDef` to save_path/filename.\n\n Args:\n filename: Optional meta_graph filename including the path.\n collection_list: List of string keys to collect.\n as_text: If `True`, writes the meta_graph as an ASCII proto.\n export_scope: Optional `string`. Name scope to remove.\n clear_devices: Whether or not to clear the device field for an `Operation`\n or `Tensor` during export.\n clear_extraneous_savers: Remove any Saver-related information from the\n graph (both Save/Restore ops and SaverDefs) that are not associated\n with this Saver.\n strip_default_attrs: Boolean. If `True`, default-valued attributes will be\n removed from the NodeDefs. For a detailed guide, see\n [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).\n\n Returns:\n A `MetaGraphDef` proto.\n \"\"\"\n # pylint: enable=line-too-long\n return export_meta_graph(\n filename=filename,\n graph_def=ops.get_default_graph().as_graph_def(add_shapes=True),\n saver_def=self.saver_def,\n collection_list=collection_list,\n as_text=as_text,\n export_scope=export_scope,\n clear_devices=clear_devices,\n clear_extraneous_savers=clear_extraneous_savers,\n strip_default_attrs=strip_default_attrs)\n\n def restore(self, sess, save_path):\n \"\"\"Restores previously saved variables.\n\n This method runs the ops added by the constructor for restoring variables.\n It requires a session in which the graph was launched. The variables to\n restore do not have to have been initialized, as restoring is itself a way\n to initialize variables.\n\n The `save_path` argument is typically a value previously returned from a\n `save()` call, or a call to `latest_checkpoint()`.\n\n Args:\n sess: A `Session` to use to restore the parameters. None in eager mode.\n save_path: Path where parameters were previously saved.\n\n Raises:\n ValueError: If save_path is None.\n \"\"\"\n if self._is_empty:\n return\n if save_path is None:\n raise ValueError(\"Can't load save_path when it is None.\")\n logging.info(\"Restoring parameters from %s\", save_path)\n try:\n if context.executing_eagerly():\n self._build_eager(save_path, build_save=False, build_restore=True)\n else:\n sess.run(self.saver_def.restore_op_name,\n {self.saver_def.filename_tensor_name: save_path})\n except errors.NotFoundError:\n exception_type, exception_value, exception_traceback = sys.exc_info()\n # The checkpoint would not be loaded successfully as is. Try to parse it\n # as an object-based checkpoint.\n try:\n reader = pywrap_tensorflow.NewCheckpointReader(save_path)\n object_graph_string = reader.get_tensor(\n checkpointable.OBJECT_GRAPH_PROTO_KEY)\n except errors.NotFoundError:\n # This is not an object-based checkpoint, or the checkpoint doesn't\n # exist. Re-raise the original exception.\n six.reraise(exception_type, exception_value, exception_traceback)\n del exception_traceback # avoid reference cycles\n\n # This is an object-based checkpoint. We'll print a warning and then do\n # the restore.\n logging.warning(\n # TODO(allenl): Modify instructions for using the object-based saver\n # once that's in core.\n \"Restoring an object-based checkpoint using a name-based saver. This \"\n \"may be somewhat fragile, and will re-build the Saver. 
Instead, \"\n \"consider loading object-based checkpoints using \"\n \"tf.contrib.eager.Checkpoint().\")\n self._restore_from_object_based_checkpoint(\n sess=sess, save_path=save_path,\n object_graph_string=object_graph_string)\n\n def _restore_from_object_based_checkpoint(self, sess, save_path,\n object_graph_string):\n \"\"\"A compatibility mode for reading object-based checkpoints.\"\"\"\n object_graph_proto = (\n checkpointable_object_graph_pb2.CheckpointableObjectGraph())\n object_graph_proto.ParseFromString(object_graph_string)\n names_to_keys = {}\n for node in object_graph_proto.nodes:\n for attribute in node.attributes:\n names_to_keys[attribute.full_name] = attribute.checkpoint_key\n saveables = self._builder._ValidateAndSliceInputs(self._var_list) # pylint: disable=protected-access\n for saveable in saveables:\n for spec in saveable.specs:\n if spec.name not in names_to_keys:\n raise errors.NotFoundError(\n None, None,\n message=(\"Attempting to load an object-based checkpoint using \"\n \"variable names, but could not find %s in the \"\n \"checkpoint.\") % spec.name)\n spec.name = names_to_keys[spec.name]\n if self._object_restore_saver is None:\n # Cache the Saver so multiple restore() calls don't pollute the graph when\n # graph building. This assumes keys are consistent (i.e. this is the same\n # type of object-based checkpoint we saw previously).\n self._object_restore_saver = Saver(saveables)\n self._object_restore_saver.restore(sess=sess, save_path=save_path)\n\n @staticmethod\n def _add_collection_def(meta_graph_def, key, export_scope=None):\n \"\"\"Adds a collection to MetaGraphDef protocol buffer.\n\n Args:\n meta_graph_def: MetaGraphDef protocol buffer.\n key: One of the GraphKeys or user-defined string.\n export_scope: Optional `string`. Name scope to remove.\n \"\"\"\n meta_graph.add_collection_def(meta_graph_def, key,\n export_scope=export_scope)\n\n\ndef _prefix_to_checkpoint_path(prefix, format_version):\n \"\"\"Returns the pathname of a checkpoint file, given the checkpoint prefix.\n\n For V1 checkpoint, simply returns the prefix itself (the data file). 
For V2,\n returns the pathname to the index file.\n\n Args:\n prefix: a string, the prefix of a checkpoint.\n format_version: the checkpoint format version that corresponds to the\n prefix.\n Returns:\n The pathname of a checkpoint file, taking into account the checkpoint\n format version.\n \"\"\"\n if format_version == saver_pb2.SaverDef.V2:\n return prefix + \".index\" # The index file identifies a checkpoint.\n return prefix # Just the data file.\n\n\n@tf_export(\"train.latest_checkpoint\")\ndef latest_checkpoint(checkpoint_dir, latest_filename=None):\n \"\"\"Finds the filename of latest saved checkpoint file.\n\n Args:\n checkpoint_dir: Directory where the variables were saved.\n latest_filename: Optional name for the protocol buffer file that\n contains the list of most recent checkpoint filenames.\n See the corresponding argument to `Saver.save()`.\n\n Returns:\n The full path to the latest checkpoint or `None` if no checkpoint was found.\n \"\"\"\n # Pick the latest checkpoint based on checkpoint state.\n ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)\n if ckpt and ckpt.model_checkpoint_path:\n # Look for either a V2 path or a V1 path, with priority for V2.\n v2_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,\n saver_pb2.SaverDef.V2)\n v1_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,\n saver_pb2.SaverDef.V1)\n if file_io.get_matching_files(v2_path) or file_io.get_matching_files(\n v1_path):\n return ckpt.model_checkpoint_path\n else:\n logging.error(\"Couldn't match files for checkpoint %s\",\n ckpt.model_checkpoint_path)\n return None\n\n\n@tf_export(\"train.import_meta_graph\")\ndef import_meta_graph(meta_graph_or_file, clear_devices=False,\n import_scope=None, **kwargs):\n \"\"\"Recreates a Graph saved in a `MetaGraphDef` proto.\n\n This function takes a `MetaGraphDef` protocol buffer as input. If\n the argument is a file containing a `MetaGraphDef` protocol buffer ,\n it constructs a protocol buffer from the file content. The function\n then adds all the nodes from the `graph_def` field to the\n current graph, recreates all the collections, and returns a saver\n constructed from the `saver_def` field.\n\n In combination with `export_meta_graph()`, this function can be used to\n\n * Serialize a graph along with other Python objects such as `QueueRunner`,\n `Variable` into a `MetaGraphDef`.\n\n * Restart training from a saved graph and checkpoints.\n\n * Run inference from a saved graph and checkpoints.\n\n ```Python\n ...\n # Create a saver.\n saver = tf.train.Saver(...variables...)\n # Remember the training_op we want to run by adding it to a collection.\n tf.add_to_collection('train_op', train_op)\n sess = tf.Session()\n for step in xrange(1000000):\n sess.run(train_op)\n if step % 1000 == 0:\n # Saves checkpoint, which by default also exports a meta_graph\n # named 'my-model-global_step.meta'.\n saver.save(sess, 'my-model', global_step=step)\n ```\n\n Later we can continue training from this saved `meta_graph` without building\n the model from scratch.\n\n ```Python\n with tf.Session() as sess:\n new_saver = tf.train.import_meta_graph('my-save-dir/my-model-10000.meta')\n new_saver.restore(sess, 'my-save-dir/my-model-10000')\n # tf.get_collection() returns a list. 
In this example we only want the\n # first one.\n train_op = tf.get_collection('train_op')[0]\n for step in xrange(1000000):\n sess.run(train_op)\n ```\n\n NOTE: Restarting training from saved `meta_graph` only works if the\n device assignments have not changed.\n\n Args:\n meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including\n the path) containing a `MetaGraphDef`.\n clear_devices: Whether or not to clear the device field for an `Operation`\n or `Tensor` during import.\n import_scope: Optional `string`. Name scope to add. Only used when\n initializing from protocol buffer.\n **kwargs: Optional keyed arguments.\n\n Returns:\n A saver constructed from `saver_def` in `MetaGraphDef` or None.\n\n A None value is returned if no variables exist in the `MetaGraphDef`\n (i.e., there are no variables to restore).\n\n Raises:\n RuntimeError: If called with eager execution enabled.\n\n @compatibility(eager)\n Exporting/importing meta graphs is not supported. No graph exists when eager\n execution is enabled.\n @end_compatibility\n \"\"\" # pylint: disable=g-doc-exception\n if context.executing_eagerly():\n raise RuntimeError(\"Exporting/importing meta graphs is not supported when \"\n \"eager execution is enabled. No graph exists when eager \"\n \"execution is enabled.\")\n if not isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):\n meta_graph_def = meta_graph.read_meta_graph_file(meta_graph_or_file)\n else:\n meta_graph_def = meta_graph_or_file\n\n imported_vars = meta_graph.import_scoped_meta_graph(\n meta_graph_def,\n clear_devices=clear_devices,\n import_scope=import_scope,\n **kwargs)\n\n if meta_graph_def.HasField(\"saver_def\"):\n # Infer the scope that is prepended by `import_scoped_meta_graph`.\n scope = import_scope\n var_names = list(imported_vars.keys())\n if var_names:\n sample_key = var_names[0]\n sample_var = imported_vars[sample_key]\n scope = sample_var.name[:-len(sample_key)]\n\n return Saver(saver_def=meta_graph_def.saver_def, name=scope)\n else:\n if variables._all_saveable_objects(): # pylint: disable=protected-access\n # Return the default saver instance for all graph variables.\n return Saver()\n else:\n # If no graph variables exist, then a Saver cannot be constructed.\n logging.info(\"Saver not created because there are no variables in the\"\n \" graph to restore\")\n return None\n\n\n@tf_export(\"train.export_meta_graph\")\ndef export_meta_graph(filename=None,\n meta_info_def=None,\n graph_def=None,\n saver_def=None,\n collection_list=None,\n as_text=False,\n graph=None,\n export_scope=None,\n clear_devices=False,\n clear_extraneous_savers=False,\n strip_default_attrs=False,\n **kwargs):\n # pylint: disable=line-too-long\n \"\"\"Returns `MetaGraphDef` proto. Optionally writes it to filename.\n\n This function exports the graph, saver, and collection objects into\n `MetaGraphDef` protocol buffer with the intention of it being imported\n at a later time or location to restart training, run inference, or be\n a subgraph.\n\n Args:\n filename: Optional filename including the path for writing the\n generated `MetaGraphDef` protocol buffer.\n meta_info_def: `MetaInfoDef` protocol buffer.\n graph_def: `GraphDef` protocol buffer.\n saver_def: `SaverDef` protocol buffer.\n collection_list: List of string keys to collect.\n as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.\n graph: The `Graph` to export. If `None`, use the default graph.\n export_scope: Optional `string`. Name scope under which to extract\n the subgraph. 
The scope name will be stripped from the node definitions\n for easy import later into new name scopes. If `None`, the whole graph\n is exported. graph_def and export_scope cannot both be specified.\n clear_devices: Whether or not to clear the device field for an `Operation`\n or `Tensor` during export.\n clear_extraneous_savers: Remove any Saver-related information from the\n graph (both Save/Restore ops and SaverDefs) that are not associated\n with the provided SaverDef.\n strip_default_attrs: Boolean. If `True`, default-valued attributes will be\n removed from the NodeDefs. For a detailed guide, see\n [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).\n **kwargs: Optional keyed arguments.\n\n Returns:\n A `MetaGraphDef` proto.\n\n Raises:\n ValueError: When the `GraphDef` is larger than 2GB.\n RuntimeError: If called with eager execution enabled.\n\n @compatibility(eager)\n Exporting/importing meta graphs is not supported. No graph exists when eager\n execution is enabled.\n @end_compatibility\n \"\"\"\n # pylint: enable=line-too-long\n if context.executing_eagerly():\n raise RuntimeError(\"Exporting/importing meta graphs is not supported when \"\n \"eager execution is enabled. No graph exists when eager \"\n \"execution is enabled.\")\n meta_graph_def, _ = meta_graph.export_scoped_meta_graph(\n filename=filename,\n meta_info_def=meta_info_def,\n graph_def=graph_def,\n saver_def=saver_def,\n collection_list=collection_list,\n as_text=as_text,\n graph=graph,\n export_scope=export_scope,\n clear_devices=clear_devices,\n clear_extraneous_savers=clear_extraneous_savers,\n strip_default_attrs=strip_default_attrs,\n **kwargs)\n return meta_graph_def\n\n\n@tf_export(\"train.checkpoint_exists\")\ndef checkpoint_exists(checkpoint_prefix):\n \"\"\"Checks whether a V1 or V2 checkpoint exists with the specified prefix.\n\n This is the recommended way to check if a checkpoint exists, since it takes\n into account the naming difference between V1 and V2 formats.\n\n Args:\n checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking\n priority. Typically the result of `Saver.save()` or that of\n `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or\n V1/V2.\n Returns:\n A bool, true iff a checkpoint referred to by `checkpoint_prefix` exists.\n \"\"\"\n pathname = _prefix_to_checkpoint_path(checkpoint_prefix,\n saver_pb2.SaverDef.V2)\n if file_io.get_matching_files(pathname):\n return True\n elif file_io.get_matching_files(checkpoint_prefix):\n return True\n else:\n return False\n\n\n@tf_export(\"train.get_checkpoint_mtimes\")\ndef get_checkpoint_mtimes(checkpoint_prefixes):\n \"\"\"Returns the mtimes (modification timestamps) of the checkpoints.\n\n Globs for the checkpoints pointed to by `checkpoint_prefixes`. If the files\n exist, collect their mtime. 
Both V2 and V1 checkpoints are considered, in\n that order of priority.\n\n This is the recommended way to get the mtimes, since it takes into account\n the naming difference between V1 and V2 formats.\n\n Args:\n checkpoint_prefixes: a list of checkpoint paths, typically the results of\n `Saver.save()` or those of `tf.train.latest_checkpoint()`, regardless of\n sharded/non-sharded or V1/V2.\n Returns:\n A list of mtimes (in seconds) of the found checkpoints.\n \"\"\"\n mtimes = []\n\n def match_maybe_append(pathname):\n fnames = file_io.get_matching_files(pathname)\n if fnames:\n # stat() reports mtime in nanoseconds; dividing by 1e9 yields seconds.\n mtimes.append(file_io.stat(fnames[0]).mtime_nsec / 1e9)\n return True\n return False\n\n for checkpoint_prefix in checkpoint_prefixes:\n # Tries V2's metadata file first.\n pathname = _prefix_to_checkpoint_path(checkpoint_prefix,\n saver_pb2.SaverDef.V2)\n if match_maybe_append(pathname):\n continue\n # Otherwise, tries V1, where the prefix is the complete pathname.\n match_maybe_append(checkpoint_prefix)\n\n return mtimes\n\n\nops.register_proto_function(\n ops.GraphKeys.SAVERS,\n proto_type=saver_pb2.SaverDef,\n to_proto=Saver.to_proto,\n from_proto=Saver.from_proto)\n" ]
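For orientation before this row's metadata fields, here is a minimal, hedged sketch of the save/restore workflow that the `saver.py` source above documents, written against the TF 1.x graph-mode API it defines. The variable names and the `ckpt/` directory are illustrative, not taken from the row:

```python
# Minimal sketch of the Saver workflow documented above (assumes TensorFlow
# 1.x graph mode; names and paths are illustrative).
import os
import tensorflow as tf

os.makedirs("ckpt", exist_ok=True)

v1 = tf.Variable(tf.zeros([10]), name="v1")
v2 = tf.Variable(tf.ones([10]), name="v2")

# Dict keys become the names the tensors are stored under in the checkpoint;
# max_to_keep drives the deletion logic in _MaybeDeleteOldCheckpoints().
saver = tf.train.Saver({"v1": v1, "v2": v2}, max_to_keep=3)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(0, 3000, 1000):
        # global_step appends "-<step>" to the prefix, e.g. "ckpt/model-2000";
        # the CheckpointState proto in "ckpt/checkpoint" tracks recent files.
        saver.save(sess, "ckpt/model", global_step=step)

with tf.Session() as sess:
    # latest_checkpoint() resolves the newest prefix via get_checkpoint_state().
    saver.restore(sess, tf.train.latest_checkpoint("ckpt"))
```

`restore()` itself initializes the variables from disk, which is why the second session runs no initializer before using them.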
[ [ "numpy.dot", "numpy.sqrt", "numpy.asarray", "tensorflow.python.ops.variables.Variable", "tensorflow.python.ops.init_ops.random_uniform_initializer", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.init_ops.orthogonal_initializer", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.python.ops.partitioned_variables.variable_axis_size_partitioner", "numpy.allclose", "numpy.arange", "tensorflow.python.ops.math_ops.linspace", "numpy.eye", "numpy.linalg.det", "tensorflow.python.ops.init_ops.ones_initializer", "tensorflow.python.platform.test.main", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.ops.init_ops.uniform_unit_scaling_initializer", "numpy.zeros", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.array_ops.slice", "tensorflow.python.ops.init_ops.convolutional_orthogonal_2d", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.python.ops.linalg_ops.norm", "tensorflow.python.ops.variables.global_variables_initializer", "numpy.array", "tensorflow.python.framework.random_seed.set_random_seed", "tensorflow.python.ops.init_ops.convolutional_delta_orthogonal", "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.array_ops.concat", "numpy.abs", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.ops.init_ops.zeros_initializer", "numpy.ones", "tensorflow.python.ops.init_ops.random_normal_initializer", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.random_ops.random_normal", "numpy.prod", "tensorflow.python.ops.init_ops.identity_initializer", "tensorflow.python.ops.init_ops.truncated_normal_initializer", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.resource_variable_ops.shape_safe_assign_variable_handle", "tensorflow.python.platform.tf_logging.error", "tensorflow.python.ops.variables._all_saveable_objects", "tensorflow.python.framework.meta_graph.read_meta_graph_file", "tensorflow.python.lib.io.file_io.delete_file", "tensorflow.python.ops.string_ops.string_join", "tensorflow.python.framework.ops.add_to_collection", "tensorflow.core.protobuf.checkpointable_object_graph_pb2.CheckpointableObjectGraph", "tensorflow.python.framework.device.DeviceSpec.from_string", "tensorflow.python.training.training_util.global_step", "tensorflow.python.framework.ops.device", "tensorflow.python.platform.gfile.IsDirectory", "tensorflow.python.training.checkpoint_state_pb2.CheckpointState", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.io_ops.save_v2", "tensorflow.python.ops.gen_io_ops.merge_v2_checkpoints", "tensorflow.python.framework.meta_graph.import_scoped_meta_graph", "tensorflow.python.framework.ops.register_proto_function", "tensorflow.python.ops.gen_io_ops.sharded_filename", "tensorflow.python.ops.control_flow_ops.with_dependencies", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.ops.io_ops.restore_v2", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.pywrap_tensorflow.NewCheckpointReader", "tensorflow.python.framework.meta_graph.export_scoped_meta_graph", "tensorflow.python.util.compat.as_str", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.lib.io.file_io.stat", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.ops.gen_io_ops.sharded_filespec", 
"tensorflow.python.framework.ops.prepend_name_scope", "tensorflow.python.lib.io.file_io.file_exists", "tensorflow.python.lib.io.file_io.read_file_to_string", "tensorflow.python.framework.ops.internal_convert_to_tensor", "tensorflow.core.protobuf.saver_pb2.SaverDef", "tensorflow.python.framework.device.canonical_name", "tensorflow.python.ops.io_ops._save", "tensorflow.python.framework.ops.strip_name_scope", "tensorflow.python.lib.io.file_io.get_matching_files", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.framework.meta_graph.add_collection_def", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.framework.errors.NotFoundError", "tensorflow.python.framework.constant_op.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.4", "2.3", "2.9", "2.5", "2.6", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "1.4", "2.2", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.6", "1.2", "2.10" ] } ]
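The saver snippet above resolves each checkpoint prefix by trying the V2 metadata file before falling back to the V1 pathname. Below is a minimal standalone sketch of that same lookup using only the Python standard library; the helper name checkpoint_mtimes is hypothetical, and only the ".index" suffix convention for V2 metadata files is taken from the code above.

import glob
import os

def checkpoint_mtimes(checkpoint_prefixes):
    """Return the mtimes (in seconds) of the found checkpoints."""
    mtimes = []
    for prefix in checkpoint_prefixes:
        # Try the V2 metadata file ("<prefix>.index") first, then fall
        # back to V1, where the prefix is the complete pathname.
        for candidate in (prefix + ".index", prefix):
            matches = glob.glob(candidate)
            if matches:
                # os.stat reports seconds, matching mtime_nsec / 1e9 above.
                mtimes.append(os.stat(matches[0]).st_mtime)
                break
    return mtimes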
WatChMaL/CNN
[ "2e14397bca6ced2fdfeab406e3c28561bb3af384" ]
[ "plot_utils/plot_utils.py" ]
[ "\"\"\"\nSource code borrowed from https://github.com/WatChMaL/UVicWorkshopPlayground/blob/master/B/notebooks/utils/utils.py\nEdited by: Abhishek.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nimport math\nimport os\nimport sys\nimport pandas as pd\n\nimport matplotlib as mpl\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm, DivergingNorm\nfrom scipy.stats import gaussian_kde\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.metrics import roc_curve, auc\n\n# Module for suppressing overflow exceptions\nfrom contextlib import suppress\n\n# Set the style\nplt.style.use(\"classic\")\n\n# Fix the colour scheme for each particle type\ncolor_dict = {\"gamma\":\"red\", \"e\":\"blue\", \"mu\":\"green\"}\n\n# Function to convert from the true particle energies to visible energies\ndef convert_to_visible_energy(energies, labels):\n \n \"\"\"\n convert_to_visible_energy(energies, labels)\n \n Purpose : Convert the true event energies to visible energy collected by the PMTs\n \n Args: energies ... 1D array of event energies, the length = sample size\n labels ... 1D array of true label value, the length = sample size\n \"\"\"\n \n # Convert true particle energies to visible energies\n m_mu = 105.7\n m_e = 0.511\n m_p = 0.511\n\n # Constant for the inverse refractive index of water\n beta = 0.75\n\n # Denominator for the scaling factor to be used for the Cherenkov threshold\n dem = math.sqrt(1 - beta**2)\n \n # Perform the conversion from true particle energy to visible energy\n for i in range(len(energies)):\n if(labels[i] == 0):\n energies[i] = max((energies[i] - (m_e / dem) - (m_p / dem)), 0)\n elif(labels[i] == 1):\n energies[i] = max((energies[i] - (m_e / dem)), 0)\n elif(labels[i] == 2):\n energies[i] = max((energies[i] - (m_mu / dem)), 0)\n \n return energies\n\n# Function to plot the energy distribution over a given dataset\ndef plot_event_energy_distribution(energies, labels, label_dict, dset_type=\"full\", show_plot=False, save_path=None):\n \n \"\"\"\n plot_event_energy_distribution(energies, labels, label_dict, dset_type=\"full\", show_plot=False, save_path=None)\n \n Purpose : Plot the visible energy distribution for each event type over a given dataset\n \n Args: energies ... 1D array of event energies, the length = sample size\n labels ... 1D array of true label value, the length = sample size\n label_dict ... Dictionary with the keys as event types and values as labels, default=None\n dset_type ... String describing the type of dataset (full, train, validation, test), default=\"full\"\n show_plot[optional] ... Boolean to determine whether to display the plot, default=False\n save_path[optional] ... 
Path to save the plot as an image, default=None\n \"\"\"\n # Assertions\n assert label_dict is not None\n \n # Extract the event energies corresponding to given event types\n energies_dict = {}\n for key in label_dict.keys():\n energies_dict[key] = energies[labels==label_dict[key]]\n \n fig, axes = plt.subplots(3,1,figsize=(16,12))\n plt.subplots_adjust(hspace=0.6)\n \n for label in energies_dict.keys():\n label_to_use = r\"$\\{0}$\".format(label) if label != \"e\" else r\"${0}$\".format(label)\n \n axes[label_dict[label]].hist(energies_dict[label], bins=50, density=False, label=label_to_use, alpha=0.9,\n color=color_dict[label])\n axes[label_dict[label]].tick_params(labelsize=20)\n axes[label_dict[label]].legend(prop={\"size\":20})\n axes[label_dict[label]].grid(True, which=\"both\", axis=\"both\")\n axes[label_dict[label]].set_ylabel(\"Frequency\", fontsize=20)\n axes[label_dict[label]].set_xlabel(\"Event Visible Energy (MeV)\", fontsize=20)\n axes[label_dict[label]].set_xlim(0, max(energies)+20)\n axes[label_dict[label]].set_title(\"Energy distribution for \" + label_to_use + \" over the \" + dset_type + \" dataset\",\n fontsize=20)\n \n if save_path is not None:\n plt.savefig(save_path, format='eps', dpi=300)\n \n if show_plot:\n plt.show()\n else:\n plt.clf() # Clear the plot frame\n plt.close() # Close the opened window if any\n\n\n# Function to plot a confusion matrix\ndef plot_confusion_matrix(labels, predictions, energies, class_names, min_energy=0, max_energy=1500, \n show_plot=False, save_path=None):\n \n \"\"\"\n plot_confusion_matrix(labels, predictions, energies, class_names, min_energy, max_energy, save_path=None)\n \n Purpose : Plot the confusion matrix for a given energy interval\n \n Args: labels ... 1D array of true label value, the length = sample size\n predictions ... 1D array of predictions, the length = sample size\n energies ... 1D array of event energies, the length = sample size\n class_names ... 1D array of string label for classification targets, the length = number of categories\n min_energy ... Minimum energy for the events to consider\n max_energy ... Maximum energy for the events to consider\n show_plot[optional] ... Boolean to determine whether to display the plot\n save_path[optional] ... 
Path to save the plot as an image\n \"\"\"\n \n # Create a mapping to extract the events in the given energy interval\n energy_slice_map = [False for i in range(len(energies))]\n for i in range(len(energies)):\n if(energies[i] >= min_energy and energies[i] < max_energy):\n energy_slice_map[i] = True\n \n # Filter the CNN outputs based on the energy intervals\n labels = labels[energy_slice_map]\n predictions = predictions[energy_slice_map]\n \n if(show_plot or save_path is not None):\n fig, ax = plt.subplots(figsize=(12,8),facecolor='w')\n num_labels = len(class_names)\n max_value = np.max([np.max(np.unique(labels)), np.max(np.unique(predictions))])\n assert max_value < num_labels\n mat,_,_,im = ax.hist2d(predictions, labels,\n bins=(num_labels,num_labels),\n range=((-0.5,num_labels-0.5),(-0.5,num_labels-0.5)),cmap=plt.cm.Blues)\n\n # Normalize the confusion matrix\n mat = mat.astype(\"float\") / mat.sum(axis=0)[:, np.newaxis]\n\n cbar = plt.colorbar(im, ax=ax)\n cbar.ax.tick_params(labelsize=20) \n \n ax.set_xticks(np.arange(num_labels))\n ax.set_yticks(np.arange(num_labels))\n ax.set_xticklabels(class_names,fontsize=20)\n ax.set_yticklabels(class_names,fontsize=20)\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n plt.setp(ax.get_yticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n ax.set_xlabel('Prediction',fontsize=20)\n ax.set_ylabel('True Label',fontsize=20)\n for i in range(mat.shape[0]):\n for j in range(mat.shape[1]):\n ax.text(i,j, r\"${0:0.3f}$\".format(mat[i,j]),\n ha=\"center\", va=\"center\", fontsize=20,\n color=\"white\" if mat[i,j] > (0.5*mat.max()) else \"black\")\n fig.tight_layout()\n plt.title(\"Confusion matrix, \" + r\"${0} \\leq E < {1}$\".format(min_energy, max_energy), fontsize=20) \n \n if save_path is not None:\n plt.savefig(save_path, format='eps', dpi=300)\n \n if show_plot:\n plt.show()\n else:\n plt.clf() # Clear the plot frame\n plt.close() # Close the opened window if any\n\n# Plot the classifier for a given event type for several true event types\ndef plot_classifier_response(softmaxes, labels, energies, softmax_index_dict, event_dict, min_energy=0,\n max_energy=1500, num_bins=100, show_plot=False, save_path=None):\n \n \"\"\"\n plot_classifier_response(softmaxes, labels, energies, softmax_index_dict, event_dict, min_energy=0,\n max_energy=1500, num_bins=100, show_plot=False, save_path=None)\n \n Purpose : Plot the classifier softmax response for a given event type for several true event types\n \n Args: softmaxes ... 2D array of softmax output, length = sample size,\n dimensions = (n_samples, n_classes)\n labels ... 1D array of true labels\n energies ... 1D array of visible event energies\n softmax_index_dict ... Dictionary with the keys as event types and values as column \n indices in the softmax array, default=None\n event_dict ... Dictionary with the softmax class as the key and column indices\n in the softmax array as the values\n min_energy ... Minimum energy for the events to consider, default=0\n max_energy ... Maximum energy for the events to consider, default=1500\n num_bins[optional] ... Number of bins to use per histogram, default=100\n show_plot[optional] ... Boolean to determine whether to show the plot, default=False\n save_path[optional] ... 
Path to save the plot to, format='eps', default=None\n \n \"\"\"\n \n assert softmaxes is not None and softmaxes.any() != None\n assert labels is not None and labels.any() != None\n assert energies is not None\n \n # Initialize the plot and corresponding parameters\n fig, ax = plt.subplots(figsize=(12,8),facecolor=\"w\")\n ax.tick_params(axis=\"both\", labelsize=20)\n \n # Get the softmax output class for which to plot the response\n event = list(event_dict.keys())[0]\n \n for event_type in softmax_index_dict.keys():\n \n label_to_use = r\"$\\{0}$ events\".format(event_type) if event_type is not \"e\" else r\"${0}$ events\".format(event_type)\n \n # Get the softmax values for the given true event label\n label_map = [False for i in range(len(labels))]\n for i in range(len(labels)):\n if( labels[i] == softmax_index_dict[event_type] ):\n label_map[i] = True\n \n # Get the softmax values for the given true event label\n curr_softmax = softmaxes[label_map]\n\n # Get the energy values for the given true event label\n curr_energies = energies[label_map]\n\n # Create a mapping to extract the energies in\n energy_slice_map = [False for i in range(len(curr_energies))]\n for i in range(len(curr_energies)):\n if(curr_energies[i] >= min_energy and curr_energies[i] < max_energy):\n energy_slice_map[i] = True\n\n # Filter the CNN outputs based on the energy intervals\n curr_softmax = curr_softmax[energy_slice_map]\n curr_softmax = curr_softmax[:,event_dict[event]]\n \n if(curr_softmax.shape[0] <= 0):\n return None, None, None\n else:\n values, bins, patches = plt.hist(curr_softmax, bins=num_bins, density=False,\n label= label_to_use, color=color_dict[event_type],\n alpha=0.9, stacked=True)\n \n if save_path is not None or show_plot:\n ax.grid(True)\n if event is not \"e\":\n ax.set_xlabel(r\"Classifier softmax output : $P(\\{0})$\".format(event), fontsize=20)\n else:\n ax.set_xlabel(r\"Classifier softmax output : $P(e)$\".format(event), fontsize=20)\n\n ax.set_ylabel(\"Count (Log scaled)\", fontsize=20)\n plt.yscale(\"log\")\n\n ax.set_xlim(0,1)\n\n plt.legend(loc=\"upper left\", prop={\"size\":20})\n \n plt.title(r\"${0} \\leq E < {1}$\".format(min_energy, max_energy), fontsize=20)\n \n if save_path is not None:\n plt.savefig(save_path, format='eps', dpi=300)\n \n if show_plot:\n plt.show()\n else:\n plt.clf() # Clear the current figure\n plt.close() # Close the opened window\n \n return values, bins, patches\n\n# Plot the ROC curve for one vs another class\ndef plot_ROC_curve_one_vs_one(softmaxes, labels, energies, softmax_index_dict, label_0, label_1, min_energy=0,\n max_energy=1500, show_plot=False, save_path=None):\n \"\"\"\n plot_ROC_curve_one_vs_one(softmaxes, labels, energies, softmax_index_dict, \n min_energy, max_energy, show_plot=False, save_path=None)\n \n Purpose : Plot the Reciver Operating Characteristic (ROC) curve given the softmax values and true labels\n \n Args: softmaxes ... 2D array of softmax output, length = sample size, dimensions = n_samples, n_classes\n labels ... 1D array of true labels\n energies ... 1D array of visible event energies\n softmax_index_dict ... Dictionary with the keys as event type (str) and values as the column indices \n in the np softmax array\n label_0 ... Event type for which to plot the ROC for\n label_1 ... Event type for which to plot the ROC against\n min_energy ... Minimum energy for the events to consider, default=0\n max_energy ... Maximum energy for the events to consider, default=1000\n show_plot[optional] ... 
Boolean to determine whether to show the plot, default=False\n save_path[optional] ... Path to save the plot to, format='eps', default=None\n \"\"\"\n \n assert softmaxes is not None\n assert labels is not None\n assert softmax_index_dict is not None\n assert softmaxes.shape[0] == labels.shape[0]\n assert label_0 in softmax_index_dict.keys()\n assert label_1 in softmax_index_dict.keys()\n \n # Create a mapping to extract the energies in\n energy_slice_map = [False for i in range(len(energies))]\n for i in range(len(energies)):\n if(energies[i] >= min_energy and energies[i] < max_energy):\n energy_slice_map[i] = True\n \n # Filter the CNN outputs based on the energy intervals\n curr_softmax = softmaxes[energy_slice_map]\n curr_labels = labels[energy_slice_map]\n \n # Extract the useful softmax and labels from the input arrays\n softmax_0 = curr_softmax[curr_labels==softmax_index_dict[label_0]]# or \n labels_0 = curr_labels[curr_labels==softmax_index_dict[label_0]] #or \n \n softmax_1 = curr_softmax[curr_labels==softmax_index_dict[label_1]]\n labels_1 = curr_labels[curr_labels==softmax_index_dict[label_1]]\n \n # Add the two arrays\n softmax = np.concatenate((softmax_0, softmax_1), axis=0)\n labels = np.concatenate((labels_0, labels_1), axis=0)\n \n # Binarize the labels\n binary_labels_1 = label_binarize(labels, classes=[softmax_index_dict[label_0], softmax_index_dict[label_1]])\n binary_labels_0 = 1 - binary_labels_1\n\n # Compute the ROC curve and the AUC for class corresponding to label 0\n fpr_0, tpr_0, threshold_0 = roc_curve(binary_labels_0, softmax[:,softmax_index_dict[label_0]])\n \n inv_fpr_0 = []\n for i in fpr_0:\n inv_fpr_0.append(1/i) if i != 0 else inv_fpr_0.append(1/1e-3)\n \n roc_auc_0 = auc(fpr_0, tpr_0)\n \n # Compute the ROC curve and the AUC for class corresponding to label 1\n fpr_1, tpr_1, threshold_1 = roc_curve(binary_labels_1, softmax[:,softmax_index_dict[label_1]])\n \n inv_fpr_1 = []\n for i in fpr_1:\n inv_fpr_1.append(1/i) if i != 0 else inv_fpr_1.append(1/1e-3)\n \n roc_auc_1 = auc(fpr_1, tpr_1)\n \n if show_plot or save_path is not None:\n # Plot the ROC curves\n fig, ax = plt.subplots(figsize=(16,9),facecolor=\"w\")\n ax.tick_params(axis=\"both\", labelsize=20)\n\n ax.plot(tpr_0, inv_fpr_0, color=color_dict[label_0],\n label=r\"$\\{0}$, AUC ${1:0.3f}$\".format(label_0, roc_auc_0) if label_0 is not \"e\" else r\"${0}$, AUC ${1:0.3f}$\".format(label_0, roc_auc_0),\n linewidth=1.0, marker=\".\", markersize=4.0, markerfacecolor=color_dict[label_0])\n\n ax.grid(True)\n xlabel = r\"$\\{0}$ signal efficiency\".format(label_0) if label_0 is not \"e\" else r\"${0}$ signal efficiency\".format(label_0)\n ylabel = r\"$\\{0}$ background rejection\".format(label_1) if label_1 is not \"e\" else r\"${0}$ background rejection\".format(label_1)\n \n ax.set_xlabel(xlabel, fontsize=20) \n ax.set_ylabel(ylabel, fontsize=20)\n \n ax.set_yscale(\"log\")\n ax.set_title(r\"${0} \\leq E < {1}$\".format(min_energy, max_energy), fontsize=20)\n ax.legend(loc=\"upper right\", prop={\"size\":20})\n\n if save_path is not None:\n plt.savefig(save_path, format='eps', dpi=300)\n \n if show_plot:\n plt.show()\n else:\n plt.clf() # Clear the current figure\n plt.close() # Close the opened window\n \n \n return fpr_0, tpr_0, threshold_0, roc_auc_0, fpr_1, tpr_1, threshold_1, roc_auc_1\n\n# Plot signal efficiency for a given event type at different energies\ndef plot_signal_efficiency(softmaxes, labels, energies, softmax_index_dict, label_0, label_1,\n avg_efficiencies=[0.2, 0.5, 0.8], 
avg_efficiency_colors=None,\n energy_interval=25, min_energy=100, max_energy=1000,\n num_bins=100, show_plot=False, save_path=None):\n \n \"\"\"\n plot_signal_efficiency(softmaxes, labels, energies, softmax_index_dict, label_0, label_1,\n avg_efficiencies=[0.2, 0.5, 0.8], energy_interval=25,\n avg_efficiency_colors=None, min_energy=100, max_energy=1000,\n num_bins=100, show_plot=False, save_path=None)\n \n Purpose : Plot the signal efficiency vs energy for several thresholds\n \n Args: softmaxes ... 2D array of softmax output, length = sample size, dimensions = n_samples, n_classes\n labels ... 1D array of true labels\n energies ... 1D array of visible event energies\n softmax_index_dict ... Dictionary with the keys as event type (str) and values as the column indices \n in the np softmax array. Should only contain two key-value pairs.\n label_0 ... Event type to plot the signal efficiency for\n label_1 ... Event type to plot the signal efficiency against\n avg_efficiencies ... 1D array with the average efficiency values for which to plot the signal efficiency\n vs energy plot, default=[0.2, 0.5, 0.8]\n avg_efficiency_colors ... Average efficiencies color dictionary to use. The keys are the items in the\n avg_efficiencies list and values are the colors to be used.\n energy_interval ... Energy interval to be used to calculate the response curve and calculating the signal \n efficiency, default=25\n min_energy ... Minimum energy for the events to consider, default=100\n max_energy ... Maximum energy for the events to consider, default=1000\n num_bins ... Number of bins to use in the classifier response histogram ( \n should be greater than 100 to prevent 0 values )\n show_plot[optional] ... Boolean to determine whether to show the plot, default=False\n save_path[optional] ... 
Path to save the plot to, format='eps', default=None\n \"\"\"\n \n # Assertions to check for valid inputs\n assert softmaxes is not None\n assert labels is not None\n assert energies is not None\n \n # Need high number of bins to avoid empty values\n assert num_bins >= 100\n assert label_0 in softmax_index_dict.keys()\n assert label_1 in softmax_index_dict.keys()\n \n # Calculate the threshold here according to the desired average efficiencies\n _, _, threshold_0, _, _, tpr_1, threshold_1, _ = plot_ROC_curve_one_vs_one(softmaxes, labels, \n energies,\n softmax_index_dict,\n label_0,\n label_1,\n min_energy,\n max_energy,\n show_plot=False)\n \n thresholds = []\n tolerance = 0.25\n \n # Get the index o\n for tpr_value in avg_efficiencies:\n \n index_list = []\n \n for i in range(len(tpr_1)):\n if(math.fabs(tpr_1[i]-tpr_value) < 0.001):\n index_list.append(i)\n \n if(len(index_list) == 0):\n lower_tpr, lower_index, upper_index, upper_tpr = 0.0, 0, 0, 1.0\n for i in range(len(tpr_1)):\n if(tpr_1[i] < tpr_value and tpr_1[i] > lower_tpr):\n lower_index = i\n lower_tpr = tpr_1[i]\n if(tpr_1[i] > tpr_value):\n upper_index = i\n upper_tpr = tpr_1[i]\n break\n if(upper_tpr - lower_tpr > tolerance):\n print(\"\"\"plot_utils.plot_signal_efficiency() : Unable to calculate threshold for average_efficiency = \n {0}\"\"\".format(tpr_value))\n return None\n else:\n thresholds.append(round((threshold_1[lower_index] + threshold_1[upper_index])/2, 2))\n \n else:\n index = index_list[math.floor(len(index_list)/2)]\n thresholds.append(round(threshold_1[index], 2))\n\n # Get the energy intervals to plot the signal efficiency against ( replace with max(energies) ) \n energy_lb = [min_energy+(energy_interval*i) for i in range(math.ceil((max_energy-min_energy)/energy_interval))]\n energy_ub = [energy_low+energy_interval for energy_low in energy_lb]\n \n # Epsilon to ensure the plots are OK for low efficiency thresholds\n epsilon = 0.0001\n \n # Plot the signal efficiency vs energy\n fig = plt.figure(figsize=(32,18), facecolor=\"w\")\n \n for threshold, efficiency in zip(thresholds, avg_efficiencies):\n \n # Values to be plotted at the end\n signal_efficiency = []\n energy_values = []\n \n # Value for the previous non-zero events\n prev_non_zero_efficiency = 0.0\n \n # Iterate over the energy intervals computing the efficiency\n for energy_lower, energy_upper in zip(energy_lb, energy_ub):\n values, bins, _ = plot_classifier_response(softmaxes, labels, energies,\n {label_0:softmax_index_dict[label_0]},\n {label_0:softmax_index_dict[label_0]},\n energy_lower, energy_upper,\n num_bins=num_bins, show_plot=False)\n if values is None or bins is None:\n print(\"\"\"plot_utils.plot_signal_efficiency() : No events for the energy interval {0} to {1}.\n Unable to plot.\"\"\".format(energy_lower, energy_upper))\n return None\n \n total_true_events = np.sum(values)\n num_true_events_selected = np.sum(values[bins[:len(bins)-1] > threshold-epsilon])\n \n curr_interval_efficiency = num_true_events_selected/total_true_events if total_true_events > 0 else 0\n\n if(curr_interval_efficiency == 0):\n curr_interval_efficiency = prev_non_zero_efficiency\n else:\n prev_non_zero_efficiency = curr_interval_efficiency\n\n # Add two times once for the lower energy bound and once for the upper energy bound\n signal_efficiency.append(curr_interval_efficiency)\n signal_efficiency.append(curr_interval_efficiency)\n\n # Add the lower and upper energy bounds\n energy_values.append(energy_lower)\n energy_values.append(energy_upper)\n\n label_to_use 
= r\"Average signal efficiency = {0}, Threshold = {1:0.3f}\".format(efficiency, threshold)\n\n if(avg_efficiency_colors != None):\n plt.plot(energy_values, signal_efficiency, color=avg_efficiency_colors[threshold], linewidth=2.0,\n marker=\".\", markersize=6.0, markerfacecolor=avg_efficiency_colors[threshold], label=label_to_use)\n else:\n plt.plot(energy_values, signal_efficiency, linewidth=2.0, marker=\".\", markersize=6.0, label=label_to_use)\n \n \n\n if(label_0 is not \"e\"):\n title = r\"Signal Efficiency vs Energy for $\\{0}$ events.\".format(label_0)\n else:\n title = r\"Signal Efficiency vs Energy for ${0}$ events.\".format(label_0)\n \n plt.title(title, fontsize=20)\n plt.grid(True)\n \n plt.xlim([min_energy, max_energy])\n plt.ylim([0, 1.05])\n plt.tick_params(axis=\"both\", labelsize=20)\n \n plt.xlabel(\"Event Visible Energy (MeV)\", fontsize=20)\n plt.ylabel(\"Signal Efficiency\", fontsize=20)\n plt.legend(loc=\"upper left\", prop={\"size\":20})\n \n if save_path is not None:\n plt.savefig(save_path, format='eps', dpi=300)\n \n if show_plot:\n plt.show()\n else:\n plt.clf() # Clear the current figure\n plt.close() # Close the opened window\n\n# Plot background rejection for a given event\ndef plot_background_rejection(softmaxes, labels, energies, softmax_index_dict, label_0, label_1,\n avg_efficiencies=[0.2, 0.5, 0.8], avg_efficiency_colors=None,\n energy_interval=25, min_energy=100, max_energy=1000, num_bins=100,\n show_plot=False, save_path=None):\n \n \"\"\"\n plot_background_rejection(softmaxes, labels, energies, softmax_index_dict, event,\n avg_efficiencies=[0.2, 0.5, 0.8], avg_efficiency_color=None,\n energy_interval=25, min_energy=100, max_energy=1000, num_bins=100,\n show_plot=False, save_path=None)\n \n Purpose : Plot the background rejection vs energy for several thresholds\n \n Args: softmaxes ... 2D array of softmaxes output, length = sample size, dimensions = n_samples, n_classes\n labels ... 1D array of true labels\n energies ... 1D array of visible event energies\n softmax_index_dict ... Dictionary with the keys as event type (str) and values as the column indices \n in the np softmaxes array\n label_0 ... Event type for which to plot the background rejection for\n label_1 ... Event type for which to plot the background rejection against\n avg_efficiencies ... 1D array with the average efficiency values for which to plot the signal efficiency\n vs energy plot, default=[0.2, 0.5, 0.8]\n avg_efficiency_colors ... Average efficiencies color dictionary to use. The keys are the iterms in the\n avg_efficiencies list and values are the colors to be used.\n energy_interval ... Energy interval to be used to calculate the response curve and calculating the signal \n efficiency, default=25\n min_energy ... Minimum energy for the events to consider, default=0\n max_energy ... Maximum energy for the events to consider, default=1000\n show_plot[optional] ... Boolean to determine whether to show the plot, default=False\n save_path[optional] ... 
Path to save the plot to, format='eps', default=None\n \"\"\"\n \n # Assertions to check for valid inputs\n assert softmaxes is not None\n assert labels is not None\n assert energies is not None\n \n # Need high number of bins to avoid empty values\n assert num_bins >= 100\n assert label_0 in softmax_index_dict.keys()\n assert label_1 in softmax_index_dict.keys()\n \n # Calculate the threshold here according to the desired average efficiencies\n _, _, threshold_0, _, _, tpr_1, threshold_1, _ = plot_ROC_curve_one_vs_one(softmaxes, labels, \n energies,\n softmax_index_dict,\n label_0,\n label_1,\n min_energy,\n max_energy,\n show_plot=False)\n \n thresholds = []\n threshold_index_dict = {}\n tolerance = 0.25\n \n # Get the threshold index for each requested average efficiency\n for tpr_value in avg_efficiencies:\n \n index_list = []\n \n for i in range(len(tpr_1)):\n if(math.fabs(tpr_1[i]-tpr_value) < 0.001):\n index_list.append(i)\n \n if(len(index_list) == 0):\n lower_tpr, lower_index, upper_index, upper_tpr = 0.0, 0, 0, 1.0\n for i in range(len(tpr_1)):\n if(tpr_1[i] < tpr_value and tpr_1[i] > lower_tpr):\n lower_index = i\n lower_tpr = tpr_1[i]\n if(tpr_1[i] > tpr_value):\n upper_index = i\n upper_tpr = tpr_1[i]\n break\n if(upper_tpr - lower_tpr > tolerance):\n print(\"\"\"plot_utils.plot_background_rejection() : Unable to calculate threshold for average\n efficiency = {0}\"\"\".format(tpr_value))\n return None\n else:\n thresholds.append(round((threshold_1[lower_index] + threshold_1[upper_index])/2, 2))\n \n else:\n index = index_list[math.floor(len(index_list)/2)]\n thresholds.append(round(threshold_1[index], 2))\n \n # Get the energy intervals to plot the signal efficiency against ( replace with max(energies) ) \n energy_lb = [min_energy+(energy_interval*i) for i in range(math.ceil((max_energy-min_energy)/energy_interval))]\n energy_ub = [energy_low+energy_interval for energy_low in energy_lb]\n \n # Epsilon to ensure the plots are OK for low efficiency thresholds\n epsilon = 0.0001\n \n # Plot the background rejection vs energy\n fig = plt.figure(figsize=(32,18), facecolor=\"w\")\n \n for threshold, efficiency in zip(thresholds, avg_efficiencies):\n \n # Initialize the dictionary to hold the background rejection values\n background_rejection_dict = {}\n for key in softmax_index_dict.keys():\n if(key != label_0):\n background_rejection_dict[key] = []\n \n energy_values = []\n \n # List of all the keys for background rejection\n background_rejection_keys = list(background_rejection_dict.keys())\n \n # Add an extra color to the color dict for total background rejection\n color_dict[\"total\"] = \"black\"\n \n # Iterate over the energy intervals to compute the background rejection\n for key in background_rejection_dict.keys():\n\n # Value for the previous non-zero events\n prev_non_zero_rejection = 0.0\n\n # Initialize the dict to pass\n if( key == \"total\" ):\n pass_dict = softmax_index_dict.copy()\n del pass_dict[label_0]\n else:\n pass_dict = {key:softmax_index_dict[key]}\n\n for energy_lower, energy_upper in zip(energy_lb, energy_ub):\n\n values, bins, _ = plot_classifier_response(softmaxes, labels, energies, pass_dict,\n {label_0:softmax_index_dict[label_0]},\n energy_lower, energy_upper, \n num_bins=num_bins, show_plot=False)\n \n # Find the number of false events rejected\n if values is None or bins is None:\n print(\"\"\"plot_utils.plot_background_rejection() : No events for the energy interval {0} to {1}.\n Unable to plot.\"\"\".format(energy_lower, energy_upper))\n return None\n \n # Find the number of false events 
rejected\n total_false_events = np.sum(values)\n num_false_events_rejected = np.sum(values[bins[:len(bins)-1] < threshold])\n \n curr_interval_rejection = num_false_events_rejected/total_false_events if total_false_events > 0 else 0\n\n if(curr_interval_rejection == 0):\n curr_interval_rejection = prev_non_zero_rejection\n else:\n prev_non_zero_rejection = curr_interval_rejection\n\n # Add two times once for the lower energy bound and once for the upper energy bound\n background_rejection_dict[key].append(curr_interval_rejection)\n background_rejection_dict[key].append(curr_interval_rejection)\n\n # If the key is the last key in the dict\n if( key == background_rejection_keys[len(background_rejection_keys)-1]):\n\n # Add the lower and upper energy bounds\n energy_values.append(energy_lower)\n energy_values.append(energy_upper)\n \n for key in background_rejection_keys:\n \n label_to_use = None\n if( key == \"total\" ):\n label_to_use = r\"Average signal efficiency = {0}, Threshold = {1:0.3f}\".format(efficiency, threshold)\n elif( key == \"e\" ):\n label_to_use = r\"Average signal efficiency = {0}, Threshold = {1:0.3f}\".format(efficiency, threshold)\n else:\n label_to_use = r\"Average signal efficiency = {0}, Threshold = {1:0.3f}\".format(efficiency, threshold)\n\n if(avg_efficiency_colors != None):\n plt.plot(energy_values, background_rejection_dict[key], color=avg_efficiency_colors[threshold], \n linewidth=2.0, marker=\".\", markersize=6.0, markerfacecolor=avg_efficiency_colors[threshold],\n label=label_to_use)\n else:\n plt.plot(energy_values, background_rejection_dict[key], linewidth=2.0, marker=\".\", markersize=6.0,\n label=label_to_use)\n \n \n # Delete the total key from the color dict\n del color_dict[\"total\"]\n \n if label_0 is not \"e\" and key is not \"e\":\n title = r\"$\\{0}$ Background rejection vs Energy for selecting $\\{1}$ events.\".format(key, label_0)\n elif label_0 is \"e\":\n title = r\"$\\{0}$ Background rejection vs Energy for selecting ${1}$ events.\".format(key, label_0)\n elif key is \"e\":\n title = r\"${0}$ Background rejection vs Energy for selecting $\\{1}$ events.\".format(key, label_0)\n \n plt.title(title, fontsize=20)\n plt.grid(True)\n \n plt.xlim([min_energy, max_energy])\n plt.ylim([0.0, 1.05])\n plt.tick_params(axis=\"both\", labelsize=20)\n \n plt.xlabel(\"Event visible energy (MeV)\", fontsize=20)\n plt.ylabel(\"Background rejection\", fontsize=20)\n plt.legend(loc=\"upper left\", prop={\"size\":20})\n \n if save_path is not None:\n plt.savefig(save_path, format='eps', dpi=300)\n else:\n plt.show()\n\n\n# 10x10 square represents one mPMT\n# List of top-left pixel positions (row,col) for 2x2 grids representing PMTs 0 to 18\nPOS_MAP = [(8,4), #0\n (7,2), #1\n (6,0), #2\n (4,0), #3\n (2,0), #4\n (1,1), #5\n (0,4), #6\n (1,6), #7\n (2,8), #8\n (4,8), #9\n (6,8), #10\n (7,6), #11\n # Inner ring\n (6,4), #12\n (5,2), #13\n (3,2), #14\n (2,4), #15\n (3,6), #16\n (5,6), #17\n (4,4)] #18\n\nPADDING = 0\n\ndef get_plot_array(event_data):\n \n # Assertions on the shape of the data and the number of input channels\n assert(len(event_data.shape) == 3 and event_data.shape[2] == 19)\n \n # Extract the number of rows and columns from the event data\n rows = event_data.shape[0]\n cols = event_data.shape[1]\n \n # Make empty output pixel grid\n output = np.zeros(((10+PADDING)*rows, (10+PADDING)*cols))\n \n i, j = 0, 0\n \n for row in range(rows):\n j = 0\n for col in range(cols):\n pmts = event_data[row, col]\n tile(output, (i, j), pmts)\n j += 10 + PADDING\n i 
+= 10 + PADDING\n \n return output\n\ndef tile(canvas, ul, pmts):\n \n # First, create 10x10 grid representing single mpmt\n mpmt = np.zeros((10, 10))\n for i, val in enumerate(pmts):\n mpmt[POS_MAP[i][0]][POS_MAP[i][1]] = val\n\n # Then, place grid on appropriate position on canvas\n for row in range(10):\n for col in range(10):\n canvas[row+ul[0]][col+ul[1]] = mpmt[row][col]\n\n# Plot the reconstructed vs actual events\ndef plot_actual_vs_recon(actual_event, recon_event, label, energy, predicted_label=\"gamma\", predicted_energy=500, show_plot=False, save_path=None):\n \"\"\"\n plot_actual_vs_recon(actual_event=None, recon_event=None, show_plot=False, save_path=None):\n \n Purpose : Plot the actual event vs event reconstructed by the VAE\n \n Args: actual_event ... 3-D NumPy array with the event data, shape=(width, height, depth)\n recon_event ... 3-D NumPy array with the reconstruction data, shape = (width, height, depth)\n label ... Str with the true event label, e.g. \"e\", \"mu\", \"gamma\"\n energy ... Float value of the true energy of the event\n show_plot[optional] ... Boolean to determine whether to show the plot, default=False\n save_path[optional] ... Path to save the plot to, format='eps', default=None\n \"\"\"\n \n # Assertions\n assert actual_event is not None\n assert recon_event is not None\n assert label is not None\n assert energy is not None and energy >= 0\n assert len(actual_event.shape) == 3\n assert len(recon_event.shape) == 3\n \n # Initialize the figure to plot the events\n fig, axes = plt.subplots(2,1,figsize=(32,18))\n plt.subplots_adjust(hspace=0.2)\n \n # Setup the plot\n #lognorm = LogNorm(vmax=max(np.amax(actual_event), np.amax(recon_event)), vmin=0.01, clip=True)\n dvgnorm = DivergingNorm(vcenter=0.1)\n \n # Setup the plot\n if label is not \"e\":\n actual_event_title = r\"Actual event display : $\\{0}$ event with true energy, $E = {1:.3f}$\".format(label, energy)\n else:\n actual_event_title = r\"Actual event display : ${0}$ event with true energy, $E = {1:.3f}$\".format(label, energy)\n \n fig.suptitle(\"Actual vs Reconstructed event display\", fontsize=30)\n \n # Plot the actual event\n im_0 = axes[0].imshow(get_plot_array(actual_event), origin=\"upper\", cmap=\"afmhot\", norm=dvgnorm, clim=(0.1, 10.0))\n \n axes[0].set_title(actual_event_title, fontsize=25)\n axes[0].set_xlabel(\"PMT module X-position\", fontsize=20)\n axes[0].set_ylabel(\"PMT module Y-position\", fontsize=20)\n axes[0].grid(True, which=\"both\", axis=\"both\")\n \n ax0_cbar = fig.colorbar(im_0, extend='both', ax=axes[0])\n ax0_cbar.set_label(r\"Log charge, $log c$\", fontsize=20)\n \n axes[0].tick_params(labelsize=20)\n ax0_cbar.ax.tick_params(labelsize=20) \n \n axes[0].set_xticklabels((axes[0].get_xticks()/10).astype(int))\n axes[0].set_yticklabels((axes[0].get_yticks()/10).astype(int))\n \n # Plot the reconstructed event\n im_1 = axes[1].imshow(get_plot_array(recon_event), origin=\"upper\", cmap=\"afmhot\", norm=dvgnorm, clim=(0.1, 10.0))\n \n if predicted_label is not \"e\":\n recon_event_title = r\"Reconstructed event display : $\\{0}$ event with true energy, $E = {1:.3f}$\".format(predicted_label, predicted_energy)\n else:\n recon_event_title = r\"Reconstructed event display : ${0}$ event with true energy, $E = {1:.3f}$\".format(predicted_label, predicted_energy)\n \n axes[1].set_title(recon_event_title, fontsize=25)\n axes[1].set_xlabel(\"PMT module X-position\", fontsize=20)\n axes[1].set_ylabel(\"PMT module Y-position\", fontsize=20)\n axes[1].grid(True, which=\"both\", 
axis=\"both\")\n \n ax1_cbar = fig.colorbar(im_1, extend='both', ax=axes[1])\n ax1_cbar.set_label(r\"Log charge, $log c$\", fontsize=20)\n \n axes[1].tick_params(labelsize=20)\n ax1_cbar.ax.tick_params(labelsize=20)\n \n axes[1].set_xticklabels((axes[1].get_xticks()/10).astype(int))\n axes[1].set_yticklabels((axes[1].get_yticks()/10).astype(int))\n \n if save_path is not None:\n plt.savefig(save_path, format='eps', dpi=300)\n \n if show_plot:\n plt.show()\n else:\n plt.clf() # Clear the plot frame\n plt.close() # Close the opened window if any\n\n# Plot model performance over the training iterations\ndef plot_training(log_paths, model_names, model_color_dict, downsample_interval=None, legend_loc=(0.8,0.5), show_plot=False, save_path=None):\n \"\"\"\n plot_training_loss(training_directories=None, model_names=None, show_plot=False, save_path=None)\n \n Purpose : Plot the training loss for various models for visual comparison\n \n Args: log_paths ... List contatining the absolute path to the .csv log files\n Type : str\n model_names ... List of the tring model name\n Type : str\n model_color_dict ... Dictionary with the model_names as keys and\n the corresponding colors as values\n downsample_interval ... Downsample interval to smoothen the results,\n Type : int\n legend_loc ... Location of where to put the legend on the plot\n Type : tuple\n Format : (x_pos, y_pos), 0 <= x_pos <= 1, 0 <= y_pos <= 1\n show_plot[optional] ... Boolean to determine whether to show the plot\n Type : Boolean\n save_path[optional] ... Absolute path to save the plot to\n Type : str\n \"\"\"\n \n # Assertions\n assert log_paths is not None\n assert model_names is not None\n assert model_color_dict is not None\n assert len(log_paths) == len(model_names)\n assert len(model_names) == len(model_color_dict.keys())\n \n # Extract the values stored in the .csv log files\n epoch_values = []\n loss_values = []\n acc_values = []\n \n true_epoch_values = []\n true_loss_values = []\n true_acc_values = []\n \n # Iterate over the list of log files provided\n for log_path in log_paths:\n if(os.path.exists(log_path)):\n log_df = pd.read_csv(log_path, usecols=[\"epoch\", \"loss\", \"accuracy\"])\n \n # Downsample the epoch and training loss values w.r.t. 
the downsample interval\n curr_epoch_values = log_df[\"epoch\"].values\n curr_loss_values = log_df[\"loss\"].values\n curr_acc_values = log_df[\"accuracy\"].values\n \n # Downsample using the downsample interval\n \n true_epoch_values.append(curr_epoch_values)\n true_loss_values.append(curr_loss_values)\n true_acc_values.append(curr_acc_values)\n \n if downsample_interval is not None:\n curr_epoch_values_downsampled = []\n curr_loss_values_downsampled = []\n curr_acc_values_downsampled = []\n\n curr_epoch_list = []\n curr_loss_list = []\n curr_acc_list = []\n\n for i in range(1, len(curr_epoch_values)):\n\n if(i%downsample_interval == 0):\n\n # Downsample the values using the mean of the values for the current interval\n curr_epoch_values_downsampled.append(sum(curr_epoch_list)/downsample_interval)\n curr_loss_values_downsampled.append(sum(curr_loss_list)/downsample_interval)\n curr_acc_values_downsampled.append(sum(curr_acc_list)/downsample_interval)\n \n\n # Reset the list for the next interval\n curr_epoch_list = []\n curr_loss_list = []\n curr_acc_list = []\n else:\n # Add the values in the interval to the list\n curr_epoch_list.append(curr_epoch_values[i])\n curr_loss_list.append(curr_loss_values[i]) \n curr_acc_list.append(curr_acc_values[i]) \n\n epoch_values.append(curr_epoch_values_downsampled)\n loss_values.append(curr_loss_values_downsampled)\n acc_values.append(curr_acc_values_downsampled)\n else:\n print(\"Error. log path {0} does not exist\".format(log_path))\n \n # Initialize the plot\n fig, ax1 = plt.subplots(figsize=(16,11))\n ax2 = ax1.twinx()\n \n # Plot the values\n if downsample_interval is None:\n for i, model_name in enumerate(model_names):\n ax1.plot(true_epoch_values[i], true_loss_values[i], \n color=model_color_dict[model_name][0],\n label= model_name + \" loss\")\n ax2.plot(true_epoch_values[i], true_acc_values[i],\n color=model_color_dict[model_name][1],\n label= model_name + \" accuracy\")\n else:\n for i, model_name in enumerate(model_names):\n ax1.plot(true_epoch_values[i], true_loss_values[i],\n color=model_color_dict[model_name][0], alpha=0.5)\n ax1.plot(epoch_values[i], loss_values[i],\n color=model_color_dict[model_name][0],\n label= model_name + \" loss\", alpha=0.9, linewidth=2.0)\n ax2.plot(true_epoch_values[i], true_acc_values[i],\n color=model_color_dict[model_name][1], alpha=0.5)\n ax2.plot(epoch_values[i], acc_values[i],\n color=model_color_dict[model_name][1],\n label= model_name + \" accuracy\", alpha=0.9, linewidth=2.0)\n \n \n # Setup plot characteristics\n ax1.tick_params(axis=\"x\", labelsize=30)\n ax1.set_xlabel(\"Epoch\", fontsize=30)\n \n ax1.set_yscale(\"log\")\n ax1.set_ylabel(\"Log total loss\", fontsize=30, color=model_color_dict[model_name][0])\n ax1.tick_params(axis=\"y\", labelsize=30, colors=model_color_dict[model_name][0])\n\n ax2.set_ylabel(\"Accuracy\", fontsize=30, color=model_color_dict[model_name][1])\n ax2.tick_params(axis=\"y\", labelsize=30, colors=model_color_dict[model_name][1])\n \n plt.grid(True)\n \n lgd = fig.legend(prop={\"size\":30}, bbox_to_anchor=legend_loc)\n fig.suptitle(\"Training vs Epochs\", fontsize=25)\n \n if save_path is not None:\n plt.savefig(save_path, format='eps', dpi=300, bbox_extra_artists=(lgd))\n else:\n plt.show()\n\n# Plot model performance over the training iterations\ndef plot_vae_training(log_paths, model_names, model_color_dict, downsample_interval=None, legend_loc=(0.8,0.5), show_plot=False, save_path=None):\n \"\"\"\n plot_vae_training(log_paths, model_names, model_color_dict, 
downsample_interval=None, legend_loc=(0.8,0.5), show_plot=False, save_path=None)\n \n Purpose : Plot the training loss for various models for visual comparison\n \n Args: log_paths ... List contatining the absolute path to the .csv log files\n Type : str\n model_names ... List of the tring model name\n Type : str\n model_color_dict ... Dictionary with the model_names as keys and\n the corresponding colors as values\n downsample_interval ... Downsample interval to smoothen the results,\n Type : int\n legend_loc ... Location of where to put the legend on the plot\n Type : tuple\n Format : (x_pos, y_pos), 0 <= x_pos <= 1, 0 <= y_pos <= 1\n show_plot[optional] ... Boolean to determine whether to show the plot\n Type : Boolean\n save_path[optional] ... Absolute path to save the plot to\n Type : str\n \"\"\"\n \n # Assertions\n assert log_paths is not None\n assert model_names is not None\n assert model_color_dict is not None\n assert len(log_paths) == len(model_names)\n assert len(model_names) == len(model_color_dict.keys())\n \n # Extract the values stored in the .csv log files\n epoch_values = []\n mse_loss_values = []\n kl_loss_values = []\n \n true_epoch_values = []\n true_mse_loss_values = []\n true_kl_loss_values = []\n \n # Iterate over the list of log files provided\n for log_path in log_paths:\n if(os.path.exists(log_path)):\n log_df = pd.read_csv(log_path, usecols=[\"epoch\", \"recon_loss\", \"kl_loss\"])\n \n # Downsample the epoch and training loss values w.r.t. the downsample interval\n curr_epoch_values = log_df[\"epoch\"].values\n curr_mse_loss_values = log_df[\"recon_loss\"].values\n curr_kl_loss_values = log_df[\"kl_loss\"].values\n \n # Downsample using the downsample interval\n \n true_epoch_values.append(curr_epoch_values)\n true_mse_loss_values.append(curr_mse_loss_values)\n true_kl_loss_values.append(curr_kl_loss_values)\n \n if downsample_interval is not None:\n curr_epoch_values_downsampled = []\n curr_mse_loss_values_downsampled = []\n curr_kl_loss_values_downsampled = []\n\n curr_epoch_list = []\n curr_mse_loss_list = []\n curr_kl_loss_list = []\n\n for i in range(1, len(curr_epoch_values)):\n\n if(i%downsample_interval == 0):\n\n # Downsample the values using the mean of the values for the current interval\n curr_epoch_values_downsampled.append(sum(curr_epoch_list)/downsample_interval)\n curr_mse_loss_values_downsampled.append(sum(curr_mse_loss_list)/downsample_interval)\n curr_kl_loss_values_downsampled.append(sum(curr_kl_loss_list)/downsample_interval)\n \n\n # Reset the list for the next interval\n curr_epoch_list = []\n curr_mse_loss_list = []\n curr_kl_loss_list = []\n else:\n # Add the values in the interval to the list\n curr_epoch_list.append(curr_epoch_values[i])\n curr_mse_loss_list.append(curr_mse_loss_values[i]) \n curr_kl_loss_list.append(curr_kl_loss_values[i]) \n\n epoch_values.append(curr_epoch_values_downsampled)\n mse_loss_values.append(curr_mse_loss_values_downsampled)\n kl_loss_values.append(curr_kl_loss_values_downsampled)\n else:\n print(\"Error. 
log path {0} does not exist\".format(log_path))\n \n # Initialize the plot\n fig, ax1 = plt.subplots(figsize=(16,11))\n ax2 = ax1.twinx()\n \n # Print the mpl rcParams\n mpl.rcParams['agg.path.chunksize']=1e12\n \n # Reload the backend\n mpl.use(mpl.get_backend())\n \n # Plot the values\n if downsample_interval is None:\n for i, model_name in enumerate(model_names):\n ax1.plot(true_epoch_values[i], true_mse_loss_values[i], \n color=model_color_dict[model_name][0],\n label= model_name + \" MSE loss\")\n ax2.plot(true_epoch_values[i], true_kl_loss_values[i],\n color=model_color_dict[model_name][1],\n label= model_name + \" KL loss\")\n else:\n for i, model_name in enumerate(model_names):\n ax1.plot(true_epoch_values[i], true_mse_loss_values[i],\n color=model_color_dict[model_name][0], alpha=0.5)\n ax1.plot(epoch_values[i], mse_loss_values[i],\n color=model_color_dict[model_name][0],\n label= model_name + \" MSE loss\", alpha=0.9, linewidth=2.0)\n ax2.plot(true_epoch_values[i], true_kl_loss_values[i],\n color=model_color_dict[model_name][1], alpha=0.5)\n ax2.plot(epoch_values[i], kl_loss_values[i],\n color=model_color_dict[model_name][1],\n label= model_name + \" KL loss\", alpha=0.9, linewidth=2.0)\n \n # Setup plot characteristics\n ax1.tick_params(axis=\"x\", labelsize=30)\n ax1.set_xlabel(\"Epoch\", fontsize=30)\n \n ax1.set_yscale(\"log\")\n ax1.set_ylabel(\"Log Recon loss\", fontsize=30, color=model_color_dict[model_name][0])\n ax1.tick_params(axis=\"y\", labelsize=30, colors=model_color_dict[model_name][0])\n \n ax2.set_yscale(\"log\")\n ax2.set_ylabel(\"Log KL loss\", fontsize=30, color=model_color_dict[model_name][1])\n ax2.tick_params(axis=\"y\", labelsize=30, colors=model_color_dict[model_name][1])\n \n plt.grid(True)\n \n lgd = fig.legend(prop={\"size\":30}, bbox_to_anchor=legend_loc)\n fig.suptitle(\"Training vs Epochs\", fontsize=25)\n \n if save_path is not None:\n plt.savefig(save_path, format='eps', dpi=300,bbox_extra_artists=(lgd))\n \n if show_plot:\n try:\n plt.show()\n except:\n print(\"plot_utils.plot_vae_training() : Unable to render the plot\" \n + \" due to limits on \\'agg.path.chunksize\\')\")\n if save_path is None:\n print(\"plot_utils.plot_vae_training() : Saving plot to ./{0}\".format(\"vae_training_log.eps\"))\n plt.savefig(\"vae_training_log.eps\", format='eps', dpi=300,bbox_extra_artists=(lgd))\n plt.clf() # Clear the plot frame\n plt.close() # Close the opened window if any\n else:\n plt.clf() # Clear the plot frame\n plt.close() # Close the opened window if any\n\n# Plot model performance over the training iterations\ndef plot_ae_training(log_paths, model_names, model_color_dict, downsample_interval=None, legend_loc=(0.8,0.5), show_plot=False, save_path=None):\n \"\"\"\n plot_ae_training(log_paths, model_names, model_color_dict, downsample_interval=None, legend_loc=(0.8,0.5), show_plot=False, save_path=None)\n \n Purpose : Plot the training loss for various models for visual comparison\n \n Args: log_paths ... List contatining the absolute path to the .csv log files\n Type : str\n model_names ... List of the tring model name\n Type : str\n model_color_dict ... Dictionary with the model_names as keys and\n the corresponding colors as values\n downsample_interval ... Downsample interval to smoothen the results,\n Type : int\n legend_loc ... Location of where to put the legend on the plot\n Type : tuple\n Format : (x_pos, y_pos), 0 <= x_pos <= 1, 0 <= y_pos <= 1\n show_plot[optional] ... 
Boolean to determine whether to show the plot\n Type : Boolean\n save_path[optional] ... Absolute path to save the plot to\n Type : str\n \"\"\"\n \n # Assertions\n assert log_paths is not None\n assert model_names is not None\n assert model_color_dict is not None\n assert len(log_paths) == len(model_names)\n assert len(model_names) == len(model_color_dict.keys())\n \n # Extract the values stored in the .csv log files\n epoch_values = []\n mse_loss_values = []\n \n true_epoch_values = []\n true_mse_loss_values = []\n \n # Iterate over the list of log files provided\n for log_path in log_paths:\n if(os.path.exists(log_path)):\n log_df = pd.read_csv(log_path, usecols=[\"epoch\", \"recon_loss\"])\n \n # Downsample the epoch and training loss values w.r.t. the downsample interval\n curr_epoch_values = log_df[\"epoch\"].values\n curr_mse_loss_values = log_df[\"recon_loss\"].values\n \n # Downsample using the downsample interval\n \n true_epoch_values.append(curr_epoch_values)\n true_mse_loss_values.append(curr_mse_loss_values)\n \n if downsample_interval is not None:\n curr_epoch_values_downsampled = []\n curr_mse_loss_values_downsampled = []\n\n curr_epoch_list = []\n curr_mse_loss_list = []\n\n for i in range(1, len(curr_epoch_values)):\n\n if(i%downsample_interval == 0):\n\n # Downsample the values using the mean of the values for the current interval\n curr_epoch_values_downsampled.append(sum(curr_epoch_list)/downsample_interval)\n curr_mse_loss_values_downsampled.append(sum(curr_mse_loss_list)/downsample_interval)\n \n\n # Reset the list for the next interval\n curr_epoch_list = []\n curr_mse_loss_list = []\n else:\n # Add the values in the interval to the list\n curr_epoch_list.append(curr_epoch_values[i])\n curr_mse_loss_list.append(curr_mse_loss_values[i]) \n\n epoch_values.append(curr_epoch_values_downsampled)\n mse_loss_values.append(curr_mse_loss_values_downsampled)\n else:\n print(\"Error. 
log path {0} does not exist\".format(log_path))\n \n # Initialize the plot\n fig, ax1 = plt.subplots(figsize=(16,11))\n \n # Print the mpl rcParams\n mpl.rcParams['agg.path.chunksize']=1e12\n \n # Reload the backend\n mpl.use(mpl.get_backend())\n \n # Plot the values\n if downsample_interval is None:\n for i, model_name in enumerate(model_names):\n ax1.plot(true_epoch_values[i], true_mse_loss_values[i], \n color=model_color_dict[model_name][0],\n label= model_name + \" MSE loss\")\n else:\n for i, model_name in enumerate(model_names):\n ax1.plot(true_epoch_values[i], true_mse_loss_values[i],\n color=model_color_dict[model_name][0], alpha=0.5)\n ax1.plot(epoch_values[i], mse_loss_values[i],\n color=model_color_dict[model_name][0],\n label= model_name + \" MSE loss\", alpha=0.9, linewidth=2.0)\n \n # Setup plot characteristics\n ax1.tick_params(axis=\"x\", labelsize=30)\n ax1.set_xlabel(\"Epoch\", fontsize=30)\n \n ax1.set_yscale(\"log\")\n ax1.set_ylabel(\"Log Recon loss\", fontsize=30, color=model_color_dict[model_name][0])\n ax1.tick_params(axis=\"y\", labelsize=30, colors=model_color_dict[model_name][0])\n \n plt.grid(True)\n \n lgd = fig.legend(prop={\"size\":30}, bbox_to_anchor=legend_loc)\n fig.suptitle(\"Training vs Epochs\", fontsize=25)\n \n if save_path is not None:\n plt.savefig(save_path, format='eps', dpi=300,bbox_extra_artists=(lgd))\n \n if show_plot:\n try:\n plt.show()\n except:\n print(\"plot_utils.plot_ae_training() : Unable to render the plot\" \n + \" due to limits on \\'agg.path.chunksize\\')\")\n if save_path is None:\n print(\"plot_utils.plot_ae_training() : Saving plot to ./{0}\".format(\"vae_training_log.eps\"))\n plt.savefig(\"vae_training_log.eps\", format='eps', dpi=300,bbox_extra_artists=(lgd))\n plt.clf() # Clear the plot frame\n plt.close() # Close the opened window if any\n else:\n plt.clf() # Clear the plot frame\n plt.close() # Close the opened window if any\n\n# Plot the charge distribution for a given batch\ndef plot_charge_hist(event, recon, iteration, num_bins=100):\n \n # Flatten the input numpy arrays\n event = event.reshape(-1,1)\n recon = recon.reshape(-1,1)\n \n # Initialize the plot and corresponding parameters\n fig, ax = plt.subplots(figsize=(16,9),facecolor=\"w\")\n ax.tick_params(axis=\"both\", labelsize=20)\n \n # Setup the bins beforehand\n bins = np.linspace(min(np.amin(event),np.amin(recon),1),\n max(np.amax(event),np.amax(recon)),\n num_bins)\n\n # Plot the histograms overlaid\n plt.hist(event, bins, density=False,\n label=\"actual\", color=\"red\",\n alpha=0.5, stacked=True)\n \n plt.hist(recon, bins, density=False,\n label=\"reconstructed\", color=\"blue\",\n alpha=0.5, stacked=True)\n \n # Setup the axes\n ax.set_xlabel(\"Charge, c\", fontsize=20)\n ax.set_ylabel(\"Number of hits\", fontsize=20)\n \n plt.yscale(\"log\")\n plt.legend(loc=\"upper right\", prop={\"size\":20})\n plt.title(r\"Actual vs Reconstructed charge distribution at iteration = ${0}$\".format(iteration)\n ,fontsize=20)\n \n plt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.amax", "numpy.concatenate", "matplotlib.pyplot.plot", "pandas.read_csv", "numpy.unique", "numpy.arange", "matplotlib.colors.DivergingNorm", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots_adjust", "numpy.zeros", "matplotlib.pyplot.style.use", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.amin", "matplotlib.pyplot.ylim", "sklearn.metrics.roc_curve", "matplotlib.pyplot.savefig", "matplotlib.get_backend", "sklearn.metrics.auc", "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "numpy.sum", "matplotlib.pyplot.ylabel", "sklearn.preprocessing.label_binarize", "matplotlib.pyplot.yscale", "matplotlib.pyplot.subplots", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.tick_params" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
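As a usage illustration for the plotting helpers in the file above, here is a minimal sketch with synthetic inputs; the import path and all data are assumptions for demonstration, not part of the repository.

import numpy as np
from plot_utils.plot_utils import plot_confusion_matrix  # assumed import path

# Synthetic three-class outputs: softmax scores, true labels, energies.
rng = np.random.default_rng(0)
n = 1000
softmaxes = rng.dirichlet(np.ones(3), size=n)
labels = rng.integers(0, 3, size=n)
predictions = softmaxes.argmax(axis=1)
energies = rng.uniform(0.0, 1500.0, size=n)

# Confusion matrix restricted to events with 0 <= E < 1500 MeV,
# using the label convention from color_dict: gamma=0, e=1, mu=2.
plot_confusion_matrix(labels, predictions, energies,
                      class_names=["gamma", "e", "mu"],
                      min_energy=0, max_energy=1500,
                      show_plot=True)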
Giuseppe5/NeMo
[ "f946aca100c9a1bf22e6bd25fba9f80299722112", "f946aca100c9a1bf22e6bd25fba9f80299722112", "f946aca100c9a1bf22e6bd25fba9f80299722112", "f946aca100c9a1bf22e6bd25fba9f80299722112", "f946aca100c9a1bf22e6bd25fba9f80299722112", "f946aca100c9a1bf22e6bd25fba9f80299722112" ]
[ "collections/nemo_asr/nemo_asr/losses.py", "collections/nemo_nlp/nemo_nlp/data/datasets/joint_intent_slot.py", "collections/nemo_tts/nemo_tts/parts/waveglow.py", "collections/nemo_tts/nemo_tts/data_layers.py", "collections/nemo_nlp/nemo_nlp/utils/callbacks/sentence_classification.py", "nemo/nemo/backends/pytorch/common/losses.py" ]
[ "# Copyright (c) 2019 NVIDIA Corporation\nimport torch\nimport torch.nn as nn\n\nfrom nemo.backends.pytorch.nm import LossNM\nfrom nemo.core.neural_types import (NeuralType, AxisType, BatchTag, TimeTag,\n ChannelTag)\n\n\nclass CTCLossNM(LossNM):\n \"\"\"\n Neural Module wrapper for pytorch's ctcloss\n\n Args:\n num_classes (int): Number of characters in ASR model's vocab/labels.\n This count should not include the CTC blank symbol.\n \"\"\"\n @staticmethod\n def create_ports():\n input_ports = {\n \"log_probs\": NeuralType({1: AxisType(TimeTag),\n 0: AxisType(BatchTag),\n 2: AxisType(ChannelTag)}),\n\n \"targets\": NeuralType({0: AxisType(BatchTag),\n 1: AxisType(TimeTag)}),\n\n \"input_length\": NeuralType({0: AxisType(BatchTag)}),\n\n \"target_length\": NeuralType({0: AxisType(BatchTag)})\n }\n\n output_ports = {\"loss\": NeuralType(None)}\n return input_ports, output_ports\n\n def __init__(self, *, num_classes, **kwargs):\n LossNM.__init__(self, **kwargs)\n\n # self._blank = self.local_parameters.get('blank', 0)\n self._blank = num_classes\n self._criterion = nn.CTCLoss(blank=self._blank,\n reduction='none')\n\n def _loss(self, log_probs, targets, input_length, target_length):\n input_length = input_length.long()\n target_length = target_length.long()\n targets = targets.long()\n loss = self._criterion(log_probs.transpose(1, 0), targets,\n input_length,\n target_length)\n # note that this is different from reduction = 'mean'\n # because we are not dividing by target lengths\n loss = torch.mean(loss)\n return loss\n\n def _loss_function(self, **kwargs):\n return self._loss(*(kwargs.values()))\n", "# Copyright 2018 The Google AI Language Team Authors and\n# The HuggingFace Inc. team.\n# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUtility functions for Token Classification NLP tasks\nSome parts of this code were adapted from the HuggingFace library at\nhttps://github.com/huggingface/pytorch-pretrained-BERT\n\"\"\"\n\nimport itertools\nimport random\n\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom nemo.utils.exp_logging import get_logger\n\nfrom . 
import utils\n\n\nlogger = get_logger('')\n\n\ndef get_features(queries,\n max_seq_length,\n tokenizer,\n pad_label=128,\n raw_slots=None,\n ignore_extra_tokens=False,\n ignore_start_end=False):\n all_subtokens = []\n all_loss_mask = []\n all_subtokens_mask = []\n all_segment_ids = []\n all_input_ids = []\n all_input_mask = []\n sent_lengths = []\n all_slots = []\n\n with_label = False\n if raw_slots is not None:\n with_label = True\n\n for i, query in enumerate(queries):\n words = query.strip().split()\n subtokens = ['[CLS]']\n loss_mask = [1 - ignore_start_end]\n subtokens_mask = [0]\n if with_label:\n slots = [pad_label]\n\n for j, word in enumerate(words):\n word_tokens = tokenizer.tokenize(word)\n subtokens.extend(word_tokens)\n\n loss_mask.append(1)\n loss_mask.extend([not ignore_extra_tokens] *\n (len(word_tokens) - 1))\n\n subtokens_mask.append(1)\n subtokens_mask.extend([0] * (len(word_tokens) - 1))\n\n if with_label:\n slots.extend([raw_slots[i][j]] * len(word_tokens))\n\n subtokens.append('[SEP]')\n loss_mask.append(not ignore_start_end)\n subtokens_mask.append(0)\n sent_lengths.append(len(subtokens))\n all_subtokens.append(subtokens)\n all_loss_mask.append(loss_mask)\n all_subtokens_mask.append(subtokens_mask)\n all_input_mask.append([1] * len(subtokens))\n if with_label:\n slots.append(pad_label)\n all_slots.append(slots)\n\n max_seq_length = min(max_seq_length, max(sent_lengths))\n logger.info(f'Max length: {max_seq_length}')\n utils.get_stats(sent_lengths)\n too_long_count = 0\n\n for i, subtokens in enumerate(all_subtokens):\n if len(subtokens) > max_seq_length:\n subtokens = ['[CLS]'] + subtokens[-max_seq_length + 1:]\n all_input_mask[i] = [1] + all_input_mask[i][-max_seq_length + 1:]\n all_loss_mask[i] = [1 - ignore_start_end] + \\\n all_loss_mask[i][-max_seq_length + 1:]\n all_subtokens_mask[i] = [0] + \\\n all_subtokens_mask[i][-max_seq_length + 1:]\n\n if with_label:\n all_slots[i] = [pad_label] + all_slots[i][-max_seq_length + 1:]\n too_long_count += 1\n\n all_input_ids.append([tokenizer._convert_token_to_id(t)\n for t in subtokens])\n\n if len(subtokens) < max_seq_length:\n extra = (max_seq_length - len(subtokens))\n all_input_ids[i] = all_input_ids[i] + [0] * extra\n all_loss_mask[i] = all_loss_mask[i] + [0] * extra\n all_subtokens_mask[i] = all_subtokens_mask[i] + [0] * extra\n all_input_mask[i] = all_input_mask[i] + [0] * extra\n\n if with_label:\n all_slots[i] = all_slots[i] + [pad_label] * extra\n\n all_segment_ids.append([0] * max_seq_length)\n\n logger.info(f'{too_long_count} are longer than {max_seq_length}')\n\n return (all_input_ids,\n all_segment_ids,\n all_input_mask,\n all_loss_mask,\n all_subtokens_mask,\n all_slots)\n\n\nclass BertJointIntentSlotDataset(Dataset):\n \"\"\"\n Creates dataset to use for the task of joint intent\n and slot classification with pretrained model.\n\n Converts from raw data to an instance that can be used by\n NMDataLayer.\n\n For dataset to use during inference without labels, see\n BertJointIntentSlotInferDataset.\n\n Args:\n input_file (str): file to sequence + label.\n the first line is header (sentence [tab] label)\n each line should be [sentence][tab][label]\n slot_file (str): file to slot labels, each line corresponding to\n slot labels for a sentence in input_file. No header.\n max_seq_length (int): max sequence length minus 2 for [CLS] and [SEP]\n tokenizer (Tokenizer): such as BertTokenizer\n num_samples (int): number of samples you want to use for the dataset.\n If -1, use all dataset. 
Useful for testing.\n shuffle (bool): whether to shuffle your data.\n pad_label (int): pad value use for slot labels.\n by default, it's the neutral label.\n\n \"\"\"\n\n def __init__(self,\n input_file,\n slot_file,\n max_seq_length,\n tokenizer,\n num_samples=-1,\n shuffle=True,\n pad_label=128,\n ignore_extra_tokens=False,\n ignore_start_end=False\n ):\n if num_samples == 0:\n raise ValueError(\"num_samples has to be positive\", num_samples)\n\n with open(slot_file, 'r') as f:\n slot_lines = f.readlines()\n\n with open(input_file, 'r') as f:\n input_lines = f.readlines()[1:]\n\n assert len(slot_lines) == len(input_lines)\n\n dataset = list(zip(slot_lines, input_lines))\n\n if shuffle or num_samples > 0:\n random.shuffle(dataset)\n if num_samples > 0:\n dataset = dataset[:num_samples]\n\n raw_slots, queries, raw_intents = [], [], []\n for slot_line, input_line in dataset:\n raw_slots.append([int(slot) for slot in slot_line.strip().split()])\n parts = input_line.strip().split()\n raw_intents.append(int(parts[-1]))\n queries.append(' '.join(parts[:-1]))\n\n features = get_features(queries,\n max_seq_length,\n tokenizer,\n pad_label=pad_label,\n raw_slots=raw_slots,\n ignore_extra_tokens=ignore_extra_tokens,\n ignore_start_end=ignore_start_end)\n self.all_input_ids = features[0]\n self.all_segment_ids = features[1]\n self.all_input_mask = features[2]\n self.all_loss_mask = features[3]\n self.all_subtokens_mask = features[4]\n self.all_slots = features[5]\n self.all_intents = raw_intents\n\n def __len__(self):\n return len(self.all_input_ids)\n\n def __getitem__(self, idx):\n return (np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n np.array(self.all_input_mask[idx]),\n np.array(self.all_loss_mask[idx]),\n np.array(self.all_subtokens_mask[idx]),\n self.all_intents[idx],\n np.array(self.all_slots[idx]))\n\n\nclass BertJointIntentSlotInferDataset(Dataset):\n \"\"\"\n Creates dataset to use for the task of joint intent\n and slot classification with pretrained model.\n\n Converts from raw data to an instance that can be used by\n NMDataLayer.\n\n This is to be used during inference only.\n For dataset to use during training with labels, see\n BertJointIntentSlotDataset.\n\n Args:\n queries (list): list of queries to run inference on\n max_seq_length (int): max sequence length minus 2 for [CLS] and [SEP]\n tokenizer (Tokenizer): such as BertTokenizer\n pad_label (int): pad value use for slot labels.\n by default, it's the neutral label.\n\n \"\"\"\n\n def __init__(self,\n queries,\n max_seq_length,\n tokenizer):\n\n features = get_features(queries,\n max_seq_length,\n tokenizer)\n\n self.all_input_ids = features[0]\n self.all_segment_ids = features[1]\n self.all_input_mask = features[2]\n self.all_loss_mask = features[3]\n self.all_subtokens_mask = features[4]\n\n def __len__(self):\n return len(self.all_input_ids)\n\n def __getitem__(self, idx):\n return (np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n np.array(self.all_input_mask[idx], dtype=np.float32),\n np.array(self.all_loss_mask[idx]),\n np.array(self.all_subtokens_mask[idx]))\n", "# Copyright (c) 2019 NVIDIA Corporation\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\[email protected]\ndef fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):\n n_channels_int = n_channels[0]\n in_act = input_a + input_b\n t_act = torch.tanh(in_act[:, :n_channels_int, :])\n s_act = torch.sigmoid(in_act[:, n_channels_int:, :])\n acts = t_act * s_act\n 
return acts\n\n\nclass Invertible1x1Conv(torch.nn.Module):\n \"\"\"\n The layer outputs both the convolution, and the log determinant\n of its weight matrix. If reverse=True it does convolution with\n inverse\n \"\"\"\n\n def __init__(self, c):\n super(Invertible1x1Conv, self).__init__()\n self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,\n bias=False)\n\n # Sample a random orthonormal matrix to initialize weights\n W = torch.qr(torch.FloatTensor(c, c).normal_())[0]\n\n # Ensure determinant is 1.0 not -1.0\n if torch.det(W) < 0:\n W[:, 0] = -1 * W[:, 0]\n W = W.view(c, c, 1)\n self.conv.weight.data = W\n\n def forward(self, z, reverse=False):\n # shape\n batch_size, group_size, n_of_groups = z.size()\n\n W = self.conv.weight.squeeze()\n\n if reverse:\n if not hasattr(self, 'W_inverse'):\n # Reverse computation\n W_inverse = W.float().inverse()\n W_inverse = Variable(W_inverse[..., None])\n if (z.type() == 'torch.cuda.HalfTensor'\n or z.type() == 'torch.HalfTensor'):\n W_inverse = W_inverse.half()\n self.W_inverse = W_inverse\n z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)\n return z\n else:\n # Forward computation\n log_det_W = batch_size * n_of_groups * torch.logdet(W.float())\n z = self.conv(z)\n return z, log_det_W\n\n\nclass WN(torch.nn.Module):\n \"\"\"\n This is the WaveNet like layer for the affine coupling. The primary\n difference from WaveNet is the convolutions need not be causal. There is\n also no dilation size reset. The dilation only doubles on each layer\n \"\"\"\n\n def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels,\n kernel_size):\n super(WN, self).__init__()\n assert(kernel_size % 2 == 1)\n assert(n_channels % 2 == 0)\n self.n_layers = n_layers\n self.n_channels = n_channels\n self.in_layers = torch.nn.ModuleList()\n self.res_skip_layers = torch.nn.ModuleList()\n self.cond_layers = torch.nn.ModuleList()\n\n start = torch.nn.Conv1d(n_in_channels, n_channels, 1)\n start = torch.nn.utils.weight_norm(start, name='weight')\n self.start = start\n\n # Initializing last layer to 0 makes the affine coupling layers\n # do nothing at first. 
This helps with training stability\n end = torch.nn.Conv1d(n_channels, 2 * n_in_channels, 1)\n end.weight.data.zero_()\n end.bias.data.zero_()\n self.end = end\n\n for i in range(n_layers):\n dilation = 2 ** i\n padding = int((kernel_size * dilation - dilation) / 2)\n in_layer = torch.nn.Conv1d(n_channels, 2 * n_channels, kernel_size,\n dilation=dilation, padding=padding)\n in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')\n self.in_layers.append(in_layer)\n\n cond_layer = torch.nn.Conv1d(n_mel_channels, 2 * n_channels, 1)\n cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')\n self.cond_layers.append(cond_layer)\n\n # last one is not necessary\n if i < n_layers - 1:\n res_skip_channels = 2 * n_channels\n else:\n res_skip_channels = n_channels\n res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)\n res_skip_layer = torch.nn.utils.weight_norm(\n res_skip_layer, name='weight')\n self.res_skip_layers.append(res_skip_layer)\n\n def forward(self, forward_input):\n audio, spect = forward_input\n audio = self.start(audio)\n\n for i in range(self.n_layers):\n acts = fused_add_tanh_sigmoid_multiply(\n self.in_layers[i](audio),\n self.cond_layers[i](spect),\n torch.IntTensor([self.n_channels]))\n\n res_skip_acts = self.res_skip_layers[i](acts)\n if i < self.n_layers - 1:\n audio = res_skip_acts[:, :self.n_channels, :] + audio\n skip_acts = res_skip_acts[:, self.n_channels:, :]\n else:\n skip_acts = res_skip_acts\n\n if i == 0:\n output = skip_acts\n else:\n output = skip_acts + output\n return self.end(output)\n\n\nclass WaveGlow(torch.nn.Module):\n def __init__(self, n_mel_channels, n_flows, n_group, n_early_every,\n n_early_size, WN_config):\n super(WaveGlow, self).__init__()\n\n self.upsample = torch.nn.ConvTranspose1d(n_mel_channels,\n n_mel_channels,\n 1024, stride=256)\n assert(n_group % 2 == 0)\n self.n_flows = n_flows\n self.n_group = n_group\n self.n_early_every = n_early_every\n self.n_early_size = n_early_size\n self.WN = torch.nn.ModuleList()\n self.convinv = torch.nn.ModuleList()\n\n n_half = int(n_group / 2)\n\n # Set up layers with the right sizes based on how many dimensions\n # have been output already\n n_remaining_channels = n_group\n for k in range(n_flows):\n if k % self.n_early_every == 0 and k > 0:\n n_half = n_half - int(self.n_early_size / 2)\n n_remaining_channels = n_remaining_channels - self.n_early_size\n self.convinv.append(Invertible1x1Conv(n_remaining_channels))\n self.WN.append(WN(n_half, n_mel_channels * n_group, **WN_config))\n self.n_remaining_channels = n_remaining_channels\n\n def forward(self, forward_input):\n \"\"\"\n forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames\n forward_input[1] = audio: batch x time\n \"\"\"\n spect, audio = forward_input\n\n # Upsample spectrogram to size of audio\n spect = self.upsample(spect)\n assert(spect.size(2) >= audio.size(1))\n if spect.size(2) > audio.size(1):\n spect = spect[:, :, :audio.size(1)]\n\n spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)\n spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)\n spect = spect.permute(0, 2, 1)\n\n audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)\n output_audio = []\n log_s_list = []\n log_det_W_list = []\n\n for k in range(self.n_flows):\n if k % self.n_early_every == 0 and k > 0:\n output_audio.append(audio[:, :self.n_early_size, :])\n audio = audio[:, self.n_early_size:, :]\n\n audio, log_det_W = self.convinv[k](audio)\n log_det_W_list.append(log_det_W)\n\n n_half 
= int(audio.size(1) / 2)\n audio_0 = audio[:, :n_half, :]\n audio_1 = audio[:, n_half:, :]\n\n output = self.WN[k]((audio_0, spect))\n log_s = output[:, n_half:, :]\n b = output[:, :n_half, :]\n audio_1 = torch.exp(log_s) * audio_1 + b\n log_s_list.append(log_s)\n\n audio = torch.cat([audio_0, audio_1], 1)\n\n output_audio.append(audio)\n return torch.cat(output_audio, 1), log_s_list, log_det_W_list\n\n def infer(self, spect, sigma=1.0):\n spect = self.upsample(spect)\n # trim conv artifacts. maybe pad spec to kernel multiple\n time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]\n spect = spect[:, :, :-time_cutoff]\n\n spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)\n spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)\n spect = spect.permute(0, 2, 1)\n\n audio = torch.randn(spect.size(0),\n self.n_remaining_channels,\n spect.size(2), device=spect.device).to(spect.dtype)\n\n audio = torch.autograd.Variable(sigma * audio)\n\n for k in reversed(range(self.n_flows)):\n n_half = int(audio.size(1) / 2)\n audio_0 = audio[:, :n_half, :]\n audio_1 = audio[:, n_half:, :]\n\n output = self.WN[k]((audio_0, spect))\n s = output[:, n_half:, :]\n b = output[:, :n_half, :]\n audio_1 = (audio_1 - b) / torch.exp(s)\n audio = torch.cat([audio_0, audio_1], 1)\n\n audio = self.convinv[k](audio, reverse=True)\n\n if k % self.n_early_every == 0 and k > 0:\n z = torch.randn(spect.size(0), self.n_early_size, spect.size(\n 2), device=spect.device).to(spect.dtype)\n audio = torch.cat((sigma * z, audio), 1)\n\n audio = audio.permute(\n 0, 2, 1).contiguous().view(\n audio.size(0), -1).data\n return audio\n\n @staticmethod\n def remove_weightnorm(model):\n waveglow = model\n for WN in waveglow.WN:\n WN.start = torch.nn.utils.remove_weight_norm(WN.start)\n WN.in_layers = remove(WN.in_layers)\n WN.cond_layers = remove(WN.cond_layers)\n WN.res_skip_layers = remove(WN.res_skip_layers)\n return waveglow\n\n\ndef remove(conv_list):\n new_conv_list = torch.nn.ModuleList()\n for old_conv in conv_list:\n old_conv = torch.nn.utils.remove_weight_norm(old_conv)\n new_conv_list.append(old_conv)\n return new_conv_list\n", "# Copyright (c) 2019 NVIDIA Corporation\nimport torch\n\nfrom nemo.backends.pytorch.nm import DataLayerNM\nfrom nemo.core import DeviceType\nfrom nemo.core.neural_types import *\nfrom .parts.datasets import AudioOnlyDataset\n\n\nclass AudioDataLayer(DataLayerNM):\n \"\"\"\n Data Layer for general speech tasks that loads only the audio.\n\n Module which reads speech data. It accepts comma-separated\n JSON manifest files describing the wav audio files and their metadata.\n JSON files should be of the following format::\n\n {\"audio_filepath\": path_to_wav_0, \"duration\": time_in_sec_0}\n ...\n {\"audio_filepath\": path_to_wav_n, \"duration\": time_in_sec_n}\n\n\n Args:\n manifest_filepath (str): path to JSON containing data.\n batch_size (int): batch sizelse.\n min_duration (float): All training files which have a duration less\n than min_duration are dropped. Note: Duration is read from the\n manifest JSON.\n Defaults to 0.1.\n max_duration (float): All training files which have a duration more\n than max_duration are dropped. 
Note: Duration is read from the\n manifest JSON.\n Defaults to None.\n trim_silence (bool): Whether to use trim silence from beginning and end\n of audio signal using librosa.effects.trim().\n Defaults to False.\n drop_last (bool): See PyTorch DataLoader.\n Defaults to False.\n shuffle (bool): See PyTorch DataLoader.\n Defaults to True.\n num_workers (int): See PyTorch DataLoader.\n Defaults to 0.\n n_segments (int): Number of samples to load per audiofile.\n Defaults to 0 which indicates to load the whole file.\n \"\"\"\n\n @staticmethod\n def create_ports():\n input_ports = {}\n output_ports = {\n \"audio_signal\": NeuralType({0: AxisType(BatchTag),\n 1: AxisType(TimeTag)}),\n\n \"a_sig_length\": NeuralType({0: AxisType(BatchTag)}),\n }\n return input_ports, output_ports\n\n def __init__(\n self, *,\n manifest_filepath,\n batch_size,\n min_duration=0.1,\n max_duration=None,\n trim_silence=False,\n drop_last=False,\n shuffle=True,\n num_workers=0,\n n_segments=0,\n **kwargs\n ):\n DataLayerNM.__init__(self, **kwargs)\n\n self._dataset = AudioOnlyDataset(\n manifest_filepath=manifest_filepath,\n max_duration=max_duration,\n min_duration=min_duration,\n trim=trim_silence,\n logger=self._logger,\n n_segments=n_segments\n )\n\n sampler = None\n if self._placement == DeviceType.AllGpu:\n self._logger.info('Parallelizing DATALAYER')\n sampler = torch.utils.data.distributed.DistributedSampler(\n self._dataset)\n\n self._dataloader = torch.utils.data.DataLoader(\n dataset=self._dataset,\n batch_size=batch_size,\n collate_fn=self._dataset.AudioCollateFunc,\n drop_last=drop_last,\n shuffle=shuffle if sampler is None else False,\n sampler=sampler,\n num_workers=num_workers\n )\n\n def __len__(self):\n return len(self._dataset)\n\n @property\n def dataset(self):\n return None\n\n @property\n def data_iterator(self):\n return self._dataloader\n", "# Copyright (c) 2019 NVIDIA Corporation\n__all__ = ['eval_iter_callback', 'eval_epochs_done_callback']\n\nimport os\nimport random\nimport time\n\nimport logging\n\nimport matplotlib\nmatplotlib.use(\"TkAgg\") # nopep8\nfrom matplotlib import pyplot as plt # nopep8\nimport numpy as np # nopep8\nfrom sklearn.metrics import confusion_matrix, classification_report # nopep8\n\nlogger = logging.getLogger('log')\n\n__all__ = ['eval_iter_callback', 'eval_epochs_done_callback']\n\n\ndef eval_iter_callback(tensors,\n global_vars,\n eval_data_layer):\n if \"all_preds\" not in global_vars.keys():\n global_vars[\"all_preds\"] = []\n if \"all_labels\" not in global_vars.keys():\n global_vars[\"all_labels\"] = []\n\n logits_lists = []\n labels_lists = []\n\n for kv, v in tensors.items():\n if 'logits' in kv:\n for v_tensor in v:\n for logit_tensor in v_tensor:\n logits_lists.append(logit_tensor.detach().cpu().tolist())\n\n if 'labels' in kv:\n for v_tensor in v:\n for label_tensor in v_tensor:\n labels_lists.append(label_tensor.detach().cpu().tolist())\n\n preds = list(np.argmax(np.asarray(logits_lists), 1))\n global_vars[\"all_preds\"].extend(preds)\n global_vars[\"all_labels\"].extend(labels_lists)\n\n\ndef list2str(l):\n return ' '.join([str(j) for j in l])\n\n\ndef eval_epochs_done_callback(global_vars, graph_fold):\n labels = np.asarray(global_vars['all_labels'])\n preds = np.asarray(global_vars['all_preds'])\n accuracy = sum(labels == preds) / labels.shape[0]\n logger.info(f'Accuracy: {accuracy}')\n i = 0\n if preds.shape[0] > 21:\n i = random.randint(0, preds.shape[0] - 21)\n logger.info(\"Sampled preds: [%s]\" % list2str(preds[i:i+20]))\n logger.info(\"Sampled 
labels: [%s]\" % list2str(labels[i:i+20]))\n cm = confusion_matrix(labels, preds)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n cax = ax.matshow(cm)\n plt.title('Confusion matrix of the classifier')\n fig.colorbar(cax)\n plt.xlabel('Predicted')\n plt.ylabel('True')\n os.makedirs(graph_fold, exist_ok=True)\n plt.savefig(os.path.join(graph_fold, time.strftime('%Y%m%d-%H%M%S')))\n\n logger.info(classification_report(labels, preds))\n\n return dict({\"accuracy\": accuracy})\n", "import torch\nfrom torch import nn\n\nfrom nemo.backends.pytorch.nm import LossNM\nfrom nemo.core.neural_types import (NeuralType,\n AxisType,\n BatchTag,\n TimeTag,\n ChannelTag,\n RegressionTag)\n\n__all__ = ['SequenceLoss', 'CrossEntropyLoss', 'MSELoss']\n\nEPS = 1e-5\n\n\nclass SequenceLoss(LossNM):\n \"\"\"Loss for seq2seq tasks\n\n Args:\n pad_id (int): Label position of padding symbol.\n Defaults to 0.\n smoothing_coef (float): Label smoothing coefficient in range [0, 1].\n Defaults to 0.0.\n sample_wise (bool): Flag indicates if loss sum divisor should be batch\n size.\n Defaults to False.\n aux_ctc (bool): Whether to add auxiliary CTC loss.\n Defaults to False.\n ctc_initial_coef (float): Initial coefficient to multiply ctc component\n by.\n Defaults to 0.1.\n ctc_blank_id (int): ID of blank symbols to pass to mask when\n calculating ctc loss.\n Defaults to None.\n\n \"\"\"\n\n @staticmethod\n def create_ports():\n input_ports = {\n 'log_probs': NeuralType({\n 0: AxisType(BatchTag),\n 1: AxisType(TimeTag),\n 2: AxisType(ChannelTag)\n }),\n 'targets': NeuralType({\n 0: AxisType(BatchTag),\n 1: AxisType(TimeTag)\n })\n }\n output_ports = {\n 'loss': NeuralType(None)\n }\n return input_ports, output_ports\n\n def __init__(self, pad_id=0, smoothing_coef=0.0, sample_wise=False,\n aux_ctc=False, ctc_initial_coef=0.1, ctc_blank_id=None,\n **kwargs):\n assert (not aux_ctc) or (ctc_blank_id is not None), \\\n \"Should be a blank id if using CTC loss\"\n\n super().__init__(**kwargs)\n\n self.pad_id = pad_id\n self.smoothing_coef = smoothing_coef\n self.sample_wise = sample_wise\n self.aux_ctc = aux_ctc\n self.ctc_coef = ctc_initial_coef\n\n if aux_ctc:\n self.ctc = nn.CTCLoss(blank=ctc_blank_id,\n reduction='none', zero_infinity=True)\n self.ctc = self.ctc.to(self._device)\n\n def _loss_function(self, log_probs, targets):\n \"\"\"(BTC, BT) -> 0\"\"\"\n\n pad_mask = (targets != self.pad_id).long()\n loss = self._ce_loss(log_probs, targets, pad_mask)\n\n if self.aux_ctc:\n ctc_loss = self._ctc_loss(log_probs, targets, pad_mask)\n loss += self.ctc_coef * ctc_loss\n\n assert loss.dim() == 0, \"Zero-dim tensor check\"\n\n return loss\n\n def _ce_loss(self, log_probs, targets, pad_mask):\n target_log_probs = log_probs.gather(2, targets.unsqueeze(2)).squeeze(2)\n loss = \\\n (1.0 - self.smoothing_coef) * target_log_probs \\\n + self.smoothing_coef * log_probs.mean(-1)\n pad_mask = pad_mask.float()\n loss = -torch.sum(loss * pad_mask)\n if self.sample_wise:\n loss /= target_log_probs.size(0)\n else:\n loss /= pad_mask.sum() + EPS\n return loss\n\n def _ctc_loss(self, log_probs, targets, pad_mask):\n lengths = pad_mask.sum(-1)\n loss = self.ctc(log_probs.transpose(0, 1), targets, lengths, lengths)\n loss = torch.mean(loss)\n return loss\n\n\nclass CrossEntropyLoss(LossNM):\n \"\"\"\n CrossEntropyLoss\n\n \"\"\"\n @staticmethod\n def create_ports():\n input_ports = {\n \"logits\": NeuralType({\n 0: AxisType(BatchTag),\n 1: AxisType(ChannelTag)\n }),\n \"labels\": NeuralType({\n 0: AxisType(BatchTag),\n })\n }\n\n 
output_ports = {\n \"loss\": NeuralType(None),\n }\n return input_ports, output_ports\n\n def __init__(self, weight=None, **kwargs):\n LossNM.__init__(self, **kwargs)\n if weight:\n weight = torch.FloatTensor(weight).to(self._device)\n self._criterion = nn.CrossEntropyLoss(weight=weight)\n\n def _loss_function(self,\n logits,\n labels):\n loss = self._criterion(logits, labels)\n return loss\n\n\nclass MSELoss(LossNM):\n @staticmethod\n def create_ports():\n input_ports = {\n \"preds\": NeuralType({\n 0: AxisType(RegressionTag)\n }),\n \"labels\": NeuralType({\n 0: AxisType(RegressionTag)\n })\n }\n\n output_ports = {\n \"loss\": NeuralType(None)\n }\n return input_ports, output_ports\n\n def __init__(self, **kwargs):\n LossNM.__init__(self, **kwargs)\n self._criterion = nn.MSELoss()\n\n def _loss_function(self, preds, labels):\n loss = self._criterion(preds, labels)\n return loss\n" ]
[ [ "torch.mean", "torch.nn.CTCLoss" ], [ "numpy.array" ], [ "torch.sigmoid", "torch.cat", "torch.nn.utils.weight_norm", "torch.nn.ModuleList", "torch.det", "torch.nn.functional.conv1d", "torch.IntTensor", "torch.nn.utils.remove_weight_norm", "torch.tanh", "torch.exp", "torch.FloatTensor", "torch.nn.Conv1d", "torch.nn.ConvTranspose1d", "torch.autograd.Variable" ], [ "torch.utils.data.DataLoader", "torch.utils.data.distributed.DistributedSampler" ], [ "matplotlib.pyplot.title", "numpy.asarray", "matplotlib.use", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "sklearn.metrics.classification_report", "matplotlib.pyplot.figure" ], [ "torch.mean", "torch.nn.CrossEntropyLoss", "torch.sum", "torch.FloatTensor", "torch.nn.CTCLoss", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ajjimeno/nn-hyperplane-bounds
[ "21d6fe255eb300113253ad3137694dedc90b0f83" ]
[ "Experiments.py" ]
[ "import numpy as np\n\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\n\nfrom Losses import MultiHuberLoss\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nbatch_size = 32\n\ndef train(model, criteria, training_set, testing_set, optim_wd=0.0, lr=0.001, epochs=100):\n optimizer = optim.AdamW(model.parameters(), lr=lr, eps=1e-08)\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1, verbose=True)\n\n training_set_size = len(training_set) * batch_size\n owd_factor = optim_wd\n\n print(\"owd_factor:\", owd_factor)\n\n for e in range(epochs):\n model.train()\n\n total_loss = 0\n count = 0\n print (\"Epoch :\", e, flush=True)\n\n total_accuracy = 0\n count_labels = 0\n\n dl2 = 0\n\n dl2_sv = 0\n svs = 0\n\n\n for step, batch in enumerate(training_set):\n model.zero_grad()\n #print (e, \" \", count, flush=True)\n data = batch[0].to(device)\n labels = batch[1].to(device)\n\n output, z = model(data)\n\n loss = criteria(output, labels)\n\n if owd_factor > 0:\n wl2 = torch.dot(model.fc2.weight.flatten(), model.fc2.weight.flatten())\n loss += owd_factor * wl2\n \n norm = torch.dot(z.flatten(),z.flatten())\n #norm = torch.dot(z.flatten(),z.flatten())\n loss += owd_factor * norm\n\n if isinstance(criteria, MultiHuberLoss):\n # Find support vectors\n for i in torch.logical_or(torch.isclose(output, torch.tensor(1.0)),\n torch.isclose(output, torch.tensor(-1.0))\n ).nonzero():\n ind = i[0].item()\n mnorm = torch.dot(z[ind].flatten(), z[ind].flatten())\n\n svs += 1\n\n if mnorm > dl2_sv:\n dl2_sv = mnorm\n\n if norm > dl2:\n dl2 = norm\n\n total_loss += loss.item()\n total_accuracy += flat_accuracy(output.clone().detach(), labels)\n count_labels += len(labels)\n count += 1\n\n loss.backward()\n\n optimizer.step()\n\n print (\"loss: \", total_loss/count, flush=True)\n print (\"training acc: \", total_accuracy/count_labels, flush=True)\n\n if owd_factor > 0:\n print(\"dl2: \", dl2)\n print(\"wl2: \", torch.dot(model.fc2.weight.flatten(), model.fc2.weight.flatten()))\n\n if isinstance(criteria, MultiHuberLoss):\n print(\"dl2 sv: \", dl2_sv)\n print(\"svs: \", svs)\n\n if e > 0 and e % 25 == 0: \n print (\"Testing acc: \", predict(model, testing_set))\n\n scheduler.step()\n\ndef flat_accuracy(preds, labels):\n pred_flat = torch.argmax(preds, axis=1).flatten()\n labels_flat = labels.flatten()\n\n return torch.sum(pred_flat == labels_flat).item() \n\ndef predict(model, testing_set):\n\n model.eval()\n\n total_accuracy = 0\n count = 0\n\n for step, batch in enumerate(testing_set):\n data = batch[0].to(device)\n labels = batch[1].to(device)\n\n with torch.no_grad():\n output, _ = model(data)\n\n total_accuracy += flat_accuracy(output.clone().detach(), labels)\n count += len(labels)\n\n return total_accuracy/count\n\nimport MNIST\nimport CIFAR\n\nif __name__ == '__main__':\n criteria = [nn.CrossEntropyLoss().to(device), MultiHuberLoss().to(device)]\n\n dropout_values = [False, True]\n\n training_set_percentages = [ 1, 5, 10, 20, 40, 60, 80, 100 ]\n\n aug_values = [ False, True ]\n\n sets = [ MNIST, CIFAR ]\n\n for s in sets:\n\n testing_set = s.testloader()\n\n for tsp in training_set_percentages:\n for c in criteria:\n for aug in aug_values:\n for optim_wd in s.owd_weights:\n for dropout in dropout_values:\n for i in range(10):\n training_set = s.trainloader(tsp, aug)\n\n nmodel = s.model(dropout).to(device)\n train(nmodel, c, training_set, testing_set, optim_wd=optim_wd, lr=s.lr, epochs=s.epochs)\n\n train_result = predict(nmodel, training_set) \n 
test_result = predict(nmodel, testing_set)\n\n print (\"Set: \", s.name)\n print (\"Training set size %: \", tsp)\n print (\"Criteria: \", c)\n print (\"Aug: \", aug)\n print (\"optim wd: \", optim_wd)\n print (\"drop out: \", dropout)\n print (\"Training acc: \", train_result)\n print (\"Testing acc: \", test_result) \n\n print (s.name, \\\n \"|Net|tsp|\",tsp, \\\n \"|crit|\", c, \\\n \"|aug|\", aug, \\\n \"|owd|\", optim_wd, \\\n \"|do|\", dropout, \\\n \"|training|\", train_result, \\\n \"|testing|\", test_result)\n" ]
[ [ "torch.optim.lr_scheduler.StepLR", "torch.nn.CrossEntropyLoss", "torch.sum", "torch.tensor", "torch.no_grad", "torch.cuda.is_available", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
awesome-archive/delta
[ "a916e06f55213dcd1fea39a5950927dfed1483c7", "a916e06f55213dcd1fea39a5950927dfed1483c7", "841d853cf0bdb479260be112432813dcb705f859", "841d853cf0bdb479260be112432813dcb705f859", "841d853cf0bdb479260be112432813dcb705f859", "841d853cf0bdb479260be112432813dcb705f859" ]
[ "delta/utils/misc_test.py", "delta/layers/ops/kernels/simple_vocab_op_test.py", "delta/data/task/text_seq_label_task_test.py", "delta/utils/solver/asr_solver.py", "delta/data/task/text_cls_task.py", "delta/utils/solver/raw_pretrain_seq_label_solver.py" ]
[ "# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n''' misc.py unittest'''\nimport numpy as np\nimport tensorflow as tf\n\nfrom delta.utils import misc\n\n\nclass MiscTest(tf.test.TestCase):\n ''' misc unittest'''\n\n def setUp(self):\n '''setup'''\n self.length = [3, 5, 2]\n self.mask_true = np.array([\n [1, 1, 1, 0, 0],\n [1, 1, 1, 1, 1],\n [1, 1, 0, 0, 0],\n ])\n\n def tearDown(self):\n '''tear down'''\n\n def test_len_to_mask(self):\n ''' len to mask unittest'''\n with self.session():\n mask = misc.len_to_mask(self.length, dtype=tf.int32)\n self.assertAllEqual(mask.eval(), self.mask_true)\n\n def test_len_to_padding(self):\n ''' len to padding unittest'''\n with self.session():\n padding = misc.len_to_padding(self.length, dtype=tf.int32)\n self.assertAllEqual(padding.eval(), 1 - self.mask_true)\n\n def test_gpu_device_names(self):\n ''' gpu device names unittest'''\n with self.session(use_gpu=False, force_gpu=False):\n devices, ngpus = misc.gpu_device_names()\n self.assertListEqual(devices, [])\n self.assertEqual(ngpus, 0)\n\n def test_per_device_batch_size(self):\n ''' per device batch size unittest'''\n batch_size, ngpus = 32, 2\n batch_per_dev = misc.per_device_batch_size(batch_size, ngpus)\n self.assertEqual(batch_per_dev, 16)\n\n batch_size, ngpus = 32, 1\n batch_per_dev = misc.per_device_batch_size(batch_size, ngpus)\n self.assertEqual(batch_per_dev, 32)\n\n with self.assertRaises(ValueError):\n batch_size, ngpus = 32, 3\n batch_per_dev = misc.per_device_batch_size(batch_size, ngpus)\n\n def test_generate_synthetic_data(self):\n ''' generate sythetic data unittest'''\n input_shape = tf.TensorShape([2, 3])\n input_value = 1\n input_dtype = tf.float32\n label_shape = tf.TensorShape([2])\n label_value = 2\n label_dtype = tf.int32\n nepoch = 2\n\n data_set = misc.generate_synthetic_data(input_shape, input_value,\n input_dtype, label_shape,\n label_value, label_dtype, nepoch)\n\n iterator = data_set.make_one_shot_iterator()\n\n with self.session():\n data, label = iterator.get_next()\n self.assertAllEqual(data.eval(),\n np.ones(shape=input_shape, dtype=np.float32))\n self.assertAllEqual(label.eval(),\n 2 * np.ones(shape=label_shape, dtype=np.float32))\n\n with self.assertRaises(tf.errors.OutOfRangeError):\n data.eval()\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for simple_vocab.\"\"\"\nimport tensorflow as tf\nfrom delta.layers.ops import py_x_ops\n\n\nclass VocabOpsTest(tf.test.TestCase):\n ''' vocab op test '''\n\n def setUp(self):\n ''' set up '''\n\n def tearDown(self):\n ''' tear down '''\n\n def test_vocab_token_to_id(self):\n ''' tset vocab token to id'''\n with self.session(use_gpu=False):\n vocab = [\n '<s>',\n '</s>',\n '<unk>',\n '<epsilon>',\n 'a',\n 'b c d e',\n 'øut',\n 'über',\n '♣',\n '愤青',\n '←',\n ]\n self.assertEqual(0, py_x_ops.vocab_token_to_id('<s>', vocab=vocab).eval())\n self.assertEqual(4, py_x_ops.vocab_token_to_id('a', vocab=vocab).eval())\n self.assertAllEqual([5, 8],\n py_x_ops.vocab_token_to_id(['b c d e', '♣'],\n vocab=vocab).eval())\n self.assertEqual(\n 2,\n py_x_ops.vocab_token_to_id('unknown', vocab=vocab).eval())\n\n def test_vocab_token_to_load_id(self):\n ''' test vocab token to id which is loaded from vocab file'''\n with self.session(use_gpu=False):\n vocab = [\n '<s>\t3',\n '</s>\t5',\n '<unk>\t7',\n '<epsilon>\t9',\n 'a\t2',\n 'b c d e\t4',\n 'øut\t8',\n 'über\t10',\n '♣\t-1',\n '愤青\t-3',\n '←\t-5',\n ]\n self.assertEqual(\n 3,\n py_x_ops.vocab_token_to_id(\n '<s>', vocab=vocab, load_token_ids_from_vocab=True).eval())\n self.assertEqual(\n 2,\n py_x_ops.vocab_token_to_id(\n 'a', vocab=vocab, load_token_ids_from_vocab=True).eval())\n self.assertAllEqual([4, -1],\n py_x_ops.vocab_token_to_id(\n ['b c d e', '♣'],\n vocab=vocab,\n load_token_ids_from_vocab=True).eval())\n self.assertEqual(\n 7,\n py_x_ops.vocab_token_to_id(\n 'unknown', vocab=vocab, load_token_ids_from_vocab=True).eval())\n\n def test_vocab_id_to_token(self):\n ''' test vocab id to token '''\n with self.session(use_gpu=False):\n vocab = [\n '<s>',\n '</s>',\n '<unk>',\n '<epsilon>',\n 'a',\n 'b c d e',\n 'øut',\n 'über',\n '♣',\n '愤青',\n '←',\n ]\n self.assertEqual(\n '<s>',\n py_x_ops.vocab_id_to_token(0, vocab=vocab).eval().decode('utf-8'))\n self.assertEqual(\n 'a',\n py_x_ops.vocab_id_to_token(4, vocab=vocab).eval().decode('utf-8'))\n\n res = py_x_ops.vocab_id_to_token([5, 8], vocab=vocab).eval()\n res = [r.decode('utf-8') for r in res]\n self.assertAllEqual(['b c d e', '♣'], res)\n self.assertEqual(\n '<unk>',\n py_x_ops.vocab_id_to_token(2, vocab=vocab).eval().decode('utf-8'))\n self.assertEqual(\n '<unk>',\n py_x_ops.vocab_id_to_token(-1, vocab=vocab).eval().decode('utf-8'))\n self.assertEqual(\n '<unk>',\n py_x_ops.vocab_id_to_token(11, vocab=vocab).eval().decode('utf-8'))\n\n def test_vocab_id_to_token_load_id(self):\n ''' test vocab id to token which is loaded from vocabfile'''\n with self.session(use_gpu=False):\n vocab = [\n '<s>\t3',\n '</s>\t5',\n '<unk>\t7',\n '<epsilon>\t9',\n 'a\t2',\n 'b c d e\t4',\n 'øut\t8',\n 'über\t10',\n '♣\t-1',\n '愤青\t-3',\n '←\t-5',\n ]\n self.assertEqual(\n '<s>',\n py_x_ops.vocab_id_to_token(\n 3, vocab=vocab,\n load_token_ids_from_vocab=True).eval().decode('utf-8'))\n self.assertEqual(\n 'a',\n py_x_ops.vocab_id_to_token(\n 2, vocab=vocab,\n load_token_ids_from_vocab=True).eval().decode('utf-8'))\n res = py_x_ops.vocab_id_to_token([4, -1],\n vocab=vocab,\n load_token_ids_from_vocab=True).eval()\n res = [r.decode('utf-8') for r in res]\n\n self.assertAllEqual(['b c d e', '♣'], res)\n self.assertEqual(\n '<unk>',\n py_x_ops.vocab_id_to_token(\n 7, vocab=vocab,\n 
load_token_ids_from_vocab=True).eval().decode('utf-8'))\n self.assertEqual(\n '<unk>',\n py_x_ops.vocab_id_to_token(\n 0, vocab=vocab,\n load_token_ids_from_vocab=True).eval().decode('utf-8'))\n\n def test_token_in_vocab(self):\n '''test token whether in vocab '''\n with self.session(use_gpu=False):\n vocab = [\n '<s>',\n '</s>',\n '<unk>',\n '<epsilon>',\n 'a',\n 'b c d e',\n 'øut',\n 'über',\n '♣',\n '愤青',\n '←',\n ]\n self.assertTrue(py_x_ops.token_in_vocab('a', vocab=vocab).eval())\n self.assertTrue(py_x_ops.token_in_vocab('<unk>', vocab=vocab).eval())\n self.assertTrue(\n py_x_ops.token_in_vocab(['b c d e', '♣'], vocab=vocab).eval().all())\n self.assertFalse(py_x_ops.token_in_vocab('unknown', vocab=vocab).eval())\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n''' text sequence labeling task unittest '''\n\nimport os\nfrom pathlib import Path\nimport numpy as np\nimport tensorflow as tf\nfrom absl import logging\nfrom delta import utils\nfrom delta.data.task.text_seq_label_task import TextSeqLabelTask\nfrom delta.utils.register import import_all_modules_for_register\n\n\nclass TextSeqLabelTaskTest(tf.test.TestCase):\n ''' sequence labeling task test'''\n\n def setUp(self):\n ''' set up'''\n import_all_modules_for_register()\n main_root = os.environ['MAIN_ROOT']\n main_root = Path(main_root)\n self.config_file = main_root.joinpath(\n 'egs/mock_text_seq_label_data/seq-label/v1/config/seq-label-mock.yml')\n\n def tearDown(self):\n ''' tear down '''\n\n def test_english(self):\n \"\"\" test seq label task of english data \"\"\"\n config = utils.load_config(self.config_file)\n max_len = config[\"model\"][\"net\"][\"structure\"][\"max_len\"]\n config[\"data\"][\"task\"][\"language\"] = \"english\"\n task_config = config[\"data\"][\"task\"]\n task_config[\n \"text_vocab\"] = \"egs/mock_text_seq_label_data/seq-label/v1/data/text_vocab.txt\"\n task_config[\"need_shuffle\"] = False\n\n task = TextSeqLabelTask(config, utils.TRAIN)\n\n # test offline data\n data = task.dataset()\n self.assertTrue(\"input_x_dict\" in data and\n \"input_x\" in data[\"input_x_dict\"])\n self.assertTrue(\"input_y_dict\" in data and\n \"input_y\" in data[\"input_y_dict\"])\n with self.session() as sess:\n sess.run(data[\"iterator\"].initializer, feed_dict=data[\"init_feed_dict\"])\n res = sess.run(\n [data[\"input_x_dict\"][\"input_x\"], data[\"input_y_dict\"][\"input_y\"]])\n logging.debug(res[0][0][:5])\n logging.debug(res[1][0])\n self.assertAllEqual(res[0][0][:5], [2, 3, 4, 5, 0])\n self.assertEqual(np.shape(res[0]), (10, max_len))\n self.assertEqual(np.shape(res[1]), (10, max_len))\n\n # test online data\n export_inputs = task.export_inputs()\n self.assertTrue(\"export_inputs\" in export_inputs and\n \"input_sentence\" in export_inputs[\"export_inputs\"])\n input_sentence = 
export_inputs[\"export_inputs\"][\"input_sentence\"]\n input_x = export_inputs[\"model_inputs\"][\"input_x\"]\n with self.session() as sess:\n sess.run(data[\"iterator\"].initializer, feed_dict=data[\"init_feed_dict\"])\n res = sess.run(input_x, feed_dict={input_sentence: [\"I feel good .\"]})\n logging.debug(res[0][:5])\n self.assertAllEqual(res[0][:5], [0, 3, 4, 5, 0])\n self.assertEqual(np.shape(res[0]), (max_len,))\n\n\nif __name__ == \"__main__\":\n logging.set_verbosity(logging.DEBUG)\n tf.test.main()\n", "# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n''' asr sovler based on Solver'''\n\nfrom pathlib import Path\nfrom datetime import datetime\n\nfrom absl import logging\nimport tensorflow as tf\n\n#pylint: disable=import-error\nfrom tensorflow.keras.utils import multi_gpu_model\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.callbacks import TensorBoard\nfrom tensorflow.keras.callbacks import CSVLogger\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau\n\nfrom delta import utils\nfrom delta.utils.decode import py_ctc\nfrom delta.utils import metrics as metrics_lib\nfrom delta.utils.solver.base_solver import Solver\nfrom delta.utils.register import registers\nfrom delta.utils.solver.utils.callbacks import TokenErrMetricCallBack\n\n\n#pylint: disable=too-many-instance-attributes,too-many-public-methods\[email protected]\nclass AsrSolver(Solver):\n ''' asr keras solver'''\n\n def __init__(self, config):\n super().__init__(config)\n self.batch_input_shape = None\n\n self._solver = config['solver']\n self._num_epochs = self._solver['optimizer']['epochs']\n\n self._lr = self._solver['optimizer']['learning_rate']['rate']\n self._decay_rate = self._solver['optimizer']['learning_rate']['decay_rate']\n self._val_metric = self._solver['optimizer']['learning_rate'][\n 'type'] == 'val_metric'\n if self._val_metric:\n self._min_lr = self._solver['optimizer']['learning_rate']['min_rate']\n self._patience = self._solver['optimizer']['learning_rate']['patience']\n\n self._clipnorm = self._solver['optimizer']['clip_global_norm']\n self._early_stopping = self._solver['optimizer']['early_stopping']['enable']\n\n self._monitor_used = self._solver['metrics']['monitor_used']\n self._model_path = self._solver['saver']['model_path']\n\n logging.info('num_epochs : {}'.format(self._num_epochs))\n logging.info('lr : {}'.format(self._lr))\n logging.info('saver path : {}'.format(self._model_path))\n\n devices, self._ngpu = utils.gpu_device_names()\n logging.info(f\"ngpu: {self._ngpu}, device list: {devices}\")\n\n #model\n self._model = None\n self._parallel_model = None\n self._built = False\n\n @property\n def ngpu(self):\n ''' number of gpus '''\n return self._ngpu\n\n @property\n def 
raw_model(self):\n ''' Delta RawModel '''\n assert self._model is not None\n return self._model\n\n @property\n def model(self):\n ''' keras Model before doing `multi_gpu_model` '''\n return self.raw_model.model\n\n @property\n def parallel_model(self):\n ''' `multi_gpu_model` of keras Model '''\n assert self._parallel_model is not None\n return self._parallel_model\n\n @property\n def active_model(self):\n ''' real keras model for run'''\n return self.parallel_model if self.ngpu > 1 else self.model\n\n def process_config(self, config):\n ''' preprocess of config'''\n return config\n\n def input_fn(self, mode):\n ''' input function for tf.data.Dataset'''\n super().input_fn(mode)\n assert self.task\n self.batch_input_shape = self.task.batch_input_shape()\n batch_size = self.config['solver']['optimizer']['batch_size']\n num_epoch = self.config['solver']['optimizer']['epochs']\n return self.task.input_fn(mode, batch_size, num_epoch), self.task\n\n def input_data(self, mode):\n ''' input data '''\n input_fn, _task = self.input_fn(mode)\n ds_ = input_fn()\n #iterator = ds_.make_one_shot_iterator()\n #return iterator, task\n return ds_, _task\n\n #pylint: disable=no-self-use\n def get_loss(self):\n ''' dummy ctc loss, since ctc is implemented as a kearas layer '''\n loss = {'ctc': lambda y_true, y_pred: tf.reduce_mean(y_pred)}\n return loss\n\n def input_generator(self, input_iterator, input_task, cur_sess):\n ''' dataset_based generator used in keras.model.fit_generator()\n in future, it will be replaced by tf.keras.utils.Sequence'''\n next_batch = input_iterator.get_next()\n for _ in range(len(input_task)):\n next_batch_data = cur_sess.run(next_batch)\n yield next_batch_data\n\n def get_run_opts_metas(self):\n ''' RunOptions and RunMetadata '''\n opts_conf = self.config['solver']['run_options']\n run_opts = tf.RunOptions(\n trace_level=opts_conf['trace_level'],\n inter_op_thread_pool=opts_conf['inter_op_thread_pool'],\n report_tensor_allocations_upon_oom=opts_conf[\n 'report_tensor_allocations_upon_oom'])\n run_metas = tf.RunMetadata()\n\n run_metas = None\n run_opts = None\n return run_opts, run_metas\n\n def get_optimizer(self, multitask):\n ''' keras optimizer '''\n optconf = self.config['solver']['optimizer']\n method = optconf['name']\n\n learning_rate = optconf['learning_rate']['rate']\n if method == 'adam':\n opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n elif method == 'adadelta':\n opt = tf.keras.optimizers.Adadelta(learning_rate=learning_rate)\n else:\n raise ValueError(f\"Not support optimmizer: {method}\")\n return opt\n\n #pylint: disable=arguments-differ\n def model_fn(self, mode):\n ''' build model like tf.estimator.Estimator'''\n with tf.device('/cpu:0'):\n self._model = super().model_fn()\n\n if not self.model.built:\n assert self.batch_input_shape\n # data must be (features, labels), only using features as input\n self.model.build(input_shape=self.batch_input_shape[0])\n\n # parallel and compile model\n self.build(multi_gpu=mode == utils.TRAIN)\n\n if mode != utils.TRAIN:\n model_path = Path(self._model_path).joinpath('best_model.h5')\n logging.info(f\"{mode}: load model from: {model_path}\")\n if self.model.built:\n self.model.load_weights(str(model_path))\n else:\n self._model = tf.keras.models.load_model(str(model_path))\n\n def build(self, multi_gpu=True):\n ''' main entrypoint to build model '''\n assert self.model\n\n loss = self.get_loss()\n multitask = self.config['solver']['optimizer']['multitask']\n optimizer = self.get_optimizer(multitask)\n\n 
run_opts, run_metas = self.get_run_opts_metas()\n\n # compile model\n if self.ngpu > 1 and multi_gpu:\n self._parallel_model = multi_gpu_model(self.model, gpus=self.ngpu)\n self.parallel_model.compile(\n loss=loss,\n optimizer=optimizer,\n metrics=['accuracy'],\n options=run_opts,\n run_metadata=run_metas)\n else:\n self.model.compile(\n loss=loss,\n optimizer=optimizer,\n metrics=['accuracy'],\n options=run_opts,\n run_metadata=run_metas)\n\n # Print model summary\n if self.model.built and self.model._is_graph_network:\n self.model.summary()\n self._built = True\n\n def get_metric_callbacks(self, eval_gen, eval_task, monitor_used,\n decoder_type):\n ''' metric_specific callbacks'''\n callbacks = []\n\n if monitor_used == 'val_token_err':\n metric_func = self.get_metric_func()\n metric_cal = TokenErrMetricCallBack(metric_func, eval_gen, eval_task,\n decoder_type)\n callbacks.append(metric_cal)\n\n logging.info(f\"CallBack: Val Metric on {monitor_used}\")\n return callbacks\n\n def get_misc_callbacks(self, monitor_used=None):\n '''misc_specific callbacks'''\n callbacks = []\n #tensorboard\n tb_cb = TensorBoard(log_dir=self._model_path)\n callbacks.append(tb_cb)\n logging.info(f\"CallBack: Tensorboard\")\n\n # metric history\n metric_log = 'metrics.csv'\n csv_logger = CSVLogger(\n filename=Path(self._model_path).joinpath(metric_log), separator='\\t')\n callbacks.append(csv_logger)\n logging.info(f\"CallBack: Metric log to {metric_log}\")\n\n #save model\n save_best = Path(self._model_path).joinpath('best_model.h5')\n save_best_cb = ModelCheckpoint(\n str(save_best),\n monitor=monitor_used,\n verbose=1,\n save_best_only=True,\n save_weights_only=False,\n period=1)\n callbacks.append(save_best_cb)\n logging.info(f\"CallBack: Save Best Model\")\n\n # save checkpoint\n save_ckpt = Path(self._model_path).joinpath('model.{epoch:02d}-{' +\n monitor_used + ':.2f}.h5')\n save_ckpt_cb = ModelCheckpoint(\n str(save_ckpt),\n monitor=monitor_used,\n verbose=1,\n save_best_only=False,\n save_weights_only=False,\n period=1)\n callbacks.append(save_ckpt_cb)\n logging.info(f\"CallBack: Save Model Checkpoint.\")\n\n # nan check\n callbacks.append(tf.keras.callbacks.TerminateOnNaN())\n\n # Stops the model early if the metrics isn't improving\n if self._early_stopping:\n logging.info(f\"CallBack: Early Stop on {monitor_used}\")\n es_cb = EarlyStopping(\n monitor=monitor_used, min_delta=0, patience=5, verbose=0, mode='auto')\n callbacks.append(es_cb)\n\n # shcedule learning rate\n if self._val_metric:\n logging.info(f\"CallBack: Learning Rate Shcedule on {monitor_used}\")\n lr_shcedule = ReduceLROnPlateau(\n monitor=monitor_used,\n factor=self._decay_rate,\n patience=self._patience,\n verbose=1,\n mode='auto',\n min_delta=0.0001,\n cooldown=0,\n min_lr=self._min_lr)\n callbacks.append(lr_shcedule)\n return callbacks\n\n def get_callbacks(self,\n eval_ds,\n eval_task,\n monitor_used='val_acc',\n decoder_type='beam_search'):\n ''' callbacks for traning'''\n\n #metric callbacks\n callbacks = self.get_metric_callbacks(eval_ds, eval_task, monitor_used,\n decoder_type)\n #misc callbacks\n misc_callbacks = self.get_misc_callbacks(monitor_used)\n callbacks.extend(misc_callbacks)\n\n return callbacks\n\n def save_model(self):\n ''' save keras model '''\n if self._model_path:\n save_model = self._model_path + str('/final_model.h5')\n self.model.save(save_model)\n logging.info(\"Model saved: {}\".format(save_model))\n\n def train(self):\n ''' only train '''\n _, train_task = self.input_data(mode=utils.TRAIN)\n 
self.model_fn(mode=utils.TRAIN)\n\n callbacks = self.get_misc_callbacks(monitor_used='loss')\n\n self.active_model.fit_generator(\n train_task,\n steps_per_epoch=len(train_task),\n epochs=self._num_epochs,\n verbose=1,\n callbacks=callbacks,\n max_queue_size=20,\n workers=1,\n use_multiprocessing=False,\n shuffle=True,\n initial_epoch=0)\n\n def get_metric_func(self):\n ''' build metric function '''\n _input_data = self.model.get_layer('inputs').input\n y_pred = self.model.get_layer('ctc').input[0]\n metric_func = K.function([_input_data], [y_pred])\n return metric_func\n\n #pylint: disable=too-many-locals\n def eval(self):\n ''' only eval'''\n mode = utils.EVAL\n #get eval dataset\n # data must be init before model build\n eval_ds, eval_task = self.input_data(mode=mode)\n eval_gen = tf.data.make_one_shot_iterator(eval_ds)\n\n #get eval model\n self.model_fn(mode=mode)\n assert self._built\n\n #load model\n eval_func = self.get_metric_func()\n\n target_seq_list, predict_seq_list = [], []\n for _ in range(len(eval_task)):\n batch_data = K.get_session().run(eval_gen.get_next()[0])\n batch_input = batch_data['inputs']\n batch_target = batch_data['targets'].tolist()\n batch_predict = eval_func(batch_input)[0]\n batch_decode = py_ctc.ctc_greedy_decode(batch_predict, 0, unique=True)\n target_seq_list += batch_target\n predict_seq_list += batch_decode\n token_errors = metrics_lib.token_error(\n predict_seq_list=predict_seq_list,\n target_seq_list=target_seq_list,\n eos_id=0)\n logging.info(\"eval finish!\")\n logging.info(\"Token Error: {}\".format(token_errors))\n\n def train_and_eval(self):\n ''' train and eval '''\n # data must be init before model builg\n backend_sess = K.get_session()\n train_ds, train_task = self.input_data(mode=utils.TRAIN)\n train_gen = self.input_generator(train_ds.make_one_shot_iterator(),\n train_task, backend_sess)\n eval_ds, eval_task = self.input_data(mode=utils.EVAL)\n eval_gen = self.input_generator(eval_ds.make_one_shot_iterator(), eval_task,\n backend_sess)\n\n self.model_fn(mode=utils.TRAIN)\n assert self._built\n\n callbacks = self.get_callbacks(\n eval_ds, eval_task, monitor_used=self._monitor_used)\n\n try:\n # Run training\n self.active_model.fit_generator(\n train_gen,\n steps_per_epoch=len(train_task),\n epochs=self._num_epochs,\n verbose=1,\n callbacks=callbacks,\n validation_data=eval_gen,\n validation_steps=len(eval_task),\n validation_freq=1,\n class_weight=None,\n max_queue_size=50,\n workers=1,\n use_multiprocessing=False,\n shuffle=True,\n initial_epoch=0)\n #save model\n # not work for subclassed model, using tf.keras.experimental.export_saved_model\n #self.save_model()\n\n except (Exception, ArithmeticError) as err: #pylint: disable=broad-except\n template = \"An exception of type {0} occurred. 
Arguments:\\n{1!r}\"\n message = template.format(type(err).__name__, err.args)\n logging.error(message)\n raise err\n\n finally:\n # Clear memory\n K.clear_session()\n logging.info(\"Ending time: {}\".format(\n datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n\n #pylint: disable=unused-argument,too-many-locals\n def infer(self, yield_single_examples=False):\n ''' only for infer '''\n #load data\n mode = utils.INFER\n # data must be init before model build\n infer_ds, infer_task = self.input_data(mode=mode)\n infer_gen = tf.data.make_one_shot_iterator(infer_ds)\n\n self.model_fn(mode=mode)\n assert self._built\n\n #load model\n infer_func = self.get_metric_func()\n\n for _ in range(len(infer_task)):\n batch_data = K.get_session().run(infer_gen.get_next()[0])\n batch_input = batch_data['inputs']\n batch_uttid = batch_data['uttids'].tolist()\n batch_predict = infer_func(batch_input)[0]\n batch_decode = py_ctc.ctc_greedy_decode(batch_predict, 0, unique=True)\n for utt_index, uttid in enumerate(batch_uttid):\n logging.info(\"utt ID: {}\".format(uttid))\n logging.info(\"infer result: {}\".format(batch_decode[utt_index]))\n\n def export_model(self):\n '''export saved_model'''\n raise NotImplementedError()\n", "# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Task class for text classification.\"\"\"\n\nimport collections\nimport tensorflow as tf\nfrom absl import logging\n\nfrom delta.data.task.base_text_task import TextTask\nfrom delta.data.utils.common_utils import load_cls_raw_data\nfrom delta.data.utils.common_utils import load_one_label_dataset\nfrom delta.data.utils.common_utils import load_dense_dataset\nfrom delta.data.utils.common_utils import load_npy\nfrom delta.data.preprocess.utils import load_vocab_dict\nfrom delta import utils\nfrom delta.utils.register import registers\nfrom delta.layers.utils import compute_sen_lens\n\n# pylint: disable=too-many-instance-attributes, too-many-locals\n\n\[email protected]\nclass TextClsTask(TextTask):\n \"\"\"Task class for text classification.\"\"\"\n\n def __init__(self, config, mode):\n super().__init__(config, mode)\n\n self.vocab_min_frequency = self.task_config['vocab_min_frequency']\n self.text_vocab_file_path = self.task_config['text_vocab']\n self.label_vocab_file_path = self.task_config['label_vocab']\n self.max_seq_len = self.task_config['max_seq_len']\n self.num_classes = self.task_config['classes']['num_classes']\n self.split_token = self.model_config.get(\"split_token\", \"\")\n self.use_dense = self.task_config[\"use_dense\"]\n if self.use_dense:\n self.dense_input_dim = self.task_config[\"dense_input_dim\"]\n self.dense_npy = config[\"data\"][self.mode][\"dense_npy\"]\n self.paths = self.data_config[mode]['paths']\n self.paths_after_pre_process = [\n one_path + \".after\" for one_path in self.paths\n ]\n self.prepare()\n\n def 
generate_data(self):\n \"\"\"Generate data for offline training.\"\"\"\n\n text, label = load_cls_raw_data(\n paths=self.paths_after_pre_process, mode=self.mode)\n\n text_placeholder = tf.placeholder(tf.string, shape=(None,), name=\"text\")\n label_placeholder = tf.placeholder(tf.string, name=\"label\")\n self.init_feed_dict[text_placeholder] = text\n self.init_feed_dict[label_placeholder] = label\n # logging.debug(\"init_feed_dict: {}\".format(self.init_feed_dict))\n\n text_ds = tf.data.Dataset.from_tensor_slices(text_placeholder)\n input_pipeline_func = self.get_input_pipeline(for_export=False)\n\n text_ds = text_ds.map(\n input_pipeline_func, num_parallel_calls=self.num_parallel_calls)\n\n text_size_ds = text_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n text_ds = tf.data.Dataset.zip((text_ds, text_size_ds))\n\n if self.use_dense:\n dense = load_npy(self.dense_npy)\n dense_ds = load_dense_dataset(dense)\n\n if self.infer_without_label:\n if self.use_dense:\n data_set = tf.data.Dataset.zip((text_ds, dense_ds))\n else:\n data_set = text_ds\n else:\n label_ds = load_one_label_dataset(label_placeholder, self.config)\n if self.use_dense:\n data_set = tf.data.Dataset.zip((text_ds, dense_ds, label_ds))\n else:\n data_set = tf.data.Dataset.zip((text_ds, label_ds))\n\n vocab_dict = load_vocab_dict(self.text_vocab_file_path)\n vocab_size = len(vocab_dict)\n data_size = len(text)\n if self.split_token != \"\":\n if self.split_token not in vocab_dict:\n raise ValueError(\n \"The Model uses split token: {}, not in corpus.\".format(\n self.split_token))\n self.config['data']['split_token'] = int(vocab_dict[self.split_token])\n self.config['data']['vocab_size'] = vocab_size\n self.config['data']['{}_data_size'.format(self.mode)] = data_size\n\n return data_set\n\n def feature_spec(self):\n \"\"\"Get shapes for feature.\"\"\"\n feature_shapes = [(tf.TensorShape([self.max_seq_len]), tf.TensorShape([]))]\n if self.use_dense:\n feature_shapes.append(tf.TensorShape(self.dense_input_dim))\n if not self.infer_without_label:\n feature_shapes.append(tf.TensorShape([self.num_classes]))\n if len(feature_shapes) == 1:\n return feature_shapes[0]\n return tuple(feature_shapes)\n\n def export_inputs(self):\n \"\"\"Inputs for exported model.\"\"\"\n vocab_dict = load_vocab_dict(self.text_vocab_file_path)\n vocab_size = len(vocab_dict)\n if self.split_token != \"\":\n if self.split_token not in vocab_dict:\n raise ValueError(\n \"The Model uses split token: {}, not in corpus.\".format(\n self.split_token))\n self.config['data']['split_token'] = int(vocab_dict[self.split_token])\n self.config['data']['vocab_size'] = vocab_size\n\n input_sentence = tf.placeholder(\n shape=(None,), dtype=tf.string, name=\"input_sentence\")\n\n input_pipeline_func = self.get_input_pipeline(for_export=True)\n\n token_ids = input_pipeline_func(input_sentence)\n token_ids_len = tf.map_fn(lambda x: compute_sen_lens(x, padding_token=0),\n token_ids)\n\n export_data = {\n \"export_inputs\": {\n \"input_sentence\": input_sentence\n },\n \"model_inputs\": {\n \"input_x\": token_ids,\n \"input_x_len\": token_ids_len\n }\n }\n\n if self.use_dense:\n input_dense = tf.placeholder(\n shape=(None,), dtype=tf.float32, name=\"input_dense\")\n export_data[\"export_inputs\"][\"input_dense\"] = input_dense\n\n return export_data\n\n def dataset(self):\n \"\"\"Data set function\"\"\"\n\n data_set = self.generate_data()\n logging.debug(\"data_set: {}\".format(data_set))\n if self.mode 
== 'train':\n if self.need_shuffle:\n # shuffle batch size and repeat\n logging.debug(\"shuffle and repeat dataset ...\")\n data_set = data_set.apply(\n tf.data.experimental.shuffle_and_repeat(\n buffer_size=self.shuffle_buffer_size, count=None))\n else:\n logging.debug(\"repeat dataset ...\")\n data_set = data_set.repeat(count=None)\n\n feature_shape = self.feature_spec()\n logging.debug(\"feature_shape: {}\".format(feature_shape))\n data_set = data_set.padded_batch(\n batch_size=self.batch_size, padded_shapes=feature_shape)\n\n data_set = data_set.prefetch(self.num_prefetch_batch)\n\n iterator = data_set.make_initializable_iterator()\n\n # pylint: disable=unused-variable\n if self.infer_without_label:\n if self.use_dense:\n (input_x, input_x_len), input_dense = iterator.get_next()\n else:\n input_x, input_x_len = iterator.get_next()\n else:\n if self.use_dense:\n (input_x, input_x_len), input_dense, input_y = iterator.get_next()\n else:\n (input_x, input_x_len), input_y = iterator.get_next()\n\n input_x_dict = collections.OrderedDict([(\"input_x\", input_x)])\n return_dict = {\n \"input_x_dict\": input_x_dict,\n \"input_x_len\": input_x_len,\n \"iterator\": iterator,\n \"init_feed_dict\": self.init_feed_dict\n }\n\n if self.use_dense:\n input_x_dict[\"input_dense\"] = input_dense\n\n if not self.infer_without_label:\n return_dict[\"input_y_dict\"] = collections.OrderedDict([(\"input_y\",\n input_y)])\n\n return return_dict\n", "# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Solver for sequence labeling model in raw tensorflow.\"\"\"\n\n# pylint: disable=too-many-instance-attributes, not-context-manager, bad-continuation, no-name-in-module\n\nimport re\nimport tensorflow as tf\nfrom absl import logging\nfrom tensorflow.contrib.crf import crf_decode\nfrom delta.utils.register import registers\nfrom delta.utils.solver.raw_solver import RawSolver\n\n\[email protected]\nclass PretrainRawSeqLabelSolver(RawSolver):\n \"\"\"Solver for raw tensorflow model.\"\"\"\n\n def build_output(self, model): # pylint: disable=no-self-use\n \"\"\"\n Build the output of the model.\n `score` and `input_y` are for loss calculation.\n `preds` and `y_ground_truth` are for metric calculation.\n \"\"\"\n model.preds, score = crf_decode(model.logits, model.transitions,\n model.input_x_len)\n\n model.score = tf.identity(score, name=\"score\")\n model.y_ground_truth = model.input_y\n if model.use_pretrained_model:\n logging.info(\"initialize_pretrained_model_variables\")\n self.initialize_pretrained_model_variables(model.pretrained_model_path,\n model.pretrained_model_mode)\n\n def build_export_output(self, model): # pylint: disable=no-self-use\n \"\"\"\n Build the output of the model.\n `score` and `input_y` are for loss calculation.\n `preds` and `y_ground_truth` are for metric calculation.\n \"\"\"\n model.preds, score = 
crf_decode(model.logits, model.transitions,\n model.input_x_len)\n\n model.score = tf.identity(score, name=\"score\")\n model.output_dict = {\"score\": model.score, \"preds\": model.preds}\n\n def get_assignment_map_from_checkpoint(self, all_variables, init_checkpoint):\n \"\"\"\n Get the map of the current variables and init checkpoint variables.\n \"\"\"\n assignment_map = {}\n name_to_var = {}\n init_set = set()\n for var in all_variables:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_var[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n for name, var_shape in init_vars:\n for k, v in name_to_var.items():\n if re.findall(name + '$', k):\n assignment_map[name] = name_to_var[k]\n init_set.add(name_to_var[k])\n return assignment_map, init_set\n\n def remove_trainable_variables(self, init_set):\n \"\"\"\n Make the variables of the pretrained model untrainable\n \"\"\"\n\n variables_to_untrain = list()\n trainable_collection = tf.get_collection_ref(\n tf.GraphKeys.TRAINABLE_VARIABLES)\n for var in trainable_collection:\n if var in init_set:\n variables_to_untrain.append(var)\n\n for var in variables_to_untrain:\n trainable_collection.remove(var)\n\n def initialize_pretrained_model_variables(self, pretrained_model_path,\n pretrained_model_mode):\n \"\"\"\n Initialize the variables of the pretrained model\n according to fine-tune or feature mode\n \"\"\"\n all_variables = tf.get_collection_ref(tf.GraphKeys.GLOBAL_VARIABLES)\n init_checkpoint = pretrained_model_path\n pretrained_assignment_map, init_set = self.get_assignment_map_from_checkpoint(\n all_variables, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, pretrained_assignment_map)\n if pretrained_model_mode == \"feature\":\n self.remove_trainable_variables(init_set)\n" ]
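The solver code above turns per-frame network scores into label sequences with py_ctc.ctc_greedy_decode(batch_predict, 0, unique=True). For readers without the delta codebase on hand, here is a minimal numpy sketch of what greedy CTC decoding does for a single utterance (argmax per frame, collapse repeated labels, drop the blank id); this mirrors the standard algorithm, not delta's exact batched implementation, and the function name here is purely illustrative.

import numpy as np

def ctc_greedy_decode_single(logits, blank_id=0):
    # logits: (time, num_classes) scores for one utterance
    best = np.argmax(logits, axis=-1)
    decoded, prev = [], None
    for label in best:
        # keep a label only when it differs from the previous frame and is not the blank
        if label != prev and label != blank_id:
            decoded.append(int(label))
        prev = label
    return decoded

frames = np.array([[0.1, 0.8, 0.1],    # argmax 1
                   [0.2, 0.7, 0.1],    # repeated 1, collapsed
                   [0.9, 0.05, 0.05],  # blank (id 0), dropped
                   [0.1, 0.1, 0.8]])   # argmax 2
print(ctc_greedy_decode_single(frames))  # -> [1, 2]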
[ [ "tensorflow.TensorShape", "numpy.array", "tensorflow.test.main", "numpy.ones" ], [ "tensorflow.test.main" ], [ "numpy.shape", "tensorflow.test.main" ], [ "tensorflow.device", "tensorflow.keras.utils.multi_gpu_model", "tensorflow.reduce_mean", "tensorflow.keras.backend.get_session", "tensorflow.RunOptions", "tensorflow.data.make_one_shot_iterator", "tensorflow.RunMetadata", "tensorflow.keras.callbacks.ReduceLROnPlateau", "tensorflow.keras.optimizers.Adadelta", "tensorflow.keras.backend.function", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.callbacks.TerminateOnNaN", "tensorflow.keras.backend.clear_session", "tensorflow.keras.callbacks.TensorBoard", "tensorflow.keras.callbacks.EarlyStopping" ], [ "tensorflow.TensorShape", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.placeholder", "tensorflow.data.Dataset.zip", "tensorflow.data.experimental.shuffle_and_repeat" ], [ "tensorflow.get_collection_ref", "tensorflow.identity", "tensorflow.train.init_from_checkpoint", "tensorflow.train.list_variables", "tensorflow.contrib.crf.crf_decode" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
syushinski/ra-accuracy-research
[ "b9114b610e38959b261275c7b67d3973645e5c47" ]
[ "python/dtextract/data/data.py" ]
[ "# Copyright 2015-2016 Stanford University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:#www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nimport numpy as np\nimport pandas as pd\nfrom ..util.log import *\n\nCAT = 0 # categorical data type\nNUM = 1 # numeric data type\nID = 2 # identifier (to be ignored)\nNUM_RES = 3 # numeric response\nCAT_RES = 4 # categorical response (only 2 categories currently handled!)\n\n# Splits the dataset into two randomly selected sets according to\n# the given proportion.\n#\n# parameters/returns:\n# df : pandas.DataFrame\n# prop : float (the proportion of training points, typically ~70%)\n# return : (DataTable, DataTable) (the (training, test) datasets)\ndef split(df, prop):\n # Checks\n if prop < 0.0 or prop > 1.0:\n raise Exception('Invalid proportion: ' + str(prop))\n\n # Step 1: Shuffle the row indices\n rows = [i for i in range(len(df))]\n random.shuffle(rows)\n\n # Step 2: Split into training and test rows\n splitPoint = int(prop * len(df))\n trainRows = rows[:splitPoint]\n testRows = rows[splitPoint:]\n\n # Step 2: Split data frame into train and test sets\n trainDf = df.iloc[trainRows,:]\n testDf = df.iloc[testRows,:]\n\n return (trainDf, testDf)\n\ndef constructDataMatrix(df, res, catFeats):\n # Step 1: Construct covariate and response columns\n covCols = []\n resCols = []\n catFeatIndices = [[] for cFeature in catFeats]\n numericFeatIndices = []\n for i in range(len(df.columns)):\n log('i:' + str(i) + ' df.columns[i]:' + str(df.columns[i]), DEBUG)\n if df.columns[i] != res:\n categorical = False\n for j in range(len(catFeats)):\n cF = catFeats[j]\n if str(df.columns[i]).startswith(str(cF) + '_'):\n categorical = True\n catFeatIndices[j].append(len(covCols))\n log('i:' + str(i) + ' df.columns[i]:' + \\\n str(df.columns[i]) + ' catFeat:'+ str(cF), DEBUG)\n if not categorical:\n numericFeatIndices.append(len(covCols))\n covCols.append(i)\n else:\n resCols.append(i)\n if len(resCols) != 1:\n raise Exception('Invalid columns!')\n\n # Step 2: Construct covariate and response data frames\n covDf = df.iloc[:,covCols]\n resDf = df.iloc[:,resCols]\n\n X = np.array(covDf.values)\n y = np.array(resDf.values[:,0])\n\n return (X, y, catFeatIndices, numericFeatIndices)\n\n# Parse the given CSV file and return a pandas DataFrame\n# representation of the data.\n#\n# Note: The dataProcessors field is a list of lambdas that\n# are applied to the data in each column (in particular,\n# it should be the same length as the list dataTypes).\n# This field can be used to preprocess the data.\n#\n# parameters/returns:\n# path : str (path of the CSV file)\n# hasHeader : bool (whether the dataset has a header to ignore)\n# dataTypes : [int] (categorical, numeric, or identifier)\n# return : pandas.DataFrame\ndef readCsv(path, hasHeader, dataTypes, delim_whitespace, CORELS=False):\n # Step 1: Parse the CSV\n\n log('Reading file: ' + path, INFO)\n\n # Step 1a: Skip the first row if it is the header\n skiprows = 1 if hasHeader else 0\n\n # Step 1b: Initialize data structures\n cur = 0\n names = [] # names\n 
dtype = {} # data types\n impute = [] # impute these columns\n dummies = [] # convert these columns to indicators\n usecols = [] # list of columns to use\n res = None\n isCatRes = None\n\n for i in range(len(dataTypes)):\n if not _isSkip(dataTypes[i]):\n # Step 1c: Append the name\n names.append(cur)\n # Step 1d: Construct the data type\n dtype[cur] = _toDType(dataTypes[i])\n # Step 1e: Use this column\n usecols.append(i)\n # Step 1f: Add to impute if numeric\n if _isImpute(dataTypes[i]):\n impute.append(cur)\n # Step 1g: Add to dummies if categorical\n if _isDummy(dataTypes[i]):\n dummies.append(cur)\n # Step 1h: Handle response\n if _isResponse(dataTypes[i]):\n if res is not None:\n raise Exception('Multiple response variables!')\n res = cur\n isCatRes = _isCatResponse(dataTypes[i])\n # Step 1i: Increment the name\n cur += 1\n else:\n names.append(-1)\n\n if res is None:\n raise Exception('No response variable!')\n\n # Step 1j: Parse the CSV\n try:\n df = pd.read_csv(path, delim_whitespace=delim_whitespace, header=None, skiprows=skiprows, usecols=usecols, names=names, dtype=dtype, na_values=['?']) if delim_whitespace else pd.read_csv(path, header=None, skiprows=skiprows, usecols=usecols, names=names, dtype=dtype, na_values=['?'])\n except Exception:\n df = pd.read_csv(path, sep=';', header=None, skiprows=skiprows, usecols=usecols, names=names, dtype=dtype, na_values=['?'])\n log('Done!', INFO)\n log('Rows read: ' + str(len(df)), INFO)\n\n # Step 2: Impute missing values for floating points\n for i in impute:\n df[i].fillna(df[i].mean(), inplace=True)\n if CORELS:\n # binarize the column against its mean (a single assignment; the old per-row loop just reassigned the whole column repeatedly)\n df[i] = (df[i] > df[i].mean()) * 1\n\n\n\n # Step 3: Convert categorical to indicator\n df = pd.get_dummies(df, columns=dummies, dummy_na=True)\n\n # Step 4: If categorical response, convert to integer\n resMap = {}\n if isCatRes:\n # Step 4a: Construct response map\n for val in df[res]:\n if val not in resMap:\n resMap[val] = len(resMap)\n # Step 4b: Map response\n df[res] = df[res].apply(lambda val: resMap[val])\n\n log('Columns: ' + str(len(df.columns)), INFO)\n log('Column names:\\n' + ''.join((str(i) + ': ' + str(col) + '\\n' for (i, col) in zip(range(len(df.columns)), df.columns))), INFO)\n\n return (df, res, resMap, dummies)\n\n# Checks whether the datatype should be skipped (only ID).\ndef _isSkip(dataType):\n return dataType == ID\n\n# Checks whether the data type should be imputed.\ndef _isImpute(dataType):\n return dataType == NUM\n\n# Checks whether the data type should be converted from categorical to indicators.\ndef _isDummy(dataType):\n return dataType == CAT\n\n# Checks whether the data type is a response type.\ndef _isResponse(dataType):\n return dataType == NUM_RES or dataType == CAT_RES\n\n# Checks whether the data type is a categorical response.\ndef _isCatResponse(dataType):\n return dataType == CAT_RES\n\n# Converts a data type to a pandas type.\ndef _toDType(dataType):\n if dataType == CAT or dataType == CAT_RES:\n return str\n elif dataType == NUM or dataType == NUM_RES:\n return np.float64\n elif dataType == ID:\n raise Exception('Should not consider ID types')\n else:\n raise Exception('Unknown data type: ' + str(dataType))\n" ]
[ [ "numpy.array", "pandas.read_csv", "pandas.get_dummies" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
BurntSushi/clam
[ "e374c08d016018d6be4d2fc4e8b0999b52f82929" ]
[ "pyclam/tests/test_manifold.py" ]
[ "import random\nimport unittest\nfrom tempfile import TemporaryFile\n\nimport numpy as np\nfrom scipy.spatial.distance import cdist\n\nfrom pyclam import datasets, criterion\nfrom pyclam.manifold import Manifold, Cluster\n\nnp.random.seed(42)\nrandom.seed(42)\n\n\nclass TestManifold(unittest.TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n cls.data, cls.labels = datasets.random(n=1000, dimensions=3)\n cls.manifold = Manifold(cls.data, 'euclidean')\n cls.manifold.build(\n criterion.MaxDepth(8),\n criterion.LFDRange(60, 50),\n )\n return\n\n def test_init(self):\n m = Manifold(self.data, 'euclidean')\n self.assertEqual(1, len(m.layers))\n\n m = Manifold(self.data, 'euclidean', [1, 2, 3])\n self.assertListEqual([1, 2, 3], m.argpoints)\n\n fraction = 0.2\n m = Manifold(self.data, 'euclidean', fraction)\n self.assertEqual(int(len(self.data) * fraction), len(m.argpoints))\n\n with self.assertRaises(ValueError):\n # noinspection PyTypeChecker\n Manifold(self.data, 'euclidean', ['a', 'b', 'c'])\n\n with self.assertRaises(ValueError):\n # noinspection PyTypeChecker\n Manifold(self.data, 'euclidean', 'apples')\n return\n\n def test_eq(self):\n self.assertEqual(self.manifold, self.manifold)\n other = Manifold(self.data, 'euclidean', argpoints=0.2).build(\n criterion.MaxDepth(10),\n criterion.LFDRange(60, 50),\n )\n self.assertNotEqual(self.manifold, other)\n self.assertEqual(other, other)\n\n other = Manifold(self.data, 'cosine')\n self.assertNotEqual(self.manifold, other)\n return\n\n def test_iter(self):\n self.assertListEqual(self.manifold.layers, list(iter(self.manifold)))\n return\n\n def test_str(self):\n self.assertIsInstance(str(self.manifold), str)\n return\n\n def test_repr(self):\n self.assertIsInstance(repr(self.manifold), str)\n return\n\n def test_find_points(self):\n self.assertEqual(1, len(self.manifold.find_points(self.data[0], radius=0.0)))\n self.assertLessEqual(1, len(self.manifold.find_points(self.data[0], radius=1.0)))\n\n point = self.data[0]\n distances = [(p, d) for p, d in zip(\n range(self.data.shape[0]),\n cdist(np.asarray([point]), self.data, self.manifold.metric)[0],\n )]\n\n for radius in [0.25, 0.5, 1.0, 2.0, 5.0]:\n naive_results = {(p, d) for p, d in distances if d <= radius}\n results = self.manifold.find_points(point, radius)\n self.assertSetEqual(naive_results, set(results))\n\n return\n\n def test_find_clusters(self):\n self.manifold.build_tree()\n self.assertEqual(1, len(self.manifold.find_clusters(self.data[0], radius=0.0, depth=-1)))\n return\n\n def test_build(self):\n m = Manifold(self.data, 'euclidean').build(criterion.MaxDepth(1))\n self.assertEqual(2, len(m.layers))\n m.build(criterion.MaxDepth(2))\n self.assertEqual(3, len(m.layers))\n self.assertEqual(len(self.data), m.graph.population)\n return\n\n def test_build_tree(self):\n m = Manifold(self.data, 'euclidean')\n self.assertEqual(1, len(m.layers))\n\n m.build_tree(criterion.AddLevels(2))\n self.assertEqual(3, len(m.layers))\n\n # MaxDepth shouldn't do anything in build_tree if we're beyond that depth already.\n m.build_tree(criterion.MaxDepth(1))\n self.assertEqual(3, len(m.layers))\n\n m.build_tree()\n self.assertEqual(len(self.data), m.layers[-1].cardinality)\n return\n\n def test_ancestry(self):\n name = '0101'\n lineage = self.manifold.ancestry(name)\n [self.assertEqual(name[:len(l.name)], l.name) for i, l in enumerate(lineage)]\n lineage = self.manifold.ancestry(lineage[-1])\n [self.assertEqual(name[:len(l.name)], l.name) for i, l in enumerate(lineage)]\n return\n\n def 
test_select(self):\n cluster = None\n for cluster in self.manifold.layers[-1]:\n self.assertIsInstance(self.manifold.select(cluster.name), Cluster)\n else:\n with self.assertRaises(ValueError):\n self.manifold.select(cluster.name + '01')\n with self.assertRaises(ValueError):\n self.manifold.select(cluster.name + '01110110')\n return\n\n def test_neighbors(self):\n for dataset in [datasets.bullseye, ]: # datasets.spiral_2d, datasets.tori, datasets.skewer, datasets.line]:\n data, labels = dataset()\n manifold = Manifold(data, 'euclidean')\n manifold.build(\n criterion.MaxDepth(12),\n criterion.LFDRange(60, 50),\n )\n\n for cluster in manifold.graph.clusters:\n potential_neighbors = [c for c in manifold.graph.clusters if c.name != cluster.name]\n argcenters = [c.argmedoid for c in potential_neighbors]\n distances = list(cluster.distance_from(argcenters))\n radii = [cluster.radius + c.radius for c in potential_neighbors]\n true_neighbors = {c: d for c, d, r in zip(potential_neighbors, distances, radii) if d <= r}\n neighbors = {edge.neighbor: edge.distance for edge in manifold.graph.edges[cluster]}\n\n extras = set(neighbors.keys()) - set(true_neighbors.keys())\n self.assertEqual(0, len(extras), msg=f'got extra neighbors: optimal, true {len(true_neighbors)}, actual {len(neighbors)}\\n'\n + \"\\n\".join([f\"{c.name}, {cluster.radius + c.radius:.6f}\" for c in extras]))\n\n missed = set(true_neighbors.keys()) - set(neighbors.keys())\n self.assertEqual(0, len(missed), msg=f'missed some neighbors: optimal, true {len(true_neighbors)}, actual {len(neighbors)}\\n'\n + '\\n'.join([f'{c.name}, {cluster.radius + c.radius:.6f}' for c in missed]))\n return\n\n def test_dump(self):\n with TemporaryFile() as fp:\n self.manifold.dump(fp)\n return\n\n def test_load(self):\n original = self.manifold\n with TemporaryFile() as fp:\n original.dump(fp)\n fp.seek(0)\n loaded = Manifold.load(fp, self.data)\n self.assertEqual(original, loaded)\n self.assertEqual(set(original.layers[-1]), set(loaded.layers[-1]))\n self.assertEqual(original.graph, loaded.graph)\n\n for layer in loaded.layers:\n for cluster in layer:\n self.assertIn('radius', cluster.cache)\n self.assertIn('argradius', cluster.cache)\n self.assertIn('argsamples', cluster.cache)\n self.assertIn('argmedoid', cluster.cache)\n self.assertIn('local_fractal_dimension', cluster.cache)\n return\n\n def test_partition_backends(self):\n data = datasets.random(n=100, dimensions=5)[0]\n m_single = Manifold(data, 'euclidean')._partition_single([criterion.MaxDepth(5)])\n m_thread = Manifold(data, 'euclidean')._partition_threaded([criterion.MaxDepth(5)])\n self.assertEqual(m_single, m_thread)\n return\n\n def test_find_knn(self):\n data = datasets.bullseye()[0]\n point = data[0]\n points = sorted([(d, p) for p, d in zip(range(data.shape[0]),\n cdist(np.asarray([point]), data, 'euclidean')[0])])\n\n m = Manifold(data, 'euclidean')\n m.build_tree(criterion.MinPoints(10), criterion.MaxDepth(10))\n\n ks = list(range(10))\n ks.extend(range(10, data.shape[0], 1000))\n for k in ks:\n naive_results = {p for d, p in points[:k]}\n results = m.find_knn(point, k)\n self.assertEqual(k, len(results))\n self.assertSetEqual(naive_results, {p for p, _ in results})\n" ]
[ [ "numpy.asarray", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BecauseWeCanStudios/LEGOVNO
[ "97654da906e5d8ee999fea6dbc062914cc5710b2" ]
[ "source/utils/apply_backgrounds.py" ]
[ "#!/usr/bin/python\nimport os\nimport glob\nimport utils\nimport random\nimport argparse\nimport platform\nimport threading\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom math import ceil\nfrom itertools import chain\n\ndef apply_background(back, img, path):\n\tback.paste(img, (0, 0), img)\n\tutils.make_dirs(path)\n\tback.save(path)\n\ndef apply_noise_func(min, max):\n\tdef f(img):\n\t\tarr = np.array(img).astype(int)\n\t\tarr += np.random.randint(min, max, (*img.size, 3))\n\t\treturn Image.fromarray(arr.clip(0, 255).astype('uint8'))\n\treturn f\n\ndef choose_backgrounds(backgrounds, count):\n\treturn [(Image.open(file).convert('RGB'), os.path.splitext(os.path.basename(file))[0]) for file in random.sample(backgrounds, count)]\n\ndef function(n, images, backgrounds, args):\n\tpbar = tqdm(images, position=n)\n\tfor file in pbar:\n\t\tpbar.set_description(utils.cut_string(file))\n\t\timg = Image.open(file)\n\t\tp = os.path.join(args.output, os.path.dirname(file))\n\t\tn = os.path.splitext(os.path.basename(file))[0]\n\t\tfor back, name in backgrounds:\n\t\t\tapply_background(noise(back.resize(img.size, Image.ANTIALIAS)), img, os.path.join(p, './{}_{}.png'.format(n, name)))\n\t\tif args.mask:\n\t\t\tImage.frombytes('1', img.size, np.packbits(np.array(img)[::,::,3].astype(bool), axis=1)).save(os.path.join(p, './{}.mask'.format(n)), 'png')\n\t\tfor i in range(args.random_backgrounds):\n\t\t\tapply_background(Image.fromarray(np.random.randint(0, 256, (*img.size, 3), 'uint8')), img, os.path.join(p, './{}_{}.png'.format(n, i)))\n\t\tif args.rebackground:\n\t\t\tbackgrounds = choose_backgrounds(args.backgrounds, args.backgrounds_number)\n\nparser = argparse.ArgumentParser(description='Enlarge your dataset with new backgrounds and/or generate masks')\nparser.add_argument('filenames', help='Image filenames', nargs='+', metavar='IMG')\nparser.add_argument('-b', '--backgrounds', default=glob.glob('./backgrounds/**', recursive=True), help='Background filenames', nargs='+', metavar='BG')\nparser.add_argument('-n', '--noise', nargs=2, help='Apply noise [MIN, MAX)', metavar=('MIN', 'MAX'), type=int)\nparser.add_argument('-m', '--mask', action='store_true', help='Generate mask')\nparser.add_argument('-t', '--threads', type=int, default=1, help='Number of threads')\nparser.add_argument('-bn', '--backgrounds_number', type=int, default=1, help='Apply N backgrounds', metavar='N')\nparser.add_argument('-rb', '--random_backgrounds', type=int, default=0, help='Generate K images with random noise backgrounds', metavar='K')\nparser.add_argument('--output', help='Output dir', type=str, default='./result', metavar='OUT')\nparser.add_argument('--rebackground', action='store_true', help='Choose new backgrounds for every image')\nargs = parser.parse_args()\n\nif platform.system() == 'Windows':\n\targs.filenames = list(chain(*(glob.glob(i, recursive=True) for i in args.filenames)))\n\targs.backgrounds = list(chain(*(glob.glob(i, recursive=True) for i in args.backgrounds)))\n\nargs.filenames = list(filter(lambda x: os.path.isfile(x), args.filenames))\nargs.backgrounds = list(filter(lambda x: os.path.isfile(x), args.backgrounds))\n\nif args.backgrounds_number < 0:\n\targs.backgrounds_number = len(args.backgrounds)\n\nbackgrounds = choose_backgrounds(args.backgrounds, args.backgrounds_number)\nnoise = apply_noise_func(args.noise[0], args.noise[1]) if args.noise else lambda x: x\n\nthreads = []\ntn = ceil(len(args.filenames) / args.threads)\n\nfor i in 
range(args.threads):\n\tthreads.append(threading.Thread(target=function, args=(i, args.filenames[i * tn:(i + 1) * tn], backgrounds, args)))\n\tthreads[-1].start()\n\nfor i in threads:\n\ti.join()\n\nfor i in threads:\n\tprint()\n" ]
[ [ "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
leopauly/Observation-Learning-Simulations
[ "462c04a87c45aae51537b8ea5b44646afa31d3a5", "462c04a87c45aae51537b8ea5b44646afa31d3a5", "462c04a87c45aae51537b8ea5b44646afa31d3a5", "462c04a87c45aae51537b8ea5b44646afa31d3a5", "462c04a87c45aae51537b8ea5b44646afa31d3a5", "462c04a87c45aae51537b8ea5b44646afa31d3a5", "462c04a87c45aae51537b8ea5b44646afa31d3a5", "462c04a87c45aae51537b8ea5b44646afa31d3a5", "462c04a87c45aae51537b8ea5b44646afa31d3a5" ]
[ "sandbox/bradly/third_person/algos/trainer.py", "sandbox/bradly/third_person/launchers/cyberpunk_aws_gail.py", "rllab/envs/mujoco/mujoco_env.py", "S2l/Thesis_Ch4/Results/Task1/V/runtime_data/correlation_metric_val_graph.py", "S2l/Thesis_Ch3/Exp2_push3dof/push3dof_train_ddpg_new1.py", "S2l/Initial works/Exp2_push3dof/gym_push3dofreal_train_1.py", "ablations_code/ablations.py", "S2l/Thesis_Ch3/Exp1_reach3dof/Scripts/reach7dof_train_proposed_thesis_reward_eval_trajectorymaps.py", "sandbox/bradly/third_person/launchers/cyberpunk_launcher_newreacher.py" ]
[ "from rllab.core.serializable import Serializable\nfrom rllab.misc import logger\nimport numpy as np\nimport tensorflow as tf\nimport pyprind\nfrom rllab.sampler.utils import rollout\nfrom sandbox.rocky.analogy.policies.apply_demo_policy import ApplyDemoPolicy\nfrom sandbox.rocky.analogy.dataset import SupervisedDataset\nfrom sandbox.rocky.analogy.policies.normalizing_policy import NormalizingPolicy\nfrom rllab.sampler.stateful_pool import singleton_pool\nimport itertools\nimport random\nimport contextlib\n\ndef unwrap(env):\n if isinstance(env, TfEnv):\n return unwrap(env.wrapped_env)\n return env\n\[email protected]\ndef set_seed_tmp(seed=None):\n if seed is None:\n yield\n else:\n state = random.getstate()\n np_state = np.random.get_state()\n random.seed(seed)\n np.random.seed(seed)\n yield\n np.random.set_state(np_state)\n random.setstate(state)\n\n\ndef collect_demo(G, demo_seed, analogy_seed, target_seed, env_cls, demo_policy_cls, horizon):\n demo_env = env_cls(seed=demo_seed, target_seed=target_seed)\n analogy_env = env_cls(seed=analogy_seed, target_seed=target_seed)\n demo_path = rollout(demo_env, demo_policy_cls(demo_env), max_path_length=horizon)\n analogy_path = rollout(analogy_env, demo_policy_cls(analogy_env), max_path_length=horizon)\n return demo_path, analogy_path, demo_env, analogy_env\n\n\n# A simple example hopefully able to train a feed-forward network\n\nclass Trainer(Serializable):\n def __init__(\n self,\n policy,\n env_cls,\n demo_policy_cls,\n shuffler=None,\n n_train_trajs=50,\n n_test_trajs=20,\n horizon=50,\n batch_size=10,\n n_epochs=100,\n n_passes_per_epoch=1,\n n_eval_trajs=10,\n learning_rate=1e-3,\n no_improvement_tolerance=5,\n plot=False,\n ):\n Serializable.quick_init(self, locals())\n self.env_cls = env_cls\n self.demo_policy_cls = demo_policy_cls\n self.shuffler = shuffler\n self.n_train_trajs = n_train_trajs\n self.n_test_trajs = n_test_trajs\n self.horizon = horizon\n self.policy = policy\n self.plot = plot\n self.batch_size = batch_size\n self.n_epochs = n_epochs\n self.n_passes_per_epoch = n_passes_per_epoch\n self.n_eval_trajs = n_eval_trajs\n self.learning_rate = learning_rate\n self.no_improvement_tolerance = no_improvement_tolerance\n\n def train(self):\n\n demo_seeds, analogy_seeds, target_seeds = np.random.randint(\n low=0, high=np.iinfo(np.int32).max, size=(3, self.n_train_trajs + self.n_test_trajs)\n )\n\n logger.log(\"Collecting trajectories\")\n progbar = pyprind.ProgBar(len(demo_seeds))\n\n data_list = []\n\n for data in singleton_pool.run_imap_unordered(\n collect_demo,\n [tuple(seeds) + (self.env_cls, self.demo_policy_cls, self.horizon)\n for seeds in zip(demo_seeds, analogy_seeds, target_seeds)]\n ):\n progbar.update()\n data_list.append(data)\n\n demo_paths, analogy_paths, demo_envs, analogy_envs = zip(*data_list)\n\n if progbar.active:\n progbar.stop()\n\n logger.log(\"Processing data\")\n\n all_data_pairs = [\n (\"demo_paths\", np.asarray(demo_paths)),\n (\"analogy_paths\", np.asarray(analogy_paths)),\n # These will be ignored during training since they appear last\n (\"demo_envs\", np.array(demo_envs)),\n (\"analogy_envs\", np.array(analogy_envs)),\n ]\n all_data_keys = [x[0] for x in all_data_pairs]\n all_data_vals = [x[1] for x in all_data_pairs]\n\n dataset = SupervisedDataset(\n inputs=all_data_vals,\n train_batch_size=self.batch_size,\n train_ratio=self.n_train_trajs * 1.0 / (self.n_train_trajs + self.n_test_trajs),\n shuffler=self.shuffler,\n )\n\n env = demo_envs[0]\n\n logger.log(\"Constructing optimization problem\")\n 
policy = self.policy\n policy = NormalizingPolicy(\n self.policy,\n *dataset.train.inputs[:2]\n )\n\n demo_obs_var = env.observation_space.new_tensor_variable(name=\"demo_obs\", extra_dims=2)\n demo_action_var = env.action_space.new_tensor_variable(name=\"demo_actions\", extra_dims=2)\n\n analogy_obs_var = env.observation_space.new_tensor_variable(name=\"analogy_obs\", extra_dims=2)\n analogy_action_var = env.action_space.new_tensor_variable(name=\"analogy_actions\", extra_dims=2)\n\n lr_var = tf.placeholder(dtype=tf.float32, shape=(), name=\"lr\")\n\n train_policy_action_var = policy.action_sym(\n analogy_obs_var,\n state_info_vars=dict(\n demo_obs=demo_obs_var,\n demo_action=demo_action_var\n ),\n phase='train'\n )\n test_policy_action_var = policy.action_sym(\n analogy_obs_var,\n state_info_vars=dict(\n demo_obs=demo_obs_var,\n demo_action=demo_action_var\n ),\n phase='test'\n )\n train_loss_var = tf.reduce_mean(tf.square(analogy_action_var - train_policy_action_var))\n test_loss_var = tf.reduce_mean(tf.square(analogy_action_var - test_policy_action_var))\n\n optimizer = tf.train.AdamOptimizer(learning_rate=lr_var)\n\n params = policy.get_params(trainable=True)\n\n grads_and_vars = optimizer.compute_gradients(train_loss_var, var_list=params)\n train_op = optimizer.apply_gradients(grads_and_vars)\n\n # Best average return achieved by the NN policy\n best_loss = np.inf\n # Best parameter for the NN policy\n best_params = None\n # Number of epochs without improvement compared to the best policy so far\n n_no_improvement = 0\n\n # Current learning rate\n learning_rate = self.learning_rate\n\n def to_feed(batch):\n batch_dict = dict(zip(all_data_keys, batch))\n demo_obs = np.asarray([p[\"observations\"] for p in batch_dict[\"demo_paths\"]])\n demo_actions = np.asarray([p[\"actions\"] for p in batch_dict[\"demo_paths\"]])\n analogy_obs = np.asarray([p[\"observations\"] for p in batch_dict[\"analogy_paths\"]])\n analogy_actions = np.asarray([p[\"actions\"] for p in batch_dict[\"analogy_paths\"]])\n return {\n demo_obs_var: demo_obs,\n demo_action_var: demo_actions,\n analogy_obs_var: analogy_obs,\n analogy_action_var: analogy_actions,\n lr_var: learning_rate,\n }\n\n logger.log(\"Launching TF session\")\n\n with tf.Session() as sess:\n logger.log(\"Initializing TF variables\")\n sess.run(tf.initialize_all_variables())\n logger.log(\"Initialized\")\n\n for epoch_idx in range(self.n_epochs):\n losses = []\n logger.log(\"Start epoch %d\" % epoch_idx)\n\n # Skip training for the first epoch\n if epoch_idx > 0:\n logger.log(\"Start training...\")\n progbar = pyprind.ProgBar(dataset.train.number_batches * self.n_passes_per_epoch)\n for _ in range(self.n_passes_per_epoch):\n for batch in dataset.train.iterate():\n _, loss = sess.run(\n [train_op, train_loss_var],\n feed_dict=to_feed(batch),\n )\n losses.append(loss)\n progbar.update()\n if progbar.active:\n progbar.stop()\n logger.log(\"Finished\")\n else:\n logger.log(\"Skipped training for the 0th epoch, to collect initial test statistics\")\n\n test_loss = sess.run(\n test_loss_var,\n feed_dict=to_feed(dataset.test.inputs),\n )\n\n test_dict = dict(zip(all_data_keys, dataset.test.inputs))\n\n # Evaluate performance\n\n eval_paths = []\n\n for idx, demo_path, analogy_env in zip(\n itertools.count(),\n test_dict[\"demo_paths\"],\n test_dict[\"analogy_envs\"],\n ):\n eval_paths.append(rollout(\n analogy_env, ApplyDemoPolicy(policy, demo_path), max_path_length=self.horizon,\n animated=self.plot and idx == 0,\n ))\n\n # import ipdb; 
ipdb.set_trace()\n\n if self.plot:\n rollout(\n analogy_env, ApplyDemoPolicy(policy, demo_path), max_path_length=self.horizon,\n animated=self.plot and idx == 0,\n )\n\n returns = [np.sum(p[\"rewards\"]) for p in eval_paths]\n\n avg_loss = np.mean(losses)\n\n # avg_train_loss = np.mean(train_losses)\n if avg_loss > best_loss:\n n_no_improvement += 1\n else:\n n_no_improvement = 0\n best_loss = avg_loss\n # collect best params\n best_params = sess.run(params)\n\n logger.record_tabular('Epoch', epoch_idx)\n logger.record_tabular(\"LearningRate\", learning_rate)\n logger.record_tabular(\"NoImprovementEpochs\", n_no_improvement)\n logger.record_tabular('AverageTrainLoss', avg_loss)\n logger.record_tabular('AverageTestLoss', test_loss)\n logger.record_tabular('AverageReturn', np.mean(returns))\n logger.record_tabular('MaxReturn', np.max(returns))\n logger.record_tabular('MinReturn', np.min(returns))\n logger.record_tabular('OracleAverageReturn', np.mean(\n [np.sum(p[\"rewards\"]) for p in test_dict[\"analogy_paths\"]]\n ))\n log_env = unwrap(analogy_envs[-1])\n log_envs = map(unwrap, test_dict[\"analogy_envs\"])\n log_env.log_analogy_diagnostics(eval_paths, log_envs)\n\n logger.dump_tabular()\n\n if n_no_improvement >= self.no_improvement_tolerance:\n learning_rate *= 0.5\n logger.log(\"No improvement for %d epochs. Reducing learning rate to %f\" % (n_no_improvement,\n learning_rate))\n n_no_improvement = 0\n # restore to best params\n sess.run([tf.assign(p, pv) for p, pv in zip(params, best_params)])\n\n logger.log(\"Saving itr params..\")\n\n save_params = dict(\n policy=policy,\n # save a version of the environment\n env=analogy_envs[-1],\n trainer=self,\n )\n logger.save_itr_params(epoch_idx, save_params)\n logger.log(\"Saved\")\n", "from sandbox.rocky.tf.algos.trpo import TRPO\nfrom rllab.baselines.linear_feature_baseline import LinearFeatureBaseline\nfrom rllab.envs.normalized_env import normalize\nfrom sandbox.rocky.tf.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer\nfrom sandbox.rocky.tf.optimizers.conjugate_gradient_optimizer import FiniteDifferenceHvp\nfrom sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy\nfrom sandbox.rocky.tf.envs.base import TfEnv\nfrom sandbox.bradly.third_person.policy.random_policy import RandomPolicy\nfrom sandbox.bradly.third_person.algos.cyberpunk_trainer_gail import CyberPunkTrainerGAIL\nfrom sandbox.bradly.third_person.discriminators.discriminator import ConvDiscriminator\nimport joblib\nimport tensorflow as tf\nfrom rllab.envs.gym_env import GymEnv\nimport pickle\nfrom rllab.misc.instrument import stub, run_experiment_lite\nimport rllab.misc.logger as logger\nclass CyberpunkAWSGAIL:\n\n def __init__(self, expert_env, novice_env, horizon, itrs, trajs, imsize, expert_pkl, **kwargs):\n self.expert_env = expert_env\n self.novice_env = novice_env\n self.horizon = horizon\n self.itrs = itrs\n self.trajs = trajs\n self.expert_pkl = expert_pkl\n self.imsize = imsize\n\n def train(self):\n\n expert_env = TfEnv(self.expert_env)#TfEnv(GymEnv(\"Pusher3DOF-v1\", force_reset=True, record_video=False))\n# expert_env = TfEnv(normalize(ReacherEnv()))\n novice_env = TfEnv(self.novice_env)#TfEnv(GymEnv(\"Pusher3DOFNoChange-v1\", force_reset=True, record_video=True))\n\n# novice_env = TfEnv(normalize(ReacherTwoEnv(), normalize_obs=True))\n expert_fail_pol = RandomPolicy(expert_env.spec)\n\n policy = GaussianMLPPolicy(\n name=\"novice_policy\",\n env_spec=novice_env.spec,\n init_std=10,\n # The neural network policy should have 
two hidden layers, each with 32 hidden units.\n hidden_sizes=(32, 32)\n )\n\n baseline = LinearFeatureBaseline(env_spec=expert_env.spec)\n\n algo = TRPO(\n env=novice_env,\n policy=policy,\n baseline=baseline,\n batch_size=4000,\n max_path_length=self.horizon,\n n_itr=self.itrs,\n discount=0.99,\n step_size=0.01,\n optimizer=ConjugateGradientOptimizer(hvp_approach=FiniteDifferenceHvp(base_eps=1e-5))\n\n )\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth=True\n with tf.Session(config=config) as sess:\n\n #What do the n_itr and start_itr mean?\n algo.n_itr = 0\n algo.start_itr = 0\n algo.train(sess=sess) #TODO: What is happening here?\n\n im_height = self.imsize[0]\n im_width = self.imsize[1]\n im_channels = 3\n\n dim_input = [im_height, im_width, im_channels]\n\n disc = ConvDiscriminator(input_dim=dim_input)\n\n #data = joblib.load(self.expert_pkl)#\"/home/andrewliu/research/viewpoint/rllab-tpil/third_person_im/data/local/experiment/experiment_2017_05_07_20_58_39_0001/itr_123.pkl\")#\"/home/abhigupta/abhishek_sandbox/viewpoint/third_person_im/data/local/experiment/experiment_2017_05_06_18_07_38_0001/itr_900.pkl\")\n #expert_policy = data['policy']\n with open(self.expert_pkl, 'rb') as pfile:\n expert_policy = pickle.load(pfile)\n # expert_policy = load_expert_reacher(expert_env, sess) #Load the expert #TODO: Need to train the expert\n\n #from rllab.sampler.utils import rollout\n #while True:\n # t = rollout(env=expert_env, agent=expert_policy, max_path_length=50, animated=True)\n\n algo.n_itr = self.itrs\n trainer = CyberPunkTrainerGAIL(disc=disc, novice_policy_env=novice_env,\n expert_env=expert_env, novice_policy=policy,\n novice_policy_opt_algo=algo, expert_success_pol=expert_policy,\n im_width=im_width, im_height=im_height, im_channels=im_channels,\n tf_sess=sess, horizon=self.horizon)\n\n iterations = self.itrs\n for iter_step in range(0, iterations):\n logger.record_tabular('Iteration', iter_step)\n trainer.take_iteration(n_trajs_cost=self.trajs, n_trajs_policy=self.trajs)\n logger.dump_tabular(with_prefix=False)\n\n trainer.log_and_finish()\n\n", "import numpy as np\nimport os.path as osp\n\nfrom rllab import spaces\nfrom rllab.envs.base import Env\nfrom rllab.misc.overrides import overrides\nfrom rllab.mujoco_py import MjModel, MjViewer\nfrom rllab.misc import autoargs\nfrom rllab.misc import logger\nimport theano\nimport tempfile\nimport os\nimport mako.template\nimport mako.lookup\n\nMODEL_DIR = osp.abspath(\n osp.join(\n osp.dirname(__file__),\n '../../../vendor/mujoco_models'\n )\n)\n\n\nBIG = 1e6\n\n\nclass MujocoEnv(Env):\n FILE = None\n\n @autoargs.arg('action_noise', type=float,\n help='Noise added to the controls, which will be '\n 'proportional to the action bounds')\n def __init__(self, action_noise=0.0, file_path=None, temp_path=None, template_args=None):\n # compile template\n if file_path is None:\n if self.__class__.FILE is None:\n raise ValueError(\"Mujoco file not specified\")\n file_path = osp.join(MODEL_DIR, self.__class__.FILE)\n if file_path.endswith(\".mako\"):\n lookup = mako.lookup.TemplateLookup(directories=[MODEL_DIR])\n with open(file_path) as template_file:\n template = mako.template.Template(\n template_file.read(), lookup=lookup)\n content = template.render(\n opts=template_args if template_args is not None else {},\n )\n tmp_f, file_path = tempfile.mkstemp(text=True)\n with open(file_path, 'w') as f:\n f.write(content)\n self.model = MjModel(file_path)\n os.close(tmp_f)\n else:\n self.model = MjModel(file_path)\n self.data = self.model.data\n 
self.viewer = None\n self.init_qpos = self.model.data.qpos\n self.init_qvel = self.model.data.qvel\n self.init_qacc = self.model.data.qacc\n self.init_ctrl = self.model.data.ctrl\n self.qpos_dim = self.init_qpos.size\n self.qvel_dim = self.init_qvel.size\n self.ctrl_dim = self.init_ctrl.size\n self.action_noise = action_noise\n if \"frame_skip\" in self.model.numeric_names:\n frame_skip_id = self.model.numeric_names.index(\"frame_skip\")\n addr = self.model.numeric_adr.flat[frame_skip_id]\n self.frame_skip = int(self.model.numeric_data.flat[addr])\n else:\n self.frame_skip = 1\n if \"init_qpos\" in self.model.numeric_names:\n init_qpos_id = self.model.numeric_names.index(\"init_qpos\")\n addr = self.model.numeric_adr.flat[init_qpos_id]\n size = self.model.numeric_size.flat[init_qpos_id]\n init_qpos = self.model.numeric_data.flat[addr:addr + size]\n self.init_qpos = init_qpos\n self.dcom = None\n self.current_com = None\n self.reset()\n super(MujocoEnv, self).__init__()\n\n @property\n @overrides\n def action_space(self):\n bounds = self.model.actuator_ctrlrange\n lb = bounds[:, 0]\n ub = bounds[:, 1]\n return spaces.Box(lb, ub)\n\n @property\n @overrides\n def observation_space(self):\n shp = self.get_current_obs().shape\n ub = BIG * np.ones(shp)\n return spaces.Box(ub * -1, ub)\n\n @property\n def action_bounds(self):\n return self.action_space.bounds\n\n def reset_mujoco(self):\n self.model.data.qpos = self.init_qpos + \\\n np.random.normal(size=self.init_qpos.shape) * 0.01\n self.model.data.qvel = self.init_qvel + \\\n np.random.normal(size=self.init_qvel.shape) * 0.1\n self.model.data.qacc = self.init_qacc\n self.model.data.ctrl = self.init_ctrl\n\n @overrides\n def reset(self):\n self.reset_mujoco()\n self.model.forward()\n self.current_com = self.model.data.com_subtree[0]\n self.dcom = np.zeros_like(self.current_com)\n return self.get_current_obs()\n\n def get_current_obs(self):\n return self._get_full_obs()\n\n def _get_full_obs(self):\n data = self.model.data\n cdists = np.copy(self.model.geom_margin).flat\n for c in self.model.data.contact:\n cdists[c.geom2] = min(cdists[c.geom2], c.dist)\n obs = np.concatenate([\n data.qpos.flat,\n data.qvel.flat,\n # data.cdof.flat,\n data.cinert.flat,\n data.cvel.flat,\n # data.cacc.flat,\n data.qfrc_actuator.flat,\n data.cfrc_ext.flat,\n data.qfrc_constraint.flat,\n cdists,\n # data.qfrc_bias.flat,\n # data.qfrc_passive.flat,\n self.dcom.flat,\n ])\n return obs\n\n @property\n def _state(self):\n return np.concatenate([\n self.model.data.qpos.flat,\n self.model.data.qvel.flat\n ])\n\n def inject_action_noise(self, action):\n # generate action noise\n noise = self.action_noise * \\\n np.random.normal(size=action.shape)\n # rescale the noise to make it proportional to the action bounds\n lb, ub = self.action_bounds\n noise = 0.5 * (ub - lb) * noise\n return action + noise\n\n def forward_dynamics(self, action):\n self.model.data.ctrl = self.inject_action_noise(action)\n for _ in range(self.frame_skip):\n self.model.step()\n self.model.forward()\n new_com = self.model.data.com_subtree[0]\n self.dcom = new_com - self.current_com\n self.current_com = new_com\n\n def get_viewer(self):\n if self.viewer is None:\n self.viewer = MjViewer()\n self.viewer.start()\n self.viewer.set_model(self.model)\n return self.viewer\n\n def render(self):\n viewer = self.get_viewer()\n viewer.loop_once()\n\n def start_viewer(self):\n viewer = self.get_viewer()\n if not viewer.running:\n viewer.start()\n\n def stop_viewer(self):\n if self.viewer:\n 
self.viewer.finish()\n\n def release(self):\n # temporarily alleviate the issue (but still some leak)\n from rllab.mujoco_py.mjlib import mjlib\n mjlib.mj_deleteModel(self.model._wrapped)\n mjlib.mj_deleteData(self.data._wrapped)\n\n def get_body_xmat(self, body_name):\n idx = self.model.body_names.index(body_name)\n return self.model.data.xmat[idx].reshape((3, 3))\n\n def get_body_com(self, body_name):\n idx = self.model.body_names.index(body_name)\n return self.model.data.com_subtree[idx]\n\n def get_body_comvel(self, body_name):\n idx = self.model.body_names.index(body_name)\n return self.model.body_comvels[idx]\n\n def print_stats(self):\n super(MujocoEnv, self).print_stats()\n print(\"qpos dim:\\t%d\" % len(self.model.data.qpos))\n\n def action_from_key(self, key):\n raise NotImplementedError\n", "## @leopauly\n## For finding correlation between rewards per episode vs eval_metric per episode\n\n\n## loading values\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.style.use('ggplot')\n\n\ndef corr_coef(filename_x,filename_y,i):\n #-------------------------------------------------------------------------------------------------#\n\n y = np.loadtxt(filename_y, unpack=True)\n y_new=[y_ for y_ in y if y_!=0]\n print('y size:' ,np.array(y_new).shape)\n\n #-------------------------------------------------------------------------------------------------#\n\n x = np.loadtxt(filename_x, unpack=True)\n x_new=[x_ for x_ in x if x_!=0]\n\n x_norm=[]\n for k in range(len(x_new)):\n x_norm.append(1-(x_new[k]/x_new[0]))\t\n print('x size:' ,np.array(x_new).shape)\n\n\n #-------------------------------------------------------------------------------------------------#\n\n ## correlation coefficient\n cor_coef=np.corrcoef(x_norm,y_new)\n print(np.corrcoef(x_norm,y_new))\n\n\n plt.scatter(x_norm,y_new,color='red')\n # successive scatter() calls accumulate on the same axes (plt.hold was removed from matplotlib)\n\n #-------------------------------------------------------------------------------------------------#\n \n return cor_coef\n\n\n\nfilenames_x_array=[\"eval_metric_per_epispde_run_7.txt\",\"eval_metric_per_epispde_run_6.txt\"]\nfilenames_y_array=[\"episode_reward_run_7.txt\",\"episode_reward_run_6.txt\"]\ncorr_per_run=[]\nfor i in range(len(filenames_x_array)):\n filename_x=filenames_x_array[i]\n filename_y=filenames_y_array[i]\n corr_per_run.append(corr_coef(filename_x,filename_y,i)[0][1])\n\ncorr_per_run=np.array(corr_per_run)\nprint('corr_per_run',corr_per_run)\nmean_corr=np.mean(corr_per_run)\nstd_corr=np.std(corr_per_run)\nprint('Mean correlation coefficient:',mean_corr)\nprint('Std correlation coefficient:',std_corr)\n\n\nplt.title('Visual reward vs Auxiliary')\nplt.xlabel('Auxiliary reward')\nplt.ylabel('Visual reward')\nplt.savefig('Visual reward vs Auxiliary.png')\nplt.show()\n", "#### Training agent in Pusher3DOFReal gym env using a single real-world env\n## Written by : leopauly | [email protected]\n## Courtesy for DDPG implementation : Steven Spielberg Pon Kumar (github.com/stevenpjg)\n## Exp for checking view point invariance using 3dofpushreal Gym env\n####\n\n##Imports\nimport gym\nfrom gym.spaces import Box, Discrete\nimport numpy as np\nnp.set_printoptions(suppress=True)\nimport cv2\nfrom ddpg_new1 import DDPG as Agent\nfrom ou_noise import OUNoise\nimport matplotlib.pyplot as plt\nimport scipy.misc as misc\n\n## Imports for DNN\nimport os\nfrom threading import Thread, Lock\nimport sys\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport PIL.Image as Image\nimport 
random\nimport numpy as np\nimport cv2\nimport time\nimport math\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport keras\nfrom keras import backend as K\n\n## Custom scripts\nimport lscript as lsp\nimport modelling as md\n\n## Defining env\nenv = gym.make('Pusher3DOFReal-v1')\nassert isinstance(env.observation_space, Box), \"observation space must be continuous\"\nassert isinstance(env.action_space, Box), \"action space must be continuous\"\n\n## Defining vars for reinfrocement learning algo\nnum_episodes=500\nnum_rollouts=20 # Each roll out represent a complete activity : activity could be pushing an object, reaching to a point or similar !\nsteps=16 # No of actions taken in a roll out\nis_batch_norm = False #batch normalization switch\nxrange=range # For python3\nstart_training=64 # Buffer size, before starting to train the RL algorithm\n\n## vars for feature extraction\nheight=112 \nwidth=112 \nchannel=3\ncrop_size=112\n\ncluster_length=16 # Length of one activity\nnb_classes=2 \nfeature_size=4608 #8192 #16384 #487 \n#frame_feature_size=\ndemo_folder='./Demos/Demo_push_180deg/'\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\n\nclass Frame_Feature:\n def __init__(self):\n self.g=tf.Graph()\n with self.g.as_default():\n self.sess=tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False))\n self.base_model=tf.keras.applications.vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=None, input_shape=(height,width,channel), pooling=None, classes=1000)\n #print(tf.contrib.graph_editor.get_tensors(self.g)) #(tf.get_default_graph()))\n self.base_model._make_predict_function()\n print('VggNet loaded with Imagenet values for observation frame & baseline feature extraction')\n \n ## Extraction of features - baseline\n def video_feature_extractor(self,vid):\n \n sum_val=0\n features=0\n for i in range(cluster_length):\n\n frame_=vid[i]\n frame=self.im_preprocess(frame_)\n frame=frame.reshape(-1,height,width,channel)\n \n #print('frame size',frame.shape)\n #print(tf.contrib.graph_editor.get_tensors(self.g))\n temp_val=self.base_model.predict(frame)\n temp_val=temp_val.reshape(feature_size)\n #print('temp_val',temp_val.shape)\n sum_val=sum_val+temp_val\n #print('sum_val',sum_val.shape)\n\n features=sum_val/cluster_length\n #print('feature from one video.shape',features.shape)\n return features\n\n\n def frame_feature_extractor(self,frame_):\n frame= self.im_preprocess(frame_)\n frame=frame.reshape(-1,height,width,channel)\n frame_features=self.base_model.predict(frame)\n return frame_features\n\n def im_preprocess(self,im):\n im = np.float32(im)\n im[:,:,2] -= 103.939\n im[:,:,1] -= 116.779\n im[:,:,0] -= 123.68\n im = im[:, :, ::-1] # change to BGR\n return im\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\n\n### DEMO FEATURE EXTRACTION\ndef get_compress_frames_data(filename, num_frames_per_clip=cluster_length):\n ret_arr = []\n for parent, dirnames, filenames in os.walk(filename):\n\n filenames = sorted(filenames)\n jump=math.floor((len(filenames)/num_frames_per_clip))\n loop=0\n\n for i in range(0,len(filenames),jump):\n if (loop>15):\n break\n if (filenames[i].endswith('.png')):\n image_name = str(filename) + '/' + str(filenames[i])\n img = 
Image.open(image_name)\n img_data = np.array(img)\n ret_arr.append(img_data)\n loop=loop+1\n ret_arr=np.array(ret_arr)\n #ret_arr=ret_arr/255\n\n return ret_arr\n\ndef demo_array_extractor(demo_vid_path):\n demo_vid_array=get_compress_frames_data(demo_vid_path)\n return demo_vid_array\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\n\n'''\n### VIDEO FEATURE EXTRACTION - BASELINE\n\nclass Vid_Feature:\n \n def __init__(self,include_fc=False,summary=True):\n self.g=tf.Graph()\n with self.g.as_default():\n self.sess=tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False))\n self.baseline_model=tf.keras.applications.vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=None, input_shape=(height,width,channel), pooling=None, classes=1000)\n #print(tf.contrib.graph_editor.get_tensors(self.g)) #(tf.get_default_graph()))\n self.baseline_model._make_predict_function()\n print('VggNet loaded with Imagenet values for baseline feature extraction')\n \n\n ## Extraction of features - baseline\n def feature_extractor(self,vid):\n \n sum_val=0\n features=0\n for i in range(cluster_length):\n\n frame_=vid[i]\n frame=self.im_preprocess(frame_)\n frame=frame.reshape(-1,height,width,channel)\n \n print('frame size',frame.shape)\n #print(tf.contrib.graph_editor.get_tensors(self.g))\n temp_val=self.baseline_model.predict(frame)\n temp_val=temp_val.reshape(feature_size)\n #print('temp_val',temp_val.shape)\n sum_val=sum_val+temp_val\n #print('sum_val',sum_val.shape)\n\n features=sum_val/cluster_length\n #print('feature from one video.shape',features.shape)\n return features\n\n\n\n def im_preprocess(self,im):\n\n im = np.float32(im)\n im[:,:,2] -= 103.939\n im[:,:,1] -= 116.779\n im[:,:,0] -= 123.68\n im = im[:, :, ::-1] # change to BGR\n return im\n'''\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\n\ndef distance(f_demo,f_robo):\n #print('shape f_demo',f_demo.shape,'shape f_demo',f_robo.shape)\n return np.linalg.norm(f_demo-f_robo)\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\n\ndef s2l():\n\n #Randomly initialize critic,actor,target critic, target actor network and replay buffer \n num_states = feature_size #num_states = env.observation_space.shape[0]\n num_actions = env.action_space.shape[0] \n print (\"Number of States:\", num_states)\n print (\"Number of Actions:\", num_actions)\n\n agent = Agent(state_size=num_states, action_size=num_actions) #agent = DDPG(env, is_batch_norm,num_states,num_actions)\n exploration_noise = OUNoise(env.action_space.shape[0])\n counter=0 \n total_reward=0\n \n print (\"Number of Rollouts per episode:\", num_rollouts)\n print (\"Number of Steps per roll out:\", steps)\n reward_st = np.array([0]) #saving reward\n eval_metric_st= np.array([0])\n reward_st_all = np.array([0]) #saving reward after every step\n \n frame_obj=Frame_Feature()\n\n #activity_obj=Vid_Feature()\n demo_vid_array=demo_array_extractor(demo_folder)\n demo_features=frame_obj.video_feature_extractor(demo_vid_array)\n\n for episode in range(num_episodes):\n print (\"==== Starting episode no:\",episode,\"====\",\"\\n\")\n env.reset() # 
Reset env in the beginning of each episode\n env.render()\n obs_img=env.render(mode='rgb_array') # Get the observation\n obs_img=np.array(misc.imresize(obs_img,[112,112,3]))\n observation =np.array(frame_obj.frame_feature_extractor(obs_img))\n observation=observation.reshape(-1)\n reward_per_episode = 0\n\n for t in range(num_rollouts): \n \n reward_per_rollout=0\n vid_robo_=[]\n\n for i in range(steps):\n\n x = observation\n\n state=np.reshape(x,[1,num_states])\n action = agent.get_action(state=state) #action = agent.evaluate_actor(np.reshape(x,[1,num_states]))\n noise = exploration_noise.noise()\n action = action[0] + noise #Select action according to current policy and exploration noise\n print ('Action at episode-',episode,'rollout-',t, 'step-', i ,\" :\",action)\n\n \n _,_,done,info=env.step(action)\n env.render()\n obs_robo_=env.render(mode='rgb_array') # Get the observation\n obs_robo=misc.imresize(obs_robo_,[112,112,3])\n vid_robo_.append(obs_robo)\n observation=np.array(frame_obj.frame_feature_extractor(np.array(obs_robo)))\n observation=observation.reshape(-1)\n #pause()\n \n if(i==15):\n vid_robo=np.array(vid_robo_)\n robo_features=frame_obj.video_feature_extractor(vid_robo)\n reward=-(distance(demo_features,robo_features))\n reward=np.array(reward)\n print('reward: ',reward)\n else:\n reward=0\n reward=np.array(reward)\n print('reward: ',reward)\n\n # Printing eval_metric after every rollout\n eval_metric=np.array(env.get_eval())\n eval_metric=eval_metric.reshape(-1)\n print('Distance to goal:',eval_metric) \n eval_metric_st = np.append(eval_metric_st,eval_metric) \n np.savetxt('eval_metric_per_step.txt',eval_metric_st, newline=\"\\n\")\n\n # Storing reward after every rollout\n reward_st_all = np.append(reward_st_all,reward)\n np.savetxt('reward_all.txt',reward_st_all, newline=\"\\n\")\n\n #add s_t,s_t+1,action,reward to experience memory\n agent.remember(x, action, reward, done, observation) #agent.add_experience(x,observation,action,reward,False)\n reward_per_rollout+=reward\n counter+=1\n \n #train critic and actor network\n if counter > start_training: \n agent.train()\n print ('\\n\\n')\n \n #Saving policy \n if ((episode%50)==0 and t==num_rollouts-1):\n print('saving policy...........................!')\n #agent.save_actor(episode)\n\n\n reward_per_episode+=reward_per_rollout \n\n #check if episode ends:\n \n print ('EPISODE: ',episode,' Total Reward: ',reward_per_episode)\n print (\"Printing reward to file\")\n exploration_noise.reset() #reinitializing random noise for action exploration\n reward_st = np.append(reward_st,reward_per_episode)\n np.savetxt('episode_reward.txt',reward_st, fmt='%f', newline=\"\\n\")\n print ('\\n\\n')\n \n total_reward+=reward_per_episode \n\n print (\"Average reward per episode {}\".format(total_reward / num_episodes)) \n\n \n \ns2l()\n\n", "#### Training agent in Pusher7Dof gym env using a single real-world env\n## Written by : leopauly | [email protected]\n## Courtesy for DDPG implementation : Steven Spielberg Pon Kumar (github.com/stevenpjg)\n####\n\n##Imports\nimport gym\nfrom gym.spaces import Box, Discrete\nimport numpy as np\nnp.set_printoptions(suppress=True)\nimport cv2\nfrom ddpg import DDPG\nfrom ou_noise import OUNoise\nimport matplotlib.pyplot as plt\nimport scipy.misc as misc\n\n## Imports for DNN\nimport os\nfrom threading import Thread, Lock\nimport sys\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport PIL.Image as Image\nimport random\nimport numpy as np\nimport cv2\nimport time\nimport math\nimport 
matplotlib.pyplot as plt\nimport tensorflow as tf\nimport keras\nfrom keras import backend as K\n\n## Custom scripts\nimport lscript as lsp\nimport modelling as md\n\n## Defining env\nenv = gym.make('Pusher3DOFReal-v1')\nassert isinstance(env.observation_space, Box), \"observation space must be continuous\"\nassert isinstance(env.action_space, Box), \"action space must be continuous\"\n\n## Defining vars for reinfrocement learning algo\nnum_episodes=700\nnum_rollouts=20 # Each roll out represent a complete activity : activity could be pushing an object, reaching to a point or similar !\nsteps=16 # No of actions taken in a roll out\nis_batch_norm = False #batch normalization switch\nxrange=range # For python3\nstart_training=64 # Buffer size, before starting to train the RL algorithm\n\n## vars for feature extraction\nheight=112 \nwidth=112 \nchannel=3\ncrop_size=112\n\ncluster_length=16 # Length of one activity\nnb_classes=2 \nfeature_size=4608 #8192 #16384 #487 \n#frame_feature_size=\nsaved_path='/home/ironman2/S2l_storage/trained_activity_nets/' \ndemo_folder='./Demo_push_1/'\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\n\nclass Frame_Feature:\n def __init__(self):\n self.g=tf.Graph()\n with self.g.as_default():\n self.sess=tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False))\n self.base_model=tf.keras.applications.vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=None, input_shape=(height,width,channel), pooling=None, classes=1000)\n print(tf.contrib.graph_editor.get_tensors(self.g)) #(tf.get_default_graph()))\n self.base_model._make_predict_function()\n print('VggNet loaded with Imagenet values')\n \n def frame_feature_extractor(self,frame_):\n frame= self.im_preprocess(frame_)\n frame=frame.reshape(-1,height,width,channel)\n frame_features=self.base_model.predict(frame)\n return frame_features\n\n def im_preprocess(self,im):\n im = np.float32(im)\n im[:,:,2] -= 103.939\n im[:,:,1] -= 116.779\n im[:,:,0] -= 123.68\n im = im[:, :, ::-1] # change to BGR\n return im\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\n\n### DEMO FEATURE EXTRACTION\ndef get_compress_frames_data(filename, num_frames_per_clip=cluster_length):\n ret_arr = []\n for parent, dirnames, filenames in os.walk(filename):\n\n filenames = sorted(filenames)\n jump=math.floor((len(filenames)/num_frames_per_clip))\n loop=0\n\n for i in range(0,len(filenames),jump):\n if (loop>15):\n break\n if (filenames[i].endswith('.png')):\n image_name = str(filename) + '/' + str(filenames[i])\n img = Image.open(image_name)\n img_data = np.array(img)\n ret_arr.append(img_data)\n loop=loop+1\n ret_arr=np.array(ret_arr)\n #ret_arr=ret_arr/255\n\n return ret_arr\n\ndef demo_array_extractor(demo_vid_path):\n demo_vid_array=get_compress_frames_data(demo_vid_path)\n return demo_vid_array\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\n\n\n### VIDEO FEATURE EXTRACTION\n\nclass Vid_Feature:\n \n def __init__(self):\n self.saved_path='/home/ironman2/S2l_storage/trained_activity_nets/' \n self.network_name='activity_model.ckpt-104.meta'\n ### Activity_net\n 
self.g=tf.Graph()\n with self.g.as_default():\n\n self.sess = tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))\n ## Restore model weights from previously saved model\n self.saver = tf.train.import_meta_graph(os.path.join(self.saved_path,self.network_name))\n self.saver.restore(self.sess, os.path.join(saved_path,'activity_model.ckpt-104'))\n print(\"Model restored from file: %s\" % saved_path,flush=True) \n\n ## For extracting activity features\n def feature_extractor(self,vid_np):\n #print('shape of video for feature extraction:',vid_np.shape)\n self.vid_=vid_np.reshape(-1,cluster_length,height,width,channel)\n\n #print(tf.contrib.graph_editor.get_tensors(self.g)) #(tf.get_default_graph()))\n #print(tf.get_default_graph().as_graph_def())\n f_v = self.sess.graph.get_tensor_by_name('flatten_1/Reshape:0')\n self.f_v_val=np.array(self.sess.run([f_v], feed_dict={'conv1_input:0':self.vid_,'Placeholder:0':self.vid_ }))#,K.learning_phase(): 0 }))\n\n #print('extracted video features shape:',f_v_val.shape)\n self.features=np.reshape(self.f_v_val,(-1))\n #print('features_shape',features.shape)\n return self.features\n\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\n\ndef distance(f_demo,f_robo):\n #print('shape f_demo',f_demo.shape,'shape f_demo',f_robo.shape)\n return np.linalg.norm(f_demo-f_robo)\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\n\ndef s2l():\n\n #Randomly initialize critic,actor,target critic, target actor network and replay buffer \n num_states = feature_size #num_states = env.observation_space.shape[0]\n num_actions = env.action_space.shape[0] \n print (\"Number of States:\", num_states)\n print (\"Number of Actions:\", num_actions)\n\n agent = DDPG(env, is_batch_norm,num_states,num_actions)\n exploration_noise = OUNoise(env.action_space.shape[0])\n counter=0 \n total_reward=0\n \n print (\"Number of Rollouts per episode:\", num_rollouts)\n print (\"Number of Steps per roll out:\", steps)\n reward_st = np.array([0]) #saving reward\n reward_st_all = np.array([0]) #saving reward after every step\n \n activity_obj=Vid_Feature()\n demo_vid_array=demo_array_extractor(demo_folder)\n demo_features=activity_obj.feature_extractor(demo_vid_array)\n\n frame_obj=Frame_Feature()\n\n for episode in range(num_episodes):\n print (\"==== Starting episode no:\",episode,\"====\",\"\\n\")\n env.reset() # Reset env in the begining of each episode\n env.render()\n obs_img=env.render(mode='rgb_array') # Get the observation\n obs_img=np.array(misc.imresize(obs_img,[112,112,3]))\n observation =np.array(frame_obj.frame_feature_extractor(obs_img))\n observation=observation.reshape(-1)\n reward_per_episode = 0\n\n for t in range(num_rollouts): \n \n reward_per_rollout=0\n vid_robo_=[]\n\n for i in range(steps):\n\n x = observation\n\n action = agent.evaluate_actor(np.reshape(x,[1,num_states]))\n noise = exploration_noise.noise()\n action = action[0] + noise #Select action according to current policy and exploration noise\n print ('Action at episode-',episode,'rollout-',t, 'step-', i ,\" :\",action)\n\n \n _,_,done,info=env.step(action)\n env.render()\n obs_robo_=env.render(mode='rgb_array') # Get the observation\n obs_robo=misc.imresize(obs_robo_,[112,112,3])\n 
vid_robo_.append(obs_robo)\n observation=np.array(frame_obj.frame_feature_extractor(np.array(obs_robo)))\n observation=observation.reshape(-1)\n #pasue()\n \n if(i==15):\n vid_robo=np.array(vid_robo_)\n robo_features=activity_obj.feature_extractor(vid_robo)\n reward=-(distance(demo_features,robo_features))\n reward=np.array(reward)\n print('reward: ',reward)\n else:\n reward=0\n reward=np.array(reward)\n print('reward: ',reward)\n\n reward_st_all = np.append(reward_st_all,reward)\n np.savetxt('reward_all.txt',reward_st_all, newline=\"\\n\")\n\n #add s_t,s_t+1,action,reward to experience memory\n agent.add_experience(x,observation,action,reward,False)\n reward_per_rollout+=reward\n counter+=1\n \n #train critic and actor network\n if counter > start_training: \n agent.train()\n print ('\\n\\n')\n \n reward_per_episode+=reward_per_rollout \n\n #check if episode ends:\n \n print ('EPISODE: ',episode,' Total Reward: ',reward_per_episode)\n print (\"Printing reward to file\")\n exploration_noise.reset() #reinitializing random noise for action exploration\n reward_st = np.append(reward_st,reward_per_episode)\n np.savetxt('episode_reward.txt',reward_st, fmt='%f', newline=\"\\n\")\n print ('\\n\\n')\n \n total_reward+=reward_per_episode \n\n print (\"Average reward per episode {}\".format(total_reward / num_episodes)) \n\n \n \ns2l()\n\n", "import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow import gfile\nimport imageio\nimport pickle\nimport scipy.misc\nimport sys\nfrom IPython.display import HTML\nimport imageio\nimport argparse\n\ndef transform(image, resize_height=36, resize_width=64):\n cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])\n return np.array(cropped_image)/127.5 - 1.\ndef inverse_transform(images):\n return (images+1.)/2.\n\ndef lrelu(x, leak=0.2, name=\"lrelu\"):\n return tf.maximum(x, leak*x)\n\ndef conv2d(input_, output_dim, \n k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\n name=\"conv2d\"):\n with tf.variable_scope(name):\n w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],\n initializer=tf.truncated_normal_initializer(stddev=stddev))\n# print(\"c\", w.get_shape())\n conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')\n\n biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))\n conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())\n\n return conv\n\nclass batch_norm(object):\n def __init__(self, epsilon=1e-5, momentum = 0.9, name=\"batch_norm\"):\n with tf.variable_scope(name):\n self.epsilon = epsilon\n self.momentum = momentum\n self.name = name\n\n def __call__(self, x):\n return tf.contrib.layers.batch_norm(x,\n decay=self.momentum, \n updates_collections=None,\n epsilon=self.epsilon,\n scale=True,\n is_training=tftrain,\n scope=self.name)\n\ndef linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):\n shape = input_.get_shape().as_list()\n\n with tf.variable_scope(scope or \"Linear\"):\n matrix = tf.get_variable(\"Matrix\", [shape[1], output_size], tf.float32,\n tf.random_normal_initializer(stddev=stddev))\n bias = tf.get_variable(\"bias\", [output_size],\n initializer=tf.constant_initializer(bias_start))\n if with_w:\n return tf.matmul(input_, matrix) + bias, matrix, bias\n else:\n return tf.matmul(input_, matrix) + bias\n \n\ndef deconv2d(input_, output_shape,\n k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\n name=\"deconv2d\", with_w=False):\n with tf.variable_scope(name):\n # filter : [height, 
width, output_channels, in_channels]\n w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],\n initializer=tf.random_normal_initializer(stddev=stddev))\n# print(\"w\", w.get_shape())\n try:\n deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,\n strides=[1, d_h, d_w, 1])\n\n # Support for verisons of TensorFlow before 0.7.0\n except AttributeError:\n deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,\n strides=[1, d_h, d_w, 1])\n\n biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))\n deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())\n\n if with_w:\n return deconv, w, biases\n else:\n return deconv\n\nclass ContextAEPushReal:\n def __init__(self, gf_dim=64, df_dim=64,\n gfc_dim=1024, dfc_dim=1024,\n c_dim=3):\n self.gf_dim = gf_dim\n self.df_dim = df_dim\n self.c_dim = c_dim\n\n self.gfc_dim = gfc_dim\n self.dfc_dim = dfc_dim\n\n\n def build(self, image, ablation_type):\n imgshape = image.get_shape().as_list()\n print(imgshape)\n self.output_height, self.output_width = imgshape[-3:-1]\n self.batch_size = imgshape[1]\n featsize = 100\n srcimg = image[0]\n tgtimg = image[2]\n tgtctx = image[1]\n \n nf0 = 32\n nf1 = 16\n nf2 = 16\n nf3 = 8\n ns0 = 1\n ns1 = 2\n ns2 = 1\n ns3 = 2\n# with tf.variable_scope(\"conv_context\") as scope:\n\n def encode(img):\n img_h0 = lrelu(conv2d(img, nf0, d_h=ns0, d_w=ns0, name='h0_conv'))\n img_h1 = lrelu(conv2d(img_h0, nf1, d_h=ns1, d_w=ns1, name='h1_conv'))\n img_h2 = lrelu(conv2d(img_h1, nf2, d_h=ns2, d_w=ns2, name='h2_conv'))\n img_h3 = lrelu(conv2d(img_h2, nf3, d_h=ns3, d_w=ns3, name='h3_conv'))\n print(img_h3.get_shape())\n img_h4 = lrelu(linear(tf.nn.dropout(tf.reshape(img_h3, [self.batch_size, -1]), keep_prob), featsize, 'h4_lin'))\n img_z = lrelu(linear(tf.nn.dropout(img_h4, keep_prob), featsize, 'hz_lin'))\n return img_h0, img_h1, img_h2, img_h3, img_h4, img_z\n \n with tf.variable_scope(\"conv\") as scope:\n srcimg_h0, srcimg_h1, srcimg_h2, srcimg_h3, srcimg_h4, srcimg_z = encode(srcimg)\n scope.reuse_variables()\n tgtimg_h0, tgtimg_h1, tgtimg_h2, tgtimg_h3, tgtimg_h4, tgtimg_z = encode(tgtimg)\n tgtctx_h0, tgtctx_h1, tgtctx_h2, tgtctx_h3, tgtctx_h4, tgtctx_z = encode(tgtctx)\n\n with tf.variable_scope(\"translate\") as scope:\n trans_h0 = lrelu(linear(tf.nn.dropout(tf.concat([srcimg_z, tgtctx_z], 1), keep_prob), featsize, 'trans_h0'))\n trans_z = linear(tf.nn.dropout(trans_h0, keep_prob), featsize, 'trans_z')\n self.translated_z = trans_z\n \n s_h, s_w = self.output_height, self.output_width\n s_h0, s_h1, s_h2, s_h3 = \\\n int(s_h/ns0), int(s_h/ns0/ns1), int(s_h/ns0/ns1/ns2), int(s_h/ns0/ns1/ns2/ns3)\n s_w0, s_w1, s_w2, s_w3 = \\\n int(s_w/ns0), int(s_w/ns0/ns1), int(s_w/ns0/ns1/ns2), int(s_w/ns0/ns1/ns2/ns3)\n \n def decode(z, skip_h3, skip_h2, skip_h1, skip_h0):\n z_ = lrelu(linear(tf.nn.dropout(z, keep_prob), nf3*s_h3*s_w3, 'd_h0_lin'))\n h0 = tf.nn.dropout(tf.reshape(z_, [-1, s_h3, s_w3, nf3]), keep_prob)\n h1 = lrelu(deconv2d(tf.concat([h0, skip_h3], 3),\n [self.batch_size, s_h2, s_w2, nf2], name='d_h1', d_h=ns3, d_w=ns3))\n h2 = lrelu(deconv2d(tf.concat([h1, skip_h2], 3),\n [self.batch_size, s_h1, s_w1, nf1], name='d_h2', d_h=ns2, d_w=ns2))\n h3 = lrelu(deconv2d(tf.concat([h2, skip_h1], 3),\n [self.batch_size, s_h0, s_w0, nf0], name='d_h3', d_h=ns1, d_w=ns1))\n print(h3.get_shape())\n h4 = deconv2d(tf.concat([h3, skip_h0], 3),\n [self.batch_size, s_h, s_w, self.c_dim], name='d_h4', d_h=ns0, d_w=ns0)\n return h4\n with 
tf.variable_scope(\"deconv\") as scope:\n output_h4 = decode(trans_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0)\n scope.reuse_variables()\n truthoutput_h4 = decode(tgtimg_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0)\n\n self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3\n print(tgtimg_z.get_shape())\n self.out = output_h4\n self.out2 = truthoutput_h4\n print(self.out.get_shape())\n self.recon1 = tf.nn.l2_loss(tgtimg - self.out)\n self.recon2 = tf.nn.l2_loss(tgtimg - self.out2)\n if ablation_type == \"None\":\n self.loss = self.recon1 + self.recon2 + self.simloss\n elif ablation_type == \"L2\":\n self.loss = self.recon1 + self.recon2\n elif ablation_type == \"L2L3\":\n self.loss = self.recon1\n elif ablation_type == \"L1\":\n self.loss = self.recon2 + self.simloss \n\nclass ContextAEPush:\n def __init__(self, gf_dim=64, df_dim=64,\n gfc_dim=1024, dfc_dim=1024,\n c_dim=3):\n self.gf_dim = gf_dim\n self.df_dim = df_dim\n self.c_dim = c_dim\n\n self.gfc_dim = gfc_dim\n self.dfc_dim = dfc_dim\n\n\n def build(self, image, ablation_type):\n imgshape = image.get_shape().as_list()\n print(imgshape)\n self.output_height, self.output_width = imgshape[-3:-1]\n self.batch_size = imgshape[1]\n featsize = 1024\n srcimg = image[0]\n tgtimg = image[2]\n tgtctx = image[1]\n \n with tf.variable_scope(\"conv_context\") as scope:\n tgtctx_h0 = lrelu(conv2d(tgtctx, self.df_dim, name='h0_conv'))\n tgtctx_h1 = lrelu(conv2d(tgtctx_h0, self.df_dim*2, name='h1_conv'))\n tgtctx_h2 = lrelu(conv2d(tgtctx_h1, self.df_dim*4, name='h2_conv'))\n tgtctx_h3 = lrelu(conv2d(tgtctx_h2, self.df_dim*8, name='h3_conv'))\n tgtctx_h4 = lrelu(linear(tf.reshape(tgtctx_h3, [self.batch_size, -1]), featsize, 'h4_lin'))\n tgtctx_z = linear(tgtctx_h4, featsize, 'hz_lin')\n\n with tf.variable_scope(\"conv\") as scope:\n srcimg_h0 = lrelu(conv2d(srcimg, self.df_dim, name='h0_conv'))\n srcimg_h1 = lrelu(conv2d(srcimg_h0, self.df_dim*2, name='h1_conv'))\n srcimg_h2 = lrelu(conv2d(srcimg_h1, self.df_dim*4, name='h2_conv'))\n srcimg_h3 = lrelu(conv2d(srcimg_h2, self.df_dim*8, name='h3_conv'))\n print(srcimg_h3.get_shape())\n srcimg_h4 = lrelu(linear(tf.reshape(srcimg_h3, [self.batch_size, -1]), featsize, 'h4_lin'))\n srcimg_z = lrelu(linear(srcimg_h4, featsize, 'hz_lin'))\n \n scope.reuse_variables()\n \n tgtimg_h0 = lrelu(conv2d(tgtimg, self.df_dim, name='h0_conv'))\n tgtimg_h1 = lrelu(conv2d(tgtimg_h0, self.df_dim*2, name='h1_conv'))\n tgtimg_h2 = lrelu(conv2d(tgtimg_h1, self.df_dim*4, name='h2_conv'))\n tgtimg_h3 = lrelu(conv2d(tgtimg_h2, self.df_dim*8, name='h3_conv'))\n tgtimg_h4 = lrelu(linear(tf.reshape(tgtimg_h3, [self.batch_size, -1]), featsize, 'h4_lin'))\n tgtimg_z = lrelu(linear(tgtimg_h4, featsize, 'hz_lin'))\n\n with tf.variable_scope(\"translate\") as scope:\n trans_h0 = lrelu(linear(tf.concat([srcimg_z, tgtctx_z], 1), featsize, 'trans_h0'))\n trans_z = linear(trans_h0, featsize, 'trans_z')\n self.translated_z = trans_z\n \n with tf.variable_scope(\"deconv\") as scope:\n s_h, s_w = self.output_height, self.output_width\n s_h2, s_h4, s_h8, s_h16 = \\\n int(s_h/2), int(s_h/4), int(s_h/8), int(s_h/16)\n s_w2, s_w4, s_w8, s_w16 = \\\n int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16)\n\n output_z_ = lrelu(linear(trans_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))\n output_h0 = tf.reshape(output_z_, [-1, s_h16, s_w16, self.gf_dim * 8])\n output_h1 = lrelu(deconv2d(tf.concat([output_h0, tgtctx_h3], 3),\n [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))\n output_h2 = lrelu(deconv2d(tf.concat([output_h1, tgtctx_h2], 
3),\n [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))\n output_h3 = lrelu(deconv2d(tf.concat([output_h2, tgtctx_h1], 3),\n [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3'))\n output_h4 = deconv2d(tf.concat([output_h3, tgtctx_h0], 3),\n [self.batch_size, s_h, s_w, self.c_dim], name='d_h4')\n \n scope.reuse_variables()\n \n truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))\n truthoutput_h0 = tf.reshape(truthoutput_z_, [-1, s_h16, s_w16, self.gf_dim * 8])\n truthoutput_h1 = lrelu(deconv2d(tf.concat([truthoutput_h0, tgtctx_h3], 3),\n [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))\n truthoutput_h2 = lrelu(deconv2d(tf.concat([truthoutput_h1, tgtctx_h2], 3),\n [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))\n truthoutput_h3 = lrelu(deconv2d(tf.concat([truthoutput_h2, tgtctx_h1], 3),\n [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3'))\n truthoutput_h4 = deconv2d(tf.concat([truthoutput_h3, tgtctx_h0], 3),\n [self.batch_size, s_h, s_w, self.c_dim], name='d_h4')\n\n self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3\n mean, var = tf.nn.moments(tgtimg_z, axes=[0])\n print(var.get_shape())\n# self.simloss /= tf.reduce_mean(var)\n print(tgtimg_z.get_shape())\n self.out = output_h4# + contextimg#tf.nn.tanh(h4)\n self.out2 = truthoutput_h4\n self.recon1 = tf.nn.l2_loss(tgtimg - self.out)\n self.recon2 = tf.nn.l2_loss(tgtimg - self.out2)\n self.loss = self.recon1 + self.recon2 + self.simloss\n if ablation_type == \"None\":\n self.loss = self.recon1 + self.recon2 + self.simloss\n elif ablation_type == \"L2\":\n self.loss = self.recon1 + self.recon2\n elif ablation_type == \"L2L3\":\n self.loss = self.recon1\n elif ablation_type == \"L1\":\n self.loss = self.recon2 + self.simloss \n\nclass ContextAEReach:\n def __init__(self, gf_dim=64, df_dim=64,\n gfc_dim=1024, dfc_dim=1024,\n c_dim=3):\n self.gf_dim = gf_dim\n self.df_dim = df_dim\n self.c_dim = c_dim\n\n self.gfc_dim = gfc_dim\n self.dfc_dim = dfc_dim\n\n\n def build(self, image, ablation_type):\n imgshape = image.get_shape().as_list()\n print(imgshape)\n self.output_height, self.output_width = imgshape[-3:-1]\n self.batch_size = imgshape[1]\n featsize = 1024\n srcimg = image[0]\n tgtimg = image[2]\n tgtctx = image[1]\n \n with tf.variable_scope(\"conv_context\") as scope:\n tgtctx_h0 = lrelu(conv2d(tgtctx, self.df_dim, name='h0_conv'))\n tgtctx_h1 = lrelu(conv2d(tgtctx_h0, self.df_dim*2, name='h1_conv'))\n tgtctx_h2 = lrelu(conv2d(tgtctx_h1, self.df_dim*4, name='h2_conv'))\n tgtctx_h3 = lrelu(conv2d(tgtctx_h2, self.df_dim*8, name='h3_conv'))\n tgtctx_h4 = lrelu(linear(tf.reshape(tgtctx_h3, [self.batch_size, -1]), featsize, 'h4_lin'))\n tgtctx_z = linear(tgtctx_h4, featsize, 'hz_lin')\n\n with tf.variable_scope(\"conv\") as scope:\n srcimg_h0 = lrelu(conv2d(srcimg, self.df_dim, name='h0_conv'))\n srcimg_h1 = lrelu(conv2d(srcimg_h0, self.df_dim*2, name='h1_conv'))\n srcimg_h2 = lrelu(conv2d(srcimg_h1, self.df_dim*4, name='h2_conv'))\n srcimg_h3 = lrelu(conv2d(srcimg_h2, self.df_dim*8, name='h3_conv'))\n print(srcimg_h3.get_shape())\n srcimg_h4 = lrelu(linear(tf.reshape(srcimg_h3, [self.batch_size, -1]), featsize, 'h4_lin'))\n srcimg_z = lrelu(linear(srcimg_h4, featsize, 'hz_lin'))\n \n scope.reuse_variables()\n \n tgtimg_h0 = lrelu(conv2d(tgtimg, self.df_dim, name='h0_conv'))\n tgtimg_h1 = lrelu(conv2d(tgtimg_h0, self.df_dim*2, name='h1_conv'))\n tgtimg_h2 = lrelu(conv2d(tgtimg_h1, self.df_dim*4, name='h2_conv'))\n tgtimg_h3 = lrelu(conv2d(tgtimg_h2, 
self.df_dim*8, name='h3_conv'))\n tgtimg_h4 = lrelu(linear(tf.reshape(tgtimg_h3, [self.batch_size, -1]), featsize, 'h4_lin'))\n tgtimg_z = lrelu(linear(tgtimg_h4, featsize, 'hz_lin'))\n\n with tf.variable_scope(\"translate\") as scope:\n trans_h0 = lrelu(linear(tf.concat([srcimg_z, tgtctx_z], 1), featsize, 'trans_h0'))\n trans_z = linear(trans_h0, featsize, 'trans_z')\n self.translated_z = trans_z\n \n with tf.variable_scope(\"deconv\") as scope:\n s_h, s_w = self.output_height, self.output_width\n s_h2, s_h4, s_h8, s_h16 = \\\n int(s_h/2), int(s_h/4), int(s_h/8), int(s_h/16)\n s_w2, s_w4, s_w8, s_w16 = \\\n int(s_w/2), int(s_w/4), int(s_w/8), int(s_w/16)\n\n output_z_ = lrelu(linear(trans_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))\n output_h0 = tf.reshape(output_z_, [-1, s_h16, s_w16, self.gf_dim * 8])\n output_h1 = lrelu(deconv2d(tf.concat([output_h0, tgtctx_h3], 3),\n [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))\n output_h2 = lrelu(deconv2d(tf.concat([output_h1, tgtctx_h2], 3),\n [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))\n output_h3 = lrelu(deconv2d(tf.concat([output_h2, tgtctx_h1], 3),\n [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3'))\n output_h4 = deconv2d(tf.concat([output_h3, tgtctx_h0], 3),\n [self.batch_size, s_h, s_w, self.c_dim], name='d_h4')\n \n scope.reuse_variables()\n \n truthoutput_z_ = lrelu(linear(tgtimg_z, self.gf_dim*8*s_h16*s_w16, 'd_h0_lin'))\n truthoutput_h0 = tf.reshape(truthoutput_z_, [-1, s_h16, s_w16, self.gf_dim * 8])\n truthoutput_h1 = lrelu(deconv2d(tf.concat([truthoutput_h0, tgtctx_h3], 3),\n [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='d_h1'))\n truthoutput_h2 = lrelu(deconv2d(tf.concat([truthoutput_h1, tgtctx_h2], 3),\n [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='d_h2'))\n truthoutput_h3 = lrelu(deconv2d(tf.concat([truthoutput_h2, tgtctx_h1], 3),\n [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='d_h3'))\n truthoutput_h4 = deconv2d(tf.concat([truthoutput_h3, tgtctx_h0], 3),\n [self.batch_size, s_h, s_w, self.c_dim], name='d_h4')\n\n self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3\n mean, var = tf.nn.moments(tgtimg_z, axes=[0])\n print(var.get_shape())\n# self.simloss /= tf.reduce_mean(var)\n print(tgtimg_z.get_shape())\n self.out = output_h4# + contextimg#tf.nn.tanh(h4)\n self.out2 = truthoutput_h4\n self.recon1 = tf.nn.l2_loss(tgtimg - self.out)\n self.recon2 = tf.nn.l2_loss(tgtimg - self.out2)\n # self.loss = self.recon1 + self.recon2 + self.simloss\n if ablation_type == \"None\":\n self.loss = self.recon1 + self.recon2 + self.simloss\n elif ablation_type == \"L2\":\n self.loss = self.recon1 + self.recon2\n elif ablation_type == \"L2L3\":\n self.loss = self.recon1\n elif ablation_type == \"L1\":\n self.loss = self.recon2 + self.simloss \n\nclass ContextAESweep:\n def __init__(self, gf_dim=64, df_dim=64,\n gfc_dim=1024, dfc_dim=1024,\n c_dim=3):\n self.gf_dim = gf_dim\n self.df_dim = df_dim\n self.c_dim = c_dim\n\n self.gfc_dim = gfc_dim\n self.dfc_dim = dfc_dim\n\n\n def build(self, image, ablation_type):\n imgshape = image.get_shape().as_list()\n print(imgshape)\n self.output_height, self.output_width = imgshape[-3:-1]\n self.batch_size = imgshape[1]\n featsize = 100\n srcimg = image[0]\n tgtimg = image[2]\n tgtctx = image[1]\n \n nf0 = 32\n nf1 = 16\n nf2 = 16\n nf3 = 8\n ns0 = 1\n ns1 = 2\n ns2 = 1\n ns3 = 2\n# with tf.variable_scope(\"conv_context\") as scope:\n\n def encode(img):\n img_h0 = lrelu(conv2d(img, nf0, d_h=ns0, d_w=ns0, name='h0_conv'))\n img_h1 = 
lrelu(conv2d(img_h0, nf1, d_h=ns1, d_w=ns1, name='h1_conv'))\n img_h2 = lrelu(conv2d(img_h1, nf2, d_h=ns2, d_w=ns2, name='h2_conv'))\n img_h3 = lrelu(conv2d(img_h2, nf3, d_h=ns3, d_w=ns3, name='h3_conv'))\n print(img_h3.get_shape())\n img_h4 = lrelu(linear(tf.nn.dropout(tf.reshape(img_h3, [self.batch_size, -1]), keep_prob), featsize, 'h4_lin'))\n img_z = lrelu(linear(tf.nn.dropout(img_h4, keep_prob), featsize, 'hz_lin'))\n return img_h0, img_h1, img_h2, img_h3, img_h4, img_z\n \n with tf.variable_scope(\"conv\") as scope:\n srcimg_h0, srcimg_h1, srcimg_h2, srcimg_h3, srcimg_h4, srcimg_z = encode(srcimg)\n scope.reuse_variables()\n tgtimg_h0, tgtimg_h1, tgtimg_h2, tgtimg_h3, tgtimg_h4, tgtimg_z = encode(tgtimg)\n tgtctx_h0, tgtctx_h1, tgtctx_h2, tgtctx_h3, tgtctx_h4, tgtctx_z = encode(tgtctx)\n\n with tf.variable_scope(\"translate\") as scope:\n trans_h0 = lrelu(linear(tf.nn.dropout(tf.concat([srcimg_z, tgtctx_z], 1), keep_prob), featsize, 'trans_h0'))\n trans_z = linear(tf.nn.dropout(trans_h0, keep_prob), featsize, 'trans_z')\n self.translated_z = trans_z\n \n s_h, s_w = self.output_height, self.output_width\n s_h0, s_h1, s_h2, s_h3 = \\\n int(s_h/ns0), int(s_h/ns0/ns1), int(s_h/ns0/ns1/ns2), int(s_h/ns0/ns1/ns2/ns3)\n s_w0, s_w1, s_w2, s_w3 = \\\n int(s_w/ns0), int(s_w/ns0/ns1), int(s_w/ns0/ns1/ns2), int(s_w/ns0/ns1/ns2/ns3)\n \n def decode(z, skip_h3, skip_h2, skip_h1, skip_h0):\n z_ = lrelu(linear(tf.nn.dropout(z, keep_prob), nf3*s_h3*s_w3, 'd_h0_lin'))\n h0 = tf.nn.dropout(tf.reshape(z_, [-1, s_h3, s_w3, nf3]), keep_prob)\n import IPython\n IPython.embed()\n h1 = lrelu(deconv2d(tf.concat([h0, skip_h3], 3),\n [self.batch_size, s_h2, s_w2, nf2], name='d_h1', d_h=ns3, d_w=ns3))\n h2 = lrelu(deconv2d(tf.concat([h1, skip_h2], 3),\n [self.batch_size, s_h1, s_w1, nf1], name='d_h2', d_h=ns2, d_w=ns2))\n h3 = lrelu(deconv2d(tf.concat([h2, skip_h1], 3),\n [self.batch_size, s_h0, s_w0, nf0], name='d_h3', d_h=ns1, d_w=ns1))\n print(h3.get_shape())\n h4 = deconv2d(tf.concat([h3, skip_h0], 3),\n [self.batch_size, s_h, s_w, self.c_dim], name='d_h4', d_h=ns0, d_w=ns0)\n return h4\n with tf.variable_scope(\"deconv\") as scope:\n output_h4 = decode(trans_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0)\n scope.reuse_variables()\n truthoutput_h4 = decode(tgtimg_z, tgtctx_h3, tgtctx_h2, tgtctx_h1, tgtctx_h0)\n\n self.simloss = tf.reduce_mean((trans_z - tgtimg_z) ** 2) * 1e3\n print(tgtimg_z.get_shape())\n self.out = output_h4\n self.out2 = truthoutput_h4\n print(self.out.get_shape())\n self.recon1 = tf.nn.l2_loss(tgtimg - self.out)\n self.recon2 = tf.nn.l2_loss(tgtimg - self.out2)\n self.loss = self.recon1 + self.recon2 + self.simloss\n if ablation_type == \"None\":\n self.loss = self.recon1 + self.recon2 + self.simloss\n elif ablation_type == \"L2\":\n self.loss = self.recon1 + self.recon2\n elif ablation_type == \"L2L3\":\n self.loss = self.recon1\n elif ablation_type == \"L1\":\n self.loss = self.recon2 + self.simloss \n\nif __name__ == \"__main__\":\n #TODO: add in an argparse\n parser = argparse.ArgumentParser(description='Run ablations on models')\n parser.add_argument('experiment_type', type=str,\n help='type of ablation')\n parser.add_argument('ablation_type', type=str,\n help='type of ablation')\n parser.add_argument('data_location', type=str,\n help='data_location')\n args = parser.parse_args()\n\n vdata = np.load(args.data_location)\n\n tf.reset_default_graph()\n idim = (36, 64)\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n tftrain = tf.placeholder(tf.bool, name='tftrain')\n 
batch_size=100\n if (args.experiment_type == \"reach\") or (args.experiment_type == \"push\"):\n idim = (48, 48)\n tfinput = tf.placeholder(tf.float32, (3, batch_size) + idim + (3, ), name='x')\n if args.experiment_type == \"reach\":\n test = ContextAEReach()\n elif args.experiment_type == \"push\":\n test = ContextAEPush()\n elif args.experiment_type == \"pushreal\": \n test = ContextAEPushReal()\n elif args.experiment_type == \"sweep\":\n test = ContextAESweep()\n \n test.build(tfinput, args.ablation_type)\n\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth=True\n sess = tf.Session(config=config)\n learning_rate = tf.placeholder(tf.float32, shape=[])\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(test.loss)\n sess.run(tf.global_variables_initializer())\n allloss = []\n validloss = []\n itr = 0\n saver = tf.train.Saver()\n\n n = vdata.shape[1]\n nlen = vdata.shape[0]\n ntrain = int(0.8*n)\n nvalid = n - ntrain\n validdata = vdata[:, ntrain:]\n traindata = vdata[:, :ntrain]\n while True:\n choicesrc = np.random.choice(ntrain, batch_size)\n choicetgt = np.random.choice(ntrain, batch_size)\n srcdata = traindata[np.arange(0, batch_size) % nlen, choicesrc]\n tgtdata = traindata[np.arange(0, batch_size) % nlen, choicetgt]\n tgtctx = traindata[0, choicetgt]\n batch = [srcdata, tgtctx, tgtdata]\n _, loss, sim, r1, r2 = sess.run( [optimizer, test.loss, test.simloss, test.recon1, test.recon2], \n {tfinput: batch, learning_rate:1e-4, tftrain:False, keep_prob:0.5})\n if itr % 4 == 0:\n print(loss, sim, r1, r2)\n allloss.append(loss)\n \n if itr % 40 == 0:\n choicesrc = np.random.choice(nvalid, batch_size)\n choicetgt = np.random.choice(nvalid, batch_size)\n srcdata = validdata[np.arange(0, batch_size) % nlen, choicesrc]\n tgtdata = validdata[np.arange(0, batch_size) % nlen, choicetgt]\n tgtctx = validdata[0, choicetgt]\n batch = [srcdata, tgtctx, tgtdata]\n loss, sim, r1, r2 = sess.run([test.loss, test.simloss, test.recon1, test.recon2], \n {tfinput: batch, tftrain:False, keep_prob:1.0})\n print(loss, sim, r1, r2,'E')\n validloss.append(loss)\n saver.save(sess, 'ablation_' + str(args.experiment_type) + '_' + str(args.ablation_type) + \"_\" + str(itr))\n if itr == 30000 or (itr>30000 and itr%10000 == 0):\n import IPython\n IPython.embed() \n itr += 1", "#### Reward evaluation for agent in Reacher7Dof gym env using a single real-world env\n## Wrtitten by : leopauly | [email protected]\n## Courtesy for DDPG implementation : Steven Spielberg Pon Kumar (github.com/stevenpjg)\n####\n\n##Imports\nimport gym\nfrom gym.spaces import Box, Discrete\nimport numpy as np\nnp.set_printoptions(suppress=True)\nimport cv2\nfrom ddpg import DDPG\nfrom ou_noise import OUNoise\nimport matplotlib.pyplot as plt\nimport scipy.misc as misc\nimport skimage\nimport os\nfrom threading import Thread, Lock\nimport sys\nfrom six.moves import xrange \nimport PIL.Image as Image\nimport random\nimport numpy as np\nimport cv2\nimport time\nimport math\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport keras\nfrom keras import backend as K\nimport lscript as lsp\nimport modelling as md\n\n## Defining vars\nnum_episodes=20\nsteps=60 # No of actions taken in a roll out\nis_batch_norm = False #batch normalization switch\nxrange=range # For python3\nstart_training=64 # Buffer size, before starting to train the RL algorithm\nheight=112 \nwidth=112 \nchannel=3\ncrop_size=112\ncluster_length=16 \nnb_classes=2 \nfeature_size=4608 
\nlayer_name=sys.argv[4]\n\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n## Printing all the experiment hyper-parameters\nprint('Switch:',sys.argv[3])\nprint('Layer name:',sys.argv[4])\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\nswitch=int(sys.argv[3])\nif(switch==0):\n demo_folder='../Demos/demo_reach_0deg_new/'\n base_dir='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_UCF/Proposed/'+layer_name.split('/')[0]+'/M4dof_random/'\n policy_savepath= '/home/ironman2/S2l_storage/policies_saved/thesis/Exp1/Proposed/Proposed_'+layer_name.split('/')[0]+'/M4dof_random/'\nelif (switch==1):\n base_dir='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_UCF/Proposed/'+layer_name.split('/')[0]+'/3DV2_new2/'\n demo_folder='../Demos/demo_reach_0deg_new/'\n policy_savepath= '/home/ironman2/S2l_storage/policies_saved/thesis/Exp1/Proposed/Proposed_'+layer_name.split('/')[0]+'/3DV2_new2/'\nelif (switch==2):\n base_dir='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_UCF/Proposed/'+layer_name.split('/')[0]+'/3DV2_new3/'\n demo_folder='../Demos/demo_reach_0deg_new/'\n policy_savepath= '/home/ironman2/S2l_storage/policies_saved/thesis/Exp1/Proposed/Proposed_'+layer_name.split('/')[0]+'/3DV2_new3/'\nelif (switch==3):\n base_dir='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_UCF/Traj_maps_'+layer_name.split('/')[0]+'_20eps/lr_thesis/'\n policy_savepath='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_UCF/Traj_maps_'+layer_name.split('/')[0]+'_20eps/lr_thesis/'\n demo_folder='../Demos/demo_reach_0deg_new/' \nelif (switch==4):\n base_dir='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_UCF/Traj_maps_'+layer_name.split('/')[0]+'_20eps/ll_thesis_new/'\n policy_savepath='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_Random/Traj_maps_'+layer_name.split('/')[0]+'_20eps/ll_thesis/'\n demo_folder='../Demos/demo_reach_0deg_new/' \nelif (switch==5):\n base_dir='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_Random/Reward_Eval_Conv5_20eps_rand/multi_target_close/'\n demo_folder='../Demos/demo_reach_0deg_new/' \nelif (switch==6):\n base_dir='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_Random/Reward_Eval_Conv5_20eps_rand/multi_target_far/'\n demo_folder='../Demos/demo_reach_0deg_new/' \nelif (switch==-2):\n base_dir='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_UCF/Proposed/'+layer_name.split('/')[0]+'/M4dof+V/'\n policy_savepath= '/home/ironman2/S2l_storage/policies_saved/thesis/Exp1/Proposed/Proposed_'+layer_name.split('/')[0]+'/M4dof+V/'\n demo_folder='../Demos/demo_reach_180deg_new/'\nelif (switch==-4):\n base_dir='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_Random/'+layer_name.split('/')[0]+'/Obj2/'\n demo_folder='../Demos/demo_reach_180deg_new/' \n policy_savepath= 
'/home/ironman2/S2l_storage/policies_saved/thesis/Proposed_'+layer_name.split('/')[0]+'/Obj2/'\nelif (switch==-3):\n base_dir='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_Random/'+layer_name.split('/')[0]+'/Obj1/'\n demo_folder='../Demos/demo_reach_0deg_new/' \n policy_savepath= '/home/ironman2/S2l_storage/policies_saved/thesis/Proposed_'+layer_name.split('/')[0]+'/Obj1_new/'\nelif (switch==-5):\n base_dir='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_UCF/Proposed/'+layer_name.split('/')[0]+'/BG/'\n demo_folder='../Demos/demo_reach_0deg_new/' \n policy_savepath= '/home/ironman2/S2l_storage/policies_saved/thesis/Proposed_'+layer_name.split('/')[0]+'/BG/'\nelif (switch==-6):\n demo_folder='../Demos/demo_reach_0deg_h.s/'\n base_dir='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_UCF/Proposed/'+layer_name.split('/')[0]+'/M4dof+M/'\n policy_savepath= '/home/ironman2/S2l_storage/policies_saved/thesis/Exp1/Proposed/Proposed_'+layer_name.split('/')[0]+'/M4dof+M/'\nelse:\n base_dir='/home/ironman2/Observation-Learning-Simulations/S2l/Thesis_Ch3/Exp1_reach3dof/Results/Results_Random/'+layer_name.split('/')[0]+'/BG-A_new1/'\n demo_folder='../Demos/demo_reach_0deg_new/'\n policy_savepath= '/home/ironman2/S2l_storage/policies_saved/thesis/Exp1/Proposed/Proposed_'+layer_name.split('/')[0]+'/BG-A_new1/'\n \nos.system('mkdir %s' % base_dir)\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\n## Defining env\nenv = gym.make('Pusher3DOFReal-v1')\nenv.switch=switch\nenv.initialize_env()\nassert isinstance(env.observation_space, Box), \"observation space must be continuous\"\nassert isinstance(env.action_space, Box), \"action space must be continuous\"\n\n#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\nclass Frame_Feature:\n def __init__(self):\n self.g=tf.Graph()\n with self.g.as_default():\n self.sess=tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False))\n self.base_model=tf.keras.applications.vgg16.VGG16(include_top=False, weights='imagenet', input_tensor=None, input_shape=(height,width,channel), pooling=None, classes=1000)\n #print(tf.contrib.graph_editor.get_tensors(self.g)) #(tf.get_default_graph()))\n self.base_model._make_predict_function()\n print('VggNet loaded with Imagenet values')\n \n def frame_feature_extractor(self,frame_):\n frame= self.im_preprocess(frame_)\n frame=frame.reshape(-1,height,width,channel)\n frame_features=self.base_model.predict(frame)\n return frame_features\n\n def im_preprocess(self,im):\n im = np.float32(im)\n im[:,:,2] -= 103.939\n im[:,:,1] -= 116.779\n im[:,:,0] -= 123.68\n im = im[:, :, ::-1] # change to BGR\n return im\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\n\n### DEMO FEATURE EXTRACTION\ndef get_compress_frames_data(filename, num_frames_per_clip=cluster_length):\n ret_arr = []\n for parent, dirnames, filenames in os.walk(filename):\n\n filenames = sorted(filenames)\n jump=math.floor((len(filenames)/num_frames_per_clip))\n loop=0\n\n for i in 
range(0,len(filenames),jump):\n if (loop>15):\n break\n if (filenames[i].endswith('.png')):\n image_name = str(filename) + '/' + str(filenames[i])\n img = Image.open(image_name)\n img_data = np.array(img)\n ret_arr.append(img_data)\n loop=loop+1\n ret_arr=np.array(ret_arr)\n #ret_arr=ret_arr/255\n return ret_arr\n\ndef demo_array_extractor(demo_vid_path):\n demo_vid_array=get_compress_frames_data(demo_vid_path)\n return demo_vid_array\n\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\ndef sampling_obs(vid_robo_all,num_frames_per_clip=cluster_length):\n total_obs=len(vid_robo_all)\n jump=math.floor(total_obs/num_frames_per_clip)\n loop=0\n ret_arr=[]\n for i in range(0,total_obs,jump):\n if (loop>15):\n break\n img_data = vid_robo_all[i]\n ret_arr.append(img_data)\n loop=loop+1\n \n ret_arr=np.array(ret_arr)\n #ret_arr=ret_arr/255\n return ret_arr\n\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\n### VIDEO FEATURE EXTRACTION\nclass Vid_Feature:\n \n def __init__(self):\n self.saved_path='/home/ironman2/S2l_storage/trained_activity_nets_thesis/saved/models/' \n self.network_name='activity_model.ckpt-67.meta'\n self.network_weigths_name='activity_model.ckpt-67'\n\n #self.saved_path='/home/ironman2/S2l_storage/trained_C3D_MIME/' \n #self.network_name='activity_model.ckpt-155.meta'\n #self.network_weigths_name='activity_model.ckpt-155'\n ### Activity_net\n self.g=tf.Graph()\n with self.g.as_default():\n\n self.sess = tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))\n ## Restore model weights from previously saved model\n self.saver = tf.train.import_meta_graph(os.path.join(self.saved_path,self.network_name))\n #self.saver.restore(self.sess, os.path.join(self.saved_path,self.network_weigths_name)) \n self.sess.run(tf.global_variables_initializer())\n print(\"Model restored from file: %s\" % self.saved_path,flush=True) \n\n ## For extracting activity features\n def feature_extractor(self,vid_np):\n self.vid_=vid_np.reshape(-1,cluster_length,height,width,channel)\n f_v = self.sess.graph.get_tensor_by_name(layer_name) #('flatten_1/Reshape:0')\n self.f_v_val=np.array(self.sess.run([f_v], feed_dict={'conv1_input:0':self.vid_,'Placeholder:0':self.vid_,'dropout_1/keras_learning_phase:0':0 }))\n self.features=np.reshape(self.f_v_val,(-1))\n return self.features\n\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\ndef distance(f_demo,f_robo):\n norm_val=2\n norm_pow=1\n distance_=np.linalg.norm(f_demo-f_robo,ord=norm_val)\n return pow(distance_,norm_pow)\n\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\ndef s2l(i_run):\n print('This is the ith run',i_run)\n\n #Randomly initialize critic,actor,target critic, target actor network and replay buffer \n num_states = feature_size #num_states = env.observation_space.shape[0]\n num_actions = env.action_space.shape[0] \n print (\"Number of States:\", num_states)\n print (\"Number of Actions:\", num_actions)\n\n agent = DDPG(env, 
is_batch_norm,num_states,num_actions,policy_savepath)\n exploration_noise = OUNoise(env.action_space.shape[0])\n counter=0 \n total_reward=0\n best_reward=-10000\n \n print (\"Number of Steps per episode:\", steps)\n reward_st_per_episode = np.array([0]) #saving reward\n man_pos_x_st_per_episode = np.array([0])\n man_pos_y_st_per_episode = np.array([0])\n eval_metric_st= np.array([0])\n eval_metric_st_per_episode= np.array([0])\n reward_st_per_step = np.array([0]) #saving reward after every step\n \n activity_obj=Vid_Feature()\n demo_vid_array=demo_array_extractor(demo_folder)\n if(i_run==0):\n plt.imshow(demo_vid_array[0])\n plt.savefig('demo_img'+str(switch) +'.png')\n demo_features=activity_obj.feature_extractor(demo_vid_array)\n frame_obj=Frame_Feature()\n\n for episode in range(num_episodes):\n print (\"==== Starting episode no:\",episode,\"====\",\"\\n\")\n env.reset() # Reset env in the begining of each episode\n env.render()\n obs_img=env.render(mode='rgb_array') # Get the observation\n if(i_run==0 and episode==0):\n plt.imshow(obs_img)\n plt.savefig('env_img'+str(switch) +'.png')\n obs_img=np.array(misc.imresize(obs_img,[112,112,3]))\n observation =np.array(frame_obj.frame_feature_extractor(obs_img))\n observation=observation.reshape(-1)\n \n reward_per_episode = 0\n vid_robo_=[]\n\n man_pos_x_st_per_step = np.array([0])\n man_pos_y_st_per_step = np.array([0])\n\n for i in range(steps):\n\n x = observation\n\n action = agent.evaluate_actor(np.reshape(x,[1,num_states]))\n action = action-[-2,1,1,1] # for Mdof4\n #action = action+1 # for rest of everything \n noise = exploration_noise.noise()/(episode+1) # for rest of everything \n #noise = -(exploration_noise.noise()*math.exp(episode)) # for Mdof4\n action = action[0] + noise \n print ('Action at',i_run ,'episode-',episode, 'step-', i ,\" :\",action)\n\n \n _,_,done,info=env.step(action)\n env.render()\n obs_robo_=env.render(mode='rgb_array') \n obs_robo=misc.imresize(obs_robo_,[112,112,3])\n vid_robo_.append(obs_robo)\n \n observation=np.array(frame_obj.frame_feature_extractor(np.array(obs_robo)))\n observation=observation.reshape(-1)\n \n if(i==(steps-1)):\n vid_robo_all=np.array(vid_robo_)\n vid_robo=sampling_obs(vid_robo_all)\n #for i in range(len(vid_robo)):\n # plt.imshow(vid_robo[i])\n # plt.savefig('obs_img'+str(i) +'.png')\n robo_features=activity_obj.feature_extractor(vid_robo)\n reward=-(distance(demo_features,robo_features))\n reward=np.array(reward)\n print('reward: ',reward)\n else:\n reward=0\n reward=np.array(reward)\n print('reward: ',reward)\n\n # Printing eval_metric after every step\n eval_metric=np.array(env.get_eval())\n eval_metric=eval_metric.reshape(-1)\n print('Distance to goal:',eval_metric) \n eval_metric_st = np.append(eval_metric_st,eval_metric) \n np.savetxt(base_dir+'eval_metric_per_step_run_'+str(i_run)+'.txt',eval_metric_st, newline=\"\\n\")\n\n ## Printing and saving final mnaipulator position\n manipulator_pos=env.get_man_pos()\n print('Episode: ',episode,'step: ',i,'Manipulator position: ',manipulator_pos)\n\n man_pos_x_st_per_step = np.append(man_pos_x_st_per_step,manipulator_pos[0])\n np.savetxt(base_dir+'step_man_x_pos_run_'+str(i_run)+'_eps_'+str(episode)+'.txt',man_pos_x_st_per_step, fmt='%f', newline=\"\\n\")\n\n man_pos_y_st_per_step = np.append(man_pos_y_st_per_step,manipulator_pos[1])\n np.savetxt(base_dir+'step_man_y_pos_run_'+str(i_run)+'_eps_'+str(episode)+'.txt',man_pos_y_st_per_step, fmt='%f', newline=\"\\n\")\n \n\n # Storing reward after every step\n reward_st_per_step = 
np.append(reward_st_per_step,reward)\n np.savetxt(base_dir+'reward_per_step_run_'+str(i_run)+'.txt',reward_st_per_step, newline=\"\\n\")\n\n #add s_t,s_t+1,action,reward to experience memory\n agent.add_experience(x,observation,action,reward,False)\n counter+=1\n \n #train critic and actor network\n if counter > start_training: \n agent.train() #\n print ('\\n\\n')\n print('Episode: ',episode,' Manipulator position: ',env.get_man_pos())\n \n reward_per_step=reward\n reward_per_episode+=reward_per_step \n\n ## Printing and saving episode rewards\n print ('Episode: ',episode,' Episode Reward: ',reward_per_episode)\n exploration_noise.reset() #reinitializing random noise for action exploration\n reward_st_per_episode = np.append(reward_st_per_episode,reward_per_episode)\n np.savetxt(base_dir+'episode_reward_run_'+str(i_run)+'.txt',reward_st_per_episode, fmt='%f', newline=\"\\n\")\n\n ## Printing and saving final manipulator position\n manipulator_final_pos=env.get_man_pos()\n print('Episode: ',episode,' Manipulator position: ',manipulator_final_pos)\n\n man_pos_x_st_per_episode = np.append(man_pos_x_st_per_episode,manipulator_final_pos[0])\n np.savetxt(base_dir+'episode_man_x_pos_run_'+str(i_run)+'.txt',man_pos_x_st_per_episode, fmt='%f', newline=\"\\n\")\n\n man_pos_y_st_per_episode = np.append(man_pos_y_st_per_episode,manipulator_final_pos[1])\n np.savetxt(base_dir+'episode_man_y_pos_run_'+str(i_run)+'.txt',man_pos_y_st_per_episode, fmt='%f', newline=\"\\n\")\n \n ## Saving\n if (best_reward<reward_per_episode):\n best_reward=reward_per_episode\n print('best reward:',best_reward)\n print('current reward:',reward_per_episode)\n print('saving policy for episode..................:',episode)\n #agent.save_actor(episode,i_run)\n \n ## Printing eval_metric at the end of the episode\n eval_metric=np.array(env.get_eval())\n eval_metric=eval_metric.reshape(-1)\n print('Distance to goal at the end of episode:',eval_metric) \n eval_metric_st_per_episode = np.append(eval_metric_st_per_episode,eval_metric) \n np.savetxt(base_dir+'eval_metric_per_episode_run_'+str(i_run)+'.txt', eval_metric_st_per_episode, newline=\"\\n\")\n\n total_reward+=reward_per_episode\n\n\n print (\"Average reward per episode {}\".format(total_reward / num_episodes))\n print('Best episode reward',best_reward) \n print ('\\n\\n')\n\n\n del agent\n del activity_obj\n del frame_obj\n\n\nif __name__=='__main__':\n from datetime import datetime\n start_time=str(datetime.now())\n run_start=int(sys.argv[1])\n run_end=int(sys.argv[2])\n print('Start trial:',run_start,'End trial:',run_end-1)\n for i_run in range(run_start,run_end):\n s2l(i_run)\n print('Start to end time:',start_time,str(datetime.now()))\n", "from sandbox.rocky.tf.algos.trpo import TRPO\nfrom rllab.baselines.linear_feature_baseline import LinearFeatureBaseline\nfrom rllab.envs.normalized_env import normalize\nfrom sandbox.rocky.tf.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer\nfrom sandbox.rocky.tf.optimizers.conjugate_gradient_optimizer import FiniteDifferenceHvp\nfrom sandbox.rocky.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy\nfrom sandbox.rocky.tf.envs.base import TfEnv\nfrom sandbox.bradly.third_person.policy.random_policy import RandomPolicy\nfrom sandbox.bradly.third_person.algos.cyberpunk_trainer import CyberPunkTrainer\nfrom sandbox.bradly.third_person.policy.expert_reacher import load_expert_reacher\nfrom sandbox.bradly.third_person.envs.reacher import ReacherEnv #TODO: Make this randomize all the time\nfrom 
sandbox.bradly.third_person.envs.reacher_two import ReacherTwoEnv #TODO: Make this randomize only once.\n\nfrom sandbox.bradly.third_person.discriminators.discriminator import DomainConfusionVelocityDiscriminator\nfrom sandbox.bradly.third_person.launchers.cyberpunk_launcher_newreacher_aws import AWSDummy\nimport joblib\nimport tensorflow as tf\nfrom rllab.envs.gym_env import GymEnv\n\nfrom rllab.misc.instrument import stub, run_experiment_lite\n\nfrom rllab import config\n\nstub(globals())\n\nimport numpy as np\n\nconfig.AWS_IMAGE_ID = \"ami-6df5d30d\"\nconfig.AWS_INSTANCE_TYPE = \"g2.2xlarge\"\nconfig.AWS_SPOT_PRICE = \"0.7001\"\nsubnet = 'us-west-1c'\n\nconfig.AWS_NETWORK_INTERFACES = [\n dict(\n SubnetId=config.ALL_SUBNET_INFO[subnet][\"SubnetID\"],\n Groups=[config.ALL_SUBNET_INFO[subnet][\"Groups\"]],\n DeviceIndex=0,\n AssociatePublicIpAddress=True,\n )\n ]\n\n#\n## novice_env = TfEnv(normalize(ReacherTwoEnv(), normalize_obs=True))\n#expert_fail_pol = RandomPolicy(expert_env.spec)\n#\n#policy = GaussianMLPPolicy(\n# name=\"novice_policy\",\n# env_spec=novice_env.spec,\n# # The neural network policy should have two hidden layers, each with 32 hidden units.\n# hidden_sizes=(32, 32)\n#)\n#\n#baseline = LinearFeatureBaseline(env_spec=expert_env.spec)\n#\n#algo = TRPO(\n# env=novice_env,\n# policy=policy,\n# baseline=baseline,\n# batch_size=4000,\n# max_path_length=50,\n# n_itr=40,\n# discount=0.99,\n# step_size=0.01,\n# optimizer=ConjugateGradientOptimizer(hvp_approach=FiniteDifferenceHvp(base_eps=1e-5))\n#\n#)\n#\n#config = tf.ConfigProto()\n#config.gpu_options.allow_growth=True\n#with tf.Session(config=config) as sess:\n#\n# #What do the n_itr and start_itr mean?\n# algo.n_itr = 0\n# algo.start_itr = 0\n# algo.train(sess=sess) #TODO: What is happening here?\n#\n# im_height = 36\n# im_width = 64\n# im_channels = 3\n#\n# dim_input = [im_height, im_width, im_channels]\n#\n# disc = DomainConfusionVelocityDiscriminator(input_dim=dim_input, output_dim_class=2, output_dim_dom=2,\n# tf_sess=sess)\n#\n# data = joblib.load(\"/home/andrewliu/research/viewpoint/rllab-tpil/third_person_im/data/local/experiment/experiment_2017_05_07_20_58_39_0001/itr_123.pkl\")#\"/home/abhigupta/abhishek_sandbox/viewpoint/third_person_im/data/local/experiment/experiment_2017_05_06_18_07_38_0001/itr_900.pkl\")\n# expert_policy = data['policy']\n#\n# # expert_policy = load_expert_reacher(expert_env, sess) #Load the expert #TODO: Need to train the expert\n#\n# #from rllab.sampler.utils import rollout\n# #while True:\n# # t = rollout(env=expert_env, agent=expert_policy, max_path_length=50, animated=True)\n#\n# algo.n_itr = 40\n# trainer = CyberPunkTrainer(disc=disc, novice_policy_env=novice_env, expert_fail_pol=expert_fail_pol,\n# expert_env=expert_env, novice_policy=policy,\n# novice_policy_opt_algo=algo, expert_success_pol=expert_policy,\n# im_width=im_width, im_height=im_height, im_channels=im_channels,\n# tf_sess=sess, horizon=50)\n#\n# iterations = 100\n# for iter_step in range(0, iterations):\n# trainer.take_iteration(n_trajs_cost=1000, n_trajs_policy=1000)\n#\n# trainer.log_and_finish()\n#\ndef getcolor():\n color = np.random.uniform(low=0, high=1, size=3)\n while np.linalg.norm(color - np.array([1.,0.,0.])) < 0.5:\n color = np.random.uniform(low=0, high=1, size=3)\n return color\nfor nvars in range(20):\n# for mem in range(200, 500, 30):\n vp = np.random.uniform(low=0, high=360)\n goal = np.concatenate([np.random.uniform(low=-1.1, high=-0.5, size=1),\n np.random.uniform(low=0.5, high=1.1, size=1)]).tolist()\n 
armcolor = getcolor()\n bgcolor = getcolor()\n while np.linalg.norm(bgcolor - armcolor) < 0.5:\n bgcolor = np.random.uniform(low=0, high=1, size=3)\n armcolor = armcolor.tolist() + [1.0]\n bgcolor = bgcolor.tolist() + [1.0]\n\n expert_env = TfEnv(GymEnv(\"Pusher3DOF-v1\", force_reset=True, record_video=False))\n ## expert_env = TfEnv(normalize(ReacherEnv()))\n novice_env = TfEnv(GymEnv(\"Pusher3DOFNoChange-v1\", force_reset=True, record_video=True,\n goal=goal, vp=vp, bgcolor=bgcolor, armcolor=armcolor))\n\n dummy = AWSDummy(expert_env=expert_env, novice_env=novice_env,\n horizon=50, itrs=100, trajs=100, expert_pkl='expert_reach.pkl')\n\n run_experiment_lite(dummy.run(),\n exp_prefix=\"cyberpunk_reach1\",\n n_parallel=1,\n # dry=True,\n snapshot_mode=\"all\",\n seed=1,\n mode=\"ec2_mujoco\"\n )\n" ]
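Note on the code column above: each of the RL scripts hand-rolls the same VGG16 "caffe" preprocessing (per-channel ImageNet mean subtraction followed by an RGB-to-BGR flip) in its im_preprocess method. A minimal standalone sketch, not part of the record itself, showing that this transform matches the preprocess_input helper shipped with Keras's VGG16 module; the 112x112 frame size mirrors the scripts, the random frame is illustrative only.

import numpy as np
import tensorflow as tf

def im_preprocess_manual(im):
    # same transform as the im_preprocess methods in the scripts above
    im = np.float32(im)
    im[:, :, 2] -= 103.939  # blue-channel ImageNet mean (array still in RGB order here)
    im[:, :, 1] -= 116.779  # green-channel mean
    im[:, :, 0] -= 123.68   # red-channel mean
    return im[:, :, ::-1]   # reorder RGB -> BGR, as the original VGG16 weights expect

frame = np.random.randint(0, 256, (112, 112, 3)).astype(np.float32)
manual = im_preprocess_manual(frame.copy())
builtin = tf.keras.applications.vgg16.preprocess_input(frame.copy())
print(np.allclose(manual, builtin))  # True: both implement 'caffe'-mode preprocessing

Using the library helper avoids duplicating the magic mean constants across five scripts, which is how the hand-rolled copies drifted apart in the first place (some keep the baseline comments, some do not).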
[ [ "numpy.random.get_state", "numpy.random.seed", "numpy.min", "numpy.asarray", "tensorflow.assign", "tensorflow.placeholder", "numpy.max", "tensorflow.initialize_all_variables", "numpy.random.set_state", "numpy.mean", "numpy.iinfo", "tensorflow.train.AdamOptimizer", "tensorflow.square", "tensorflow.Session", "numpy.array", "numpy.sum" ], [ "tensorflow.ConfigProto", "tensorflow.Session" ], [ "numpy.ones", "numpy.concatenate", "numpy.copy", "numpy.random.normal", "numpy.zeros_like" ], [ "matplotlib.pyplot.title", "matplotlib.style.use", "matplotlib.pyplot.scatter", "matplotlib.pyplot.savefig", "matplotlib.pyplot.hold", "numpy.std", "numpy.loadtxt", "numpy.mean", "numpy.corrcoef", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "tensorflow.Graph", "scipy.misc.imresize", "numpy.reshape", "numpy.set_printoptions", "tensorflow.keras.applications.vgg16.VGG16", "numpy.linalg.norm", "tensorflow.ConfigProto", "numpy.append", "numpy.float32", "numpy.savetxt", "numpy.array" ], [ "tensorflow.contrib.graph_editor.get_tensors", "tensorflow.Graph", "scipy.misc.imresize", "numpy.reshape", "numpy.set_printoptions", "tensorflow.keras.applications.vgg16.VGG16", "numpy.linalg.norm", "tensorflow.ConfigProto", "numpy.append", "numpy.float32", "numpy.savetxt", "numpy.array" ], [ "tensorflow.concat", "tensorflow.nn.conv2d_transpose", "tensorflow.nn.l2_loss", "tensorflow.train.AdamOptimizer", "tensorflow.nn.conv2d", "numpy.arange", "tensorflow.nn.moments", "tensorflow.nn.deconv2d", "tensorflow.truncated_normal_initializer", "tensorflow.ConfigProto", "tensorflow.reset_default_graph", "tensorflow.Session", "tensorflow.train.Saver", "numpy.load", "tensorflow.random_normal_initializer", "tensorflow.nn.dropout", "tensorflow.matmul", "numpy.random.choice", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.contrib.layers.batch_norm", "numpy.array", "tensorflow.nn.bias_add", "tensorflow.reduce_mean", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.constant_initializer", "tensorflow.variable_scope" ], [ "tensorflow.Graph", "matplotlib.pyplot.imshow", "scipy.misc.imresize", "numpy.reshape", "numpy.set_printoptions", "tensorflow.keras.applications.vgg16.VGG16", "numpy.linalg.norm", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "numpy.append", "numpy.float32", "numpy.array" ], [ "numpy.random.uniform", "numpy.array", "numpy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "1.0", "0.19", "0.18", "1.2", "0.12", "0.10", "0.17", "0.16" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "1.0", "0.19", "0.18", "1.2", "0.12", "0.10", "0.17", "0.16" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "1.0", "0.19", "0.18", "1.2", "0.12", "0.10", "0.17", "0.16" ], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
LouisLesueur/gods
[ "9522b6fc43851be062e2f9be84fa12cb8d74ccbb" ]
[ "python/generate.py" ]
[ "import pygraphviz as pgv\nimport numpy as np\n\n\nclass Person:\n\tdef __init__(self, name,id,origin=0):\n\t\tself.name = name\n\t\tself.id = id\n\t\tself.origin=origin\n\t\tself.specs=[]\n\n\tdef to_js(self):\n\n\t\tspecs_js = \"\"\n\t\tfor spec in self.specs:\n\t\t\tspecs_js += spec + ', '\n\n\t\treturn \" { id: \"+str(self.id)+\", label: '\"+self.name+\"',\"+specs_js+\" shape: 'dot'},\\n\"\n\n\tdef __str__(self):\n\t\treturn self.name\n\n\tdef __eq__(self, pers):\n\t\treturn self.name == pers.name\n\n\tdef add_spec(self, spec):\n\t\tself.specs.append(spec)\n\n\nclass Relation:\n\tdef __init__(self, nature, id1, id2, origin=0):\n\t\tself.nature = nature\n\t\tself.id1 = id1\n\t\tself.id2 = id2\n\t\tself.origin = origin\n\n\tdef to_js(self):\n\t\tcolor = \"orange\"\n\t\tif self.nature == \"partner\":\n\t\t\tcolor = \"red\"\n\t\t\treturn \" { from: \"+str(self.id1)+\", to: \"+str(self.id2)+\", relation: '\"+self.nature+\"', color: '\"+color+\"'},\\n\"\n\t\tif self.nature == \"mom\" or self.nature==\"father\":\n\t\t\treturn \" { from: \"+str(self.id1)+\", to: \"+str(self.id2)+\", relation: '\"+self.nature+\"', arrows: 'to', color: '\"+color+\"'},\\n\"\n\t\tif self.nature[:4]==\"king\":\n\t\t\treturn \" { from: \"+str(self.id1)+\", to: \"+str(self.id2)+\", relation: '\"+self.nature+\"', arrows: 'to', color: 'yellow'},\\n\"\n\n\nclass graph:\n\tdef __init__(self, persons, relations, cities, sides):\n\t\tself.persons = persons\n\t\tself.relations = relations\n\t\tself.cities = cities\n\t\tself.sides = sides\n\n\n\tdef to_js(self):\n\t\tfile = open('../js/data.js', 'w')\n\t\tfile.write(\"const nodes = new vis.DataSet([\\n\")\n\t\tfor pers in self.persons:\n\t\t\tfile.write(pers.to_js())\n\t\tfile.write(\"]);\\n\")\n\t\tfile.write(\"const edges = new vis.DataSet([\\n\")\n\t\tfor rel in self.relations:\n\t\t\tfile.write(rel.to_js())\n\t\tfile.write(\"]);\\n\")\n\t\tfile.close()\n\n\nclass Tree:\n\tdef __init__(self, authors):\n\t\tself.names = set({})\n\t\tself.persons = {}\n\t\tself.relations = []\n\t\tself.cities = []\n\t\tself.sides = []\n\n\n\t\tdef add_person(name, origin):\n\t\t\tidx = len(self.names)\n\t\t\tif name != '?':\n\t\t\t\tif name != 'none':\n\t\t\t\t\tself.names.add(name)\n\t\t\t\t\tif len(self.names)>idx:\n\t\t\t\t\t\tself.persons[name] = (Person(name,idx,origin))\n\n\t\tdef add_relation(name1, name2, nature,origin=0):\n\t\t\tif name1 in self.names and name2 in self.names:\n\t\t\t\tid1 = self.persons[name1].id\n\t\t\t\tid2 = self.persons[name2].id\n\t\t\t\tself.relations.append(Relation(nature,id1,id2,origin))\n\n\n\t\tfor j,a in enumerate(authors):\n\t\t\twith open(a, 'r') as file:\n\t\t\t\tdata = np.loadtxt(file, dtype='str', comments='#', delimiter=',')\n\t\t\t\tfor i,dat in enumerate(data):\n\n\t\t\t\t\tadd_person(dat[0],j)\n\t\t\t\t\tadd_person(dat[1],j)\n\t\t\t\t\tadd_relation(dat[0],dat[1],\"partner\",j)\n\n\t\t\t\t\tfor child in dat[2][1:-1].split(\" \"):\n\t\t\t\t\t\tadd_person(child,j)\n\t\t\t\t\t\tadd_relation(dat[0],child,\"father\",j)\n\t\t\t\t\t\tadd_relation(dat[1],child,\"mom\",j)\n\t\t\tfile.close()\n\n\t\twith open('kings', 'r') as file:\n\t\t\tdata = np.loadtxt(file, dtype='str', comments='#', delimiter=',')\n\t\t\tfor i,dat in enumerate(data):\n\t\t\t\tself.cities.append(dat[0])\n\t\t\t\tcity = dat[0]\n\t\t\t\tkings = dat[1][1:-1].split(\" \")\n\t\t\t\tfor j in range(len(kings)-1):\n\t\t\t\t\tself.persons[kings[j]].add_spec(\"king: '\"+city+\"' \")\n\t\t\t\t\tadd_relation(kings[j], kings[j+1], \"king\")\n\t\t\t\tself.persons[kings[-1]].add_spec(\"king: 
'\"+city+\"' \")\n\t\tfile.close()\n\n\t\twith open('Troie', 'r') as file:\n\t\t\tdata = np.loadtxt(file, dtype='str', comments='#', delimiter=',')\n\t\t\tfor i,dat in enumerate(data):\n\t\t\t\tself.sides.append(dat[0])\n\t\t\t\tside = dat[0]\n\t\t\t\tsides = dat[1][1:-1].split(\" \")\n\t\t\t\tfor j in range(len(sides)-1):\n\t\t\t\t\tself.persons[sides[j]].add_spec(\"side: '\"+side+\"' \")\n\t\t\t\tself.persons[sides[-1]].add_spec(\"side: '\"+side+\"' \")\n\t\tfile.close()\n\n\t\tself.graph = graph(self.persons.values(), self.relations, self.cities, self.sides)\n\n\tdef export_persos(self):\n\t\tfile = open('Persos', 'w')\n\t\tfor name in self.names:\n\t\t\tfile.write(name+'\\n')\n\t\tfile.close()\n\n\n\nApo = Tree(['Apollodore', 'Ajouts'])\nApo.export_persos()\nApo.graph.to_js()\n" ]
[ [ "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kimjaed/simpeg
[ "b8d716f86a4ea07ba3085fabb24c2bc974788040", "b8d716f86a4ea07ba3085fabb24c2bc974788040", "b8d716f86a4ea07ba3085fabb24c2bc974788040", "b8d716f86a4ea07ba3085fabb24c2bc974788040" ]
[ "examples/02-mesh/plot_quadtree_facediv.py", "tests/base/test_directives.py", "tests/em/fdem/inverse/derivs/test_FDEM_derivs.py", "tests/em/tdem/test_TDEM_inductive_permeable.py" ]
[ "\"\"\"\nMesh: QuadTree: FaceDiv\n=======================\n\nShowing the face divergence on the quadtree with numbering.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom SimPEG import Mesh\n\n\ndef run(plotIt=True, n=60):\n\n M = Mesh.TreeMesh([[(1, 16)], [(1, 16)]], levels=4)\n\n M.insert_cells(\n np.c_[5, 5], np.r_[3],\n finalize=True\n )\n\n if plotIt:\n fig, axes = plt.subplots(2, 1, figsize=(10, 10))\n\n M.plotGrid(cells=True, nodes=False, ax=axes[0])\n axes[0].axis('off')\n axes[0].set_title('Simple QuadTree Mesh')\n axes[0].set_xlim([-1, 17])\n axes[0].set_ylim([-1, 17])\n\n for ii, loc in zip(range(M.nC), M.gridCC):\n axes[0].text(loc[0]+0.2, loc[1], '{0:d}'.format(ii), color='r')\n\n axes[0].plot(M.gridFx[:, 0], M.gridFx[:, 1], 'g>')\n for ii, loc in zip(range(M.nFx), M.gridFx):\n axes[0].text(loc[0]+0.2, loc[1], '{0:d}'.format(ii), color='g')\n\n axes[0].plot(M.gridFy[:, 0], M.gridFy[:, 1], 'm^')\n for ii, loc in zip(range(M.nFy), M.gridFy):\n axes[0].text(\n loc[0]+0.2, loc[1]+0.2, '{0:d}'.format(\n (ii+M.nFx)\n ),\n color='m'\n )\n\n axes[1].spy(M.faceDiv)\n axes[1].set_title('Face Divergence')\n axes[1].set_ylabel('Cell Number')\n axes[1].set_xlabel('Face Number')\n\nif __name__ == '__main__':\n run()\n plt.show()\n", "import unittest\nimport warnings\nimport pytest\nimport numpy as np\n\nfrom SimPEG import (\n Mesh, Maps, Directives, Regularization, DataMisfit, Optimization,\n Inversion, InvProblem\n)\nfrom SimPEG import PF\n\n\nclass DirectivesValidation(unittest.TestCase):\n\n def test_validation_pass(self):\n betaest = Directives.BetaEstimate_ByEig()\n\n IRLS = Directives.Update_IRLS(\n f_min_change=1e-4, minGNiter=3, beta_tol=1e-2\n )\n update_Jacobi = Directives.UpdatePreconditioner()\n dList = [betaest, IRLS, update_Jacobi]\n directiveList = Directives.DirectiveList(*dList)\n\n self.assertTrue(directiveList.validate())\n\n def test_validation_fail(self):\n betaest = Directives.BetaEstimate_ByEig()\n\n IRLS = Directives.Update_IRLS(\n f_min_change=1e-4, minGNiter=3, beta_tol=1e-2\n )\n update_Jacobi = Directives.UpdatePreconditioner()\n dList = [betaest, update_Jacobi, IRLS]\n directiveList = Directives.DirectiveList(*dList)\n\n with self.assertRaises(AssertionError):\n self.assertTrue(directiveList.validate())\n\n def test_validation_warning(self):\n betaest = Directives.BetaEstimate_ByEig()\n\n IRLS = Directives.Update_IRLS(\n f_min_change=1e-4, minGNiter=3, beta_tol=1e-2\n )\n update_Jacobi = Directives.UpdatePreconditioner()\n dList = [betaest, IRLS]\n directiveList = Directives.DirectiveList(*dList)\n\n with pytest.warns(UserWarning):\n self.assertTrue(directiveList.validate())\n\n\nclass ValidationInInversion(unittest.TestCase):\n\n def setUp(self):\n mesh = Mesh.TensorMesh([4, 4, 4])\n\n # Magnetic inducing field parameter (A,I,D)\n B = [50000, 90, 0]\n\n # Create a MAGsurvey\n rx = PF.BaseMag.RxObs(\n np.vstack([[0.25, 0.25, 0.25], [-0.25, -0.25, 0.25]])\n )\n srcField = PF.BaseMag.SrcField([rx], param=(B[0], B[1], B[2]))\n survey = PF.BaseMag.LinearSurvey(srcField)\n\n # Create the forward model operator\n prob = PF.Magnetics.MagneticIntegral(\n mesh, chiMap=Maps.IdentityMap(mesh)\n )\n\n # Pair the survey and problem\n survey.pair(prob)\n\n # Compute forward model some data\n m = np.random.rand(mesh.nC)\n survey.makeSyntheticData(m)\n\n reg = Regularization.Sparse(mesh)\n reg.mref = np.zeros(mesh.nC)\n\n wr = np.sum(prob.G**2., axis=0)**0.5\n reg.cell_weights = wr\n reg.norms = np.c_[0, 1, 1, 1]\n reg.eps_p, reg.eps_q = 1e-3, 1e-3\n\n # Data 
misfit function\n dmis = DataMisfit.l2_DataMisfit(survey)\n dmis.W = 1./survey.std\n\n # Add directives to the inversion\n opt = Optimization.ProjectedGNCG(\n maxIter=2, lower=-10., upper=10.,\n maxIterCG=2\n )\n\n invProb = InvProblem.BaseInvProblem(dmis, reg, opt)\n\n self.mesh = mesh\n self.invProb = invProb\n\n def test_validation_in_inversion(self):\n betaest = Directives.BetaEstimate_ByEig()\n\n # Here is where the norms are applied\n IRLS = Directives.Update_IRLS(\n f_min_change=1e-4, minGNiter=3, beta_tol=1e-2\n )\n\n update_Jacobi = Directives.UpdatePreconditioner()\n\n with self.assertRaises(AssertionError):\n # validation should happen and this will fail\n # (IRLS needs to be before update_Jacobi)\n inv = Inversion.BaseInversion(\n self.invProb, directiveList=[betaest, update_Jacobi, IRLS]\n )\n\n with self.assertRaises(AssertionError):\n # validation should happen and this will fail\n # (IRLS needs to be before update_Jacobi)\n inv = Inversion.BaseInversion(self.invProb)\n inv.directiveList = [betaest, update_Jacobi, IRLS]\n\n\nif __name__ == '__main__':\n unittest.main()\n", "from __future__ import print_function\nimport unittest\nimport numpy as np\nfrom SimPEG import Tests\nfrom scipy.constants import mu_0\nfrom SimPEG.EM.Utils.testingUtils import getFDEMProblem\n\ntestE = False\ntestB = False\ntestH = True\ntestJ = False\n\nverbose = False\n\nTOL = 1e-5\nFLR = 1e-20 # \"zero\", so if residual below this --> pass regardless of order\nCONDUCTIVITY = 1e1\nMU = mu_0\nfreq = 1e-1\naddrandoms = True\n\nSrcType = ['MagDipole', 'RawVec'] # or 'MAgDipole_Bfield', 'CircularLoop', 'RawVec'\n\n\ndef derivTest(fdemType, comp):\n\n prb = getFDEMProblem(fdemType, comp, SrcType, freq)\n # prb.solverOpts = dict(check_accuracy=True)\n\n print('{0!s} formulation - {1!s}'.format(fdemType, comp))\n x0 = np.log(np.ones(prb.sigmaMap.nP)*CONDUCTIVITY)\n # mu = np.log(np.ones(prb.mesh.nC)*MU)\n\n if addrandoms is True:\n x0 = x0 + np.random.randn(prb.sigmaMap.nP)*np.log(CONDUCTIVITY)*1e-1\n # mu = mu + np.random.randn(prb.sigmaMap.nP)*MU*1e-1\n\n survey = prb.survey\n\n def fun(x):\n return survey.dpred(x), lambda x: prb.Jvec(x0, x)\n return Tests.checkDerivative(fun, x0, num=2, plotIt=False, eps=FLR)\n\n\nclass FDEM_DerivTests(unittest.TestCase):\n if testE:\n def test_Jvec_exr_Eform(self):\n self.assertTrue(derivTest('e', 'exr'))\n def test_Jvec_eyr_Eform(self):\n self.assertTrue(derivTest('e', 'eyr'))\n def test_Jvec_ezr_Eform(self):\n self.assertTrue(derivTest('e', 'ezr'))\n def test_Jvec_exi_Eform(self):\n self.assertTrue(derivTest('e', 'exi'))\n def test_Jvec_eyi_Eform(self):\n self.assertTrue(derivTest('e', 'eyi'))\n def test_Jvec_ezi_Eform(self):\n self.assertTrue(derivTest('e', 'ezi'))\n\n def test_Jvec_bxr_Eform(self):\n self.assertTrue(derivTest('e', 'bxr'))\n def test_Jvec_byr_Eform(self):\n self.assertTrue(derivTest('e', 'byr'))\n def test_Jvec_bzr_Eform(self):\n self.assertTrue(derivTest('e', 'bzr'))\n def test_Jvec_bxi_Eform(self):\n self.assertTrue(derivTest('e', 'bxi'))\n def test_Jvec_byi_Eform(self):\n self.assertTrue(derivTest('e', 'byi'))\n def test_Jvec_bzi_Eform(self):\n self.assertTrue(derivTest('e', 'bzi'))\n\n def test_Jvec_exr_Eform(self):\n self.assertTrue(derivTest('e', 'jxr'))\n def test_Jvec_eyr_Eform(self):\n self.assertTrue(derivTest('e', 'jyr'))\n def test_Jvec_ezr_Eform(self):\n self.assertTrue(derivTest('e', 'jzr'))\n def test_Jvec_exi_Eform(self):\n self.assertTrue(derivTest('e', 'jxi'))\n def test_Jvec_eyi_Eform(self):\n self.assertTrue(derivTest('e', 'jyi'))\n 
def test_Jvec_ezi_Eform(self):\n self.assertTrue(derivTest('e', 'jzi'))\n\n def test_Jvec_bxr_Eform(self):\n self.assertTrue(derivTest('e', 'hxr'))\n def test_Jvec_byr_Eform(self):\n self.assertTrue(derivTest('e', 'hyr'))\n def test_Jvec_bzr_Eform(self):\n self.assertTrue(derivTest('e', 'hzr'))\n def test_Jvec_bxi_Eform(self):\n self.assertTrue(derivTest('e', 'hxi'))\n def test_Jvec_byi_Eform(self):\n self.assertTrue(derivTest('e', 'hyi'))\n def test_Jvec_bzi_Eform(self):\n self.assertTrue(derivTest('e', 'hzi'))\n\n if testB:\n def test_Jvec_exr_Bform(self):\n self.assertTrue(derivTest('b', 'exr'))\n def test_Jvec_eyr_Bform(self):\n self.assertTrue(derivTest('b', 'eyr'))\n def test_Jvec_ezr_Bform(self):\n self.assertTrue(derivTest('b', 'ezr'))\n def test_Jvec_exi_Bform(self):\n self.assertTrue(derivTest('b', 'exi'))\n def test_Jvec_eyi_Bform(self):\n self.assertTrue(derivTest('b', 'eyi'))\n def test_Jvec_ezi_Bform(self):\n self.assertTrue(derivTest('b', 'ezi'))\n\n def test_Jvec_bxr_Bform(self):\n self.assertTrue(derivTest('b', 'bxr'))\n def test_Jvec_byr_Bform(self):\n self.assertTrue(derivTest('b', 'byr'))\n def test_Jvec_bzr_Bform(self):\n self.assertTrue(derivTest('b', 'bzr'))\n def test_Jvec_bxi_Bform(self):\n self.assertTrue(derivTest('b', 'bxi'))\n def test_Jvec_byi_Bform(self):\n self.assertTrue(derivTest('b', 'byi'))\n def test_Jvec_bzi_Bform(self):\n self.assertTrue(derivTest('b', 'bzi'))\n\n def test_Jvec_jxr_Bform(self):\n self.assertTrue(derivTest('b', 'jxr'))\n def test_Jvec_jyr_Bform(self):\n self.assertTrue(derivTest('b', 'jyr'))\n def test_Jvec_jzr_Bform(self):\n self.assertTrue(derivTest('b', 'jzr'))\n def test_Jvec_jxi_Bform(self):\n self.assertTrue(derivTest('b', 'jxi'))\n def test_Jvec_jyi_Bform(self):\n self.assertTrue(derivTest('b', 'jyi'))\n def test_Jvec_jzi_Bform(self):\n self.assertTrue(derivTest('b', 'jzi'))\n\n def test_Jvec_hxr_Bform(self):\n self.assertTrue(derivTest('b', 'hxr'))\n def test_Jvec_hyr_Bform(self):\n self.assertTrue(derivTest('b', 'hyr'))\n def test_Jvec_hzr_Bform(self):\n self.assertTrue(derivTest('b', 'hzr'))\n def test_Jvec_hxi_Bform(self):\n self.assertTrue(derivTest('b', 'hxi'))\n def test_Jvec_hyi_Bform(self):\n self.assertTrue(derivTest('b', 'hyi'))\n def test_Jvec_hzi_Bform(self):\n self.assertTrue(derivTest('b', 'hzi'))\n\n if testJ:\n def test_Jvec_jxr_Jform(self):\n self.assertTrue(derivTest('j', 'jxr'))\n def test_Jvec_jyr_Jform(self):\n self.assertTrue(derivTest('j', 'jyr'))\n def test_Jvec_jzr_Jform(self):\n self.assertTrue(derivTest('j', 'jzr'))\n def test_Jvec_jxi_Jform(self):\n self.assertTrue(derivTest('j', 'jxi'))\n def test_Jvec_jyi_Jform(self):\n self.assertTrue(derivTest('j', 'jyi'))\n def test_Jvec_jzi_Jform(self):\n self.assertTrue(derivTest('j', 'jzi'))\n\n def test_Jvec_hxr_Jform(self):\n self.assertTrue(derivTest('j', 'hxr'))\n def test_Jvec_hyr_Jform(self):\n self.assertTrue(derivTest('j', 'hyr'))\n def test_Jvec_hzr_Jform(self):\n self.assertTrue(derivTest('j', 'hzr'))\n def test_Jvec_hxi_Jform(self):\n self.assertTrue(derivTest('j', 'hxi'))\n def test_Jvec_hyi_Jform(self):\n self.assertTrue(derivTest('j', 'hyi'))\n def test_Jvec_hzi_Jform(self):\n self.assertTrue(derivTest('j', 'hzi'))\n\n def test_Jvec_exr_Jform(self):\n self.assertTrue(derivTest('j', 'exr'))\n def test_Jvec_eyr_Jform(self):\n self.assertTrue(derivTest('j', 'eyr'))\n def test_Jvec_ezr_Jform(self):\n self.assertTrue(derivTest('j', 'ezr'))\n def test_Jvec_exi_Jform(self):\n self.assertTrue(derivTest('j', 'exi'))\n def test_Jvec_eyi_Jform(self):\n 
self.assertTrue(derivTest('j', 'eyi'))\n def test_Jvec_ezi_Jform(self):\n self.assertTrue(derivTest('j', 'ezi'))\n\n def test_Jvec_bxr_Jform(self):\n self.assertTrue(derivTest('j', 'bxr'))\n def test_Jvec_byr_Jform(self):\n self.assertTrue(derivTest('j', 'byr'))\n def test_Jvec_bzr_Jform(self):\n self.assertTrue(derivTest('j', 'bzr'))\n def test_Jvec_bxi_Jform(self):\n self.assertTrue(derivTest('j', 'bxi'))\n def test_Jvec_byi_Jform(self):\n self.assertTrue(derivTest('j', 'byi'))\n def test_Jvec_bzi_Jform(self):\n self.assertTrue(derivTest('j', 'bzi'))\n\n if testH:\n def test_Jvec_hxr_Hform(self):\n self.assertTrue(derivTest('h', 'hxr'))\n def test_Jvec_hyr_Hform(self):\n self.assertTrue(derivTest('h', 'hyr'))\n def test_Jvec_hzr_Hform(self):\n self.assertTrue(derivTest('h', 'hzr'))\n def test_Jvec_hxi_Hform(self):\n self.assertTrue(derivTest('h', 'hxi'))\n def test_Jvec_hyi_Hform(self):\n self.assertTrue(derivTest('h', 'hyi'))\n def test_Jvec_hzi_Hform(self):\n self.assertTrue(derivTest('h', 'hzi'))\n\n def test_Jvec_hxr_Hform(self):\n self.assertTrue(derivTest('h', 'jxr'))\n def test_Jvec_hyr_Hform(self):\n self.assertTrue(derivTest('h', 'jyr'))\n def test_Jvec_hzr_Hform(self):\n self.assertTrue(derivTest('h', 'jzr'))\n def test_Jvec_hxi_Hform(self):\n self.assertTrue(derivTest('h', 'jxi'))\n def test_Jvec_hyi_Hform(self):\n self.assertTrue(derivTest('h', 'jyi'))\n def test_Jvec_hzi_Hform(self):\n self.assertTrue(derivTest('h', 'jzi'))\n\n def test_Jvec_exr_Hform(self):\n self.assertTrue(derivTest('h', 'exr'))\n def test_Jvec_eyr_Hform(self):\n self.assertTrue(derivTest('h', 'eyr'))\n def test_Jvec_ezr_Hform(self):\n self.assertTrue(derivTest('h', 'ezr'))\n def test_Jvec_exi_Hform(self):\n self.assertTrue(derivTest('h', 'exi'))\n def test_Jvec_eyi_Hform(self):\n self.assertTrue(derivTest('h', 'eyi'))\n def test_Jvec_ezi_Hform(self):\n self.assertTrue(derivTest('h', 'ezi'))\n\n def test_Jvec_bxr_Hform(self):\n self.assertTrue(derivTest('h', 'bxr'))\n def test_Jvec_byr_Hform(self):\n self.assertTrue(derivTest('h', 'byr'))\n def test_Jvec_bzr_Hform(self):\n self.assertTrue(derivTest('h', 'bzr'))\n def test_Jvec_bxi_Hform(self):\n self.assertTrue(derivTest('h', 'bxi'))\n def test_Jvec_byi_Hform(self):\n self.assertTrue(derivTest('h', 'byi'))\n def test_Jvec_bzi_Hform(self):\n self.assertTrue(derivTest('h', 'bzi'))\n\nif __name__ == '__main__':\n unittest.main()\n", "from __future__ import division, print_function\nimport unittest\n\nimport discretize\nfrom discretize import utils\nimport numpy as np\nimport scipy.sparse as sp\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nfrom scipy.constants import mu_0, inch, foot\nimport time\n\nfrom SimPEG.EM import TDEM\nfrom SimPEG import Utils, Maps\nfrom SimPEG.Utils import Zero\n\nfrom pymatsolver import Pardiso\n\nplotIt = False\nTOL = 1e-4\n\n\nclass TestInductiveSourcesPermeability(unittest.TestCase):\n\n def setUp(self):\n target_mur = [1, 50, 100, 200]\n target_l = 500\n target_r = 50\n sigma_back = 1e-5\n radius_loop = 100\n\n model_names = [\"target_{}\".format(mur) for mur in target_mur]\n\n # Set up a Cyl mesh\n csx = 5. # cell size in the x-direction\n csz = 5. 
# cell size in the z-direction\n domainx = 100 # go out 500m from the well\n\n # padding parameters\n npadx, npadz = 15, 15 # number of padding cells\n pfx = 1.4 # expansion factor for the padding to infinity\n pfz = 1.4\n\n ncz = int(target_l/csz)\n mesh = discretize.CylMesh([\n [(csx, int(domainx/csx)), (csx, npadx, pfx)],\n 1,\n [(csz, npadz, -pfz), (csz, ncz), (csz, npadz, pfz)]\n ])\n mesh.x0 = [0, 0, -mesh.hz[:npadz + ncz].sum()]\n\n # Plot the mesh\n if plotIt:\n mesh.plotGrid()\n plt.show()\n\n self.radius_loop = radius_loop\n self.target_mur = target_mur\n self.target_l = target_l\n self.target_r = target_r\n self.sigma_back = sigma_back\n self.model_names = model_names\n self.mesh = mesh\n\n def test_permeable_sources(self):\n\n target_mur = self.target_mur\n target_l = self.target_l\n target_r = self.target_r\n sigma_back = self.sigma_back\n model_names = self.model_names\n mesh = self.mesh\n radius_loop = self.radius_loop\n\n # Assign physical properties on the mesh\n def populate_target(mur):\n mu_model = np.ones(mesh.nC)\n x_inds = mesh.gridCC[:, 0] < target_r\n z_inds = (\n (mesh.gridCC[:, 2] <= 0) & (mesh.gridCC[:, 2] >= -target_l)\n )\n mu_model[x_inds & z_inds] = mur\n return mu_0 * mu_model\n\n mu_dict = {\n key: populate_target(mu) for key, mu in\n zip(model_names, target_mur)\n }\n sigma = np.ones(mesh.nC) * sigma_back\n\n # Plot the models\n if plotIt:\n xlim = np.r_[-200, 200] # x-limits in meters\n zlim = np.r_[-1.5*target_l, 10.] # z-limits in meters. (z-positive up)\n\n fig, ax = plt.subplots(\n 1, len(model_names), figsize=(6*len(model_names), 5)\n )\n if len(model_names) == 1:\n ax = [ax]\n\n for a, key in zip(ax, model_names):\n plt.colorbar(mesh.plotImage(\n mu_dict[key], ax=a,\n pcolorOpts={'norm': LogNorm()}, # plot on a log-scale\n mirror=True\n )[0], ax=a)\n a.set_title('{}'.format(key), fontsize=13)\n # cylMeshGen.mesh.plotGrid(ax=a, slice='theta') # uncomment to plot the mesh on top of this\n a.set_xlim(xlim)\n a.set_ylim(zlim)\n plt.tight_layout()\n plt.show()\n\n ramp = [\n (1e-5, 20), (1e-4, 20), (3e-4, 20), (1e-3, 20), (3e-3, 20),\n (1e-2, 20), (3e-2, 20), (1e-1, 20), (3e-1, 20), (1, 50)\n ]\n timeSteps = ramp\n\n time_mesh = discretize.TensorMesh([ramp])\n offTime = 10000\n waveform = TDEM.Src.QuarterSineRampOnWaveform(\n ramp_on=np.r_[1e-4, 20], ramp_off=offTime - np.r_[1e-4, 0]\n )\n\n if plotIt:\n wave = np.r_[[waveform.eval(t) for t in time_mesh.gridN]]\n plt.plot(time_mesh.gridN, wave)\n plt.plot(time_mesh.gridN, np.zeros(time_mesh.nN), '-|', color='k')\n plt.show()\n\n src_magnetostatic = TDEM.Src.CircularLoop(\n [], loc=np.r_[0., 0., 0.], orientation=\"z\", radius=100,\n )\n\n src_ramp_on = TDEM.Src.CircularLoop(\n [], loc=np.r_[0., 0., 0.], orientation=\"z\", radius=100,\n waveform=waveform\n )\n\n src_list = [src_magnetostatic]\n src_list_late_ontime = [src_ramp_on]\n\n prob = TDEM.Problem3D_b(\n mesh=mesh, timeSteps=timeSteps, sigmaMap=Maps.IdentityMap(mesh),\n Solver=Pardiso\n )\n prob_late_ontime = TDEM.Problem3D_b(\n mesh=mesh, timeSteps=timeSteps, sigmaMap=Maps.IdentityMap(mesh),\n Solver=Pardiso\n )\n\n survey = TDEM.Survey(srcList=src_list)\n survey_late_ontime = TDEM.Survey(src_list_late_ontime)\n\n prob.pair(survey)\n prob_late_ontime.pair(survey_late_ontime)\n\n fields_dict = {}\n\n for key in model_names:\n t = time.time()\n print('--- Running {} ---'.format(key))\n\n prob_late_ontime.mu = mu_dict[key]\n fields_dict[key] = prob_late_ontime.fields(sigma)\n\n print(\" ... done. 
Elapsed time {}\".format(time.time() - t))\n print('\\n')\n\n b_magnetostatic = {}\n b_late_ontime = {}\n\n for key in model_names:\n prob.mu = mu_dict[key]\n prob.sigma = sigma\n b_magnetostatic[key] = src_magnetostatic.bInitial(prob)\n\n prob_late_ontime.mu = mu_dict[key]\n b_late_ontime[key] = utils.mkvc(\n fields_dict[key][:, 'b', -1]\n )\n\n if plotIt:\n fig, ax = plt.subplots(\n len(model_names), 2, figsize=(3*len(model_names), 5)\n )\n\n for i, key in enumerate(model_names):\n ax[i][0].semilogy(\n np.absolute(b_magnetostatic[key]),\n label='magnetostatic'\n )\n ax[i][0].semilogy(\n np.absolute(b_late_ontime[key]), label='late on-time'\n )\n ax[i][0].legend()\n\n ax[i][1].semilogy(\n np.absolute(b_magnetostatic[key] - b_late_ontime[key])\n )\n plt.tight_layout()\n plt.show()\n\n print(\"Testing TDEM with permeable targets\")\n passed = []\n for key in model_names:\n norm_magneotstatic = np.linalg.norm(b_magnetostatic[key])\n norm_late_ontime = np.linalg.norm(b_late_ontime[key])\n norm_diff = np.linalg.norm(\n b_magnetostatic[key] - b_late_ontime[key]\n )\n passed_test = (\n norm_diff / (0.5*(norm_late_ontime + norm_magneotstatic))\n < TOL\n )\n print(\"\\n{}\".format(key))\n print(\n \"||magnetostatic||: {:1.2e}, \"\n \"||late on-time||: {:1.2e}, \"\n \"||difference||: {:1.2e} passed?: {}\".format(\n norm_magneotstatic, norm_late_ontime, norm_diff,\n passed_test\n )\n )\n\n passed += [passed_test]\n\n assert all(passed)\n\n prob.sigma = 1e-4*np.ones(mesh.nC)\n v = utils.mkvc(np.random.rand(mesh.nE))\n w = utils.mkvc(np.random.rand(mesh.nF))\n assert(\n np.all(\n mesh.getEdgeInnerProduct(1e-4*np.ones(mesh.nC))*v ==\n prob.MeSigma*v\n )\n )\n\n assert(\n np.all(\n mesh.getEdgeInnerProduct(\n 1e-4*np.ones(mesh.nC), invMat=True\n )*v ==\n prob.MeSigmaI*v\n )\n )\n assert(\n np.all(\n mesh.getFaceInnerProduct(1./1e-4*np.ones(mesh.nC))*w ==\n prob.MfRho*w\n )\n )\n\n assert(\n np.all(\n mesh.getFaceInnerProduct(\n 1./1e-4*np.ones(mesh.nC), invMat=True\n )*w ==\n prob.MfRhoI*w\n )\n )\n\n prob.rho = 1./1e-3*np.ones(mesh.nC)\n v = utils.mkvc(np.random.rand(mesh.nE))\n w = utils.mkvc(np.random.rand(mesh.nF))\n assert(\n np.all(\n mesh.getEdgeInnerProduct(1e-3*np.ones(mesh.nC))*v ==\n prob.MeSigma*v\n )\n )\n\n assert(\n np.all(\n mesh.getEdgeInnerProduct(\n 1e-3*np.ones(mesh.nC), invMat=True\n )*v ==\n prob.MeSigmaI*v\n )\n )\n assert(\n np.all(\n mesh.getFaceInnerProduct(1./1e-3*np.ones(mesh.nC))*w ==\n prob.MfRho*w\n )\n )\n\n assert(\n np.all(\n mesh.getFaceInnerProduct(\n 1./1e-3*np.ones(mesh.nC), invMat=True\n )*w ==\n prob.MfRhoI*w\n )\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ], [ "numpy.sum", "numpy.zeros", "numpy.random.rand", "numpy.vstack" ], [ "numpy.log", "numpy.random.randn", "numpy.ones" ], [ "matplotlib.pyplot.tight_layout", "numpy.absolute", "matplotlib.colors.LogNorm", "numpy.linalg.norm", "numpy.ones", "matplotlib.pyplot.plot", "numpy.random.rand", "matplotlib.pyplot.show", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
twn29004/OpenPCDet
[ "3457cc30b21d882a1376ef272fbaa49755c72a2e", "3457cc30b21d882a1376ef272fbaa49755c72a2e", "3457cc30b21d882a1376ef272fbaa49755c72a2e" ]
[ "pcdet/datasets/kitti/kitti_object_eval_python/kitti_common.py", "pcdet/models/detectors/second_net_iou.py", "pcdet/datasets/kitti/kitti_object_eval_python/eval.py" ]
[ "import concurrent.futures as futures\r\nimport os\r\nimport pathlib\r\nimport re\r\nfrom collections import OrderedDict\r\n\r\nimport numpy as np\r\nfrom skimage import io\r\n\r\n\r\ndef get_image_index_str(img_idx):\r\n return \"{:06d}\".format(img_idx)\r\n\r\n\r\ndef get_kitti_info_path(idx,\r\n prefix,\r\n info_type='image_2',\r\n file_tail='.png',\r\n training=True,\r\n relative_path=True):\r\n img_idx_str = get_image_index_str(idx)\r\n img_idx_str += file_tail\r\n prefix = pathlib.Path(prefix)\r\n if training:\r\n file_path = pathlib.Path('training') / info_type / img_idx_str\r\n else:\r\n file_path = pathlib.Path('testing') / info_type / img_idx_str\r\n if not (prefix / file_path).exists():\r\n raise ValueError(\"file not exist: {}\".format(file_path))\r\n if relative_path:\r\n return str(file_path)\r\n else:\r\n return str(prefix / file_path)\r\n\r\n\r\ndef get_image_path(idx, prefix, training=True, relative_path=True):\r\n return get_kitti_info_path(idx, prefix, 'image_2', '.png', training,\r\n relative_path)\r\n\r\n\r\ndef get_label_path(idx, prefix, training=True, relative_path=True):\r\n return get_kitti_info_path(idx, prefix, 'label_2', '.txt', training,\r\n relative_path)\r\n\r\n\r\ndef get_velodyne_path(idx, prefix, training=True, relative_path=True):\r\n return get_kitti_info_path(idx, prefix, 'velodyne', '.bin', training,\r\n relative_path)\r\n\r\n\r\ndef get_calib_path(idx, prefix, training=True, relative_path=True):\r\n return get_kitti_info_path(idx, prefix, 'calib', '.txt', training,\r\n relative_path)\r\n\r\n\r\ndef _extend_matrix(mat):\r\n mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0)\r\n return mat\r\n\r\n\r\ndef get_kitti_image_info(path,\r\n training=True,\r\n label_info=True,\r\n velodyne=False,\r\n calib=False,\r\n image_ids=7481,\r\n extend_matrix=True,\r\n num_worker=8,\r\n relative_path=True,\r\n with_imageshape=True):\r\n # image_infos = []\r\n root_path = pathlib.Path(path)\r\n if not isinstance(image_ids, list):\r\n image_ids = list(range(image_ids))\r\n\r\n def map_func(idx):\r\n image_info = {'image_idx': idx}\r\n annotations = None\r\n if velodyne:\r\n image_info['velodyne_path'] = get_velodyne_path(\r\n idx, path, training, relative_path)\r\n image_info['img_path'] = get_image_path(idx, path, training,\r\n relative_path)\r\n if with_imageshape:\r\n img_path = image_info['img_path']\r\n if relative_path:\r\n img_path = str(root_path / img_path)\r\n image_info['img_shape'] = np.array(\r\n io.imread(img_path).shape[:2], dtype=np.int32)\r\n if label_info:\r\n label_path = get_label_path(idx, path, training, relative_path)\r\n if relative_path:\r\n label_path = str(root_path / label_path)\r\n annotations = get_label_anno(label_path)\r\n if calib:\r\n calib_path = get_calib_path(\r\n idx, path, training, relative_path=False)\r\n with open(calib_path, 'r') as f:\r\n lines = f.readlines()\r\n P0 = np.array(\r\n [float(info) for info in lines[0].split(' ')[1:13]]).reshape(\r\n [3, 4])\r\n P1 = np.array(\r\n [float(info) for info in lines[1].split(' ')[1:13]]).reshape(\r\n [3, 4])\r\n P2 = np.array(\r\n [float(info) for info in lines[2].split(' ')[1:13]]).reshape(\r\n [3, 4])\r\n P3 = np.array(\r\n [float(info) for info in lines[3].split(' ')[1:13]]).reshape(\r\n [3, 4])\r\n if extend_matrix:\r\n P0 = _extend_matrix(P0)\r\n P1 = _extend_matrix(P1)\r\n P2 = _extend_matrix(P2)\r\n P3 = _extend_matrix(P3)\r\n image_info['calib/P0'] = P0\r\n image_info['calib/P1'] = P1\r\n image_info['calib/P2'] = P2\r\n image_info['calib/P3'] = P3\r\n R0_rect 
= np.array([\r\n float(info) for info in lines[4].split(' ')[1:10]\r\n ]).reshape([3, 3])\r\n if extend_matrix:\r\n rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)\r\n rect_4x4[3, 3] = 1.\r\n rect_4x4[:3, :3] = R0_rect\r\n else:\r\n rect_4x4 = R0_rect\r\n image_info['calib/R0_rect'] = rect_4x4\r\n Tr_velo_to_cam = np.array([\r\n float(info) for info in lines[5].split(' ')[1:13]\r\n ]).reshape([3, 4])\r\n Tr_imu_to_velo = np.array([\r\n float(info) for info in lines[6].split(' ')[1:13]\r\n ]).reshape([3, 4])\r\n if extend_matrix:\r\n Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)\r\n Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo)\r\n image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam\r\n image_info['calib/Tr_imu_to_velo'] = Tr_imu_to_velo\r\n if annotations is not None:\r\n image_info['annos'] = annotations\r\n add_difficulty_to_annos(image_info)\r\n return image_info\r\n\r\n with futures.ThreadPoolExecutor(num_worker) as executor:\r\n image_infos = executor.map(map_func, image_ids)\r\n return list(image_infos)\r\n\r\n\r\ndef filter_kitti_anno(image_anno,\r\n used_classes,\r\n used_difficulty=None,\r\n dontcare_iou=None):\r\n if not isinstance(used_classes, (list, tuple)):\r\n used_classes = [used_classes]\r\n img_filtered_annotations = {}\r\n relevant_annotation_indices = [\r\n i for i, x in enumerate(image_anno['name']) if x in used_classes\r\n ]\r\n for key in image_anno.keys():\r\n img_filtered_annotations[key] = (\r\n image_anno[key][relevant_annotation_indices])\r\n if used_difficulty is not None:\r\n relevant_annotation_indices = [\r\n i for i, x in enumerate(img_filtered_annotations['difficulty'])\r\n if x in used_difficulty\r\n ]\r\n for key in image_anno.keys():\r\n img_filtered_annotations[key] = (\r\n img_filtered_annotations[key][relevant_annotation_indices])\r\n\r\n if 'DontCare' in used_classes and dontcare_iou is not None:\r\n dont_care_indices = [\r\n i for i, x in enumerate(img_filtered_annotations['name'])\r\n if x == 'DontCare'\r\n ]\r\n # bounding box format [y_min, x_min, y_max, x_max]\r\n all_boxes = img_filtered_annotations['bbox']\r\n ious = iou(all_boxes, all_boxes[dont_care_indices])\r\n\r\n # Remove all bounding boxes that overlap with a dontcare region.\r\n if ious.size > 0:\r\n boxes_to_remove = np.amax(ious, axis=1) > dontcare_iou\r\n for key in image_anno.keys():\r\n img_filtered_annotations[key] = (img_filtered_annotations[key][\r\n np.logical_not(boxes_to_remove)])\r\n return img_filtered_annotations\r\n\r\ndef filter_annos_low_score(image_annos, thresh):\r\n new_image_annos = []\r\n for anno in image_annos:\r\n img_filtered_annotations = {}\r\n relevant_annotation_indices = [\r\n i for i, s in enumerate(anno['score']) if s >= thresh\r\n ]\r\n for key in anno.keys():\r\n img_filtered_annotations[key] = (\r\n anno[key][relevant_annotation_indices])\r\n new_image_annos.append(img_filtered_annotations)\r\n return new_image_annos\r\n\r\ndef kitti_result_line(result_dict, precision=4):\r\n prec_float = \"{\" + \":.{}f\".format(precision) + \"}\"\r\n res_line = []\r\n all_field_default = OrderedDict([\r\n ('name', None),\r\n ('truncated', -1),\r\n ('occluded', -1),\r\n ('alpha', -10),\r\n ('bbox', None),\r\n ('dimensions', [-1, -1, -1]),\r\n ('location', [-1000, -1000, -1000]),\r\n ('rotation_y', -10),\r\n ('score', None),\r\n ])\r\n res_dict = [(key, None) for key, val in all_field_default.items()]\r\n res_dict = OrderedDict(res_dict)\r\n for key, val in result_dict.items():\r\n if all_field_default[key] is None and val is None:\r\n raise ValueError(\"you 
must specify a value for {}\".format(key))\r\n res_dict[key] = val\r\n\r\n for key, val in res_dict.items():\r\n if key == 'name':\r\n res_line.append(val)\r\n elif key in ['truncated', 'alpha', 'rotation_y', 'score']:\r\n if val is None:\r\n res_line.append(str(all_field_default[key]))\r\n else:\r\n res_line.append(prec_float.format(val))\r\n elif key == 'occluded':\r\n if val is None:\r\n res_line.append(str(all_field_default[key]))\r\n else:\r\n res_line.append('{}'.format(val))\r\n elif key in ['bbox', 'dimensions', 'location']:\r\n if val is None:\r\n res_line += [str(v) for v in all_field_default[key]]\r\n else:\r\n res_line += [prec_float.format(v) for v in val]\r\n else:\r\n raise ValueError(\"unknown key. supported key:{}\".format(\r\n res_dict.keys()))\r\n return ' '.join(res_line)\r\n\r\n\r\ndef add_difficulty_to_annos(info):\r\n min_height = [40, 25,\r\n 25] # minimum height for evaluated groundtruth/detections\r\n max_occlusion = [\r\n 0, 1, 2\r\n ] # maximum occlusion level of the groundtruth used for eval_utils\r\n max_trunc = [\r\n 0.15, 0.3, 0.5\r\n ] # maximum truncation level of the groundtruth used for eval_utils\r\n annos = info['annos']\r\n dims = annos['dimensions'] # lhw format\r\n bbox = annos['bbox']\r\n height = bbox[:, 3] - bbox[:, 1]\r\n occlusion = annos['occluded']\r\n truncation = annos['truncated']\r\n diff = []\r\n easy_mask = np.ones((len(dims), ), dtype=np.bool)\r\n moderate_mask = np.ones((len(dims), ), dtype=np.bool)\r\n hard_mask = np.ones((len(dims), ), dtype=np.bool)\r\n i = 0\r\n for h, o, t in zip(height, occlusion, truncation):\r\n if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:\r\n easy_mask[i] = False\r\n if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:\r\n moderate_mask[i] = False\r\n if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:\r\n hard_mask[i] = False\r\n i += 1\r\n is_easy = easy_mask\r\n is_moderate = np.logical_xor(easy_mask, moderate_mask)\r\n is_hard = np.logical_xor(hard_mask, moderate_mask)\r\n\r\n for i in range(len(dims)):\r\n if is_easy[i]:\r\n diff.append(0)\r\n elif is_moderate[i]:\r\n diff.append(1)\r\n elif is_hard[i]:\r\n diff.append(2)\r\n else:\r\n diff.append(-1)\r\n annos[\"difficulty\"] = np.array(diff, np.int32)\r\n return diff\r\n\r\n\r\ndef get_label_anno(label_path):\r\n annotations = {}\r\n annotations.update({\r\n 'name': [],\r\n 'truncated': [],\r\n 'occluded': [],\r\n 'alpha': [],\r\n 'bbox': [],\r\n 'dimensions': [],\r\n 'location': [],\r\n 'rotation_y': []\r\n })\r\n with open(label_path, 'r') as f:\r\n lines = f.readlines()\r\n # if len(lines) == 0 or len(lines[0]) < 15:\r\n # content = []\r\n # else:\r\n content = [line.strip().split(' ') for line in lines]\r\n annotations['name'] = np.array([x[0] for x in content])\r\n annotations['truncated'] = np.array([float(x[1]) for x in content])\r\n annotations['occluded'] = np.array([int(x[2]) for x in content])\r\n annotations['alpha'] = np.array([float(x[3]) for x in content])\r\n annotations['bbox'] = np.array(\r\n [[float(info) for info in x[4:8]] for x in content]).reshape(-1, 4)\r\n # dimensions will convert hwl format to standard lhw(camera) format.\r\n annotations['dimensions'] = np.array(\r\n [[float(info) for info in x[8:11]] for x in content]).reshape(\r\n -1, 3)[:, [2, 0, 1]]\r\n annotations['location'] = np.array(\r\n [[float(info) for info in x[11:14]] for x in content]).reshape(-1, 3)\r\n annotations['rotation_y'] = np.array(\r\n [float(x[14]) for x in content]).reshape(-1)\r\n if len(content) 
!= 0 and len(content[0]) == 16: # have score\r\n annotations['score'] = np.array([float(x[15]) for x in content])\r\n else:\r\n annotations['score'] = np.zeros([len(annotations['bbox'])])\r\n return annotations\r\n\r\ndef get_label_annos(label_folder, image_ids=None):\r\n if image_ids is None:\r\n filepaths = pathlib.Path(label_folder).glob('*.txt')\r\n prog = re.compile(r'^\\d{6}.txt$')\r\n filepaths = filter(lambda f: prog.match(f.name), filepaths)\r\n image_ids = [int(p.stem) for p in filepaths]\r\n image_ids = sorted(image_ids)\r\n if not isinstance(image_ids, list):\r\n image_ids = list(range(image_ids))\r\n annos = []\r\n label_folder = pathlib.Path(label_folder)\r\n for idx in image_ids:\r\n image_idx = get_image_index_str(idx)\r\n label_filename = label_folder / (image_idx + '.txt')\r\n annos.append(get_label_anno(label_filename))\r\n return annos\r\n\r\ndef area(boxes, add1=False):\r\n \"\"\"Computes area of boxes.\r\n\r\n Args:\r\n boxes: Numpy array with shape [N, 4] holding N boxes\r\n\r\n Returns:\r\n a numpy array with shape [N*1] representing box areas\r\n \"\"\"\r\n if add1:\r\n return (boxes[:, 2] - boxes[:, 0] + 1.0) * (\r\n boxes[:, 3] - boxes[:, 1] + 1.0)\r\n else:\r\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\r\n\r\n\r\ndef intersection(boxes1, boxes2, add1=False):\r\n \"\"\"Compute pairwise intersection areas between boxes.\r\n\r\n Args:\r\n boxes1: a numpy array with shape [N, 4] holding N boxes\r\n boxes2: a numpy array with shape [M, 4] holding M boxes\r\n\r\n Returns:\r\n a numpy array with shape [N*M] representing pairwise intersection area\r\n \"\"\"\r\n [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)\r\n [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)\r\n\r\n all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))\r\n all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))\r\n if add1:\r\n all_pairs_min_ymax += 1.0\r\n intersect_heights = np.maximum(\r\n np.zeros(all_pairs_max_ymin.shape),\r\n all_pairs_min_ymax - all_pairs_max_ymin)\r\n\r\n all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))\r\n all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))\r\n if add1:\r\n all_pairs_min_xmax += 1.0\r\n intersect_widths = np.maximum(\r\n np.zeros(all_pairs_max_xmin.shape),\r\n all_pairs_min_xmax - all_pairs_max_xmin)\r\n return intersect_heights * intersect_widths\r\n\r\n\r\ndef iou(boxes1, boxes2, add1=False):\r\n \"\"\"Computes pairwise intersection-over-union between box collections.\r\n\r\n Args:\r\n boxes1: a numpy array with shape [N, 4] holding N boxes.\r\n boxes2: a numpy array with shape [M, 4] holding N boxes.\r\n\r\n Returns:\r\n a numpy array with shape [N, M] representing pairwise iou scores.\r\n \"\"\"\r\n intersect = intersection(boxes1, boxes2, add1)\r\n area1 = area(boxes1, add1)\r\n area2 = area(boxes2, add1)\r\n union = np.expand_dims(\r\n area1, axis=1) + np.expand_dims(\r\n area2, axis=0) - intersect\r\n return intersect / union\r\n", "import torch\r\nfrom .detector3d_template import Detector3DTemplate\r\nfrom ..model_utils.model_nms_utils import class_agnostic_nms\r\nfrom ...ops.roiaware_pool3d import roiaware_pool3d_utils\r\n\r\n\r\nclass SECONDNetIoU(Detector3DTemplate):\r\n def __init__(self, model_cfg, num_class, dataset):\r\n super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)\r\n self.module_list = self.build_networks()\r\n\r\n def forward(self, batch_dict):\r\n batch_dict['dataset_cfg'] = self.dataset.dataset_cfg\r\n for cur_module in 
self.module_list:\r\n batch_dict = cur_module(batch_dict)\r\n\r\n if self.training:\r\n loss, tb_dict, disp_dict = self.get_training_loss()\r\n\r\n ret_dict = {\r\n 'loss': loss\r\n }\r\n return ret_dict, tb_dict, disp_dict\r\n else:\r\n pred_dicts, recall_dicts = self.post_processing(batch_dict)\r\n return pred_dicts, recall_dicts\r\n\r\n def get_training_loss(self):\r\n disp_dict = {}\r\n\r\n loss_rpn, tb_dict = self.dense_head.get_loss()\r\n loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)\r\n\r\n loss = loss_rpn + loss_rcnn\r\n return loss, tb_dict, disp_dict\r\n\r\n @staticmethod\r\n def cal_scores_by_npoints(cls_scores, iou_scores, num_points_in_gt, cls_thresh=10, iou_thresh=100):\r\n \"\"\"\r\n Args:\r\n cls_scores: (N)\r\n iou_scores: (N)\r\n num_points_in_gt: (N, 7+c)\r\n cls_thresh: scalar\r\n iou_thresh: scalar\r\n \"\"\"\r\n assert iou_thresh >= cls_thresh\r\n alpha = torch.zeros(cls_scores.shape, dtype=torch.float32).cuda()\r\n alpha[num_points_in_gt <= cls_thresh] = 0\r\n alpha[num_points_in_gt >= iou_thresh] = 1\r\n \r\n mask = ((num_points_in_gt > cls_thresh) & (num_points_in_gt < iou_thresh))\r\n alpha[mask] = (num_points_in_gt[mask] - 10) / (iou_thresh - cls_thresh)\r\n \r\n scores = (1 - alpha) * cls_scores + alpha * iou_scores\r\n\r\n return scores\r\n\r\n def set_nms_score_by_class(self, iou_preds, cls_preds, label_preds, score_by_class):\r\n n_classes = torch.unique(label_preds).shape[0]\r\n nms_scores = torch.zeros(iou_preds.shape, dtype=torch.float32).cuda()\r\n for i in range(n_classes):\r\n mask = label_preds == (i + 1)\r\n class_name = self.class_names[i]\r\n score_type = score_by_class[class_name]\r\n if score_type == 'iou':\r\n nms_scores[mask] = iou_preds[mask]\r\n elif score_type == 'cls':\r\n nms_scores[mask] = cls_preds[mask]\r\n else:\r\n raise NotImplementedError\r\n\r\n return nms_scores\r\n\r\n def post_processing(self, batch_dict):\r\n \"\"\"\r\n Args:\r\n batch_dict:\r\n batch_size:\r\n batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)\r\n batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)\r\n cls_preds_normalized: indicate whether batch_cls_preds is normalized\r\n batch_index: optional (N1+N2+...)\r\n roi_labels: (B, num_rois) 1 .. 
num_classes\r\n Returns:\r\n\r\n \"\"\"\r\n post_process_cfg = self.model_cfg.POST_PROCESSING\r\n batch_size = batch_dict['batch_size']\r\n recall_dict = {}\r\n pred_dicts = []\r\n for index in range(batch_size):\r\n if batch_dict.get('batch_index', None) is not None:\r\n assert batch_dict['batch_cls_preds'].shape.__len__() == 2\r\n batch_mask = (batch_dict['batch_index'] == index)\r\n else:\r\n assert batch_dict['batch_cls_preds'].shape.__len__() == 3\r\n batch_mask = index\r\n\r\n box_preds = batch_dict['batch_box_preds'][batch_mask]\r\n iou_preds = batch_dict['batch_cls_preds'][batch_mask]\r\n cls_preds = batch_dict['roi_scores'][batch_mask]\r\n\r\n src_iou_preds = iou_preds\r\n src_box_preds = box_preds\r\n src_cls_preds = cls_preds\r\n assert iou_preds.shape[1] in [1, self.num_class]\r\n\r\n if not batch_dict['cls_preds_normalized']:\r\n iou_preds = torch.sigmoid(iou_preds)\r\n cls_preds = torch.sigmoid(cls_preds)\r\n\r\n if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:\r\n raise NotImplementedError\r\n else:\r\n iou_preds, label_preds = torch.max(iou_preds, dim=-1)\r\n label_preds = batch_dict['roi_labels'][index] if batch_dict.get('has_class_labels', False) else label_preds + 1\r\n\r\n if post_process_cfg.NMS_CONFIG.get('SCORE_BY_CLASS', None) and \\\r\n post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'score_by_class':\r\n nms_scores = self.set_nms_score_by_class(\r\n iou_preds, cls_preds, label_preds, post_process_cfg.NMS_CONFIG.SCORE_BY_CLASS\r\n )\r\n elif post_process_cfg.NMS_CONFIG.get('SCORE_TYPE', None) == 'iou' or \\\r\n post_process_cfg.NMS_CONFIG.get('SCORE_TYPE', None) is None:\r\n nms_scores = iou_preds\r\n elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'cls':\r\n nms_scores = cls_preds\r\n elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'weighted_iou_cls':\r\n nms_scores = post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.iou * iou_preds + \\\r\n post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.cls * cls_preds\r\n elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'num_pts_iou_cls':\r\n point_mask = (batch_dict['points'][:, 0] == batch_mask)\r\n batch_points = batch_dict['points'][point_mask][:, 1:4]\r\n\r\n num_pts_in_gt = roiaware_pool3d_utils.points_in_boxes_cpu(\r\n batch_points.cpu(), box_preds[:, 0:7].cpu()\r\n ).sum(dim=1).float().cuda()\r\n \r\n score_thresh_cfg = post_process_cfg.NMS_CONFIG.SCORE_THRESH\r\n nms_scores = self.cal_scores_by_npoints(\r\n cls_preds, iou_preds, num_pts_in_gt, \r\n score_thresh_cfg.cls, score_thresh_cfg.iou\r\n )\r\n else:\r\n raise NotImplementedError\r\n\r\n selected, selected_scores = class_agnostic_nms(\r\n box_scores=nms_scores, box_preds=box_preds,\r\n nms_config=post_process_cfg.NMS_CONFIG,\r\n score_thresh=post_process_cfg.SCORE_THRESH\r\n )\r\n\r\n if post_process_cfg.OUTPUT_RAW_SCORE:\r\n raise NotImplementedError\r\n\r\n final_scores = selected_scores\r\n final_labels = label_preds[selected]\r\n final_boxes = box_preds[selected]\r\n\r\n recall_dict = self.generate_recall_record(\r\n box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,\r\n recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,\r\n thresh_list=post_process_cfg.RECALL_THRESH_LIST\r\n )\r\n\r\n record_dict = {\r\n 'pred_boxes': final_boxes,\r\n 'pred_scores': final_scores,\r\n 'pred_labels': final_labels,\r\n 'pred_cls_scores': cls_preds[selected],\r\n 'pred_iou_scores': iou_preds[selected]\r\n }\r\n\r\n pred_dicts.append(record_dict)\r\n\r\n return pred_dicts, recall_dict\r\n", "import io as sysio\r\n\r\nimport numba\r\nimport numpy as np\r\n\r\nfrom 
.rotate_iou import rotate_iou_gpu_eval\r\n\r\n\r\[email protected]\r\ndef get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41):\r\n scores.sort()\r\n scores = scores[::-1]\r\n current_recall = 0\r\n thresholds = []\r\n for i, score in enumerate(scores):\r\n l_recall = (i + 1) / num_gt\r\n if i < (len(scores) - 1):\r\n r_recall = (i + 2) / num_gt\r\n else:\r\n r_recall = l_recall\r\n if (((r_recall - current_recall) < (current_recall - l_recall))\r\n and (i < (len(scores) - 1))):\r\n continue\r\n # recall = l_recall\r\n thresholds.append(score)\r\n current_recall += 1 / (num_sample_pts - 1.0)\r\n return thresholds\r\n\r\n\r\ndef clean_data(gt_anno, dt_anno, current_class, difficulty):\r\n CLASS_NAMES = ['car', 'pedestrian', 'cyclist', 'van', 'person_sitting', 'truck']\r\n MIN_HEIGHT = [40, 25, 25]\r\n MAX_OCCLUSION = [0, 1, 2]\r\n MAX_TRUNCATION = [0.15, 0.3, 0.5]\r\n dc_bboxes, ignored_gt, ignored_dt = [], [], []\r\n current_cls_name = CLASS_NAMES[current_class].lower()\r\n num_gt = len(gt_anno[\"name\"])\r\n num_dt = len(dt_anno[\"name\"])\r\n num_valid_gt = 0\r\n for i in range(num_gt):\r\n bbox = gt_anno[\"bbox\"][i]\r\n gt_name = gt_anno[\"name\"][i].lower()\r\n height = bbox[3] - bbox[1]\r\n valid_class = -1\r\n if (gt_name == current_cls_name):\r\n valid_class = 1\r\n elif (current_cls_name == \"Pedestrian\".lower()\r\n and \"Person_sitting\".lower() == gt_name):\r\n valid_class = 0\r\n elif (current_cls_name == \"Car\".lower() and \"Van\".lower() == gt_name):\r\n valid_class = 0\r\n else:\r\n valid_class = -1\r\n ignore = False\r\n if ((gt_anno[\"occluded\"][i] > MAX_OCCLUSION[difficulty])\r\n or (gt_anno[\"truncated\"][i] > MAX_TRUNCATION[difficulty])\r\n or (height <= MIN_HEIGHT[difficulty])):\r\n # if gt_anno[\"difficulty\"][i] > difficulty or gt_anno[\"difficulty\"][i] == -1:\r\n ignore = True\r\n if valid_class == 1 and not ignore:\r\n ignored_gt.append(0)\r\n num_valid_gt += 1\r\n elif (valid_class == 0 or (ignore and (valid_class == 1))):\r\n ignored_gt.append(1)\r\n else:\r\n ignored_gt.append(-1)\r\n # for i in range(num_gt):\r\n if gt_anno[\"name\"][i] == \"DontCare\":\r\n dc_bboxes.append(gt_anno[\"bbox\"][i])\r\n for i in range(num_dt):\r\n if (dt_anno[\"name\"][i].lower() == current_cls_name):\r\n valid_class = 1\r\n else:\r\n valid_class = -1\r\n height = abs(dt_anno[\"bbox\"][i, 3] - dt_anno[\"bbox\"][i, 1])\r\n if height < MIN_HEIGHT[difficulty]:\r\n ignored_dt.append(1)\r\n elif valid_class == 1:\r\n ignored_dt.append(0)\r\n else:\r\n ignored_dt.append(-1)\r\n\r\n return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes\r\n\r\n\r\[email protected](nopython=True)\r\ndef image_box_overlap(boxes, query_boxes, criterion=-1):\r\n N = boxes.shape[0]\r\n K = query_boxes.shape[0]\r\n overlaps = np.zeros((N, K), dtype=boxes.dtype)\r\n for k in range(K):\r\n qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) *\r\n (query_boxes[k, 3] - query_boxes[k, 1]))\r\n for n in range(N):\r\n iw = (min(boxes[n, 2], query_boxes[k, 2]) -\r\n max(boxes[n, 0], query_boxes[k, 0]))\r\n if iw > 0:\r\n ih = (min(boxes[n, 3], query_boxes[k, 3]) -\r\n max(boxes[n, 1], query_boxes[k, 1]))\r\n if ih > 0:\r\n if criterion == -1:\r\n ua = (\r\n (boxes[n, 2] - boxes[n, 0]) *\r\n (boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih)\r\n elif criterion == 0:\r\n ua = ((boxes[n, 2] - boxes[n, 0]) *\r\n (boxes[n, 3] - boxes[n, 1]))\r\n elif criterion == 1:\r\n ua = qbox_area\r\n else:\r\n ua = 1.0\r\n overlaps[n, k] = iw * ih / ua\r\n return overlaps\r\n\r\n\r\ndef bev_box_overlap(boxes, qboxes, 
criterion=-1):\r\n riou = rotate_iou_gpu_eval(boxes, qboxes, criterion)\r\n return riou\r\n\r\n\r\[email protected](nopython=True, parallel=True)\r\ndef d3_box_overlap_kernel(boxes, qboxes, rinc, criterion=-1):\r\n # ONLY support overlap in CAMERA, not lider.\r\n N, K = boxes.shape[0], qboxes.shape[0]\r\n for i in range(N):\r\n for j in range(K):\r\n if rinc[i, j] > 0:\r\n # iw = (min(boxes[i, 1] + boxes[i, 4], qboxes[j, 1] +\r\n # qboxes[j, 4]) - max(boxes[i, 1], qboxes[j, 1]))\r\n iw = (min(boxes[i, 1], qboxes[j, 1]) - max(\r\n boxes[i, 1] - boxes[i, 4], qboxes[j, 1] - qboxes[j, 4]))\r\n\r\n if iw > 0:\r\n area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]\r\n area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]\r\n inc = iw * rinc[i, j]\r\n if criterion == -1:\r\n ua = (area1 + area2 - inc)\r\n elif criterion == 0:\r\n ua = area1\r\n elif criterion == 1:\r\n ua = area2\r\n else:\r\n ua = inc\r\n rinc[i, j] = inc / ua\r\n else:\r\n rinc[i, j] = 0.0\r\n\r\n\r\ndef d3_box_overlap(boxes, qboxes, criterion=-1):\r\n rinc = rotate_iou_gpu_eval(boxes[:, [0, 2, 3, 5, 6]],\r\n qboxes[:, [0, 2, 3, 5, 6]], 2)\r\n d3_box_overlap_kernel(boxes, qboxes, rinc, criterion)\r\n return rinc\r\n\r\n\r\[email protected](nopython=True)\r\ndef compute_statistics_jit(overlaps,\r\n gt_datas,\r\n dt_datas,\r\n ignored_gt,\r\n ignored_det,\r\n dc_bboxes,\r\n metric,\r\n min_overlap,\r\n thresh=0,\r\n compute_fp=False,\r\n compute_aos=False):\r\n\r\n det_size = dt_datas.shape[0]\r\n gt_size = gt_datas.shape[0]\r\n dt_scores = dt_datas[:, -1]\r\n dt_alphas = dt_datas[:, 4]\r\n gt_alphas = gt_datas[:, 4]\r\n dt_bboxes = dt_datas[:, :4]\r\n gt_bboxes = gt_datas[:, :4]\r\n\r\n assigned_detection = [False] * det_size\r\n ignored_threshold = [False] * det_size\r\n if compute_fp:\r\n for i in range(det_size):\r\n if (dt_scores[i] < thresh):\r\n ignored_threshold[i] = True\r\n NO_DETECTION = -10000000\r\n tp, fp, fn, similarity = 0, 0, 0, 0\r\n # thresholds = [0.0]\r\n # delta = [0.0]\r\n thresholds = np.zeros((gt_size, ))\r\n thresh_idx = 0\r\n delta = np.zeros((gt_size, ))\r\n delta_idx = 0\r\n for i in range(gt_size):\r\n if ignored_gt[i] == -1:\r\n continue\r\n det_idx = -1\r\n valid_detection = NO_DETECTION\r\n max_overlap = 0\r\n assigned_ignored_det = False\r\n\r\n for j in range(det_size):\r\n if (ignored_det[j] == -1):\r\n continue\r\n if (assigned_detection[j]):\r\n continue\r\n if (ignored_threshold[j]):\r\n continue\r\n overlap = overlaps[j, i]\r\n dt_score = dt_scores[j]\r\n if (not compute_fp and (overlap > min_overlap)\r\n and dt_score > valid_detection):\r\n det_idx = j\r\n valid_detection = dt_score\r\n elif (compute_fp and (overlap > min_overlap)\r\n and (overlap > max_overlap or assigned_ignored_det)\r\n and ignored_det[j] == 0):\r\n max_overlap = overlap\r\n det_idx = j\r\n valid_detection = 1\r\n assigned_ignored_det = False\r\n elif (compute_fp and (overlap > min_overlap)\r\n and (valid_detection == NO_DETECTION)\r\n and ignored_det[j] == 1):\r\n det_idx = j\r\n valid_detection = 1\r\n assigned_ignored_det = True\r\n\r\n if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0:\r\n fn += 1\r\n elif ((valid_detection != NO_DETECTION)\r\n and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)):\r\n assigned_detection[det_idx] = True\r\n elif valid_detection != NO_DETECTION:\r\n tp += 1\r\n # thresholds.append(dt_scores[det_idx])\r\n thresholds[thresh_idx] = dt_scores[det_idx]\r\n thresh_idx += 1\r\n if compute_aos:\r\n # delta.append(gt_alphas[i] - dt_alphas[det_idx])\r\n delta[delta_idx] = gt_alphas[i] - 
dt_alphas[det_idx]\r\n delta_idx += 1\r\n\r\n assigned_detection[det_idx] = True\r\n if compute_fp:\r\n for i in range(det_size):\r\n if (not (assigned_detection[i] or ignored_det[i] == -1\r\n or ignored_det[i] == 1 or ignored_threshold[i])):\r\n fp += 1\r\n nstuff = 0\r\n if metric == 0:\r\n overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0)\r\n for i in range(dc_bboxes.shape[0]):\r\n for j in range(det_size):\r\n if (assigned_detection[j]):\r\n continue\r\n if (ignored_det[j] == -1 or ignored_det[j] == 1):\r\n continue\r\n if (ignored_threshold[j]):\r\n continue\r\n if overlaps_dt_dc[j, i] > min_overlap:\r\n assigned_detection[j] = True\r\n nstuff += 1\r\n fp -= nstuff\r\n if compute_aos:\r\n tmp = np.zeros((fp + delta_idx, ))\r\n # tmp = [0] * fp\r\n for i in range(delta_idx):\r\n tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0\r\n # tmp.append((1.0 + np.cos(delta[i])) / 2.0)\r\n # assert len(tmp) == fp + tp\r\n # assert len(delta) == tp\r\n if tp > 0 or fp > 0:\r\n similarity = np.sum(tmp)\r\n else:\r\n similarity = -1\r\n return tp, fp, fn, similarity, thresholds[:thresh_idx]\r\n\r\n\r\ndef get_split_parts(num, num_part):\r\n same_part = num // num_part\r\n remain_num = num % num_part\r\n if same_part == 0:\r\n return [num]\r\n\r\n if remain_num == 0:\r\n return [same_part] * num_part\r\n else:\r\n return [same_part] * num_part + [remain_num]\r\n\r\n\r\[email protected](nopython=True)\r\ndef fused_compute_statistics(overlaps,\r\n pr,\r\n gt_nums,\r\n dt_nums,\r\n dc_nums,\r\n gt_datas,\r\n dt_datas,\r\n dontcares,\r\n ignored_gts,\r\n ignored_dets,\r\n metric,\r\n min_overlap,\r\n thresholds,\r\n compute_aos=False):\r\n gt_num = 0\r\n dt_num = 0\r\n dc_num = 0\r\n for i in range(gt_nums.shape[0]):\r\n for t, thresh in enumerate(thresholds):\r\n overlap = overlaps[dt_num:dt_num + dt_nums[i], gt_num:\r\n gt_num + gt_nums[i]]\r\n\r\n gt_data = gt_datas[gt_num:gt_num + gt_nums[i]]\r\n dt_data = dt_datas[dt_num:dt_num + dt_nums[i]]\r\n ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]]\r\n ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]]\r\n dontcare = dontcares[dc_num:dc_num + dc_nums[i]]\r\n tp, fp, fn, similarity, _ = compute_statistics_jit(\r\n overlap,\r\n gt_data,\r\n dt_data,\r\n ignored_gt,\r\n ignored_det,\r\n dontcare,\r\n metric,\r\n min_overlap=min_overlap,\r\n thresh=thresh,\r\n compute_fp=True,\r\n compute_aos=compute_aos)\r\n pr[t, 0] += tp\r\n pr[t, 1] += fp\r\n pr[t, 2] += fn\r\n if similarity != -1:\r\n pr[t, 3] += similarity\r\n gt_num += gt_nums[i]\r\n dt_num += dt_nums[i]\r\n dc_num += dc_nums[i]\r\n\r\n\r\ndef calculate_iou_partly(gt_annos, dt_annos, metric, num_parts=50):\r\n \"\"\"fast iou algorithm. this function can be used independently to\r\n do result analysis. Must be used in CAMERA coordinate system.\r\n Args:\r\n gt_annos: dict, must from get_label_annos() in kitti_common.py\r\n dt_annos: dict, must from get_label_annos() in kitti_common.py\r\n metric: eval type. 0: bbox, 1: bev, 2: 3d\r\n num_parts: int. 
a parameter for fast calculate algorithm\r\n \"\"\"\r\n assert len(gt_annos) == len(dt_annos)\r\n total_dt_num = np.stack([len(a[\"name\"]) for a in dt_annos], 0)\r\n total_gt_num = np.stack([len(a[\"name\"]) for a in gt_annos], 0)\r\n num_examples = len(gt_annos)\r\n split_parts = get_split_parts(num_examples, num_parts)\r\n parted_overlaps = []\r\n example_idx = 0\r\n\r\n for num_part in split_parts:\r\n gt_annos_part = gt_annos[example_idx:example_idx + num_part]\r\n dt_annos_part = dt_annos[example_idx:example_idx + num_part]\r\n if metric == 0:\r\n gt_boxes = np.concatenate([a[\"bbox\"] for a in gt_annos_part], 0)\r\n dt_boxes = np.concatenate([a[\"bbox\"] for a in dt_annos_part], 0)\r\n overlap_part = image_box_overlap(gt_boxes, dt_boxes)\r\n elif metric == 1:\r\n loc = np.concatenate(\r\n [a[\"location\"][:, [0, 2]] for a in gt_annos_part], 0)\r\n dims = np.concatenate(\r\n [a[\"dimensions\"][:, [0, 2]] for a in gt_annos_part], 0)\r\n rots = np.concatenate([a[\"rotation_y\"] for a in gt_annos_part], 0)\r\n gt_boxes = np.concatenate(\r\n [loc, dims, rots[..., np.newaxis]], axis=1)\r\n loc = np.concatenate(\r\n [a[\"location\"][:, [0, 2]] for a in dt_annos_part], 0)\r\n dims = np.concatenate(\r\n [a[\"dimensions\"][:, [0, 2]] for a in dt_annos_part], 0)\r\n rots = np.concatenate([a[\"rotation_y\"] for a in dt_annos_part], 0)\r\n dt_boxes = np.concatenate(\r\n [loc, dims, rots[..., np.newaxis]], axis=1)\r\n overlap_part = bev_box_overlap(gt_boxes, dt_boxes).astype(\r\n np.float64)\r\n elif metric == 2:\r\n loc = np.concatenate([a[\"location\"] for a in gt_annos_part], 0)\r\n dims = np.concatenate([a[\"dimensions\"] for a in gt_annos_part], 0)\r\n rots = np.concatenate([a[\"rotation_y\"] for a in gt_annos_part], 0)\r\n gt_boxes = np.concatenate(\r\n [loc, dims, rots[..., np.newaxis]], axis=1)\r\n loc = np.concatenate([a[\"location\"] for a in dt_annos_part], 0)\r\n dims = np.concatenate([a[\"dimensions\"] for a in dt_annos_part], 0)\r\n rots = np.concatenate([a[\"rotation_y\"] for a in dt_annos_part], 0)\r\n dt_boxes = np.concatenate(\r\n [loc, dims, rots[..., np.newaxis]], axis=1)\r\n overlap_part = d3_box_overlap(gt_boxes, dt_boxes).astype(\r\n np.float64)\r\n else:\r\n raise ValueError(\"unknown metric\")\r\n parted_overlaps.append(overlap_part)\r\n example_idx += num_part\r\n overlaps = []\r\n example_idx = 0\r\n for j, num_part in enumerate(split_parts):\r\n gt_annos_part = gt_annos[example_idx:example_idx + num_part]\r\n dt_annos_part = dt_annos[example_idx:example_idx + num_part]\r\n gt_num_idx, dt_num_idx = 0, 0\r\n for i in range(num_part):\r\n gt_box_num = total_gt_num[example_idx + i]\r\n dt_box_num = total_dt_num[example_idx + i]\r\n overlaps.append(\r\n parted_overlaps[j][gt_num_idx:gt_num_idx + gt_box_num,\r\n dt_num_idx:dt_num_idx + dt_box_num])\r\n gt_num_idx += gt_box_num\r\n dt_num_idx += dt_box_num\r\n example_idx += num_part\r\n\r\n return overlaps, parted_overlaps, total_gt_num, total_dt_num\r\n\r\n\r\ndef _prepare_data(gt_annos, dt_annos, current_class, difficulty):\r\n gt_datas_list = []\r\n dt_datas_list = []\r\n total_dc_num = []\r\n ignored_gts, ignored_dets, dontcares = [], [], []\r\n total_num_valid_gt = 0\r\n for i in range(len(gt_annos)):\r\n rets = clean_data(gt_annos[i], dt_annos[i], current_class, difficulty)\r\n num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets\r\n ignored_gts.append(np.array(ignored_gt, dtype=np.int64))\r\n ignored_dets.append(np.array(ignored_det, dtype=np.int64))\r\n if len(dc_bboxes) == 0:\r\n dc_bboxes = np.zeros((0, 
4)).astype(np.float64)\r\n else:\r\n dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64)\r\n total_dc_num.append(dc_bboxes.shape[0])\r\n dontcares.append(dc_bboxes)\r\n total_num_valid_gt += num_valid_gt\r\n gt_datas = np.concatenate(\r\n [gt_annos[i][\"bbox\"], gt_annos[i][\"alpha\"][..., np.newaxis]], 1)\r\n dt_datas = np.concatenate([\r\n dt_annos[i][\"bbox\"], dt_annos[i][\"alpha\"][..., np.newaxis],\r\n dt_annos[i][\"score\"][..., np.newaxis]\r\n ], 1)\r\n gt_datas_list.append(gt_datas)\r\n dt_datas_list.append(dt_datas)\r\n total_dc_num = np.stack(total_dc_num, axis=0)\r\n return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,\r\n total_dc_num, total_num_valid_gt)\r\n\r\n\r\ndef eval_class(gt_annos,\r\n dt_annos,\r\n current_classes,\r\n difficultys,\r\n metric,\r\n min_overlaps,\r\n compute_aos=False,\r\n num_parts=100):\r\n \"\"\"Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.\r\n Args:\r\n gt_annos: dict, must from get_label_annos() in kitti_common.py\r\n dt_annos: dict, must from get_label_annos() in kitti_common.py\r\n current_classes: list of int, 0: car, 1: pedestrian, 2: cyclist\r\n difficultys: list of int. eval difficulty, 0: easy, 1: normal, 2: hard\r\n metric: eval type. 0: bbox, 1: bev, 2: 3d\r\n min_overlaps: float, min overlap. format: [num_overlap, metric, class].\r\n num_parts: int. a parameter for fast calculate algorithm\r\n\r\n Returns:\r\n dict of recall, precision and aos\r\n \"\"\"\r\n assert len(gt_annos) == len(dt_annos)\r\n num_examples = len(gt_annos)\r\n split_parts = get_split_parts(num_examples, num_parts)\r\n\r\n rets = calculate_iou_partly(dt_annos, gt_annos, metric, num_parts)\r\n overlaps, parted_overlaps, total_dt_num, total_gt_num = rets\r\n N_SAMPLE_PTS = 41\r\n num_minoverlap = len(min_overlaps)\r\n num_class = len(current_classes)\r\n num_difficulty = len(difficultys)\r\n precision = np.zeros(\r\n [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])\r\n recall = np.zeros(\r\n [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])\r\n aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])\r\n for m, current_class in enumerate(current_classes):\r\n for l, difficulty in enumerate(difficultys):\r\n rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)\r\n (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,\r\n dontcares, total_dc_num, total_num_valid_gt) = rets\r\n for k, min_overlap in enumerate(min_overlaps[:, metric, m]):\r\n thresholdss = []\r\n for i in range(len(gt_annos)):\r\n rets = compute_statistics_jit(\r\n overlaps[i],\r\n gt_datas_list[i],\r\n dt_datas_list[i],\r\n ignored_gts[i],\r\n ignored_dets[i],\r\n dontcares[i],\r\n metric,\r\n min_overlap=min_overlap,\r\n thresh=0.0,\r\n compute_fp=False)\r\n tp, fp, fn, similarity, thresholds = rets\r\n thresholdss += thresholds.tolist()\r\n thresholdss = np.array(thresholdss)\r\n thresholds = get_thresholds(thresholdss, total_num_valid_gt)\r\n thresholds = np.array(thresholds)\r\n pr = np.zeros([len(thresholds), 4])\r\n idx = 0\r\n for j, num_part in enumerate(split_parts):\r\n gt_datas_part = np.concatenate(\r\n gt_datas_list[idx:idx + num_part], 0)\r\n dt_datas_part = np.concatenate(\r\n dt_datas_list[idx:idx + num_part], 0)\r\n dc_datas_part = np.concatenate(\r\n dontcares[idx:idx + num_part], 0)\r\n ignored_dets_part = np.concatenate(\r\n ignored_dets[idx:idx + num_part], 0)\r\n ignored_gts_part = np.concatenate(\r\n ignored_gts[idx:idx + num_part], 0)\r\n fused_compute_statistics(\r\n 
parted_overlaps[j],\r\n pr,\r\n total_gt_num[idx:idx + num_part],\r\n total_dt_num[idx:idx + num_part],\r\n total_dc_num[idx:idx + num_part],\r\n gt_datas_part,\r\n dt_datas_part,\r\n dc_datas_part,\r\n ignored_gts_part,\r\n ignored_dets_part,\r\n metric,\r\n min_overlap=min_overlap,\r\n thresholds=thresholds,\r\n compute_aos=compute_aos)\r\n idx += num_part\r\n for i in range(len(thresholds)):\r\n recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])\r\n precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])\r\n if compute_aos:\r\n aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])\r\n for i in range(len(thresholds)):\r\n precision[m, l, k, i] = np.max(\r\n precision[m, l, k, i:], axis=-1)\r\n recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)\r\n if compute_aos:\r\n aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)\r\n ret_dict = {\r\n \"recall\": recall,\r\n \"precision\": precision,\r\n \"orientation\": aos,\r\n }\r\n return ret_dict\r\n\r\n\r\ndef get_mAP(prec):\r\n sums = 0\r\n for i in range(0, prec.shape[-1], 4):\r\n sums = sums + prec[..., i]\r\n return sums / 11 * 100\r\n\r\n\r\ndef get_mAP_R40(prec):\r\n sums = 0\r\n for i in range(1, prec.shape[-1]):\r\n sums = sums + prec[..., i]\r\n return sums / 40 * 100\r\n\r\n\r\ndef print_str(value, *arg, sstream=None):\r\n if sstream is None:\r\n sstream = sysio.StringIO()\r\n sstream.truncate(0)\r\n sstream.seek(0)\r\n print(value, *arg, file=sstream)\r\n return sstream.getvalue()\r\n\r\n\r\ndef do_eval(gt_annos,\r\n dt_annos,\r\n current_classes,\r\n min_overlaps,\r\n compute_aos=False,\r\n PR_detail_dict=None):\r\n # min_overlaps: [num_minoverlap, metric, num_class]\r\n difficultys = [0, 1, 2]\r\n ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 0,\r\n min_overlaps, compute_aos)\r\n # ret: [num_class, num_diff, num_minoverlap, num_sample_points]\r\n mAP_bbox = get_mAP(ret[\"precision\"])\r\n mAP_bbox_R40 = get_mAP_R40(ret[\"precision\"])\r\n\r\n if PR_detail_dict is not None:\r\n PR_detail_dict['bbox'] = ret['precision']\r\n\r\n mAP_aos = mAP_aos_R40 = None\r\n if compute_aos:\r\n mAP_aos = get_mAP(ret[\"orientation\"])\r\n mAP_aos_R40 = get_mAP_R40(ret[\"orientation\"])\r\n\r\n if PR_detail_dict is not None:\r\n PR_detail_dict['aos'] = ret['orientation']\r\n\r\n ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 1,\r\n min_overlaps)\r\n mAP_bev = get_mAP(ret[\"precision\"])\r\n mAP_bev_R40 = get_mAP_R40(ret[\"precision\"])\r\n\r\n if PR_detail_dict is not None:\r\n PR_detail_dict['bev'] = ret['precision']\r\n\r\n ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 2,\r\n min_overlaps)\r\n mAP_3d = get_mAP(ret[\"precision\"])\r\n mAP_3d_R40 = get_mAP_R40(ret[\"precision\"])\r\n if PR_detail_dict is not None:\r\n PR_detail_dict['3d'] = ret['precision']\r\n return mAP_bbox, mAP_bev, mAP_3d, mAP_aos, mAP_bbox_R40, mAP_bev_R40, mAP_3d_R40, mAP_aos_R40\r\n\r\n\r\ndef do_coco_style_eval(gt_annos, dt_annos, current_classes, overlap_ranges,\r\n compute_aos):\r\n # overlap_ranges: [range, metric, num_class]\r\n min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])\r\n for i in range(overlap_ranges.shape[1]):\r\n for j in range(overlap_ranges.shape[2]):\r\n min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j])\r\n mAP_bbox, mAP_bev, mAP_3d, mAP_aos = do_eval(\r\n gt_annos, dt_annos, current_classes, min_overlaps, compute_aos)\r\n # ret: [num_class, num_diff, num_minoverlap]\r\n mAP_bbox = mAP_bbox.mean(-1)\r\n mAP_bev = mAP_bev.mean(-1)\r\n mAP_3d = mAP_3d.mean(-1)\r\n if 
mAP_aos is not None:\r\n mAP_aos = mAP_aos.mean(-1)\r\n return mAP_bbox, mAP_bev, mAP_3d, mAP_aos\r\n\r\n\r\ndef get_official_eval_result(gt_annos, dt_annos, current_classes, PR_detail_dict=None):\r\n overlap_0_7 = np.array([[0.7, 0.5, 0.5, 0.7,\r\n 0.5, 0.7], [0.7, 0.5, 0.5, 0.7, 0.5, 0.7],\r\n [0.7, 0.5, 0.5, 0.7, 0.5, 0.7]])\r\n overlap_0_5 = np.array([[0.7, 0.5, 0.5, 0.7,\r\n 0.5, 0.5], [0.5, 0.25, 0.25, 0.5, 0.25, 0.5],\r\n [0.5, 0.25, 0.25, 0.5, 0.25, 0.5]])\r\n min_overlaps = np.stack([overlap_0_7, overlap_0_5], axis=0) # [2, 3, 5]\r\n class_to_name = {\r\n 0: 'Car',\r\n 1: 'Pedestrian',\r\n 2: 'Cyclist',\r\n 3: 'Van',\r\n 4: 'Person_sitting',\r\n 5: 'Truck'\r\n }\r\n name_to_class = {v: n for n, v in class_to_name.items()}\r\n if not isinstance(current_classes, (list, tuple)):\r\n current_classes = [current_classes]\r\n current_classes_int = []\r\n for curcls in current_classes:\r\n if isinstance(curcls, str):\r\n current_classes_int.append(name_to_class[curcls])\r\n else:\r\n current_classes_int.append(curcls)\r\n current_classes = current_classes_int\r\n min_overlaps = min_overlaps[:, :, current_classes]\r\n result = ''\r\n # check whether alpha is valid\r\n compute_aos = False\r\n for anno in dt_annos:\r\n if anno['alpha'].shape[0] != 0:\r\n if anno['alpha'][0] != -10:\r\n compute_aos = True\r\n break\r\n mAPbbox, mAPbev, mAP3d, mAPaos, mAPbbox_R40, mAPbev_R40, mAP3d_R40, mAPaos_R40 = do_eval(\r\n gt_annos, dt_annos, current_classes, min_overlaps, compute_aos, PR_detail_dict=PR_detail_dict)\r\n\r\n ret_dict = {}\r\n for j, curcls in enumerate(current_classes):\r\n # mAP threshold array: [num_minoverlap, metric, class]\r\n # mAP result: [num_class, num_diff, num_minoverlap]\r\n for i in range(min_overlaps.shape[0]):\r\n result += print_str(\r\n (f\"{class_to_name[curcls]} \"\r\n \"AP@{:.2f}, {:.2f}, {:.2f}:\".format(*min_overlaps[i, :, j])))\r\n result += print_str((f\"bbox AP:{mAPbbox[j, 0, i]:.4f}, \"\r\n f\"{mAPbbox[j, 1, i]:.4f}, \"\r\n f\"{mAPbbox[j, 2, i]:.4f}\"))\r\n result += print_str((f\"bev AP:{mAPbev[j, 0, i]:.4f}, \"\r\n f\"{mAPbev[j, 1, i]:.4f}, \"\r\n f\"{mAPbev[j, 2, i]:.4f}\"))\r\n result += print_str((f\"3d AP:{mAP3d[j, 0, i]:.4f}, \"\r\n f\"{mAP3d[j, 1, i]:.4f}, \"\r\n f\"{mAP3d[j, 2, i]:.4f}\"))\r\n\r\n if compute_aos:\r\n result += print_str((f\"aos AP:{mAPaos[j, 0, i]:.2f}, \"\r\n f\"{mAPaos[j, 1, i]:.2f}, \"\r\n f\"{mAPaos[j, 2, i]:.2f}\"))\r\n # if i == 0:\r\n # ret_dict['%s_aos/easy' % class_to_name[curcls]] = mAPaos[j, 0, 0]\r\n # ret_dict['%s_aos/moderate' % class_to_name[curcls]] = mAPaos[j, 1, 0]\r\n # ret_dict['%s_aos/hard' % class_to_name[curcls]] = mAPaos[j, 2, 0]\r\n\r\n result += print_str(\r\n (f\"{class_to_name[curcls]} \"\r\n \"AP_R40@{:.2f}, {:.2f}, {:.2f}:\".format(*min_overlaps[i, :, j])))\r\n result += print_str((f\"bbox AP:{mAPbbox_R40[j, 0, i]:.4f}, \"\r\n f\"{mAPbbox_R40[j, 1, i]:.4f}, \"\r\n f\"{mAPbbox_R40[j, 2, i]:.4f}\"))\r\n result += print_str((f\"bev AP:{mAPbev_R40[j, 0, i]:.4f}, \"\r\n f\"{mAPbev_R40[j, 1, i]:.4f}, \"\r\n f\"{mAPbev_R40[j, 2, i]:.4f}\"))\r\n result += print_str((f\"3d AP:{mAP3d_R40[j, 0, i]:.4f}, \"\r\n f\"{mAP3d_R40[j, 1, i]:.4f}, \"\r\n f\"{mAP3d_R40[j, 2, i]:.4f}\"))\r\n if compute_aos:\r\n result += print_str((f\"aos AP:{mAPaos_R40[j, 0, i]:.2f}, \"\r\n f\"{mAPaos_R40[j, 1, i]:.2f}, \"\r\n f\"{mAPaos_R40[j, 2, i]:.2f}\"))\r\n if i == 0:\r\n ret_dict['%s_aos/easy_R40' % class_to_name[curcls]] = mAPaos_R40[j, 0, 0]\r\n ret_dict['%s_aos/moderate_R40' % class_to_name[curcls]] = mAPaos_R40[j, 1, 0]\r\n 
ret_dict['%s_aos/hard_R40' % class_to_name[curcls]] = mAPaos_R40[j, 2, 0]\r\n\r\n if i == 0:\r\n # ret_dict['%s_3d/easy' % class_to_name[curcls]] = mAP3d[j, 0, 0]\r\n # ret_dict['%s_3d/moderate' % class_to_name[curcls]] = mAP3d[j, 1, 0]\r\n # ret_dict['%s_3d/hard' % class_to_name[curcls]] = mAP3d[j, 2, 0]\r\n # ret_dict['%s_bev/easy' % class_to_name[curcls]] = mAPbev[j, 0, 0]\r\n # ret_dict['%s_bev/moderate' % class_to_name[curcls]] = mAPbev[j, 1, 0]\r\n # ret_dict['%s_bev/hard' % class_to_name[curcls]] = mAPbev[j, 2, 0]\r\n # ret_dict['%s_image/easy' % class_to_name[curcls]] = mAPbbox[j, 0, 0]\r\n # ret_dict['%s_image/moderate' % class_to_name[curcls]] = mAPbbox[j, 1, 0]\r\n # ret_dict['%s_image/hard' % class_to_name[curcls]] = mAPbbox[j, 2, 0]\r\n\r\n ret_dict['%s_3d/easy_R40' % class_to_name[curcls]] = mAP3d_R40[j, 0, 0]\r\n ret_dict['%s_3d/moderate_R40' % class_to_name[curcls]] = mAP3d_R40[j, 1, 0]\r\n ret_dict['%s_3d/hard_R40' % class_to_name[curcls]] = mAP3d_R40[j, 2, 0]\r\n ret_dict['%s_bev/easy_R40' % class_to_name[curcls]] = mAPbev_R40[j, 0, 0]\r\n ret_dict['%s_bev/moderate_R40' % class_to_name[curcls]] = mAPbev_R40[j, 1, 0]\r\n ret_dict['%s_bev/hard_R40' % class_to_name[curcls]] = mAPbev_R40[j, 2, 0]\r\n ret_dict['%s_image/easy_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 0, 0]\r\n ret_dict['%s_image/moderate_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 1, 0]\r\n ret_dict['%s_image/hard_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 2, 0]\r\n\r\n return result, ret_dict\r\n\r\n\r\ndef get_coco_eval_result(gt_annos, dt_annos, current_classes):\r\n class_to_name = {\r\n 0: 'Car',\r\n 1: 'Pedestrian',\r\n 2: 'Cyclist',\r\n 3: 'Van',\r\n 4: 'Person_sitting',\r\n }\r\n class_to_range = {\r\n 0: [0.5, 0.95, 10],\r\n 1: [0.25, 0.7, 10],\r\n 2: [0.25, 0.7, 10],\r\n 3: [0.5, 0.95, 10],\r\n 4: [0.25, 0.7, 10],\r\n }\r\n name_to_class = {v: n for n, v in class_to_name.items()}\r\n if not isinstance(current_classes, (list, tuple)):\r\n current_classes = [current_classes]\r\n current_classes_int = []\r\n for curcls in current_classes:\r\n if isinstance(curcls, str):\r\n current_classes_int.append(name_to_class[curcls])\r\n else:\r\n current_classes_int.append(curcls)\r\n current_classes = current_classes_int\r\n overlap_ranges = np.zeros([3, 3, len(current_classes)])\r\n for i, curcls in enumerate(current_classes):\r\n overlap_ranges[:, :, i] = np.array(\r\n class_to_range[curcls])[:, np.newaxis]\r\n result = ''\r\n # check whether alpha is valid\r\n compute_aos = False\r\n for anno in dt_annos:\r\n if anno['alpha'].shape[0] != 0:\r\n if anno['alpha'][0] != -10:\r\n compute_aos = True\r\n break\r\n mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval(\r\n gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos)\r\n for j, curcls in enumerate(current_classes):\r\n # mAP threshold array: [num_minoverlap, metric, class]\r\n # mAP result: [num_class, num_diff, num_minoverlap]\r\n o_range = np.array(class_to_range[curcls])[[0, 2, 1]]\r\n o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1)\r\n result += print_str((f\"{class_to_name[curcls]} \"\r\n \"coco AP@{:.2f}:{:.2f}:{:.2f}:\".format(*o_range)))\r\n result += print_str((f\"bbox AP:{mAPbbox[j, 0]:.2f}, \"\r\n f\"{mAPbbox[j, 1]:.2f}, \"\r\n f\"{mAPbbox[j, 2]:.2f}\"))\r\n result += print_str((f\"bev AP:{mAPbev[j, 0]:.2f}, \"\r\n f\"{mAPbev[j, 1]:.2f}, \"\r\n f\"{mAPbev[j, 2]:.2f}\"))\r\n result += print_str((f\"3d AP:{mAP3d[j, 0]:.2f}, \"\r\n f\"{mAP3d[j, 1]:.2f}, \"\r\n f\"{mAP3d[j, 2]:.2f}\"))\r\n if compute_aos:\r\n result += 
print_str((f\"aos AP:{mAPaos[j, 0]:.2f}, \"\r\n f\"{mAPaos[j, 1]:.2f}, \"\r\n f\"{mAPaos[j, 2]:.2f}\"))\r\n return result\r\n" ]
[ [ "numpy.logical_not", "numpy.logical_xor", "numpy.split", "numpy.expand_dims", "numpy.amax", "numpy.transpose", "numpy.array", "numpy.zeros" ], [ "torch.sigmoid", "torch.unique", "torch.max", "torch.zeros" ], [ "numpy.linspace", "numpy.cos", "numpy.stack", "numpy.concatenate", "numpy.max", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
attawesome/Computer-Vision-with-OpenCV
[ "1198bfee8683f6b5c415e07e085fa528e00ecd0a" ]
[ "solutions/canny_license_plate.py" ]
[ "import cv2\nimport matplotlib.pyplot as plt\n\nimage = cv2.imread('../img/license_plate.png')\n\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nblurred = cv2.GaussianBlur(gray, (3, 3), 0)\n\n# compute a \"wide\", \"mid-range\", and \"tight\" threshold for the edges\nwide = cv2.Canny(blurred, 10, 200)\nmid = cv2.Canny(blurred, 30, 150)\ntight = cv2.Canny(blurred, 240, 250)\n\n# show the edge maps\nplt.figure(figsize=(20, 10))\nplt.subplot(221), plt.imshow(blurred, cmap='gray')\nplt.title('Blurred Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(222), plt.imshow(wide, cmap='gray')\nplt.title('Wide Edge Map'), plt.xticks([]), plt.yticks([])\nplt.subplot(223), plt.imshow(mid, cmap='gray')\nplt.title('Mid Edge Map'), plt.xticks([]), plt.yticks([])\nplt.subplot(224), plt.imshow(tight, cmap='gray')\nplt.title('Tight Edge Map'), plt.xticks([]), plt.yticks([])\nplt.show()\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.yticks", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
caniko/tridesclous
[ "9f412a42697561e3c7d8e3a35249cd13240239a0" ]
[ "tridesclous/peeler_engine_geometry.py" ]
[ "\"\"\"\nHere implementation that tale in account the geometry\nof the probe to speed up template matching.\n\n\"\"\"\n\nimport time\nimport numpy as np\nimport joblib\nfrom concurrent.futures import ThreadPoolExecutor\nimport itertools\n\n\n\nfrom .peeler_engine_base import PeelerEngineGeneric\n\nfrom .peeler_tools import *\nfrom .peeler_tools import _dtype_spike\n\nimport sklearn.metrics.pairwise\n\nfrom .cltools import HAVE_PYOPENCL, OpenCL_Helper\nif HAVE_PYOPENCL:\n import pyopencl\n mf = pyopencl.mem_flags\n\nfrom .peakdetector import get_peak_detector_class\n\ntry:\n import numba\n HAVE_NUMBA = True\n from .numba_tools import numba_explore_best_shift, numba_sparse_scalar_product\nexcept ImportError:\n HAVE_NUMBA = False\n\n\n\n\n\n\n\nclass PeelerEngineGeometrical(PeelerEngineGeneric):\n def change_params(self, **kargs):\n PeelerEngineGeneric.change_params(self, **kargs)\n\n def initialize(self, **kargs):\n PeelerEngineGeneric.initialize(self, **kargs)\n \n # create peak detector\n p = dict(self.catalogue['peak_detector_params'])\n\n self.peakdetector_engine = p.pop('engine')\n self.peakdetector_method = p.pop('method')\n \n PeakDetector_class = get_peak_detector_class(self.peakdetector_method, self.peakdetector_engine)\n \n chunksize = self.fifo_size-2*self.n_span # not the real chunksize here\n self.peakdetector = PeakDetector_class(self.sample_rate, self.nb_channel,\n chunksize, self.internal_dtype, self.geometry)\n \n self.peakdetector.change_params(**p)\n \n # some attrs\n self.shifts = np.arange(-self.maximum_jitter_shift, self.maximum_jitter_shift+1)\n self.nb_shift = self.shifts.size\n \n #~ self.channel_distances = sklearn.metrics.pairwise.euclidean_distances(self.geometry).astype('float32')\n #~ self.channels_adjacency = {}\n #~ for c in range(self.nb_channel):\n #~ if self.use_sparse_template:\n #~ nearest, = np.nonzero(self.channel_distances[c, :]<self.adjacency_radius_um)\n #~ self.channels_adjacency[c] = nearest\n #~ else:\n #~ self.channels_adjacency[c] = np.arange(self.nb_channel, dtype='int64')\n \n self.mask_already_tested = np.zeros((self.fifo_size, self.nb_channel), dtype='bool')\n\n\n def initialize_before_each_segment(self, **kargs):\n PeelerEngineGeneric.initialize_before_each_segment(self, **kargs)\n self.peakdetector.initialize_stream()\n\n\n def detect_local_peaks_before_peeling_loop(self):\n # reset tested mask\n self.mask_already_tested[:] = False\n # and detect peak\n self.re_detect_local_peak()\n \n #~ print('detect_local_peaks_before_peeling_loop', self.pending_peaks.size)\n\n def re_detect_local_peak(self):\n mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)\n if mask.ndim ==1:\n #~ mask &= ~self.mask_already_tested[self.n_span:-self.n_span, 0]\n sample_indexes, = np.nonzero(mask)\n sample_indexes += self.n_span\n tested = self.mask_already_tested[sample_indexes, 0]\n sample_indexes = sample_indexes[~tested]\n chan_indexes = np.zeros(sample_indexes.size, dtype='int64')\n else:\n #~ mask &= ~self.mask_already_tested[self.n_span:-self.n_span, :]\n sample_indexes, chan_indexes = np.nonzero(mask)\n sample_indexes += self.n_span\n tested = self.mask_already_tested[sample_indexes, chan_indexes]\n sample_indexes = sample_indexes[~tested]\n chan_indexes = chan_indexes[~tested]\n \n \n amplitudes = np.abs(self.fifo_residuals[sample_indexes, chan_indexes])\n order = np.argsort(amplitudes)[::-1]\n \n dtype_peak = [('sample_index', 'int32'), ('chan_index', 'int32'), ('peak_value', 'float32')]\n self.pending_peaks = 
np.zeros(sample_indexes.size, dtype=dtype_peak)\n self.pending_peaks['sample_index'] = sample_indexes\n self.pending_peaks['chan_index'] = chan_indexes\n self.pending_peaks['peak_value'] = amplitudes\n self.pending_peaks = self.pending_peaks[order]\n #~ print('re_detect_local_peak', self.pending_peaks.size)\n\n def select_next_peak(self):\n #~ print(len(self.pending_peaks))\n if len(self.pending_peaks)>0:\n sample_ind, chan_ind, ampl = self.pending_peaks[0]\n self.pending_peaks = self.pending_peaks[1:]\n return sample_ind, chan_ind\n else:\n return LABEL_NO_MORE_PEAK, None\n\n def on_accepted_spike(self, sample_ind, cluster_idx, jitter):\n # remove spike prediction from fifo residuals\n #~ t1 = time.perf_counter()\n pos, pred = make_prediction_one_spike(sample_ind, cluster_idx, jitter, self.fifo_residuals.dtype, self.catalogue)\n #~ t2 = time.perf_counter()\n #~ print(' make_prediction_one_spike', (t2-t1)*1000)\n \n #~ t1 = time.perf_counter()\n self.fifo_residuals[pos:pos+self.peak_width_long, :] -= pred\n #~ t2 = time.perf_counter()\n #~ print(' self.fifo_residuals -', (t2-t1)*1000)\n\n # this prevent search peaks in the zone until next \"reset_to_not_tested\"\n #~ t1 = time.perf_counter()\n self.clean_pending_peaks_zone(sample_ind, cluster_idx)\n #~ t2 = time.perf_counter()\n #~ print(' self.clean_pending_peaks_zone -', (t2-t1)*1000)\n\n\n def clean_pending_peaks_zone(self, sample_ind, cluster_idx):\n # TODO test with sparse_mask_level3s!!!!!\n mask = self.sparse_mask_level1[cluster_idx, :]\n\n \n #~ t1 = time.perf_counter()\n #~ keep = np.zeros(self.pending_peaks.size, dtype='bool')\n #~ for i, peak in enumerate(self.pending_peaks):\n #~ in_zone = mask[peak['chan_index']] and \\\n #~ (peak['sample_index']+self.n_left)<sample_ind and \\\n #~ sample_ind<(peak['sample_index']+self.n_right)\n #~ keep[i] = not(in_zone)\n \n peaks = self.pending_peaks\n in_zone = mask[peaks['chan_index']] &\\\n ((peaks['sample_index']+self.n_left)<sample_ind) & \\\n ((peaks['sample_index']+self.n_right)>sample_ind)\n keep = ~ in_zone\n #~ t2 = time.perf_counter()\n #~ print(' clean_pending_peaks_zone loop', (t2-t1)*1000)\n \n self.pending_peaks = self.pending_peaks[keep]\n \n #~ print('clean_pending_peaks_zone', self.pending_peaks.size)\n \n def set_already_tested(self, sample_ind, peak_chan):\n self.mask_already_tested[sample_ind, peak_chan] = True\n\n def reset_to_not_tested(self, good_spikes):\n for spike in good_spikes:\n # each good spike can remove from\n cluster_idx = self.catalogue['label_to_index'][spike.cluster_label]\n chan_mask = self.sparse_mask_level1[cluster_idx, :]\n self.mask_already_tested[spike.index + self.n_left_long:spike.index + self.n_right_long][:, chan_mask] = False\n \n self.re_detect_local_peak()\n \n\n def get_no_label_peaks(self):\n mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)\n nolabel_indexes, chan_indexes = np.nonzero(mask)\n #~ nolabel_indexes, chan_indexes = np.nonzero(~self.mask_not_already_tested)\n \n nolabel_indexes += self.n_span\n nolabel_indexes = nolabel_indexes[nolabel_indexes<(self.chunksize+self.n_span)]\n bad_spikes = np.zeros(nolabel_indexes.shape[0], dtype=_dtype_spike)\n bad_spikes['index'] = nolabel_indexes\n bad_spikes['cluster_label'] = LABEL_UNCLASSIFIED\n return bad_spikes\n\n def get_best_template(self, left_ind, chan_ind):\n\n full_waveform = self.fifo_residuals[left_ind:left_ind+self.peak_width,:]\n \n centers0 = self.catalogue['centers0']\n projections = self.catalogue['projections']\n\n strict_low = 
self.catalogue['boundaries'][:, 0]\n strict_high = self.catalogue['boundaries'][:, 1]\n flexible_low = self.catalogue['boundaries'][:, 2]\n flexible_high = self.catalogue['boundaries'][:, 3]\n \n \n n = centers0.shape[0]\n flat_waveform = full_waveform.flatten()\n flat_centers0 = centers0.reshape(n, -1)\n \n #~ scalar_products = np.zeros(n, dtype='float32')\n #~ for i in range(n):\n #~ sp = np.sum((flat_waveform - flat_centers0[i, :]) * projections[i, :])\n #~ scalar_products[i] = sp\n #~ scalar_products = np.sum((flat_waveform[np.newaxis, :] - flat_centers0[:, :]) * projections[:, :], axis=1)\n #~ print(scalar_products)\n \n #~ t1 = time.perf_counter()\n scalar_products = numba_sparse_scalar_product(self.fifo_residuals, left_ind, centers0, projections, chan_ind,\n self.sparse_mask_level1, )\n #~ t2 = time.perf_counter()\n #~ print('numba_sparse_scalar_product', (t2-t1)*1000)\n\n #~ print(scalar_products)\n \n \n possible_idx, = np.nonzero((scalar_products < strict_high) & (scalar_products > strict_low))\n #~ possible_idx, = np.nonzero((scalar_products < flexible_high) & (scalar_products > flexible_low))\n \n #~ print('possible_idx', possible_idx)\n #~ print('scalar_products[possible_idx]', scalar_products[possible_idx])\n \n \n #~ do_plot = False\n if len(possible_idx) == 1:\n extra_idx = None\n candidates_idx =possible_idx\n elif len(possible_idx) == 0:\n #~ extra_idx, = np.nonzero((np.abs(scalar_products) < 0.5))\n extra_idx, = np.nonzero((scalar_products < flexible_high) & (scalar_products > flexible_low))\n #~ if len(extra_idx) ==0:\n # give a try to very far ones.\n #~ extra_idx, = np.nonzero((np.abs(scalar_products) < 1.))\n #~ print('extra_idx', extra_idx)\n #~ if len(extra_idx) ==0:\n #~ candidates_idx = []\n #~ else:\n #~ candidates_idx = extra_idx\n candidates_idx = extra_idx\n #~ candidates_idx =possible_idx\n #~ pass\n elif len(possible_idx) > 1 :\n extra_idx = None\n candidates_idx = possible_idx\n \n debug_plot_change = False\n if len(candidates_idx) > 0:\n #~ t1 = time.perf_counter()\n candidates_idx = np.array(candidates_idx, dtype='int64')\n common_mask = np.sum(self.sparse_mask_level3[candidates_idx, :], axis=0) > 0\n shift_scalar_product, shift_distance = numba_explore_best_shift(self.fifo_residuals, left_ind, self.catalogue['centers0'],\n self.catalogue['projections'], candidates_idx, self.maximum_jitter_shift, common_mask, self.sparse_mask_level1)\n #~ i0, i1 = np.unravel_index(np.argmin(np.abs(shift_scalar_product), axis=None), shift_scalar_product.shape)\n i0, i1 = np.unravel_index(np.argmin(shift_distance, axis=None), shift_distance.shape)\n #~ best_idx = candidates_idx[i0]\n shift = self.shifts[i1]\n cluster_idx = candidates_idx[i0]\n final_scalar_product = shift_scalar_product[i0, i1]\n #~ t2 = time.perf_counter()\n #~ print('numba_explore_best_shift', (t2-t1)*1000)\n\n\n\n \n \n #~ print('shift', shift)\n #~ print('cluster_idx', cluster_idx)\n #~ print('final_scalar_product', final_scalar_product)\n \n \n if np.abs(shift) == self.maximum_jitter_shift:\n cluster_idx = None\n shift = None\n final_scalar_product = None\n #~ print('maximum_jitter_shift >> cluster_idx = None ')\n #~ do_plot = True\n #~ i0_bis, i1_bis = np.unravel_index(np.argmin(np.abs(shift_scalar_product), axis=None), shift_scalar_product.shape)\n #~ if i0 != i0_bis:\n \n #~ debug_plot_change = True\n #~ print('Warning')\n #~ print(possible_idx)\n #~ print(shift_scalar_product)\n #~ print(shift_distance)\n \n \n \n \n #~ if best_idx != cluster_idx:\n #~ print('*'*50)\n #~ print('best_idx != 
cluster_idx', best_idx, cluster_idx)\n #~ print('*'*50)\n #~ cluster_idx = best_idx\n #~ debug_plot_change = True\n else:\n cluster_idx = None\n shift = None\n final_scalar_product = None\n \n #~ import matplotlib.pyplot as plt\n #~ fig, ax = plt.subplots()\n #~ ax.plot(self.shifts, shift_scalar_product.T)\n #~ plt.show()\n \n \n #~ print('ici',)\n \n\n # DEBUG OMP\n #~ from sklearn.linear_model import orthogonal_mp_gram\n #~ from sklearn.linear_model import OrthogonalMatchingPursuit\n #~ n_nonzero_coefs = 2\n #~ omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)\n #~ X = self.catalogue['centers0'].reshape(self.catalogue['centers0'].shape[0], -1).T\n #~ waveform = self.fifo_residuals[left_ind:left_ind+self.peak_width,:].flatten()\n #~ y = waveform\n #~ omp.fit(X, y)\n #~ coef = omp.coef_\n #~ idx_r, = coef.nonzero()\n #~ cluster_idx_omp = np.argmin(np.abs(coef - 1))\n \n \n #~ if cluster_idx_omp != cluster_idx and coef[cluster_idx_omp] > 0.5:\n #~ if True:\n if False:\n \n \n \n \n #~ if cluster_idx in (3,6):\n #~ if do_plot:\n #~ if False:\n #~ if final_scalar_product is not None and np.abs(final_scalar_product) > 0.5:\n \n #~ if True:\n #~ if len(possible_idx) != 1:\n #~ if len(possible_idx) > 1:\n #~ if len(candidates_idx) > 1:\n \n #~ if 7 in possible_idx or cluster_idx == 7:\n #~ if cluster_idx not in possible_idx and len(possible_idx) > 0:\n #~ if debug_plot_change:\n \n import matplotlib.pyplot as plt\n \n print()\n print('best cluster_idx', cluster_idx)\n print('possible_idx', possible_idx)\n print('extra_idx', extra_idx)\n print(scalar_products[possible_idx])\n print(strict_high[possible_idx])\n \n print('cluster_idx_omp', cluster_idx_omp)\n\n \n fig, ax = plt.subplots()\n ax.plot(coef)\n if cluster_idx is not None:\n ax.axvline(cluster_idx)\n ax.set_title(f'{cluster_idx} omp {cluster_idx_omp}')\n #~ plt.show()\n\n \n \n \n \n fig, ax = plt.subplots()\n shift2 = 0 if shift is None else shift\n full_waveform2 = self.fifo_residuals[left_ind+shift2:left_ind+shift2+self.peak_width,:]\n \n ax.plot(full_waveform2.T.flatten(), color='k')\n if shift !=0 and shift is not None:\n ax.plot(full_waveform.T.flatten(), color='grey', ls='--')\n \n for idx in candidates_idx:\n ax.plot(self.catalogue['centers0'][idx, :].T.flatten(), color='m')\n \n ax.plot(self.catalogue['centers0'][cluster_idx_omp, :].T.flatten(), color='y')\n \n \n \n \n if cluster_idx is not None:\n ax.plot(self.catalogue['centers0'][cluster_idx, :].T.flatten(), color='c', ls='--')\n ax.set_title(f'best {cluster_idx} shift {shift} possible_idx {possible_idx}')\n \n if shift is not None:\n fig, ax = plt.subplots()\n #~ ax.plot(self.shifts, np.abs(shift_scalar_product).T)\n ax.plot(self.shifts, shift_scalar_product.T)\n ax.axhline(0)\n \n\n fig, ax = plt.subplots()\n ax.plot(self.shifts, np.abs(shift_distance).T)\n \n plt.show()\n \n \n best_template_info = {'nb_candidate' : len(candidates_idx), 'final_scalar_product':final_scalar_product}\n \n \n return cluster_idx, shift, best_template_info\n\n\n def accept_tempate(self, left_ind, cluster_idx, jitter, best_template_info):\n if jitter is None:\n # this must have a jitter\n jitter = 0\n \n #~ if np.abs(jitter) > (self.maximum_jitter_shift - 0.5):\n #~ return False\n \n strict_low = self.catalogue['boundaries'][:, 0]\n strict_high = self.catalogue['boundaries'][:, 1]\n flexible_low = self.catalogue['boundaries'][:, 2]\n flexible_high = self.catalogue['boundaries'][:, 3]\n \n \n #~ flat_waveform = full_waveform.flatten()\n #~ sp2 = np.sum((flat_waveform - 
centers0[cluster_idx, :].flatten()) * projections[cluster_idx, :])\n sp = best_template_info['final_scalar_product']\n nb_candidate = best_template_info['nb_candidate']\n \n if nb_candidate == 1:\n \n #~ accept_template = strict_low[cluster_idx] < sp < strict_high[cluster_idx]\n accept_template = flexible_low[cluster_idx] < sp < flexible_high[cluster_idx]\n \n else:\n accept_template = flexible_low[cluster_idx] < sp < flexible_high[cluster_idx]\n \n \n # waveform L2 on mask\n #~ full_waveform = self.fifo_residuals[left_ind:left_ind+self.peak_width,:]\n #~ wf = full_waveform[:, mask]\n \n # prediction with interpolation\n #~ _, pred_wf = make_prediction_one_spike(left_ind - self.n_left, cluster_idx, jitter, self.fifo_residuals.dtype, self.catalogue, long=False)\n #~ pred_wf = pred_wf[:, mask]\n \n #~ dist = (pred_wf - wf) ** 2\n \n \n # criteria per channel\n #~ residual_nrj_by_chan = np.sum(dist, axis=0)\n #~ wf_nrj = np.sum(wf**2, axis=0)\n #~ weight = self.weight_per_template_dict[cluster_idx]\n #~ crietria_weighted = (wf_nrj>residual_nrj_by_chan).astype('float') * weight\n #~ accept_template = np.sum(crietria_weighted) >= 0.7 * np.sum(weight)\n \n # criteria per sample\n #~ dist * np.abs(pred_wf) < \n #~ dist_w = dist / np.abs(pred_wf)\n #~ gain = (dist < wf**2).astype('float') * np.abs(pred_wf) / np.sum(np.abs(pred_wf))\n #~ gain = (wf / pred_wf - 1) * np.abs(pred_wf) / np.sum(np.abs(pred_wf))\n #~ gain = (pred_wf**2 / wf**1 - 1) * np.abs(pred_wf) / np.sum(np.abs(pred_wf))\n #~ accept_template = np.sum(gain) > 0.8\n #~ accept_template = np.sum(gain) > 0.7\n #~ accept_template0 = np.sum(gain) > 0.6\n #~ accept_template = np.sum(gain) > 0.5\n \n # criteria max residual\n #~ max_res = np.max(np.abs(pred_wf - wf))\n #~ max_pred = np.max(np.abs(pred_wf))\n #~ accept_template1 = max_pred > max_res\n \n \n \n\n \n \n \n #~ accept_template = False\n \n # debug\n #~ limit_sp =self.catalogue['sp_normed_limit'][cluster_idx, :]\n #~ sp = np.sum(self.catalogue['centers0_normed'] * full_waveform * self.catalogue['template_weight'])\n #~ print('limit_sp', limit_sp, 'sp', sp)\n \n \n \n #~ accept_template = False\n #~ immediate_accept = False\n \n # DEBUG always refuse!!!!!\n #~ accept_template = False\n \n \n #~ label = self.catalogue['cluster_labels'][cluster_idx]\n \n # debug\n #~ if label == 13:\n #~ if accept_template and not immediate_accept:\n #~ accept_template = False\n \n # debug\n #~ if label == 13:\n #~ if not hasattr(self, 'count_accept'):\n #~ self.count_accept = {}\n #~ self.count_accept[label] = {'accept_template':0, 'immediate_accept':0, 'not_accepted':0}\n \n #~ if accept_template:\n #~ self.count_accept[label]['accept_template'] += 1\n #~ if immediate_accept:\n #~ self.count_accept[label]['immediate_accept'] += 1\n #~ else:\n #~ self.count_accept[label]['not_accepted'] += 1\n #~ print(self.count_accept)\n \n #~ if self._plot_debug:\n #~ if not accept_template and label in []:\n #~ if not accept_template:\n #~ if accept_template:\n #~ if True:\n if False:\n \n #~ if not immediate_accept:\n #~ if immediate_accept:\n #~ if immediate_accept:\n #~ if label == 7 and not accept_template:\n #~ if label == 7:\n #~ if label == 121:\n #~ if label == 5:\n #~ if nb_candidate > 1:\n \n #~ if label == 13 and accept_template and not immediate_accept:\n #~ if label == 13 and not accept_template:\n \n #~ if label in (7,9):\n #~ nears = np.array([ 5813767, 5813767, 11200038, 11322540, 14989650, 14989673, 14989692, 14989710, 15119220, 15830377, 16138346, 16216666, 17078883])\n #~ print(np.abs((left_ind 
- self.n_left) - nears))\n #~ print(np.abs((left_ind - self.n_left) - nears) < 2)\n #~ if label == 5 and np.any(np.abs((left_ind - self.n_left) - nears) < 50):\n \n #~ if immediate_accept:\n \n import matplotlib.pyplot as plt\n \n mask = self.sparse_mask_level2[cluster_idx]\n full_waveform = self.fifo_residuals[left_ind:left_ind+self.peak_width,:]\n wf = full_waveform[:, mask]\n _, pred_waveform = make_prediction_one_spike(left_ind - self.n_left, cluster_idx, jitter, self.fifo_residuals.dtype, self.catalogue, long=False)\n pred_wf = pred_waveform[:, mask]\n \n if accept_template:\n color = 'g'\n else:\n color = 'r'\n \n #~ if accept_template:\n #~ if immediate_accept:\n #~ color = 'g'\n #~ else:\n #~ color = 'c'\n #~ else:\n #~ color = 'r'\n \n #~ if not immediate_accept:\n #~ fig, ax = plt.subplots()\n #~ ax.plot(gain.T.flatten(), color=color)\n #~ ax.set_title('{}'.format(np.sum(gain)))\n\n #~ fig, ax = plt.subplots()\n #~ ax.plot(feat_centroids.T, alpha=0.5)\n #~ ax.plot(feat_waveform, color='k')\n\n fig, ax = plt.subplots()\n ax.plot(full_waveform.T.flatten(), color='k')\n ax.plot(pred_waveform.T.flatten(), color=color)\n \n l0, l1 = strict_low[cluster_idx], strict_high[cluster_idx]\n l2, l3 = flexible_low[cluster_idx], flexible_high[cluster_idx]\n title = f'{cluster_idx} {sp:0.3f} lim [{l0:0.3f} {l1:0.3f}] [{l2:0.3f} {l3:0.3f}] {nb_candidate}'\n ax.set_title(title)\n \n #~ fig, ax = plt.subplots()\n #~ ax.plot(wf.T.flatten(), color='k')\n #~ ax.plot(pred_wf.T.flatten(), color=color)\n \n #~ ax.plot( wf.T.flatten() - pred_wf.T.flatten(), color=color, ls='--')\n \n print()\n print('cluster_idx',cluster_idx, 'accept_template', accept_template)\n #~ print(distance, self.distance_limit[cluster_idx])\n #~ print('distance', distance, distance2, 'limit_distance', self.distance_limit[cluster_idx])\n\n #~ limit_sp =self.catalogue['sp_normed_limit'][cluster_idx, :]\n #~ sp = np.sum(self.catalogue['centers0_normed'] * full_waveform * self.catalogue['template_weight'])\n #~ sp = np.sum(self.catalogue['centers0_normed'] * full_waveform)\n #~ print('limit_sp', limit_sp, 'sp', sp)\n \n #~ if not immediate_accept:\n #~ print('np.sum(gain)', np.sum(gain))\n\n\n #~ fig, ax = plt.subplots()\n #~ res = wf - pred_wf\n #~ count, bins = np.histogram(res, bins=150, weights=np.abs(pred_wf))\n #~ ax.plot(bins[:-1], count)\n #~ plt.show()\n\n \n \n #~ if distance2 >= self.distance_limit[cluster_idx]:\n #~ print(crietria_weighted, weight)\n #~ print(np.sum(crietria_weighted), np.sum(weight))\n \n #~ ax.plot(full_wf0.T.flatten(), color='y')\n #~ ax.plot( full_wf.T.flatten() - full_wf0.T.flatten(), color='y')\n \n #~ ax.set_title('not accepted')\n plt.show()\n \n return accept_template\n \n \n def _plot_after_inner_peeling_loop(self):\n pass\n\n def _plot_before_peeling_loop(self):\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots()\n plot_sigs = self.fifo_residuals.copy()\n self._plot_sigs_before = plot_sigs\n #~ chan_order = np.argsort(self.channel_distances[0, :])\n \n for c in range(self.nb_channel):\n #~ for c in chan_order:\n plot_sigs[:, c] += c*30\n \n ax.plot(plot_sigs, color='k')\n\n ax.axvline(self.fifo_size - self.n_right_long, color='r')\n ax.axvline(-self.n_left_long, color='r')\n\n mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)\n sample_inds, chan_inds= np.nonzero(mask)\n sample_inds += self.n_span\n \n ax.scatter(sample_inds, plot_sigs[sample_inds, chan_inds], color='r')\n ax.set_title(f'nb peak {sample_inds.size}')\n \n #~ plt.show()\n \n \n def 
_plot_label_unclassified(self, left_ind, peak_chan, cluster_idx, jitter):\n return\n import matplotlib.pyplot as plt\n #~ print('LABEL UNCLASSIFIED', left_ind, cluster_idx)\n fig, ax = plt.subplots()\n \n wf = self.fifo_residuals[left_ind:left_ind+self.peak_width, :]\n wf0 = self.catalogue['centers0'][cluster_idx, :, :]\n \n ax.plot(wf.T.flatten(), color='b')\n #~ ax.plot(wf0.T.flatten(), color='g')\n \n ax.set_title(f'label_unclassified {left_ind-self.n_left} {cluster_idx} chan{peak_chan}')\n \n ax.axvline(peak_chan*self.peak_width-self.n_left)\n \n plt.show()\n\n def _plot_after_peeling_loop(self, good_spikes):\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots()\n plot_sigs = self.fifo_residuals.copy()\n \n \n for c in range(self.nb_channel):\n plot_sigs[:, c] += c*30\n ax.plot(plot_sigs, color='k')\n \n ax.plot(self._plot_sigs_before, color='b')\n \n ax.axvline(self.fifo_size - self.n_right_long, color='r')\n ax.axvline(-self.n_left_long, color='r')\n\n mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)\n sample_inds, chan_inds= np.nonzero(mask)\n sample_inds += self.n_span\n ax.scatter(sample_inds, plot_sigs[sample_inds, chan_inds], color='r')\n \n \n \n good_spikes = np.array(good_spikes, dtype=_dtype_spike)\n pred = make_prediction_signals(good_spikes, self.internal_dtype, plot_sigs.shape, self.catalogue, safe=True)\n plot_pred = pred.copy()\n for c in range(self.nb_channel):\n plot_pred[:, c] += c*30\n \n ax.plot(plot_pred, color='m')\n \n plt.show()\n\n\n\n\n\n" ]
[ [ "numpy.abs", "numpy.nonzero", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.argmin", "numpy.argsort", "numpy.array", "matplotlib.pyplot.show", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
reichang182/Transformer
[ "6f90c29eaaba898919b7689ab7e2cfce1604cdb8", "6f90c29eaaba898919b7689ab7e2cfce1604cdb8" ]
[ "src/transformers/models/roberta/modeling_roberta.py", "src/transformers/models/rag/modeling_rag.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch RoBERTa model. \"\"\"\n\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN, gelu\nfrom ...file_utils import (\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ...utils import logging\nfrom .configuration_roberta import RobertaConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"roberta-base\"\n_CONFIG_FOR_DOC = \"RobertaConfig\"\n_TOKENIZER_FOR_DOC = \"RobertaTokenizer\"\n\nROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"roberta-base\",\n \"roberta-large\",\n \"roberta-large-mnli\",\n \"distilroberta-base\",\n \"roberta-base-openai-detector\",\n \"roberta-large-openai-detector\",\n # See all RoBERTa models at https://huggingface.co/models?filter=roberta\n]\n\n\nclass RobertaEmbeddings(nn.Module):\n \"\"\"\n Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.\n \"\"\"\n\n # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n\n # End copy\n self.padding_idx = config.pad_token_id\n self.position_embeddings = nn.Embedding(\n config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx\n )\n\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n if position_ids is None:\n if input_ids 
is not None:\n # Create the position ids from the input token ids. Any padded tokens remain padded.\n position_ids = create_position_ids_from_input_ids(\n input_ids, self.padding_idx, past_key_values_length\n ).to(input_ids.device)\n else:\n position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)\n\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n if self.position_embedding_type == \"absolute\":\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n def create_position_ids_from_inputs_embeds(self, inputs_embeds):\n \"\"\"\n We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.\n\n Args:\n inputs_embeds: torch.Tensor\n\n Returns: torch.Tensor\n \"\"\"\n input_shape = inputs_embeds.size()[:-1]\n sequence_length = input_shape[1]\n\n position_ids = torch.arange(\n self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device\n )\n return position_ids.unsqueeze(0).expand(input_shape)\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta\nclass RobertaSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n self.max_position_embeddings = config.max_position_embeddings\n self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\n\n self.is_decoder = config.is_decoder\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n is_cross_attention = 
encoder_hidden_states is not None\n\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_layer = past_key_value[0]\n value_layer = past_key_value[1]\n attention_mask = encoder_attention_mask\n elif is_cross_attention:\n key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))\n value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))\n attention_mask = encoder_attention_mask\n elif past_key_value is not None:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\n value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\n else:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_layer, value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n seq_length = hidden_states.size()[1]\n position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)\n position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)\n distance = position_ids_l - position_ids_r\n positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)\n positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility\n\n if self.position_embedding_type == \"relative_key\":\n relative_position_scores = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores\n elif self.position_embedding_type == \"relative_key_query\":\n relative_position_scores_query = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n relative_position_scores_key = torch.einsum(\"bhrd,lrd->bhlr\", key_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if 
we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n if self.is_decoder:\n outputs = outputs + (past_key_value,)\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfOutput\nclass RobertaSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta\nclass RobertaAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = RobertaSelfAttention(config)\n self.output = RobertaSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate\nclass RobertaIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput\nclass RobertaOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, 
eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta\nclass RobertaLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = RobertaAttention(config)\n self.is_decoder = config.is_decoder\n self.add_cross_attention = config.add_cross_attention\n if self.add_cross_attention:\n assert self.is_decoder, f\"{self} should be used as a decoder model if cross attention is added\"\n self.crossattention = RobertaAttention(config)\n self.intermediate = RobertaIntermediate(config)\n self.output = RobertaOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n self_attention_outputs = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n past_key_value=self_attn_past_key_value,\n )\n attention_output = self_attention_outputs[0]\n\n # if decoder, the last output is tuple of self-attn cache\n if self.is_decoder:\n outputs = self_attention_outputs[1:-1]\n present_key_value = self_attention_outputs[-1]\n else:\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n cross_attn_present_key_value = None\n if self.is_decoder and encoder_hidden_states is not None:\n assert hasattr(\n self, \"crossattention\"\n ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\n\n # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n cross_attention_outputs = self.crossattention(\n attention_output,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n cross_attn_past_key_value,\n output_attentions,\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights\n\n # add cross-attn cache to positions 3,4 of present_key_value tuple\n cross_attn_present_key_value = cross_attention_outputs[-1]\n present_key_value = present_key_value + cross_attn_present_key_value\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n outputs = (layer_output,) + outputs\n\n # if decoder, return the attn key/values as the last output\n if self.is_decoder:\n outputs = outputs + (present_key_value,)\n\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta\nclass RobertaEncoder(nn.Module):\n def __init__(self, 
config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n\n next_decoder_cache = () if use_cache else None\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n past_key_value = past_key_values[i] if past_key_values is not None else None\n\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n\n if use_cache:\n logger.warning(\n \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting \"\n \"`use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, past_key_value, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache += (layer_outputs[-1],)\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n next_decoder_cache,\n all_hidden_states,\n all_self_attentions,\n all_cross_attentions,\n ]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_decoder_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPooler\nclass RobertaPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass RobertaPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = RobertaConfig\n base_model_prefix = \"roberta\"\n\n # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights\n def _init_weights(self, module):\n \"\"\" Initialize the weights 
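(Linear and Embedding weights are drawn from a normal distribution with standard deviation :obj:`config.initializer_range`, LayerNorm modules are reset to weight 1.0 and bias 0.0, and the padding row of an Embedding, if any, is zeroed out.) 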
\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nROBERTA_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the\n model. Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nROBERTA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.RobertaTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. 
Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attention tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaModel(RobertaPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz\n Kaiser and Illia Polosukhin.\n\n To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both the :obj:`is_decoder`\n argument and :obj:`add_cross_attention` set to :obj:`True`; :obj:`encoder_hidden_states` is then expected as an\n input to the forward pass.\n\n .. _`Attention is all you need`: https://arxiv.org/abs/1706.03762\n\n \"\"\"\n\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = RobertaEmbeddings(config)\n self.encoder = RobertaEncoder(config)\n\n self.pooler = RobertaPooler(config) if add_pooling_layer else None\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPoolingAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n # Copied from transformers.models.bert.modeling_bert.BertModel.forward\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. 
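Each layer's tuple holds the projected key and value states of the self-attention and, for decoder layers with cross-attention, of the encoder sequence as well. 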
Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n 
encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. \"\"\", ROBERTA_START_DOCSTRING\n)\nclass RobertaForCausalLM(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if not config.is_decoder:\n logger.warning(\"If you want to use `RobertaForCausalLM` as a standalone, add `is_decoder=True`.\")\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.lm_head = RobertaLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the left-to-right language modeling loss (next word prediction). 
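In practice the :obj:`labels` can simply be the :obj:`input_ids`, as the shift for next-token prediction is performed inside the model. 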
Indices should be in\n ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are\n ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n\n Returns:\n\n Example::\n\n >>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig\n >>> import torch\n\n >>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n >>> config = RobertaConfig.from_pretrained(\"roberta-base\")\n >>> config.is_decoder = True\n >>> model = RobertaForCausalLM.from_pretrained('roberta-base', config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n\n lm_loss = None\n if labels is not None:\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=lm_loss,\n logits=prediction_scores,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n cross_attentions=outputs.cross_attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # cut decoder_input_ids if past is used\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask, \"past_key_values\": past}\n\n def 
_reorder_cache(self, past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)\n return reordered_past\n\n\n@add_start_docstrings(\"\"\"RoBERTa Model with a `language modeling` head on top. \"\"\", ROBERTA_START_DOCSTRING)\nclass RobertaForMaskedLM(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.lm_head = RobertaLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n mask=\"<mask>\",\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. 
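A common recipe is to copy the :obj:`input_ids` and replace every token that is not masked with ``-100``, so that the loss is computed only on the masked tokens. 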
Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass RobertaLMHead(nn.Module):\n \"\"\"Roberta Head for masked language modeling.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, features, **kwargs):\n x = self.dense(features)\n x = gelu(x)\n x = self.layer_norm(x)\n\n # project back to size of vocabulary with bias\n x = self.decoder(x)\n\n return x\n\n\n@add_start_docstrings(\n \"\"\"\n RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForSequenceClassification(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.classifier = RobertaClassificationHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. 
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = outputs[0]\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForMultipleChoice(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.roberta = RobertaModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n token_type_ids=None,\n attention_mask=None,\n labels=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. 
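Internally, the inputs are flattened to :obj:`(batch_size * num_choices, sequence_length)` before being passed to the model, and the resulting logits are reshaped back to :obj:`(batch_size, num_choices)`. 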
(See\n :obj:`input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n flat_inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.roberta(\n flat_input_ids,\n position_ids=flat_position_ids,\n token_type_ids=flat_token_type_ids,\n attention_mask=flat_attention_mask,\n head_mask=head_mask,\n inputs_embeds=flat_inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForTokenClassification(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. 
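One label is expected per input token; when an :obj:`attention_mask` is provided, only positions with mask value ``1`` contribute to the loss. 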
Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass RobertaClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, features, **kwargs):\n x = features[:, 0, :] # take <s> token (equiv. to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = torch.tanh(x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForQuestionAnswering(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Positions outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, the split may add an extra dimension; squeeze it\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs; we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\ndef create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):\n \"\"\"\n Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\n are ignored. This is modified from fairseq's `utils.make_positions`.\n\n Args:\n input_ids (:obj:`torch.Tensor`): Tensor of input token ids.\n padding_idx (:obj:`int`): Id of the padding token.\n past_key_values_length (:obj:`int`, `optional`, defaults to 0): Number of cached past positions to offset by.\n\n Returns: torch.Tensor\n \"\"\"\n # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.\n mask = input_ids.ne(padding_idx).int()\n incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask\n return incremental_indices.long() + padding_idx\n", "# coding=utf-8\n# Copyright 2020, The RAG Authors and The HuggingFace Inc. 
team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"RAG model implementation.\"\"\"\n\nfrom dataclasses import dataclass\nfrom typing import Callable, List, Optional, Tuple\n\nimport torch\n\nfrom ...configuration_utils import PretrainedConfig\nfrom ...file_utils import add_start_docstrings_to_model_forward, replace_return_docstrings\nfrom ...generation_beam_search import BeamSearchScorer\nfrom ...modeling_outputs import ModelOutput\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import logging\nfrom .configuration_rag import RagConfig\nfrom .retrieval_rag import RagRetriever\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"RagConfig\"\n\n\n@dataclass\nclass RetrievAugLMMarginOutput(ModelOutput):\n \"\"\"\n Base class for retriever augmented marginalized models outputs.\n\n Args:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):\n Language modeling loss.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head. The score is possibly marginalized over all documents for\n each vocabulary token.\n doc_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs)`):\n Score between each retrieved document embeddings (see :obj:`retrieved_doc_embeds`) and\n :obj:`question_encoder_last_hidden_state`.\n past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):\n List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,\n batch_size, num_heads, sequence_length, embed_size_per_head)`).\n\n Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used\n (see :obj:`past_key_values` input) to speed up sequential decoding.\n retrieved_doc_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs, hidden_size)`, `optional`, returned when `output_retrieved=True`):\n Embedded documents retrieved by the retriever. 
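(In practice each score is an inner product between the question representation and the corresponding document embedding.) 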
Is used with ``question_encoder_last_hidden_state`` to\n compute the ``doc_scores``.\n retrieved_doc_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, config.n_docs)`, `optional`, returned when `output_retrieved=True`):\n The indexes of the embedded documents retrieved by the retriever.\n context_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):\n Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.\n context_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):\n Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the\n retriever.\n question_encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden states at the output of the last layer of the question encoder pooled output of the\n model.\n question_enc_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each\n layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.\n question_enc_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights of the question encoder, after the attention softmax, used to compute the weighted\n average in the self-attention heads.\n generator_enc_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the generator encoder of the model.\n generator_enc_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each\n layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.\n generator_enc_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted\n average in the self-attention heads.\n generator_dec_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each\n layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden states of the generator 
decoder at the output of each layer plus the initial embedding outputs.\n generator_dec_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted\n average in the self-attention heads.\n generator_cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n doc_scores: torch.FloatTensor = None\n past_key_values: Optional[List[torch.FloatTensor]] = None\n retrieved_doc_embeds: Optional[torch.FloatTensor] = None\n retrieved_doc_ids: Optional[torch.LongTensor] = None\n context_input_ids: Optional[torch.LongTensor] = None\n context_attention_mask: Optional[torch.LongTensor] = None\n question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n question_enc_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n question_enc_attentions: Optional[Tuple[torch.FloatTensor]] = None\n generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None\n generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n generator_enc_attentions: Optional[Tuple[torch.FloatTensor]] = None\n generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n generator_dec_attentions: Optional[Tuple[torch.FloatTensor]] = None\n generator_cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@dataclass\nclass RetrievAugLMOutput(ModelOutput):\n \"\"\"\n Args:\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head. The score is possibly marginalized over all documents for\n each vocabulary token.\n doc_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs)`):\n Score between each retrieved document embeddings (see :obj:`retrieved_doc_embeds`) and\n :obj:`question_encoder_last_hidden_state`.\n past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):\n List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,\n batch_size, num_heads, sequence_length, embed_size_per_head)`).\n\n Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used\n (see :obj:`past_key_values` input) to speed up sequential decoding.\n retrieved_doc_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs, hidden_size)`, `optional`, returned when `output_retrieved=True`):\n Embedded documents retrieved by the retriever. 
Is used with ``question_encoder_last_hidden_state`` to\n compute the ``doc_scores``.\n retrieved_doc_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, config.n_docs)`, `optional`, returned when `output_retrieved=True`):\n The indexes of the embedded documents retrieved by the retriever.\n context_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):\n Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.\n context_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):\n Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the\n retriever.\n question_encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden states at the output of the last layer of the question encoder pooled output of the\n model.\n question_enc_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each\n layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.\n question_enc_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights of the question encoder, after the attention softmax, used to compute the weighted\n average in the self-attention heads.\n generator_enc_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the generator encoder of the model.\n generator_enc_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each\n layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.\n generator_enc_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted\n average in the self-attention heads.\n generator_dec_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each\n layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden states of the generator 
decoder at the output of each layer plus the initial embedding outputs.\n generator_dec_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attention weights of the generator decoder, after the attention softmax, used to compute the weighted\n average in the self-attention heads.\n generator_cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Cross-attention weights of the generator decoder, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n \"\"\"\n\n logits: torch.FloatTensor = None\n doc_scores: torch.FloatTensor = None\n past_key_values: Optional[List[torch.FloatTensor]] = None\n retrieved_doc_embeds: Optional[torch.FloatTensor] = None\n retrieved_doc_ids: Optional[torch.LongTensor] = None\n context_input_ids: Optional[torch.LongTensor] = None\n context_attention_mask: Optional[torch.LongTensor] = None\n question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n question_enc_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n question_enc_attentions: Optional[Tuple[torch.FloatTensor]] = None\n generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None\n generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n generator_enc_attentions: Optional[Tuple[torch.FloatTensor]] = None\n generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n generator_dec_attentions: Optional[Tuple[torch.FloatTensor]] = None\n generator_cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nclass RagPreTrainedModel(PreTrainedModel):\n r\"\"\"\n RAG models were released with the paper `Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks\n <https://arxiv.org/abs/2005.11401>`_ by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.\n\n RAG is a retrieval-augmented model that encapsulates three components: a question encoder, a dataset retriever and a\n generator. The encoder and generator are trainable, while the retriever is just an indexed dataset.\n\n \"\"\"\n config_class = RagConfig\n base_model_prefix = \"rag\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n @classmethod\n def from_pretrained_question_encoder_generator(\n cls,\n question_encoder_pretrained_model_name_or_path: str = None,\n generator_pretrained_model_name_or_path: str = None,\n retriever: RagRetriever = None,\n *model_args,\n **kwargs\n ) -> PreTrainedModel:\n r\"\"\"\n Instantiates a question encoder and a generator from one or two base classes of the library from pretrained\n model checkpoints.\n\n The model is set in evaluation mode by default using :obj:`model.eval()` (Dropout modules are deactivated). To\n train the model, you need to first set it back in training mode with :obj:`model.train()`.\n\n Params:\n question_encoder_pretrained_model_name_or_path (:obj:`str`, `optional`, defaults to `None`):\n Information necessary to initiate the question encoder. 
Can be either:\n\n - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.\n Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under\n a user or organization name, like ``dbmdz/bert-base-german-cased``.\n - A path to a `directory` containing model weights saved using\n :func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.\n - A path or url to a `tensorflow index checkpoint file` (e.g., ``./tf_model/model.ckpt.index``). In\n this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided\n as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in\n a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n generator_pretrained_model_name_or_path (:obj: `str`, `optional`, defaults to `None`):\n Information necessary to initiate the generator. Can be either:\n\n - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.\n Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under\n a user or organization name, like ``dbmdz/bert-base-german-cased``.\n - A path to a `directory` containing model weights saved using\n :func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.\n - A path or url to a `tensorflow index checkpoint file` (e.g., ``./tf_model/model.ckpt.index``). In\n this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided\n as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in\n a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args (remaining positional arguments, `optional`):\n All remaining positional arguments will be passed to the underlying model's ``__init__`` method.\n retriever (:class:`~transformers.RagRetriever`, `optional`):\n The retriever to use.\n kwargs (remaining dictionary of keyword arguments, `optional`):\n Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,\n ``output_attentions=True``).\n\n - To update the question_encoder configuration, use the prefix `question_encoder_` for each\n configuration parameter.\n - To update the generator configuration, use the prefix `generator_` for each configuration parameter.\n - To update the parent model configuration, do not use a prefix for each configuration parameter.\n\n Behaves differently depending on whether a :obj:`config` is provided or automatically loaded.\n\n Example::\n\n >>> from transformers import RagModel\n >>> # initialize a RAG from two pretrained models.\n >>> model = RagModel.from_pretrained_question_encoder_generator('facebook/dpr-question_encoder-single-nq-base', 't5-small')\n >>> # saving model after fine-tuning\n >>> model.save_pretrained(\"./rag\")\n >>> # load fine-tuned model\n >>> model = RagModel.from_pretrained(\"./rag\")\n\n \"\"\"\n\n kwargs_question_encoder = {\n argument[len(\"question_encoder_\") :]: value\n for argument, value in kwargs.items()\n if argument.startswith(\"question_encoder_\")\n }\n\n kwargs_generator = {\n argument[len(\"generator_\") :]: value\n for argument, value in kwargs.items()\n if argument.startswith(\"generator_\")\n }\n\n # remove question_encoder, generator kwargs from kwargs\n for key in kwargs_question_encoder.keys():\n del 
kwargs[\"question_encoder_\" + key]\n for key in kwargs_generator.keys():\n del kwargs[\"generator_\" + key]\n\n # Load and initialize the question_encoder and generator\n # The distinction between question_encoder and generator at the model level is made\n # by the value of the flag `is_generator` that we need to set correctly.\n question_encoder = kwargs_question_encoder.pop(\"model\", None)\n if question_encoder is None:\n assert (\n question_encoder_pretrained_model_name_or_path is not None\n ), \"If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to be defined\"\n from ..auto.modeling_auto import AutoModel\n\n if \"config\" not in kwargs_question_encoder:\n from ..auto.configuration_auto import AutoConfig\n\n question_encoder_config = AutoConfig.from_pretrained(question_encoder_pretrained_model_name_or_path)\n kwargs_question_encoder[\"config\"] = question_encoder_config\n\n question_encoder = AutoModel.from_pretrained(\n question_encoder_pretrained_model_name_or_path, *model_args, **kwargs_question_encoder\n )\n\n generator = kwargs_generator.pop(\"model\", None)\n if generator is None:\n assert (\n generator_pretrained_model_name_or_path is not None\n ), \"If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has to be defined\"\n from ..auto.modeling_auto import AutoModelForSeq2SeqLM\n\n if \"config\" not in kwargs_generator:\n from ..auto.configuration_auto import AutoConfig\n\n generator_config = AutoConfig.from_pretrained(generator_pretrained_model_name_or_path)\n kwargs_generator[\"config\"] = generator_config\n\n generator = AutoModelForSeq2SeqLM.from_pretrained(\n generator_pretrained_model_name_or_path, **kwargs_generator\n )\n\n # instantiate config with corresponding kwargs\n config = kwargs.get(\"config\", None)\n if config is None:\n config = RagConfig.from_question_encoder_generator_configs(\n question_encoder.config, generator.config, **kwargs\n )\n\n return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)\n\n\nRAG_START_DOCSTRING = r\"\"\"\n\n RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a forward\n pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context\n documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator.\n\n The question encoder can be any `autoencoding` model, preferably :class:`~transformers.DPRQuestionEncoder`, and the\n generator can be any `seq2seq` model, preferably :class:`~transformers.BartForConditionalGeneration`.\n\n The model can be initialized with a :class:`~transformers.RagRetriever` for end-to-end generation or used in\n combination with the outputs of a retriever in multiple steps---see examples for more details. The model is\n compatible any `autoencoding` model as the ``question_encoder`` and any `seq2seq` model with language model head as\n the ``generator``. It has been tested with :class:`~transformers.DPRQuestionEncoder` as the ``question_encoder``\n and :class:`~transformers.BartForConditionalGeneration` or :class:`~transformers.T5ForConditionalGeneration` as the\n ``generator``.\n\n This model inherits from :class:`~transformers.PreTrainedModel`. 
Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n\n Args:\n config (:class:`~transformers.RagConfig`):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n question_encoder (:class:`transformers.PreTrainedModel`):\n An encoder model compatible with the faiss index encapsulated by the ``retriever``.\n generator (:class:`transformers.PreTrainedModel`):\n A seq2seq model used as the generator in the RAG architecture.\n retriever (:class:`~transformers.RagRetriever`):\n A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.\n\"\"\"\n\n\nRAG_FORWARD_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. :class:`~transformers.RagConfig`, used to initialize\n the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that\n tokenizer class to obtain the indices.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`)\n Tuple consists of (:obj:`generator_enc_last_hidden_state`, `optional`: :obj:`generator_enc_hidden_states`,\n `optional`: :obj:`generator_enc_attentions`). :obj:`generator_enc_last_hidden_state` of shape\n :obj:`(batch_size, n_docs * sequence_length, hidden_size)` is a sequence of hidden-states at the output of\n the last layer of the generator's encoder.\n\n Used by the (:class:`~transformers.RagModel`) model during decoding.\n decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n Provide for generation tasks. `None` by default, construct as per instructions for the generator model\n you're using with your RAG instance.\n decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will\n also be used by default.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`):\n Tuple consists of two elements: :obj:`encoder_outputs` of the RAG model (see :obj:`encoder_outputs`) and\n :obj:`past_key_values` of the underlying generator. 
Can be used to speed up decoding.\n :obj:`past_key_values` are used in the (:class:`~transformers.RagTokenForGeneration`) model during\n decoding.\n doc_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs)`):\n Score between each retrieved document embeddings (see :obj:`retrieved_doc_embeds`) and\n :obj:`question_encoder_last_hidden_state`. If the model is not initialized with a ``retriever``,\n :obj:`doc_scores` has to be provided to the forward pass. :obj:`doc_scores` can be computed via\n :obj:`question_encoder_last_hidden_state` and :obj:`retrieved_doc_embeds`; see the examples for more\n information.\n context_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):\n Input IDs post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the\n retriever.\n\n If the model is not initialized with a ``retriever``, :obj:`context_input_ids` has to be provided to the\n forward pass. :obj:`context_input_ids` are returned by :meth:`~transformers.RagRetriever.__call__`.\n context_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):\n Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the\n retriever.\n\n If the model is not initialized with a ``retriever``, :obj:`context_attention_mask` has to be provided\n to the forward pass. :obj:`context_attention_mask` are returned by\n :meth:`~transformers.RagRetriever.__call__`.\n use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n output_retrieved (:obj:`bool`, `optional`):\n Whether or not to return the :obj:`retrieved_doc_embeds`, :obj:`retrieved_doc_ids`,\n :obj:`context_input_ids` and :obj:`context_attention_mask`. 
See returned tensors for more detail.\n n_docs (:obj:`int`, `optional`, defaults to :obj:`config.n_docs`)\n Number of documents to retrieve and/or number of documents for which to generate an answer.\n\"\"\"\n\n\n@add_start_docstrings_to_model_forward(RAG_START_DOCSTRING)\nclass RagModel(RagPreTrainedModel):\n def __init__(\n self,\n config: Optional[PretrainedConfig] = None,\n question_encoder: Optional[PreTrainedModel] = None,\n generator: Optional[PreTrainedModel] = None,\n retriever: Optional = None, # or maybe just use a `set_retriever(...)` method\n **kwargs,\n ):\n assert config is not None or (\n question_encoder is not None and generator is not None\n ), \"Either a configuration or an question_encoder and a generator has to be provided.\"\n\n if config is None:\n config = RagConfig.from_question_encoder_generator_configs(\n question_encoder.config, generator.config, **kwargs\n )\n else:\n assert isinstance(config, self.config_class), f\"config: {config} has to be of type {self.config_class}\"\n super().__init__(config)\n if question_encoder is None:\n from ..auto.modeling_auto import AutoModel\n\n question_encoder = AutoModel.from_config(config.question_encoder)\n\n if generator is None:\n from ..auto.modeling_auto import AutoModelForSeq2SeqLM\n\n generator = AutoModelForSeq2SeqLM.from_config(config.generator)\n\n self.retriever = retriever\n if self.retriever is not None:\n assert isinstance(\n retriever, RagRetriever\n ), f\"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`\"\n self.retriever = retriever\n\n self.question_encoder = question_encoder\n self.generator = generator\n\n @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=RetrievAugLMOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_outputs=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n past_key_values=None,\n doc_scores=None,\n context_input_ids=None,\n context_attention_mask=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n output_retrieved=None,\n n_docs=None,\n ):\n r\"\"\"\n Returns:\n\n Example::\n\n >>> from transformers import RagTokenizer, RagRetriever, RagModel\n >>> import torch\n\n >>> tokenizer = RagTokenizer.from_pretrained(\"facebook/rag-token-base\")\n >>> retriever = RagRetriever.from_pretrained(\"facebook/rag-token-base\", index_name=\"exact\", use_dummy_dataset=True)\n >>> # initialize with RagRetriever to do everything in one forward call\n >>> model = RagModel.from_pretrained(\"facebook/rag-token-base\", retriever=retriever)\n\n >>> inputs = tokenizer(\"How many people live in Paris?\", return_tensors=\"pt\")\n >>> outputs = model(input_ids=inputs[\"input_ids\"])\n \"\"\"\n n_docs = n_docs if n_docs is not None else self.config.n_docs\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n output_retrieved = output_retrieved if output_retrieved is not None else self.config.output_retrieved\n\n # whether retriever has to be used\n has_to_retrieve = (\n self.retriever is not None\n and (context_input_ids is None or context_attention_mask is None or doc_scores is None)\n and encoder_outputs is None\n )\n # encoder_outputs are pre-computed 
during RAG-token generation\n if encoder_outputs is None:\n\n if has_to_retrieve:\n question_enc_outputs = self.question_encoder(\n input_ids, attention_mask=attention_mask, return_dict=True\n )\n question_encoder_last_hidden_state = question_enc_outputs[0] # hidden states of question encoder\n\n retriever_outputs = self.retriever(\n input_ids,\n question_encoder_last_hidden_state.cpu().detach().to(torch.float32).numpy(),\n prefix=self.generator.config.prefix,\n n_docs=n_docs,\n return_tensors=\"pt\",\n )\n context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = (\n retriever_outputs[\"context_input_ids\"],\n retriever_outputs[\"context_attention_mask\"],\n retriever_outputs[\"retrieved_doc_embeds\"],\n retriever_outputs[\"doc_ids\"],\n )\n\n # set to correct device\n retrieved_doc_embeds = retrieved_doc_embeds.to(question_encoder_last_hidden_state)\n context_input_ids = context_input_ids.to(input_ids)\n context_attention_mask = context_attention_mask.to(input_ids)\n\n # compute doc_scores\n doc_scores = torch.bmm(\n question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)\n ).squeeze(1)\n else:\n assert (\n context_input_ids is not None\n ), \"Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function.\"\n assert (\n context_attention_mask is not None\n ), \"Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function.\"\n assert (\n doc_scores is not None\n ), \"Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function.\"\n\n assert (\n doc_scores is not None\n ), \"Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function.\"\n\n assert (\n doc_scores.shape[1] % n_docs\n ) == 0, f\"The second dimension of `doc_scores` should be a multiple of `n_docs`={n_docs}, but is {doc_scores.shape[1]}.\"\n\n # repeat the decoder inputs once per retrieved document\n if decoder_input_ids is not None:\n decoder_input_ids = decoder_input_ids.repeat_interleave(n_docs, dim=0)\n\n if decoder_attention_mask is not None:\n decoder_attention_mask = decoder_attention_mask.repeat_interleave(n_docs, dim=0)\n\n gen_outputs = self.generator(\n input_ids=context_input_ids,\n attention_mask=context_attention_mask,\n encoder_outputs=encoder_outputs,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n return_dict=True,\n )\n\n if not has_to_retrieve:\n question_encoder_last_hidden_state = None\n question_enc_hidden_states = None\n question_enc_attentions = None\n retrieved_doc_embeds = None\n retrieved_doc_ids = None\n else:\n question_enc_hidden_states = question_enc_outputs.hidden_states\n question_enc_attentions = question_enc_outputs.attentions\n\n if not has_to_retrieve or not output_retrieved:\n # don't output retrieved docs\n context_input_ids = None\n context_attention_mask = None\n retrieved_doc_embeds = None\n retrieved_doc_ids = None\n\n return RetrievAugLMOutput(\n logits=gen_outputs.logits,\n doc_scores=doc_scores,\n past_key_values=gen_outputs.past_key_values,\n context_input_ids=context_input_ids,\n context_attention_mask=context_attention_mask,\n retrieved_doc_embeds=retrieved_doc_embeds,\n 
retrieved_doc_ids=retrieved_doc_ids,\n question_encoder_last_hidden_state=question_encoder_last_hidden_state,\n question_enc_hidden_states=question_enc_hidden_states,\n question_enc_attentions=question_enc_attentions,\n generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state,\n generator_enc_hidden_states=gen_outputs.encoder_hidden_states,\n generator_enc_attentions=gen_outputs.encoder_attentions,\n generator_dec_hidden_states=gen_outputs.decoder_hidden_states,\n generator_dec_attentions=gen_outputs.decoder_attentions,\n generator_cross_attentions=gen_outputs.cross_attentions,\n )\n\n\n@add_start_docstrings_to_model_forward(\n \"\"\"\n A RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass.\n \"\"\",\n RAG_START_DOCSTRING,\n)\nclass RagSequenceForGeneration(RagPreTrainedModel):\n def __init__(\n self,\n config: Optional[PretrainedConfig] = None,\n question_encoder: Optional[PreTrainedModel] = None,\n generator: Optional[PreTrainedModel] = None,\n retriever: Optional = None,\n **kwargs,\n ):\n assert config is not None or (\n question_encoder is not None and generator is not None\n ), \"Either a configuration or an encoder and a generator has to be provided.\"\n\n if config is None:\n config = RagConfig.from_question_encoder_generator_configs(\n question_encoder.config, generator.config, **kwargs\n )\n super().__init__(config)\n\n # instantiate model\n self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)\n\n def set_retriever(self, retriever: RagRetriever):\n self.rag.retriever = retriever\n\n @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_outputs=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n past_key_values=None,\n context_input_ids=None,\n context_attention_mask=None,\n doc_scores=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n output_retrieved=None,\n exclude_bos_score=None,\n reduce_loss=None,\n labels=None,\n n_docs=None,\n **kwargs # needs kwargs for generation\n ):\n r\"\"\"\n exclude_bos_score (:obj:`bool`, `optional`):\n Only relevant if ``labels`` is passed. If :obj:`True`, the score of the BOS token is disregarded when\n computing the loss.\n reduce_loss (:obj:`bool`, `optional`):\n Only relevant if ``labels`` is passed. If :obj:`True`, the NLL loss is reduced using the\n ``torch.Tensor.sum`` operation.\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Legacy dictionary, which is required so that model can use `generate()` function.\n\n Returns:\n\n Example::\n\n >>> from transformers import RagTokenizer, RagRetriever, RagSequenceForGeneration\n >>> import torch\n\n >>> tokenizer = RagTokenizer.from_pretrained(\"facebook/rag-sequence-nq\")\n >>> retriever = RagRetriever.from_pretrained(\"facebook/rag-sequence-nq\", index_name=\"exact\", use_dummy_dataset=True)\n >>> # initialize with RagRetriever to do everything in one forward call\n >>> model = RagSequenceForGeneration.from_pretrained(\"facebook/rag-sequence-nq\", retriever=retriever)\n\n >>> inputs = tokenizer(\"How many people live in Paris?\", return_tensors=\"pt\")\n >>> with tokenizer.as_target_tokenizer():\n ... 
targets = tokenizer(\"In Paris, there are 10 million people.\", return_tensors=\"pt\")\n >>> input_ids = inputs[\"input_ids\"]\n >>> labels = targets[\"input_ids\"]\n >>> outputs = model(input_ids=input_ids, labels=labels)\n\n >>> # or use retriever separately\n >>> model = RagSequenceForGeneration.from_pretrained(\"facebook/rag-sequence-nq\", use_dummy_dataset=True)\n >>> # 1. Encode\n >>> question_hidden_states = model.question_encoder(input_ids)[0]\n >>> # 2. Retrieve\n >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors=\"pt\")\n >>> doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), docs_dict[\"retrieved_doc_embeds\"].float().transpose(1, 2)).squeeze(1)\n >>> # 3. Forward to generator\n >>> outputs = model(context_input_ids=docs_dict[\"context_input_ids\"], context_attention_mask=docs_dict[\"context_attention_mask\"], doc_scores=doc_scores, decoder_input_ids=labels)\n \"\"\"\n n_docs = n_docs if n_docs is not None else self.config.n_docs\n exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score\n reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss\n\n if labels is not None:\n if decoder_input_ids is None:\n decoder_input_ids = labels\n use_cache = False\n\n outputs = self.rag(\n input_ids=input_ids,\n attention_mask=attention_mask,\n encoder_outputs=encoder_outputs,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n context_input_ids=context_input_ids,\n context_attention_mask=context_attention_mask,\n doc_scores=doc_scores,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n output_retrieved=output_retrieved,\n n_docs=n_docs,\n )\n\n loss = None\n if labels is not None:\n loss = self.get_nll(\n outputs.logits,\n outputs.doc_scores,\n decoder_input_ids,\n reduce_loss=reduce_loss,\n epsilon=self.config.label_smoothing,\n exclude_bos_score=exclude_bos_score,\n n_docs=n_docs,\n )\n\n return RetrievAugLMMarginOutput(\n loss=loss,\n logits=outputs.logits,\n doc_scores=outputs.doc_scores,\n past_key_values=outputs.past_key_values,\n context_input_ids=outputs.context_input_ids,\n context_attention_mask=outputs.context_attention_mask,\n retrieved_doc_embeds=outputs.retrieved_doc_embeds,\n retrieved_doc_ids=outputs.retrieved_doc_ids,\n question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,\n question_enc_hidden_states=outputs.question_enc_hidden_states,\n question_enc_attentions=outputs.question_enc_attentions,\n generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,\n generator_enc_hidden_states=outputs.generator_enc_hidden_states,\n generator_enc_attentions=outputs.generator_enc_attentions,\n generator_dec_hidden_states=outputs.generator_dec_hidden_states,\n generator_dec_attentions=outputs.generator_dec_attentions,\n generator_cross_attentions=outputs.generator_cross_attentions,\n )\n\n @property\n def retriever(self):\n return self.rag.retriever\n\n @property\n def generator(self):\n return self.rag.generator\n\n @property\n def question_encoder(self):\n return self.rag.question_encoder\n\n @torch.no_grad()\n def generate(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n context_input_ids=None,\n context_attention_mask=None,\n doc_scores=None,\n do_deduplication=None, # defaults to True\n num_return_sequences=None, # 
defaults to 1\n num_beams=None, # defaults to 1\n n_docs=None,\n **model_kwargs\n ):\n \"\"\"\n Implements RAG sequence \"thorough\" decoding. Read the :meth:`~transformers.PreTrainedModel.generate``\n documentation for more information on how to set other generate input parameters.\n\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n The sequence used as a prompt for the generation. If :obj:`input_ids` is not passed, then\n :obj:`context_input_ids` has to be provided.\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n context_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):\n Input IDs post-processed from the retrieved documents and the question encoder input_ids by the\n retriever.\n context_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):\n Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by\n the retriever.\n\n If the model is not initialized with a ``retriever`` or ``input_ids`` is not given,\n :obj:`context_input_ids` and :obj:`context_attention_mask` have to be provided to the forward pass.\n They are returned by :meth:`~transformers.RagRetriever.__call__`.\n doc_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs)`):\n Score between each retrieved document embeddings (see :obj:`retrieved_doc_embeds`) and\n :obj:`question_encoder_last_hidden_state`.\n\n If the model is not initialized with a ``retriever`` or ``input_ids`` is not given, :obj:`doc_scores`\n has to be provided to the forward pass. :obj:`doc_scores` are returned by\n :meth:`~transformers.RagRetriever.__call__`.\n do_deduplication (:obj:`bool`, `optional`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has\n to be set to :obj:`False` if used while training with distributed backend.\n num_return_sequences(:obj:`int`, `optional`, defaults to 1):\n The number of independently computed returned sequences for each element in the batch. Note that this\n is not the value we pass to the ``generator``'s `:func:`~transformers.PreTrainedModel.generate``\n function, where we set ``num_return_sequences`` to :obj:`num_beams`.\n num_beams (:obj:`int`, `optional`, defaults to 1):\n Number of beams for beam search. 1 means no beam search.\n n_docs (:obj:`int`, `optional`, defaults to :obj:`config.n_docs`)\n Number of documents to retrieve and/or number of documents for which to generate an answer.\n kwargs:\n Additional kwargs will be passed to :meth:`~transformers.PreTrainedModel.generate`.\n\n Return:\n :obj:`torch.LongTensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`: The generated\n sequences. 
The second dimension (sequence length) is either equal to :obj:`max_length` or shorter if all\n batches finished early due to the :obj:`eos_token_id`.\n \"\"\"\n\n n_docs = n_docs if n_docs is not None else self.config.n_docs\n do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication\n num_doc_return_sequences = (\n num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences\n )\n num_beams = num_beams if num_beams is not None else self.config.num_beams\n\n assert (\n input_ids is not None or context_input_ids is not None\n ), \" At least one of input_ids or context_input_ids must be given\"\n\n if self.retriever is not None and context_input_ids is None:\n question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]\n context_input_ids = self.retriever(\n input_ids,\n question_hidden_states.cpu().detach().to(torch.float32).numpy(),\n prefix=self.generator.config.prefix,\n n_docs=n_docs,\n return_tensors=\"pt\",\n )[\"context_input_ids\"]\n\n # set to correct device\n context_input_ids = context_input_ids.to(input_ids)\n\n hypos = []\n model_kwargs[\"num_beams\"] = num_beams\n model_kwargs[\"num_return_sequences\"] = num_beams\n model_kwargs[\"attention_mask\"] = None\n\n batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs\n\n for index in range(batch_size):\n # first, generate beams from documents:\n generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len)\n\n output_sequences = self.generator.generate(\n generator_input_ids,\n **model_kwargs,\n ) # n_docs * n_beam, tgt_len\n if do_deduplication:\n # do_deduplication, max_output_len\n output_sequences = torch.stack(list({str(k.tolist()): k for k in output_sequences}.values()))\n\n num_candidates = output_sequences.shape[\n 0\n ] # after deduplication, this number can be less than n_docs*n_beam\n\n # then, run model forwards to get nll scores:\n if input_ids is not None:\n new_input_ids = input_ids[index : index + 1].repeat(num_candidates, 1)\n outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True)\n else: # input_ids is None, need context_input_ids/mask and doc_scores\n assert (\n context_attention_mask is not None\n ), \"Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function.\"\n assert (\n doc_scores is not None\n ), \"Make sure that `doc_scores` are passed, if no `input_ids` is set. 
Alternatively, you can set a retriever using the `set_retriever(...)` function.\"\n\n individual_input_ids = generator_input_ids.repeat(\n num_candidates, 1\n ) # (num_candidates*n_docs, max_len)\n\n individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs]\n individual_attention_mask = individual_attention_mask.repeat(num_candidates, 1)\n\n individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs]\n individual_doc_scores = individual_doc_scores.repeat(num_candidates, 1) # [num_candidates, n_docs]\n\n outputs = self(\n context_input_ids=individual_input_ids,\n context_attention_mask=individual_attention_mask,\n doc_scores=individual_doc_scores,\n labels=output_sequences,\n exclude_bos_score=True,\n )\n\n top_cand_inds = (-outputs[\"loss\"]).topk(num_doc_return_sequences)[1]\n\n # add hypothesis\n hypos.append(output_sequences[top_cand_inds])\n\n return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id)\n\n def get_nll(\n self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None\n ):\n # shift tokens left\n target = torch.cat(\n [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1\n )\n\n n_docs = n_docs if n_docs is not None else self.config.n_docs\n\n # bos_token_id is None for T5\n bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id\n use_bos = bos_token_id is not None and target[:, 0].eq(bos_token_id).all()\n\n def _mask_pads(ll, smooth_obj):\n pad_mask = target.eq(self.config.generator.pad_token_id)\n if pad_mask.any():\n ll.masked_fill_(pad_mask, 0.0)\n smooth_obj.masked_fill_(pad_mask, 0.0)\n return ll.squeeze(-1), smooth_obj.squeeze(-1)\n\n # seq_logits dim = (batch*n_docs, tgt_len , #vocabs)\n seq_logprobs = torch.nn.functional.log_softmax(seq_logits, dim=-1).view(\n seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)\n ) # batch_size x n_docs x tgt_len x #vocab_size\n doc_logprobs = torch.nn.functional.log_softmax(doc_scores, dim=1).unsqueeze(-1).unsqueeze(-1)\n\n # RAG-sequence marginalization\n first_token_scores = seq_logprobs[:, :, :1, :]\n second_token_scores = seq_logprobs[:, :, 1:2, :]\n remainder = seq_logprobs[:, :, 2:, :]\n rag_logprobs = torch.cat([first_token_scores, second_token_scores + doc_logprobs, remainder], dim=2)\n\n # calculate loss\n target = target.unsqueeze(1).unsqueeze(-1).repeat(1, n_docs, 1, 1)\n assert target.dim() == rag_logprobs.dim()\n\n ll = rag_logprobs.gather(dim=-1, index=target)\n smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits\n\n ll, smooth_obj = _mask_pads(ll, smooth_obj)\n\n # sum over tokens, exclude bos while scoring\n ll = ll[:, :, 1:].sum(2) if exclude_bos_score and use_bos else ll.sum(2)\n smooth_obj = smooth_obj.sum(2)\n ll = ll.logsumexp(1) # logsumexp over docs\n smooth_obj = smooth_obj.logsumexp(1)\n\n nll_loss = -ll\n smooth_loss = -smooth_obj\n\n if reduce_loss:\n nll_loss = nll_loss.sum()\n smooth_loss = smooth_loss.sum()\n\n eps_i = epsilon / rag_logprobs.size(-1)\n loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss\n return loss\n\n @staticmethod\n def _cat_and_pad(tensors, pad_token_id):\n output = (\n tensors[0].new(sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])).fill_(pad_token_id)\n )\n ind = 0\n for t in tensors:\n output[ind : ind + t.shape[0], : t.shape[1]] = t\n ind += t.shape[0]\n return 
output\n\n\n@add_start_docstrings_to_model_forward(\n \"\"\"\n A RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass.\n \"\"\",\n RAG_START_DOCSTRING,\n)\nclass RagTokenForGeneration(RagPreTrainedModel):\n def __init__(\n self,\n config: Optional[PretrainedConfig] = None,\n question_encoder: Optional[PreTrainedModel] = None,\n generator: Optional[PreTrainedModel] = None,\n retriever: Optional = None,\n **kwargs,\n ):\n assert config is not None or (\n question_encoder is not None and generator is not None\n ), \"Either a configuration or an encoder and a generator has to be provided.\"\n\n if config is None:\n config = RagConfig.from_question_encoder_generator_configs(\n question_encoder.config, generator.config, **kwargs\n )\n\n super().__init__(config)\n\n # instantiate model\n self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)\n\n def set_retriever(self, retriever: RagRetriever):\n self.rag.retriever = retriever\n\n def prepare_inputs_for_generation(\n self,\n decoder_input_ids,\n past=None,\n attention_mask=None,\n use_cache=None,\n encoder_outputs=None,\n doc_scores=None,\n n_docs=None,\n **kwargs\n ):\n if past is not None:\n # if past is defined use only last decoder_input_ids\n decoder_input_ids = decoder_input_ids[:, -1:]\n\n return {\n \"input_ids\": None,\n \"encoder_outputs\": encoder_outputs,\n \"doc_scores\": doc_scores,\n \"context_attention_mask\": attention_mask,\n \"decoder_input_ids\": decoder_input_ids,\n \"past_key_values\": past,\n \"use_cache\": use_cache,\n \"do_marginalize\": True,\n \"n_docs\": n_docs,\n }\n\n @property\n def retriever(self):\n return self.rag.retriever\n\n @property\n def generator(self):\n return self.rag.generator\n\n @property\n def question_encoder(self):\n return self.rag.question_encoder\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n \"\"\"Reorders cache for generation. 
BART-inspired but we need to take care of the extra dimension for docs\"\"\"\n\n def _reorder_stacked(hidden_states, new_order):\n n_docs = hidden_states.shape[0] // new_order.shape[0]\n hidden_states = hidden_states.view(-1, n_docs, *hidden_states.shape[1:])\n hidden_states = hidden_states.index_select(0, new_order)\n result = hidden_states.view(-1, *hidden_states.shape[2:])\n return result\n\n reordered_past = ()\n for layer_past in past:\n # get the correct batch idx from decoder layer's batch dim for cross and self-attn\n reordered_past += (tuple(_reorder_stacked(past_state, beam_idx) for past_state in layer_past),)\n\n return reordered_past\n\n def marginalize(self, seq_logits, doc_scores, n_docs=None):\n\n n_docs = n_docs if n_docs is not None else self.config.n_docs\n\n # RAG-token marginalization\n seq_logprobs = torch.nn.functional.log_softmax(seq_logits, dim=-1).view(\n seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)\n )\n doc_logprobs = torch.log_softmax(doc_scores, dim=1)\n log_prob_sum = seq_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1)\n return torch.logsumexp(log_prob_sum, dim=1)\n\n @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_outputs=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n past_key_values=None,\n context_input_ids=None,\n context_attention_mask=None,\n doc_scores=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n output_retrieved=None,\n do_marginalize=None,\n reduce_loss=None,\n labels=None,\n n_docs=None,\n **kwargs # needs kwargs for generation\n ):\n r\"\"\"\n do_marginalize (:obj:`bool`, `optional`):\n If :obj:`True`, the logits are marginalized over all documents by making use of\n ``torch.nn.functional.log_softmax``.\n reduce_loss (:obj:`bool`, `optional`):\n Only relevant if ``labels`` is passed. If :obj:`True`, the NLL loss is reduced using the\n ``torch.Tensor.sum`` operation.\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Legacy dictionary, which is required so that model can use `generate()` function.\n\n Returns:\n\n Example::\n\n >>> from transformers import RagTokenizer, RagRetriever, RagTokenForGeneration\n >>> import torch\n\n >>> tokenizer = RagTokenizer.from_pretrained(\"facebook/rag-token-nq\")\n >>> retriever = RagRetriever.from_pretrained(\"facebook/rag-token-nq\", index_name=\"exact\", use_dummy_dataset=True)\n >>> # initialize with RagRetriever to do everything in one forward call\n >>> model = RagTokenForGeneration.from_pretrained(\"facebook/rag-token-nq\", retriever=retriever)\n\n >>> inputs = tokenizer(\"How many people live in Paris?\", return_tensors=\"pt\")\n >>> with tokenizer.as_target_tokenizer():\n ... targets = tokenizer(\"In Paris, there are 10 million people.\", return_tensors=\"pt\")\n >>> input_ids = inputs[\"input_ids\"]\n >>> labels = targets[\"input_ids\"]\n >>> outputs = model(input_ids=input_ids, labels=labels)\n\n >>> # or use retriever separately\n >>> model = RagTokenForGeneration.from_pretrained(\"facebook/rag-token-nq\", use_dummy_dataset=True)\n >>> # 1. Encode\n >>> question_hidden_states = model.question_encoder(input_ids)[0]\n >>> # 2. 
Retrieve\n >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors=\"pt\")\n >>> doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), docs_dict[\"retrieved_doc_embeds\"].float().transpose(1, 2)).squeeze(1)\n >>> # 3. Forward to generator\n >>> outputs = model(context_input_ids=docs_dict[\"context_input_ids\"], context_attention_mask=docs_dict[\"context_attention_mask\"], doc_scores=doc_scores, decoder_input_ids=labels)\n\n >>> # or directly generate\n >>> generated = model.generate(context_input_ids=docs_dict[\"context_input_ids\"], context_attention_mask=docs_dict[\"context_attention_mask\"], doc_scores=doc_scores)\n >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)\n \"\"\"\n n_docs = n_docs if n_docs is not None else self.config.n_docs\n do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize\n reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss\n\n if labels is not None:\n if decoder_input_ids is None:\n decoder_input_ids = labels\n use_cache = False\n\n outputs = self.rag(\n input_ids=input_ids,\n attention_mask=attention_mask,\n encoder_outputs=encoder_outputs,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n context_input_ids=context_input_ids,\n context_attention_mask=context_attention_mask,\n doc_scores=doc_scores,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n output_retrieved=output_retrieved,\n n_docs=n_docs,\n )\n\n loss = None\n logits = outputs.logits\n if labels is not None:\n assert decoder_input_ids is not None\n loss = self.get_nll(\n outputs.logits,\n outputs.doc_scores,\n labels,\n reduce_loss=reduce_loss,\n epsilon=self.config.label_smoothing,\n n_docs=n_docs,\n )\n\n if do_marginalize:\n logits = self.marginalize(logits, outputs.doc_scores, n_docs)\n\n return RetrievAugLMMarginOutput(\n loss=loss,\n logits=logits,\n doc_scores=outputs.doc_scores,\n past_key_values=outputs.past_key_values,\n context_input_ids=outputs.context_input_ids,\n context_attention_mask=outputs.context_attention_mask,\n retrieved_doc_embeds=outputs.retrieved_doc_embeds,\n retrieved_doc_ids=outputs.retrieved_doc_ids,\n question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,\n question_enc_hidden_states=outputs.question_enc_hidden_states,\n question_enc_attentions=outputs.question_enc_attentions,\n generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,\n generator_enc_hidden_states=outputs.generator_enc_hidden_states,\n generator_enc_attentions=outputs.generator_enc_attentions,\n generator_dec_hidden_states=outputs.generator_dec_hidden_states,\n generator_dec_attentions=outputs.generator_dec_attentions,\n generator_cross_attentions=outputs.generator_cross_attentions,\n )\n\n @torch.no_grad()\n def generate(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n context_input_ids=None,\n context_attention_mask=None,\n doc_scores=None,\n max_length=None,\n min_length=None,\n early_stopping=None,\n use_cache=None,\n num_beams=None,\n num_beam_groups=None,\n diversity_penalty=None,\n bos_token_id=None,\n pad_token_id=None,\n eos_token_id=None,\n length_penalty=None,\n no_repeat_ngram_size=None,\n encoder_no_repeat_ngram_size=None,\n repetition_penalty=None,\n bad_words_ids=None,\n num_return_sequences=None,\n 
decoder_start_token_id=None,\n n_docs=None,\n prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]] = None,\n forced_bos_token_id: Optional[int] = None,\n forced_eos_token_id: Optional[int] = None,\n remove_invalid_values: Optional[bool] = None,\n **model_kwargs\n ):\n \"\"\"\n Implements RAG token decoding.\n\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n The sequence used as a prompt for the generation. If :obj:`input_ids` is not passed, then\n :obj:`context_input_ids` has to be provided.\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n context_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):\n Input IDs post-processed from the retrieved documents and the question encoder :obj:`input_ids` by the\n retriever.\n\n If the model is not initialized with a ``retriever``, :obj:`context_input_ids` has to be provided\n to the forward pass. :obj:`context_input_ids` are returned by\n :meth:`~transformers.RagRetriever.__call__`.\n context_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size * config.n_docs, config.max_combined_length)`, `optional`, returned when `output_retrieved=True`):\n Attention mask post-processed from the retrieved documents and the question encoder :obj:`input_ids` by\n the retriever.\n\n If the model is not initialized with a ``retriever``, :obj:`context_attention_mask` has to be provided\n to the forward pass. :obj:`context_attention_mask` are returned by\n :meth:`~transformers.RagRetriever.__call__`.\n doc_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.n_docs)`):\n Score between each retrieved document embeddings (see :obj:`retrieved_doc_embeds`) and\n :obj:`question_encoder_last_hidden_state`.\n\n If the model is not initialized with a ``retriever``, :obj:`doc_scores` has to be provided to the\n forward pass. :obj:`doc_scores` can be computed via :obj:`question_encoder_last_hidden_state` and\n :obj:`retrieved_doc_embeds`; see the examples for more information.\n max_length (:obj:`int`, `optional`, defaults to 20):\n The maximum length of the sequence to be generated.\n min_length (:obj:`int`, `optional`, defaults to 10):\n The minimum length of the sequence to be generated.\n early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to stop the beam search when at least ``num_beams`` sentences are finished per batch.\n use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not the model should use the past last key/values attentions (if applicable to the model) to\n speed up decoding.\n pad_token_id (:obj:`int`, `optional`):\n The id of the `padding` token.\n bos_token_id (:obj:`int`, `optional`):\n The id of the `beginning-of-sequence` token.\n eos_token_id (:obj:`int`, `optional`):\n The id of the `end-of-sequence` token.\n length_penalty (:obj:`float`, `optional`, defaults to 1.0):\n Exponential penalty to the length. 
1.0 means no penalty.\n\n Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in\n order to encourage the model to produce longer sequences.\n no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):\n If set to int > 0, all ngrams of that size can only occur once.\n encoder_no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):\n If set to int > 0, all ngrams of that size that occur in the ``encoder_input_ids`` cannot occur in the\n ``decoder_input_ids``.\n bad_words_ids(:obj:`List[int]`, `optional`):\n List of token ids that are not allowed to be generated. In order to get the tokens of the words that\n should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`.\n num_beams (:obj:`int`, `optional`, defaults to 1):\n Number of beams for beam search. 1 means no beam search.\n num_beam_groups (:obj:`int`, `optional`, defaults to 1):\n Number of groups to divide :obj:`num_beams` into in order to ensure diversity among different groups of\n beams. `this paper <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details.\n diversity_penalty (:obj:`float`, `optional`, defaults to 0.0):\n This value is subtracted from a beam's score if it generates a token same as any beam from other group\n at a particular time. Note that :obj:`diversity_penalty` is only effective if ``group beam search`` is\n enabled.\n num_return_sequences(:obj:`int`, `optional`, defaults to 1):\n The number of independently computed returned sequences for each element in the batch. Note that this\n is not the value we pass to the ``generator``'s `:func:`~transformers.PreTrainedModel.generate`\n function, where we set ``num_return_sequences`` to :obj:`num_beams`.\n decoder_start_token_id (:obj:`int`, `optional`):\n If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token.\n n_docs (:obj:`int`, `optional`, defaults to :obj:`config.n_docs`)\n Number of documents to retrieve and/or number of documents for which to generate an answer.\n prefix_allowed_tokens_fn: (:obj:`Callable[[int, torch.Tensor], List[int]]`, `optional`):\n If provided, this function constraints the beam search to allowed tokens only at each step. If not\n provided no constraint is applied. This function takes 2 arguments :obj:`inputs_ids` and the batch ID\n :obj:`batch_id`. It has to return a list with the allowed tokens for the next generation step\n conditioned on the previously generated tokens :obj:`inputs_ids` and the batch ID :obj:`batch_id`. This\n argument is useful for constrained generation conditioned on the prefix, as described in\n `Autoregressive Entity Retrieval <https://arxiv.org/abs/2010.00904>`__.\n forced_bos_token_id (:obj:`int`, `optional`):\n The id of the token to force as the first generated token after the :obj:`decoder_start_token_id`.\n Useful for multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token\n needs to be the target language token.\n forced_eos_token_id (:obj:`int`, `optional`):\n The id of the token to force as the last generated token when :obj:`max_length` is reached.\n remove_invalid_values (:obj:`bool`, `optional`):\n Whether to remove possible `nan` and `inf` outputs of the model to prevent the generation method to\n crash. Note that using ``remove_invalid_values`` can slow down generation.\n\n Return:\n :obj:`torch.LongTensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`: The generated\n sequences. 
The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all\n batches finished early due to the :obj:`eos_token_id`.\n \"\"\"\n # set default parameters\n n_docs = n_docs if n_docs is not None else self.config.n_docs\n num_beams = num_beams if num_beams is not None else self.config.num_beams\n num_beam_groups = num_beam_groups if num_beam_groups is not None else self.config.num_beam_groups\n max_length = max_length if max_length is not None else self.config.max_length\n num_return_sequences = (\n num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences\n )\n bos_token_id = bos_token_id if bos_token_id is not None else self.config.generator.bos_token_id\n eos_token_id = eos_token_id if eos_token_id is not None else self.config.generator.eos_token_id\n pad_token_id = pad_token_id if pad_token_id is not None else self.config.generator.pad_token_id\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n decoder_start_token_id = (\n decoder_start_token_id\n if decoder_start_token_id is not None\n else self.config.generator.decoder_start_token_id\n )\n remove_invalid_values = (\n remove_invalid_values if remove_invalid_values is not None else self.config.remove_invalid_values\n )\n\n # retrieve docs\n if self.retriever is not None and context_input_ids is None:\n question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]\n out = self.retriever(\n input_ids,\n question_hidden_states.cpu().detach().to(torch.float32).numpy(),\n prefix=self.generator.config.prefix,\n n_docs=n_docs,\n return_tensors=\"pt\",\n )\n context_input_ids, context_attention_mask, retrieved_doc_embeds = (\n out[\"context_input_ids\"],\n out[\"context_attention_mask\"],\n out[\"retrieved_doc_embeds\"],\n )\n\n # set to correct device\n retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)\n context_input_ids = context_input_ids.to(input_ids)\n context_attention_mask = context_attention_mask.to(input_ids)\n\n # compute doc_scores\n doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(\n 1\n )\n\n assert (\n context_input_ids.shape[0] % n_docs\n ) == 0, f\" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is {context_input_ids.shape[0]}.\"\n\n # batch_size\n batch_size = context_input_ids.shape[0] // n_docs\n\n encoder = self.rag.generator.get_encoder()\n encoder_outputs = encoder(input_ids=context_input_ids, attention_mask=context_attention_mask, return_dict=True)\n\n input_ids = torch.full(\n (batch_size * num_beams, 1),\n decoder_start_token_id,\n dtype=torch.long,\n device=next(self.parameters()).device,\n )\n last_hidden_state = encoder_outputs[\"last_hidden_state\"]\n\n def extend_enc_output(tensor, num_beams=None):\n # split into `batch_size`, `num_beams`, `num_docs`\n tensor = tensor[None, None, :].reshape((batch_size, 1, n_docs) + tensor.shape[1:])\n # repeat same last hidden states over `num_beams` dimension\n tensor = tensor.expand((batch_size, num_beams, n_docs) + tensor.shape[3:])\n # merge `batch_size`, `num_beams`, `num_docs` dims again\n return tensor.reshape((batch_size * num_beams * n_docs,) + tensor.shape[3:])\n\n # correctly extend last_hidden_state and attention mask\n context_attention_mask = extend_enc_output(context_attention_mask, num_beams=num_beams)\n encoder_outputs[\"last_hidden_state\"] = extend_enc_output(last_hidden_state, num_beams=num_beams)\n\n doc_scores = 
doc_scores.repeat_interleave(num_beams, dim=0)\n\n # define start_len & additional parameters\n model_kwargs[\"doc_scores\"] = doc_scores\n model_kwargs[\"encoder_outputs\"] = encoder_outputs\n model_kwargs[\"attention_mask\"] = context_attention_mask\n model_kwargs[\"n_docs\"] = n_docs\n\n pre_processor = self._get_logits_processor(\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=no_repeat_ngram_size,\n encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size,\n encoder_input_ids=context_input_ids,\n bad_words_ids=bad_words_ids,\n min_length=min_length,\n max_length=max_length,\n eos_token_id=eos_token_id,\n forced_bos_token_id=forced_bos_token_id,\n forced_eos_token_id=forced_eos_token_id,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n num_beams=num_beams,\n num_beam_groups=num_beam_groups,\n diversity_penalty=diversity_penalty,\n remove_invalid_values=remove_invalid_values,\n )\n\n if num_beams == 1:\n if num_return_sequences > 1:\n raise ValueError(\n f\"num_return_sequences has to be 1, but is {num_return_sequences} when doing greedy search.\"\n )\n return self.greedy_search(\n input_ids,\n logits_processor=pre_processor,\n max_length=max_length,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n **model_kwargs,\n )\n elif num_beams > 1:\n length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty\n early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping\n if num_return_sequences > num_beams:\n raise ValueError(\"`num_return_sequences` has to be smaller or equal to `num_beams`.\")\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n max_length=max_length,\n num_beams=num_beams,\n device=self.device,\n length_penalty=length_penalty,\n do_early_stopping=early_stopping,\n num_beam_hyps_to_keep=num_return_sequences,\n )\n return self.beam_search(\n input_ids,\n beam_scorer,\n logits_processor=pre_processor,\n max_length=max_length,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n **model_kwargs,\n )\n else:\n raise ValueError(f\"`num_beams` has to be an integer strictly superior to 0 (≥ 1), but is {num_beams}\")\n\n def get_input_embeddings(self):\n return self.rag.generator.get_input_embeddings()\n\n def get_output_embeddings(self):\n return self.rag.generator.get_output_embeddings()\n\n def set_output_embeddings(self, new_embeddings):\n return self.rag.generator.set_output_embeddings(new_embeddings)\n\n def shift_tokens_right(self, input_ids, start_token_id=None):\n \"\"\"Shift input ids one token to the right, and pad with start_token_id\"\"\"\n if start_token_id is None:\n start_token_id = self.config.decoder_start_token_id\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()\n shifted_input_ids[:, 0] = start_token_id\n return shifted_input_ids\n\n def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None):\n n_docs = n_docs if n_docs is not None else self.config.n_docs\n # shift tokens left\n target = torch.cat(\n [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1\n )\n\n def _mask_pads(ll, smooth_obj):\n pad_mask = target.eq(self.config.generator.pad_token_id)\n if pad_mask.any():\n ll.masked_fill_(pad_mask, 0.0)\n smooth_obj.masked_fill_(pad_mask, 0.0)\n return ll.squeeze(-1), smooth_obj.squeeze(-1)\n\n rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs)\n\n target = target.unsqueeze(-1)\n assert target.dim() 
== rag_logprobs.dim()\n\n ll = rag_logprobs.gather(dim=-1, index=target)\n smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True) # total sum of all (normalised) logits\n ll, smooth_obj = _mask_pads(ll, smooth_obj)\n ll = ll.sum(1) # sum over tokens\n smooth_obj = smooth_obj.sum(1)\n\n nll_loss = -ll\n smooth_loss = -smooth_obj\n\n if reduce_loss:\n nll_loss = nll_loss.sum()\n smooth_loss = smooth_loss.sum()\n\n eps_i = epsilon / rag_logprobs.size(-1)\n loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss\n return loss\n" ]
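Editorial note: the `get_nll` above calls a `marginalize` helper that combines generator token log-probabilities with the retrieval `doc_scores`; that helper is defined elsewhere in the file, so the following is only a minimal reconstruction of such a RAG-style marginalization, with the shapes in the comments assumed rather than taken from this excerpt.

    import torch

    def marginalize(seq_logits, doc_scores, n_docs):
        # seq_logits: (batch * n_docs, seq_len, vocab); doc_scores: (batch, n_docs)
        seq_logprobs = torch.log_softmax(seq_logits, dim=-1).view(
            seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1))
        doc_logprobs = torch.log_softmax(doc_scores, dim=1)
        # add each document's retrieval log-prob to every token position...
        log_prob_sum = seq_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1)
        # ...then sum the joint probabilities over documents in log space
        return torch.logsumexp(log_prob_sum, dim=1)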
[ [ "torch.nn.Softmax", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.zeros", "torch.cat", "torch.einsum", "torch.nn.Embedding", "torch.nn.LayerNorm", "torch.nn.Tanh", "torch.nn.Linear", "torch.matmul", "torch.tanh", "torch.tensor", "torch.arange", "torch.cumsum", "torch.nn.MSELoss" ], [ "torch.nn.functional.log_softmax", "torch.cat", "torch.log_softmax", "torch.no_grad", "torch.logsumexp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tum-ei-eda/transform_allocs_static
[ "9639eb5178e8135086511f5f1759b2a996c0236b" ]
[ "python/offseter.py" ]
[ "#!/home/osm/Documents/EI-EDA/ml_on_mcu/venv/bin/python3.7 \n#Change the venv path above.\n\n# Add offset to a given datasheet.\n# Two methods : \n# - Give offset for every ID (every malloc call) --> MORE Memory\n# - Give offset for every pointer (every container) -->Less Memory.\n# Datasheet format:\n# ID2.0|Size|firstAlloc|lastFree|Pointer\n\nimport argparse\nimport pandas as pd\nimport numpy as np\n\n# Import the datasheet\ndef import_sheet(path):\n df = pd.read_excel(path)\n df.columns = df.columns.str.replace(\" \", \"\") # Clean the datasheet\n df = df.set_index(\"ID2.0\") \n return df \n# Save the datasheet\ndef save_sheet(name,df):\n df.to_excel(name[:len(name)-5]+\"_updated.xlsx\")\n\n#Output offset array on terminal.\ndef offseter(df):\n #reference adding base pointer\n cell_ref= df['Pointer'].min()\n df['Offset_1'] = 0 #first method\n df['Offset_2'] = 0 #second method\n\n #Use buffer for every malloc call with appropriate size.\n global_size= 0 \n for index, row in df.iterrows():\n df.loc[index,'Offset_1']= global_size\n global_size+=row['Size']\n output_string =''\n for off in df['Offset_1']:\n output_string+=str(off)+\",\"\n\n #pretty print for hook\n print('=============================OFFSET_1==================================')\n print(output_string[:len(output_string)-1])\n print(\"Global Size required: \",global_size)\n print('=============================OFFSET_2==================================')\n\n #Use buffer for each unique pointer with appropriate size.\n max_mem = 0 \n for i in df['Pointer'].unique():\n df_current = df[df['Pointer'] == i]\n df.loc[(df.Pointer == i) , 'Offset_2'] = max_mem\n max_mem += df_current['Size'].max()\n\n #pretty print for hook \n output_string =''\n for off in df['Offset_2']:\n output_string+=str(off)+\",\"\n print(output_string[:len(output_string)-1])\n print(\"Global Size required: \",max_mem)\n return df\n\ndef main ():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"input_sheet_file\",\n help=\n \"The path to a datasheet containing pointer lifetimes and sizes, make sure that the format respects the following layout: 'ID2.0|Size|firstAlloc|lastFree|Pointer' of types 'int|int|int|int|str'\",\n type=str)\n parser.add_argument(\n \"-s\",\n \"--save_bool\",\n help=\n \"Saves the updated datasheet automatically into /datasheet_updated.xlsx. Defaults to False.\",\n action='store_true')\n args = parser.parse_args()\n df = import_sheet(args.input_sheet_file)\n df = offseter(df)\n if (args.save_bool):\n save_sheet(args.input_sheet_file,df)\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "pandas.read_excel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
yangheng95/LCF-ABSA
[ "0eeb4788269a498d34c2aff942e03af78026617e", "0eeb4788269a498d34c2aff942e03af78026617e", "0eeb4788269a498d34c2aff942e03af78026617e" ]
[ "pyabsa/core/tc/prediction/text_classifier.py", "pyabsa/core/atepc/training/atepc_trainer.py", "pyabsa/core/apc/classic/__glove__/models/asgcn.py" ]
[ "# -*- coding: utf-8 -*-\r\n# file: text_classifier.py\r\n# author: yangheng <[email protected]>\r\n# Copyright (C) 2020. All Rights Reserved.\r\nimport json\r\nimport os\r\nimport pickle\r\nimport random\r\n\r\nimport autocuda\r\nimport numpy\r\nimport torch\r\nimport tqdm\r\nfrom findfile import find_file\r\nfrom termcolor import colored\r\nfrom torch.utils.data import DataLoader\r\nfrom transformers import AutoTokenizer, AutoModel, AutoConfig, DebertaV2ForMaskedLM, RobertaForMaskedLM, BertForMaskedLM\r\n\r\nfrom pyabsa.functional.dataset import detect_infer_dataset\r\n\r\nfrom ..models import GloVeTCModelList, BERTTCModelList\r\nfrom ..classic.__glove__.dataset_utils.data_utils_for_inference import GloVeTCDataset\r\nfrom ..classic.__bert__.dataset_utils.data_utils_for_inference import BERTClassificationDataset, Tokenizer4Pretraining\r\n\r\nfrom ..classic.__glove__.dataset_utils.data_utils_for_training import LABEL_PADDING, build_embedding_matrix, build_tokenizer\r\n\r\nfrom pyabsa.utils.pyabsa_utils import print_args, TransformerConnectionError\r\n\r\n\r\ndef get_mlm_and_tokenizer(text_classifier, config):\r\n if isinstance(text_classifier, TextClassifier):\r\n base_model = text_classifier.model.bert.base_model\r\n else:\r\n base_model = text_classifier.bert.base_model\r\n pretrained_config = AutoConfig.from_pretrained(config.pretrained_bert)\r\n if 'deberta-v3' in config.pretrained_bert:\r\n MLM = DebertaV2ForMaskedLM(pretrained_config)\r\n MLM.deberta = base_model\r\n elif 'roberta' in config.pretrained_bert:\r\n MLM = RobertaForMaskedLM(pretrained_config)\r\n MLM.roberta = base_model\r\n else:\r\n MLM = BertForMaskedLM(pretrained_config)\r\n MLM.bert = base_model\r\n return MLM, AutoTokenizer.from_pretrained(config.pretrained_bert)\r\n\r\n\r\nclass TextClassifier:\r\n def __init__(self, model_arg=None, cal_perplexity=False, eval_batch_size=128):\r\n '''\r\n from_train_model: load inferring_tutorials model from trained model\r\n '''\r\n self.cal_perplexity = cal_perplexity\r\n # load from a training\r\n if not isinstance(model_arg, str):\r\n print('Load text classifier from training')\r\n self.model = model_arg[0]\r\n self.opt = model_arg[1]\r\n self.tokenizer = model_arg[2]\r\n else:\r\n try:\r\n if 'fine-tuned' in model_arg:\r\n raise ValueError('Do not support to directly load a fine-tuned model, please load a .state_dict or .model instead!')\r\n print('Load text classifier from', model_arg)\r\n state_dict_path = find_file(model_arg, key='.state_dict', exclude_key=['__MACOSX'])\r\n model_path = find_file(model_arg, key='.model', exclude_key=['__MACOSX'])\r\n tokenizer_path = find_file(model_arg, key='.tokenizer', exclude_key=['__MACOSX'])\r\n config_path = find_file(model_arg, key='.config', exclude_key=['__MACOSX'])\r\n\r\n print('config: {}'.format(config_path))\r\n print('state_dict: {}'.format(state_dict_path))\r\n print('model: {}'.format(model_path))\r\n print('tokenizer: {}'.format(tokenizer_path))\r\n\r\n with open(config_path, mode='rb') as f:\r\n self.opt = pickle.load(f)\r\n\r\n if state_dict_path or model_path:\r\n if hasattr(BERTTCModelList, self.opt.model.__name__):\r\n if state_dict_path:\r\n self.bert = AutoModel.from_pretrained(self.opt.pretrained_bert)\r\n self.model = self.opt.model(self.bert, self.opt)\r\n self.model.load_state_dict(torch.load(state_dict_path, map_location='cpu'))\r\n elif model_path:\r\n self.model = torch.load(model_path, map_location='cpu')\r\n\r\n try:\r\n self.tokenizer = Tokenizer4Pretraining(max_seq_len=self.opt.max_seq_len, 
opt=self.opt)\r\n except ValueError:\r\n if tokenizer_path:\r\n with open(tokenizer_path, mode='rb') as f:\r\n self.tokenizer = pickle.load(f)\r\n else:\r\n raise TransformerConnectionError()\r\n else:\r\n tokenizer = build_tokenizer(\r\n dataset_list=self.opt.dataset_file,\r\n max_seq_len=self.opt.max_seq_len,\r\n dat_fname='{0}_tokenizer.dat'.format(os.path.basename(self.opt.dataset_name)),\r\n opt=self.opt\r\n )\r\n if model_path:\r\n self.model = torch.load(model_path, map_location='cpu')\r\n else:\r\n embedding_matrix = build_embedding_matrix(\r\n word2idx=tokenizer.word2idx,\r\n embed_dim=self.opt.embed_dim,\r\n dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(self.opt.embed_dim), os.path.basename(self.opt.dataset_name)),\r\n opt=self.opt\r\n )\r\n self.model = self.opt.model(embedding_matrix, self.opt).to(self.opt.device)\r\n self.model.load_state_dict(torch.load(state_dict_path, map_location='cpu'))\r\n\r\n self.tokenizer = tokenizer\r\n\r\n print('Config used in Training:')\r\n print_args(self.opt, mode=1)\r\n\r\n except Exception as e:\r\n raise RuntimeError('Exception: {} Fail to load the model from {}! '.format(e, model_arg))\r\n\r\n if not hasattr(GloVeTCModelList, self.opt.model.__name__) \\\r\n and not hasattr(BERTTCModelList, self.opt.model.__name__):\r\n raise KeyError('The checkpoint you are loading is not from classifier model.')\r\n\r\n if hasattr(BERTTCModelList, self.opt.model.__name__):\r\n self.dataset = BERTClassificationDataset(tokenizer=self.tokenizer, opt=self.opt)\r\n\r\n elif hasattr(GloVeTCModelList, self.opt.model.__name__):\r\n self.dataset = GloVeTCDataset(tokenizer=self.tokenizer, opt=self.opt)\r\n\r\n self.infer_dataloader = None\r\n self.opt.eval_batch_size = eval_batch_size\r\n\r\n if self.opt.seed is not None:\r\n random.seed(self.opt.seed)\r\n numpy.random.seed(self.opt.seed)\r\n torch.manual_seed(self.opt.seed)\r\n torch.cuda.manual_seed(self.opt.seed)\r\n torch.backends.cudnn.deterministic = True\r\n torch.backends.cudnn.benchmark = False\r\n\r\n self.opt.initializer = self.opt.initializer\r\n\r\n if cal_perplexity:\r\n try:\r\n self.MLM, self.MLM_tokenizer = get_mlm_and_tokenizer(self, self.opt)\r\n except Exception as e:\r\n self.MLM, self.MLM_tokenizer = None, None\r\n\r\n def to(self, device=None):\r\n self.opt.device = device\r\n self.model.to(device)\r\n if hasattr(self, 'MLM'):\r\n self.MLM.to(self.opt.device)\r\n\r\n def cpu(self):\r\n self.opt.device = 'cpu'\r\n self.model.to('cpu')\r\n if hasattr(self, 'MLM'):\r\n self.MLM.to('cpu')\r\n\r\n def cuda(self, device='cuda:0'):\r\n self.opt.device = device\r\n self.model.to(device)\r\n if hasattr(self, 'MLM'):\r\n self.MLM.to(device)\r\n\r\n def _log_write_args(self):\r\n n_trainable_params, n_nontrainable_params = 0, 0\r\n for p in self.model.parameters():\r\n n_params = torch.prod(torch.tensor(p.shape))\r\n if p.requires_grad:\r\n n_trainable_params += n_params\r\n else:\r\n n_nontrainable_params += n_params\r\n print(\r\n 'n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))\r\n for arg in vars(self.opt):\r\n if getattr(self.opt, arg) is not None:\r\n print('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))\r\n\r\n def batch_infer(self,\r\n target_file=None,\r\n print_result=True,\r\n save_result=False,\r\n clear_input_samples=True,\r\n ignore_error=True):\r\n\r\n if clear_input_samples:\r\n self.clear_input_samples()\r\n\r\n save_path = os.path.join(os.getcwd(), 'text_classification.result.json')\r\n\r\n target_file = 
detect_infer_dataset(target_file, task='text_classification')\r\n if not target_file:\r\n raise FileNotFoundError('Can not find inference datasets!')\r\n\r\n self.dataset.prepare_infer_dataset(target_file, ignore_error=ignore_error)\r\n self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, pin_memory=True, shuffle=False)\r\n return self._infer(save_path=save_path if save_result else None, print_result=print_result)\r\n\r\n def infer(self, text: str = None,\r\n print_result=True,\r\n ignore_error=True,\r\n clear_input_samples=True):\r\n\r\n if clear_input_samples:\r\n self.clear_input_samples()\r\n if text:\r\n self.dataset.prepare_infer_sample(text, ignore_error=ignore_error)\r\n else:\r\n raise RuntimeError('Please specify your datasets path!')\r\n self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, shuffle=False)\r\n return self._infer(print_result=print_result)[0]\r\n\r\n def _infer(self, save_path=None, print_result=True):\r\n\r\n _params = filter(lambda p: p.requires_grad, self.model.parameters())\r\n\r\n correct = {True: 'Correct', False: 'Wrong'}\r\n results = []\r\n perplexity = 'N.A.'\r\n with torch.no_grad():\r\n self.model.eval()\r\n n_correct = 0\r\n n_labeled = 0\r\n n_total = 0\r\n if len(self.infer_dataloader.dataset) >= 100:\r\n it = tqdm.tqdm(self.infer_dataloader, postfix='inferring...')\r\n else:\r\n it = self.infer_dataloader\r\n for _, sample in enumerate(it):\r\n inputs = [sample[col].to(self.opt.device) for col in self.opt.inputs_cols if col != 'label']\r\n\r\n outputs = self.model(inputs)\r\n sen_logits = outputs\r\n t_probs = torch.softmax(sen_logits, dim=-1)\r\n for i, i_probs in enumerate(t_probs):\r\n if 'index_to_label' in self.opt.args and int(i_probs.argmax(axis=-1)) in self.opt.index_to_label:\r\n sent = self.opt.index_to_label[int(i_probs.argmax(axis=-1))]\r\n if sample['label'][i] != -999:\r\n real_sent = sample['label'][i] if isinstance(sample['label'][i], str) else self.opt.index_to_label.get(int(sample['label'][i]), 'N.A.')\r\n else:\r\n real_sent = 'N.A.'\r\n if real_sent != -999 and real_sent != '-999':\r\n n_labeled += 1\r\n if sent == real_sent:\r\n n_correct += 1\r\n else: # for the former versions before 1.2.0\r\n sent = int(i_probs.argmax(axis=-1))\r\n real_sent = int(sample['label'][i])\r\n\r\n text_raw = sample['text_raw'][i]\r\n\r\n if self.cal_perplexity:\r\n ids = self.MLM_tokenizer(text_raw, return_tensors=\"pt\")\r\n ids['labels'] = ids['input_ids'].clone()\r\n ids = ids.to(self.opt.device)\r\n loss = self.MLM(**ids)['loss']\r\n perplexity = float(torch.exp(loss / ids['input_ids'].size(1)))\r\n else:\r\n perplexity = 'N.A.'\r\n\r\n results.append({\r\n 'text': text_raw,\r\n 'label': sent,\r\n 'confidence': float(max(i_probs)),\r\n 'probs': i_probs.cpu().numpy(),\r\n 'ref_label': real_sent,\r\n 'ref_check': correct[sent == real_sent] if real_sent != '-999' else '',\r\n 'perplexity': perplexity,\r\n })\r\n n_total += 1\r\n\r\n try:\r\n if print_result:\r\n for result in results:\r\n text_printing = result['text']\r\n\r\n if result['ref_label'] != -999:\r\n if result['label'] == result['ref_label']:\r\n text_info = colored(' -> <{}(ref:{} confidence:{})>'.format(result['label'], result['ref_label'], result['confidence']), 'green')\r\n else:\r\n text_info = colored(' -> <{}(ref:{}) confidence:{}>'.format(result['label'], result['ref_label'], result['confidence']), 'red')\r\n else:\r\n text_info = ' -> {}'.format(result['label'])\r\n\r\n text_printing += text_info 
+ colored('<perplexity:{}>'.format(result['perplexity']), 'yellow')\r\n print(text_printing)\r\n if save_path:\r\n with open(save_path, 'w', encoding='utf8') as fout:\r\n json.dump(str(results), fout, ensure_ascii=False)\r\n print('inference result saved in: {}'.format(save_path))\r\n except Exception as e:\r\n print('Can not save result: {}, Exception: {}'.format(text_raw, e))\r\n\r\n if len(results) > 1:\r\n print('Total samples:{}'.format(n_total))\r\n print('Labeled samples:{}'.format(n_labeled))\r\n print('Prediction Accuracy:{}%'.format(100 * n_correct / n_labeled if n_labeled else 'N.A.'))\r\n\r\n return results\r\n\r\n def clear_input_samples(self):\r\n self.dataset.all_data = []\r\n", "# -*- coding: utf-8 -*-\r\n# file: test_train_atepc.py\r\n# time: 2021/5/26 0026\r\n# author: yangheng <[email protected]>\r\n# github: https://github.com/yangheng95\r\n# Copyright (C) 2021. All Rights Reserved.\r\nimport math\r\nimport os\r\nimport pickle\r\nimport random\r\nimport re\r\nimport time\r\nfrom hashlib import sha256\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn.functional as F\r\nimport tqdm\r\nfrom seqeval.metrics import classification_report\r\nfrom sklearn.metrics import f1_score\r\nfrom torch import cuda\r\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)\r\nfrom transformers import AutoTokenizer, AutoModel\r\n\r\nfrom pyabsa.utils.file_utils import save_model\r\nfrom pyabsa.utils.pyabsa_utils import print_args, resume_from_checkpoint, retry, TransformerConnectionError, init_optimizer\r\nfrom ..dataset_utils.data_utils_for_training import ATEPCProcessor, convert_examples_to_features\r\n\r\nimport pytorch_warmup as warmup\r\n\r\ntry:\r\n import apex.amp as amp\r\n\r\n # assert torch.version.__version__ < '1.10.0'\r\n print('Use FP16 via Apex!')\r\nexcept Exception:\r\n amp = None\r\n\r\n\r\nclass Instructor:\r\n\r\n def __init__(self, opt, logger):\r\n self.warmup_scheduler = None\r\n self.lr_scheduler = None\r\n self.opt = opt\r\n self.logger = logger\r\n\r\n self.train_dataloader = None\r\n self.test_dataloader = None\r\n # if opt.use_bert_spc:\r\n # self.logger.info('Warning: The use_bert_spc is disabled for extracting aspect,'\r\n # ' reset use_bert_spc=False and go on... 
')\r\n # opt.use_bert_spc = False\r\n import warnings\r\n warnings.filterwarnings('ignore')\r\n if self.opt.gradient_accumulation_steps < 1:\r\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\r\n self.opt.gradient_accumulation_steps))\r\n\r\n self.opt.batch_size = self.opt.batch_size // self.opt.gradient_accumulation_steps\r\n\r\n random.seed(self.opt.seed)\r\n np.random.seed(self.opt.seed)\r\n torch.manual_seed(self.opt.seed)\r\n torch.cuda.manual_seed(self.opt.seed)\r\n\r\n if self.opt.model_path_to_save and not os.path.exists(self.opt.model_path_to_save):\r\n os.makedirs(self.opt.model_path_to_save)\r\n\r\n try:\r\n self.tokenizer = AutoTokenizer.from_pretrained(self.opt.pretrained_bert, do_lower_case='uncased' in self.opt.pretrained_bert)\r\n bert_base_model = AutoModel.from_pretrained(self.opt.pretrained_bert)\r\n self.opt.sep_indices = self.tokenizer.sep_token_id\r\n\r\n except ValueError as e:\r\n print('Init pretrained model failed, exception: {}'.format(e))\r\n raise TransformerConnectionError()\r\n\r\n processor = ATEPCProcessor(self.tokenizer)\r\n\r\n config_str = re.sub(r'<.*?>', '', str(sorted([str(self.opt.args[k]) for k in self.opt.args if k != 'seed'])))\r\n hash_tag = sha256(config_str.encode()).hexdigest()\r\n cache_path = '{}.{}.dataset.{}.cache'.format(self.opt.model_name, self.opt.dataset_name, hash_tag)\r\n\r\n if os.path.exists(cache_path):\r\n print('Loading dataset cache:', cache_path)\r\n with open(cache_path, mode='rb') as f:\r\n if self.opt.dataset_file['test']:\r\n self.train_data, self.test_data, opt = pickle.load(f)\r\n\r\n else:\r\n self.train_data, opt = pickle.load(f)\r\n # reset output dim according to dataset labels\r\n self.opt.polarities_dim = opt.polarities_dim\r\n\r\n else:\r\n self.train_examples = processor.get_train_examples(self.opt.dataset_file['train'], 'train')\r\n train_features = convert_examples_to_features(self.train_examples, self.opt.max_seq_len, self.tokenizer, self.opt)\r\n self.opt.label_list = sorted(list(self.opt.IOB_label_to_index.keys()))\r\n self.opt.num_labels = len(self.opt.label_list) + 1\r\n all_spc_input_ids = torch.tensor([f.input_ids_spc for f in train_features], dtype=torch.long)\r\n all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\r\n all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\r\n all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)\r\n all_valid_ids = torch.tensor([f.valid_ids for f in train_features], dtype=torch.long)\r\n all_lmask_ids = torch.tensor([f.label_mask for f in train_features], dtype=torch.long)\r\n all_polarities = torch.tensor([f.polarity for f in train_features], dtype=torch.long)\r\n lcf_cdm_vec = torch.tensor([f.lcf_cdm_vec for f in train_features], dtype=torch.float32)\r\n lcf_cdw_vec = torch.tensor([f.lcf_cdw_vec for f in train_features], dtype=torch.float32)\r\n\r\n self.train_data = TensorDataset(all_spc_input_ids, all_segment_ids, all_input_mask, all_label_ids,\r\n all_polarities, all_valid_ids, all_lmask_ids, lcf_cdm_vec, lcf_cdw_vec)\r\n\r\n if self.opt.dataset_file['test']:\r\n self.test_examples = processor.get_test_examples(self.opt.dataset_file['test'], 'test')\r\n test_features = convert_examples_to_features(self.test_examples, self.opt.max_seq_len,\r\n self.tokenizer, self.opt)\r\n all_spc_input_ids = torch.tensor([f.input_ids_spc for f in test_features], dtype=torch.long)\r\n all_input_mask = torch.tensor([f.input_mask for f 
in test_features], dtype=torch.long)\r\n all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)\r\n all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)\r\n all_polarities = torch.tensor([f.polarity for f in test_features], dtype=torch.long)\r\n all_valid_ids = torch.tensor([f.valid_ids for f in test_features], dtype=torch.long)\r\n all_lmask_ids = torch.tensor([f.label_mask for f in test_features], dtype=torch.long)\r\n lcf_cdm_vec = torch.tensor([f.lcf_cdm_vec for f in test_features], dtype=torch.float32)\r\n lcf_cdw_vec = torch.tensor([f.lcf_cdw_vec for f in test_features], dtype=torch.float32)\r\n self.test_data = TensorDataset(all_spc_input_ids, all_segment_ids, all_input_mask, all_label_ids, all_polarities,\r\n all_valid_ids, all_lmask_ids, lcf_cdm_vec, lcf_cdw_vec)\r\n\r\n if self.opt.cache_dataset and not os.path.exists(cache_path):\r\n print('Caching dataset... please remove cached dataset if change model or dataset')\r\n with open(cache_path, mode='wb') as f:\r\n if self.opt.dataset_file['test']:\r\n pickle.dump((self.train_data, self.test_data, self.opt), f)\r\n else:\r\n pickle.dump((self.train_data, self.opt), f)\r\n\r\n # only identify the labels in training set, make sure the labels are the same type in the test set\r\n for key in opt.args:\r\n if key not in self.opt.args:\r\n self.opt.args[key] = opt.args[key]\r\n self.opt.args_call_count[key] = opt.args_call_count[key]\r\n\r\n bert_base_model.config.num_labels = self.opt.num_labels\r\n self.opt.label_list = opt.label_list\r\n\r\n self.num_train_optimization_steps = int(\r\n len(self.train_data) / self.opt.batch_size / self.opt.gradient_accumulation_steps) * self.opt.num_epoch\r\n\r\n train_sampler = RandomSampler(self.train_data)\r\n self.train_dataloader = DataLoader(self.train_data, sampler=train_sampler, pin_memory=True, batch_size=self.opt.batch_size)\r\n if self.opt.dataset_file['test']:\r\n test_sampler = SequentialSampler(self.test_data)\r\n self.test_dataloader = DataLoader(self.test_data, sampler=test_sampler, pin_memory=True, batch_size=self.opt.batch_size)\r\n\r\n self.model = self.opt.model(bert_base_model, opt=self.opt)\r\n\r\n param_optimizer = list(self.model.named_parameters())\r\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\r\n self.optimizer_grouped_parameters = [\r\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\r\n 'weight_decay': self.opt.l2reg},\r\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\r\n 'weight_decay': 0}\r\n ]\r\n\r\n if self.opt.auto_device == 'allcuda':\r\n self.model.to(self.opt.device)\r\n self.model = torch.nn.parallel.DataParallel(self.model)\r\n else:\r\n self.model.to(self.opt.device)\r\n\r\n if isinstance(self.opt.optimizer, str):\r\n self.optimizer = init_optimizer(self.opt.optimizer)(self.optimizer_grouped_parameters,\r\n lr=self.opt.learning_rate,\r\n weight_decay=self.opt.l2reg)\r\n if amp:\r\n self.model, self.optimizer = amp.initialize(self.model, self.optimizer, opt_level=\"O1\")\r\n\r\n self.opt.device = torch.device(self.opt.device)\r\n if self.opt.device.type == 'cuda':\r\n self.logger.info(\r\n \"cuda memory allocated:{}\".format(torch.cuda.memory_allocated(device=self.opt.device)))\r\n\r\n print_args(self.opt, self.logger)\r\n\r\n def run(self):\r\n patience = self.opt.patience + self.opt.evaluate_begin\r\n if self.opt.log_step < 0:\r\n self.opt.log_step = len(self.train_dataloader) if self.opt.log_step < 0 else 
self.opt.log_step\r\n\r\n if self.opt.warmup_step >= 0:\r\n self.lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=len(self.train_dataloader) * self.opt.num_epoch)\r\n self.warmup_scheduler = warmup.UntunedLinearWarmup(self.optimizer)\r\n\r\n self.logger.info(\"***** Running training for Aspect Term Extraction *****\")\r\n self.logger.info(\" Num examples = %d\", len(self.train_data))\r\n self.logger.info(\" Batch size = %d\", self.opt.batch_size)\r\n self.logger.info(\" Num steps = %d\", self.num_train_optimization_steps)\r\n sum_loss = 0\r\n sum_apc_test_acc = 0\r\n sum_apc_test_f1 = 0\r\n sum_ate_test_f1 = 0\r\n self.opt.max_test_metrics = {'max_apc_test_acc': 0, 'max_apc_test_f1': 0, 'max_ate_test_f1': 0}\r\n self.opt.metrics_of_this_checkpoint = {'apc_acc': 0, 'apc_f1': 0, 'ate_f1': 0}\r\n global_step = 0\r\n save_path = '{0}/{1}_{2}'.format(self.opt.model_path_to_save,\r\n self.opt.model_name,\r\n self.opt.dataset_name\r\n )\r\n for epoch in range(int(self.opt.num_epoch)):\r\n nb_tr_examples, nb_tr_steps = 0, 0\r\n iterator = tqdm.tqdm(self.train_dataloader, postfix='Epoch:{}'.format(epoch))\r\n postfix = ''\r\n patience -= 1\r\n for step, batch in enumerate(iterator):\r\n self.model.train()\r\n input_ids_spc, segment_ids, input_mask, label_ids, polarity, \\\r\n valid_ids, l_mask, lcf_cdm_vec, lcf_cdw_vec = batch\r\n input_ids_spc = input_ids_spc.to(self.opt.device)\r\n segment_ids = segment_ids.to(self.opt.device)\r\n input_mask = input_mask.to(self.opt.device)\r\n label_ids = label_ids.to(self.opt.device)\r\n polarity = polarity.to(self.opt.device)\r\n valid_ids = valid_ids.to(self.opt.device)\r\n l_mask = l_mask.to(self.opt.device)\r\n lcf_cdm_vec = lcf_cdm_vec.to(self.opt.device)\r\n lcf_cdw_vec = lcf_cdw_vec.to(self.opt.device)\r\n loss_ate, loss_apc = self.model(input_ids_spc,\r\n token_type_ids=segment_ids,\r\n attention_mask=input_mask,\r\n labels=label_ids,\r\n polarity=polarity,\r\n valid_ids=valid_ids,\r\n attention_mask_label=l_mask,\r\n lcf_cdm_vec=lcf_cdm_vec,\r\n lcf_cdw_vec=lcf_cdw_vec\r\n )\r\n # for multi-gpu, average loss by gpu instance number\r\n if self.opt.auto_device == 'allcuda':\r\n loss_ate, loss_apc = loss_ate.mean(), loss_apc.mean()\r\n # loss_ate = loss_ate.item() / (loss_ate.item() + loss_apc.item()) * loss_ate\r\n # loss_apc = loss_apc.item() / (loss_ate.item() + loss_apc.item()) * loss_apc\r\n # loss = loss_ate + loss_apc\r\n loss = loss_ate + loss_apc # the optimal weight of loss may be different according to dataset\r\n\r\n sum_loss += loss.item()\r\n\r\n if amp:\r\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\r\n scaled_loss.backward()\r\n else:\r\n loss.backward()\r\n self.optimizer.step()\r\n\r\n if self.opt.warmup_step >= 0:\r\n with self.warmup_scheduler.dampening():\r\n self.lr_scheduler.step()\r\n\r\n nb_tr_examples += input_ids_spc.size(0)\r\n nb_tr_steps += 1\r\n self.optimizer.zero_grad()\r\n global_step += 1\r\n if global_step % self.opt.log_step == 0:\r\n if self.opt.dataset_file['test'] and epoch >= self.opt.evaluate_begin:\r\n apc_result, ate_result = self.evaluate()\r\n sum_apc_test_acc += apc_result['apc_test_acc']\r\n sum_apc_test_f1 += apc_result['apc_test_f1']\r\n sum_ate_test_f1 += ate_result\r\n self.opt.metrics_of_this_checkpoint['apc_acc'] = apc_result['apc_test_acc']\r\n self.opt.metrics_of_this_checkpoint['apc_f1'] = apc_result['apc_test_f1']\r\n self.opt.metrics_of_this_checkpoint['ate_f1'] = ate_result\r\n\r\n if apc_result['apc_test_acc'] > 
self.opt.max_test_metrics['max_apc_test_acc'] or \\\r\n apc_result['apc_test_f1'] > self.opt.max_test_metrics['max_apc_test_f1'] or \\\r\n ate_result > self.opt.max_test_metrics['max_ate_test_f1']:\r\n patience = self.opt.patience\r\n if apc_result['apc_test_acc'] > self.opt.max_test_metrics['max_apc_test_acc']:\r\n self.opt.max_test_metrics['max_apc_test_acc'] = apc_result['apc_test_acc']\r\n if apc_result['apc_test_f1'] > self.opt.max_test_metrics['max_apc_test_f1']:\r\n self.opt.max_test_metrics['max_apc_test_f1'] = apc_result['apc_test_f1']\r\n if ate_result > self.opt.max_test_metrics['max_ate_test_f1']:\r\n self.opt.max_test_metrics['max_ate_test_f1'] = ate_result\r\n\r\n if self.opt.model_path_to_save:\r\n # if save_path:\r\n # try:\r\n # shutil.rmtree(save_path)\r\n # # self.logger.info('Remove sub-self.optimal trained model:', save_path)\r\n # except:\r\n # self.logger.info('Can not remove sub-self.optimal trained model:', save_path)\r\n\r\n save_path = '{0}/{1}_{2}_{3}_apcacc_{4}_apcf1_{5}_atef1_{6}/'.format(\r\n self.opt.model_path_to_save,\r\n self.opt.model_name,\r\n self.opt.dataset_name,\r\n self.opt.lcf,\r\n round(apc_result['apc_test_acc'], 2),\r\n round(apc_result['apc_test_f1'], 2),\r\n round(ate_result, 2)\r\n )\r\n\r\n save_model(self.opt, self.model, self.tokenizer, save_path)\r\n\r\n current_apc_test_acc = apc_result['apc_test_acc']\r\n current_apc_test_f1 = apc_result['apc_test_f1']\r\n current_ate_test_f1 = round(ate_result, 2)\r\n\r\n postfix = 'Epoch:{} | '.format(epoch)\r\n\r\n postfix += 'loss_apc:{:.4f} | loss_ate:{:.4f} |'.format(loss_apc.item(), loss_ate.item())\r\n\r\n postfix += ' APC_ACC: {}(max:{}) | APC_F1: {}(max:{}) | '.format(current_apc_test_acc,\r\n self.opt.max_test_metrics[\r\n 'max_apc_test_acc'],\r\n current_apc_test_f1,\r\n self.opt.max_test_metrics[\r\n 'max_apc_test_f1']\r\n )\r\n postfix += 'ATE_F1: {}(max:{})'.format(current_ate_test_f1, self.opt.max_test_metrics[\r\n 'max_ate_test_f1'])\r\n else:\r\n if self.opt.save_mode and epoch >= self.opt.evaluate_begin:\r\n save_model(self.opt, self.model, self.tokenizer, save_path + '_{}/'.format(loss.item()))\r\n postfix = 'Epoch:{} | Loss: {} | No evaluation until epoch:{}'.format(epoch, round(loss.item(), 8), self.opt.evaluate_begin)\r\n\r\n iterator.postfix = postfix\r\n iterator.refresh()\r\n\r\n if patience < 0:\r\n break\r\n\r\n self.opt.MV.add_metric('Max-APC-Test-Acc', self.opt.max_test_metrics['max_apc_test_acc'])\r\n self.opt.MV.add_metric('Max-APC-Test-F1', self.opt.max_test_metrics['max_apc_test_f1'])\r\n self.opt.MV.add_metric('Max-ATE-Test-F1', self.opt.max_test_metrics['max_ate_test_f1'])\r\n\r\n self.opt.MV.summary(no_print=True)\r\n self.logger.info(self.opt.MV.summary(no_print=True))\r\n\r\n print('Training finished, we hope you can share your checkpoint with community, please see:',\r\n 'https://github.com/yangheng95/PyABSA/blob/release/demos/documents/share-checkpoint.md')\r\n\r\n print_args(self.opt, self.logger)\r\n\r\n # return the model paths of multiple training\r\n # in case of loading the best model after training\r\n if self.opt.save_mode:\r\n del self.train_dataloader\r\n del self.test_dataloader\r\n del self.model\r\n cuda.empty_cache()\r\n time.sleep(3)\r\n return save_path\r\n else:\r\n # direct return model if do not evaluate\r\n del self.train_dataloader\r\n del self.test_dataloader\r\n cuda.empty_cache()\r\n time.sleep(3)\r\n return self.model, self.opt, self.tokenizer, sum_apc_test_acc, sum_apc_test_f1, sum_ate_test_f1\r\n\r\n def evaluate(self, eval_ATE=True, 
eval_APC=True):\r\n apc_result = {'apc_test_acc': 0, 'apc_test_f1': 0}\r\n ate_result = 0\r\n y_true = []\r\n y_pred = []\r\n n_test_correct, n_test_total = 0, 0\r\n test_apc_logits_all, test_polarities_all = None, None\r\n self.model.eval()\r\n label_map = {i: label for i, label in enumerate(self.opt.label_list, 1)}\r\n\r\n for i, batch in enumerate(self.test_dataloader):\r\n input_ids_spc, segment_ids, input_mask, label_ids, polarity, \\\r\n valid_ids, l_mask, lcf_cdm_vec, lcf_cdw_vec = batch\r\n input_ids_spc = input_ids_spc.to(self.opt.device)\r\n segment_ids = segment_ids.to(self.opt.device)\r\n input_mask = input_mask.to(self.opt.device)\r\n label_ids = label_ids.to(self.opt.device)\r\n polarity = polarity.to(self.opt.device)\r\n valid_ids = valid_ids.to(self.opt.device)\r\n l_mask = l_mask.to(self.opt.device)\r\n lcf_cdm_vec = lcf_cdm_vec.to(self.opt.device)\r\n lcf_cdw_vec = lcf_cdw_vec.to(self.opt.device)\r\n with torch.no_grad():\r\n if self.opt.auto_device == 'allcuda':\r\n ate_logits, apc_logits = self.model.module(input_ids_spc,\r\n token_type_ids=segment_ids,\r\n attention_mask=input_mask,\r\n labels=None,\r\n polarity=polarity,\r\n valid_ids=valid_ids,\r\n attention_mask_label=l_mask,\r\n lcf_cdm_vec=lcf_cdm_vec,\r\n lcf_cdw_vec=lcf_cdw_vec\r\n )\r\n else:\r\n ate_logits, apc_logits = self.model(input_ids_spc,\r\n token_type_ids=segment_ids,\r\n attention_mask=input_mask,\r\n labels=None,\r\n polarity=polarity,\r\n valid_ids=valid_ids,\r\n attention_mask_label=l_mask,\r\n lcf_cdm_vec=lcf_cdm_vec,\r\n lcf_cdw_vec=lcf_cdw_vec\r\n )\r\n if eval_APC:\r\n n_test_correct += (torch.argmax(apc_logits, -1) == polarity).sum().item()\r\n n_test_total += len(polarity)\r\n\r\n if test_polarities_all is None:\r\n test_polarities_all = polarity\r\n test_apc_logits_all = apc_logits\r\n else:\r\n test_polarities_all = torch.cat((test_polarities_all, polarity), dim=0)\r\n test_apc_logits_all = torch.cat((test_apc_logits_all, apc_logits), dim=0)\r\n\r\n if eval_ATE:\r\n if not self.opt.use_bert_spc:\r\n if self.opt.auto_device == 'allcuda':\r\n label_ids = self.model.module.get_batch_token_labels_bert_base_indices(label_ids)\r\n else:\r\n label_ids = self.model.get_batch_token_labels_bert_base_indices(label_ids)\r\n ate_logits = torch.argmax(F.log_softmax(ate_logits, dim=2), dim=2)\r\n ate_logits = ate_logits.detach().cpu().numpy()\r\n label_ids = label_ids.to('cpu').numpy()\r\n input_mask = input_mask.to('cpu').numpy()\r\n for i, label in enumerate(label_ids):\r\n temp_1 = []\r\n temp_2 = []\r\n for j, m in enumerate(label):\r\n if j == 0:\r\n continue\r\n elif label_ids[i][j] == len(self.opt.label_list):\r\n y_true.append(temp_1)\r\n y_pred.append(temp_2)\r\n break\r\n else:\r\n temp_1.append(label_map.get(label_ids[i][j], 'O'))\r\n temp_2.append(label_map.get(ate_logits[i][j], 'O'))\r\n if eval_APC:\r\n test_acc = n_test_correct / n_test_total\r\n\r\n test_f1 = f1_score(torch.argmax(test_apc_logits_all, -1).cpu(), test_polarities_all.cpu(),\r\n labels=list(range(self.opt.polarities_dim)), average='macro')\r\n\r\n test_acc = round(test_acc * 100, 2)\r\n test_f1 = round(test_f1 * 100, 2)\r\n apc_result = {'apc_test_acc': test_acc, 'apc_test_f1': test_f1}\r\n\r\n if eval_ATE:\r\n report = classification_report(y_true, y_pred, digits=4)\r\n tmps = report.split()\r\n ate_result = round(float(tmps[7]) * 100, 2)\r\n return apc_result, ate_result\r\n\r\n\r\n@retry\r\ndef train4atepc(opt, from_checkpoint_path, logger):\r\n # in case of handling ConnectionError exception\r\n trainer = Instructor(opt, 
logger)\r\n resume_from_checkpoint(trainer, from_checkpoint_path)\r\n\r\n return trainer.run()\r\n", "# -*- coding: utf-8 -*-\r\n# file: asgcn.py\r\n# author: <[email protected]>\r\n# Copyright (C) 2020. All Rights Reserved.\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom ..layers.dynamic_rnn import DynamicLSTM\r\n\r\n\r\nclass GraphConvolution(nn.Module):\r\n \"\"\"\r\n Simple GCN layer, similar to https://arxiv.org/abs/1609.02907\r\n \"\"\"\r\n\r\n def __init__(self, in_features, out_features, bias=True):\r\n super(GraphConvolution, self).__init__()\r\n self.in_features = in_features\r\n self.out_features = out_features\r\n self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))\r\n if bias:\r\n self.bias = nn.Parameter(torch.FloatTensor(out_features))\r\n else:\r\n self.register_parameter('bias', None)\r\n\r\n def forward(self, text, adj):\r\n hidden = torch.matmul(text, self.weight)\r\n denom = torch.sum(adj, dim=2, keepdim=True) + 1\r\n output = torch.matmul(adj, hidden) / denom\r\n if self.bias is not None:\r\n return output + self.bias\r\n else:\r\n return output\r\n\r\n\r\nclass ASGCN_Unit(nn.Module):\r\n inputs = ['text_indices', 'aspect_indices', 'left_indices', 'dependency_graph']\r\n\r\n def __init__(self, embedding_matrix, opt):\r\n super(ASGCN_Unit, self).__init__()\r\n self.opt = opt\r\n self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))\r\n self.text_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)\r\n self.gc1 = GraphConvolution(2 * opt.hidden_dim, 2 * opt.hidden_dim)\r\n self.gc2 = GraphConvolution(2 * opt.hidden_dim, 2 * opt.hidden_dim)\r\n self.text_embed_dropout = nn.Dropout()\r\n\r\n def position_weight(self, x, aspect_double_idx, text_len, aspect_len):\r\n batch_size = x.shape[0]\r\n seq_len = x.shape[1]\r\n aspect_double_idx = aspect_double_idx.cpu().numpy()\r\n text_len = text_len.cpu().numpy()\r\n aspect_len = aspect_len.cpu().numpy()\r\n weight = [[] for i in range(batch_size)]\r\n for i in range(batch_size):\r\n context_len = text_len[i] - aspect_len[i]\r\n for j in range(aspect_double_idx[i, 0]):\r\n weight[i].append(1 - (aspect_double_idx[i, 0] - j) / context_len)\r\n for j in range(aspect_double_idx[i, 0], aspect_double_idx[i, 1] + 1):\r\n weight[i].append(0)\r\n for j in range(aspect_double_idx[i, 1] + 1, text_len[i]):\r\n weight[i].append(1 - (j - aspect_double_idx[i, 1]) / context_len)\r\n for j in range(text_len[i], seq_len):\r\n weight[i].append(0)\r\n weight = torch.tensor(weight, dtype=torch.float).unsqueeze(2).to(self.opt.device)\r\n return weight * x\r\n\r\n def mask(self, x, aspect_double_idx):\r\n batch_size, seq_len = x.shape[0], x.shape[1]\r\n aspect_double_idx = aspect_double_idx.cpu().numpy()\r\n mask = [[] for i in range(batch_size)]\r\n for i in range(batch_size):\r\n for j in range(aspect_double_idx[i, 0]):\r\n mask[i].append(0)\r\n for j in range(aspect_double_idx[i, 0], aspect_double_idx[i, 1] + 1):\r\n mask[i].append(1)\r\n for j in range(aspect_double_idx[i, 1] + 1, seq_len):\r\n mask[i].append(0)\r\n mask = torch.tensor(mask, dtype=torch.float).unsqueeze(2).to(self.opt.device)\r\n return mask * x\r\n\r\n def forward(self, inputs):\r\n text_indices, aspect_indices, left_indices, adj = \\\r\n inputs[0], inputs[1], inputs[2], inputs[3]\r\n text_len = torch.sum(text_indices != 0, dim=-1)\r\n aspect_len = torch.sum(aspect_indices != 0, dim=-1)\r\n left_len = torch.sum(left_indices != 0, 
dim=-1)\r\n aspect_double_idx = torch.cat([left_len.unsqueeze(1), (left_len + aspect_len - 1).unsqueeze(1)], dim=1)\r\n text = self.embed(text_indices)\r\n text = self.text_embed_dropout(text)\r\n text_out, (_, _) = self.text_lstm(text, text_len)\r\n seq_len = text_out.shape[1]\r\n adj = adj[:, :seq_len, :seq_len]\r\n x = F.relu(self.gc1(self.position_weight(text_out, aspect_double_idx, text_len, aspect_len), adj))\r\n x = F.relu(self.gc2(self.position_weight(x, aspect_double_idx, text_len, aspect_len), adj))\r\n x = self.mask(x, aspect_double_idx)\r\n alpha_mat = torch.matmul(x, text_out.transpose(1, 2))\r\n alpha = F.softmax(alpha_mat.sum(1, keepdim=True), dim=2)\r\n x = torch.matmul(alpha, text_out).squeeze(1) # batch_size x 2*hidden_dim\r\n\r\n return x\r\n\r\n\r\nclass ASGCN(nn.Module):\r\n inputs = [\r\n 'text_indices',\r\n 'aspect_indices',\r\n 'left_indices',\r\n 'dependency_graph',\r\n 'left_aspect_indices',\r\n 'left_left_indices',\r\n 'left_dependency_graph',\r\n 'right_aspect_indices',\r\n 'right_left_indices',\r\n 'right_dependency_graph',\r\n ]\r\n\r\n def __init__(self, bert, opt):\r\n super(ASGCN, self).__init__()\r\n self.opt = opt\r\n self.asgcn_left = ASGCN_Unit(bert, opt) if self.opt.lsa else None\r\n self.asgcn_central = ASGCN_Unit(bert, opt)\r\n self.asgcn_right = ASGCN_Unit(bert, opt) if self.opt.lsa else None\r\n self.dense = nn.Linear(self.opt.hidden_dim * 6, self.opt.polarities_dim) \\\r\n if self.opt.lsa else nn.Linear(self.opt.hidden_dim * 2, self.opt.polarities_dim)\r\n\r\n def forward(self, inputs):\r\n res = {'logits': None}\r\n if self.opt.lsa:\r\n cat_feat = torch.cat(\r\n (self.asgcn_left([inputs['text_indices'], inputs['left_aspect_indices'], inputs['left_left_indices'], inputs['left_dependency_graph']]),\r\n self.asgcn_central([inputs['text_indices'], inputs['aspect_indices'], inputs['left_indices'], inputs['dependency_graph']]),\r\n self.asgcn_right([inputs['text_indices'], inputs['right_aspect_indices'], inputs['right_left_indices'], inputs['right_dependency_graph']])),\r\n -1)\r\n res['logits'] = self.dense(cat_feat)\r\n else:\r\n res['logits'] = self.dense(self.asgcn_central([inputs['text_indices'], inputs['aspect_indices'], inputs['left_indices'], inputs['dependency_graph']]))\r\n\r\n return res\r\n" ]
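Editorial note: the propagation rule in `GraphConvolution.forward` above reduces to three tensor operations; a minimal standalone version follows, where the shapes in the comments are assumptions consistent with the model code rather than documented facts.

    import torch

    def gcn_layer(text, adj, weight, bias=None):
        # text: (batch, seq, in_feat); adj: (batch, seq, seq); weight: (in_feat, out_feat)
        hidden = torch.matmul(text, weight)
        # normalize by node degree; the +1 keeps isolated nodes from dividing by zero
        denom = torch.sum(adj, dim=2, keepdim=True) + 1
        out = torch.matmul(adj, hidden) / denom
        return out + bias if bias is not None else out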
[ [ "torch.softmax", "torch.cuda.manual_seed", "numpy.random.seed", "torch.load", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.tensor", "torch.no_grad" ], [ "torch.nn.parallel.DataParallel", "torch.cuda.manual_seed", "numpy.random.seed", "torch.cat", "torch.manual_seed", "torch.utils.data.TensorDataset", "torch.utils.data.SequentialSampler", "torch.utils.data.DataLoader", "torch.utils.data.RandomSampler", "torch.cuda.empty_cache", "torch.tensor", "torch.nn.functional.log_softmax", "torch.no_grad", "torch.device", "torch.cuda.memory_allocated", "torch.argmax" ], [ "torch.nn.Dropout", "torch.sum", "torch.tensor", "torch.matmul", "torch.nn.Linear", "torch.FloatTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ahmedjawedaj/jax
[ "f7009ed1072bf48ea6ba9d6d2ce573ce0ce13377" ]
[ "jax/interpreters/xla.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom collections import defaultdict, deque\nimport itertools as it\nimport operator as op\nfrom typing import (Any, Callable, Dict, List, Optional, Sequence, Set, Type,\n Tuple, Union, NamedTuple)\nfrom warnings import warn\nimport weakref\n\nfrom absl import logging\nimport numpy as np\n\nfrom ..config import config\nfrom .. import core\nfrom jax._src import ad_util\nfrom jax._src import dtypes\nfrom .. import linear_util as lu\nfrom jax._src import source_info_util\nfrom jax._src.abstract_arrays import (make_shaped_array, array_types)\nfrom ..core import (ConcreteArray, ShapedArray, AbstractToken,\n Literal, pp_eqn_compact, raise_to_shaped, abstract_token)\nfrom jax._src.pprint_util import pp\nfrom .._src.util import (partial, partialmethod, cache, prod, unzip2,\n extend_name_stack, wrap_name, safe_zip, safe_map)\nfrom ..lib import xla_bridge as xb\nfrom ..lib import xla_client as xc\nfrom . import partial_eval as pe\nfrom . import ad\nfrom . import masking\n\nmap, unsafe_map = safe_map, map\nzip, unsafe_zip = safe_zip, zip\n\nxe = xc._xla\nxops = xc._xla.ops\n\n# Types\nBackend = Any # xc.LocalBackend (why does mypy not like this?)\nDevice = Any # xc.Device\nPyLocalBuffer = Any\n\nXlaOp = Any # xla_extension.XlaOp\nXlaShape = Any # xla_client.Shape\nXlaComputationBuilder = Any # xla_bridge._JaxComputationBuilder\nXlaExecutable = Any # xla_extension.LocalExecutable\n\n# This flag is set on exit; no logging should be attempted\n_on_exit = False\n\n\ndef compile_or_get_cached(backend, computation, compile_options):\n # Avoid import cycle between jax and jax.experimental\n from jax.experimental.compilation_cache import compilation_cache as cc\n if cc.is_initialized():\n cached_executable = cc.get_executable(computation, compile_options)\n if cached_executable is not None:\n return cached_executable\n else:\n compiled = backend_compile(backend, computation, compile_options)\n cc.put_executable(computation, compile_options, compiled)\n return compiled\n return backend_compile(backend, computation, compile_options)\n\ndef identity(x): return x\n\n_scalar_types = dtypes.python_scalar_dtypes.keys()\n\n# unit representation\ndef _make_unit_constant(c): return xb.constant_general(c, np.zeros((), dtype=np.dtype('bool')))\ndef _make_unit_shape(_): return (xc.Shape.array_shape(np.dtype('bool'), ()),)\ndef _device_put_unit(_, device):\n backend = xb.get_device_backend(device)\n return (backend.buffer_from_pyval(np.zeros((), dtype=np.dtype('bool')),\n device),)\ndef _make_array_shape(a):\n if a.dtype is dtypes.float0:\n return (xc.Shape.array_shape(np.dtype('bool'), a.shape),)\n else:\n return (xc.Shape.array_shape(a.dtype, a.shape),)\n\ntracebacks = {}\ndef make_op_metadata(primitive: core.Primitive,\n params: Dict, *,\n name_stack: str = \"\",\n source_info: Optional[source_info_util.Traceback] = None\n ) -> xc.OpMetadata:\n tracebacks[str(pp(name_stack) >> pp_eqn_compact(primitive.name, params))] = source_info\n 
frame = source_info_util.user_frame(source_info) if source_info else None\n return xc.OpMetadata(\n op_type=primitive.name,\n op_name=str(pp(name_stack) >> pp_eqn_compact(primitive.name, params)),\n source_file=frame.file_name if frame else None,\n source_line=frame.line_num if frame else None)\n\n### handlers\n\nxb.register_constant_handler(core.Unit, lambda c, *_: _make_unit_constant(c))\n\ndef aval_to_xla_shapes(aval):\n try:\n return xla_shape_handlers[type(aval)](aval)\n except KeyError as err:\n raise TypeError(f\"No xla_shape_handler for type: {type(aval)}\") from err\n\nxla_shape_handlers: Dict[Type[core.AbstractValue], Callable] = {\n core.AbstractUnit: _make_unit_shape,\n ShapedArray: _make_array_shape,\n ConcreteArray: _make_array_shape,\n}\n\ndef aval_to_result_handler(device: Optional[Device], aval: core.AbstractValue) -> Callable:\n try:\n return xla_result_handlers[type(aval)](device, aval)\n except KeyError as err:\n raise TypeError(f\"No xla_result_handler for type: {type(aval)}\") from err\n\ndef array_result_handler(device: Optional[Device], aval: core.ShapedArray):\n if aval.dtype is dtypes.float0:\n return lambda _: np.zeros(aval.shape, dtypes.float0)\n return partial(make_device_array, raise_to_shaped(aval), device)\n\n\nxla_result_handlers: Dict[Type[core.AbstractValue], Callable[..., Callable]] = {\n core.AbstractUnit: lambda _, __: lambda _: core.unit,\n ShapedArray: array_result_handler,\n ConcreteArray: array_result_handler,\n}\n\ndef device_put(x, device: Optional[Device] = None) -> Tuple[Any]:\n x = canonicalize_dtype(x)\n try:\n return device_put_handlers[type(x)](x, device)\n except KeyError as err:\n raise TypeError(f\"No device_put handler for type: {type(x)}\") from err\n\ndef _device_put_array(x, device: Optional[Device]):\n backend = xb.get_device_backend(device)\n if x.dtype is dtypes.float0:\n x = np.zeros(x.shape, dtype=np.dtype(bool))\n return (backend.buffer_from_pyval(x, device),)\n\ndef _device_put_scalar(x, device):\n return _device_put_array(dtypes.coerce_to_array(x), device)\n\ndevice_put_handlers: Dict[Any, Callable[[Any, Optional[Device]], Tuple[Any]]] = {\n core.Unit: _device_put_unit\n}\ndevice_put_handlers.update((t, _device_put_array) for t in array_types)\ndevice_put_handlers.update((t, _device_put_scalar) for t in _scalar_types)\n\n# TODO(mattjj): try to remove this canonicalize_dtype stuff\ndef canonicalize_dtype(x):\n typ = type(x)\n handler = canonicalize_dtype_handlers.get(typ)\n if handler: return handler(x)\n for typ in typ.mro():\n handler = canonicalize_dtype_handlers.get(typ)\n if handler: return handler(x)\n if hasattr(x, '__jax_array__'):\n return canonicalize_dtype(x.__jax_array__())\n raise TypeError(f\"No canonicalize_dtype handler for type: {type(x)}\")\n\ndef _canonicalize_ndarray_dtype(x):\n return np.asarray(x, dtypes.canonicalize_dtype(dtypes.result_type(x)))\n\ndef _canonicalize_python_scalar_dtype(typ, x):\n return np.asarray(\n x, dtypes.canonicalize_dtype(dtypes._scalar_type_to_dtype(typ, x)))\n\ncanonicalize_dtype_handlers: Dict[Any, Callable] = {core.Unit: identity}\ncanonicalize_dtype_handlers.update(\n (t, _canonicalize_ndarray_dtype) for t in array_types)\ncanonicalize_dtype_handlers.update(\n (t, partial(_canonicalize_python_scalar_dtype, t)) for t in _scalar_types)\n\ndef abstractify(x) -> core.AbstractValue:\n typ = type(x)\n aval_fn = pytype_aval_mappings.get(typ)\n if aval_fn: return aval_fn(x)\n for typ in typ.mro():\n aval_fn = pytype_aval_mappings.get(typ)\n if aval_fn: return aval_fn(x)\n if 
hasattr(x, '__jax_array__'):\n return abstractify(x.__jax_array__())\n raise TypeError(f\"Argument '{x}' of type '{type(x)}' is not a valid JAX type\")\n\ndef _make_abstract_python_scalar(typ, val):\n return ShapedArray((), dtypes._scalar_type_to_dtype(typ, val), weak_type=True)\n\npytype_aval_mappings: Dict[Any, Callable[[Any], core.AbstractValue]] = {\n core.Unit: lambda _: core.abstract_unit,\n}\npytype_aval_mappings.update((t, make_shaped_array) for t in array_types)\npytype_aval_mappings.update(\n (t, partial(_make_abstract_python_scalar, t)) for t in _scalar_types)\n\n# We can optionally set a Jaxpr rewriter that can be applied just before\n# compilation. This mechanism is used for compiling id_tap, we can\n# remove it once we bring the id_tap implementation into the core.\noutfeed_rewriter: Optional[Callable[[core.Jaxpr], core.Jaxpr]] = None\ndef apply_outfeed_rewriter(jaxpr: core.Jaxpr) -> core.Jaxpr:\n if outfeed_rewriter is not None:\n return outfeed_rewriter(jaxpr)\n else:\n return jaxpr\n\noutfeed_primitives: Set[core.Primitive] = set()\ndef jaxpr_uses_outfeed(jaxpr: core.Jaxpr) -> bool:\n \"\"\"Finds if there are outfeed primitives anywhere inside a Jaxpr.\"\"\"\n return any(primitive_uses_outfeed(eqn.primitive, eqn.params)\n for eqn in jaxpr.eqns)\n\ndef _param_uses_outfeed(param):\n if type(param) is core.Jaxpr:\n if jaxpr_uses_outfeed(param):\n return True\n elif type(param) is core.ClosedJaxpr:\n if jaxpr_uses_outfeed(param.jaxpr):\n return True\n return False\n\ndef primitive_uses_outfeed(prim: core.Primitive, params: Dict) -> bool:\n if prim in outfeed_primitives:\n return True\n for param in params.values():\n if isinstance(param, tuple):\n if any(unsafe_map(_param_uses_outfeed, param)):\n return True\n elif _param_uses_outfeed(param):\n return True\n return False\n\n### op-by-op execution\n\n\nArgSpec = Tuple[core.AbstractValue, Optional[Device]]\n\ndef arg_spec(x: Any) -> ArgSpec:\n aval = abstractify(x)\n try:\n return aval, x._device\n except:\n return aval, None\n\ndef apply_primitive(prim, *args, **params):\n \"\"\"Impl rule that compiles and runs a single primitive 'prim' using XLA.\"\"\"\n compiled_fun = xla_primitive_callable(prim, *unsafe_map(arg_spec, args), **params)\n return compiled_fun(*args)\n\n\ndef _partition_outputs(avals, outs):\n nouts = [aval._num_buffers for aval in avals]\n if config.jax_enable_checks:\n assert sum(nouts) == len(outs), f\"Internal error: sum(nouts)={sum(nouts)} should equal len(outs)={len(outs)}.\"\n outs = iter(outs)\n return [[next(outs) for _ in range(nout)] for nout in nouts]\n\n\n@cache()\ndef xla_primitive_callable(prim, *arg_specs: ArgSpec, **params):\n avals, arg_devices = unzip2(arg_specs)\n donated_invars = (False,) * len(arg_specs)\n device = _device_from_arg_devices(arg_devices)\n backend = xb.get_device_backend(device)\n if primitive_uses_outfeed(prim, params):\n # We use the _xla_callable path, where we pre-process the primitives\n def prim_fun(*args):\n return prim.bind(*args, **params)\n return _xla_callable(lu.wrap_init(prim_fun), device, None, \"prim\", donated_invars,\n *arg_specs)\n aval_out = prim.abstract_eval(*avals, **params)\n if not prim.multiple_results:\n handle_result = aval_to_result_handler(device, aval_out)\n else:\n handlers = map(partial(aval_to_result_handler, device), aval_out)\n handle_result = lambda *bufs:\\\n tuple(handler(*bs) for handler, bs in zip(handlers, _partition_outputs(aval_out, bufs)))\n tuple_args = len(avals) > 100\n if prim in initial_style_translations:\n nreps = 
initial_style_primitive_replicas(params)\n else:\n nreps = 1\n\n if nreps > xb.device_count(backend):\n raise ValueError(\n f\"compiling a primitive computation `{prim}` that requires {nreps} \"\n f\"replicas, but only {xb.device_count(backend)} XLA devices are \"\n f\"available on backend {backend.platform}.\")\n built_c = primitive_computation(prim, AxisEnv(nreps, (), ()), backend,\n tuple_args, *avals, **params)\n options = xb.get_compile_options(\n num_replicas=nreps,\n num_partitions=1,\n device_assignment=device and (device.id,))\n options.parameter_is_tupled_arguments = tuple_args\n compiled = backend_compile(backend, built_c, options)\n if nreps == 1:\n return partial(_execute_compiled_primitive, prim, compiled, handle_result)\n else:\n return partial(_execute_replicated_primitive, prim, compiled, handle_result)\n\ndef _device_from_arg_devices(devices: Sequence[Optional[Device]]) -> Optional[Device]:\n \"\"\"Given devices of inputs, determine where to perform a computation.\n\n Args:\n devices: list where each element is a either a `Device` instance or `None`.\n Returns:\n A `Device` instance or None.\n Raises:\n ValueError if input devices are inconsistent.\n \"\"\"\n try:\n device, = {d for d in devices if d is not None} or (None,)\n return device\n except ValueError as err:\n msg = \"primitive arguments must be colocated on the same device, got {}\"\n raise ValueError(msg.format(\", \".join(map(str, devices)))) from err\n\n@cache()\ndef primitive_computation(prim, axis_env, backend, tuple_args, *avals, **params):\n c = xb.make_computation_builder(f\"primitive_computation_{prim.name}\")\n op_metadata = make_op_metadata(prim, params)\n c.set_op_metadata(op_metadata)\n platform = xb.get_backend(backend).platform\n xla_args, _ = _xla_callable_args(c, avals, tuple_args)\n # return val always set as a side-effect on c\n if prim in backend_specific_translations[platform]:\n rule = backend_specific_translations[platform][prim]\n ans = rule(c, *xla_args, **params)\n elif prim in translations:\n rule = translations[prim]\n ans = rule(c, *xla_args, **params)\n elif prim in translations_with_avals:\n rule = translations_with_avals[prim]\n ans = rule(c, avals, xla_args, params)\n elif prim in initial_style_translations:\n rule = initial_style_translations[prim]\n ans = rule(c, axis_env, extend_name_stack(prim.name), avals, backend,\n *xla_args, **params)\n else:\n raise NotImplementedError(f\"XLA translation rule for {prim!r} on platform {platform!r} not found\")\n assert isinstance(ans, xe.XlaOp)\n c.clear_op_metadata()\n try:\n return c.build(ans)\n except RuntimeError as e:\n msg = (\" \".join(map(str, e.args)) + \"\\n\"\n \"This is a bug in JAX's shape-checking rules; please report it!\\n\"\n \"https://github.com/google/jax/issues\\n\")\n raise RuntimeError(msg) from e\n\ndef primitive_subcomputation(prim, *avals, **params):\n axis_env = AxisEnv(1, (), ())\n return primitive_computation(prim, axis_env, None, False, *avals, **params)\n\ndef backend_compile(backend, built_c, options):\n # we use a separate function call to ensure that XLA compilation appears\n # separately in Python profiling results\n return backend.compile(built_c, compile_options=options)\n\ndef _execute_compiled_primitive(prim, compiled, result_handler, *args):\n device, = compiled.local_devices()\n input_bufs = list(it.chain.from_iterable(device_put(x, device) for x in args if x is not token))\n out_bufs = compiled.execute(input_bufs)\n check_special(prim.name, out_bufs)\n return result_handler(*out_bufs)\n\ndef 
_execute_replicated_primitive(prim, compiled, result_handler, *args):\n input_bufs = [\n list(it.chain.from_iterable(device_put(x, device) for x in args if x is not token))\n for device in compiled.local_devices()]\n out_bufs = [\n buf[0] for buf in compiled.execute_sharded_on_local_devices(\n list(zip(*input_bufs)))\n ]\n return result_handler(*out_bufs)\n\ndef needs_check_special():\n return config.jax_debug_infs or config.jax_debug_nans\n\ndef check_special(name, bufs):\n if needs_check_special():\n for buf in bufs:\n _check_special(name, buf.xla_shape(), buf)\n\ndef _check_special(name, xla_shape, buf):\n assert not xla_shape.is_tuple()\n if dtypes.issubdtype(xla_shape.element_type(), np.inexact):\n if config.jax_debug_nans and np.any(np.isnan(buf.to_py())):\n raise FloatingPointError(f\"invalid value (nan) encountered in {name}\")\n if config.jax_debug_infs and np.any(np.isinf(buf.to_py())):\n raise FloatingPointError(f\"invalid value (inf) encountered in {name}\")\n\n### compiling jaxprs\n\ndef prefetch(x):\n if isinstance(x, DeviceArray):\n x.copy_to_host_async()\n return x\n\ndef jaxpr_literals(jaxpr):\n \"\"\"Generates all the literals inside a jaxpr, including nested subjaxprs.\"\"\"\n for eqn in jaxpr.eqns:\n for v in eqn.invars:\n if type(v) is core.Literal:\n yield v.val\n for subjaxpr in core.subjaxprs(jaxpr):\n yield from jaxpr_literals(subjaxpr)\n\n\ndef _flatmap(func: Callable, vars: Sequence):\n return list(it.chain.from_iterable(map(func, vars)))\n\ndef _partitionmap(func: Callable, vars: Sequence, nodes: Sequence):\n return map(func, vars, _partition_outputs([v.aval for v in vars], nodes))\n\ndef jaxpr_subcomp(c, jaxpr, backend, axis_env, consts, name_stack, *args):\n if backend not in ('cpu', 'gpu', 'tpu'):\n platform = xb.get_backend(backend).platform # canonicalize\n else:\n platform = backend\n\n def read(v):\n if type(v) is Literal:\n return xb.constant_general(c, canonicalize_dtype(v.val))\n else:\n return env[v]\n\n def aval(v):\n if type(v) is Literal:\n return abstractify(v.val)\n else:\n return v.aval\n\n def write(v, node):\n assert node is not None\n env[v] = node\n\n env = {}\n _partitionmap(write, [core.unitvar], _make_unit_constant(c))\n _partitionmap(write, jaxpr.constvars, consts)\n _partitionmap(write, jaxpr.invars, args)\n for eqn in jaxpr.eqns:\n op_metadata = make_op_metadata(\n eqn.primitive, eqn.params, name_stack=name_stack,\n source_info=eqn.source_info)\n c.set_op_metadata(op_metadata)\n in_nodes = _flatmap(read, eqn.invars)\n # TODO(jakevdp): migrate `translations` table to `translations_with_avals`\n if eqn.primitive in backend_specific_translations[platform]:\n rule = backend_specific_translations[platform][eqn.primitive]\n ans = rule(c, *in_nodes, **eqn.params)\n elif eqn.primitive in translations:\n ans = translations[eqn.primitive](c, *in_nodes, **eqn.params)\n elif eqn.primitive in translations_with_avals:\n rule = translations_with_avals[eqn.primitive]\n ans = rule(c, map(aval, eqn.invars), in_nodes, eqn.params)\n elif eqn.primitive in initial_style_translations:\n new_params = check_backend_params(eqn.params, backend)\n rule = initial_style_translations[eqn.primitive]\n ans = rule(c, axis_env, extend_name_stack(name_stack, eqn.primitive.name),\n map(aval, eqn.invars), backend, *in_nodes, **new_params)\n elif eqn.primitive in parallel_translations:\n rule = parallel_translations[eqn.primitive]\n ans = rule(c, *in_nodes, axis_env=axis_env, platform=platform, **eqn.params)\n elif eqn.primitive in call_translations:\n new_params = 
check_backend_params(eqn.params, backend)\n rule = call_translations[eqn.primitive]\n ans = rule(c, axis_env, in_nodes,\n name_stack, backend=backend, **new_params)\n else:\n raise NotImplementedError(\n f\"XLA translation rule for primitive '{eqn.primitive.name}' not found\")\n\n assert isinstance(ans, xe.XlaOp)\n c.get_shape(ans) # force xla to do shape error checking\n if eqn.primitive.multiple_results or any(v.aval._num_buffers > 1 for v in eqn.outvars):\n out_nodes = xla_destructure(c, ans)\n else:\n out_nodes = [ans]\n c.clear_op_metadata()\n _partitionmap(write, eqn.outvars, out_nodes)\n return _flatmap(read, jaxpr.outvars)\n\n\ndef xla_destructure(c, ans):\n num_elements = len(c.get_shape(ans).tuple_shapes())\n return [xops.GetTupleElement(ans, i) for i in range(num_elements)]\n\ndef check_backend_params(params, outer_backend):\n # For nested calls, the outermost call sets the backend for all inner calls;\n # it's an error if the inner call has a conflicting explicit backend spec.\n inner_backend = params.get('backend', None)\n if inner_backend and inner_backend != outer_backend:\n raise ValueError(\n f\"Outer-jit backend specification {outer_backend} must match explicit \"\n f\"inner-jit backend specification {inner_backend}.\")\n return {k: params[k] for k in params if k != 'backend'}\n\n\nclass AxisEnv(NamedTuple):\n \"\"\"Represents a pmap mesh (only along the replica axes).\"\"\"\n nreps: int\n names: Tuple[Any, ...]\n sizes: Tuple[int, ...]\n\ndef extend_axis_env(env: AxisEnv, name, size: int):\n return AxisEnv(env.nreps, env.names + (name,), env.sizes + (size,))\n\ndef axis_read(axis_env, axis_name):\n try:\n return max(i for i, name in enumerate(axis_env.names) if name == axis_name)\n except ValueError:\n raise NameError(\"unbound axis name: {}\".format(axis_name)) from None\n\ndef axis_groups(axis_env: AxisEnv, name):\n if not isinstance(name, (list, tuple)):\n name = (name,)\n mesh_axes = tuple(unsafe_map(partial(axis_read, axis_env), name))\n trailing_size, ragged = divmod(axis_env.nreps, prod(axis_env.sizes))\n assert not ragged\n mesh_spec = axis_env.sizes + (trailing_size,)\n return _axis_groups(mesh_spec, mesh_axes)\n\ndef _axis_groups(mesh_spec, mesh_axes):\n \"\"\"Computes replica group ids for a collective performed over a subset of the mesh.\n\n Args:\n mesh_spec: A sequence of integers representing the mesh shape.\n mesh_axes: A sequence of integers between 0 and `len(mesh_spec)` (exclusive)\n indicating over which axes the collective is performed.\n Returns:\n A tuple of replica groups (i.e. tuples containing replica ids).\n \"\"\"\n iota = np.arange(prod(mesh_spec)).reshape(mesh_spec)\n groups = np.reshape(\n np.moveaxis(iota, mesh_axes, np.arange(len(mesh_axes))),\n (prod(np.take(mesh_spec, mesh_axes)), -1))\n return tuple(unsafe_map(tuple, groups.T))\n\ndef jaxpr_replicas(jaxpr) -> int:\n \"\"\"The number of replicas needed for a jaxpr.\n\n For a eqn, multiply the `axis_size` with the `jaxpr_replicas` of the\n subjaxprs. 
For a list of eqns, take the maximum number of replicas.\n \"\"\"\n if isinstance(jaxpr, core.ClosedJaxpr):\n jaxpr = jaxpr.jaxpr\n return max(unsafe_map(eqn_replicas, jaxpr.eqns), default=1)\n\n# TODO(mattjj): this function assumes that only pmap has a parameter named\n# axis_size, and that it corresponds to cross-replica mapping\ndef eqn_replicas(eqn):\n call_jaxpr = eqn.params.get(\"call_jaxpr\")\n if call_jaxpr:\n return eqn.params.get('axis_size', 1) * jaxpr_replicas(call_jaxpr)\n elif eqn.primitive in initial_style_translations:\n return initial_style_primitive_replicas(eqn.params)\n else:\n return 1\n\ndef initial_style_primitive_replicas(params):\n return max(core.traverse_jaxpr_params(jaxpr_replicas, params).values(), default=1)\n\n# TODO(mattjj,skyewm): the functions here are utilities for checking if\n# not-yet-supported features are used with multi-host programming\n\ndef jaxpr_has_pmap(jaxpr):\n \"\"\"Whether there is an xla_pmap primitive anywhere inside a Jaxpr.\"\"\"\n for eqn in jaxpr.eqns:\n if 'xla_pmap' in eqn.primitive.name:\n return True\n for subjaxpr in core.subjaxprs(jaxpr):\n if jaxpr_has_pmap(subjaxpr):\n return True\n return False\n\n\ndef jaxpr_collectives(jaxpr):\n \"\"\"Generates all the collective primitives anywhere inside a Jaxpr.\"\"\"\n for eqn in jaxpr.eqns:\n if eqn.primitive in parallel_translations:\n yield eqn.primitive\n for subjaxpr in core.subjaxprs(jaxpr):\n yield from jaxpr_collectives(subjaxpr)\n\n\n### xla_call underlying jit\n\ndef _xla_call_impl(fun: lu.WrappedFun, *args, device, backend, name,\n donated_invars, inline):\n del inline # Only used at tracing time\n compiled_fun = _xla_callable(fun, device, backend, name, donated_invars,\n *unsafe_map(arg_spec, args))\n try:\n return compiled_fun(*args)\n except FloatingPointError:\n assert config.jax_debug_nans or config.jax_debug_infs # compiled_fun can only raise in this case\n print(\"Invalid value encountered in the output of a jit function. \"\n \"Calling the de-optimized version.\")\n # We want to run the wrapped function again (after _xla_callable already ran\n # it), but linear_util.WrappedFun instances are meant to be run only once.\n # In addition to re-executing the Python code, which is usually undesirable\n # but which config.jax_debug_nans is meant to opt into, we'll be re-executing\n # any linear_util.py-style side effects, i.e. re-populating Stores created\n # by any transformation_with_aux's applied to fun. 
Since this is\n # intentional here, to avoid \"Store occupied\" errors we reset the stores to\n # be empty.\n for store in fun.stores: store and store.reset()\n with core.new_sublevel():\n return fun.call_wrapped(*args) # probably won't return\n\ndef flatten_shape(s: XlaShape) -> Sequence[Tuple[Sequence[int], XlaShape]]:\n \"\"\"Expands a given shape tree into a flat list of indices to arrays.\n\n Given the following computation:\n\n >>> c = xc.XlaBuilder(\"example\")\n >>> p0 = xb.parameter(c, 1, xc.shape_from_pyval(jnp.ones([1])))\n >>> p1 = xb.parameter(c, 2, xc.shape_from_pyval(jnp.ones([2])))\n >>> p2 = xb.parameter(c, 3, xc.shape_from_pyval(jnp.ones([3])))\n >>> o = xops.Tuple(c, [p0, p1, p2])\n\n We can query the arrays in the output tuple:\n\n >>> flatten_shape(c.GetShape(o))\n [((0,), f32[1]{0}), ((1,), f32[2]{0}), ((2,), f32[3]{0})]\n\n Or the arrays in one of the parameters (which is itself an array):\n\n >>> flatten_shape(c.GetShape(p0))\n [((), f32[1]{0})]\n\n Args\n s: The input shape.\n\n Returns:\n An iterable of pairs of indices and shapes for each array within the shape\n tree.\n \"\"\"\n results: List[Tuple[Tuple[int, ...], XlaShape]] = []\n _flatten_shape(s, (), results)\n return results\n\ndef _flatten_shape(s: XlaShape, index: Tuple[int, ...],\n results: List[Tuple[Tuple[int, ...], XlaShape]]) -> None:\n if s.is_array() or s.is_token():\n results.append((index, s))\n else:\n assert s.is_tuple()\n for i, sub in enumerate(s.tuple_shapes()):\n _flatten_shape(sub, index + (i,), results)\n\n\ndef _xla_consts(c, consts):\n unique_consts = {id(const): const for const in consts}\n xla_consts = {\n id_: xb.constant_general(c, const) for id_, const in unique_consts.items()}\n return [c for const in consts for c in xla_consts[id(const)]]\n\[email protected]\ndef _xla_callable(fun: lu.WrappedFun, device, backend, name, donated_invars, *arg_specs):\n if device is not None and backend is not None:\n raise ValueError(\"can't specify both a device and a backend for jit, \"\n \"got device={} and backend={}\".format(device, backend))\n\n abstract_args, arg_devices = unzip2(arg_specs)\n jaxpr, out_avals, consts = pe.trace_to_jaxpr_final(\n fun, abstract_args, pe.debug_info_final(fun, \"jit\"))\n if any(isinstance(c, core.Tracer) for c in consts):\n raise core.UnexpectedTracerError(\"Encountered an unexpected tracer.\")\n jaxpr, kept_const_idx, kept_var_idx = _prune_unused_inputs(jaxpr)\n consts = [c for i, c in enumerate(consts) if i in kept_const_idx]\n pruned_arg_specs = (a for i, a in enumerate(arg_specs) if i in kept_var_idx)\n abstract_args, arg_devices = unzip2(pruned_arg_specs)\n donated_invars = [\n x for i, x in enumerate(donated_invars) if i in kept_var_idx\n ]\n map(prefetch, it.chain(consts, jaxpr_literals(jaxpr)))\n jaxpr = apply_outfeed_rewriter(jaxpr)\n\n nreps = jaxpr_replicas(jaxpr)\n device = _xla_callable_device(nreps, backend, device, arg_devices)\n backend = xb.get_device_backend(device) if device else (\n xb.get_backend(backend) if backend is not None else None)\n result_handlers = map(partial(aval_to_result_handler, device), out_avals)\n\n # Computations that only produce constants and/or only rearrange their inputs,\n # which are often produced from partial evaluation, don't need compilation,\n # and don't need to evaluate their arguments.\n if not jaxpr.eqns:\n return partial(_execute_trivial, jaxpr, device, consts, out_avals,\n result_handlers, kept_var_idx)\n\n if not _on_exit:\n log_priority = logging.WARNING if config.jax_log_compiles else logging.DEBUG\n 
logging.log(log_priority, \"Compiling %s (%s) for args %s.\",\n fun.__name__, id(fun), abstract_args)\n\n if nreps > 1:\n warn(f\"The jitted function {fun.__name__} includes a pmap. Using \"\n \"jit-of-pmap can lead to inefficient data movement, as the outer jit \"\n \"does not preserve sharded data representations and instead collects \"\n \"input and output arrays onto a single device. \"\n \"Consider removing the outer jit unless you know what you're doing. \"\n \"See https://github.com/google/jax/issues/2926.\")\n\n if nreps > xb.device_count(backend):\n raise ValueError(\n f\"compiling computation that requires {nreps} replicas, but only \"\n f\"{xb.device_count(backend)} XLA devices are available\")\n\n if xb.process_count() > 1 and (nreps > 1 or jaxpr_has_pmap(jaxpr)):\n raise NotImplementedError(\n \"jit of multi-host pmap not implemented (and jit-of-pmap can cause \"\n \"extra data movement anyway, so maybe you don't want it after all).\")\n\n tuple_args = len(abstract_args) > 100 # pass long arg lists as tuple for TPU\n\n c = xb.make_computation_builder(\"jit_{}\".format(fun.__name__))\n xla_consts = _xla_consts(c, consts)\n xla_args, donated_invars = _xla_callable_args(c, abstract_args, tuple_args,\n donated_invars=donated_invars)\n out_nodes = jaxpr_subcomp(\n c, jaxpr, backend.platform if backend is not None else None,\n AxisEnv(nreps, (), ()), xla_consts,\n extend_name_stack(wrap_name(name, 'jit')), *xla_args)\n backend = xb.get_backend(backend)\n out_tuple = xops.Tuple(c, out_nodes)\n if backend.platform in (\"gpu\", \"tpu\"):\n donated_invars = set_up_aliases(c, xla_args, out_tuple, donated_invars, tuple_args)\n if any(donated_invars):\n # TODO(tomhennigan): At call time we should mark these buffers as deleted.\n unused_donations = [str(c.GetShape(a))\n for a, d in zip(xla_args, donated_invars) if d]\n warn(\"Some donated buffers were not usable: {}\".format(\", \".join(unused_donations)))\n built = c.build(out_tuple)\n\n options = xb.get_compile_options(\n num_replicas=nreps,\n num_partitions=1,\n device_assignment=(device.id,) if device else None)\n options.parameter_is_tupled_arguments = tuple_args\n compiled = compile_or_get_cached(backend, built, options)\n if nreps == 1:\n return partial(_execute_compiled, compiled, out_avals, result_handlers,\n kept_var_idx)\n else:\n return partial(_execute_replicated, compiled, out_avals, result_handlers,\n kept_var_idx)\n\n\ndef set_up_aliases(c, xla_args, out_tuple, donated_args, tuple_args):\n \"\"\"Configures input/output \"must\" aliasing based on `donated_args`.\"\"\"\n # First for every input array add it to `donations` iff it is a member of\n # `donated_args`.\n donations = defaultdict(deque)\n for arg_index, arg in enumerate(xla_args):\n if donated_args[arg_index]:\n for param_index, element in flatten_shape(c.GetShape(arg)):\n key = (element.dimensions(), element.xla_element_type())\n if tuple_args:\n param_number = 0\n param_index = (arg_index,) + tuple(param_index)\n donations[key].append((param_number, param_index, arg_index))\n else:\n param_number = arg_index\n donations[key].append((param_number, param_index, arg_index))\n\n # Consume donations for outputs.\n out_donated_args = list(donated_args)\n for output_index, element in flatten_shape(c.GetShape(out_tuple)):\n key = (element.dimensions(), element.xla_element_type())\n if donations.get(key, ()):\n param_number, param_index, arg_index = donations[key].popleft()\n out_donated_args[arg_index] = False\n c.setup_alias(output_index, param_number, param_index)\n\n 
return tuple(out_donated_args)\n\n\ndef _prune_unused_inputs(\n jaxpr: core.Jaxpr) -> Tuple[core.Jaxpr, Set[int], Set[int]]:\n used = {v for v in jaxpr.outvars if isinstance(v, core.Var)}\n # TODO(zhangqiaorjc): Improve the DCE algorithm by also pruning primitive\n # applications that do not produce used outputs. Must handle side-effecting\n # primitives and nested jaxpr.\n used.update(\n v for eqn in jaxpr.eqns for v in eqn.invars if isinstance(v, core.Var))\n kept_const_idx, new_constvars = unzip2(\n (i, v) for i, v in enumerate(jaxpr.constvars) if v in used)\n kept_var_idx, new_invars = unzip2(\n (i, v) for i, v in enumerate(jaxpr.invars) if v in used)\n new_jaxpr = core.Jaxpr(new_constvars, new_invars, jaxpr.outvars, jaxpr.eqns)\n return new_jaxpr, set(kept_const_idx), set(kept_var_idx)\n\n\ndef _xla_callable_device(nreps, backend, device, arg_devices):\n if nreps > 1:\n if device is not None or backend is not None:\n raise ValueError(f\"can't specify device or backend for jit-of-pmap, \"\n f\"got device={device} and backend={backend}\")\n return None\n else:\n if device is None and backend is None:\n return _device_from_arg_devices(arg_devices)\n elif device is not None and backend is None:\n return device\n elif device is None and backend is not None:\n return xb.get_backend(backend).get_default_device_assignment(1)[0]\n else:\n assert False # Unreachable given the error check in _xla_callable\n\n# Used within _xla_callable_args and _xla_param to distinguish between None (no\n# sharding annotation set) and replicated.\n_replicated_param = object()\n\ndef _xla_callable_args(\n c, avals, tuple_args, *,\n replicated=None,\n partitions=None,\n partitions_proto: bool = False,\n donated_invars=None):\n assert partitions is None or len(partitions) == len(avals)\n if not tuple_args:\n if replicated is None:\n replicated = [None] * len(avals)\n if partitions is None:\n parts: List[object] = [None] * len(avals)\n elif partitions_proto:\n parts = partitions\n else:\n parts = [_replicated_param if part is None else part\n for part in partitions]\n counts = it.count()\n xla_args = [_xla_param(c, next(counts), xla_shape, r, p, partitions_proto)\n if a is not abstract_token else xops.CreateToken(c)\n for (a, r, p) in safe_zip(avals, replicated, parts)\n for xla_shape in aval_to_xla_shapes(a)]\n if donated_invars is not None:\n donated_invars = [\n d for (a, _, _, d) in zip(avals, replicated, parts, donated_invars)\n for xla_shape in aval_to_xla_shapes(a)]\n return xla_args, donated_invars\n else:\n if replicated is not None:\n replicated = [r for a, r in zip(avals, replicated)\n if a is not abstract_token]\n if partitions is None:\n tuple_parts = None\n elif partitions_proto:\n tuple_parts = xb.tuple_sharding_proto(partitions)\n else:\n tuple_parts = tuple(partitions)\n tuple_shape = xc.Shape.tuple_shape(\n [shape for a in avals for shape in aval_to_xla_shapes(a) if a is not abstract_token])\n tuple_param = _xla_param(c, 0, tuple_shape, replicated, tuple_parts, partitions_proto)\n xla_inputs = iter(xla_destructure(c, tuple_param))\n xla_args = [next(xla_inputs) if a is not abstract_token else\n xops.CreateToken(c) for a in avals]\n assert next(xla_inputs, None) is None\n return xla_args, donated_invars\n\ndef _xla_param(builder, param_num, xla_shape, replicated, partitions, parts_proto):\n make_param = partial(xb.parameter, builder, param_num, xla_shape,\n replicated=replicated)\n with_sharding = xb.with_sharding_proto if parts_proto else xb.with_sharding\n if partitions is None:\n return 
make_param()\n elif partitions is _replicated_param:\n return with_sharding(builder, None, make_param)\n else:\n return with_sharding(builder, partitions, make_param)\n\n\ndef _execute_compiled(compiled: XlaExecutable, avals, handlers, kept_var_idx,\n *args):\n device, = compiled.local_devices()\n input_bufs = list(\n it.chain.from_iterable(\n device_put(x, device)\n for i, x in enumerate(args)\n if x is not token and i in kept_var_idx))\n out_bufs = compiled.execute(input_bufs)\n check_special(xla_call_p.name, out_bufs)\n return [handler(*bs) for handler, bs in zip(handlers, _partition_outputs(avals, out_bufs))]\n\n\ndef _execute_replicated(compiled: XlaExecutable, avals, handlers, kept_var_idx,\n *args):\n input_bufs = [\n list(\n it.chain.from_iterable(\n device_put(x, device)\n for i, x in enumerate(args)\n if x is not token and i in kept_var_idx))\n for device in compiled.local_devices()\n ]\n out_bufs = [\n buf[0] for buf in compiled.execute_sharded_on_local_devices(\n list(zip(*input_bufs)))\n ]\n check_special(xla_call_p.name, out_bufs)\n return [handler(*bs) for handler, bs in zip(handlers, _partition_outputs(avals, out_bufs))]\n\n\ndef _execute_trivial(jaxpr, device: Optional[Device], consts, avals, handlers,\n kept_var_idx, *args):\n env = {core.unitvar: core.unit}\n pruned_args = (x for i, x in enumerate(args) if i in kept_var_idx)\n map(env.setdefault, jaxpr.invars, pruned_args)\n map(env.setdefault, jaxpr.constvars, consts)\n outs = [canonicalize_dtype(v.val) if type(v) is Literal else env[v]\n for v in jaxpr.outvars]\n return [_copy_device_array_to_device(x, device) if type_is_device_array(x)\n else h(*device_put(x, device)) for h, x in zip(handlers, outs)]\n\nxla_call_p: core.CallPrimitive = core.CallPrimitive('xla_call')\nxla_call = xla_call_p.bind\nxla_call_p.def_impl(_xla_call_impl)\n\ndef _xla_call_partial_eval_update_params(params, in_unknowns):\n call_jaxpr = params['call_jaxpr']\n donated_invars = params['donated_invars']\n if not in_unknowns and donated_invars:\n # JaxprTrace.post_process_call creates a call with no input tracers\n new_donated_invars = (False,) * len(call_jaxpr.invars)\n else:\n # JaxprTrace.process_call drops known input tracers\n donated_invars = [d for d, uk in zip(donated_invars, in_unknowns) if uk]\n new_donated_invars = ((False,) * (len(call_jaxpr.invars) - len(donated_invars))\n + tuple(donated_invars))\n return dict(params, donated_invars=new_donated_invars)\npe.call_param_updaters[xla_call_p] = _xla_call_partial_eval_update_params\n\ndef _xla_call_jvp_update_params(params, nz_tangents, nz_tangents_out_thunk):\n donated_invars = params['donated_invars']\n donated_tangents = [d for d, nz in zip(donated_invars, nz_tangents) if nz]\n new_donated_invars = (*donated_invars, *donated_tangents)\n return dict(params, donated_invars=new_donated_invars)\nad.call_param_updaters[xla_call_p] = _xla_call_jvp_update_params\n\ndef _xla_call_transpose_update_params(params, undef_primals, nonzero_cts):\n donated_invars = params['donated_invars']\n donated_primals = [d for d, u in zip(donated_invars, undef_primals) if not u]\n donated_cotangents = [False for nz in nonzero_cts if nz]\n return dict(params, donated_invars=(*donated_primals, *donated_cotangents))\nad.call_transpose_param_updaters[xla_call_p] = _xla_call_transpose_update_params\n\n\ndef _xla_call_translation_rule(c, axis_env, in_nodes, name_stack, backend, name,\n call_jaxpr, donated_invars, inline=None, device=None):\n del device, donated_invars, inline # Ignored.\n subc = 
xb.make_computation_builder(f\"jit_{name}\")\n args = [xb.parameter(subc, i, c.get_shape(n)) for i, n in enumerate(in_nodes)]\n out_nodes = jaxpr_subcomp(subc, call_jaxpr, backend, axis_env, (),\n extend_name_stack(name_stack, wrap_name(name, 'jit')), *args)\n subc = subc.build(xops.Tuple(subc, out_nodes))\n return xops.Call(c, subc, list(in_nodes))\nad.primitive_transposes[xla_call_p] = partial(ad.call_transpose, xla_call_p)\n\n\n### translation tables\n\ntranslations: Dict[core.Primitive, Callable] = {}\ntranslations_with_avals: Dict[core.Primitive, Callable] = {}\nparallel_translations: Dict[core.Primitive, Callable] = {}\ninitial_style_translations: Dict[core.Primitive, Callable] = {}\ncall_translations: Dict[core.Primitive, Callable] = {}\nbackend_specific_translations: Dict[str, Dict[core.Primitive, Callable]] = defaultdict(dict)\n\ncall_translations[xla_call_p] = _xla_call_translation_rule\n\ndef zeros_like_translation_rule(c, x):\n shape = c.get_shape(x)\n assert not shape.is_tuple()\n zero = xb.constant(c, np.array(0, shape.element_type()))\n return xops.Broadcast(zero, shape.dimensions())\ntranslations[ad_util.zeros_like_p] = zeros_like_translation_rule\n\ndef add_jaxvals_translation_rule(c, x, y):\n shape = c.get_shape(x)\n assert not shape.is_tuple()\n return xops.Add(x, y)\ntranslations[ad_util.add_jaxvals_p] = add_jaxvals_translation_rule\n\ntranslations[ad_util.stop_gradient_p] = lambda c, x: x\n\n\[email protected]\ndef _tuple_output(*args, **kwargs):\n ans = yield args, kwargs\n yield (ans,)\n\ndef lower_fun(fun, multiple_results, parallel=False, with_avals=False, backend=None):\n # TODO(jakevdp): migrate dependent code & always use the with_avals=True.\n def f(c, *xla_args, **params):\n avals = [_array_aval_from_xla_shape(c.get_shape(x)) for x in xla_args]\n return f_with_avals(c, avals, xla_args, params)\n\n def f_with_avals(c, avals, xla_args, params):\n if parallel:\n axis_env = params.pop('axis_env')\n del params['platform']\n else:\n axis_env = AxisEnv(1, (), ())\n wrapped_fun = lu.wrap_init(fun, params)\n if not multiple_results:\n wrapped_fun = _tuple_output(wrapped_fun)\n with core.extend_axis_env_nd(zip(axis_env.names, axis_env.sizes)):\n jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrapped_fun, avals)\n outs = jaxpr_subcomp(c, jaxpr, backend, axis_env, _xla_consts(c, consts), '',\n *xla_args)\n if multiple_results or any(v.aval._num_buffers > 1 for v in jaxpr.outvars):\n return xops.Tuple(c, outs)\n else:\n assert len(outs) == 1, outs\n return outs[0]\n\n return f_with_avals if with_avals else f\n\ndef _array_aval_from_xla_shape(xla_shape):\n # This function instantiates the assumption that we can map fro XLA array\n # types to JAX array types.\n # TODO(mattjj): remove assumption can map XLA array types to JAX array types\n assert not xla_shape.is_tuple()\n return ShapedArray(xla_shape.dimensions(), xla_shape.numpy_dtype())\n\ndef lower_fun_initial_style(fun):\n def f(c, axis_env, name_stack, avals, backend, *xla_args, **params):\n jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(lu.wrap_init(fun, params), avals)\n outs = jaxpr_subcomp(c, jaxpr, backend, axis_env, _xla_consts(c, consts),\n name_stack, *xla_args)\n return xops.Tuple(c, outs)\n return f\n\n\n### device-persistent data\n\nclass Token(object): pass\ntoken = Token()\n\npytype_aval_mappings[Token] = lambda _: abstract_token\ncore.pytype_aval_mappings[Token] = lambda _: abstract_token\nxla_shape_handlers[AbstractToken] = lambda _: (xc.Shape.token_shape(),)\nxla_result_handlers[AbstractToken] = lambda _, 
__: lambda _: token\ncanonicalize_dtype_handlers[Token] = identity\ndevice_put_handlers[Token] = lambda x, _: (x,)\n\n\ndef _forward_method(attrname, self, fun, *args):\n return fun(getattr(self, attrname), *args)\n_forward_to_value = partial(_forward_method, \"_value\")\n\n\n# The following is used for the type _CppDeviceArray or _DeviceArray.\nDeviceArrayProtocol = Any\nDeviceArray = xc.DeviceArrayBase\n\n_CppDeviceArray: DeviceArrayProtocol = xc.Buffer\n\ndef make_device_array(\n aval: core.ShapedArray,\n device: Optional[Device],\n device_buffer: PyLocalBuffer,\n) -> Union[PyLocalBuffer, \"_DeviceArray\"]:\n \"\"\"Returns a DeviceArray implementation based on arguments.\n\n This is to be used only within JAX. It will return either a PythonDeviceArray\n or a C++ equivalent implementation.\n \"\"\"\n if (isinstance(device_buffer, _CppDeviceArray)):\n\n if device_buffer.aval == aval and device_buffer._device == device:\n return device_buffer\n device_buffer = device_buffer.clone()\n device_buffer._device = device\n device_buffer.aval = aval\n device_buffer.weak_type = aval.weak_type\n return device_buffer\n\n return _DeviceArray(aval, device, device_buffer)\n\n\ndef type_is_device_array(x):\n \"\"\"Returns `True` if `x` is a non-sharded DeviceArray.\n\n Use this function instead of `type(x) is Devicearray`.\n \"\"\"\n type_x = type(x)\n return type_x is _DeviceArray or type_x is _CppDeviceArray\n\n\ndef device_array_supports_weakrefs():\n try:\n weakref.ref(DeviceArray())\n return True\n except TypeError:\n return False\n\n\nclass _DeviceArray(DeviceArray): # type: ignore\n \"\"\"A DeviceArray is an ndarray backed by a single device memory buffer.\"\"\"\n # We don't subclass ndarray because that would open up a host of issues,\n # but lax_numpy.py overrides isinstance behavior and attaches ndarray methods.\n __slots__ = [\n \"aval\", \"device_buffer\", \"_npy_value\", \"_device\", \"__weakref__\"\n ]\n __array_priority__ = 100\n\n # DeviceArray has methods that are dynamically populated in lax_numpy.py,\n # and this annotation is needed to make pytype happy.\n _HAS_DYNAMIC_ATTRIBUTES = True\n\n def __init__(self, aval: core.ShapedArray, device: Optional[Device],\n device_buffer: PyLocalBuffer):\n \"\"\"Initializer.\n\n Args:\n aval: The abstract value associated to this array (shape+dtype+weak_type).\n device: The optional sticky device. 
See\n https://jax.readthedocs.io/en/latest/faq.html#controlling-data-and-computation-placement-on-devices\n device_buffer: The underlying buffer owning the on-device data.\n \"\"\"\n DeviceArray.__init__(self)\n self.aval = aval\n self.device_buffer = device_buffer\n self._device = device\n\n self._npy_value = None\n if config.jax_enable_checks:\n assert type(aval) is ShapedArray\n npy_value = self._value\n assert npy_value.dtype == aval.dtype and npy_value.shape == aval.shape\n assert (device is None) or device is device_buffer.device()\n\n def _check_if_deleted(self):\n if self.device_buffer is deleted_buffer:\n raise RuntimeError(\"DeviceArray has been deleted.\")\n\n def block_until_ready(self):\n \"\"\"Blocks the caller until the buffer's value has been computed on device.\n\n This method is mostly useful for timing microbenchmarks that wish to\n time how long a computation takes, without transferring the result back\n to the host.\n\n Returns the buffer object (`self`).\n \"\"\"\n self._check_if_deleted()\n self.device_buffer.block_host_until_ready() # pytype: disable=attribute-error\n return self\n\n @property\n def _value(self):\n self._check_if_deleted()\n if self._npy_value is None:\n self._npy_value = self.device_buffer.to_py()\n self._npy_value.flags.writeable = False\n return self._npy_value\n\n @property\n def shape(self):\n return self.aval.shape\n\n @property\n def dtype(self):\n return self.aval.dtype\n\n @property\n def size(self):\n return prod(self.aval.shape)\n\n @property\n def ndim(self):\n return len(self.aval.shape)\n\n def copy_to_host_async(self):\n \"\"\"Requests a copy of the buffer to the host.\"\"\"\n self._check_if_deleted()\n if self._npy_value is None:\n self.device_buffer.copy_to_host_async() # pytype: disable=attribute-error\n\n def delete(self):\n \"\"\"Deletes the device array and any cached copy on the host.\n\n It is an error to access the contents of a `DeviceArray` after it has\n been deleted.\n\n Use of this method is optional; device buffers will be reclaimed\n automatically by Python when a DeviceArray object is garbage collected.\n However, it is sometimes useful to have more explicit control over the\n time of deletion.\n \"\"\"\n self.device_buffer.delete() # pytype: disable=attribute-error\n self.device_buffer = deleted_buffer\n self._npy_value = None\n\n @property\n def __cuda_array_interface__(self):\n return self.device_buffer.__cuda_array_interface__\n\n\n# Adding methods dynamically to both _DeviceArray and _CppDeviceArray\n# pylint: disable=protected-access\nfor device_array in [DeviceArray]:\n\n\n def copy(self):\n \"\"\"Returns an ndarray (backed by host memory, not device memory).\"\"\"\n return np.asarray(self)\n setattr(device_array, \"copy\", copy)\n\n def __repr__(self):\n line_width = np.get_printoptions()[\"linewidth\"]\n prefix = '{}('.format(self.__class__.__name__.lstrip('_'))\n s = np.array2string(self._value, prefix=prefix, suffix=',',\n separator=', ', max_line_width=line_width)\n dtype_str = 'dtype={})'.format(self.dtype.name)\n last_line_len = len(s) - s.rfind('\\n') + 1\n sep = ' '\n if last_line_len + len(dtype_str) + 1 > line_width:\n sep = ' ' * len(prefix)\n return \"{}{},{}{}\".format(prefix, s, sep, dtype_str)\n\n setattr(device_array, \"__repr__\", __repr__)\n\n def item(self):\n if dtypes.issubdtype(self.dtype, np.complexfloating):\n return complex(self)\n elif dtypes.issubdtype(self.dtype, np.floating):\n return float(self)\n elif dtypes.issubdtype(self.dtype, np.integer):\n return int(self)\n elif 
dtypes.issubdtype(self.dtype, np.bool_):\n return bool(self)\n else:\n raise TypeError(self.dtype)\n\n setattr(device_array, \"item\", item)\n\n def __len__(self):\n try:\n return self.aval.shape[0]\n except IndexError as err:\n raise TypeError(\"len() of unsized object\") from err # same as numpy error\n\n setattr(device_array, \"__len__\", __len__)\n\n def __iter__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\") # same as numpy error\n else:\n return self._value.__iter__()\n\n setattr(device_array, \"__iter__\", __iter__)\n\n def __reversed__(self):\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\")\n else:\n return reversed(self._value)\n\n setattr(device_array, \"__reversed__\", __reversed__)\n\n def __format__(self, format_spec):\n # Simulates behavior of https://github.com/numpy/numpy/pull/9883\n if self.ndim == 0:\n return format(self._value[()], format_spec)\n else:\n return format(self._value, format_spec)\n\n setattr(device_array, \"__format__\", __format__)\n\n def __array__(self, dtype=None, context=None):\n return np.asarray(self._value, dtype=dtype)\n\n setattr(device_array, \"__array__\", __array__)\n\n setattr(device_array, \"__str__\", partialmethod(_forward_to_value, str))\n setattr(device_array, \"__bool__\", partialmethod(_forward_to_value, bool))\n setattr(device_array, \"__nonzero__\", partialmethod(_forward_to_value, bool))\n setattr(device_array, \"__float__\", lambda self: self._value.__float__())\n setattr(device_array, \"__int__\", lambda self: self._value.__int__())\n setattr(device_array, \"__complex__\", lambda self: self._value.__complex__())\n setattr(device_array, \"__hex__\", partialmethod(_forward_to_value, hex))\n setattr(device_array, \"__oct__\", partialmethod(_forward_to_value, oct))\n setattr(device_array, \"__index__\", partialmethod(_forward_to_value, op.index))\n to_bytes = lambda self, order=\"C\": self._value.tobytes(order)\n setattr(device_array, \"tobytes\", to_bytes)\n del to_bytes\n setattr(device_array, \"tolist\", lambda self: self._value.tolist())\n\n # pickle saves and loads just like an ndarray\n setattr(device_array, \"__reduce__\",\n partialmethod(_forward_to_value, op.methodcaller(\"__reduce__\")))\n\n # explicitly set to be unhashable.\n setattr(device_array, \"__hash__\", None)\n\n # clobbered when jax.numpy is imported, but useful in tests\n setattr(device_array, \"__eq__\", lambda self, other: self._value == other)\n\n # The following methods are dynamically overridden in lax_numpy.py.\n def raise_not_implemented():\n raise NotImplementedError\n\n setattr(device_array, \"__getitem__\", lambda self, i: raise_not_implemented())\n# pylint: enable=protected-access\n\n\nclass DeletedBuffer(object): pass\ndeleted_buffer = DeletedBuffer()\n\nfor device_array in [_CppDeviceArray, _DeviceArray]:\n core.literalable_types.add(device_array)\n core.pytype_aval_mappings[device_array] = ConcreteArray\n pytype_aval_mappings[device_array] = op.attrgetter('aval')\n canonicalize_dtype_handlers[device_array] = identity\n\ndef _device_array_constant_handler(c, val, canonicalize_types=True):\n return xb.constant_general(c, val.device_buffer.to_py())\nxb.register_constant_handler(_DeviceArray, _device_array_constant_handler)\nxb.register_constant_handler(_CppDeviceArray, _device_array_constant_handler)\n\ndef _device_put_device_array(x: Union[DeviceArrayProtocol, _DeviceArray], device: Optional[Device]):\n x = _copy_device_array_to_device(x, device)\n return 
(x.device_buffer,)\ndevice_put_handlers[_CppDeviceArray] = _device_put_device_array\ndevice_put_handlers[_DeviceArray] = _device_put_device_array\n\ndef _copy_device_array_to_device(x: Union[DeviceArrayProtocol, _DeviceArray], device: Optional[xc.Device]) -> Union[DeviceArrayProtocol, _DeviceArray]:\n if device is None:\n # no copying to be done because there's no target specified\n return x\n elif xb.get_device_backend(device).platform == x.device_buffer.platform():\n # source and target platforms are the same\n if x.device_buffer.device() == device:\n # no copying to be done because source equals target\n if x._device == device:\n return x\n else:\n moved_buf = x.device_buffer # We need to change stickyness\n else:\n # move the buffer with a device-to-device copy\n moved_buf = x.device_buffer.copy_to_device(device)\n else:\n # buffers from different XLA backends are passed through the host.\n backend = xb.get_device_backend(device)\n moved_buf = backend.buffer_from_pyval(x.device_buffer.to_py(), device)\n return make_device_array(x.aval, device, moved_buf)\n\n\ndef _device_put_impl(x, device: Optional[Device] = None):\n if type_is_device_array(x):\n return _copy_device_array_to_device(x, device)\n\n try:\n a = abstractify(x)\n except TypeError as err:\n raise TypeError(\n f\"Argument '{x}' of type {type(x)} is not a valid JAX type\") from err\n return aval_to_result_handler(device, a)(*device_put(x, device))\n\ndevice_put_p = core.Primitive('device_put')\ndevice_put_p.def_impl(_device_put_impl)\ndevice_put_p.def_abstract_eval(lambda x, device=None: x)\ntranslations[device_put_p] = lambda c, x, device=None: x\nad.deflinear2(device_put_p, lambda cotangent, _, **kwargs: [cotangent])\nmasking.defvectorized(device_put_p)\n\n\ndef _zeros(c, xla_shape):\n if xla_shape.is_array():\n shape, dtype = xla_shape.dimensions(), xla_shape.numpy_dtype()\n zero = xb.constant(c, np.array(0, dtype=dtype))\n return xops.Broadcast(zero, shape)\n else:\n # It is a token\n return xops.CreateToken(c)\n\n\ndef _remat_using_cond(\n c, axis_env, in_nodes, name_stack, backend, name, call_jaxpr):\n \"\"\"Lower remat to a Conditional which always returns true. This:\n 1. Circumvents common subexpression elimination.\n 2. 
In common case of `jax.grad(jax.remat(f))`, ensures the remat blocks\n occur after the primal blocks, because cotangent is an input to the\n Conditional.\"\"\"\n # Fake condition which always selects True branch.\n rng = xops.RngUniform(xb.constant(c, np.array(0, dtype=np.float32)),\n xb.constant(c, np.array(1, dtype=np.float32)),\n xc.Shape.array_shape(xc.PrimitiveType.F32, []))\n pred = xops.Lt(rng, xb.constant(c, np.array(2, dtype=np.float32)))\n\n true_op = xops.Tuple(c, in_nodes)\n remat_subc = xb.make_computation_builder(\"remat_call_subcomputation\")\n input_op = xb.parameter(remat_subc, 0, c.get_shape(true_op), replicated=[])\n args = xla_destructure(remat_subc, input_op)\n out_nodes = jaxpr_subcomp(remat_subc, call_jaxpr, backend, axis_env, (),\n extend_name_stack(name_stack, wrap_name(name, 'remat')),\n *args)\n out_node_shapes = [remat_subc.get_shape(o) for o in out_nodes]\n remat_subc = remat_subc.build(xops.Tuple(remat_subc, out_nodes))\n\n false_op = true_op\n dummy_subc = xb.make_computation_builder(\"remat_call_dummy_subcomputation\")\n xb.parameter(dummy_subc, 0, c.get_shape(false_op), replicated=[])\n out_nodes = [_zeros(dummy_subc, s) for s in out_node_shapes]\n dummy_subc = dummy_subc.build(xops.Tuple(dummy_subc, out_nodes))\n\n return xops.Conditional(pred, true_op, remat_subc, false_op, dummy_subc)\n\n\ndef _remat_using_while(\n c, axis_env, in_nodes, name_stack, backend, name, call_jaxpr):\n \"\"\"Lower remat to a single iteration while loop.\"\"\"\n # Dummy subc for getting subcomp shapes.\n dummy_inputs = xops.Tuple(c, in_nodes)\n dummy_subc = xb.make_computation_builder(\"remat_dummy_subcomputation\")\n dummy_input_op = xb.parameter(dummy_subc, 0, c.get_shape(dummy_inputs), replicated=[])\n dummy_args = xla_destructure(dummy_subc, dummy_input_op)\n dummy_subcomp_outs = jaxpr_subcomp(\n dummy_subc, call_jaxpr, backend, axis_env, (),\n extend_name_stack(name_stack, wrap_name(name, \"remat\")), *dummy_args)\n out_node_shapes = [dummy_subc.get_shape(o) for o in dummy_subcomp_outs]\n\n i_init = xb.constant(c, np.array(0, dtype=np.int32))\n zeros_like_outs = [_zeros(c, s) for s in out_node_shapes]\n inputs = xops.Tuple(c, [i_init] + in_nodes + zeros_like_outs)\n\n cond_subc = xb.make_computation_builder(\"remat_cond_subcomputation\")\n input_op = xb.parameter(cond_subc, 0, c.get_shape(inputs), replicated=[])\n i = xops.GetTupleElement(input_op, 0)\n rng = xops.RngUniform(xb.constant(cond_subc, np.array(1, dtype=np.int32)),\n xb.constant(cond_subc, np.array(2, dtype=np.int32)),\n xc.Shape.array_shape(xc.PrimitiveType.S32, []))\n cond_subc = cond_subc.build(xops.Lt(i, rng))\n\n body_subc = xb.make_computation_builder(\"remat_body_subcomputation\")\n input_op = xb.parameter(body_subc, 0, c.get_shape(inputs), replicated=[])\n i, *args = xla_destructure(body_subc, input_op)[:len(in_nodes)+1]\n i_next = xops.Add(i, xb.constant(body_subc, np.array(1, dtype=np.int32)))\n subcomp_outs = jaxpr_subcomp(\n body_subc, call_jaxpr, backend, axis_env, (),\n extend_name_stack(name_stack, wrap_name(name, \"remat\")), *args)\n out_nodes = [i_next] + args + subcomp_outs\n body_subc = body_subc.build(xops.Tuple(body_subc, out_nodes))\n outs = xops.While(cond_subc, body_subc, inputs)\n return xops.Tuple(c, xla_destructure(c, outs)[len(in_nodes)+1:])\n\n\ndef _remat_translation_rule(c, axis_env, in_nodes,\n name_stack, backend, name, call_jaxpr,\n prevent_cse, differentiated, concrete, device=None):\n del device, concrete # Unused.\n if differentiated and prevent_cse:\n if backend == 
\"gpu\":\n return _remat_using_while(\n c, axis_env, in_nodes, name_stack, backend, name, call_jaxpr)\n else:\n return _remat_using_cond(\n c, axis_env, in_nodes, name_stack, backend, name, call_jaxpr)\n else:\n outs = jaxpr_subcomp(c, call_jaxpr, backend, axis_env, (), \"\", *in_nodes)\n return xops.Tuple(c, outs)\n\ncall_translations[pe.remat_call_p] = _remat_translation_rule # type: ignore\n\n\nad.primitive_transposes[core.named_call_p] = partial(ad.call_transpose,\n core.named_call_p)\n\n\ndef _named_call_translation_rule(c, axis_env, in_nodes, name_stack, *,\n name=\"core_call\", backend, call_jaxpr):\n subc = xb.make_computation_builder(name)\n args = [xb.parameter(subc, i, c.GetShape(n)) for i, n in enumerate(in_nodes)]\n out_nodes = jaxpr_subcomp(subc, call_jaxpr, backend, axis_env, (),\n extend_name_stack(name_stack, name), *args)\n subc = subc.Build(xops.Tuple(subc, out_nodes))\n return xops.Call(c, subc, list(in_nodes))\ncall_translations[core.named_call_p] = _named_call_translation_rule\n\n\ndef _call_translation_rule(c, axis_env, in_nodes, name_stack, *, backend,\n call_jaxpr):\n return _named_call_translation_rule(\n c, axis_env, in_nodes, name_stack, name=\"core_call\",\n backend=backend, call_jaxpr=call_jaxpr)\ncall_translations[core.call_p] = _call_translation_rule\n" ]
[ [ "numpy.take", "numpy.get_printoptions", "numpy.asarray", "numpy.dtype", "numpy.array2string", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
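The replica-group arithmetic in `_axis_groups` from the record above is plain numpy, so it can be sanity-checked outside JAX. A minimal sketch of what the same formula yields for a hypothetical 2x4 device mesh with the collective taken over the second mesh axis (the mesh shape and axis choice are illustrative, not taken from the record):

import numpy as np

def axis_groups_demo(mesh_spec, mesh_axes):
    # same arithmetic as _axis_groups: enumerate replica ids over the mesh,
    # move the collective axes to the front, then split into groups
    iota = np.arange(np.prod(mesh_spec)).reshape(mesh_spec)
    groups = np.reshape(
        np.moveaxis(iota, mesh_axes, np.arange(len(mesh_axes))),
        (np.prod(np.take(mesh_spec, mesh_axes)), -1))
    return tuple(map(tuple, groups.T))

print(axis_groups_demo((2, 4), (1,)))  # ((0, 1, 2, 3), (4, 5, 6, 7))

Each returned tuple is one replica group: the device ids that exchange data in a collective over the chosen mesh axis.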
godisreal/CrowdEgress
[ "4e284f0108e9a6ed09c07d8738fb17421b82039b" ]
[ "code/code_version1.0/agent_model_obst3.py" ]
[ "# -*-coding:utf-8-*-\n# Author: WP\n# Email: [email protected]\n\n\nimport numpy as np\nfrom math_func import *\nfrom math import *\nimport random\n#from stack import *\n\nclass Pedestrian(object):\n def __init__(self, x=1, y=1):\n # random initialize a agent\n #self.memory = np.array([0.0, 0.0], [0.0, 0.0], [0.0, 0.0])\n #self.sumAdapt = np.array([0.0, 0.0])\n self.ID = 0 #Name or index of agents\n self.inComp = 1\n self.aType = 'MoveToDest' #{'MoveToDest' 'Follow' 'Talk' 'Search'}\n\t\n self.tpre = random.uniform(6.0,22.0)\n self.maxSpeed = random.uniform(1.0,2.0)\n self.diss = random.uniform(-1.0,0.0)\n\t\n self.posX_init = random.uniform(8,24)\n self.posY_init = random.uniform(8,18)\n self.pos = np.array([self.posX_init, self.posY_init])\t\n #self.pos = np.array([10.0, 10.0])\n\n self.actualVX_init = random.uniform(0,1.6)\n self.actualVY_init = random.uniform(0,1.6)\n self.actualV = np.array([self.actualVX_init, self.actualVY_init])\n self.actualSpeed = np.linalg.norm(self.actualV) #np.array([0.0, 0.0])\n\n self.dest = np.array([60.0,10.0])\n self.exitInMind = None\n self.direction = normalize(self.dest - self.pos)\n #self.direction = np.array([0.0, 0.0])\n\n self.pathMap = []\n self.others = []\n self.targetDoors = []\n self.targetExits = []\n self.memory = []\n self.route = [] # Record the passing doors\n # in size of number of doors -1, 0,+1\n #self.memory.append(self.dest)\n \n self.desiredSpeed = 2.0 #random.uniform(0.3,2.3) #1.8\n self.desiredV = self.desiredSpeed*self.direction\n self.desiredSpeedMode = 'random' #{'random' 'fixed' 'increase' 'decrease'}\n \n self.tau = random.uniform(8,16) #10.0\n self.drivenAcc =(self.desiredV - self.actualV)/self.tau\n \n self.mass = 60 #random.uniform(40,90) #60.0\n self.radius = 0.35 #1.6 #0.3\n\n self.wallrepF= np.array([0.0,0.0])\n self.groupF= np.array([0.0,0.0])\n self.selfrepF= np.array([0.0,0.0])\n self.doorF= np.array([0.0,0.0])\n\t\n self.interactionRange = 3.0 #Distance for communication (talking)\n self.p = 0.2\n self.pMode = 'random' #{'random' 'fixed' 'increase' 'decrease'}\n \n self.bodyFactorA = 12.0\n self.slideFricFactorA = 240000\n\t\n # /Group Social Force\n self.A_CF = 1 #30/20000 #2\n self.B_CF = 1 #random.uniform(0.8,1.6) #0.8 #0.08\n\t\n # Social Force\n self.A_SF = 200 #2\n self.B_SF = 0.8 #random.uniform(0.8,1.6) #0.8 #0.08\n\t\n # Wall Force / Door Force\n self.A_WF = 60 #200 #60 #2\n self.B_WF = 0.3 #0.2 #0.8 #3.2 #2.2 #random.uniform(0.8,1.6) #0.08\n \n self.bodyFactorW = 12.0\n self.slideFricFactorW = 240000\n\t\n self.Goal = 0\n self.timeOut = 0.0\n\t\n self.desiredV_old = np.array([0.0, 0.0])\n self.actualV_old = np.array([0.0, 0.0])\n\t\n self.lamb = random.uniform(0.2,0.4)\n self.diw_desired = 0.6\n\t\n self.ratioV = 1\n self.stressLevel = 1\n\t\n self.color = [255, 0, 0] #blue\n\t\n self.moving_tau = 0.7\n self.tpre_tau = 1.6\n self.talk_tau = 2.6\n self.talk_prob = 0.6\n \n print('X and Y Position:', self.pos)\n print('self.direction:', self.direction)\n \n \n def shoulders(self):\n if np.allclose(self.actualV, np.zeros(2)):\n direction = self.direction\n direction = normalize(direction)\n else: \n direction = np.array([-self.actualV[1], self.actualV[0]])\n direction = normalize(direction)\n\t\n leftPx = self.pos + self.radius*direction\n rightPx = self.pos - self.radius*direction\t\n return leftPx, rightPx\n \n\n def adaptDirection(self):\n self.direction = normalize(self.destmemeory[-1]-self.pos)\n if np.allclose(self.direction, np.zeros(2)):\n self.direction = np.zeros(2)\n return self.direction\n 
\n\t\n def adaptVel(self):\n deltaV = self.desiredV - self.actualV\n if np.allclose(deltaV, np.zeros(2)):\n deltaV = np.zeros(2)\n return deltaV*self.mass/self.tau\n\n\n def adaptP(self, flag = 'random'):\n if flag == 'random':\n self.p = self.p + random.uniform(-0.3, 0.3)\n self.p = max(-1.0, min(1.0, self.p))\n elif flag == 'increase' and self.p<1.0:\n # Use randome walk or not ???\n self.p = self.p + random.uniform(0.0, 0.3)\n self.p = min(1.0, self.p)\n elif flag == 'decrease' and self.p>-1.0:\n self.p = self.p + random.uniform(-0.3, 0.0)\n self.p = max(-1.0, self.p)\n return None\n\t\n \n def adaptDesiredSpeed(self, flag = 'random'):\n if flag == 'random':\n self.desiredSpeed = self.desiredSpeed + random.uniform(-0.3, 0.3)\n self.desiredSpeed = max(0.0, min(3.0, self.desiredSpeed))\n elif flag == 'increase' and self.desiredSpeed<3.0:\n self.desiredSpeed = self.desiredSpeed + random.uniform(0.0, 0.3)\n self.desiredSpeed = min(3.0, self.desiredSpeed)\n elif flag == 'decrease' and self.desiredSpeed>0.0:\n self.desiredSpeed = self.desiredSpeed + random.uniform(-0.3, 0.0)\n self.desiredSpeed = max(0.0, self.desiredSpeed)\n return None\n\t\n\n def selfRepulsion(self, Dfactor=1, Afactor=1, Bfactor=1):\n first = -self.direction*Afactor*self.A_CF*np.exp((self.radius*Dfactor)/(self.B_CF*Bfactor))*(self.radius*Dfactor)\n return first\n\t\n\n def changeAttr(self, x=1, y=1, Vx=1, Vy=1):\n self.posX = x\n self.posY = y\n self.pos = np.array([self.posX, self.posY])\n self.actualVX = Vx\n self.actualVY = Vy\n self.actualV = np.array([self.actualVX, self.actualVY])\n\n\n def showAttr(self):\n #print('test')\n print('X and Y Position:', self.pos)\n print('self.direction:', self.direction)\n print('self.velocity:', self.actualV)\n \n\n def cohesiveForce(self, other, Dfactor=1, Afactor=1, Bfactor=1):\n\n # self.A = AMatrix(selfID, otherID)\n # self.B = BMatrix(selfID, otherID)\n #phiij = vectorAngleCos(self.actualV , (other.pos - self.pos))\n #anisoF = self.lamb + (1-self.lamb)*(1+cos(phiij))/2\n\t\n rij = self.radius + other.radius\n dij = np.linalg.norm(self.pos - other.pos)\n nij = (self.pos - other.pos)/dij\n first = Afactor*self.A_CF*np.exp((rij*Dfactor-dij)/(self.B_CF*Bfactor))*nij*(rij*Dfactor-dij) #*anisoF\n return first\n\t\n\n def agentForce(self, other):\n rij = self.radius + other.radius\n dij = np.linalg.norm(self.pos - other.pos)\n nij = (self.pos - other.pos)/dij\n first = self.A_SF*np.exp((rij-dij)/self.B_SF)*nij\n\t\n second = self.bodyFactorA*ggg(rij-dij)*nij\n\t\n #Issue: nij is a vector directing from j to i. \n #*(rij*Dfactor-dij)/20000+ self.bodyFactor*g(rij-dij)*nij/10000\n tij = np.array([-nij[1],nij[0]])\n deltaVij = (self.actualV - other.actualV)*tij\n third = self.slideFricFactorA*ggg(rij-dij)*deltaVij*tij\n #third = 300*exp(rij-dij)*deltaVij*tij/dij\n\t\n return first + second #+ third\n \n\n ############################\n # This is not used any more. \n def physicalForce(self, other):\n rij = self.radius + other.radius\n dij = np.linalg.norm(self.pos - other.pos)\n nij = (self.pos - other.pos)/dij\n first = self.bodyFactorA*g(rij-dij)*nij\n #print >> f, \"first:\", first, \"/n\"\n\t\n return first\n # This is not used any more. 
\n ############################\n\n \n def doorForce(self, door, mode='edge', fuzzydir=0.0):\n if door.inside(self.pos)==False:\n doordir = door.direction(door.arrow)\n agentdir = door.pos-self.pos\n if np.dot(doordir, agentdir)>=fuzzydir:\n ri = self.radius\n #mid= (np.array([self.params[0], self.params[1]]) + np.array([self.params[2], self.params[3]]))/2.0\n if mode=='pos':\n dist=np.linalg.norm(door.pos - self.pos)\n dire = normalize(door.pos-self.pos)\n elif mode == 'edge':\n edge1, edge2, edge3, edge4 = door.edge()\n dist1 = np.linalg.norm(edge1 - self.pos)\n dist2 = np.linalg.norm(edge2 - self.pos)\n dist3 = np.linalg.norm(edge3 - self.pos)\n dist4 = np.linalg.norm(edge4 - self.pos)\n dist_list = [dist1, dist2, dist3, dist4]\n dist = min(dist_list)\n dist_index =np.argmin(dist_list)\n dire = normalize(door.pos-self.pos) # Need improvement\n #if dist1<dist2:\n # dist=dist1\n # dire = normalize(edge1-self.pos)\n #else:\n # dist=dist2\n # dire = normalize(edge2-self.pos)\n \n #first = self.A_WF*np.exp((ri-dist)/self.B_WF)*dire\n second = 760*exp((ri-dist)/0.3)*dire #1.8)*dire\n return second #first + second\n else:\n return np.array([0.0, 0.0])\n else:\n if door.arrow == 1 or door.arrow == -1:\n w1= np.array([door.params[0], door.params[1]])\n w2 = np.array([door.params[2], door.params[1]])\n diw, niw = distanceP2L(self.pos, w1, w2)\n first = -self.A_WF*np.exp((self.diw_desired-diw)/self.B_WF)*niw\n #second = -600*exp((2*ri-diw)/0.2)*niw\n #result1 = self.wall_LineForce(w1, w2)\n\n w1= np.array([door.params[0], door.params[3]])\n w2 = np.array([door.params[2], door.params[3]])\n diw, niw = distanceP2L(self.pos, w1, w2)\n second = -self.A_WF*np.exp((self.diw_desired-diw)/self.B_WF)*niw\n #result2 = self.wall_LineForce(w1, w2)\n\n return first + second\n #return result1 + result2\n \n if door.arrow == 2 or door.arrow == -2:\n w1= np.array([door.params[0], door.params[1]])\n w2= np.array([door.params[0], door.params[3]])\n diw, niw = distanceP2L(self.pos, w1, w2)\n first = -self.A_WF*np.exp((self.diw_desired-diw)/self.B_WF)*niw\n #result1 = self.wall_LineForce(w1, w2)\n\n w1= np.array([door.params[2], door.params[1]])\n w2= np.array([door.params[2], door.params[3]])\n diw, niw = distanceP2L(self.pos, w1, w2)\n second = -self.A_WF*np.exp((self.diw_desired-diw)/self.B_WF)*niw\n #result2 = self.wall_LineForce(w1, w2)\n\n return first + second\n #return result1 + result2\n \n if door.arrow ==0:\n return np.array([0.0, 0.0])\n # if abs(self.actualV[0]) > abs(self.actualV[1]):\n\n\n def wall_LineForce(self, w1, w2):\n #ftest = open(\"wallForceTest.txt\", \"w+\")\n ri = self.radius\n #w1 = np.array([wall.params[0],wall.params[1]])\n #w2 = np.array([wall.params[2],wall.params[3]])\n diw, niw = distanceP2L(self.pos, w1, w2)\n if diw>0.6:\n result=np.array([0.0, 0.0])\n return result\n else:\n #first = -260*np.exp((self.diw_desired-diw)/0.6)*niw #3.2)*niw\n first = -self.A_WF*np.exp((self.diw_desired-diw)/self.B_WF)*niw\n #second = -60*exp((2*ri-diw)/0.2)*niw\n #second = -self.bodyFactorW*ggg(2*ri-diw)*niw*200000\n #Issue: the diretion of niw is from the agent to the wall. 
Check Needed!\n #print >> ftest, 'first:', first, '\\n'\n \n #tiw = np.array([-niw[1],niw[0]])\n #third = self.slideFricFactorW*ggg(ri-diw)*(self.actualV*tiw)*tiw/1000\n #print >> ftest, 'second:', second, '\\n'\n \n #ftest.close()\n if diw>=ri:\n second = np.array([0.0, 0.0])\n else:\n second = -self.bodyFactorW*(ri-diw)*niw*200000\n return first + second #+ third\n\n \n def wallForce(self, wall):\n if wall.mode == 'line':\n w1 = np.array([wall.params[0],wall.params[1]])\n w2 = np.array([wall.params[2],wall.params[3]])\n result = self.wall_LineForce(w1, w2)\n return result\n\n elif wall.mode == 'rect':\n\n ########################\n ### p1-----------------p4 ###\n ### | | ###\n ### | | ###\n ### | | ###\n ### p2-----------------p3 ###\n ########################\n \n p1 = np.array([wall.params[0], wall.params[1]])\n p2 = np.array([wall.params[0], wall.params[3]])\n p3 = np.array([wall.params[2], wall.params[3]])\n p4 = np.array([wall.params[2], wall.params[1]])\n\n dist1 = np.linalg.norm(p1 - p2)\n dist2 = np.linalg.norm(p2 - p3)\n\n if dist1<0.3 and dist2/dist1>10.0:\n w1=(p1+p2)/2.0\n w2=(p3+p4)/2.0\n result = self.wall_LineForce(w1, w2)\n return result\n\n if dist2<0.3 and dist1/dist2>10.0:\n w1=(p1+p4)/2.0\n w2=(p2+p3)/2.0\n result = self.wall_LineForce(w1, w2)\n return result\n \n w1 = np.array([wall.params[0],wall.params[1]])\n w2 = np.array([wall.params[0],wall.params[3]])\n result0 = self.wall_LineForce(w1, w2)\n\n w1 = np.array([wall.params[2],wall.params[1]])\n w2 = np.array([wall.params[2],wall.params[3]])\n result2 = self.wall_LineForce(w1, w2)\n\n w1 = np.array([wall.params[0],wall.params[1]])\n w2 = np.array([wall.params[2],wall.params[1]])\n result1 = self.wall_LineForce(w1, w2)\n\n w1 = np.array([wall.params[0],wall.params[3]])\n w2 = np.array([wall.params[2],wall.params[3]])\n result3 = self.wall_LineForce(w1, w2)\n\n result = result0+result1+result2+result3\n return result\n\n\n def wallOnRoute(self, wall, mode=1.0, lookhead=3.0):\n\n p1 = self.pos\n p2 = self.pos + (mode*self.desiredV+(1-mode)*self.actualV)*lookhead\n\t\n #if mode==\"dv\":\n # p2 = self.pos + self.desiredV\n #elif mode==\"av\":\n # p2 = self.pos + self.actualV\n #else:\n # print 'Error: mode must be either \"dv\" or \"av\"!'\n # return\n\t\n # The time interval to look ahead is an important issue\n # It is a major issue to use whether actualV or desiredV\n\n if wall is None:\n return None, None, None\n \n if wall.mode == 'line':\n w1 = np.array([wall.params[0],wall.params[1]])\n w2 = np.array([wall.params[2],wall.params[3]])\n #dist = self.wallOnRoute_Line(w1, w2, mode, lookhead)\n fuzzyPara = random.uniform(0.0,2.0)\n result, flag = lineIntersection(p1, p2, w1, w2, 0.0, fuzzyPara)\n if result==None:\n dist = None\n else:\n dist = np.linalg.norm(self.pos - result)\n return result, dist, normalize(w2-w1)\n \n if wall.mode =='rect':\n result = None\n dist=None\n arrow=None\n\n w1 = np.array([wall.params[0],wall.params[1]])\n w2 = np.array([wall.params[0],wall.params[3]])\n fuzzyPara = random.uniform(0.0,2.0)\n result0, flag = lineIntersection(p1, p2, w1, w2, 0.0, fuzzyPara)\n if result0==None:\n dist0 = None\n else:\n dist0 = np.linalg.norm(self.pos - result0)\n #dist0 = self.wallOnRoute_Line(w1, w2, mode, lookhead)\n if dist0!=None:\n if dist==None:\n result = result0\n dist=dist0\n arrow=w2-w1\n elif dist0<dist:\n result = result0\n dist=dist0\n arrow=w2-w1\n\n w1 = np.array([wall.params[2],wall.params[1]])\n w2 = np.array([wall.params[2],wall.params[3]])\n fuzzyPara = random.uniform(0.0,2.0)\n result2, 
flag = lineIntersection(p1, p2, w1, w2, 0.0, fuzzyPara)\n if result2==None:\n dist2 = None\n else:\n dist2 = np.linalg.norm(self.pos - result2)\n #dist2 = self.wallOnRoute_Line(w1, w2, mode, lookhead)\n if dist2!=None:\n if dist==None:\n result = result2\n dist=dist2\n arrow=w2-w1\n elif dist2<dist:\n result = result2\n dist=dist2\n arrow=w2-w1\n\n w1 = np.array([wall.params[0],wall.params[1]])\n w2 = np.array([wall.params[2],wall.params[1]])\n fuzzyPara = random.uniform(0.0,2.0)\n result1, flag = lineIntersection(p1, p2, w1, w2, 0.0, fuzzyPara)\n if result1==None:\n dist1 = None\n else:\n dist1 = np.linalg.norm(self.pos - result1)\n \n #dist1 = self.wallOnRoute_Line(w1, w2, mode, lookhead)\n if dist1!=None:\n if dist==None:\n result=result1\n dist=dist1\n arrow=w2-w1\n elif dist1<dist:\n result=result1\n dist=dist1\n arrow=w2-w1\n\n w1 = np.array([wall.params[0],wall.params[3]])\n w2 = np.array([wall.params[2],wall.params[3]])\n fuzzyPara = random.uniform(0.0,2.0)\n result3, flag = lineIntersection(p1, p2, w1, w2, 0.0, fuzzyPara)\n if result3==None:\n dist3 = None\n else:\n dist3 = np.linalg.norm(self.pos - result3)\n\n #dist3 = self.wallOnRoute_Line(w1, w2, mode, lookhead)\n if dist3!=None:\n if dist==None:\n result=result3\n dist=dist3\n arrow=w2-w1\n elif dist3<dist:\n result=result3\n dist=dist3\n arrow=w2-w1\n\n if arrow is not None: # 'arrow' is an ndarray once set; 'arrow!=None' is an ambiguous elementwise comparison\n arrow=normalize(arrow)\n return result, dist, arrow\n\n \n \n #####################################\n # how an agent interacts with others\n #####################################\n \n def opinionDynamics(self):\n\t\n # self.D = DMatrix(selfID, otherID)\n # self.A = AMatrix(selfID, otherID)\n # self.B = BMatrix(selfID, otherID)\n # dij = np.linalg.norm(self.pos - other.pos)\n \n otherMovingDir = np.array([0.0, 0.0])\n otherMovingSpeed = 0.0\n otherMovingNum = 0\n\t\n for idaj, aj in enumerate(self.others):\n otherMovingDir += normalize(aj.actualV) #/DFactor[idai, idaj]*AFactor[idai, idaj]\n otherMovingSpeed += np.linalg.norm(aj.actualV) #/DFactor[idai, idaj]*AFactor[idai, idaj]\n otherMovingNum += 1\n\t\t\n #nij = (self.pos - other.pos)/dij\n \n #if dij < self.interactionRange:\n\t# self.dest = self.p*self.dest + (1-self.p)*other.dest\n\n\t#otherDirection = np.array([0.0, 0.0])\n\t#otherSpeed = 0.0\n\t#num = 0\n\t#otherV = np.array([0.0, 0.0])\n\n #if dij < self.interactionRange:\n\t #self.desiredV = self.p*self.desiredV + (1-self.p)*other.actualV\n\t #otherDirection = normalize(other.actualV)\n\t #otherSpeed = np.linalg.norm(other.actualV)\n\t #num = 1\n\t #otherV = other.actualV\n\t\n if otherMovingNum == 0:\n return otherMovingDir, 0.0 # no neighbors: avoid division by zero\n return otherMovingDir, otherMovingSpeed/otherMovingNum\n \n\n def opinionExchange(self, other, mode=1.0):\n otherV= mode*other.desiredV+(1-mode)*other.actualV\n self.desiredV = self.p*self.desiredV + (1-self.p)*otherV\n return\n\n\n def findDoorDir(self, direction):\n if direction == 1:\n return np.array([1.0, 0.0])\n elif direction == -1:\n return np.array([-1.0, 0.0])\n elif direction == 2:\n return np.array([0.0, 1.0])\n elif direction == -2:\n return np.array([0.0, -1.0])\n \n \n def selectTarget(self, exit2door):\n dest = None\n doorOK = None\n exitOK = None\n\n for exit in self.targetExits:\n #exit.computePos()\n if exit.inside(self.pos):\n return exit\n else: \n dest_temp = np.linalg.norm(exit.pos - self.pos)\n dir1 = exit.direction(exit.arrow)\n # temp = self.route[exit.id]\n # dir1 = self.findDoorDir(temp)\n dir2 = exit.pos-self.pos\n if dest ==None or dest>dest_temp:\n if np.dot(dir1, dir2)>=0:\n dest=dest_temp\n exitOK = exit\n\n # Now the nearest exit is found: 
exitOK\n # Compare it with the exitInMind: Change target exit or not?\n # self.others is to be taken into account. \n \n if exitOK != None:\n self.pathMap = exit2door[exitOK.id]\n return exitOK\n else:\n self.pathMap = exit2door[self.exitInMind.id]\n \n for door in self.targetDoors:\n #door.computePos()\n if door.inside(self.pos):\n return door\n #if self.pos[0]>=door.params[0] and self.pos[0]<=door.params[2]:\n # if self.pos[1]>=door.params[1] and self.pos[1]<=door.params[3]:\n # return door\n else:\n if len(self.route)>0:\n if (self.route[len(self.route)-1] is door.pos) and len(self.targetDoors)>1:\n continue\n dest_temp = np.linalg.norm(door.pos - self.pos)\n dir1 = door.direction(self.pathMap[door.id]) #door.direction(door.arrow) #\n dir2 = door.pos-self.pos\n if dest ==None or dest>dest_temp:\n if np.dot(dir1, dir2)>=0:\n dest=dest_temp\n doorOK = door\n \n return doorOK\n\n\n def moveToAgent(self):\n dest = None\n someoneOK = None\n for aj in self.others:\n dest_temp = np.linalg.norm(aj.pos - self.pos)\n dir1 = self.direction\n dir2 = aj.pos-self.pos\n if dest ==None or dest>dest_temp:\n #if np.dot(dir1, dir2)>0:\n dest=dest_temp\n someoneOK = aj\n return someoneOK\n\n \n def findVisibleTarget(self, walls, doors):\n resultDoors=[]\n for iddoor, door in enumerate(doors):\n if door.inComp ==0:\n continue\n if door.inside(self.pos):\n resultDoors.append(door)\n continue\n #edge1 = np.array([door.params[0], door.params[1]])\n #edge2 = np.array([door.params[2], door.params[3]])\n edge1, edge2, edge3, edge4 = door.edge()\n isVisibleDoor=True\n for wall in walls:\n if wall.inComp ==0:\n continue\n result1, flag1 = wall.wallInBetween(self.pos, edge1)\n result2, flag2 = wall.wallInBetween(self.pos, edge2)\n result3, flag3 = wall.wallInBetween(self.pos, edge3)\n result4, flag4 = wall.wallInBetween(self.pos, edge4)\n result5, flag5 = wall.wallInBetween(self.pos, door.pos)\n if flag1 and flag2 and flag3 and flag4 and flag5:\n isVisibleDoor=False\n break\n #elif not flag1:\n # if np.dot(door.pos-edge1, door.arrow)<0:\n # isVisibleDoor=False \n if isVisibleDoor:\n resultDoors.append(door)\n return resultDoors\n \n\nif __name__ == '__main__':\n \n Ped1 = Agent()\n Ped2 = Agent()\n f1 = Ped1.cohesiveForce(Ped2)\n f2 = Ped2.cohesiveForce(Ped1)\n Ped1.opinionExchange(Ped2)\n Ped2.opinionExchange(Ped1)\n print('----------Testing starts here--------')\n print('Other Opinion', f1)\n print('Other Opinion', f2)\n Ped1.showAttr()\n Ped1.showAttr()\n v = Ped1.adaptVel\n Ped1.changeAttr(1,1)\n Ped2.changeAttr(2,2)\n\n\n\t\n" ]
[ [ "numpy.dot", "numpy.linalg.norm", "numpy.argmin", "numpy.array", "numpy.exp", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ddebby/AI-Lab
[ "247f5ed0d93955e061e65d06f48cf9c7afc6b4a8" ]
[ "0.Demo/2.object_detection_by_tensorflow/9.libs/predict-5classes.py" ]
[ "# USAGE\n# python predict.py --model lisa/experiments/exported_model/frozen_inference_graph.pb \\\n# \t--image lisa/vid8/frameAnnotations-MVI_0120.MOV_annotations/stop_1324866406.avi_image4.png \\\n\n# import the necessary packages\n#from object_detection.utils import label_map_util\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport imutils\nimport cv2\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-m\", \"--model\", required=True,\n\thelp=\"base path for frozen checkpoint detection graph\")\nap.add_argument(\"-i\", \"--image\", required=True,\n\thelp=\"path to input image\")\nap.add_argument(\"-c\", \"--min-confidence\", type=float, default=0.5,\n\thelp=\"minimum probability used to filter weak detections\")\nargs = vars(ap.parse_args())\n\n# initialize a set of colors for our class labels\n#COLORS = np.random.uniform(0, 255, size=(5, 3))\nCOLORS = [\n\t [255,0,0], \t\n [0,255,255],\n [255,255,255],\n [0,0,255],\n [0,255,0]\n\t]\n# initialize the model\nmodel = tf.Graph()\n\n# create a context manager that makes this model the default one for\n# execution\nwith model.as_default():\n\t# initialize the graph definition\n\tgraphDef = tf.GraphDef()\n\n\t# load the graph from disk\n\twith tf.gfile.GFile(args[\"model\"], \"rb\") as f:\n\t\tserializedGraph = f.read()\n\t\tgraphDef.ParseFromString(serializedGraph)\n\t\ttf.import_graph_def(graphDef, name=\"\")\n\n# load the class labels from disk\n#labelMap = label_map_util.load_labelmap(args[\"labels\"])\n#categories = label_map_util.convert_label_map_to_categories(\n#\tlabelMap, max_num_classes=args[\"num_classes\"],\n#\tuse_display_name=True)\n#categoryIdx = label_map_util.create_category_index(categories)\n\ncategoryIdx = {1: {'id': 1, 'name': 'blue'},\n\t\t\t\t2: {'id': 2, 'name': 'yellow'},\n\t\t\t\t3: {'id': 3, 'name': 'white'},\n\t\t\t\t4: {'id': 4, 'name': 'red'},\n\t\t\t\t5: {'id': 5, 'name': 'none'}}\n# create a session to perform inference\nwith model.as_default():\n\twith tf.Session(graph=model) as sess:\n\t\t# grab a reference to the input image tensor and the boxes\n\t\t# tensor\n\t\timageTensor = model.get_tensor_by_name(\"image_tensor:0\")\n\t\tboxesTensor = model.get_tensor_by_name(\"detection_boxes:0\")\n\n\t\t# for each bounding box we would like to know the score\n\t\t# (i.e., probability) and class label\n\t\tscoresTensor = model.get_tensor_by_name(\"detection_scores:0\")\n\t\tclassesTensor = model.get_tensor_by_name(\"detection_classes:0\")\n\t\tnumDetections = model.get_tensor_by_name(\"num_detections:0\")\n\n\t\t# load the image from disk\n\t\timage = cv2.imread(args[\"image\"])\n\t\t(H, W) = image.shape[:2]\n\n\t\t# check to see if we should resize along the width\n\t\tif W > H and W > 1000:\n\t\t\timage = imutils.resize(image, width=1000)\n\n\t\t# otherwise, check to see if we should resize along the\n\t\t# height\n\t\telif H > W and H > 1000:\n\t\t\timage = imutils.resize(image, height=1000)\n\n\t\t# prepare the image for detection\n\t\t(H, W) = image.shape[:2]\n\t\toutput = image.copy()\n\t\timage = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2RGB)\n\t\timage = np.expand_dims(image, axis=0)\n\n\t\t# perform inference and compute the bounding boxes,\n\t\t# probabilities, and class labels\n\t\t(boxes, scores, labels, N) = sess.run(\n\t\t\t[boxesTensor, scoresTensor, classesTensor, numDetections],\n\t\t\tfeed_dict={imageTensor: image})\n\n\t\t# squeeze the lists into a single dimension\n\t\tboxes = np.squeeze(boxes)\n\t\tscores = 
np.squeeze(scores)\n\t\tlabels = np.squeeze(labels)\n\n\t\t# loop over the bounding box predictions\n\t\tfor (box, score, label) in zip(boxes, scores, labels):\n\t\t\t# if the predicted probability is less than the minimum\n\t\t\t# confidence, ignore it\n\t\t\tif score < args[\"min_confidence\"]:\n\t\t\t\tcontinue\n\n\t\t\t# scale the bounding box from the range [0, 1] to [W, H]\n\t\t\t(startY, startX, endY, endX) = box\n\t\t\tstartX = int(startX * W)\n\t\t\tstartY = int(startY * H)\n\t\t\tendX = int(endX * W)\n\t\t\tendY = int(endY * H)\n\n\t\t\t# draw the prediction on the output image\n\t\t\tlabel = categoryIdx[label]\n\t\t\tidx = int(label[\"id\"]) - 1\n\t\t\tlabel = \"{}: {:.2f}\".format(label[\"name\"], score)\n\t\t\tcv2.rectangle(output, (startX, startY), (endX, endY),\n\t\t\t\tCOLORS[idx], 2)\n\t\t\ty = startY - 10 if startY - 10 > 10 else startY + 10\n\t\t\tcv2.putText(output, label, (startX, y),\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.3, COLORS[idx], 1)\n\n\t\t# show the output image\n\t\tcv2.imshow(\"Output\", output)\n\t\tcv2.waitKey(0)\n" ]
[ [ "tensorflow.Graph", "tensorflow.import_graph_def", "numpy.expand_dims", "tensorflow.gfile.GFile", "numpy.squeeze", "tensorflow.Session", "tensorflow.GraphDef" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CONDUITlab/CCDEF
[ "b0ec05870d0d75afe69e551ff8ad4521050577a0" ]
[ "ccdef/convert/wfdb_clinical_sql.py" ]
[ "\"\"\"\nConvert MIMIC III data to CCDEF (hdf5 based)\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport psycopg2 \n#import sqlite3\nimport h5py\nimport json\nimport wfdb\nfrom ccdef._utils import df_to_sarray\n\ndef open_db():\n schema = 'mimiciii'\n con = psycopg2.connect(dbname='mimic', user='queryuser', host='10.100.3.150', password='conduit')\n cur = con.cursor()\n cur.execute('SET search_path to {}'.format(schema))\n \n return (con)\n\ndef patient_id_from_file(filename):\n return (filename.split('/')[-1].split('-')[0][1:].strip(\"0\"))\n\ndef extract_notes(infile):\n \"\"\"\n extract_notes(infile)\n\n Take all notes in the mimic3 db for infile\n \n TODO: Will need to build option to include only labs/notes in the period where there is waveform/numeric data\n but for now we include everything so it is available for context (eg echo reports)\n\n Parameters\n ----------\n\n infile: string\n filename of a wfdb file from the MIMIC3 matched dataset\n origin: datetime\n the base datetime for the file\n\n return: notes\n DataFrame containing notes, times, etc\n \"\"\"\n\n # get patient ID\n subj_id = patient_id_from_file(infile)\n \n #get lab_events for this patient\n con = open_db()\n \n query = \\\n \"\"\"\n SELECT i.chartdate, i.charttime, i.description, i.category, i.text\n FROM noteevents i\n WHERE subject_id = {};\n \"\"\".format(subj_id)\n\n notes = pd.read_sql_query(query,con)\n \"\"\" change time stamp to seconds from origin \"\"\"\n \n origin = pd.to_datetime(wfdb.rdheader(infile).base_datetime)\n notes.insert(0, 'time', '')\n for idx, row in notes.iterrows():\n notes['time'].iloc[idx]=int((pd.to_datetime(row['charttime'])-origin).total_seconds())\n del notes['charttime']\n del notes['chartdate']\n\n return (notes)\n\ndef write_notes(notes_df, outfile):\n \"\"\"\n write_notes(notes_df, infile)\n\n Write notes from notes_df to infile \n\n Parameters\n ----------\n\n infile: string\n filename of a wfdb file from the MIMIC3 matched dataset\n\n notes_df: notes\n DataFrame containing notes, times, etc\n \"\"\"\n \n arr, saType = df_to_sarray(notes_df)\n \n with h5py.File(outfile, 'a') as f:\n clin = f.require_group('/clinical')\n note_ds = f.create_dataset('clinical/notes', maxshape = (None, ), data = arr, dtype = saType,\n compression=\"gzip\", compression_opts = 9, shuffle = True)\n \n \ndef extract_labs(infile):\n \"\"\"\n extract_labs(infile)\n\n Take all lab values in the mimic3 db for infile\n \n TODO: Will need to build option to include only labs/notes in the period where there is waveform/numeric data\n but for now we include everything so it is available for context (eg echo reports)\n\n Parameters\n ----------\n\n infile: string\n filename of a wfdb file from the MIMIC3 matched dataset\n origin: datetime\n the base datetime for the file\n\n return: notes\n DataFrame containing notes, times, etc\n \"\"\"\n\n # get patient ID\n subj_id = patient_id_from_file(infile)\n\n #get basetime\n origin = wfdb.rdheader(infile).base_datetime\n \n #get lab_events for this patient\n con = open_db()\n \n query = \\\n \"\"\"\n SELECT e.charttime, e.itemid, e.value, e.valuenum, e.valueuom, e.flag,\n i.label, i.fluid, i.category, i.loinc_code\n FROM labevents e\n INNER JOIN d_labitems i\n ON e.itemid = i.itemid\n WHERE subject_id = {};\n \"\"\".format(subj_id)\n labs = pd.read_sql_query(query,con)\n\n #convert time\n origin = pd.to_datetime(wfdb.rdheader(infile).base_datetime)\n labs.insert(0, 'time', '')\n\n for idx, row in labs.iterrows():\n 
labs['time'].iloc[idx]=int((pd.to_datetime(row['charttime'])-origin).total_seconds())\n del labs['charttime']\n\n return (labs)\n\ndef write_labs(labs_df, outfile):\n\n # TODO: convert flag to category and encode in .flag_info\n arr, saType = df_to_sarray(labs_df)\n\n\n # dt = h5py.special_dtype(vlen=str)\n # comp_type = np.dtype([('time', dt), ('testid', 'i8'), ('value', dt), ('valuenum', 'f8'), ('flag', dt)])\n # define array for writing to dataset\n# arr_data = np.empty((0,), dtype=comp_type)\n# for idx, row in labs_df.iterrows():\n# arr = np.array([(str(row['charttime']), row['itemid'], row['value'], row['valuenum'], row['flag'])], \n# dtype = comp_type)\n# arr_data = np.append(arr_data, arr)\n \n \n #create metadata\n labs_grouped = labs_df.groupby('itemid')['itemid','label','category','fluid','valueuom','loinc_code'].first()\n labs_grouped = labs_grouped.set_index('itemid')\n test_info = labs_grouped.T.to_dict('dict')\n \n with h5py.File(outfile, 'a') as f:\n clin = f.require_group('/clinical')\n lab_ds = f.create_dataset('clinical/labs', maxshape = (None, ), data = arr, dtype=saType,\n compression=\"gzip\", compression_opts = 9, shuffle = True)\n lab_ds.attrs['.test_info'] = json.dumps(test_info) \n \ndef labs_to_df (dset):\n # extract values from dataset and convert to dataframe\n \n # load metadata\n tests_dict = json.loads(dset.attrs['.test_info'])\n info_df = pd.DataFrame.from_dict(tests_dict, orient='columns').T\n\n # create column for merge\n info_df['testid'] = info_df.index.astype(int)\n \n #load labs\n labs_df = pd.DataFrame(dset[:])\n \n #merge\n \n df = pd.merge(labs_df, info_df, on = 'testid')\n \n return (df)\n\ndef get_admissions(subj_id):\n con = open_db()\n \n query = \\\n \"\"\"\n SELECT *\n FROM admissions\n WHERE subject_id = {};\n \"\"\".format(subj_id)\n\n admits = pd.read_sql_query(query,con) \n return (admits)\n\ndef find_admission (filename):\n '''\n Get admission information from MIMIC III filename (contains subj ID)\n Return demographic information\n '''\n subj_id = 0\n hadm_id = 0\n diagnosis = ''\n expired = ''\n death_time = ''\n ethnicity = ''\n \n subj_id = patient_id_from_file(filename)\n print('searching {}'.format(subj_id))\n \n # get additional demographics\n con = open_db()\n query = \\\n \"\"\"\n SELECT i.subject_id, i.gender, i.dob \n FROM patients i\n WHERE subject_id = {};\n \"\"\".format(subj_id)\n\n demo = pd.read_sql_query(query,con)\n gender = demo.gender.values[0]\n dob = demo.dob.values[0]\n \n record = wfdb.rdheader(filename)\n sig_start = record.base_datetime\n print('Signal file start {}'.format(sig_start))\n\n admits = get_admissions(subj_id)\n \n for idx, row in admits.iterrows():\n adm_time = row['admittime']\n dsc_time = row['dischtime']\n\n print('Admission # {}, in at {} out at {}'.format(row['hadm_id'], adm_time, dsc_time))\n\n if (sig_start > adm_time) and (sig_start < dsc_time):\n print ('Subject {}, record {}, diagnosis: {}. 
HADM {} '.format(row['subject_id'], \n record.record_name, row['diagnosis'], row['hadm_id']))\n hadm_id = row['hadm_id']\n diagnosis = row['diagnosis']\n expired = row['hospital_expire_flag']\n death_time = row['deathtime']\n ethnicity = row['ethnicity']\n age = round( pd.to_timedelta(adm_time-dob)/pd.to_timedelta(365,unit='d'))\n \n \n return(subj_id, hadm_id, age, gender, ethnicity, diagnosis, expired, death_time)\n \ndef get_diagnoses(hadm_id):\n # get all diagnoses for the admission\n # return as dict including short and long titles\n \n con = open_db()\n\n query = \\\n \"\"\"\n SELECT h.icd9_code,\n d.icd9_code, d.short_title, d.long_title\n FROM diagnoses_icd h\n INNER JOIN d_icd_diagnoses d\n ON d.icd9_code = h.icd9_code\n WHERE hadm_id = {};\n \"\"\".format(hadm_id)\n\n dx_dict = pd.read_sql_query(query,con).T.to_dict('dict')\n return (dx_dict)\n\ndef convert_mimic_matched (filename, samp_end = None, all_labs=True, all_notes=True):\n ''' \n TODO:\n time filter\n additional demographics\n \n mapping function\n '''\n # all_labs - include labs from outside the time range of the specified signal file\n # all_notes - include labs from outside the time range of the specified signal file\n\n # samp_end - to limit size of datafile for testing, default = None\n \n # use base file, pull numerics\n\n if filename[-1] != 'n':\n print('Base is waveform file, add numerics')\n else:\n print('Base is numerics file, change basename to waveform and then process numerics')\n filename = filename[:-1]\n \n # generate output filename\n \n outfile = 'mimic_test.h5'\n \n # read header\n record = wfdb.rdheader(filename)\n \n# meta_head = wfdb_head_meta(filename)\n \n with h5py.File(outfile, 'w') as f:\n# meta = f.require_group('.meta')\n# meta.attrs['data'] = json.dumps(meta_head, indent = 4)\n# meta.attrs['mapping'] = json.dumps('Placeholder', indent = 4)\n \n \n grp_numerics = f.require_group('numerics')\n root = f['/']\n\n print('Converting numerics')\n record = wfdb.rdrecord(filename+'n', sampfrom = 0, sampto = samp_end )\n df = pd.DataFrame(data = record.p_signal, columns = record.sig_name)\n ds_num = grp_numerics.create_dataset('vitals', maxshape = (None,), data = df.to_records(index=False),\n compression=\"gzip\", compression_opts=9, shuffle = True)\n \n grp_waveforms = f.require_group('/waveforms')\n print('Converting waveforms')\n record = wfdb.rdrecord(filename, sampfrom = 0, sampto = samp_end )\n df = pd.DataFrame(data = record.p_signal, columns = record.sig_name)\n ds_wave = grp_waveforms.create_dataset('hemodynamics', maxshape = (None,), data = df.to_records(index=False),\n compression=\"gzip\", compression_opts=9, shuffle = True)\n \n grp_clinical = f.require_group('/clinical')\n\n #demographics\n\n print('Locating admission')\n (subj_id, hadm_id, age, gender, ethnicity, diagnosis, expired, death_time) = find_admission(filename)\n\n demographics = {\n 'Age' : age,\n 'Ethnicity' : ethnicity,\n 'Gender' : gender,\n 'Expired' : expired,\n 'Death_time': death_time\n }\n \n root.attrs['demographics'] = json.dumps(demographics, indent = 4)\n grp_clinical.attrs['admit_diagnosis'] = json.dumps(diagnosis) # add additional codes from dianosis table\n \n #get additional diagnoses - save as dict\n dx_list = get_diagnoses(hadm_id)\n grp_clinical.attrs['diagnoses'] = json.dumps(dx_list, indent = 4)\n \n# reseach = f.require_group('/Research')\n\n \n #convert numerics\n #convert waveforms\n \n print ('Extracting labs')\n labs = extract_labs(filename)\n write_labs(labs, outfile)\n \n print ('Extracting 
notes')\n notes = extract_notes(filename)\n write_notes(notes, outfile) \n\n #micro\n " ]
[ [ "pandas.read_sql_query", "pandas.merge", "pandas.to_datetime", "pandas.DataFrame", "pandas.DataFrame.from_dict", "pandas.to_timedelta" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
pandafengye/OutbreakPAD
[ "742f5ac71919c9a529d9ff12f55074d15e8db9a6" ]
[ "build/lib/OutbreakPAD/Prediction/GRNN_Predict_Model.py" ]
[ "from neupy import algorithms\r\nimport numpy as np\r\nfrom sklearn import datasets, preprocessing\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndef GRNN_Predict_Model(X,Y,std=0.02):\r\n scaler = preprocessing.MinMaxScaler()\r\n arima_values=scaler.fit_transform(X)\r\n origin_values=scaler.fit_transform(Y.reshape((-1, 1)))\r\n arima=arima_values[:len(arima_values)-4]\r\n origin=origin_values[:len(origin_values)-4]\r\n x_train, x_test, y_train,y_test = train_test_split(arima,origin,train_size = 0.7,random_state=0)\r\n nw = algorithms.GRNN(std=std, verbose=False)\r\n nw.train(x_train, y_train)\r\n # y_Predict = nw.predict(x_test)\r\n GRNN_Predict = nw.predict(arima_values)\r\n GRNN_Predict[np.isnan(GRNN_Predict)] = 0 # If NA is encountered, replace it with 0\r\n #print(GRNN_Predict)\r\n origin_values_inverse=scaler.inverse_transform(origin_values)\r\n GRNN_Predict_inverse =scaler.inverse_transform(GRNN_Predict )\r\n return GRNN_Predict_inverse\r\n" ]
[ [ "numpy.isnan", "sklearn.model_selection.train_test_split", "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
maguileracanon/gcn
[ "b6578fa62315bfe02b4f829d44a853ca156588ab" ]
[ "gcn/GCNLayer.py" ]
[ "\"\"\"Linear module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# from __future__ import google_type_annotations\nfrom __future__ import print_function\nimport numpy as np\n\nimport sonnet as snt\nimport tensorflow as tf\n\n\ndef glorot(shape, name=None):\n \"\"\"Glorot & Bengio (AISTATS 2010) init.\"\"\"\n init_range = np.sqrt(6.0 / (shape[0] + shape[1]))\n initial = tf.random.uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)\n return tf.Variable(initial, name=name)\n\n\nclass MyGCNLayer(tf.keras.layers.Layer):\n def __init__(self, name=\"MyGCNLayer\", outputdim=None, act=True):\n super(MyGCNLayer, self).__init__()\n self.num_outputs = outputdim\n self.act = act\n\n def build(self, input_shape):\n input_size = input_shape[1]\n self.kernel = self.add_weight(\"kernel\", shape=[input_size, self.num_outputs])\n\n\n\n def call(self, input,a_hat):\n\n x = tf.nn.dropout(input, 1 - 0.5)\n x = tf.matmul(x, self.kernel)\n x = tf.sparse.sparse_dense_matmul(a_hat, x)\n if self.act:\n return tf.nn.relu(x)\n else:\n return x\n\n\nclass MLPGraphNetwork(snt.Module):\n def __init__(self, name=\"MLPGraphNetwork\", outputdim=None, act=tf.nn.relu, bias=True):\n \"\"\"\n\n :type outputdim: object\n \"\"\"\n super(MLPGraphNetwork, self).__init__(name=name)\n self.output_dim = outputdim\n self.bias = bias\n self.act = act\n\n @snt.once\n def _initialize(self, inputs):\n input_size = inputs.shape[1]\n self.w = glorot([input_size, self.output_dim], name=\"weights\")\n if self.bias:\n # Fix this, the shape of the bias is not automatized . it was giving me an error\n\n self.b_arr = tf.Variable(tf.zeros((2708, 1), dtype=tf.float32), name=\"bias\")\n print(\"Bias done\", self.b_arr)\n # tf.Variable(tf.random.normal([input_size, self.output_size]))\n\n def __call__(self, inputs, a_hat):\n self._initialize(inputs)\n x = tf.nn.dropout(inputs, 1 - 0.5)\n res = tf.matmul(x, self.w)\n output = tf.matmul(a_hat, res)\n if self.bias:\n output += self.b_arr\n\n return self.act(output)\n\n\nclass GCN(tf.keras.Model):\n\n def __init__(self,\n encoder_arr=None,\n pi=None,\n convolution_kernel_1=None,\n convolution_kernel_2=None,\n decoder_arr=None,\n name=\"GCN\"):\n super(GCN, self).__init__(name=name)\n # self._normalizer = snt.LayerNorm(axis=1, create_offset=True, create_scale=True)\n self._encoder = snt.Sequential([snt.nets.MLP(encoder_arr, activate_final=True),snt.LayerNorm(axis=1, create_offset=True, create_scale=True)])\n #self._encoder = snt.LayerNorm(axis=0, create_offset=True, create_scale=True)\n self._graphNetwork = MyGCNLayer(outputdim=convolution_kernel_1, name=\"gcn1\",act=True)\n self._conv2 = MyGCNLayer(outputdim=convolution_kernel_2, name=\"gcn2\",act=True)\n # self._decoder = snt.Sequential([snt.LayerNorm(axis=1, create_offset=True, create_scale=True),snt.nets.MLP(decoder_arr, activate_final=False)])\n\n def call(self, input_op, dadmx):\n x=self._encoder(input_op)\n conv1 = self._graphNetwork(x, dadmx)\n conv2 = self._conv2(conv1,dadmx)\n return conv2\n" ]
[ [ "tensorflow.nn.relu", "tensorflow.matmul", "numpy.sqrt", "tensorflow.Variable", "tensorflow.zeros", "tensorflow.random.uniform", "tensorflow.sparse.sparse_dense_matmul", "tensorflow.nn.dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
GittiHab/mbrl-thesis-code
[ "10ecd6ef7cbb2df4bd03ce9928e344eab4238a2e" ]
[ "algos/PlaNet/reward_space.py" ]
[ "import abc\nfrom copy import deepcopy\nfrom typing import Optional\n\nfrom torch.nn import functional as F\nfrom torch.nn.utils import clip_grad_norm_\nfrom models.models import bottle\nimport torch\nfrom stable_baselines3.common.torch_layers import MlpExtractor\nfrom exploration.base import Explorer\nfrom utils import reset_model_params\nfrom .optimizer import Optimizer\nfrom .targets import one_step_td, get_target_function\nfrom warnings import warn\nfrom torch.optim.lr_scheduler import CyclicLR, StepLR\n\n__all__ = ['RewardSpace', 'ExplorationRewardSpace', 'setup_reward_space']\n\n\ndef setup_reward_space(config, world_model, lr_step_size, name):\n if config.value_lr is not None:\n reward_space = RewardSpace(world_model.reward_model,\n value_lr=config.value_lr,\n value_layers=config.value_layers if 'value_layers' in config else 3,\n state_size=world_model.state_size,\n hidden_size=config.hidden_size if 'hidden_size' in config else world_model.hidden_size,\n lr_strategy=setup_lr_schedule(config, name, lr_step_size),\n device=world_model.device,\n name=name,\n value_target_fn=get_target_function(config),\n update_target_every=config.update_target_every if 'update_target_every' in config else None)\n else:\n reward_space = RewardSpace(world_model.reward_model)\n return reward_space\n\n\ndef setup_lr_schedule(config, name, lr_step_size):\n strategy = 'step' if 'lr_strategy' not in config else config.lr_strategy.lower()\n if strategy == 'step':\n if lr_step_size <= 0:\n warn('Using no LR scheduler as step size is smaller than 1.')\n return None\n if 'lr_decay' not in config:\n return None\n min_lr = config.value_lr * config.lr_min_factor if 'lr_decay' in config else config.value_lr * 0.01\n return StepLRSchedule(name, lr_step_size, config.lr_decay, min_lr)\n elif strategy == 'cyclic':\n return CyclicLRSchedule(name, min_lr=config.lr_min, max_lr=config.lr_max,\n lr_half_cycle=config.lr_half_cycle, lr_mode=config.lr_mode)\n warn('Specified learning rate schedule not supported.')\n return None\n\n\nclass LearningRateStrategy(abc.ABC):\n\n def __init__(self, name, min_lr=None):\n self.name = name\n self.scheduler = None\n self.min_lr = min_lr\n self._last_lr = None\n\n def _step(self, logger):\n logger.record(\"train/learning_rate_\" + self.name, self.last_lr)\n if self.min_lr is None or self.last_lr > self.min_lr:\n self.scheduler.step()\n self._last_lr = self.scheduler.get_last_lr()[0]\n\n @property\n def last_lr(self):\n if self._last_lr is None:\n return self.scheduler.get_last_lr()[0]\n return self._last_lr\n\n def after_batch(self, logger):\n pass\n\n def after_epoch(self, logger):\n pass\n\n @abc.abstractmethod\n def reset_schedule(self, optimizer):\n pass\n\n\nclass CyclicLRSchedule(LearningRateStrategy):\n\n def __init__(self, name, min_lr, max_lr, lr_half_cycle, lr_mode):\n super().__init__(name)\n self.lr_min = min_lr\n self.lr_max = max_lr\n self.lr_half_cycle = lr_half_cycle\n self.lr_mode = lr_mode\n\n self.scheduler = None # type: CyclicLR\n\n def after_batch(self, logger):\n self._step(logger)\n\n def reset_schedule(self, optimizer):\n self.scheduler = CyclicLR(optimizer, base_lr=self.lr_min, max_lr=self.lr_max,\n step_size_up=self.lr_half_cycle, mode=self.lr_mode, cycle_momentum=False)\n\n\nclass StepLRSchedule(LearningRateStrategy):\n def __init__(self, name, lr_step_size, lr_decay, min_lr=None):\n super().__init__(name, min_lr)\n self.lr_step_size = lr_step_size\n self.lr_decay = lr_decay\n\n self.scheduler = None # type: StepLR\n\n def after_epoch(self, logger):\n 
self._step(logger)\n\n def reset_schedule(self, optimizer):\n self.scheduler = StepLR(optimizer, self.lr_step_size, self.lr_decay)\n return self.scheduler\n\n\nclass RewardSpace(Optimizer):\n def __init__(self, reward_model, value_lr=None, hidden_size=None, state_size=None, belief_size=None, device=None,\n value_layers=3, lr_strategy: Optional[LearningRateStrategy] = None, grad_clip_norm=100.0,\n name='', value_target_fn=one_step_td, update_target_every=1):\n '''\n\n :param reward_model: Model that predicts the reward given belief, state, and action.\n :param value_lr: Learning rate of the value network. Leave None to NOT use a value network.\n :param hidden_size: Hidden layer size of the value network. Required when value network is used.\n :param state_size: State size, if None only the belief is used as an input for the value network.\n :param belief_size: Belief size, if None only the state is used as an input for the value network.\n belief_size and state_size cannot both be None if value network is used.\n :param device: PyTorch device (cuda or cpu).\n :param value_layers: Number of layers of the value network.\n :param name: Name of this reward space. Will be shown in the log.\n '''\n self.reward_model = reward_model\n self._name = name\n\n self.lr_strategy = lr_strategy\n if value_lr is not None:\n assert hidden_size is not None and (\n state_size is not None or belief_size is not None) and device is not None, \\\n 'If value network is used, value_lr, hidden_size, device, and state_size or hidden_size or both must be specified.'\n if state_size and belief_size:\n input_size = state_size + belief_size\n elif state_size:\n input_size = state_size\n else:\n input_size = belief_size\n self.value_model = ValueNetwork(MlpExtractor(\n # We are ignoring the beliefs for now.\n # If this is changed, ValueNetwork class needs to be changed to pass the belief to the MlpExtractor.\n input_size,\n net_arch=[{'vf': [hidden_size for _ in range(value_layers)]}],\n activation_fn=torch.nn.ReLU,\n device=device,\n ), device=device,\n ignore_beliefs=belief_size is None,\n ignore_states=state_size is None)\n self.value_optimizer = torch.optim.Adam(list(self.value_model.parameters()), lr=value_lr)\n self.lr = value_lr\n self.lr_schedule = None\n self.device = device\n self.values_target = None\n self.target_fn = value_target_fn\n self.grad_clip_norm = grad_clip_norm\n self.update_target_every = update_target_every\n\n self.reset_lr_schedule()\n else:\n self.value_model = None\n self.iteration = 0\n\n def reset_lr_schedule(self):\n if self.lr_strategy is None:\n return\n self.lr_schedule = self.lr_strategy.reset_schedule(self.value_optimizer)\n\n @property\n def name(self):\n return self._name\n\n def values(self, *args):\n if len(args[0].size()) == 3:\n return bottle(self.value_model, args)\n return self.value_model(*args)\n\n def rewards(self, beliefs, states, actions=None):\n assert len(beliefs.size()) == 3, 'Requires planning horizon dimension.'\n return self._rewards(beliefs, states, actions)\n\n def _rewards(self, beliefs, states, actions=None):\n return bottle(self.reward_model, (beliefs[1:], states[1:]))\n\n @property\n def has_scheduler(self):\n return self.lr_strategy is not None\n\n @property\n def has_value_model(self):\n return self.value_model is not None\n\n def reset_value_params(self) -> None:\n \"\"\"\n Reset the weights of the value network (if it exists).\n \"\"\"\n if self.has_value_model:\n self.reset_lr_schedule()\n reset_model_params(self.value_model)\n\n def 
reset_value_target_params(self) -> None:\n \"\"\"\n Reset the weights of the value *target* network (if it there is a value network).\n \"\"\"\n if self.has_value_model:\n if self.values_target is None:\n self._update_target_network()\n reset_model_params(self.values_target)\n\n def train_batch(self, states, embeddings, actions, rewards, dones, gamma, logger):\n if not self.has_value_model:\n return 0.\n\n value_loss = self._update_value(states, rewards, dones, gamma)\n\n if self.has_scheduler:\n self.lr_strategy.after_batch(logger)\n return value_loss\n\n def before_updates(self):\n if self.has_value_model and self.iteration % self.update_target_every == 0:\n self._update_target_network()\n self.iteration += 1\n\n def after_updates(self, logger):\n if self.has_scheduler:\n self.lr_strategy.after_epoch(logger)\n\n def _update_value(self, states, rewards, terminals, gamma):\n self.value_model.train()\n with torch.no_grad():\n dones_true = rewards == 0.\n\n # In the DQN algorithm implementation they do something like this? Mais, pourquoi?\n # next_q_values = torch.cat(self.values_target(next_observations), dim=1)\n # next_q_values, _ = torch.min(next_q_values, dim=1, keepdim=True)\n values_next = bottle(self.values_target,\n (states.beliefs[1:].detach(), states.states[1:].detach()))\n target_values = self.target_fn(rewards, dones_true, gamma, values_next)\n\n # Get current Q-values estimates for each critic network\n # using action from the replay buffer\n current_values = bottle(self.value_model,\n (states.beliefs[:-1].detach(), states.states[:-1].detach()))\n\n # Compute critic loss\n value_loss = F.mse_loss(current_values, target_values, reduction='none')\n\n # Fix terminal states propagating wrong value\n value_loss = self._mask_terminal_states(value_loss, terminals)\n value_loss = value_loss.mean()\n\n # Optimize the critic\n self.value_optimizer.zero_grad()\n value_loss.backward()\n clip_grad_norm_(self.value_model.parameters(), self.grad_clip_norm, norm_type=2)\n self.value_optimizer.step()\n self.value_model.eval()\n return value_loss.item()\n\n def _mask_terminal_states(self, value_loss, dones):\n \"\"\"\n HOTFIX: This is a beautiful example of software engineering. Sometimes you make such a bad mistake that\n adding *more* code instead of improving the original one is the only way forward at the moment...\n\n So what does this workaround do? We use code that requires a single sequence of states. However, as this code\n actually should work with episodic environments, we actually need two sequences: current_states and next_states.\n Why? Because if we hit a terminal state s_T, this will be in the next_states sequence but not in the current\n one. Similarly, the first state of an episode will never be in the next_states but only in the current_states\n sequence. Now, we cannot simply append or prepend states to one or the other sequence to transform from one into\n the other. If a terminal state is in the middle of the sequence we would also need to merge the start state\n from the middle of the sequence into it. And there's our problem: Now the length of the sequence changes.\n Now, working with tensors usually relies on *matching* shapes -- so we would be running into a lot of trouble.\n In other words, if we wanted to actually use two tensors as in a normal replay buffer setup we need to a lot of\n refactoring. And there's no time for this, right now. 
So if anyone reads this, now you know why this method\n exists.\n TODO: refactor to fix this (ID1)\n\n :return:\n \"\"\"\n terminal_states = torch.cat([torch.zeros((1, *dones.size()[1:]), device=dones.device), dones])[:-1]\n return value_loss[terminal_states == 0]\n\n def _update_target_network(self):\n if self.values_target is None:\n self.values_target = deepcopy(self.value_model)\n else:\n with torch.no_grad():\n for p in self.values_target.parameters():\n p.requires_grad = False\n\n self.values_target.load_state_dict(self.value_model.state_dict())\n\n\nclass ExplorationRewardSpace(RewardSpace):\n def __init__(self,\n exploration_method: Explorer, reward_model=None,\n value_lr=None, hidden_size=None, state_size=None, belief_size=None, device=None,\n value_layers=3, lr_strategy=None, name='',\n reset_target_every=None, update_target_every=1):\n super().__init__(reward_model,\n value_lr=value_lr,\n hidden_size=hidden_size,\n state_size=state_size,\n belief_size=belief_size,\n device=device,\n value_layers=value_layers,\n lr_strategy=lr_strategy,\n name=name,\n update_target_every=update_target_every)\n self.exploration_method: Explorer = exploration_method\n self.reset_target_every = reset_target_every if reset_target_every > 0 else None\n\n def _rewards(self, beliefs, states, actions=None):\n assert actions is not None, 'Exploration reward space requires actions to compute reward.'\n return bottle(self.exploration_method.exploration_reward, (beliefs[:-1], states[:-1], actions))\n # if we were to combine the rewards, we would need to do something like this:\n # return self.exploration_method(self.reward_model(...), ...)\n\n def before_updates(self):\n if self.has_value_model:\n if self.reset_target_every is not None and self.iteration % self.reset_target_every == 0:\n self.reset_value_target_params()\n elif self.iteration % self.update_target_every == 0:\n self._update_target_network()\n # super().before_updates()\n\n def train_batch(self, states, embeddings, actions, rewards, dones, gamma, logger):\n self.exploration_method.train_batch(rewards, states.beliefs,\n states.states,\n actions, embeddings)\n return super().train_batch(states, embeddings, actions,\n self.rewards(states.beliefs, states.states, actions).unsqueeze(-1), dones, gamma,\n logger)\n\n\nclass ValueNetwork(torch.nn.Module):\n def __init__(self, mlp_extractor: MlpExtractor, device,\n ignore_states=False, ignore_beliefs=True):\n assert not ignore_states or not ignore_beliefs, 'Cannot ignore both states and beliefs.'\n super().__init__()\n self.mlp_extractor = mlp_extractor\n self.lin = torch.nn.Linear(self.mlp_extractor.latent_dim_vf, 1)\n self.lin.to(device)\n self.device = device\n\n self.ignore_states = ignore_states\n self.ignore_beliefs = ignore_beliefs\n\n def forward(self, beliefs, states):\n # We are ignoring the beliefs for now.\n # If this is changed, the input size of the MlpExtractor needs to be changed too.\n if self.ignore_states:\n features = beliefs\n elif self.ignore_beliefs:\n features = states\n else:\n features = torch.cat([states, beliefs], dim=-1)\n return self.lin(self.mlp_extractor(features)[1])\n" ]
[ [ "torch.optim.lr_scheduler.CyclicLR", "torch.cat", "torch.nn.Linear", "torch.nn.functional.mse_loss", "torch.no_grad", "torch.optim.lr_scheduler.StepLR" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Zhaoyi-Yan/PFDNet
[ "86798fbc4fadc673e7912c08492ea3611bc20154", "86798fbc4fadc673e7912c08492ea3611bc20154" ]
[ "metrics.py", "net/CSRNet.py" ]
[ "import torch\nimport torch.nn as nn\nimport sys\nfrom functools import reduce\n\nclass JointLoss(nn.Module):\n def __init__(self, alpha, beta):\n super(JointLoss, self).__init__()\n self.MSELoss = nn.MSELoss(size_average=False)\n self.BCELoss = nn.BCELoss(size_average=True)\n self.alpha = alpha\n self.beta = beta\n \n def forward(self, x, gt_map, target_map):\n mse = self.MSELoss(x, gt_map) * self.alpha\n bce = self.BCELoss(x, target_map) * self.beta\n# sys.stdout.write(\"mse loss = {}, bce loss = {}\\r\".format(mse, bce))\n sys.stdout.flush()\n return mse + bce\n \nclass MSEScalarLoss(nn.Module):\n def __init__(self):\n super(MSEScalarLoss, self).__init__()\n \n def forward(self, x, gt_map):\n return torch.pow(x.sum() - gt_map.sum(), 2) / (reduce(lambda a,b:a * b, x.shape))\n \nclass AEBatch(nn.Module):\n def __init__(self):\n super(AEBatch, self).__init__()\n\n def forward(self, estimated_density_map, gt_num):\n return torch.abs(torch.sum(estimated_density_map, dim=(1, 2, 3)) - gt_num)\n\n\nclass SEBatch(nn.Module):\n def __init__(self):\n super(SEBatch, self).__init__()\n\n def forward(self, estimated_density_map, gt_num):\n return torch.pow(torch.sum(estimated_density_map, dim=(1, 2, 3)) - gt_num, 2)\n", "import torch.nn as nn\nfrom torchvision import models\nimport torch.nn.functional as functional\nimport time\nimport torch\n\n\nclass CSRNet(nn.Module):\n def __init__(self):\n super(CSRNet, self).__init__()\n self.backend_feat = [(512, 2), (512, 2), (512, 2), (256, 2), (128, 2), (64, 2)]\n self.front_end = nn.Sequential(*(list(list(models.vgg16_bn(True).children())[0].children())[0:33]))\n self.back_end = make_layers(self.backend_feat, in_channels=512, batch_norm=True)\n self.output_layer = nn.Conv2d(64, 1, kernel_size=1)\n\n for m in self.output_layer.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.normal_(m.weight, std=0.01)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n img_shape = x.shape\n front_end = self.front_end(x)\n back_end = self.back_end(front_end)\n output = self.output_layer(back_end)\n output = functional.interpolate(output, scale_factor=4, mode='bilinear', align_corners=False)\n return output\n\n\ndef make_layers(cfg, in_channels, batch_norm=False):\n layers = []\n for v, atrous in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=atrous, dilation=atrous)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n" ]
[ [ "torch.sum", "torch.nn.MSELoss", "torch.nn.BCELoss" ], [ "torch.nn.Sequential", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.init.normal_", "torch.nn.functional.interpolate", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ChyiYaqing/chyidlTutorial
[ "77e7f6f84f21537a58a8a8a42e31cf2e3dd31996" ]
[ "root/os/DSAA/DataStructuresAndAlgorithms/python/sort_insert_array_implement.py" ]
[ "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# sort_insert_array_implement.py\n# python\n#\n# 🎂\"Here's to the crazy ones. The misfits. The rebels.\n# The troublemakers. The round pegs in the square holes.\n# The ones who see things differently. They're not found\n# of rules. And they have no respect for the status quo.\n# You can quote them, disagree with them, glority or vilify\n# them. About the only thing you can't do is ignore them.\n# Because they change things. They push the human race forward.\n# And while some may see them as the creazy ones, we see genius.\n# Because the poeple who are crazy enough to think thay can change\n# the world, are the ones who do.\"\n#\n# Created by Chyi Yaqing on 02/18/19 15:56.\n# Copyright © 2019. Chyi Yaqing.\n# All rights reserved.\n#\n# Distributed under terms of the\n# MIT\n\n\"\"\"\nInsertion Sort is a simple sorting algorithm t\nLoop from i=1 to n-1, Pick element arr[i] and insert it into sorted sequence\n arr[0, i-1]\n\"\"\"\nimport numpy as np\nimport time\n\n\n# Python program for implementation of Insertion Sort\n# Function to do insertion sort\ndef insertionSort(arr):\n # Traverse through 1 to len(arr)\n for i in range(1, len(arr)):\n key = arr[i]\n # Move elements of arr[0..i-1], that are greater than key, to one\n # position ahead of their current position\n j = i - 1\n while j >= 0 and key < arr[j]:\n arr[j+1] = arr[j]\n j -= 1\n arr[j+1] = key\n\n\n# Driver code to test above\narr = [12, 11, 13, 5, 6]\nprint(\"Original array is : {}\".format(arr))\ninsertionSort(arr)\nprint(\"Sorted array is : {}\".format(arr))\n\ntestRandoms = np.random.randint(1, 101, [200, 100])\nstart = time.time()\nfor i in range(len(testRandoms)):\n insertionSort(testRandoms[i])\nprint(\"Consumes sum: {} ms\".format((time.time()-start)*1000))\n" ]
[ [ "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gabiherman/datman
[ "dcbca4981ff7bb1be536d6c62c3b27786cabdef9" ]
[ "bin/dm_header_checks.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nUsage:\n dm_header_checks.py [options] [--ignore=<STR>]... <series> <standard>\n\nArguments:\n <series> Full path to the series JSON file being examined\n <standard> Full path to the standards JSON file to compare\n against\n\nOptions:\n --output <PATH> Full path to store output as a file\n --ignore <STR> A dicom header field to ignore. Can be specified\n more than once. Can be used along with the\n --ignore-file option\n --ignore-file <PATH> Full path to a text file of header fields to ignore\n with each field name on a new line. Can be used\n alongside the --ignore option\n --tolerance <PATH> Full path to a json file mapping field names to a\n tolerance for that field\n --dti Include a bval check. If enabled, it is expected\n that there will be a .bval file in the same dir\n as the series (or gold standard) with the\n same file name as the series (or gold standard)\n --ignore-db Disable attempts to update database\n\"\"\"\nimport os\nimport json\n\nfrom numpy import isclose\nfrom docopt import docopt\n\ndef main():\n args = docopt(__doc__)\n series_json = args['<series>']\n standard_json = args['<standard>']\n output = args['--output']\n ignored_fields = args['--ignore']\n ignore_file = args['--ignore-file']\n tolerances = args['--tolerance']\n ignore_db = args['--ignore-db']\n dti = args['--dti']\n\n if ignore_file:\n ignored_fields.extend(parse_file(ignore_file))\n\n if tolerances:\n tolerances = read_json(tolerances)\n\n diffs = construct_diffs(series_json, standard_json, ignored_fields,\n tolerances, dti)\n\n if not diffs:\n return\n\n if output:\n write_diff_log(diffs, output)\n\n if ignore_db:\n return\n\n # Will add later\n # update_database(series_json, diffs)\n\ndef parse_file(file_path):\n try:\n with open(file_path, \"r\") as fh:\n contents = fh.readlines()\n except Exception as e:\n raise type(e)(\"Couldnt read file of field names to ignore. 
\"\n \"{}\".format(str(e)))\n return [line.strip() for line in contents]\n\ndef construct_diffs(series_json, standard_json, ignored_fields=None,\n tolerances=None, dti=False):\n series = read_json(series_json)\n standard = read_json(standard_json)\n\n diffs = compare_headers(series, standard, ignore=ignored_fields,\n tolerance=tolerances)\n\n if dti:\n bval_diffs = check_bvals(series_json, standard_json)\n if bval_diffs:\n diffs['bvals'] = bval_diffs\n\n return diffs\n\ndef read_json(json_file):\n with open(json_file, \"r\") as fp:\n contents = json.load(fp)\n return contents\n\ndef compare_headers(series, standard, ignore=None, tolerance=None):\n if ignore:\n remove_fields(standard, ignore)\n if not tolerance:\n tolerance = {}\n\n diffs = {}\n for field in standard:\n try:\n value = series[field]\n except KeyError:\n diffs.setdefault('missing', []).append(field)\n continue\n if value != standard[field]:\n result = handle_diff(value, standard[field], tolerance.get(field))\n if result:\n diffs[field] = result\n return diffs\n\ndef remove_fields(json_contents, fields):\n for item in fields:\n try:\n del json_contents[item]\n except KeyError:\n pass\n\ndef handle_diff(value, expected, tolerance=None):\n diffs = {'expected': expected, 'actual': value}\n\n if not tolerance:\n return diffs\n\n if isclose(value, expected, atol=tolerance):\n return {}\n\n diffs['tolerance'] = tolerance\n return diffs\n\ndef check_bvals(series_path, standard_path):\n try:\n series_bval = find_bvals(series_path)\n standard_bval = find_bvals(standard_path)\n except IOError as e:\n return 'Error - {}'.format(e)\n if series_bval != standard_bval:\n return {'expected': standard_bval, 'actual': series_bval}\n return {}\n\ndef find_bvals(json_path):\n bval_path = json_path.replace('json', 'bval')\n if not os.path.isfile(bval_path):\n raise IOError(\"bval for {} does not exist\".format(json_path))\n try:\n with open(bval_path, \"r\") as bval_fh:\n bvals = bval_fh.readlines()[0]\n except:\n raise IOError(\"Unable to read bval file {}\".format(bval_path))\n return bvals\n\ndef write_diff_log(diffs, output_path):\n with open(output_path, 'w') as dest:\n json.dump(diffs, dest)\n\n# def update_database(series, diffs):\n# return\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jaesik817/svpg_tensorflow
[ "ef8323af45bcb4f7c06588b4ee4ac8ec478b6027" ]
[ "svpg_cont_action/tmp.py" ]
[ "import gym\nimport itertools\nimport matplotlib\nimport numpy as np\nimport sys\nimport tensorflow as tf\nimport collections\n\nimport sklearn.pipeline\nimport sklearn.preprocessing\n\nfrom sklearn.kernel_approximation import RBFSampler\n\nfrom svpg import SVPG\n\n###################\n# parameters\n###################\nENV_NAME=\"MountainCarContinuous-v0\";\n#ENV_NAME=\"Pendulum-v0\";\n#ENV_NAME=\"Reacher-v1\";\nif(ENV_NAME==\"Pendulum-v0\"):\n NUM_EPISODES=2000;\n ENTROPY_BETA=0.01;\n POLICY_LR=0.0001;\n VALUE_LR=0.001;\n NUM_VARS=6;\n UPDATE_ITER=10;\n MAX_EPI_STEP=200;\n DISCOUNT_FACTOR=0.9;\nif(ENV_NAME==\"MountainCarContinuous-v0\"):\n NUM_EPISODES=100;\n ENTROPY_BETA=0.1;\n POLICY_LR=0.001;\n VALUE_LR=0.1;\n NUM_VARS=4;\n UPDATE_ITER=20;\n MAX_EPI_STEP=1000;\n DISCOUNT_FACTOR=0.95;\n\n# for SVPG\nn_particles=1;\nindependent_flag_svpg=1;\n###################\n\n# gym env\nenv=np.zeros(n_particles,dtype=object);\nfor i in range(n_particles):\n env[i] = gym.envs.make(ENV_NAME)\nnum_state=env[0].observation_space.shape[0]\nnum_action=env[0].action_space.shape[0]\naction_bound=[env[0].action_space.low, env[0].action_space.high]\n# MAX EPI STEP is setted in gym \nMAX_EPI_STEP=env[0].spec.timestep_limit;\n\n\"\"\"\nFor MountainCarContinuous-v0\n\"\"\"\n# Feature Preprocessing: Normalize to zero mean and unit variance\n# We use a few samples from the observation space to do this\nobsp_high=env[0].observation_space.high;\nobsp_low=env[0].observation_space.low;\nfor i in range(len(obsp_high)):\n if(obsp_high[i]==float('Inf')):\n obsp_high[i]=1e+10;\nfor i in range(len(obsp_low)):\n if(obsp_low[i]==-float('Inf')):\n obsp_low[i]=-1e+10;\nobservation_examples = np.array([np.random.uniform(low=obsp_low, high=obsp_high,size=env[0].observation_space.low.shape) for x in range(10000)])\n\nscaler = sklearn.preprocessing.StandardScaler()\nscaler.fit(observation_examples)\n\n# Used to convert a state to a featurizes represenation.\n# We use RBF kernels with different variances to cover different parts of the space\nfeaturizer = sklearn.pipeline.FeatureUnion([\n (\"rbf1\", RBFSampler(gamma=5.0, n_components=100)),\n (\"rbf2\", RBFSampler(gamma=2.0, n_components=100)),\n (\"rbf3\", RBFSampler(gamma=1.0, n_components=100)),\n (\"rbf4\", RBFSampler(gamma=0.5, n_components=100))\n ])\nfeaturizer.fit(scaler.transform(observation_examples))\n\ndef featurize_state(state):\n scaled = scaler.transform([state])\n featurized = featurizer.transform(scaled)\n return featurized[0]\n\ndef build_policy_net_MountainCarContinuous(input_tf):\n mu = tf.layers.dense(input_tf, num_action, tf.nn.tanh, kernel_initializer=w_init, name='mu') # estimated action value\n sigma = tf.layers.dense(input_tf, num_action, tf.nn.softplus, kernel_initializer=w_init, name='sigma') # estimated variance\n return mu,sigma;\n\nclass PolicyEstimator_MountainCarContinuous():\n def __init__(self, entropy_beta=0.1, learning_rate=0.001, par_idx=0,scope=\"policy_estimator\"):\n w_init = tf.random_normal_initializer(0.,.1);\n with tf.variable_scope(scope+\"_\"+str(par_idx)):\n\n # state, target and action\n self.state = tf.placeholder(tf.float32, [None,400], name=\"state\")\n self.target = tf.placeholder(tf.float32,[None,1], name=\"target\")\n self.a_his = tf.placeholder(tf.float32, [None, num_action], name=\"action_hist\")\n\n # layers\n\n # wrap output\n self.mu = self.mu * action_bound[1];\n self.sigma = self.sigma + 1e-5\n self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)\n self.action = 
tf.squeeze(self.normal_dist.sample(1),axis=0);\n self.action = tf.clip_by_value(self.action, action_bound[0], action_bound[1])\n\n # Loss and train op\n self.loss = -self.normal_dist.log_prob(self.a_his) * self.target\n # Add cross entropy cost to encourage exploration\n self.loss -= entropy_beta * self.normal_dist.entropy()\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n self.grads_and_vars = self.optimizer.compute_gradients(self.loss)\n self.grads=[];\n self.vars=[];\n for i in range(len(self.grads_and_vars)):\n self.grads.append(self.grads_and_vars[i][0]);\n self.vars.append(self.grads_and_vars[i][1]);\n self.grads=self.grads[-1*NUM_VARS:];\n self.vars=self.vars[-1*NUM_VARS:];\n self.train_op = self.optimizer.apply_gradients(\n self.grads_and_vars, global_step=tf.contrib.framework.get_global_step())\n\n def predict(self, state, sess=None):\n sess = sess or tf.get_default_session()\n state=featurize_state(state);\n return sess.run(self.action, { self.state: [state] })[0]\n\n def update(self, state, target, action, sess=None):\n sess = sess or tf.get_default_session()\n for st_idx in range(len(state)):\n state[st_idx]=featurize_state(state[st_idx]);\n feed_dict = { self.state: state, self.target: target, self.a_his: action }\n _, loss = sess.run([self.train_op, self.loss], feed_dict)\n return loss\n\nclass ValueEstimator_MountainCarContinuous():\n def __init__(self, learning_rate=0.1, par_idx=0,scope=\"value_estimator\"):\n w_init = tf.random_normal_initializer(0.,.1);\n with tf.variable_scope(scope+\"_\"+str(par_idx)):\n # state and target\n self.state = tf.placeholder(tf.float32, [None,400], \"state\")\n self.target = tf.placeholder(tf.float32, [None,1], name=\"target\")\n\n # layers\n self.value_estimate = tf.layers.dense(self.state, 1, kernel_initializer=w_init, name='v') # estimated value for state\n\n # loss and optimizer\n self.loss = tf.squared_difference(self.value_estimate, self.target)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n self.train_op = self.optimizer.minimize(\n self.loss, global_step=tf.contrib.framework.get_global_step())\n\n def predict(self, state, sess=None):\n sess = sess or tf.get_default_session()\n state=featurize_state(state);\n return sess.run(self.value_estimate, { self.state: [state] })[0][0]\n\n def update(self, state, target, sess=None):\n sess = sess or tf.get_default_session()\n for st_idx in range(len(state)):\n state[st_idx]=featurize_state(state[st_idx]);\n feed_dict = { self.state: state, self.target: target }\n _, loss = sess.run([self.train_op, self.loss], feed_dict)\n return loss\n\n\"\"\"\nFor Pendulum-v0\n\"\"\"\nclass PolicyEstimator_Pendulum():\n def __init__(self, entropy_beta=0.01, learning_rate=0.01, par_idx=0,scope=\"policy_estimator\"):\n w_init = tf.random_normal_initializer(0.,.1);\n with tf.variable_scope(scope+\"_\"+str(par_idx)):\n \n # state, target and action\n self.state = tf.placeholder(tf.float32, [None,num_state], name=\"state\")\n self.target = tf.placeholder(tf.float32,[None,1], name=\"target\")\n self.a_his = tf.placeholder(tf.float32, [None, num_action], name=\"action_hist\") \n \n # layers\n l_a = tf.layers.dense(self.state, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')\n self.mu = tf.layers.dense(l_a, num_action, tf.nn.tanh, kernel_initializer=w_init, name='mu') # estimated action value\n self.sigma = tf.layers.dense(l_a, num_action, tf.nn.softplus, kernel_initializer=w_init, name='sigma') # estimated variance\n \n # wrap output\n self.mu = self.mu * 
action_bound[1];\n self.sigma = self.sigma + 1e-4\n\n # get action from distribution\n self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)\n self.action = tf.squeeze(self.normal_dist.sample(1),axis=0);\n self.action = tf.clip_by_value(self.action, action_bound[0], action_bound[1])\n \n # Loss and train op\n self.loss = -self.normal_dist.log_prob(self.a_his) * self.target\n # Add cross entropy cost to encourage exploration\n self.loss -= entropy_beta * self.normal_dist.entropy()\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n self.grads_and_vars = self.optimizer.compute_gradients(self.loss)\n self.grads=[];\n self.vars=[];\n for i in range(len(self.grads_and_vars)):\n self.grads.append(self.grads_and_vars[i][0]);\n self.vars.append(self.grads_and_vars[i][1]);\n self.grads=self.grads[-1*NUM_VARS:];\n self.vars=self.vars[-1*NUM_VARS:];\n self.train_op = self.optimizer.apply_gradients(\n self.grads_and_vars, global_step=tf.contrib.framework.get_global_step())\n \n def predict(self, state, sess=None):\n sess = sess or tf.get_default_session()\n return sess.run(self.action, { self.state: [state] })[0]\n\n def update(self, state, target, a_his, sess=None):\n sess = sess or tf.get_default_session()\n feed_dict = { self.state: state, self.target: target, self.a_his: a_his }\n _, loss = sess.run([self.train_op, self.loss], feed_dict)\n return loss\n\nclass ValueEstimator_Pendulum():\n def __init__(self, learning_rate=0.1, par_idx=0,scope=\"value_estimator\"):\n w_init = tf.random_normal_initializer(0.,.1);\n with tf.variable_scope(scope+\"_\"+str(par_idx)):\n # state and target\n self.state = tf.placeholder(tf.float32, [None,num_state], \"state\")\n self.target = tf.placeholder(tf.float32, [None,1], name=\"target\")\n\n # layers\n l_c = tf.layers.dense(self.state, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')\n self.value_estimate = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # estimated value for state\n \n # loss and optimizer\n self.loss = tf.reduce_mean(tf.square(tf.subtract(self.value_estimate, self.target)))\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n self.train_op = self.optimizer.minimize(\n self.loss, global_step=tf.contrib.framework.get_global_step()) \n \n def predict(self, state, sess=None):\n sess = sess or tf.get_default_session()\n return sess.run(self.value_estimate, { self.state: [state] })[0][0]\n\n def update(self, state, target, sess=None):\n sess = sess or tf.get_default_session()\n feed_dict = { self.state: state, self.target: target }\n _, loss = sess.run([self.train_op, self.loss], feed_dict)\n return loss\n\ndef advantage_actor_critic(env, estimator_policy, estimator_value, svpg, num_episodes,max_epi_step, discount_factor=1.0):\n # Keeps track of useful statistics\n stats = {};\n stats[\"episode_lengths\"]=np.zeros((n_particles,num_episodes));\n stats[\"episode_rewards\"]=np.zeros((n_particles,num_episodes));\n \n Transition = collections.namedtuple(\"Transition\", [\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n \n # state list\n state=np.zeros(n_particles,dtype=object);\n action=np.zeros(n_particles,dtype=object);\n next_state=np.zeros(n_particles,dtype=object);\n episode=np.zeros(n_particles,dtype=object);\n reward=np.zeros(n_particles,dtype=object);\n done=np.zeros(n_particles,dtype=object);\n policy_grads=np.zeros(n_particles,dtype=object);\n\n # total step\n total_step=1;\n # trasition backup initialization\n for i in range(n_particles):\n 
episode[i]=[];\n\n for i_episode in range(num_episodes):\n # Reset the environment\n for i in range(n_particles):\n state[i] = env[i].reset()\n # run\n for t in range(MAX_EPI_STEP):\n for i in range(n_particles):\n # Take a step\n action[i] = estimator_policy[i].predict(state[i])\n next_state[i], reward[i], done[i], _ = env[i].step(action[i])\n # Pendulum case maximum running is just done (there are no reward threshold)\n if(ENV_NAME==\"Pendulum-v0\"):\n done[i] = True if t == max_epi_step -1 else False\n\n # Keep track of the transition\n episode[i].append(Transition(\n state=state[i], action=action[i], reward=reward[i], next_state=next_state[i], done=done[i]))\n\n # Update statistics\n stats[\"episode_rewards\"][i][i_episode] += reward[i]\n stats[\"episode_lengths\"][i][i_episode] = t\n \n state[i] = next_state[i]\n \n # checking one of them is done\n Done=False;\n for i in range(n_particles):\n if done[i]:\n Done=True;\n \n if((total_step%UPDATE_ITER==0)or(Done)):\n feed_dict={};\n # Buffer for each particle\n buffer_s=np.zeros(n_particles,dtype=object);\n buffer_a=np.zeros(n_particles,dtype=object);\n buffer_v=np.zeros(n_particles,dtype=object);\n buffer_td_target=np.zeros(n_particles,dtype=object);\n buffer_td_error=np.zeros(n_particles,dtype=object);\n for i in range(n_particles):\n buffer_s[i]=[];\n buffer_a[i]=[];\n buffer_v[i]=[];\n buffer_td_target[i]=[];\n buffer_td_error[i]=[];\n for t in range(len(episode[i])):\n transition=episode[i][t];\n buffer_s[i].append(transition.state);\n buffer_a[i].append(transition.action);\n # normalize reward for Pendulum case\n if(ENV_NAME==\"Pendulum-v0\"):\n buffer_v[i].append((transition.reward+8)/8)\n else:\n buffer_v[i].append(transition.reward)\n if done[i]:\n v_s_=0;\n else:\n v_s_=estimator_value[i].predict(episode[i][-1].next_state);\n buffer_v[i].reverse();\n for r in buffer_v[i]:\n v_s_=r+discount_factor*v_s_\n buffer_td_target[i].append(v_s_);\n buffer_td_target[i].reverse();\n for t in range(len(buffer_s[i])):\n buffer_td_error[i].append(buffer_td_target[i][t]-estimator_value[i].predict(buffer_s[i][t]));\n estimator_value[i].update(buffer_s[i],np.reshape(buffer_td_target[i],[-1,1]));\n feed_dict.update({estimator_policy[i].state:buffer_s[i]});\n feed_dict.update({estimator_policy[i].target:np.reshape(buffer_td_error[i],[-1,1])});\n feed_dict.update({estimator_policy[i].a_his:np.reshape(buffer_a[i],[-1,num_action])});\n svpg.run(feed_dict);\n # trasition backup re-set\n for i in range(n_particles):\n episode[i]=[];\n total_step+=1;\n if Done:\n break\n \n # Print out which step we're on, useful for debugging. 
(average recent 10 episode scores)\n        if(i_episode>=9):\n            print(\"Episode {}/{} ({})\".format(i_episode + 1, num_episodes, np.max(np.mean(stats[\"episode_rewards\"][:,i_episode-9:i_episode+1],axis=1))))\n\n    return stats\n\ntf.reset_default_graph()\n\nglobal_step = tf.Variable(0, name=\"global_step\", trainable=False)\npolicy_estimator = np.zeros(n_particles,dtype=object)\nvalue_estimator = np.zeros(n_particles,dtype=object)\n\n# construct the matching policy and value estimator pair for each environment\nfor i in range(n_particles):\n    if(ENV_NAME==\"Pendulum-v0\"):\n        policy_estimator[i] = PolicyEstimator_Pendulum(entropy_beta=ENTROPY_BETA,learning_rate=POLICY_LR,par_idx=i)\n        value_estimator[i] = ValueEstimator_Pendulum(learning_rate=VALUE_LR,par_idx=i)\n    elif(ENV_NAME==\"MountainCarContinuous-v0\"):\n        policy_estimator[i] = PolicyEstimator_MountainCarContinuous(entropy_beta=ENTROPY_BETA,learning_rate=POLICY_LR,par_idx=i)\n        value_estimator[i] = ValueEstimator_MountainCarContinuous(learning_rate=VALUE_LR,par_idx=i)\n\nsvpg = SVPG(policy_estimator,independent_flag_svpg,learning_rate=POLICY_LR)\n\nwith tf.Session() as sess:\n    sess.run(tf.global_variables_initializer())\n    # Note: due to randomness in the policy, the number of episodes needed varies.\n    # TODO: the algorithm sometimes gets stuck; the cause has not been identified.\n    stats = advantage_actor_critic(env, policy_estimator, value_estimator, svpg, NUM_EPISODES, MAX_EPI_STEP, discount_factor=DISCOUNT_FACTOR)\n\n" ]
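The training loop above builds its critic targets by walking each particle's buffered rewards in reverse and bootstrapping from the value of the state after the final transition. A minimal standalone sketch of that return computation (plain NumPy; the function name and inputs are illustrative, not part of the repository):

import numpy as np

def discounted_td_targets(rewards, tail_value, discount_factor=0.9):
    # Walk the rewards back-to-front, bootstrapping from the critic's value
    # of the state after the last transition (use 0.0 if the episode terminated).
    targets = []
    v = tail_value
    for r in reversed(rewards):
        v = r + discount_factor * v
        targets.append(v)
    targets.reverse()
    # shaped like the estimators' [None, 1] target placeholder
    return np.reshape(targets, [-1, 1])

# hypothetical three-step rollout that did not terminate
print(discounted_td_targets([1.0, 0.0, -1.0], tail_value=0.5))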
[ [ "numpy.mean", "tensorflow.train.AdamOptimizer", "tensorflow.Variable", "numpy.reshape", "tensorflow.layers.dense", "tensorflow.subtract", "tensorflow.reset_default_graph", "tensorflow.contrib.distributions.Normal", "tensorflow.Session", "tensorflow.random_normal_initializer", "numpy.zeros", "sklearn.kernel_approximation.RBFSampler", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.clip_by_value", "tensorflow.get_default_session", "tensorflow.contrib.framework.get_global_step", "numpy.random.uniform", "tensorflow.squared_difference" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
spowlas/sarpy
[ "c4a404203607d404b93a556459aa5311d7148e95", "c4a404203607d404b93a556459aa5311d7148e95" ]
[ "sarpy/geometry/point_projection.py", "sarpy/deprecated/io/DEM/DEM.py" ]
[ "\"\"\"\nFunctions to map between the coordinates in image pixel space and geographical coordinates.\n\"\"\"\n\nimport logging\nfrom typing import Tuple\nfrom types import MethodType # for binding a method dynamically to a class\n\nimport numpy\n\nfrom . import geocoords\nfrom ..io.complex.sicd_elements.blocks import Poly2DType, XYZPolyType\nfrom ..io.DEM.DEM import DTEDList, GeoidHeight, DTEDInterpolator\n\n\n__classification__ = \"UNCLASSIFIED\"\n__author__ = (\"Thomas McCullough\", \"Wade Schwartzkopf\")\n\n\n#############\n# Ground-to-Image (aka Scene-to-Image) projection.\n\n# noinspection PyUnusedLocal\ndef _validate_coords(coords, sicd):\n if not isinstance(coords, numpy.ndarray):\n coords = numpy.array(coords, dtype=numpy.float64)\n\n orig_shape = coords.shape\n\n if len(orig_shape) == 1:\n coords = numpy.reshape(coords, (1, -1))\n if coords.shape[-1] != 3:\n raise ValueError(\n 'The coords array must represent an array of points in ECF coordinates, '\n 'so the final dimension of coords must have length 3. Have coords.shape = {}'.format(coords.shape))\n\n # TODO: possibly check for coordinates too far from the sicd box?\n return coords, orig_shape\n\n\ndef _ground_to_image(coords, coa_proj, uGPN,\n SCP, SCP_Pixel, uIPN, sf, row_ss, col_ss, uProj,\n row_col_transform, ipp_transform, delta_gp_max, max_iterations):\n \"\"\"\n Basic level helper function.\n\n Parameters\n ----------\n coords : numpy.ndarray|tuple|list\n coa_proj : COAProjection\n uGPN : numpy.ndarray\n SCP : numpy.ndarray\n SCP_Pixel : numpy.ndarray\n uIPN : numpy.ndarray\n sf : float\n row_ss : float\n col_ss : float\n uProj : numpy.ndarray\n row_col_transform : numpy.ndarray\n ipp_transform : numpy.ndarray\n delta_gp_max : float\n max_iterations : int\n\n Returns\n -------\n Tuple[numpy.ndarray, float, int]\n * `image_points` - the determined image point array, of size `N x 2`. 
Following SICD convention,\n        the upper-left pixel is [0, 0].\n        * `delta_gpn` - residual ground plane displacement (m).\n        * `iterations` - the number of iterations performed.\n    \"\"\"\n    g_n = coords.copy()\n    im_points = numpy.zeros((coords.shape[0], 2), dtype=numpy.float64)\n    delta_gpn = numpy.zeros((coords.shape[0],), dtype=numpy.float64)\n    cont = True\n    iteration = 0\n\n    matrix_transform = numpy.dot(row_col_transform, ipp_transform)\n    # (3 x 2)*(2 x 2) = (3 x 2)\n\n    while cont:\n        # NB: this iterates over all points or none - progressively dropping converged\n        # points saves little computation time, so it is not done here.\n        # project ground plane to image plane iteration\n        iteration += 1\n        dist_n = numpy.dot(SCP - g_n, uIPN)/sf  # (N, )\n        i_n = g_n + numpy.outer(dist_n, uProj)  # (N, 3)\n        delta_ipp = i_n - SCP  # (N, 3)\n        ip_iter = numpy.dot(delta_ipp, matrix_transform)  # (N, 2)\n        im_points[:, 0] = ip_iter[:, 0]/row_ss + SCP_Pixel[0]\n        im_points[:, 1] = ip_iter[:, 1]/col_ss + SCP_Pixel[1]\n        # transform to ground plane containing the scene points and check how it compares\n        p_n = _image_to_ground_plane(im_points, coa_proj, g_n, uGPN)\n        # compute displacement between scene point and this new projected point\n        diff_n = coords - p_n\n        disp_pn = numpy.linalg.norm(diff_n, axis=1)\n        delta_gpn[:] = disp_pn  # record the residual displacement for the return value\n        # continue only while some point is out of tolerance and the iteration budget remains\n        cont = numpy.any(disp_pn > delta_gp_max) and (iteration < max_iterations)\n        if cont:\n            g_n += diff_n\n\n    return im_points, delta_gpn, iteration\n\n\ndef ground_to_image(coords, sicd, delta_gp_max=None, max_iterations=10, block_size=50000,\n                    delta_arp=None, delta_varp=None, range_bias=None, adj_params_frame='ECF'):\n    \"\"\"\n    Transforms a 3D ECF point to pixel (row/column) coordinates. This is\n    implemented in accordance with the SICD Image Projections Description Document.\n    **Really Scene-To-Image projection.**\n\n    Parameters\n    ----------\n    coords : numpy.ndarray|tuple|list\n        ECF coordinate to map to scene coordinates, of size `N x 3`.\n    sicd : sarpy.io.complex.sicd_elements.SICD.SICDType\n        SICD metadata structure.\n    delta_gp_max : float|None\n        Ground plane displacement tolerance (m). Defaults to 0.1*pixel.\n    max_iterations : int\n        maximum number of iterations to perform\n    block_size : int|None\n        size of blocks of coordinates to transform at a time\n    delta_arp : None|numpy.ndarray|list|tuple\n        ARP position adjustable parameter (ECF, m). Defaults to 0 in each coordinate.\n    delta_varp : None|numpy.ndarray|list|tuple\n        VARP position adjustable parameter (ECF, m/s). Defaults to 0 in each coordinate.\n    range_bias : float|int\n        Range bias adjustable parameter (m), defaults to 0.\n    adj_params_frame : str\n        One of ['ECF', 'RIC_ECF', 'RIC_ECI'], specifying the coordinate frame used for\n        expressing `delta_arp` and `delta_varp` parameters.\n\n    Returns\n    -------\n    Tuple[numpy.ndarray, float, int]\n        * `image_points` - the determined image point array, of size `N x 2`. 
Following\n the SICD convention, he upper-left pixel is [0, 0].\n * `delta_gpn` - residual ground plane displacement (m).\n * `iterations` - the number of iterations performed.\n \"\"\"\n\n coords, orig_shape = _validate_coords(coords, sicd)\n\n row_ss = sicd.Grid.Row.SS\n col_ss = sicd.Grid.Col.SS\n pixel_size = numpy.sqrt(row_ss*row_ss + col_ss*col_ss)\n if delta_gp_max is None:\n delta_gp_max = 0.1*pixel_size\n delta_gp_max = float(delta_gp_max)\n if delta_gp_max < 0.01*pixel_size:\n delta_gp_max = 0.01*pixel_size\n logging.warning('delta_gp_max was less than 0.01*pixel_size, '\n 'and has been reset to {}'.format(delta_gp_max))\n\n coa_proj = COAProjection(sicd, delta_arp, delta_varp, range_bias, adj_params_frame)\n\n # establishing the basic projection components\n SCP_Pixel = sicd.ImageData.SCPPixel.get_array()\n uRow = sicd.Grid.Row.UVectECF.get_array() # unit normal in row direction\n uCol = sicd.Grid.Col.UVectECF.get_array() # unit normal in column direction\n uIPN = numpy.cross(uRow, uCol) # image plane unit normal\n uIPN /= numpy.linalg.norm(uIPN) # NB: uRow/uCol may not be perpendicular\n cos_theta = numpy.dot(uRow, uCol)\n sin_theta = numpy.sqrt(1 - cos_theta*cos_theta)\n ipp_transform = numpy.array([[1, -cos_theta], [-cos_theta, 1]], dtype=numpy.float64)/(sin_theta*sin_theta)\n row_col_transform = numpy.zeros((3, 2), dtype=numpy.float64)\n row_col_transform[:, 0] = uRow\n row_col_transform[:, 1] = uCol\n\n SCP = sicd.GeoData.SCP.ECF.get_array()\n uGPN = sicd.PFA.FPN.get_array() if sicd.ImageFormation.ImageFormAlgo == 'PFA' \\\n else geocoords.wgs_84_norm(SCP)\n ARP_SCP_COA = sicd.SCPCOA.ARPPos.get_array()\n VARP_SCP_COA = sicd.SCPCOA.ARPVel.get_array()\n uSPN = sicd.SCPCOA.look*numpy.cross(VARP_SCP_COA, SCP-ARP_SCP_COA)\n uSPN /= numpy.linalg.norm(uSPN)\n # uSPN - defined in section 3.1 as normal to instantaneous slant plane that contains SCP at SCP COA is\n # tangent to R/Rdot contour at SCP. Points away from center of Earth. 
Use look to establish sign.\n sf = float(numpy.dot(uSPN, uIPN)) # scale factor\n\n # prepare the work space\n coords_view = numpy.reshape(coords, (-1, 3)) # possibly or make 2-d flatten\n num_points = coords_view.shape[0]\n if block_size is None or num_points <= block_size:\n image_points, delta_gpn, iters = _ground_to_image(\n coords_view, coa_proj, uGPN,\n SCP, SCP_Pixel, uIPN, sf, row_ss, col_ss, uSPN,\n row_col_transform, ipp_transform, delta_gp_max, max_iterations)\n else:\n image_points = numpy.zeros((num_points, 2), dtype=numpy.float64)\n delta_gpn = numpy.zeros((num_points, ), dtype=numpy.float64)\n iters = numpy.zeros((num_points, ), dtype=numpy.int16)\n\n # proceed with block processing\n start_block = 0\n while start_block < num_points:\n end_block = min(start_block+block_size, num_points)\n image_points[start_block:end_block, :], delta_gpn[start_block:end_block], \\\n iters[start_block:end_block] = _ground_to_image(\n coords_view[start_block:end_block, :], coa_proj, uGPN,\n SCP, SCP_Pixel, uIPN, sf, row_ss, col_ss, uSPN,\n row_col_transform, ipp_transform, delta_gp_max, max_iterations)\n start_block = end_block\n\n if len(orig_shape) == 1:\n image_points = numpy.reshape(image_points, (-1,))\n elif len(orig_shape) > 1:\n image_points = numpy.reshape(image_points, orig_shape[:-1]+(2, ))\n delta_gpn = numpy.reshape(delta_gpn, orig_shape[:-1])\n iters = numpy.reshape(iters, orig_shape[:-1])\n return image_points, delta_gpn, iters\n\n\ndef ground_to_image_geo(coords, sicd, **kwargs):\n \"\"\"\n Transforms a 3D Lat/Lon/HAE point to pixel (row/column) coordinates.\n This is implemented in accordance with the SICD Image Projections Description Document.\n\n Parameters\n ----------\n coords : numpy.ndarray|tuple|list\n Lat/Lon/HAE coordinate to map to scene coordinates, of size `N x 3`.\n sicd : sarpy.io.complex.sicd_elements.SICD.SICDType\n SICD meta data structure.\n kwargs : dict\n See the key word arguments of :func:`ground_to_image`\n\n Returns\n -------\n Tuple[numpy.ndarray, float, int]\n * `image_points` - the determined image point array, of size `N x 2`. 
Following SICD convention,\n the upper-left pixel is [0, 0].\n * `delta_gpn` - residual ground plane displacement (m).\n * `iterations` - the number of iterations performed.\n \"\"\"\n\n return ground_to_image(geocoords.geodetic_to_ecf(coords), sicd, **kwargs)\n\n\n############\n# Image-To-Ground projections\n\ndef _ric_ecf_mat(rarp, varp, frame_type):\n \"\"\"\n Computes the ECF transformation matrix for RIC frame.\n\n Parameters\n ----------\n rarp : numpy.ndarray\n varp : numpy.ndarray\n frame_type : str\n the final three characters should be one of ['ECI', 'ECF']\n\n Returns\n -------\n numpy.ndarray\n the RIC transform matrix (array)\n \"\"\"\n\n # Angular velocity of earth in radians/second, not including precession\n w = 7292115.1467E-11\n typ = frame_type.upper()[-3:]\n vi = varp if typ == 'ECF' else varp + numpy.cross([0, 0, w], rarp)\n\n r = rarp/numpy.linalg.norm(rarp)\n c = numpy.cross(r, vi)\n c /= numpy.linalg.norm(c) # NB: perpendicular to r\n i = numpy.cross(c, r)\n # this is the cross of two perpendicular normal vectors, so normal\n return numpy.array([r, i, c], dtype=numpy.float64)\n\n\nclass COAProjection(object):\n \"\"\"\n The COA projection object - provide common projection functionality for all Image-to-R/Rdot projection.\n \"\"\"\n\n def __init__(self, sicd, delta_arp=None, delta_varp=None, range_bias=None, adj_params_frame='ECF'):\n \"\"\"\n\n Parameters\n ----------\n sicd : sarpy.io.complex.sicd_elements.SICD.SICDType\n The SICD metadata structure.\n delta_arp : None|numpy.ndarray|list|tuple\n ARP position adjustable parameter (ECF, m). Defaults to 0 in each coordinate.\n delta_varp : None|numpy.ndarray|list|tuple\n VARP position adjustable parameter (ECF, m/s). Defaults to 0 in each coordinate.\n range_bias : float|int\n Range bias adjustable parameter (m), defaults to 0.\n adj_params_frame : str\n One of ['ECF', 'RIC_ECF', 'RIC_ECI'], specifying the coordinate frame used for\n expressing `delta_arp` and `delta_varp` parameters.\n \"\"\"\n\n if not sicd.can_project_coordinates():\n raise ValueError('Insufficient metadata populated to formulate projection.')\n\n time_coa_poly = sicd.Grid.TimeCOAPoly\n # fall back to approximation if TimeCOAPoly is not populated\n if time_coa_poly is None:\n time_coa_poly = Poly2DType(Coefs=[[sicd.Timeline.CollectDuration/2, ], ])\n logging.warning(\n 'Using (constant) approximation to TimeCOAPoly, which may result in poor projection results.')\n self.time_coa_poly = time_coa_poly # type: Poly2DType\n\n self.arp_poly = sicd.Position.ARPPoly # type: XYZPolyType\n self.varp_poly = self.arp_poly.derivative(der_order=1, return_poly=True) # type: XYZPolyType\n\n self.row_ss = sicd.Grid.Row.SS # type: float\n self.col_ss = sicd.Grid.Col.SS # type: float\n self.first_row = sicd.ImageData.FirstRow # type: int\n self.first_col = sicd.ImageData.FirstCol # type: int\n self.scp_row = sicd.ImageData.SCPPixel.Row # type: int\n self.scp_col = sicd.ImageData.SCPPixel.Col # type: int\n\n if delta_arp is None:\n delta_arp = numpy.array([0, 0, 0], dtype=numpy.float64)\n if not isinstance(delta_arp, numpy.ndarray):\n delta_arp = numpy.array(delta_arp, dtype=numpy.float64)\n if delta_arp.shape != (3, ):\n raise ValueError('delta_arp must have shape (3, ). 
Got {}'.format(delta_arp.shape))\n\n if delta_varp is None:\n delta_varp = numpy.array([0, 0, 0], dtype=numpy.float64)\n if not isinstance(delta_varp, numpy.ndarray):\n delta_varp = numpy.array(delta_varp, dtype=numpy.float64)\n if delta_varp.shape != (3, ):\n raise ValueError('delta_varp must have shape (3, ). Got {}'.format(delta_varp.shape))\n\n if adj_params_frame in ['RIC_ECI', 'RIC_ECF']:\n if sicd.SCPCOA.ARPPos is None or sicd.SCPCOA.ARPVel is None:\n raise ValueError(\n 'The adj_params_frame is of RIC type, but one of SCPCOA.ARPPos or '\n 'SCPCOA.ARPVel is not populated.')\n ARP_SCP_COA = sicd.SCPCOA.ARPPos.get_array()\n VARP_SCP_COA = sicd.SCPCOA.ARPVel.get_array()\n ric_matrix = _ric_ecf_mat(ARP_SCP_COA, VARP_SCP_COA, adj_params_frame)\n delta_arp = ric_matrix.dot(delta_arp)\n delta_varp = ric_matrix.dot(delta_varp)\n self.delta_arp = delta_arp # type: numpy.ndarray\n self.delta_varp = delta_varp # type: numpy.ndarray\n\n if range_bias is None:\n range_bias = 0.0\n else:\n range_bias = float(range_bias)\n self.range_bias = range_bias # type: float\n # bind the method specific intermediate projection method\n self._method_proj = MethodType(_get_type_specific_projection(sicd), self)\n\n def _init_proj(self, im_points):\n \"\"\"\n\n Parameters\n ----------\n im_points : numpy.ndarray\n\n Returns\n -------\n Tuple[numpy.ndarray,...]\n \"\"\"\n\n row_meters = (im_points[:, 0] + self.first_row - self.scp_row)*self.row_ss\n col_meters = (im_points[:, 1] + self.first_col - self.scp_col)*self.col_ss\n t_coa = self.time_coa_poly(row_meters, col_meters)\n # calculate aperture reference position and velocity at target time\n arp_coa = self.arp_poly(t_coa)\n varp_coa = self.varp_poly(t_coa)\n return row_meters, col_meters, t_coa, arp_coa, varp_coa\n\n def projection(self, im_points):\n \"\"\"\n Perform the projection from image coordinates to R/Rdot coordinates.\n\n Parameters\n ----------\n im_points : numpy.ndarray\n This array of image point coordinates, **expected to have shape (N, 2)**.\n\n Returns\n -------\n Tuple[numpy.ndarray,numpy.ndarray,numpy.ndarray,numpy.ndarray,numpy.ndarray]\n * `r_tgt_coa` - range to the ARP at COA\n * `r_dot_tgt_coa` - range rate relative to the ARP at COA\n * `t_coa` - center of aperture time since CDP start for input ip\n * `arp_coa` - aperture reference position at t_coa\n * `varp_coa` - velocity at t_coa\n \"\"\"\n\n row_meters, col_meters, t_coa, arp_coa, varp_coa = self._init_proj(im_points)\n r_tgt_coa, r_dot_tgt_coa = self._method_proj(row_meters, col_meters, t_coa, arp_coa, varp_coa)\n # adjust parameters (TODO: after all the calculations?)\n arp_coa += self.delta_arp\n varp_coa += self.delta_varp\n r_tgt_coa += self.range_bias\n return r_tgt_coa, r_dot_tgt_coa, t_coa, arp_coa, varp_coa\n\n\ndef _get_type_specific_projection(sicd):\n \"\"\"\n Gets an intermediate method specific projection method with six required\n calling arguments (self, row_meters, col_meters, t_coa, arp_coa, varp_coa).\n\n Parameters\n ----------\n sicd : sarpy.io.complex.sicd_elements.SICD.SICDType\n\n Returns\n -------\n callable\n \"\"\"\n # triple-nested function - it was conceptually clearest...\n\n def pfa_projection():\n SCP = sicd.GeoData.SCP.ECF.get_array()\n pfa = sicd.PFA\n polar_ang_poly = pfa.PolarAngPoly\n spatial_freq_sf_poly = pfa.SpatialFreqSFPoly\n polar_ang_poly_der = polar_ang_poly.derivative(der_order=1, return_poly=True)\n spatial_freq_sf_poly_der = spatial_freq_sf_poly.derivative(der_order=1, return_poly=True)\n\n polar_ang_poly_der = 
polar_ang_poly.derivative(der_order=1, return_poly=True)\n spatial_freq_sf_poly_der = spatial_freq_sf_poly.derivative(der_order=1, return_poly=True)\n\n # noinspection PyUnusedLocal, PyIncorrectDocstring\n def method_projection(instance, row_meters, col_meters, t_coa, arp_coa, varp_coa):\n \"\"\"\n PFA specific intermediate projection.\n\n Parameters\n ----------\n row_meters : numpy.ndarray\n col_meters : numpy.ndarray\n t_coa : numpy.ndarray\n arp_coa : numpy.ndarray\n varp_coa : numpy.ndarray\n\n Returns\n -------\n Tuple[numpy.ndarray, numpy.ndarray]\n \"\"\"\n\n ARP_minus_SCP = arp_coa - SCP\n rSCPTgtCoa = numpy.linalg.norm(ARP_minus_SCP, axis=-1)\n rDotSCPTgtCoa = numpy.sum(varp_coa * ARP_minus_SCP, axis=-1) / rSCPTgtCoa\n\n thetaTgtCoa = polar_ang_poly(t_coa)\n dThetaDtTgtCoa = polar_ang_poly_der(t_coa)\n # Compute polar aperture scale factor (KSF) and derivative wrt polar angle\n ksfTgtCoa = spatial_freq_sf_poly(thetaTgtCoa)\n dKsfDThetaTgtCoa = spatial_freq_sf_poly_der(thetaTgtCoa)\n # Compute spatial frequency domain phase slopes in Ka and Kc directions\n # NB: sign for the phase may be ignored as it is cancelled in a subsequent computation.\n dPhiDKaTgtCoa = row_meters * numpy.cos(thetaTgtCoa) + col_meters * numpy.sin(thetaTgtCoa)\n dPhiDKcTgtCoa = -row_meters * numpy.sin(thetaTgtCoa) + col_meters * numpy.cos(thetaTgtCoa)\n # Compute range relative to SCP\n deltaRTgtCoa = ksfTgtCoa * dPhiDKaTgtCoa\n # Compute derivative of range relative to SCP wrt polar angle.\n # Scale by derivative of polar angle wrt time.\n dDeltaRDThetaTgtCoa = dKsfDThetaTgtCoa * dPhiDKaTgtCoa + ksfTgtCoa * dPhiDKcTgtCoa\n deltaRDotTgtCoa = dDeltaRDThetaTgtCoa * dThetaDtTgtCoa\n return rSCPTgtCoa + deltaRTgtCoa, rDotSCPTgtCoa + deltaRDotTgtCoa\n\n return method_projection\n\n def rgazcomp_projection():\n SCP = sicd.GeoData.SCP.ECF.get_array()\n az_sf = sicd.RgAzComp.AzSF\n\n # noinspection PyUnusedLocal, PyIncorrectDocstring\n def method_projection(instance, row_meters, col_meters, t_coa, arp_coa, varp_coa):\n \"\"\"\n RgAzComp specific intermediate projection.\n\n Parameters\n ----------\n row_meters : numpy.ndarray\n col_meters : numpy.ndarray\n t_coa : numpy.ndarray\n arp_coa : numpy.ndarray\n varp_coa : numpy.ndarray\n\n Returns\n -------\n Tuple[numpy.ndarray, numpy.ndarray]\n \"\"\"\n\n ARP_minus_SCP = arp_coa - SCP\n rSCPTgtCoa = numpy.linalg.norm(ARP_minus_SCP, axis=-1)\n rDotSCPTgtCoa = numpy.sum(varp_coa*ARP_minus_SCP, axis=-1)/rSCPTgtCoa\n deltaRTgtCoa = row_meters\n deltaRDotTgtCoa = -numpy.linalg.norm(varp_coa, axis=-1)*az_sf*col_meters\n return rSCPTgtCoa + deltaRTgtCoa, rDotSCPTgtCoa + deltaRDotTgtCoa\n\n return method_projection\n\n def inca_projection():\n inca = sicd.RMA.INCA\n r_ca_scp = inca.R_CA_SCP\n time_ca_poly = inca.TimeCAPoly\n drate_sf_poly = inca.DRateSFPoly\n\n # noinspection PyUnusedLocal, PyIncorrectDocstring\n def method_projection(instance, row_meters, col_meters, t_coa, arp_coa, varp_coa):\n \"\"\"\n INCA specific intermediate projection.\n\n Parameters\n ----------\n row_meters : numpy.ndarray\n col_meters : numpy.ndarray\n t_coa : numpy.ndarray\n arp_coa : numpy.ndarray\n varp_coa : numpy.ndarray\n\n Returns\n -------\n Tuple[numpy.ndarray, numpy.ndarray]\n \"\"\"\n\n # compute range/time of closest approach\n R_CA_TGT = r_ca_scp + row_meters # Range at closest approach\n t_CA_TGT = time_ca_poly(col_meters) # Time of closest approach\n # Compute ARP velocity magnitude (actually squared, since that's how it's used) at t_CA_TGT\n VEL2_CA_TGT = 
numpy.sum(instance.varp_poly(t_CA_TGT)**2, axis=-1)\n            # Compute the Doppler Rate Scale Factor for the image grid location\n            DRSF_TGT = drate_sf_poly(row_meters, col_meters)\n            # Difference between COA time and CA time\n            dt_COA_TGT = t_coa - t_CA_TGT\n            r_tgt_coa = numpy.sqrt(R_CA_TGT*R_CA_TGT + DRSF_TGT*VEL2_CA_TGT*dt_COA_TGT*dt_COA_TGT)\n            r_dot_tgt_coa = (DRSF_TGT/r_tgt_coa)*VEL2_CA_TGT*dt_COA_TGT\n            return r_tgt_coa, r_dot_tgt_coa\n\n        return method_projection\n\n    def plane_projection():\n        SCP = sicd.GeoData.SCP.ECF.get_array()\n        uRow = sicd.Grid.Row.UVectECF.get_array()\n        uCol = sicd.Grid.Col.UVectECF.get_array()\n\n        # noinspection PyUnusedLocal, PyIncorrectDocstring\n        def method_projection(instance, row_meters, col_meters, t_coa, arp_coa, varp_coa):\n            \"\"\"\n            Plane specific intermediate projection.\n\n            Parameters\n            ----------\n            row_meters : numpy.ndarray\n            col_meters : numpy.ndarray\n            t_coa : numpy.ndarray\n            arp_coa : numpy.ndarray\n            varp_coa : numpy.ndarray\n\n            Returns\n            -------\n            Tuple[numpy.ndarray, numpy.ndarray]\n            \"\"\"\n\n            ARP_minus_IPP = arp_coa - (SCP + numpy.outer(row_meters, uRow) + numpy.outer(col_meters, uCol))\n            r_tgt_coa = numpy.linalg.norm(ARP_minus_IPP, axis=-1)\n            r_dot_tgt_coa = numpy.sum(varp_coa * ARP_minus_IPP, axis=-1)/r_tgt_coa\n            return r_tgt_coa, r_dot_tgt_coa\n        return method_projection\n\n    # NB: sicd.can_project_coordinates() has been called, so all required attributes\n    # must be populated\n    if sicd.Grid.Type == 'RGAZIM':\n        if sicd.ImageFormation.ImageFormAlgo == 'PFA':\n            return pfa_projection()\n        elif sicd.ImageFormation.ImageFormAlgo == 'RGAZCOMP':\n            return rgazcomp_projection()\n    elif sicd.Grid.Type == 'RGZERO':\n        return inca_projection()\n    elif sicd.Grid.Type in ['XRGYCR', 'XCTYAT', 'PLANE']:\n        return plane_projection()\n    else:\n        # NB: this will have been noted by sicd.can_project_coordinates(), but is\n        # here for completeness\n        raise ValueError('Unhandled Grid.Type {}'.format(sicd.Grid.Type))\n\n\ndef _validate_im_points(im_points, sicd):\n    \"\"\"\n\n    Parameters\n    ----------\n    im_points : numpy.ndarray|list|tuple\n    sicd : sarpy.io.complex.sicd_elements.SICD.SICDType\n\n    Returns\n    -------\n    numpy.ndarray\n    \"\"\"\n\n    if im_points is None:\n        raise ValueError('The argument cannot be None')\n\n    if not isinstance(im_points, numpy.ndarray):\n        im_points = numpy.array(im_points, dtype=numpy.float64)\n\n    orig_shape = im_points.shape\n\n    if len(im_points.shape) == 1:\n        im_points = numpy.reshape(im_points, (1, -1))\n    if im_points.shape[-1] != 2:\n        raise ValueError(\n            'The im_points array must represent an array of points in pixel coordinates, '\n            'so the final dimension of im_points must have length 2. Have im_points.shape = {}'.format(im_points.shape))\n\n    # sanity check that the entries of im_points are not wildly out of bounds\n    rows = sicd.ImageData.NumRows\n    cols = sicd.ImageData.NumCols\n    row_bounds = (-rows/2, 3*rows/2)\n    col_bounds = (-cols/2, 3*cols/2)\n    if numpy.any(\n            (im_points[:, 0] < row_bounds[0]) | (im_points[:, 0] > row_bounds[1]) |\n            (im_points[:, 1] < col_bounds[0]) | (im_points[:, 1] > col_bounds[1])):\n        raise ValueError(\n            'The sicd has {} rows and {} cols. 
image_to_ground projection effort '\n 'requires row coordinates in the range {} and column coordinates '\n 'in the range {}'.format(rows, cols, row_bounds, col_bounds))\n return im_points, orig_shape\n\n\ndef image_to_ground(im_points, sicd, block_size=50000, projection_type='HAE', **kwargs):\n \"\"\"\n Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)\n described in SICD Image Projections document.\n\n Parameters\n ----------\n im_points : numpy.ndarray|list|tuple\n (row, column) coordinates of N points in image (or subimage if FirstRow/FirstCol are nonzero).\n Following SICD convention, the upper-left pixel is [0, 0].\n sicd : sarpy.io.complex.sicd_elements.SICD.SICDType\n SICD meta data structure.\n block_size : None|int\n Size of blocks of coordinates to transform at a time. The entire array will be\n transformed as a single block if `None`.\n projection_type : str\n One of ['PLANE', 'HAE', 'DEM'].\n kwargs : dict\n keyword arguments relevant for the given projection type. See image_to_ground_plane/hae/dem methods.\n\n Returns\n -------\n numpy.ndarray\n Physical coordinates (in ECF) corresponding input image coordinates. The interpretation\n or meaning of the physical coordinates depends on `projection_type` chosen.\n \"\"\"\n\n p_type = projection_type.upper()\n if p_type == 'PLANE':\n return image_to_ground_plane(im_points, sicd, block_size=block_size, **kwargs)\n elif p_type == 'HAE':\n return image_to_ground_hae(im_points, sicd, block_size=block_size, **kwargs)\n elif p_type == 'DEM':\n return image_to_ground_dem(im_points, sicd, block_size=block_size, **kwargs)\n else:\n raise ValueError('Got unrecognized projection type {}'.format(projection_type))\n\n\ndef image_to_ground_geo(im_points, sicd, **kwargs):\n \"\"\"\n Transforms image coordinates to ground plane Lat/Lon/HAE coordinate via the algorithm(s)\n described in SICD Image Projections document.\n\n Parameters\n ----------\n im_points : numpy.ndarray|list|tuple\n (row, column) coordinates of N points in image (or subimage if FirstRow/FirstCol are nonzero).\n Following SICD convention, the upper-left pixel is [0, 0].\n sicd : sarpy.io.complex.sicd_elements.SICD.SICDType\n SICD meta data structure.\n kwargs : dict\n See the keyword arguments in :func:`image_to_ground`.\n\n Returns\n -------\n numpy.ndarray\n Ground Plane Point (in Lat/Lon/HAE coordinates) along the R/Rdot contour.\n \"\"\"\n\n return geocoords.ecf_to_geodetic(image_to_ground(im_points, sicd, **kwargs))\n\n\n#####\n# Image-to-Ground Plane\n\ndef _image_to_ground_plane_perform(r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, gref, uZ):\n \"\"\"\n\n Parameters\n ----------\n r_tgt_coa : numnpy.ndarray\n r_dot_tgt_coa : numnpy.ndarray\n arp_coa : numnpy.ndarray\n varp_coa : numnpy.ndarray\n gref : numnpy.ndarray\n uZ : numnpy.ndarray\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n # Solve for the intersection of a R/Rdot contour and a ground plane.\n arpZ = numpy.sum((arp_coa - gref)*uZ, axis=-1)\n arpZ[arpZ > r_tgt_coa] = numpy.nan\n # ARP ground plane nadir\n aGPN = arp_coa - numpy.outer(arpZ, uZ)\n # Compute ground plane distance (gd) from ARP nadir to circle of const range\n gd = numpy.sqrt(r_tgt_coa*r_tgt_coa - arpZ*arpZ)\n # Compute sine and cosine of grazing angle\n cosGraz = gd/r_tgt_coa\n sinGraz = arpZ/r_tgt_coa\n\n # Velocity components normal to ground plane and parallel to ground plane.\n vMag = numpy.linalg.norm(varp_coa, axis=-1)\n vZ = numpy.dot(varp_coa, uZ)\n vX = numpy.sqrt(vMag*vMag - vZ*vZ) # Note: For Vx = 0, no 
Solution\n # Orient X such that Vx > 0 and compute unit vectors uX and uY\n uX = ((varp_coa - numpy.outer(vZ, uZ)).T/vX).T\n uY = numpy.cross(uZ, uX)\n # Compute cosine of azimuth angle to ground plane point\n cosAz = (-r_dot_tgt_coa+vZ*sinGraz) / (vX * cosGraz)\n cosAz[numpy.abs(cosAz) > 1] = numpy.nan # R/Rdot combination not possible in given plane\n\n # Compute sine of azimuth angle. Use LOOK to establish sign.\n look = numpy.sign(numpy.dot(numpy.cross(arp_coa-gref, varp_coa), uZ))\n sinAz = look * numpy.sqrt(1-cosAz*cosAz)\n\n # Compute Ground Plane Point in ground plane and along the R/Rdot contour\n return aGPN + (uX.T*gd*cosAz + uY.T*gd*sinAz).T\n\n\ndef _image_to_ground_plane(im_points, coa_projection, gref, uZ):\n \"\"\"\n\n Parameters\n ----------\n im_points : numpy.ndarray\n coa_projection : COAProjection\n gref : numpy.ndarray\n uZ : numpy.ndarray\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n r_tgt_coa, r_dot_tgt_coa, t_coa, arp_coa, varp_coa = coa_projection.projection(im_points)\n return _image_to_ground_plane_perform(r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, gref, uZ)\n\n\ndef image_to_ground_plane(im_points, sicd, block_size=50000, gref=None, ugpn=None, **coa_args):\n \"\"\"\n Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)\n described in SICD Image Projections document.\n\n Parameters\n ----------\n im_points : numpy.ndarray|list|tuple\n the image coordinate array\n sicd : sarpy.io.complex.sicd_elements.SICD.SICDType\n the SICD metadata structure.\n block_size : None|int\n Size of blocks of coordinates to transform at a time. The entire array will be\n transformed as a single block if `None`.\n gref : None|numpy.ndarray|list|tuple\n Ground plane reference point ECF coordinates (m). The default is the SCP\n ugpn : None|numpy.ndarray|list|tuple\n Vector normal to the plane to which we are projecting.\n coa_args : dict\n keyword arguments for COAProjection constructor.\n\n Returns\n -------\n numpy.ndarray\n Ground Plane Point (in ECF coordinates) corresponding to the input image coordinates.\n \"\"\"\n\n # method parameter validation\n if gref is None:\n gref = sicd.GeoData.SCP.ECF.get_array()\n if ugpn is None:\n ugpn = sicd.PFA.FPN.get_array() if sicd.ImageFormation.ImageFormAlgo == 'PFA' \\\n else geocoords.wgs_84_norm(gref)\n if len(ugpn.shape) == 2:\n ugpn = numpy.reshape(ugpn, (3, ))\n uZ = ugpn/numpy.linalg.norm(ugpn)\n\n # coa projection creation\n im_points, orig_shape = _validate_im_points(im_points, sicd)\n coa_proj = COAProjection(sicd, **coa_args)\n\n # prepare workspace\n im_points_view = numpy.reshape(im_points, (-1, 2)) # possibly or make 2-d flatten\n num_points = im_points_view.shape[0]\n if block_size is None or num_points <= block_size:\n coords = _image_to_ground_plane(im_points_view, coa_proj, gref, uZ)\n else:\n coords = numpy.zeros((num_points, 3), dtype=numpy.float64)\n # proceed with block processing\n start_block = 0\n while start_block < num_points:\n end_block = min(start_block + block_size, num_points)\n coords[start_block:end_block, :] = _image_to_ground_plane(\n im_points_view[start_block:end_block], coa_proj, gref, uZ)\n start_block = end_block\n\n if len(orig_shape) == 1:\n coords = numpy.reshape(coords, (-1, ))\n elif len(orig_shape) > 1:\n coords = numpy.reshape(coords, orig_shape[:-1] + (3,))\n return coords\n\n\n#####\n# Image-to-HAE\n\ndef _image_to_ground_hae_perform(\n r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, SCP, ugpn,\n hae0, delta_hae_max, hae_nlim, scp_hae):\n \"\"\"\n Intermediate 
helper method.\n\n Parameters\n ----------\n r_tgt_coa : numpy.ndarray\n r_dot_tgt_coa : numpy.ndarray\n arp_coa : numpy.ndarray\n varp_coa : numpy.ndarray\n SCP : numpy.ndarray\n ugpn : numpy.ndarray\n hae0 : float\n delta_hae_max : float\n hae_nlim : int\n scp_hae : float\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n # Compute the geodetic ground plane normal at the SCP.\n look = numpy.sign(numpy.sum(numpy.cross(arp_coa, varp_coa)*(SCP-arp_coa), axis=1))\n gref = SCP - (scp_hae - hae0)*ugpn\n # iteration variables\n gpp = None\n delta_hae = None\n cont = True\n iters = 0\n while cont:\n iters += 1\n # Compute the precise projection along the R/Rdot contour to Ground Plane.\n gpp = _image_to_ground_plane_perform(r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, gref, ugpn)\n # check our hae value versus hae0\n gpp_llh = geocoords.ecf_to_geodetic(gpp)\n delta_hae = gpp_llh[:, 2] - hae0\n abs_delta_hae = numpy.abs(delta_hae)\n # should we stop our iteration?\n cont = numpy.all(abs_delta_hae > delta_hae_max) and (iters <= hae_nlim)\n if cont:\n delta_hae_min = delta_hae[numpy.argmin(abs_delta_hae)]\n gref -= delta_hae_min*ugpn\n # Compute the unit slant plane normal vector, uspn, that is tangent to the R/Rdot contour at point gpp\n uspn = (numpy.cross(varp_coa, (gpp - arp_coa)).T*look).T\n uspn = (uspn.T/numpy.linalg.norm(uspn, axis=-1)).T\n # For the final straight line projection, project from point gpp along\n # the slant plane normal (as opposed to the ground plane normal that was\n # used in the iteration) to point slp.\n sf = numpy.sum(ugpn*uspn, axis=-1)\n slp = gpp - (uspn.T*delta_hae/sf).T\n # Assign surface point SPP position by adjusting the HAE to be on the\n # HAE0 surface.\n spp_llh = geocoords.ecf_to_geodetic(slp)\n spp_llh[:, 2] = hae0\n spp = geocoords.geodetic_to_ecf(spp_llh)\n return spp\n\n\ndef _image_to_ground_hae(im_points, coa_projection, hae0, delta_hae_max, hae_nlim, scp_hae, SCP):\n \"\"\"\n Intermediate helper function for projection.\n\n Parameters\n ----------\n im_points : numpy.ndarray\n the image coordinate array\n coa_projection : COAProjection\n hae0 : float\n delta_hae_max : float\n hae_nlim : int\n scp_hae : float\n SCP : numpy.ndarray\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n # get (image formation specific) projection parameters\n r_tgt_coa, r_dot_tgt_coa, t_coa, arp_coa, varp_coa = coa_projection.projection(im_points)\n ugpn = geocoords.wgs_84_norm(SCP)\n return _image_to_ground_hae_perform(\n r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, SCP, ugpn,\n hae0, delta_hae_max, hae_nlim, scp_hae)\n\n\ndef image_to_ground_hae(im_points, sicd, block_size=50000,\n hae0=None, delta_hae_max=None, hae_nlim=None, **coa_args):\n \"\"\"\n Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)\n described in SICD Image Projections document.\n\n Parameters\n ----------\n im_points : numpy.ndarray|list|tuple\n the image coordinate array\n sicd : sarpy.io.complex.sicd_elements.SICD.SICDType\n the SICD metadata structure.\n block_size : None|int\n Size of blocks of coordinates to transform at a time. The entire array will be\n transformed as a single block if `None`.\n hae0 : None|float|int\n Surface height (m) above the WGS-84 reference ellipsoid for projection point.\n Defaults to HAE at the SCP.\n delta_hae_max : None|float|int\n Height threshold for convergence of iterative constant HAE computation (m). Defaults to 1.\n hae_nlim : int\n Maximum number of iterations allowed for constant hae computation. 
Defaults to 5.\n coa_args : dict\n keyword arguments for COAProjection constructor.\n\n Returns\n -------\n numpy.ndarray\n Ground Plane Point (in ECF coordinates) with target hae corresponding to\n the input image coordinates.\n \"\"\"\n\n # method parameter validation\n SCP = sicd.GeoData.SCP.ECF.get_array()\n scp_hae = sicd.GeoData.SCP.LLH.HAE\n if hae0 is None:\n hae0 = scp_hae\n\n if delta_hae_max is None:\n delta_hae_max = 1.0\n delta_hae_max = float(delta_hae_max)\n if delta_hae_max <= 1e-2:\n raise ValueError('delta_hae_max must be at least 1e-2 (1 cm). Got {0:8f}'.format(delta_hae_max))\n if hae_nlim is None:\n hae_nlim = 5\n hae_nlim = int(hae_nlim)\n if hae_nlim <= 0:\n raise ValueError('hae_nlim must be a positive integer. Got {}'.format(hae_nlim))\n\n # coa projection creation\n im_points, orig_shape = _validate_im_points(im_points, sicd)\n coa_proj = COAProjection(sicd, **coa_args)\n\n # prepare workspace\n im_points_view = numpy.reshape(im_points, (-1, 2)) # possibly or make 2-d flatten\n num_points = im_points_view.shape[0]\n if block_size is None or num_points <= block_size:\n coords = _image_to_ground_hae(im_points_view, coa_proj, hae0, delta_hae_max, hae_nlim, scp_hae, SCP)\n else:\n coords = numpy.zeros((num_points, 3), dtype=numpy.float64)\n # proceed with block processing\n start_block = 0\n while start_block < num_points:\n end_block = min(start_block + block_size, num_points)\n coords[start_block:end_block, :] = _image_to_ground_hae(\n im_points_view[start_block:end_block], coa_proj, hae0, delta_hae_max, hae_nlim, scp_hae, SCP)\n start_block = end_block\n\n if len(orig_shape) == 1:\n coords = numpy.reshape(coords, (-1,))\n elif len(orig_shape) > 1:\n coords = numpy.reshape(coords, orig_shape[:-1] + (3,))\n return coords\n\n\n#####\n# Image-to-DEM\n\ndef _image_to_ground_dem(\n im_points, coa_projection, dem_interpolator, min_dem, max_dem, horizontal_step_size, scp_hae, SCP):\n \"\"\"\n\n Parameters\n ----------\n im_points : numpy.ndarray\n coa_projection : COAProjection\n dem_interpolator : DTEDInterpolator\n min_dem : float\n max_dem : float\n horizontal_step_size : float|int\n scp_hae: float\n SCP : numpy.ndarray\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n # get (image formation specific) projection parameters\n r_tgt_coa, r_dot_tgt_coa, t_coa, arp_coa, varp_coa = coa_projection.projection(im_points)\n ugpn = geocoords.wgs_84_norm(SCP)\n delta_hae_max = 1\n hae_nlim = 5\n\n # if max_dem - min_dem is sufficiently small, then just do the simplest thing\n if max_dem - min_dem < 1:\n return _image_to_ground_hae_perform(\n r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, SCP, ugpn, max_dem,\n delta_hae_max, hae_nlim, scp_hae)\n # get projection to hae at high/low points\n coords_high = _image_to_ground_hae_perform(\n r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, SCP, ugpn, max_dem,\n delta_hae_max, hae_nlim, scp_hae)\n coords_low = _image_to_ground_hae_perform(\n r_tgt_coa, r_dot_tgt_coa, arp_coa, varp_coa, SCP, ugpn, min_dem,\n delta_hae_max, hae_nlim, scp_hae)\n ecf_diffs = coords_low - coords_high\n dists = numpy.linalg.norm(ecf_diffs, axis=1)\n # NB: the proper projection point will be the HIGHEST point\n # on the DEM along the straight line between the high and low point\n sin_ang = (max_dem - min_dem)/numpy.min(dists)\n cos_ang = numpy.sqrt(1 - sin_ang*sin_ang)\n num_pts = numpy.max(dists)/(cos_ang*horizontal_step_size)\n step = numpy.linspace(0., 1., num_pts, dtype=numpy.float64)\n # construct our lat lon space of lines\n llh_high = 
geocoords.ecf_to_geodetic(coords_high)\n llh_low = geocoords.ecf_to_geodetic(coords_low)\n # I'm drawing these lines in lat/lon space, because this should be incredibly local\n diffs = llh_low - llh_high\n elevations = numpy.linspace(max_dem, min_dem, num_pts, dtype=numpy.float64)\n # construct the space of points connecting high to low of shape (N, 2, num_pts)\n lat_lon_space = llh_low[:, :2] + numpy.multiply.outer(diffs[:, :2], step) # NB: this is a numpy.ufunc trick\n # determine the ground hae elevation at these points according to the dem interpolator\n # NB: lat_lon_elevations is shape (N, num_pts)\n lat_lon_elevation = dem_interpolator.get_elevation_hae(lat_lon_space[:, 0, :], lat_lon_space[:, 1, :], block_size=50000)\n del lat_lon_space # we can free this up, since it's potentially large\n bad_values = numpy.isnan(lat_lon_elevation)\n if numpy.any(bad_values):\n lat_lon_elevation[bad_values] = scp_hae\n # adjust by the hae, to find the diff between our line in elevation\n lat_lon_elevation -= elevations\n # these elevations should be guaranteed to start positive because we used to\n # total bounds for the DEM values\n # we find the \"first\" (in high to low order) element where the elevation is close enough to negative\n # NB: this is shape (N, )\n indices = numpy.argmax(lat_lon_elevation < 0.5, axis=1)\n # linearly interpolate to find the best guess for 0 crossing.\n prev_indices = indices - 1\n if numpy.any(prev_indices < 0):\n raise ValueError(\"The first negative entry should have occurred at a strictly positive index\")\n d1 = lat_lon_elevation[:, indices]\n d0 = lat_lon_elevation[:, prev_indices]\n frac_indices = indices + (d1/(d0 - d1))\n return coords_high + ((frac_indices/(num_pts - 1))*ecf_diffs.T).T\n\n\ndef image_to_ground_dem(im_points, sicd, block_size=50000,\n dted_list=None, dem_type='SRTM2F', geoid_file=None,\n horizontal_step_size=10, **coa_args):\n \"\"\"\n Transforms image coordinates to ground plane ECF coordinate via the algorithm(s)\n described in SICD Image Projections document.\n\n Parameters\n ----------\n im_points : numpy.ndarray|list|tuple\n the image coordinate array\n sicd : sarpy.io.complex.sicd_elements.SICD.SICDType\n the SICD metadata structure.\n block_size : None|int\n Size of blocks of coordinates to transform at a time. 
The entire array will be transformed as a single block if `None`.\n dted_list : None|str|DTEDList|DTEDInterpolator\n dem_type : str\n One of ['DTED1', 'DTED2', 'SRTM1', 'SRTM2', 'SRTM2F'], specifying the DEM type.\n geoid_file : None|str|GeoidHeight\n horizontal_step_size : None|float|int\n Maximum distance between adjacent points along the R/Rdot contour.\n coa_args : dict\n keyword arguments for COAProjection constructor.\n\n Returns\n -------\n numpy.ndarray\n Physical coordinates (in ECF coordinates) with corresponding to the input image\n coordinates, assuming detected features actually correspond to the DEM.\n \"\"\"\n\n # coa projection creation\n im_points, orig_shape = _validate_im_points(im_points, sicd)\n coa_proj = COAProjection(sicd, **coa_args)\n\n # TODO: handle dted_list is None\n if isinstance(dted_list, str):\n dted_list = DTEDList(dted_list)\n\n scp = sicd.GeoData.SCP.LLH.get_array()\n if isinstance(dted_list, DTEDList):\n # find sensible bounds for the DEMs that we need to load up\n t_lats = numpy.array([scp[0]-0.1, scp[0], scp[0] + 0.1], dtype=numpy.float64)\n lon_diff = min(10., abs(10.0/(112*numpy.sin(numpy.rad2deg(scp[0])))))\n t_lons = numpy.arange(scp[1]-lon_diff, scp[1]+lon_diff+1, lon_diff, dtype=numpy.float64)\n t_lats[t_lats > 90] = 90.0\n t_lats[t_lats < -90] = -90.0\n t_lons[t_lons > 180] -= 360\n t_lons[t_lons < -180] += 360\n lats, lons = numpy.meshgrid(t_lats, t_lons)\n dem_interpolator = DTEDInterpolator.from_coords_and_list(lats, lons, dted_list, dem_type, geoid_file=geoid_file)\n elif isinstance(dted_list, DTEDInterpolator):\n dem_interpolator = dted_list\n else:\n raise ValueError(\n 'dted_list is expected to be a string suitable for constructing a DTEDList, '\n 'an instance of a DTEDList suitable for constructing a DTEDInterpolator, '\n 'or DTEDInterpolator instance. 
Got {}'.format(type(dted_list)))\n # determine max/min hae in the DEM\n # not the ellipsoid\n scp_geoid = dem_interpolator.geoid.get(scp[0], scp[1])\n # remember that min/max in a DTED is relative to the geoid, not hae\n min_dem = dem_interpolator.get_min_dem() + scp_geoid + 10\n max_dem = dem_interpolator.get_max_dem() + scp_geoid - 10\n\n # prepare workspace\n im_points_view = numpy.reshape(im_points, (-1, 2)) # possibly or make 2-d flatten\n num_points = im_points_view.shape[0]\n if block_size is None or num_points <= block_size:\n coords = _image_to_ground_dem(\n im_points_view, coa_proj, dem_interpolator, min_dem, max_dem, horizontal_step_size,\n scp[2], sicd.GeoData.SCP.ECF.get_array())\n else:\n coords = numpy.zeros((num_points, 3), dtype=numpy.float64)\n # proceed with block processing\n start_block = 0\n while start_block < num_points:\n end_block = min(start_block + block_size, num_points)\n coords[start_block:end_block, :] = _image_to_ground_dem(\n im_points_view[start_block:end_block], coa_proj, dem_interpolator,\n min_dem, max_dem, horizontal_step_size, scp[2], sicd.GeoData.SCP.ECF.get_array())\n start_block = end_block\n\n if len(orig_shape) == 1:\n coords = numpy.reshape(coords, (-1,))\n elif len(orig_shape) > 1:\n coords = numpy.reshape(coords, orig_shape[:-1] + (3,))\n return coords\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis module defines the DEM class and its associated files\n\"\"\"\nfrom __future__ import print_function\nimport os\nfrom copy import copy\nimport numpy as np\nfrom .dem_log import dem_logger\nfrom .readers import read_dted\nfrom scipy.interpolate import interpn\n\nclass DEM:\n \"\"\"\n Abstract class for handling DEM files.\n \"\"\"\n\n def __init__(self, coordinates = [], masterpath = '', dempaths = [], dem_type = 'DTED1', dem_buffer = 0.0167,\n lonlat=False, log_to_console=True, log_to_file = '', log_level = 'WARNING'):\n '''\n Initialize the DEM class.\n coordinates - (list/ndarray) A point/list of LAT/LON coordinates to use for grabbing a DEM. REQUIRES masterpath!\n masterpath - (str) Top Level directory were DEMs are held. REQUIRES coordinates!\n dempaths - (list) Specify exactly the file(s) needed instead of using coords. Can be used with coords/masterpath\n dem_type - (str) Specify the DEM type. Currently accepts DTED1, DTED2, SRTM1, SRTM2, SRTM2F. Default is DTED1.\n dem_buffer - (float) Ensures that coordinates (see above) on the edge of have enough DEM points for\n proper interpolation. Default is 0.0167 decimal degrees (60 seconds).\n log_to_console - (bool) Print log messages to console/terminal. Default is True.\n log_to_file - (str) Provide a filepath if a log file is wanted. Default is no log file.\n log_level - (str) Define what messages to print. In order of increasing severity (i.e. number of messages)\n options are: NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL,FATAL. 
Default is WARNING\n        '''\n        if log_to_console and log_to_file != '':  # If a pre-set logger hasn't been passed, create one\n            self.logger = dem_logger('ELEVATOR', level=log_level, logfile=log_to_file)  # LOG TO CONSOLE AND FILE\n        elif log_to_console:\n            self.logger = dem_logger('ELEVATOR', level=log_level)  # LOG TO CONSOLE ONLY\n        elif log_to_file != '':\n            self.logger = dem_logger('ELEVATOR', level=log_level, console=False, logfile=log_to_file)  # LOG TO FILE ONLY\n        else:\n            self.logger = dem_logger('ELEVATOR', level='FATAL')  # NO SINKS REQUESTED, SO ONLY LOG FATAL LEVEL MESSAGES\n\n        self.available_dems = ['DTED1', 'DTED2', 'SRTM1', 'SRTM2', 'SRTM2F']\n\n        # Add any user specified dempaths to self\n        if dempaths == []:\n            self.dempaths = []\n        else:  # Normalize and validate the user-specified path(s)\n            self.dempaths = dempaths\n            if not isinstance(self.dempaths, list):  # Ensure a list for reading single or multiple files\n                self.dempaths = [dempaths]\n            for dempath in list(self.dempaths):  # Iterate over a copy, since removing while iterating skips entries\n                if not os.path.exists(dempath):  # If the user path does not exist, remove it.\n                    self.dempaths.remove(dempath)\n                    self.logger.warning('Could not locate user specified file {}'.format(dempath))\n\n        # If coordinates are specified, find the right DEMs and use those\n        if coordinates != []:\n            if lonlat:  # If coordinates are [LON,LAT] instead of [LAT,LON]\n                self.logger.debug('Switching Coordinates')\n                coordinates = self.geo_swap(coordinates)  # Then swap to ensure correctness\n            self.include(coordinates, masterpath, dem_type, dem_buffer)\n        ndems = len(self.dempaths)\n        self.origin = np.zeros([ndems,2]).astype(str)  # DDMMSS [LAT, LON] origin for LOWER LEFT corner of the data\n        self.origindd = np.zeros([ndems,2])  # [LAT, LON] origin in decimal degrees\n        self.delta = np.zeros([ndems,2])  # [LAT, LON] spacing in tenths of arcseconds\n        self.deltadd = np.zeros([ndems,2])  # [LAT, LON] spacing in decimal degrees\n        self.deltam = np.zeros([ndems,2])  # [LAT, LON] spacing in meters\n        self.lat_list_1D = []  # List to hold 1-D LAT arrays\n        self.lon_list_1D = []  # List to hold 1-D LON arrays\n        self.lat_list_2D = []  # List to hold 2-D LAT matrices\n        self.lon_list_2D = []  # List to hold 2-D LON matrices\n        self.elevation_list = []  # List to hold 2-D elevation matrices\n\n        if self.dempaths != []:  # If there are files in the list, read them now\n            self.read_dempath()\n\n        if len(self.elevation_list) > 0:\n            self.join_dems()  # Join multiple DEMs into one grid if necessary\n        return  # END OF INIT\n\n    def include(self, coordinates, masterpath, dem_type, dem_buffer):\n        '''\n        Find the DEM file (or files) to read based on a coordinate (or coordinates). MUST BE LAT,LON.\n        Return - A list of valid files to pass to the actual reader\n        '''\n        self.logger.info('Including filepaths.')\n        if coordinates != []:\n            if dem_type in self.available_dems:  # Check DEM type\n                if (('DTED' in dem_type) or ('SRTM1' in dem_type) or ('SRTM2' in dem_type)) and not ('SRTM2F' in dem_type):  # DTED-style suffix\n                    suffix = '.dt' + dem_type[-1]  # Append the 1 or 2 to the suffix\n                elif 'SRTM2F' in dem_type:\n                    suffix = '.dt2'\n                # Add elif statements for other DEM formats\n                else:\n                    self.logger.critical('Cannot read DEM type {}'.format(dem_type))\n                    return None\n            if not os.path.exists(masterpath):\n                self.logger.warning('Master DEM path -- {} -- does not appear to exist.'.format(masterpath))\n            else:\n                coordinates = np.array(coordinates)  # Make sure 
coordinates are an Nx2 array, not a list\n        if len(coordinates.shape) == 1:\n            coordinates = np.array([coordinates]) # If coordinates is a 1D array, convert to 2D\n        elif len(coordinates.shape) >= 3:\n            self.logger.warning('3-Dimensional array detected. May cause unknown errors!')\n        lat_range = np.array([np.floor(np.min(coordinates[:,0])-dem_buffer),np.ceil(np.max(coordinates[:,0])+dem_buffer)]) # Full LAT range\n        if lat_range[0] < -90.0: # Set limits on LAT range\n            lat_range[0] = -90.0\n        if lat_range[1] >= 90.0:\n            lat_range[1] = 90.0\n        # In order to handle longitudes properly at the international dateline (180 meets -180)\n        # set new range from 0 -> 360 for ease of creating DEM range. Then just reset for dempath\n        lon_360 = copy(coordinates[:,1])\n        lon_360[(lon_360 < 0)] += 360.0\n        lon_range = np.array([np.floor(np.min(lon_360)-dem_buffer),np.ceil(np.max(lon_360)+dem_buffer)]) # Full LON range\n\n        # Guard on the *extent* of the request in square degrees. (The old test multiplied\n        # lat_range.size by lon_range.size, which is always 2*2=4, so it could never fire.)\n        area = (lat_range[1] - lat_range[0]) * (lon_range[1] - lon_range[0])\n        if 25.0 < area <= 100.0:\n            self.logger.warning('Coordinate range extends beyond 25 square degrees.')\n        elif area > 100.0:\n            self.logger.warning('Coordinate range too large (>100 square degrees).')\n            self.logger.warning('Quitting DEM reading.')\n            return\n        for lat in np.arange(lat_range[0],lat_range[1],1):\n            for lon in np.arange(lon_range[0],lon_range[1],1):\n                if lon >= 180.0:\n                    lon -= 360.0 # Reset 0..360 longitudes back to -180..180 for the file path\n                if np.sign(lat) >= 0:\n                    lat_hemi = 'n' # Northern hemisphere\n                else:\n                    lat_hemi = 's' # Southern hemisphere\n                if np.sign(lon) >= 0:\n                    lon_hemi = 'e' # Eastern hemisphere\n                else:\n                    lon_hemi = 'w' # Western hemisphere\n                lat_short = lat_hemi + str(np.abs(lat).astype(int)).zfill(2)\n                lon_short = lon_hemi + str(np.abs(lon).astype(int)).zfill(3)\n\n                if dem_type[0:4].upper() == 'DTED':\n                    include_path = os.path.join(masterpath, dem_type[0:4].lower(), dem_type[-1], lon_short, lat_short + suffix)\n                elif dem_type[0:4].upper() == 'SRTM' and dem_type[-1].upper() == \"F\":\n                    include_path = os.path.join(masterpath, dem_type[0:4].lower(), dem_type[-2:].lower(), lon_short, lat_short + suffix)\n                else: # Guard: without this branch include_path would be undefined for other dem_types\n                    self.logger.warning('Unsupported dem_type {}; skipping tile.'.format(dem_type))\n                    continue\n                if os.path.exists(include_path):\n                    if include_path not in self.dempaths:\n                        self.dempaths.append(include_path)\n                else:\n                    self.logger.warning('include() could not find file {}'.format(include_path))\n                self.logger.debug('include_path {}'.format(include_path))\n        if self.dempaths == []: # Nothing usable was found on disk\n            self.logger.critical('No DEM files used. Could not find appropriate files.')\n        return # END OF INCLUDE\n\n    def read_dempath(self):\n        '''\n        Read all files within DEM.dempaths\n        '''\n        self.logger.info('Going to read {} DEM(s).'.format(len(self.dempaths)))\n        if self.dempaths == []:\n            self.logger.warning('Nothing to read.')\n            return []\n        for i, dempath in enumerate(self.dempaths): # Cycle through list of dempaths\n            self.logger.debug('Reading DEM file {}'.format(dempath))\n            try:\n                dem_specs, geos_1D, geos_2D, elevations = read_dted(dempath) # Read each file one at a time.\n            except Exception:\n                self.logger.warning('read_dted failed for {}'.format(dempath)) # Print warning\n                continue # Then skip the rest of the loop and continue on\n            self.origin[i,:] = dem_specs[0]\n            self.origindd[i,:] = dem_specs[1]\n            self.delta[i,:] = dem_specs[2]\n            self.deltadd[i,:] = dem_specs[3]\n            self.deltam[i,:] = dem_specs[4]\n            self.lat_list_1D.append(geos_1D[0])\n            self.lon_list_1D.append(geos_1D[1])\n            self.lat_list_2D.append(geos_2D[0])\n            self.lon_list_2D.append(geos_2D[1])\n            self.elevation_list.append(elevations)\n        return # END OF READ_DEMPATH\n\n    def join_dems(self):\n        '''\n        Take all the DEMs in dempath and stick them together.\n        '''\n        self.logger.info('Joining DEMs together if necessary.')\n        olon_360 = copy(self.origindd[:, 1]) # Get the lon origins\n        self.logger.debug('DEM origins: {}'.format(self.origindd))\n        self.logger.debug('DEM LON min/max: {}/{}'.format(np.min(self.origindd[:,1]),np.max(self.origindd[:,1])))\n        if np.max(self.origindd[:,1]) == 179.0 and np.min(self.origindd[:,1]) == -180.0: # If DEM range crosses the dateline\n            olon_360[(olon_360 < 0)] += 360.0 # Convert from -180..180 to 0..360\n\n        # indices labels which files go together in LAT space and LON space.\n        indices = np.array([np.max(self.origindd[:,0]) - self.origindd[:,0], # Get LAT rows\n                            olon_360 - np.min(olon_360)]).astype(int) # Get LON cols\n        nlats = np.unique(indices[0,:]).shape[0] # Get number of lat rows\n        nlons = np.unique(indices[1,:]).shape[0] # Get number of lon cols\n\n        i = 0\n        while i < nlats:\n            j = 0\n            while j < nlons:\n                # To fill grid from top left to bottom right, match i,j to indices from above\n                image_index = np.where((indices[0,:] == i) & (indices[1,:] == j))[0][0].astype(int) # Get index\n                if j == 0: # Get the values for the first LON row\n                    self.lons_1D = self.lon_list_1D[image_index]\n                    elevrow = self.elevation_list[image_index]\n                else: # Adjacent tiles share a boundary meridian, e.g. (73...74) and (74...75) both carry 74, so drop the duplicate before joining\n                    if np.sign(self.lons_1D[1]) == 1 and np.sign(self.lon_list_1D[image_index][1]) == -1: # If at dateline\n                        self.lons_1D = np.hstack((self.lons_1D[0:-1],(360.0 + self.lon_list_1D[image_index]))) # Convert\n                    else:\n                        self.lons_1D = np.hstack((self.lons_1D[0:-1], self.lon_list_1D[image_index]))\n                    elevrow = np.vstack((elevrow[0:-1,:],self.elevation_list[image_index]))\n                j += 1\n            if i == 0: # Get the values for the first LAT col\n                self.dem = elevrow\n                self.lats_1D = self.lat_list_1D[image_index]\n            else: # Adjacent rows share a boundary parallel, e.g. (34...35) and (35...36) both carry 35, so drop the duplicate before joining\n                self.lats_1D = np.hstack((self.lat_list_1D[image_index][0:-1],self.lats_1D))\n                self.dem = np.hstack((elevrow[:,0:-1],self.dem))\n            i += 1\n        return # END OF JOIN_DEMS\n\n    def elevate(self, coord, method='linear',lonlat=False):\n        '''\n        Given a geographic coordinate, return an elevation from the DEM.\n        Coord may be a single coord (c = [lat,lon]) or multiples (c = [[lat,lon],[lat,lon],...])\n        '''\n        coord = np.array(coord) # Ensure coord is an array before any masking below\n        if len(coord.shape) == 1: # If only one point is specified\n            coord = coord.reshape((1,2)) # Then make sure it's a 2-D array of 1 by 2 [[x,y]]\n        if not hasattr(self, 'dem'): # Check to see if dem is an attribute\n            self.logger.warning('There are no DEMs to interpolate from.') # If not, no dems have been read in\n            return np.zeros(coord.shape[0]) + -1234.5\n        if not lonlat: # If coords are given as LAT/LON (y,x), then switch to LON/LAT (x,y)\n            coord = self.geo_swap(coord) # Convert [[lat,lon],...] to [[lon, lat], ...]\n        if np.max(self.origindd[:,1]) == 179.0 and np.min(self.origindd[:,1]) == -180.0: # If DEM range crosses the dateline\n            coord[coord[:,0] < 0, 0] += 360.0 # Shift only the LON column to 0..360 (the old version also shifted negative latitudes)\n        # interpolated_elevation = interpn((1-D LON array, 1-D LAT array), 2-D elev array, coord array)\n        # The following is to ensure interpn evaluates all the good, valid coordinates instead\n        # of throwing the baby out with the bath water.\n        elev = np.zeros(coord.shape[0]) + -1234.5 # Create a list of dummy elevations the same length as input list\n        # Get all valid coordinates\n        in_bounds = np.where((coord[:,1] > np.min(self.lats_1D)) & (coord[:,1] < np.max(self.lats_1D)) &\n                             (coord[:,0] > np.min(self.lons_1D)) & (coord[:,0] < np.max(self.lons_1D)))[0]\n\n        self.logger.debug('Coord LAT range: {}'.format((np.min(coord[:,1]),np.max(coord[:,1]))))\n        self.logger.debug('Coord LON range: {}'.format((np.min(coord[:,0]),np.max(coord[:,0]))))\n        self.logger.debug('DEM LAT range: {}'.format((np.min(self.lats_1D),np.max(self.lats_1D))))\n        self.logger.debug('DEM LON range: {}'.format((np.min(self.lons_1D),np.max(self.lons_1D))))\n        self.logger.debug('Coord shape: {}'.format(coord.shape))\n        self.logger.debug('Elev size: {}'.format(elev.size))\n        self.logger.debug('In_bounds size: {}'.format(in_bounds.size))\n        if in_bounds.size < elev.size:\n            self.logger.warning('Some points may be outside of DEM boundary. Check coordinate list.')\n        if in_bounds.size > 0: # If there are any valid points, then try the interpolation\n            try:\n                self.logger.info('Interpolating elevation points.')\n                elev[in_bounds] = interpn((self.lons_1D, self.lats_1D), self.dem, coord[in_bounds,:], method=method)\n            except Exception as err:\n                self.logger.critical('Interpolation error: {}'.format(err))\n        good_heights = np.where(elev > -1234.5)[0] # Do stats on valid points only.\n        if good_heights.size > 0: # If there are good points then print stats\n            emin = np.round(np.min(elev[good_heights]),2)\n            emean = np.round(np.mean(elev[good_heights]),2)\n            emax = np.round(np.max(elev[good_heights]),2)\n            self.logger.info('Elevation stats (min/mean/max): {}/{}/{}'.format(emin,emean,emax))\n        else:\n            self.logger.info('No valid points found.')\n        return elev # END OF ELEVATE\n\n    def geo_swap(self,incoord):\n        '''\n        Provides a way for users to input LON/LAT coords instead of LAT/LON.\n        '''\n        incoord = np.array(incoord) # Ensure input coords are numpy arrays\n        if len(incoord.shape) == 1: # If only one point is specified\n            incoord = incoord.reshape((1,2)) # Then make sure it's a 2-D array of 1 by 2 [[x,y]]\n        outcoord = np.array(copy(incoord))\n        outcoord[:,0] = incoord[:,1]\n        outcoord[:,1] = incoord[:,0]\n        return outcoord\n\n# END OF FILE" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.linspace", "numpy.rad2deg", "numpy.all", "numpy.max", "numpy.argmin", "numpy.any", "numpy.cross", "numpy.reshape", "numpy.arange", "numpy.multiply.outer", "numpy.sin", "numpy.argmax", "numpy.outer", "numpy.zeros", "numpy.min", "numpy.isnan", "numpy.meshgrid", "numpy.array", "numpy.sum", "numpy.abs", "numpy.linalg.norm", "numpy.cos" ], [ "numpy.hstack", "numpy.abs", "numpy.min", "numpy.unique", "numpy.arange", "numpy.sign", "numpy.max", "scipy.interpolate.interpn", "numpy.mean", "numpy.array", "numpy.zeros", "numpy.where", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
uguisu/tensorflow_without_keras
[ "fe03b4febf2267921207ef3321926d14f5672bc3" ]
[ "src/Chapter_001/003_function_001.py" ]
[ "# coding=utf-8\n# author uguisu\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.framework.errors_impl import InvalidArgumentError\n\n\ndef matrix_001():\n\n print('\\n- 矩阵转置 ------------------------')\n # 矩阵转置\n np_array = np.array([[1., 2., 3.], [4., 5., 6.]])\n tensor_a = tf.constant(np_array, dtype=tf.float32)\n tensor_b = tf.transpose(tensor_a)\n print('tensor_a shape = ', tensor_a.shape, '\\n')\n print('tensor_b shape = ', tensor_b.shape, '\\n')\n\n print('\\n- 维度压缩1 ------------------------')\n # 维度压缩1\n # 删除所有维度=1的纬度\n tensor_a = tf.constant([1, 3, 4, 5], shape=(1, 4))\n print('tensor_a = ', tensor_a, '\\n')\n print('tensor_a shape = ', tensor_a.shape, '\\n')\n\n tensor_b = tf.squeeze(tensor_a)\n print('tensor_b = ', tensor_b, '\\n')\n print('tensor_b shape = ', tensor_b.shape, '\\n')\n\n print('\\n- 维度压缩2 ------------------------')\n # 维度压缩2\n # input value is as follow\n # [\n # [[1 3]]\n # [[4 5]]\n # [[4 6]]\n # ]\n tensor_a = tf.constant(value=[1, 3, 4, 5, 4, 6], shape=(3, 1, 2))\n print('tensor_a = ', tensor_a, '\\n')\n print('tensor_a shape = ', tensor_a.shape, '\\n')\n\n # output will be\n # [[1 3]\n # [4 5]\n # [4\n # 6]]\n tensor_b = tf.squeeze(tensor_a)\n print('tensor_b = ', tensor_b, '\\n')\n print('tensor_b shape = ', tensor_b.shape, '\\n')\n\n print('\\n- range ------------------------')\n # tf.range()用法和python的range()函数相同,不同的地方在于,循环变量(本例中的`i`)是一个tensor对象\n for i in tf.range(1, 5):\n print('i = ', i)\n\n print('\\n- case ------------------------')\n # tf.case()用法类似于对python的if语句进行封装\n # Example 1:\n # if (x < y) return 17;\n # else return 23;\n x = 10\n y = 5\n f1 = lambda: tf.constant(17)\n f2 = lambda: tf.constant(23)\n tensor_a = tf.case([(tf.less(x, y), f1)], default=f2)\n print('tensor_a 1 = ', tensor_a, '\\n')\n x = 5\n y = 10\n tensor_a = tf.case([(tf.less(x, y), f1)], default=f2)\n print('tensor_a 2 = ', tensor_a, '\\n')\n\n # Example 2\n # if (x < y & & x > z) raise OpError(\"Only one predicate may evaluate to True\");\n # if (x < y) return 17;\n # else if (x > z) return 23;\n # else return -1;\n def f1(): return tf.constant(17)\n def f2(): return tf.constant(23)\n def f3(): return tf.constant(-1)\n # raise InvalidArgumentError\n x = 5\n y = 10\n z = 1\n try:\n tensor_a = tf.case([(tf.less(x, y), f1), (tf.greater(x, z), f2)], default=f3, exclusive=True)\n print('tensor_a 3 = ', tensor_a, '\\n')\n except InvalidArgumentError:\n print('(⊙o⊙) catch InvalidArgumentError error', '\\n')\n\n x = 5\n y = 10\n z = 20\n tensor_a = tf.case([(tf.less(x, y), f1), (tf.greater(x, z), f2)], default=f3, exclusive=True)\n print('tensor_a 4 = ', tensor_a, '\\n')\n\n\nif __name__ == '__main__':\n matrix_001()\n" ]
[ [ "tensorflow.transpose", "tensorflow.constant", "tensorflow.range", "tensorflow.less", "tensorflow.greater", "tensorflow.squeeze", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
ardihikaru/mlsp
[ "db38972bcceac7b95808132457c4de9170546c9d", "db38972bcceac7b95808132457c4de9170546c9d" ]
[ "hw1/others/coba.py", "hw5/libs/common/dataset.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\n# target = 100\nsize = 20\n# alpha = 1\n# beta = 1.0/target\nlam = 0.2\n# target = 0.5\n# target = 2.5*lam #\nbeta = 1 / lam # beta = 1 / lambda\n\n\nY = np.random.exponential(beta, size)\n\n# plt.plot(x, Y, 'b-')\n# plt.plot(x[:size], Y, 'r.')\n# # plt.plot(x[:size], simulated_data, 'r.')\n# plt.show()\n\n# bin = jumlah patahan\n# alpha = bar's transparancy; value = 0-1 (decimal)\nplt.hist(Y, density=True, bins=size*2, lw=100, alpha=.9)\n# # plt.hist(Y, density=True, bins=4, lw=0, alpha=.8)\n# # plt.hist(Y, density=False, bins=200,lw=0,alpha=.8)\n# plt.plot([0, max(Y)], [target, target], 'r--')\n# # plt.ylim(0,target*1.1)\nplt.show()\n\n", "# pip install mlxtend\nfrom mlxtend.data import loadlocal_mnist\nimport os\n\nclass Dataset():\n def __init__(self, train_data=None, test_data=None, is_lib=False):\n self.__init_vars()\n self.__setup_dataset(train_data, test_data)\n\n def __init_vars(self):\n self.project_dir = os.getcwd()\n self.dataset_path = self.project_dir + \"/hw5/dataset/\"\n\n def __setup_dataset_mnist_lib(self):\n import numpy as np\n from keras.datasets import mnist\n (self.X_train, self.Y_train), (self.X_test, self.Y_test) = mnist.load_data()\n self.X_train = np.reshape(self.X_train, (60000, 784))\n self.X_test = np.reshape(self.X_test, (10000, 784))\n\n # Standardize dataset\n self.X_train = self.X_train.astype('float32')\n self.X_test = self.X_test.astype('float32')\n self.X_train = self.X_train / 255.\n self.X_test = self.X_test / 255.\n\n # Max train dataset = 60k\n def __extract_mnist_train(self, limit):\n # fix bug: max limit = 60k\n X, Y = loadlocal_mnist(\n images_path=self.dataset_path + 'train-images.idx3-ubyte',\n labels_path=self.dataset_path + 'train-labels.idx1-ubyte')\n if limit is not None:\n if limit > 60000:\n limit = 60000\n X, Y = X[:limit], Y[:limit]\n return X, Y\n\n # Max test dataset = 10k\n def __extract_mnist_test(self, limit):\n # fix bug: max limit = 10k\n X, Y = loadlocal_mnist(\n images_path=self.dataset_path + 't10k-images.idx3-ubyte',\n labels_path=self.dataset_path + 't10k-labels.idx1-ubyte')\n if limit is not None:\n if limit > 60000:\n limit = 60000\n X, Y = X[:limit], Y[:limit]\n return X, Y\n\n def __setup_dataset(self, limit_train, limit_test):\n self.X_train, self.Y_train = self.__extract_mnist_train(limit_train)\n self.X_test, self.Y_test = self.__extract_mnist_test(limit_test)\n\n # Standardize dataset\n self.X_train = self.X_train.astype('float32')\n self.X_test = self.X_test.astype('float32')\n self.X_train = self.X_train / 255.\n self.X_test = self.X_test / 255.\n\n def get_dataset(self):\n return self.X_train, self.Y_train, self.X_test, self.Y_test\n" ]
[ [ "numpy.random.exponential", "matplotlib.pyplot.show", "matplotlib.pyplot.hist" ], [ "numpy.reshape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TangYuan-Liu/mindspore
[ "fb8fd3338605bb34fa5cea054e535a8b1d753fab", "fb8fd3338605bb34fa5cea054e535a8b1d753fab" ]
[ "mindspore/python/mindspore/ops/composite/math_ops.py", "mindspore/python/mindspore/dataset/text/utils.py" ]
[ "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"math Operations.\"\"\"\nfrom itertools import zip_longest\nfrom collections import deque\nimport numpy as np\nfrom mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils\nfrom mindspore.common import dtype as mstype\nfrom mindspore._checkparam import Validator as validator\nfrom mindspore.ops.operations import _inner_ops as inner\nfrom mindspore.ops.primitive import constexpr\nfrom mindspore.ops import functional as F\nfrom mindspore.ops.operations._inner_ops import DynamicResizeNearestNeighbor\nfrom .. import operations as P\n\n\n@constexpr\ndef _check_validate_axis(axis, name):\n if isinstance(axis, (tuple, list)):\n for idx, item in enumerate(axis):\n validator.check_value_type(\"axis[%d]\" % idx, item, [int], name)\n axis = validator.check_value_type('axis', axis, [int, tuple, list], name)\n return axis\n\n\n@constexpr\ndef _check_validate_keepdims(keep_dims, name):\n keep_dims = validator.check_value_type('keep_dims', keep_dims, [bool], name)\n return keep_dims\n\n@constexpr\ndef is_const(x):\n return x is not None\n\ndef count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):\n r\"\"\"\n Count number of nonzero elements across axis of input tensor\n\n Args:\n x (Tensor): Input data is used to count non-zero numbers.\n :math:`(N,*)` where :math:`*` means, any number of additional dimensions.\n axis (Union[int, tuple(int), list(int)]): The dimensions to reduce. Only constant value is allowed.\n Default: (), reduce all dimensions.\n keep_dims (bool): If true, keep these reduced dimensions and the length is 1.\n If false, don't keep these dimensions. Default: False.\n dtype (Union[Number, mindspore.bool\\_]): The data type of the output tensor. Only constant value is allowed.\n Default: mindspore.int32\n\n Returns:\n Tensor, number of nonzero element. 
The data type is `dtype`.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> from mindspore import Tensor, ops\n >>> import numpy as np\n >>> # case 1: each value specified.\n >>> x = Tensor(np.array([[0, 1, 0], [1, 1, 0]]).astype(np.float32))\n >>> nonzero_num = ops.count_nonzero(x=x, axis=[0, 1], keep_dims=True, dtype=mindspore.int32)\n >>> print(nonzero_num)\n [[3]]\n >>> # case 2: all value is default.\n >>> nonzero_num = ops.count_nonzero(x=x)\n >>> print(nonzero_num)\n 3\n >>> # case 3: axis value was specified 0.\n >>> nonzero_num = ops.count_nonzero(x=x, axis=[0,])\n >>> print(nonzero_num)\n [1 2 0]\n >>> # case 4: axis value was specified 1.\n >>> nonzero_num = ops.count_nonzero(x=x, axis=[1,])\n >>> print(nonzero_num)\n [1 2]\n >>> # case 5: keep_dims value was specified.\n >>> nonzero_num = ops.count_nonzero(x=x, keep_dims=True)\n >>> print(nonzero_num)\n [[3]]\n >>> # case 6: keep_dims and axis value was specified.\n >>> nonzero_num = ops.count_nonzero(x=x, axis=[0,], keep_dims=True)\n >>> print(nonzero_num)\n [[1 2 0]]\n \"\"\"\n\n const_utils.check_type_valid(F.dtype(x), mstype.number_type, 'input x')\n axis = _check_validate_axis(axis, \"count_nonzero\")\n keep_dims = _check_validate_keepdims(keep_dims, \"count_nonzero\")\n const_utils.check_type_valid(dtype, mstype.number_type + (mstype.bool_,), 'dtype')\n\n not_equal = P.NotEqual()\n cast = P.Cast()\n reduce_sum = P.ReduceSum(keep_dims)\n nonzero_bool = not_equal(x, 0)\n # ReduceSum only support float16 or float32 tensor.\n nonzero_val = cast(nonzero_bool, mstype.float32)\n nonzero_num = cast(reduce_sum(nonzero_val, axis), dtype)\n\n return nonzero_num\n\n\n@constexpr\ndef _int_to_tuple_conv(axes):\n \"\"\"\n Converts ints to tuples in input axes, expected by most validation checks.\n \"\"\"\n for x in [0, 1]:\n if isinstance(axes[x], int):\n axes[x] = (axes[x],)\n return axes\n\n\n@constexpr\ndef _check_axes(axes, prim_name=None):\n \"\"\"\n Check for validity and type of axes passed to function.\n \"\"\"\n msg_prefix = f\"For '{prim_name}', the\" if prim_name else \"The\"\n validator.check_value_type('axes', axes, [int, tuple, list], \"tensor dot\")\n if not isinstance(axes, int):\n axes = list(axes) # to avoid immutability issues\n if len(axes) != 2:\n raise ValueError(f\"{msg_prefix} dimension of 'axes' should be 2, but got 'axes': {axes}.\")\n axes = _int_to_tuple_conv(axes) # convert before length checks\n if len(axes[0]) != len(axes[1]):\n raise ValueError(f\"{msg_prefix} first and second dim of 'axes' have to be the same size/length, \"\n f\"but got 'axes': {axes}.\")\n if len(axes[0]) != len(set(axes[0])) or len(axes[1]) != len(set(axes[1])):\n raise ValueError(f\"{msg_prefix} 'axes' cannot have duplicating values, but got {axes}.\")\n return axes\n\n\n@constexpr\ndef _typecheck_input(x1_type, x2_type, prim_name=None):\n \"\"\"\n Check input tensor types to be valid and confirm they are the same type.\n \"\"\"\n msg_prefix = f\"For '{prim_name}', the\" if prim_name else \"The\"\n const_utils.check_type_valid(x1_type, [mstype.float32, mstype.float16], 'x1')\n const_utils.check_type_valid(x2_type, [mstype.float32, mstype.float16], 'x2')\n if x1_type != x2_type:\n raise TypeError(f\"{msg_prefix} inputs must be the same type, but got x1_type: {x1_type} \"\n f\"and x2_type: {x2_type}.\")\n\n\n@constexpr\ndef _axes_int_check(x1_shape, x2_shape, axes, prim_name=None):\n \"\"\"\n Convert from single int axes to 2d tuple if required\n \"\"\"\n msg_prefix = f\"For '{prim_name}', the\" if prim_name 
else \"The\"\n if isinstance(axes, int):\n if axes < 0:\n raise ValueError(f\"{msg_prefix} 'axes' must be at least 0, but got {axes}.\")\n if axes == 0:\n # outer product, no input validation required\n return [], []\n if axes > len(x1_shape) or axes > len(x2_shape):\n raise ValueError(f\"{msg_prefix} 'axes' cannot be greater than the length of 'x1_shape' and 'x2_shape', \"\n f\"but got 'axes': {axes}, 'x1_shape': {x1_shape}, 'x2_shape': {x2_shape}.\")\n x1_ind = tuple(range(len(x1_shape))[-1 * axes:])\n x2_ind = tuple(range(len(x2_shape))[:axes])\n axes = tuple((x1_ind, x2_ind))\n axes = _int_to_tuple_conv(axes)\n return axes\n\n\n@constexpr\ndef _validate_axes(x1_shape, x2_shape, axes, prim_name=None):\n \"\"\"\n Checks for axes having the correct length according to input, for any value in axis\n being out of range with given shape and also checking for compatible axes values\n with given inputs.\n \"\"\"\n msg_prefix = f\"For '{prim_name}', the\" if prim_name else \"The\"\n shapes = [x1_shape, x2_shape]\n\n # axis length check\n for ix_input, x_axes in enumerate(axes):\n axes_len = len(x_axes)\n shape_dim_len = len(shapes[ix_input])\n if axes_len > shape_dim_len:\n raise ValueError(f\"{msg_prefix} length of element {x_axes} in 'axes' should be less than or equal to \"\n f\"{shape_dim_len}, but got {axes_len}.\")\n\n # axis values range check\n for ix_input, x_axes in enumerate(axes):\n comp_shape = shapes[ix_input]\n max_val = len(comp_shape) - 1\n min_val = -1 * len(comp_shape)\n for _, x_value in enumerate(x_axes):\n if not min_val <= x_value <= max_val:\n raise ValueError(f\"{msg_prefix} value in 'axes' should be in range: [{min_val}, {max_val}], \"\n f\"but got {x_value}.\")\n\n # check axis value with input shape - both ways for axis valid\n invalid_a = False\n invalid_b = False\n for i in range(len(axes[0])): # sizes already validated\n if x1_shape[axes[0][i]] != x2_shape[axes[1][i]]:\n invalid_a = True\n if x1_shape[axes[0][i]] != x2_shape[axes[1][len(axes[0]) - 1 - i]]:\n invalid_b = True\n if invalid_a and invalid_b:\n raise ValueError(f\"{msg_prefix} 'i' should exist such that 'x1_shape[axes[0][i]]' is equal to \"\n f\"'x2_shape[axes[1][i]]' or 'x2_shape[axes[1][len(axes[0])-1-i]]', but got \"\n f\"'x1_shape': {x1_shape}, 'x2_shape': {x2_shape}, 'axes': {axes}.\")\n\n\n@constexpr\ndef _calc_new_shape(shape, axes, position=0):\n \"\"\"\n Calculate transpose and reshape parameters for input transformations,\n 'position' refers to whether tensor is first or second in the op.\n \"\"\"\n contraction_axes = tuple(i if i >= 0 else i + len(shape) for i in axes[position])\n prod_contraction = int(np.prod([shape[i] for i in contraction_axes]))\n free_axes = tuple(i for i in range(len(shape)) if i not in contraction_axes)\n free_dims = tuple(shape[i] for i in free_axes)\n prod_free = int(np.prod(free_dims))\n\n transpose_perm = contraction_axes + free_axes if position else free_axes + contraction_axes\n new_shape = (prod_contraction, prod_free) if position else (prod_free, prod_contraction)\n return new_shape, transpose_perm, free_dims\n\n\ndef tensor_dot(x1, x2, axes):\n \"\"\"\n Computation of Tensor contraction on arbitrary axes between tensors `a` and `b`.\n\n Contraction allows for the summation of products of elements of `a` and `b` on specified axes.\n The same number of axes must be specified for both x1 and x2, and values must be within range\n of number of dims of both `a` and `b`.\n\n Selected dims in both inputs must also match.\n\n axes = 0 leads to outer product\n axes = 1 
leads to normal matrix multiplication when inputs both 2D.\n axes = 1 is the same as axes = ((1,),(0,)) where both `a` and `b` are 2D.\n axes = 2 is the same as axes = ((1,2),(0,1)) where both `a` and `b` are 3D.\n\n Args:\n x1 (Tensor): First tensor in tensor_dot with datatype float16 or float32\n x2 (Tensor): Second tensor in tensor_dot with datatype float16 or float32\n axes (Union[int, tuple(int), tuple(tuple(int)), list(list(int))]): Single value or\n tuple/list of length 2 with dimensions specified for `a` and `b` each. If single value `N` passed,\n automatically picks up last N dims from `a` input shape and first N dims from `b` input shape in order\n as axes for each respectively.\n\n Inputs:\n - **x1** (Tensor) - First tensor in tensor_dot with datatype float16 or float32\n - **x2** (Tensor) - Second tensor in tensor_dot with datatype float16 or float32\n - **axes** (Union[int, tuple(int), tuple(tuple(int)), list(list(int))]) - Single value or\n tuple/list of length 2 with dimensions specified for `a` and `b` each. If single value `N` passed,\n automatically picks up last N dims from `a` input shape and first N dims from `b` input shape in order\n as axes for each respectively.\n\n Outputs:\n Tensor, the shape of the output tensor is :math:`(N + M)`. Where :math:`N` and :math:`M` are the free axes not\n contracted in both inputs\n\n Raises:\n TypeError: If `x1` or `x2` is not a Tensor.\n TypeError: If `axes` is not one of the following: int, tuple, list.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> from mindspore import Tensor, ops\n >>> import mindspore\n >>> import numpy as np\n >>> input_x1 = Tensor(np.ones(shape=[1, 2, 3]), mindspore.float32)\n >>> input_x2 = Tensor(np.ones(shape=[3, 1, 2]), mindspore.float32)\n >>> output = ops.tensor_dot(input_x1, input_x2, ((0,1),(1,2)))\n >>> print(output)\n [[2. 2. 2]\n [2. 2. 2]\n [2. 2. 
2]]\n \"\"\"\n shape_op = P.Shape()\n reshape_op = P.Reshape()\n transpose_op = P.Transpose()\n matmul_op = P.MatMul(False, False)\n # input validity checks\n x1_shape = shape_op(x1)\n x2_shape = shape_op(x2)\n x1_type = F.dtype(x1)\n x2_type = F.dtype(x2)\n axes = _check_axes(axes, 'tensor_dot')\n _typecheck_input(x1_type, x2_type, 'tensor_dot')\n # input compatibility check & axes format update\n axes = _axes_int_check(x1_shape, x2_shape, axes, 'tensor_dot')\n _validate_axes(x1_shape, x2_shape, axes, 'tensor_dot')\n x1_reshape_fwd, x1_transpose_fwd, x1_ret = _calc_new_shape(x1_shape, axes, 0)\n x2_reshape_fwd, x2_transpose_fwd, x2_ret = _calc_new_shape(x2_shape, axes, 1)\n output_shape = x1_ret + x2_ret # combine free axes from both inputs\n # run tensor_dot op\n x1_transposed = transpose_op(x1, x1_transpose_fwd)\n x2_transposed = transpose_op(x2, x2_transpose_fwd)\n x1_reshaped = reshape_op(x1_transposed, x1_reshape_fwd)\n x2_reshaped = reshape_op(x2_transposed, x2_reshape_fwd)\n mul_result = matmul_op(x1_reshaped, x2_reshaped)\n final_result = reshape_op(mul_result, output_shape)\n return final_result\n\n\n@constexpr\ndef _check_invalid_input(x1_shape, x2_shape, prim_name=None):\n msg_prefix = f\"For '{prim_name}', the\" if prim_name else \"The\"\n if len(x1_shape) < 2 or len(x2_shape) < 2:\n raise ValueError(f\"{msg_prefix} inputs x1, x2 should have 'dimension >= 2',\"\n f\"but got 'len(x1_shape)': ({len(x1_shape)}) and 'len(x2_shape)': ({len(x2_shape)}).\")\n\n\n@constexpr\ndef _typecheck_input_dot(x1_type, x2_type, prim_name=None):\n \"\"\"\n Check input tensor types to be valid and confirm they are the same type for dot and batch dot ops.\n \"\"\"\n msg_prefix = f\"For '{prim_name}', the\" if prim_name else \"The\"\n const_utils.check_type_valid(x1_type, [mstype.float16, mstype.float32], 'x1')\n const_utils.check_type_valid(x2_type, [mstype.float16, mstype.float32], 'x2')\n if x1_type != x2_type:\n raise TypeError(f\"{msg_prefix} inputs must be the same type, but got \"\n f\"x1_type: {x1_type} and x2_type: {x2_type}.\")\n\n\n@constexpr\ndef _get_transpose_shape(x2_shape):\n x2_shape_range = tuple(range(len(x2_shape)))\n x2_shape_transpose = x2_shape_range[-2:-1] + x2_shape_range[:-2] + x2_shape_range[-1:]\n return x2_shape_transpose\n\n\ndef dot(x1, x2):\n \"\"\"\n Computation a dot product between samples in two tensors.\n\n Args:\n x1 (Tensor): First tensor in Dot op with datatype float16 or float32,\n The rank must be greater than or equal to 2.\n x2 (Tensor): Second tensor in Dot op with datatype float16 or float32,\n The rank must be greater than or equal to 2.\n\n Inputs:\n - **x1** (Tensor) - First tensor in Dot op with datatype float16 or float32\n The rank must be greater than or equal to 2.\n - **x2** (Tensor) - Second tensor in Dot op with datatype float16 or float32\n The rank must be greater than or equal to 2.\n\n Outputs:\n Tensor, dot product of x1 and x2.\n\n Raises:\n TypeError: If type of x1 and x2 are not the same.\n TypeError: If dtype of x1 or x2 is not float16 or float32.\n ValueError: If rank of x1 or x2 less than 2.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Examples:\n >>> from mindspore import Tensor, ops\n >>> import mindspore\n >>> input_x1 = Tensor(np.ones(shape=[2, 3]), mindspore.float32)\n >>> input_x2 = Tensor(np.ones(shape=[1, 3, 2]), mindspore.float32)\n >>> output = ops.dot(input_x1, input_x2)\n >>> print(output)\n [[[3. 3.]]\n [[3. 
3.]]]\n >>> print(output.shape)\n (2, 1, 2)\n >>> input_x1 = Tensor(np.ones(shape=[1, 2, 3]), mindspore.float32)\n >>> input_x2 = Tensor(np.ones(shape=[1, 3, 2]), mindspore.float32)\n >>> output = ops.dot(input_x1, input_x2)\n >>> print(output)\n [[[[3. 3.]]\n [[3. 3.]]]]\n >>> print(output.shape)\n (1, 2, 1, 2)\n >>> input_x1 = Tensor(np.ones(shape=[1, 2, 3]), mindspore.float32)\n >>> input_x2 = Tensor(np.ones(shape=[2, 3, 2]), mindspore.float32)\n >>> output = ops.dot(input_x1, input_x2)\n >>> print(output)\n [[[[3. 3.]\n [3. 3.]]\n [[3. 3.]\n [3. 3.]]]]\n >>> print(output.shape)\n (1, 2, 2, 2)\n >>> input_x1 = Tensor(np.ones(shape=[3, 2, 3]), mindspore.float32)\n >>> input_x2 = Tensor(np.ones(shape=[2, 1, 3, 2]), mindspore.float32)\n >>> output = ops.dot(input_x1, input_x2)\n >>> print(output)\n [[[[[3. 3.]]\n [[3. 3.]]]\n [[[3. 3.]]\n [[3. 3.]]]]\n [[[[3. 3.]]\n [[3. 3.]]]\n [[[3. 3.]]\n [[3. 3.]]]]\n [[[[3. 3.]]\n [[3. 3.]]]\n [[[3. 3.]]\n [[3. 3.]]]]]\n >>> print(output.shape)\n (3, 2, 2, 1, 2)\n \"\"\"\n shape_op = P.Shape()\n reshape_op = P.Reshape()\n transpose_op = P.Transpose()\n matmul_op = P.MatMul(False, False)\n x1_shape = shape_op(x1)\n x2_shape = shape_op(x2)\n x1_type = F.dtype(x1)\n x2_type = F.dtype(x2)\n _typecheck_input_dot(x1_type, x2_type, 'dot')\n _check_invalid_input(x1_shape, x2_shape, 'dot')\n\n if len(x1_shape) > 2 or len(x2_shape) > 2:\n x2_shape_transpose = _get_transpose_shape(x2_shape)\n x2_transpose = transpose_op(x2, x2_shape_transpose)\n x1_reshape = reshape_op(x1, (-1, x1_shape[-1]))\n x2_reshape = reshape_op(x2_transpose, (x2_shape[-2], -1))\n mul_result = matmul_op(x1_reshape, x2_reshape)\n return reshape_op(mul_result, x1_shape[:-1] + x2_shape[:-2] + x2_shape[-1:])\n return matmul_op(x1, x2)\n\n\n@constexpr\ndef _get_batch_size(x1_shape, x2_shape, prim_name=None):\n \"\"\"\n Get batch sizes from two inputs\n \"\"\"\n msg_prefix = f\"For '{prim_name}', the\" if prim_name else \"The\"\n if len(x1_shape) < 2 or len(x2_shape) < 2:\n raise ValueError(f\"{msg_prefix} inputs x1, x2 should have 'dimension >= 2', \"\n f\"but got 'len(x1_shape)': ({len(x1_shape)}) and 'len(x2_shape)': ({len(x2_shape)}).\")\n return x1_shape[0], x2_shape[0]\n\n\n@constexpr\ndef _typecheck_input_batch_dot(x1_type, x2_type, prim_name=None):\n \"\"\"\n Check input tensor types to be valid and confirm they are the same type for batch dot ops.\n \"\"\"\n msg_prefix = f\"For '{prim_name}', the\" if prim_name else \"The\"\n const_utils.check_type_valid(x1_type, [mstype.float32], 'x1')\n const_utils.check_type_valid(x2_type, [mstype.float32], 'x2')\n if x1_type != x2_type:\n raise TypeError(f\"{msg_prefix} inputs must be the same type, but got x1_type: {x1_type} and \"\n f\"x2_type: {x2_type}.\")\n\n\n@constexpr\ndef _check_axes_for_batch_dot(x1_shape, x2_shape, axes, prim_name=None):\n \"\"\"\n Check whether axes are valid and cast axes from tuple to list\n \"\"\"\n msg_prefix = f\"For '{prim_name}', the\" if prim_name else \"The\"\n if axes is None:\n if len(x2_shape) == 2:\n axes = [len(x1_shape) - 1, len(x2_shape) - 1]\n else:\n axes = [len(x1_shape) - 1, len(x2_shape) - 2]\n\n if isinstance(axes, (list, tuple)):\n if 0 in axes:\n raise ValueError(f\"{msg_prefix} 'axes' cannot contain 0, but got axes: {axes}.\")\n if len(axes) != 2:\n raise ValueError(f\"{msg_prefix} length of 'axes' must be equal to 2, but got {len(axes)}.\")\n if isinstance(axes, tuple):\n axes = list(axes)\n validator.check_value_type('axes[0]', axes[0], [int], 'batch_dot')\n 
validator.check_value_type('axes[1]', axes[1], [int], 'batch_dot')\n # Reverse if axis < 0\n if axes[0] < 0:\n axes[0] += len(x1_shape)\n if axes[1] < 0:\n axes[1] += len(x2_shape)\n validator.check_non_negative_int(axes[0], 'reversed axes[0]', 'batch_dot')\n validator.check_non_negative_int(axes[1], 'reversed axes[1]', 'batch_dot')\n if axes[0] > len(x1_shape) or axes[1] > len(x2_shape):\n raise ValueError(f\"{msg_prefix} axes[0] must be less than or equal to len(x1_shape), \"\n f\"and axes[1] must be less than or equal to len(x2_shape).\"\n f\"But got 'axes': {axes}, 'x1_shape': {x1_shape}, 'x2_shape': {x2_shape}.\")\n elif isinstance(axes, int):\n if axes == 0:\n raise ValueError(f\"{msg_prefix} 'axes' should not be equal to 0, but got {axes}.\")\n if axes < 0:\n axes = [axes + len(x1_shape), axes + len(x2_shape)]\n validator.check_non_negative_int(axes[0], 'reversed axes', 'batch_dot')\n elif axes > len(x1_shape) or axes > len(x2_shape):\n raise ValueError(f\"{msg_prefix} 'axes' cannot be greater than the length of 'x1_shape' and 'x2_shape', \"\n f\"but got 'axes': {axes}, 'x1_shape': {x1_shape}, 'x2_shape': {x2_shape}.\")\n else:\n axes = [axes, axes]\n else:\n raise ValueError(f\"{msg_prefix} type of 'axes' must be one of those: int, tuple(int), list(int), \"\n f\"but got {type(axes).__name__}.\")\n return axes\n\n\n@constexpr\ndef _calc_new_shape_batchdot(shape, axes, position=0):\n \"\"\"\n Calculate transpose and reshape parameters for input transformations,\n 'position' refers to whether tensor is first or second in the op.\n \"\"\"\n axis = axes[position]\n contraction_axes = tuple([axis])\n prod_contraction = int(np.prod([shape[i] for i in contraction_axes]))\n free_axes = tuple(i for i in range(1, len(shape)) if i not in contraction_axes)\n free_dims = tuple(shape[i] for i in free_axes)\n prod_free = int(np.prod(free_dims))\n\n transpose_perm = contraction_axes + free_axes if position else free_axes + contraction_axes\n transpose_perm = tuple([0]) + transpose_perm\n new_shape = (prod_contraction, prod_free) if position else (prod_free, prod_contraction)\n new_shape = tuple([shape[0]]) + new_shape\n return new_shape, transpose_perm, free_dims\n\n\n@constexpr\ndef _check_batch_size(x1_batch_size, x2_batch_size, prim_name=None):\n \"\"\"\n Check whether batch size of two inputs are the same\n \"\"\"\n msg_prefix = f\"For '{prim_name}', the\" if prim_name else \"The\"\n if x1_batch_size != x2_batch_size:\n raise ValueError(f\"{msg_prefix} inputs 'x1', 'x2' should have the same batch sizes, but got \"\n f\"'x1_batch_size': {x1_batch_size} and 'x2_batch_size': {x2_batch_size}.\")\n\n\n@constexpr\ndef _get_output_shape(batch_size, x1_ret, x2_ret):\n \"\"\"\n Compute output shape for batch dot\n \"\"\"\n output_shape = tuple([batch_size]) + x1_ret + x2_ret\n return output_shape\n\n\ndef batch_dot(x1, x2, axes=None):\n \"\"\"\n Computation of batch dot product between samples in two tensors containing batch dims.\n\n .. math::\n output = x1[batch, :] * x2[batch, :]\n\n Args:\n x1 (Tensor): First tensor in Batch Dot op with datatype float32 and the rank of `x1` must be greater\n than or equal to 2.\n x2 (Tensor): Second tensor in Batch Dot op with datatype float32. The datatype of `x2` should\n be same as `x1` and the rank of `x2` must be greater than or equal to 2.\n axes (Union[int, tuple(int), list(int)]): Single value or tuple/list of length 2 with dimensions\n specified for `a` and `b` each. 
If single value `N` passed, automatically picks up last N dims from\n            `a` input shape and last N dimensions from `b` input shape in order as axes for each respectively.\n            Default: None.\n\n    Outputs:\n        Tensor, batch dot product of `x1` and `x2`. For example: the shape of the output\n        for input `x1` shapes (batch, d1, axes, d2) and `x2` shapes (batch, d3, axes, d4) is (batch, d1, d2, d3, d4),\n        where d1 and d2 mean any number.\n\n    Raises:\n        TypeError: If type of x1 and x2 are not the same.\n        TypeError: If dtype of x1 or x2 is not float32.\n        ValueError: If rank of x1 or x2 is less than 2.\n        ValueError: If the batch dim is used in axes.\n        ValueError: If len(axes) is less than 2.\n        ValueError: If axes is not one of those: None, int, (int, int).\n        ValueError: If axes reversed from negative int is too low for dimensions of input arrays.\n        ValueError: If axes value is too high for dimensions of input arrays.\n        ValueError: If batch sizes of x1 and x2 are not the same.\n\n    Supported Platforms:\n        ``Ascend`` ``GPU`` ``CPU``\n\n    Examples:\n        >>> from mindspore import Tensor, ops\n        >>> import mindspore\n        >>> import numpy as np\n        >>> x1 = Tensor(np.ones(shape=[2, 2, 3]), mindspore.float32)\n        >>> x2 = Tensor(np.ones(shape=[2, 3, 2]), mindspore.float32)\n        >>> axes = (-1, -2)\n        >>> output = ops.batch_dot(x1, x2, axes)\n        >>> print(output)\n        [[[3. 3.]\n        [3. 3.]]\n        [[3. 3.]\n        [3. 3.]]]\n        >>> x1 = Tensor(np.ones(shape=[2, 2]), mindspore.float32)\n        >>> x2 = Tensor(np.ones(shape=[2, 3, 2]), mindspore.float32)\n        >>> axes = (1, 2)\n        >>> output = ops.batch_dot(x1, x2, axes)\n        >>> print(output)\n        [[2. 2. 2.]\n        [2. 2. 2.]]\n        >>> print(output.shape)\n        (2, 3)\n        >>> x1 = Tensor(np.ones(shape=[6, 2, 3, 4]), mindspore.float32)\n        >>> x2 = Tensor(np.ones(shape=[6, 5, 4, 8]), mindspore.float32)\n        >>> output = ops.batch_dot(x1, x2)\n        >>> print(output.shape)\n        (6, 2, 3, 5, 8)\n        >>> x1 = Tensor(np.ones(shape=[2, 2, 4]), mindspore.float32)\n        >>> x2 = Tensor(np.ones(shape=[2, 5, 4, 5]), mindspore.float32)\n        >>> output = ops.batch_dot(x1, x2)\n        >>> print(output.shape)\n        (2, 2, 5, 5)\n\n    \"\"\"\n\n    transpose_op = P.Transpose()\n    batch_matmul_op = P.BatchMatMul()\n    squeeze_one_op = P.Squeeze(1)\n    squeeze_minus_one_op = P.Squeeze(-1)\n    # input validity checks\n    x1_shape = F.shape(x1)\n    x2_shape = F.shape(x2)\n    x1_dim_num = len(x1_shape)\n    x2_dim_num = len(x2_shape)\n    x1_type = F.dtype(x1)\n    x2_type = F.dtype(x2)\n\n    x1_batch_size, x2_batch_size = _get_batch_size(x1_shape, x2_shape, 'batch_dot')\n\n    _typecheck_input_batch_dot(x1_type, x2_type, 'batch_dot')\n    _check_batch_size(x1_batch_size, x2_batch_size, 'batch_dot')\n    axes = _check_axes_for_batch_dot(x1_shape, x2_shape, axes, 'batch_dot')\n\n    if x1_dim_num == 2:\n        x1 = F.expand_dims(x1, 1)\n        axes[0] += 1\n    if x2_dim_num == 2:\n        x2 = F.expand_dims(x2, 2)\n\n    x1_shape = F.shape(x1)\n    x2_shape = F.shape(x2)\n\n    x1_reshape_fwd, x1_transpose_fwd, x1_ret = _calc_new_shape_batchdot(x1_shape, axes, 0)\n    x2_reshape_fwd, x2_transpose_fwd, x2_ret = _calc_new_shape_batchdot(x2_shape, axes, 1)\n    output_shape = _get_output_shape(x1_batch_size, x1_ret, x2_ret)\n\n    x1_transposed = transpose_op(x1, x1_transpose_fwd)\n    x2_transposed = transpose_op(x2, x2_transpose_fwd)\n    x1_reshaped = F.reshape(x1_transposed, x1_reshape_fwd)\n    x2_reshaped = F.reshape(x2_transposed, x2_reshape_fwd)\n\n    # Batch matmul op part\n    mul_result = batch_matmul_op(x1_reshaped, x2_reshaped)\n\n    final_result = F.reshape(mul_result, output_shape)\n\n    # if the original dims are expanded, restore them from 3 to 2\n    if x1_dim_num == 2:\n        final_result = 
squeeze_one_op(final_result)\n elif x2_dim_num == 2:\n final_result = squeeze_minus_one_op(final_result)\n\n return final_result\n\n\n@constexpr\ndef _check_same_type(dtype1, dtype2):\n return dtype1 == dtype2\n\n\n@constexpr\ndef _max(*args):\n \"\"\"Returns the maximum value.\"\"\"\n return max(*args)\n\n\n@constexpr\ndef _min(*args):\n \"\"\"Returns the minimum value.\"\"\"\n return min(*args)\n\n\n@constexpr\ndef _infer_shape_rem(shape1, shape2, ndim1, ndim2, transpose_b):\n \"\"\"Infers the shape of the last two dimensions after performing matmul.\"\"\"\n shape_rem = []\n if ndim1 >= 2:\n shape_rem.append(shape1[-2])\n if transpose_b:\n if ndim2 >= 2:\n shape_rem.append(shape2[-2])\n else:\n if ndim1 >= 1:\n shape_rem.append(shape2[-1])\n return tuple(shape_rem)\n\n\n@constexpr\ndef _check_matmul_shapes(shape1, shape2, prim_name=None):\n \"\"\"Checks shape1 and shape2 are valid to perform matmul, and returns output shape after broadcasting.\"\"\"\n msg_prefix = f\"For '{prim_name}', the\" if prim_name else \"The\"\n ndim1, ndim2 = len(shape1), len(shape2)\n if ndim1 < 1 or ndim2 < 1:\n raise ValueError(f\"{msg_prefix} dimension of input operands must be at least 1, but got \"\n f\"the length of shape1: {ndim1}, the length of shape2: {ndim2}.\")\n if ndim2 >= 2 and shape1[-1] != shape2[-2]:\n raise ValueError(f\"{msg_prefix} shape1[-1] should be equal to shape2[-2] when the length of shape2 \"\n f\"is greater than or equal to 2, but got shape1[-1]: {shape1[-1]}, \"\n f\"shape2[-2]: {shape2[-2]}.\")\n shape_out = deque()\n for items in zip_longest(reversed(shape1[:-2]), reversed(shape2[:-2]), fillvalue=1):\n max_size = max(items)\n if any(item not in (1, max_size) for item in items):\n raise ValueError(f\"{msg_prefix} operands could not be broadcast together with shape1 {shape1} and \"\n f\"shape2 {shape2}.\")\n shape_out.appendleft(max_size)\n return tuple(shape_out)\n\n\n@constexpr\ndef _tile_size(shape, out_shape, ndim):\n \"\"\"Returns tile_size such that shape*tile_size = out_shape\"\"\"\n size = [1] * ndim\n for idx, (i, j) in enumerate(zip(shape, out_shape)):\n if i != j:\n size[idx] = j\n return tuple(size)\n\n\n@constexpr\ndef _check_need_broadcast(shape1, shape2):\n \"\"\"Returns True if broadcast is necessary for batchmatmul.\"\"\"\n return shape1[:-2] != shape2[:-2]\n\n\ndef _expand(x, ndim):\n \"\"\"Expand x to ndim from axis, which can be 0 or -1.\"\"\"\n while F.rank(x) < ndim:\n x = F.expand_dims(x, 0)\n return x\n\n\ndef _broadcast_to(x, shape_cur, shape_to, ndim_to):\n \"\"\"Broadcasts x from shape_cur to shape_to.\"\"\"\n size = _tile_size(shape_cur, shape_to, ndim_to)\n return F.tile(x, size)\n\n\ndef matmul(x1, x2, dtype=None):\n \"\"\"\n Returns the matrix product of two arrays.\n\n Note:\n Numpy arguments `out`, `casting`, `order`, `subok`, `signature`, and `extobj` are\n not supported.\n On GPU, the supported dtypes are np.float16 and np.float32.\n On CPU, the supported dtypes are np.float16 and np.float32.\n\n Args:\n x1 (Tensor): Input tensor, scalar not allowed.\n The last dimension of `x1` must be the same size as the second last dimension of `x2`.\n And the shape of x1 and x2 could be broadcast.\n x2 (Tensor): Input tensor, scalar not allowed.\n The last dimension of `x1` must be the same size as the second last dimension of `x2`.\n And the shape of x1 and x2 could be broadcast.\n dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the\n output Tensor.\n\n Returns:\n Tensor or scalar, the matrix product of the inputs. 
This is a scalar only\n        when both `x1`, `x2` are 1-d vectors.\n\n    Raises:\n        ValueError: If the last dimension of `x1` is not the same size as the\n            second-to-last dimension of `x2`, or if a scalar value is passed in.\n        ValueError: If the shape of `x1` and `x2` could not broadcast together.\n\n    Supported Platforms:\n        ``Ascend`` ``GPU`` ``CPU``\n\n    Examples:\n        >>> from mindspore import Tensor, ops\n        >>> import mindspore\n        >>> import numpy as np\n        >>> # case 1 : Reasonable application of broadcast mechanism\n        >>> x1 = Tensor(np.arange(2*3*4).reshape(2, 3, 4), mindspore.float32)\n        >>> x2 = Tensor(np.arange(4*5).reshape(4, 5), mindspore.float32)\n        >>> output = ops.matmul(x1, x2)\n        >>> print(output)\n        [[[ 70. 76. 82. 88. 94.]\n        [ 190. 212. 234. 256. 278.]\n        [ 310. 348. 386. 424. 462.]]\n        [[ 430. 484. 538. 592. 646.]\n        [ 550. 620. 690. 760. 830.]\n        [ 670. 756. 842. 928. 1014.]]]\n        >>> print(output.shape)\n        (2, 3, 5)\n        >>> # case 2 : the rank of `x1` is 1\n        >>> x1 = Tensor(np.ones([1, 2]), mindspore.float32)\n        >>> x2 = Tensor(np.ones([2,]), mindspore.float32)\n        >>> output = ops.matmul(x1, x2)\n        >>> print(output)\n        [2.]\n        >>> print(output.shape)\n        (1,)\n    \"\"\"\n    # performs type promotion\n    dtype1 = F.dtype(x1)\n    dtype2 = F.dtype(x2)\n    if not _check_same_type(dtype1, dtype2):\n        x1 = x1.astype(mstype.float32)\n        x2 = x2.astype(mstype.float32)\n\n    ndim1_orig, ndim2_orig = F.rank(x1), F.rank(x2)\n    shape1_orig, shape2_orig = F.shape(x1), F.shape(x2)\n    transpose_b = ndim2_orig == 1\n    shape_backbone = _check_matmul_shapes(shape1_orig, shape2_orig, 'matmul')\n    # infers the shape of the output\n    shape_out = shape_backbone + _infer_shape_rem(shape1_orig, shape2_orig,\n                                                  ndim1_orig, ndim2_orig, transpose_b)\n\n    x1 = _expand(x1, 2)\n    x2 = _expand(x2, 2)\n    if F.rank(x2) == 2:\n        if F.rank(x1) > 2:\n            x1 = F.reshape(x1, (-1, shape1_orig[-1]))\n        res = P.MatMul(False, transpose_b)(x1, x2)\n    else:\n        # broadcasts x1.shape[:-2] with x2.shape[:-2]\n        ndim_aligned = _max(ndim1_orig, ndim2_orig)\n        x1 = _expand(x1, ndim_aligned)\n        x2 = _expand(x2, ndim_aligned)\n        shape1_aligned, shape2_aligned = F.shape(x1), F.shape(x2)\n        x1 = _broadcast_to(x1, shape1_aligned[:-2], shape_backbone, ndim_aligned)\n        x2 = _broadcast_to(x2, shape2_aligned[:-2], shape_backbone, ndim_aligned)\n        res = P.BatchMatMul(False, transpose_b)(x1, x2)\n\n    if dtype is not None:\n        res = res.astype(dtype)\n    return F.reshape(res, shape_out)\n\n\n@constexpr\ndef _create_cummin_perm(axis, x_shape):\n    \"\"\"Ensure axis is in [-len(x_shape), len(x_shape)-1]\"\"\"\n    len_axis = len(x_shape)\n    if not isinstance(axis, int):\n        raise TypeError(f\"The data type of 'axis' should be int, but got {axis}.\")\n    if axis < -len_axis or axis >= len_axis:  # valid axes are [-len_axis, len_axis - 1]\n        raise ValueError(f\"The value of axis should be in [{-len_axis}, {len_axis - 1}], but got {axis}.\")\n    prem = [i for i in range(len_axis)]\n    if axis < 0:\n        axis = axis + len_axis\n    prem[0], prem[axis] = axis, 0\n    prem = tuple(prem)\n    return prem\n\n\ndef cummin(x, axis):\n    r\"\"\"\n    Computation of the cumulative minimum of elements of 'x' in the dimension axis,\n    and the index location of each minimum value found in the dimension 'axis'.\n\n    It returns the cumulative minimum of elements and the index.\n\n    .. math::\n        \\begin{array}{ll} \\\\\n            y_{i} = \\min(x_{1}, x_{2}, ... , x_{i})\n        \\end{array}\n\n    Args:\n        x (Tensor): The input tensor, rank of `input_x` > 0.\n        axis (Int): The dimension to do the operation. The axis is in the range from -len(`input_x`.shape)\n            to len(`input_x`.shape) - 1. 
When it's in the range from 0 to len(`input_x`.shape) - 1, it means starting\n            from the first dimension and counting forwards. When it's less than 0, it means we're counting backwards\n            from the last dimension. For example, -1 means the last dimension.\n\n    Outputs:\n        - **output** (Tensor) - The output tensor of the cumulative minimum of elements.\n        - **indices** (Tensor) - The result tensor of the index of each minimum value found.\n\n    Raises:\n        TypeError: If `input_x` is not a Tensor.\n        TypeError: If 'axis' is not an int.\n        ValueError: If 'axis' is out of the range [-len(`input_x`.shape), len(`input_x`.shape) - 1].\n\n    Supported Platforms:\n        ``Ascend``\n\n    Examples:\n        >>> from mindspore import Tensor, ops\n        >>> import mindspore\n        >>> a = Tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220], mindspore.float32)\n        >>> output = ops.cummin(a, axis=0)\n        >>> print(output[0])\n        [-0.2284 -0.6628 -0.6628 -0.6628 -1.3298 -1.3298]\n        >>> print(output[1])\n        [0 1 1 1 4 4]\n    \"\"\"\n    cummin_op = inner.Cummin(axis=0)\n    if axis == 0:\n        out1, out2 = cummin_op(x)\n    else:\n        transpose = P.Transpose()\n        x_shape = P.Shape()(x)\n        prem = _create_cummin_perm(axis, x_shape)\n        x = transpose(x, prem)\n        out1, out2 = cummin_op(x)\n        out1 = transpose(out1, prem)\n        out2 = transpose(out2, prem)\n    return [out1, out2]\n\n\ndef resize_nearest_neighbor(input_x, size, align_corners=False):\n    r\"\"\"\n    Resizes the input tensor by using the nearest neighbor algorithm.\n\n    Resizes the input tensor to a given size by using the nearest neighbor algorithm. The nearest\n    neighbor algorithm selects the value of the nearest point and does not consider the\n    values of neighboring points at all, yielding a piecewise-constant interpolant.\n\n    Args:\n        input_x (Tensor): The input tensor. The shape of the tensor is :math:`(N, C, H, W)`.\n        size (Union[Tensor, tuple, list]): The target size. The dimension of size must be 2.\n        align_corners (bool): Whether the centers of the 4 corner pixels of the input\n            and output tensors are aligned. 
Default: False.\n\n    Outputs:\n        Tensor, the shape of the output tensor is :math:`(N, C, NEW\\_H, NEW\\_W)`.\n        The data type is the same as the `input_x`.\n\n    Raises:\n        TypeError: If `input_x` is not a Tensor.\n        TypeError: If `size` is neither tuple nor list.\n        TypeError: If `align_corners` is not a bool.\n        ValueError: If length of `size` is not equal to 2.\n\n    Supported Platforms:\n        ``Ascend`` ``GPU`` ``CPU``\n\n    Examples:\n        >>> input_tensor = Tensor(np.array([[[[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]]]), mindspore.float32)\n        >>> output = ops.ResizeNearestNeighbor(input_tensor, (2, 2))\n        >>> print(output)\n        [[[[-0.1 0.3]\n        [ 0.4 0.5]]]]\n    \"\"\"\n    if size is None:\n        raise ValueError(f'For ResizeNearestNeighbor, size cannot be None.')\n    if isinstance(size, (tuple, list)):\n        resize = P.ResizeNearestNeighbor(size, align_corners)\n        return resize(input_x)\n    if is_const(size):\n        size = size.asnumpy()\n        resize = P.ResizeNearestNeighbor(size, align_corners)\n        return resize(input_x)\n\n    resize = DynamicResizeNearestNeighbor(align_corners)\n    return resize(input_x, size)\n", "# Copyright 2020-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThe module text.utils provides some general methods for NLP text processing.\nFor example, you can use Vocab to build a dictionary,\nuse to_bytes and to_str to encode and decode strings into a specified format.\n\"\"\"\n\nfrom enum import IntEnum\n\nimport numpy as np\n\nimport mindspore._c_dataengine as cde\nfrom .validators import check_vocab, check_from_file, check_from_list, check_from_dict, check_from_dataset, \\\n    check_from_dataset_sentencepiece, check_from_file_sentencepiece, check_save_model, \\\n    check_from_file_vectors, check_tokens_to_ids, check_ids_to_tokens\n\n__all__ = [\n    \"Vocab\", \"SentencePieceVocab\", \"to_str\", \"to_bytes\", \"Vectors\", \"FastText\", \"GloVe\", \"CharNGram\"\n]\n\n\nclass Vocab:\n    \"\"\"\n    Vocab object that is used to save pairs of words and ids.\n\n    It contains a map that maps each word(str) to an id(int), or the reverse.\n    \"\"\"\n\n    def __init__(self):\n        self.c_vocab = None\n\n    def vocab(self):\n        \"\"\"\n        Get the vocabulary table in dict type.\n\n        Returns:\n            A vocabulary consisting of word and id pairs.\n\n        Examples:\n            >>> vocab = text.Vocab.from_list([\"word_1\", \"word_2\", \"word_3\", \"word_4\"])\n            >>> vocabulary_dict = vocab.vocab()\n        \"\"\"\n        check_vocab(self.c_vocab)\n        return self.c_vocab.vocab()\n\n    @check_tokens_to_ids\n    def tokens_to_ids(self, tokens):\n        \"\"\"\n        Converts a token string or a sequence of tokens into a single integer id or a sequence of ids.\n        If token does not exist, return id with value -1.\n\n        Args:\n            tokens(Union[str, list[str]]): One or several token(s) to convert to token id(s).\n\n        Returns:\n            The token id or list of token ids.\n\n        Examples:\n            >>> vocab = text.Vocab.from_list([\"w1\", \"w2\", \"w3\"], special_tokens=[\"<unk>\"], special_first=True)\n            >>> ids = vocab.tokens_to_ids([\"w1\", \"w3\"])\n        \"\"\"\n        check_vocab(self.c_vocab)\n        if isinstance(tokens, 
str):\n            tokens = [tokens]\n        return self.c_vocab.tokens_to_ids(tokens)\n\n    @check_ids_to_tokens\n    def ids_to_tokens(self, ids):\n        \"\"\"\n        Converts a single index or a sequence of indices into a token or a sequence of tokens.\n        If id does not exist, return empty string.\n\n        Args:\n            ids(Union[int, list[int]]): The token id (or token ids) to convert to tokens.\n\n        Returns:\n            The decoded token(s).\n\n        Examples:\n            >>> vocab = text.Vocab.from_list([\"w1\", \"w2\", \"w3\"], special_tokens=[\"<unk>\"], special_first=True)\n            >>> token = vocab.ids_to_tokens(0)\n        \"\"\"\n        check_vocab(self.c_vocab)\n        if isinstance(ids, int):\n            ids = [ids]\n        return self.c_vocab.ids_to_tokens(ids)\n\n    @classmethod\n    @check_from_dataset\n    def from_dataset(cls, dataset, columns=None, freq_range=None, top_k=None, special_tokens=None, special_first=True):\n        \"\"\"\n        Build a vocab from a dataset.\n\n        This would collect all unique words in a dataset and return a vocab within\n        the frequency range specified by user in freq_range. User would be warned if no words fall into the frequency\n        range. Words in vocab are ordered from the highest frequency to the lowest frequency. Words with the same\n        frequency would be ordered lexicographically.\n\n        Args:\n            dataset(Dataset): dataset to build vocab from.\n            columns(list[str], optional): column names to get words from. It can be a list of column names.\n                (default=None, where all columns will be used. If any column isn't string type, will return error).\n            freq_range(tuple, optional): A tuple of integers (min_frequency, max_frequency). Words within the frequency\n                range would be kept. 0 <= min_frequency <= max_frequency <= total_words. min_frequency=0 is the same as\n                min_frequency=1. max_frequency > total_words is the same as max_frequency = total_words.\n                min_frequency/max_frequency can be None, which corresponds to 0/total_words separately\n                (default=None, all words are included).\n            top_k(int, optional): top_k is greater than 0. Number of words to be built into vocab. top_k means most\n                frequent words are taken. top_k is taken after freq_range. If not enough top_k, all words will be taken\n                (default=None, all words are included).\n            special_tokens(list, optional): A list of strings, each one is a special token. For example\n                special_tokens=[\"<pad>\",\"<unk>\"] (default=None, no special tokens will be added).\n            special_first(bool, optional): Whether special_tokens will be prepended/appended to vocab. If special_tokens\n                is specified and special_first is set to True, special_tokens will be prepended (default=True).\n\n        Returns:\n            Vocab, vocab built from the dataset.\n\n        Examples:\n            >>> dataset = ds.TextFileDataset(\"/path/to/sentence/piece/vocab/file\", shuffle=False)\n            >>> vocab = text.Vocab.from_dataset(dataset, \"text\", freq_range=None, top_k=None,\n            ...                                 special_tokens=[\"<pad>\", \"<unk>\"],\n            ...                                 special_first=True)\n            >>> dataset = dataset.map(operations=text.Lookup(vocab, \"<unk>\"), input_columns=[\"text\"])\n        \"\"\"\n        vocab = Vocab()\n        vocab.c_vocab = dataset.build_vocab(columns, freq_range, top_k, special_tokens, special_first)\n        return vocab\n\n    @classmethod\n    @check_from_list\n    def from_list(cls, word_list, special_tokens=None, special_first=True):\n        \"\"\"\n        Build a vocab object from a list of words.\n\n        Args:\n            word_list(list): A list of strings where each element is a word of type string.\n            special_tokens(list, optional): A list of strings, each one is a special token. 
For example\n                special_tokens=[\"<pad>\",\"<unk>\"] (default=None, no special tokens will be added).\n            special_first(bool, optional): Whether special_tokens is prepended or appended to vocab. If special_tokens\n                is specified and special_first is set to True, special_tokens will be prepended (default=True).\n\n        Returns:\n            Vocab, vocab built from the `list`.\n\n        Examples:\n            >>> vocab = text.Vocab.from_list([\"w1\", \"w2\", \"w3\"], special_tokens=[\"<unk>\"], special_first=True)\n        \"\"\"\n        if special_tokens is None:\n            special_tokens = []\n        vocab = Vocab()\n        vocab.c_vocab = cde.Vocab.from_list(word_list, special_tokens, special_first)\n        return vocab\n\n    @classmethod\n    @check_from_file\n    def from_file(cls, file_path, delimiter=\"\", vocab_size=None, special_tokens=None, special_first=True):\n        \"\"\"\n        Build a vocab object from a vocab file.\n\n        Args:\n            file_path (str): Path to the file which contains the vocab list.\n            delimiter (str, optional): A delimiter to break up each line in file, the first element is taken to be\n                the word (default=\"\", the whole line will be treated as a word).\n            vocab_size (int, optional): Number of words to read from file_path (default=None, all words are taken).\n            special_tokens (list, optional): A list of strings, each one is a special token. For example\n                special_tokens=[\"<pad>\",\"<unk>\"] (default=None, no special tokens will be added).\n            special_first (bool, optional): Whether special_tokens will be prepended/appended to vocab.\n                If special_tokens is specified and special_first is set to True,\n                special_tokens will be prepended (default=True).\n\n        Returns:\n            Vocab, vocab built from the file.\n\n        Examples:\n            >>> # Assume vocab file contains the following content:\n            >>> # --- begin of file ---\n            >>> # apple,apple2\n            >>> # banana, 333\n            >>> # cat,00\n            >>> # --- end of file ---\n            >>>\n            >>> # Read file through this API and specify \",\" as delimiter,\n            >>> # then the delimiter will break up each line in file, the first element is taken to be the word.\n            >>> vocab = text.Vocab.from_file(\"/path/to/simple/vocab/file\", \",\", None, [\"<pad>\", \"<unk>\"], True)\n            >>>\n            >>> # Finally, there are 5 words in the vocab: \"<pad>\", \"<unk>\", \"apple\", \"banana\", \"cat\"\n            >>> print(vocab.vocab())\n        \"\"\"\n        if vocab_size is None:\n            vocab_size = -1\n        if special_tokens is None:\n            special_tokens = []\n        vocab = Vocab()\n        vocab.c_vocab = cde.Vocab.from_file(file_path, delimiter, vocab_size, special_tokens, special_first)\n        return vocab\n\n    @classmethod\n    @check_from_dict\n    def from_dict(cls, word_dict):\n        \"\"\"\n        Build a vocab object from a dict.\n\n        Args:\n            word_dict (dict): Dict contains word and id pairs, where word should be str and id be int. id is recommended\n                to start from 0 and be continuous. 
ValueError will be raised if id is negative.\n\n        Returns:\n            Vocab, vocab built from the `dict`.\n\n        Examples:\n            >>> vocab = text.Vocab.from_dict({\"home\": 3, \"behind\": 2, \"the\": 4, \"world\": 5, \"<unk>\": 6})\n        \"\"\"\n        vocab = Vocab()\n        vocab.c_vocab = cde.Vocab.from_dict(word_dict)\n        return vocab\n\n\nclass SentencePieceVocab(cde.SentencePieceVocab):\n    \"\"\"\n    SentencePiece object that is used to do word segmentation.\n    \"\"\"\n\n    @classmethod\n    @check_from_dataset_sentencepiece\n    def from_dataset(cls, dataset, col_names, vocab_size, character_coverage, model_type, params):\n        \"\"\"\n        Build a SentencePiece from a dataset.\n\n        Args:\n            dataset(Dataset): Dataset to build SentencePiece.\n            col_names(list): The list of the column names.\n            vocab_size(int): Vocabulary size.\n            character_coverage(float): Amount of characters covered by the model, good defaults are: 0.9995 for\n                languages with rich character set like Japanese or Chinese and 1.0 for other languages with small\n                character set.\n            model_type(SentencePieceModel): It can be any of [SentencePieceModel.UNIGRAM, SentencePieceModel.BPE,\n                SentencePieceModel.CHAR, SentencePieceModel.WORD], default is SentencePieceModel.UNIGRAM. The input\n                sentence must be pre-tokenized when using SentencePieceModel.WORD type.\n\n                - SentencePieceModel.UNIGRAM, Unigram Language Model means the next word in the sentence is assumed to\n                  be independent of the previous words generated by the model.\n                - SentencePieceModel.BPE, refers to byte pair encoding algorithm, which replaces the most frequent pair\n                  of bytes in a sentence with a single, unused byte.\n                - SentencePieceModel.CHAR, refers to char based sentencePiece Model type.\n                - SentencePieceModel.WORD, refers to word based sentencePiece Model type.\n\n            params(dict): A dictionary with no incoming parameters.\n\n        Returns:\n            SentencePieceVocab, vocab built from the dataset.\n\n        Examples:\n            >>> from mindspore.dataset.text import SentencePieceModel\n            >>> dataset = ds.TextFileDataset(\"/path/to/sentence/piece/vocab/file\", shuffle=False)\n            >>> vocab = text.SentencePieceVocab.from_dataset(dataset, [\"text\"], 5000, 0.9995,\n            ...                                              SentencePieceModel.UNIGRAM, {})\n        \"\"\"\n\n        return dataset.build_sentencepiece_vocab(col_names, vocab_size, character_coverage,\n                                                 model_type, params)\n\n    @classmethod\n    @check_from_file_sentencepiece\n    def from_file(cls, file_path, vocab_size, character_coverage, model_type, params):\n        \"\"\"\n        Build a SentencePiece object from a vocab file.\n\n        Args:\n            file_path(list): Path to the file which contains the SentencePiece list.\n            vocab_size(int): Vocabulary size.\n            character_coverage(float): Amount of characters covered by the model, good defaults are: 0.9995 for\n                languages with rich character set like Japanese or Chinese and 1.0 for other languages with small\n                character set.\n            model_type(SentencePieceModel): It can be any of [SentencePieceModel.UNIGRAM, SentencePieceModel.BPE,\n                SentencePieceModel.CHAR, SentencePieceModel.WORD], default is SentencePieceModel.UNIGRAM. 
The input\n sentence must be pre-tokenized when using SentencePieceModel.WORD type.\n\n - SentencePieceModel.UNIGRAM, Unigram Language Model means the next word in the sentence is assumed to\n be independent of the previous words generated by the model.\n - SentencePieceModel.BPE, refers to byte pair encoding algorithm, which replaces the most frequent pair\n of bytes in a sentence with a single, unused byte.\n - SentencePieceModel.CHAR, refers to char based sentencePiece Model type.\n - SentencePieceModel.WORD, refers to word based sentencePiece Model type.\n\n params(dict): A dictionary with no incoming parameters (the parameters are derived from the SentencePiece\n library).\n\n .. code-block::\n\n input_sentence_size 0\n max_sentencepiece_length 16\n\n Returns:\n SentencePieceVocab, vocab built from the file.\n\n Examples:\n >>> from mindspore.dataset.text import SentencePieceModel\n >>> vocab = text.SentencePieceVocab.from_file([\"/path/to/sentence/piece/vocab/file\"], 5000, 0.9995,\n ... SentencePieceModel.UNIGRAM, {})\n \"\"\"\n return super().from_file(file_path, vocab_size, character_coverage,\n DE_C_INTER_SENTENCEPIECE_MODE[model_type], params)\n\n @classmethod\n @check_save_model\n def save_model(cls, vocab, path, filename):\n \"\"\"\n Save the model into the given file path.\n\n Args:\n vocab(SentencePieceVocab): A SentencePiece object.\n path(str): Path to store model.\n filename(str): The name of the file.\n\n Examples:\n >>> from mindspore.dataset.text import SentencePieceModel\n >>> vocab = text.SentencePieceVocab.from_file([\"/path/to/sentence/piece/vocab/file\"], 5000, 0.9995,\n ... SentencePieceModel.UNIGRAM, {})\n >>> text.SentencePieceVocab.save_model(vocab, \"./\", \"m.model\")\n \"\"\"\n super().save_model(vocab, path, filename)\n\n\ndef to_str(array, encoding='utf8'):\n \"\"\"\n Convert NumPy array of `bytes` to array of `str` by decoding each element based on charset `encoding`.\n\n Args:\n array (numpy.ndarray): Array of `bytes` type representing strings.\n encoding (str): Indicating the charset for decoding (default='utf8').\n\n Returns:\n numpy.ndarray, NumPy array of `str`.\n\n Examples:\n >>> text_file_dataset_dir = [\"/path/to/text_file_dataset_file\"]\n >>> dataset = ds.TextFileDataset(dataset_files=text_file_dataset_dir, shuffle=False)\n >>> for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):\n ... 
data = text.to_str(item[\"text\"])\n \"\"\"\n\n if not isinstance(array, np.ndarray):\n raise TypeError('input should be a NumPy array.')\n\n return np.char.decode(array, encoding)\n\n\ndef to_bytes(array, encoding='utf8'):\n \"\"\"\n Convert NumPy array of `str` to array of `bytes` by encoding each element based on charset `encoding`.\n\n Args:\n array (numpy.ndarray): Array of `str` type representing strings.\n encoding (str): Indicating the charset for encoding (default='utf8').\n\n Returns:\n numpy.ndarray, NumPy array of `bytes`.\n \"\"\"\n\n if not isinstance(array, np.ndarray):\n raise TypeError('input should be a NumPy array.')\n\n return np.char.encode(array, encoding)\n\n\nclass JiebaMode(IntEnum):\n \"\"\"\n An enumeration for JiebaTokenizer.\n\n Possible enumeration values are: JiebaMode.MIX, JiebaMode.MP, JiebaMode.HMM.\n\n - JiebaMode.MIX: tokenize with a mix of MPSegment and HMMSegment algorithm.\n - JiebaMode.MP: tokenize with MPSegment algorithm.\n - JiebaMode.HMM: tokenize with Hidden Markov Model Segment algorithm.\n \"\"\"\n MIX = 0\n MP = 1\n HMM = 2\n\n\nclass NormalizeForm(IntEnum):\n \"\"\"\n An enumeration for NormalizeUTF8.\n\n Possible enumeration values are: NormalizeForm.NONE, NormalizeForm.NFC, NormalizeForm.NFKC, NormalizeForm.NFD,\n NormalizeForm.NFKD.\n\n - NormalizeForm.NONE: do nothing for input string tensor.\n - NormalizeForm.NFC: normalize with Normalization Form C.\n - NormalizeForm.NFKC: normalize with Normalization Form KC.\n - NormalizeForm.NFD: normalize with Normalization Form D.\n - NormalizeForm.NFKD: normalize with Normalization Form KD.\n \"\"\"\n NONE = 0\n NFC = 1\n NFKC = 2\n NFD = 3\n NFKD = 4\n\n\nclass SentencePieceModel(IntEnum):\n \"\"\"\n An enumeration for SentencePieceModel.\n\n Possible enumeration values are: SentencePieceModel.UNIGRAM, SentencePieceModel.BPE, SentencePieceModel.CHAR,\n SentencePieceModel.WORD.\n\n - SentencePieceModel.UNIGRAM: Unigram Language Model means the next word in the sentence is assumed to be\n independent of the previous words generated by the model.\n - SentencePieceModel.BPE: refers to byte pair encoding algorithm, which replaces the most frequent pair of bytes in\n a sentence with a single, unused byte.\n - SentencePieceModel.CHAR: refers to char based sentencePiece Model type.\n - SentencePieceModel.WORD: refers to word based sentencePiece Model type.\n \"\"\"\n UNIGRAM = 0\n BPE = 1\n CHAR = 2\n WORD = 3\n\n\nDE_C_INTER_SENTENCEPIECE_MODE = {\n SentencePieceModel.UNIGRAM: cde.SentencePieceModel.DE_SENTENCE_PIECE_UNIGRAM,\n SentencePieceModel.BPE: cde.SentencePieceModel.DE_SENTENCE_PIECE_BPE,\n SentencePieceModel.CHAR: cde.SentencePieceModel.DE_SENTENCE_PIECE_CHAR,\n SentencePieceModel.WORD: cde.SentencePieceModel.DE_SENTENCE_PIECE_WORD\n}\n\n\nclass SPieceTokenizerOutType(IntEnum):\n \"\"\"\n An enumeration for SPieceTokenizerOutType.\n\n Possible enumeration values are: SPieceTokenizerOutType.STRING, SPieceTokenizerOutType.INT.\n\n - SPieceTokenizerOutType.STRING: means output type of SentencePiece Tokenizer is string.\n - SPieceTokenizerOutType.INT: means output type of SentencePiece Tokenizer is int.\n \"\"\"\n STRING = 0\n INT = 1\n\n\nclass SPieceTokenizerLoadType(IntEnum):\n \"\"\"\n An enumeration for SPieceTokenizerLoadType.\n\n Possible enumeration values are: SPieceTokenizerLoadType.FILE, SPieceTokenizerLoadType.MODEL.\n\n - SPieceTokenizerLoadType.FILE: Load sentencepiece tokenizer from local sentencepiece vocab file.\n - SPieceTokenizerLoadType.MODEL: Load sentencepiece tokenizer from 
sentencepiece vocab instance.\n \"\"\"\n FILE = 0\n MODEL = 1\n\n\nclass Vectors(cde.Vectors):\n \"\"\"\n Vectors object that is used to map tokens into vectors.\n \"\"\"\n\n @classmethod\n @check_from_file_vectors\n def from_file(cls, file_path, max_vectors=None):\n \"\"\"\n Build a vector from a file.\n\n Args:\n file_path (str): Path of the file that contains the vectors.\n max_vectors (int, optional): This can be used to limit the number of pre-trained vectors loaded.\n Most pre-trained vector sets are sorted in the descending order of word frequency. Thus, in\n situations where the entire set doesn’t fit in memory, or is not needed for another reason,\n passing max_vectors can limit the size of the loaded set (default=None, no limit).\n\n Examples:\n >>> vector = text.Vectors.from_file(\"/path/to/vectors/file\", max_vectors=None)\n \"\"\"\n\n max_vectors = max_vectors if max_vectors is not None else 0\n return super().from_file(file_path, max_vectors)\n\n\nclass FastText(cde.FastText):\n \"\"\"\n FastText object that is used to map tokens into vectors.\n \"\"\"\n\n @classmethod\n @check_from_file_vectors\n def from_file(cls, file_path, max_vectors=None):\n \"\"\"\n Build a FastText vector from a file.\n\n Args:\n file_path (str): Path of the file that contains the vectors. The suffix of pre-trained vector sets\n must be `*.vec`.\n max_vectors (int, optional): This can be used to limit the number of pre-trained vectors loaded.\n Most pre-trained vector sets are sorted in the descending order of word frequency. Thus, in\n situations where the entire set doesn’t fit in memory, or is not needed for another reason,\n passing max_vectors can limit the size of the loaded set (default=None, no limit).\n\n Examples:\n >>> fast_text = text.FastText.from_file(\"/path/to/fast_text/file\", max_vectors=None)\n \"\"\"\n\n max_vectors = max_vectors if max_vectors is not None else 0\n return super().from_file(file_path, max_vectors)\n\n\nclass GloVe(cde.GloVe):\n \"\"\"\n GloVe object that is used to map tokens into vectors.\n \"\"\"\n\n @classmethod\n @check_from_file_vectors\n def from_file(cls, file_path, max_vectors=None):\n \"\"\"\n Build a GloVe vector from a file.\n\n Args:\n file_path (str): Path of the file that contains the vectors. The format of pre-trained vector sets\n must be `glove.6B.*.txt`.\n max_vectors (int, optional): This can be used to limit the number of pre-trained vectors loaded.\n Most pre-trained vector sets are sorted in the descending order of word frequency. Thus, in\n situations where the entire set doesn’t fit in memory, or is not needed for another reason,\n passing max_vectors can limit the size of the loaded set (default=None, no limit).\n\n Examples:\n >>> glove = text.GloVe.from_file(\"/path/to/glove/file\", max_vectors=None)\n \"\"\"\n\n max_vectors = max_vectors if max_vectors is not None else 0\n return super().from_file(file_path, max_vectors)\n\n\nclass CharNGram(cde.CharNGram):\n \"\"\"\n CharNGram object that is used to map tokens into pre-trained vectors.\n \"\"\"\n\n @classmethod\n @check_from_file_vectors\n def from_file(cls, file_path, max_vectors=None):\n \"\"\"\n Build a CharNGram vector from a file.\n\n Args:\n file_path (str): Path of the file that contains the CharNGram vectors.\n max_vectors (int, optional): This can be used to limit the number of pre-trained vectors loaded.\n Most pre-trained vector sets are sorted in the descending order of word frequency. 
Thus, in\n situations where the entire set doesn’t fit in memory, or is not needed for another reason,\n passing max_vectors can limit the size of the loaded set (default=None, no limit).\n\n Examples:\n >>> char_n_gram = text.CharNGram.from_file(\"/path/to/char_n_gram/file\", max_vectors=None)\n \"\"\"\n\n max_vectors = max_vectors if max_vectors is not None else 0\n return super().from_file(file_path, max_vectors)\n" ]
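A brief usage sketch for the vocab builders and the to_str/to_bytes helpers above (this assumes MindSpore is installed and the module above is importable as mindspore.dataset.text; the word list is illustrative):

import numpy as np
import mindspore.dataset.text as text

# Build a vocab from an in-memory word list; with special_first=True the
# special tokens take the first ids.
vocab = text.Vocab.from_list(["w1", "w2", "w3"], special_tokens=["<pad>", "<unk>"], special_first=True)

# to_bytes and to_str are inverses for a fixed encoding.
arr = np.array(["hello", "world"])
encoded = text.to_bytes(arr)    # numpy array of bytes
decoded = text.to_str(encoded)  # back to a numpy array of str
assert (decoded == arr).all()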
[ [ "numpy.prod" ], [ "numpy.char.encode", "numpy.char.decode" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MarcoGorelli/precise
[ "227d017d45f1c4b39887a85133f3d62950a1e341", "227d017d45f1c4b39887a85133f3d62950a1e341", "227d017d45f1c4b39887a85133f3d62950a1e341" ]
[ "tests/portfolio/test_parity.py", "precise/skaters/location/empirical.py", "precise/skaters/portfoliostatic/ppoportfactory.py" ]
[ "\nfrom precise.skaters.covarianceutil.covrandom import random_band_cov\nfrom precise.skaters.portfoliostatic.hrpport import hrp_unit_unit_s5_port\nimport numpy as np\n\n\ndef test_parity():\n cov = random_band_cov()\n print(np.shape(cov))\n w = hrp_unit_unit_s5_port(cov=cov)\n print(sum(w))", "from precise.skaters.location.empricalpre import emp\nimport numpy as np\n\n\ndef emp_d0(y, s:dict, k=1, **ignore):\n \"\"\" Rudimentary empirical skater with no cov estimation \"\"\"\n # See precision.covariance.empirical.run_emp_pcov_d1 if you want cov estimates\n s = emp(s=s, x=np.array(y))\n y_hat = s['mean']\n return y_hat, np.eye(len(y)), s\n\n\ndef emp_d1(y, s:dict, k=1, **ignore):\n \"\"\" Rudimentary differenced empirical skater with no cov estimation \"\"\"\n # See precision.covariance.empirical.run_emp_pcov_d1 if you want cov estimates\n if not s:\n s = {'prev_y':y,'dy':{}}\n return np.zeros_like(y), np.eye(len(y)), s\n else:\n dy = y - s['prev_y']\n dy_hat, _, s['dy'] = emp_d0(y=dy, s=s['dy'], k=1)\n y['prev_y'] = y\n y_hat = s['prev_y'] + dy_hat + s['dy']['v']['mean']\n y_cov = np.eye(len(y))\n return y_hat, y_cov, s\n\n\n", "import numpy as np\nimport pandas as pd\nfrom precise.skaters.covarianceutil.covfunctions import to_symmetric, dense_weights_from_dict, normalize_dict_values, nearest_pos_def\nfrom precise.skaters.covarianceutil.covfunctions import try_invert\nfrom precise.skaters.locationutil.vectorfunctions import normalize\nfrom precise.skaters.portfolioutil.portfunctions import portfolio_variance\nfrom pypfopt import EfficientFrontier\nfrom typing import List\nfrom itertools import zip_longest\nfrom precise.skaters.portfolioutil.portfunctions import var_scaled_returns\nfrom pypfopt.exceptions import OptimizationError\nfrom cvxpy.error import SolverError\ntry:\n from scipy.sparse.linalg import ArpackNoConvergence\nexcept ImportError:\n from scipy.sparse.linalg.eigen import ArpackNoConvergence\n\nfrom precise.skaters.covarianceutil.covfunctions import affine_shrink\n\n# Thin wrapper for PyPortfolioOpt\n# For full flexibility refer to the package https://pyportfolioopt.readthedocs.io/en/latest/MeanVariance.html\n\n\nPPO_METHODS = ['max_sharpe','min_volatility','max_quadratic_utility']\nPPO_LONG_BOUNDS = (0, 1)\nPPO_UNIT_BOUNDS = (-1, 1)\n\n\ndef ppo_sharpe_port(cov=None, pre=None, as_dense=True):\n \"\"\" Max Sharpe ratio portfolio using cov-implied returns\n :param cov: Covariance matrix\n :param pre: Precision matrix\n :param as_dense: If false, will return weights in dict formet\n :return: np.array of weights\n \"\"\"\n return ppo_portfolio_factory(method='max_sharpe', cov=cov, pre=pre, as_dense=as_dense, weight_bounds=PPO_LONG_BOUNDS)\n\n\ndef ppo_vol_port(cov=None, pre=None, as_dense=True):\n return ppo_portfolio_factory(method='min_volatility', cov=cov, pre=pre, as_dense=as_dense, weight_bounds=PPO_LONG_BOUNDS)\n\n\ndef ppo_quad_port(cov=None, pre=None, as_dense=True):\n return ppo_portfolio_factory(method='max_quadratic_utility', cov=cov, pre=pre, as_dense=as_dense, weight_bounds=PPO_LONG_BOUNDS)\n\n\ndef ppo_sharpe_ls_port(cov=None, pre=None, as_dense=True):\n return ppo_portfolio_factory(method='max_sharpe', cov=cov, pre=pre, as_dense=as_dense, weight_bounds=PPO_UNIT_BOUNDS)\n\n\ndef ppo_vol_ls_port(cov=None, pre=None, as_dense=True):\n return ppo_portfolio_factory(method='min_volatility', cov=cov, pre=pre, as_dense=as_dense, weight_bounds=PPO_UNIT_BOUNDS)\n\n\ndef ppo_quad_ls_port(cov=None, pre=None, as_dense=True):\n return 
ppo_portfolio_factory(method='max_quadratic_utility', cov=cov, pre=pre, as_dense=as_dense, weight_bounds=PPO_UNIT_BOUNDS)\n\n\ndef ppo_portfolio_factory(method:str, cov=None, pre=None, as_dense=False, weight_bounds=None,\n risk_free_rate:float=0.02, mu:float=0.04, n_attempts=5, warn=False, throw=False):\n \"\"\"\n :param method:\n :param cov:\n :param pre:\n :param as_dense: If set to True, will force return of np.array even if supplied dataframe\n :param weight_bounds:\n :return: Can return a dictionary of variable names and weights\n \"\"\"\n\n expected_returns = var_scaled_returns(cov=cov,mu=mu,r=risk_free_rate)\n\n if weight_bounds is None:\n weight_bounds = PPO_LONG_BOUNDS\n\n if cov is None:\n cov = try_invert(pre)\n\n # Set return style\n as_series = (not as_dense) and isinstance(cov,pd.DataFrame)\n\n # Tidy up cov and send to optimizer ... repeatedly with more shrinkage as needed\n shrunk_cov = nearest_pos_def( to_symmetric( np.copy(cov) ) )\n converged = False\n warned = False\n for attempt_no in range(n_attempts):\n n_dim = np.shape(cov)[0]\n ef = EfficientFrontier(expected_returns=expected_returns, cov_matrix=shrunk_cov,\n weight_bounds=weight_bounds)\n port_method = getattr(ef, method)\n try:\n if method=='max_sharpe':\n port_method(risk_free_rate=risk_free_rate)\n else:\n port_method()\n converged = True\n except (OptimizationError, SolverError, ArpackNoConvergence, UserWarning):\n converged = False\n if converged:\n break\n else:\n warned = True\n if warn:\n print(' warning: '+method+' did not converge on attempt '+str(attempt_no))\n shrunk_cov = affine_shrink(a=shrunk_cov,phi=1.02, lmbd=0.01, copy=False)\n\n if not converged:\n if throw:\n raise NotImplementedError('pyportfolio opt failed even after shrinkage')\n else:\n print(' PyPortfolioOpt failed ... falling back to equal weights')\n from precise.skaters.portfoliostatic.equalport import equal_long_port\n return equal_long_port(cov=cov, as_dense=not as_series)\n\n if converged and warned:\n if warn:\n print(' ... 
but with shrinkage it converges okay.')\n\n weights = ef.clean_weights()\n weights = normalize_dict_values(weights)\n\n if as_series:\n return pd.Series( index=list(weights.keys()), data=list(weights.values()) )\n else:\n return dense_weights_from_dict(weights, n_dim=n_dim)\n\n\ndef long_from_cov( cov, as_dense=True ):\n \"\"\" Backward compat \"\"\"\n return ppo_vol_port(cov=cov, as_dense=as_dense)\n\n\ndef long_from_pre(pre, as_dense=True):\n \"\"\" Backward compat \"\"\"\n return ppo_vol_port(pre=pre, as_dense=as_dense)\n\n\ndef ppo_portfolio_variance(method:str, cov=None, pre=None):\n \"\"\"\n Variance of the unit min-var portfolio\n (Used in some hierarchical methods to allocate capital)\n \"\"\"\n w = ppo_portfolio_factory(method=method, pre=pre, cov=cov)\n return portfolio_variance(cov=cov,w=w)\n\n\ndef ppo_sharpe_alloc(covs:List, pres:List)->[float]:\n return _ppo_portfolio_allocation(method='max_sharpe', covs=covs, pres=pres)\n\n\ndef ppo_vol_alloc(covs:List, pres:List)->[float]:\n return _ppo_portfolio_allocation(method='min_volatility', covs=covs, pres=pres)\n\n\ndef ppo_quad_alloc(covs:List, pres:List)->[float]:\n return _ppo_portfolio_allocation(method='max_quadratic_utility', covs=covs, pres=pres)\n\n\ndef _ppo_portfolio_allocation(method:str, covs:List, pres:List)->[float]:\n \"\"\" Allocate capital between portfolios using either cov or pre matrices\n :param covs: List of covariance matrices\n :param pres: List of precision matrices\n :return: Capital allocation vector\n \"\"\"\n return normalize([ 1/ppo_portfolio_variance(method=method, cov=cov, pre=pre) for cov, pre in zip_longest(covs, pres, fillvalue=None) ])\n" ]
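A hedged usage sketch for the PyPortfolioOpt wrapper above (assumes the precise and pypfopt packages are installed; the synthetic covariance matrix is illustrative):

import numpy as np
from precise.skaters.portfoliostatic.ppoportfactory import ppo_vol_port

# Build a small synthetic positive-definite covariance matrix.
a = np.random.randn(5, 5)
cov = a @ a.T + 5 * np.eye(5)

w = ppo_vol_port(cov=cov)  # long-only minimum-volatility weights
print(w, np.sum(w))        # normalize_dict_values makes the weights sum to 1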
[ [ "numpy.shape" ], [ "numpy.array", "numpy.zeros_like" ], [ "numpy.copy", "numpy.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
piiswrong/caffe
[ "d8f676d85eb9b84221c74d0efc5ed0b34d78d3ac" ]
[ "python/caffe_util/imshow_layer.py" ]
[ "import cv2\nimport cv\nimport caffe\nimport numpy as np\nimport ast\nimport datetime\nimport time\n\nclass ImshowLayer(caffe.Layer):\n def setup(self, bottom, top):\n assert len(top) == 0, 'ImshowLayer has no output.'\n self.param_ = ast.literal_eval(self.param_str)\n if 'resize' not in self.param_ or self.param_['resize'] == 0:\n self.resize = False\n else:\n self.resize = True\n self.size = self.param_['resize']\n self.save = self.param_.get('save', None)\n self.scale = self.param_.get('scale', [])\n self.format = self.param_.get('format', [])\n\n def reshape(self, bottom, top):\n pass\n\n def forward(self, bottom, top):\n batch_size = bottom[0].num\n height = 0\n width = 0\n if self.resize:\n width = self.size * len(bottom)\n height = self.size\n else: \n for i in xrange(len(bottom)):\n width += bottom[i].width\n height = max(height, bottom[i].height)\n buff = np.zeros((height*batch_size, width, 3), dtype = np.uint8)\n #import pdb \n #pdb.set_trace()\n for i in xrange(batch_size):\n cur = 0\n for j in xrange(len(bottom)):\n img = bottom[j].data[i].transpose((1,2,0))\n if len(self.scale):\n assert len(self.scale) == len(bottom)\n img *= self.scale[j]\n img = img.astype(np.uint8)\n if len(self.format):\n assert len(self.format) == len(bottom)\n if self.format[j] == 'ycrcb':\n img = cv2.cvtColor(img, cv.CV_YCrCb2BGR)\n if img.shape[2] == 1:\n img = np.tile(img, 3)\n if self.resize:\n widthj = heightj = self.size\n img = cv2.resize(img, (self.size, self.size))\n else:\n widthj = bottom[j].width\n heightj = bottom[j].height\n buff[i*height:i*height+heightj, cur:cur+widthj, :] = img\n cur += widthj\n if self.save is None:\n cv2.imshow('buff', buff)\n cv2.waitKey(0)\n else:\n cv2.imwrite(self.save+'%f'%time.time()+'.jpg', buff)\n\n def backward(self, top, propagate_down, bottom):\n pass" ]
[ [ "numpy.zeros", "numpy.tile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]