Columns: repo_name (string, lengths 8-130), hexsha (sequence), file_path (sequence), code (sequence), apis (sequence)
goncaloasimoes/net-pro-sim
[ "77f766661229327df16bef1e6813152e02350459" ]
[ "src/DataProcessing/VisualizeCommunities.py" ]
[ "from . import Visualize #pylint: disable=relative-beyond-top-level\nimport networkx as nx\nfrom matplotlib import lines\nfrom scipy.spatial import Voronoi, voronoi_plot_2d\nimport numpy as np\n\n# TODO: Needs to be retested and fixed.\n''' Visualize or save images of the network with its various communities represented. '''\nclass VisualizeCommunities(Visualize.Visualize):\n\n def process(self, data = {}):\n ''' Do nothing. Override saving of state for every step.'''\n pass\n\n def end_step_processing(self, data = {}):\n self.show_legend = False\n # color_map, states, color_order = self._get_color_map(data['network'], data['model'])\n graph = data['network'].get_graph()\n if self.graph_layout is None:\n self.graph_layout = nx.spring_layout(graph, scale=1, seed=100)\n communities = {frozenset(graph.nodes[v]['community']) for v in graph}\n centers = []\n for community in communities:\n center_x = 0\n center_y = 0\n for node in community:\n center_x += self.graph_layout[node][0]\n center_y += self.graph_layout[node][1]\n center = [\n center_x/len(community),\n center_y/len(community),\n ]\n centers.append(center)\n print(centers)\n centers = np.array(centers)\n print(centers)\n vor = Voronoi(centers)\n self.points = centers\n self.vor = vor\n # for region in vor.regions:\n # if not -1 in region:\n # polygon = [vor.vertices[i] for i in region]\n # plt.fill(*zip(*polygon))\n self.draw( data, draw_before=self.voronoi )\n self.save_number += 1\n self.step += 1\n self.graph_layout = None # Reset layout\n \n def voronoi(self, ax):\n color = 'crimson'\n alpha = 0.7\n #ax.plot(self.points[:,0], self.points[:,1], 'o')\n #ax.plot(self.vor.vertices[:,0], self.vor.vertices[:,1], '*')\n \n lim_x = [0,0]\n lim_y = [0,0]\n for node in self.graph_layout:\n x = self.graph_layout[node][0]\n y = self.graph_layout[node][1]\n if x < lim_x[0]:\n lim_x[0] = x\n if x > lim_x[1]:\n lim_x[1] = x\n if y < lim_y[0]:\n lim_y[0] = y\n if y > lim_y[1]:\n lim_y[1] = y\n padding = 0.1\n ax.set_xlim(lim_x[0] - padding, lim_x[1] + padding); ax.set_ylim(lim_y[0] - padding, lim_y[1] + padding)\n\n for simplex in self.vor.ridge_vertices:\n simplex = np.asarray(simplex) \n if np.all(simplex >= 0):\n line = lines.Line2D(self.vor.vertices[simplex,0], self.vor.vertices[simplex,1], lw=2., color=color, linestyle='--', alpha=alpha)\n ax.add_line(line)\n # ax.plot(self.vor.vertices[simplex,0], self.vor.vertices[simplex,1], 'r-', linewidth=1)\n\n center = self.points.mean(axis=0)\n for pointidx, simplex in zip(self.vor.ridge_points, self.vor.ridge_vertices):\n simplex = np.asarray(simplex)\n if np.any(simplex < 0):\n i = simplex[simplex >= 0][0] # finite end self.Voronoi vertex\n t = self.points[pointidx[1]] - self.points[pointidx[0]] # tangent\n t /= np.linalg.norm(t)\n n = np.array([-t[1], t[0]]) # normal\n midpoint = self.points[pointidx].mean(axis=0)\n far_point = self.vor.vertices[i] + np.sign(np.dot(midpoint - center, n)) * n * 100\n line = lines.Line2D([self.vor.vertices[i,0], far_point[0]], [self.vor.vertices[i,1], far_point[1]], lw=2., color=color, linestyle='--', alpha=alpha)\n line.set_clip_on(False)\n ax.add_line( line)\n # ax.plot([self.vor.vertices[i,0], far_point[0]], [self.vor.vertices[i,1], far_point[1]], 'r--', linewidth=1)\n # voronoi_plot_2d(self.vor,ax=ax, show_vertices = False, show_points=False, line_colors='red')\n" ]
[ [ "matplotlib.lines.Line2D", "scipy.spatial.Voronoi", "numpy.any", "numpy.asarray", "numpy.all", "numpy.array", "numpy.dot", "numpy.linalg.norm" ] ]
janvonrickenbach/Chaco_wxPhoenix_py3
[ "21a10cfd81100f28e3fbc273357ac45642519f33" ]
[ "chaco/ticks.py" ]
[ "#-------------------------------------------------------------------------------\r\n#\r\n#\r\n# Written by: David C. Morrill (based on similar routines written by Eric Jones)\r\n#\r\n# Date: 2007-05-01\r\n#\r\n# (c) Copyright 2002-7 by Enthought, Inc.\r\n#\r\n#-------------------------------------------------------------------------------\r\n\"\"\" Tick generator classes and helper functions for calculating axis\r\ntick-related values (i.e., bounds and intervals).\r\n\r\n\"\"\"\r\n# Major library imports\r\nfrom numpy import arange, argsort, array, ceil, concatenate, equal, finfo, \\\r\n float64, floor, linspace, log10, minimum, ndarray, newaxis, \\\r\n putmask, shape\r\n\r\n# Enthought library imports\r\nfrom traits.api import HasTraits, Any\r\n\r\n\r\nclass AbstractTickGenerator(HasTraits):\r\n \"\"\" Abstract class for tick generators.\r\n \"\"\"\r\n\r\n def get_ticks(self,\r\n data_low,\r\n data_high,\r\n bounds_low,\r\n bounds_high,\r\n interval,\r\n use_endpoints=False,\r\n scale='linear'):\r\n \"\"\" Returns a list of ticks points in data space.\r\n\r\n Parameters\r\n ----------\r\n data_low, data_high : float\r\n The actual minimum and maximum of index values of the entire\r\n dataset.\r\n bounds_low, bounds_high : \"auto\", \"fit\", float\r\n The range for which ticks should be generated.\r\n interval : \"auto\", float\r\n If the value is a positive number, it specifies the length\r\n of the tick interval; a negative integer specifies the\r\n number of tick intervals; 'auto' specifies that the number and\r\n length of the tick intervals are automatically calculated, based\r\n on the range of the axis.\r\n use_endpoints : Boolean\r\n If True, the lower and upper bounds of the data are used as the\r\n lower and upper end points of the axis. 
If False, the end points\r\n might not fall exactly on the bounds.\r\n scale : 'linear' or 'log'\r\n The type of scale the ticks are for.\r\n\r\n Returns\r\n -------\r\n tick_list : array of floats\r\n Where ticks are to be placed.\r\n\r\n\r\n Example\r\n -------\r\n If the range of x-values in a line plot span from -15.0 to +15.0, but\r\n the plot is currently displaying only the region from 3.1 to 6.83, and\r\n the user wants the interval to be automatically computed to be some\r\n nice value, then call get_ticks() thusly::\r\n\r\n get_ticks(-15.0, 15.0, 3.1, 6.83, \"auto\")\r\n\r\n A reasonable return value in this case would be::\r\n\r\n [3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5]\r\n \"\"\"\r\n\r\n raise NotImplementedError\r\n\r\n\r\nclass DefaultTickGenerator(AbstractTickGenerator):\r\n \"\"\" An implementation of AbstractTickGenerator that simply uses the\r\n auto_ticks() and log_auto_ticks() functions.\r\n \"\"\"\r\n\r\n def get_ticks(self,\r\n data_low,\r\n data_high,\r\n bounds_low,\r\n bounds_high,\r\n interval,\r\n use_endpoints=False,\r\n scale='linear'):\r\n if scale == 'linear':\r\n return array(\r\n auto_ticks(\r\n data_low,\r\n data_high,\r\n bounds_low,\r\n bounds_high,\r\n interval,\r\n use_endpoints=False),\r\n float64)\r\n elif scale == 'log':\r\n return array(\r\n log_auto_ticks(\r\n data_low,\r\n data_high,\r\n bounds_low,\r\n bounds_high,\r\n interval,\r\n use_endpoints=False),\r\n float64)\r\n\r\n\r\nclass ShowAllTickGenerator(AbstractTickGenerator):\r\n \"\"\" Uses the abstract interface, but returns all \"positions\" instead\r\n of decimating the ticks.\r\n\r\n You must provide a sequence of values as a *positions* keyword argument\r\n to the constructor.\r\n \"\"\"\r\n # A sequence of positions for ticks.\r\n positions = Any\r\n\r\n def get_ticks(self,\r\n data_low,\r\n data_high,\r\n bounds_low,\r\n bounds_high,\r\n interval,\r\n use_endpoints=False,\r\n scale='linear'):\r\n \"\"\" Returns an array based on **positions**.\r\n \"\"\"\r\n # ignore all the high, low, etc. data and just return every position\r\n return array(self.positions, float64)\r\n\r\n\r\nclass MinorTickGenerator(DefaultTickGenerator):\r\n \"\"\" An implementation of AbstractTickGenerator that extends DefaultTickGenerator,\r\n but sets the tick interval to a smaller length.\r\n \"\"\"\r\n\r\n def get_ticks(self,\r\n data_low,\r\n data_high,\r\n bounds_low,\r\n bounds_high,\r\n interval,\r\n use_endpoints=False,\r\n scale='linear'):\r\n if interval == 'auto':\r\n # for the default interval, generate a smaller tick interval\r\n interval = auto_interval(\r\n 0, auto_interval(data_low, data_high), max_ticks=5)\r\n\r\n return super(MinorTickGenerator, self).get_ticks(\r\n data_low, data_high, bounds_low, bounds_high, interval,\r\n use_endpoints, scale)\r\n\r\n\r\n#-------------------------------------------------------------------------------\r\n# Code imported from plt/plot_utility.py:\r\n#-------------------------------------------------------------------------------\r\n\r\n\r\ndef auto_ticks(data_low,\r\n data_high,\r\n bound_low,\r\n bound_high,\r\n tick_interval,\r\n use_endpoints=True):\r\n \"\"\" Finds locations for axis tick marks.\r\n\r\n Calculates the locations for tick marks on an axis. 
The *bound_low*,\r\n *bound_high*, and *tick_interval* parameters specify how the axis end\r\n points and tick interval are calculated.\r\n\r\n Parameters\r\n ----------\r\n\r\n data_low, data_high : number\r\n The minimum and maximum values of the data along this axis.\r\n If any of the bound settings are 'auto' or 'fit', the axis\r\n traits are calculated automatically from these values.\r\n bound_low, bound_high : 'auto', 'fit', or a number.\r\n The lower and upper bounds of the axis. If the value is a number,\r\n that value is used for the corresponding end point. If the value is\r\n 'auto', then the end point is calculated automatically. If the\r\n value is 'fit', then the axis bound is set to the corresponding\r\n *data_low* or *data_high* value.\r\n tick_interval : can be 'auto' or a number\r\n If the value is a positive number, it specifies the length\r\n of the tick interval; a negative integer specifies the\r\n number of tick intervals; 'auto' specifies that the number and\r\n length of the tick intervals are automatically calculated, based\r\n on the range of the axis.\r\n use_endpoints : Boolean\r\n If True, the lower and upper bounds of the data are used as the\r\n lower and upper end points of the axis. If False, the end points\r\n might not fall exactly on the bounds.\r\n\r\n Returns\r\n -------\r\n An array of tick mark locations. The first and last tick entries are the\r\n axis end points.\r\n \"\"\"\r\n\r\n is_auto_low = (bound_low == 'auto')\r\n is_auto_high = (bound_high == 'auto')\r\n\r\n if isinstance(bound_low, str):\r\n lower = data_low\r\n else:\r\n lower = float(bound_low)\r\n\r\n if isinstance(bound_high, str):\r\n upper = data_high\r\n else:\r\n upper = float(bound_high)\r\n\r\n if (tick_interval == 'auto') or (tick_interval == 0.0):\r\n rng = abs(upper - lower)\r\n\r\n if rng == 0.0:\r\n tick_interval = 0.5\r\n lower = data_low - 0.5\r\n upper = data_high + 0.5\r\n else:\r\n tick_interval = auto_interval(lower, upper)\r\n elif tick_interval < 0:\r\n intervals = -tick_interval\r\n tick_interval = tick_intervals(lower, upper, intervals)\r\n if is_auto_low and is_auto_high:\r\n is_auto_low = is_auto_high = False\r\n lower = tick_interval * floor(lower / tick_interval)\r\n while ((abs(lower) >= tick_interval) and (\r\n (lower + tick_interval * (intervals - 1)) >= upper)):\r\n lower -= tick_interval\r\n upper = lower + tick_interval * intervals\r\n\r\n # If the lower or upper bound are set to 'auto',\r\n # calculate them based on the newly chosen tick_interval:\r\n if is_auto_low or is_auto_high:\r\n delta = 0.01 * tick_interval * (data_low == data_high)\r\n auto_lower, auto_upper = auto_bounds(data_low - delta,\r\n data_high + delta, tick_interval)\r\n if is_auto_low:\r\n lower = auto_lower\r\n if is_auto_high:\r\n upper = auto_upper\r\n\r\n # Compute the range of ticks values:\r\n start = floor(lower / tick_interval) * tick_interval\r\n end = floor(upper / tick_interval) * tick_interval\r\n # If we return the same value for the upper bound and lower bound, the\r\n # layout code will not be able to lay out the tick marks (divide by zero).\r\n if start == end:\r\n lower = start = start - tick_interval\r\n upper = end = start - tick_interval\r\n\r\n if upper > end:\r\n end += tick_interval\r\n ticks = arange(start, end + (tick_interval / 2.0), tick_interval)\r\n\r\n if len(ticks) < 2:\r\n ticks = array(((lower - lower * 1.0e-7), lower))\r\n if (not is_auto_low) and use_endpoints:\r\n ticks[0] = lower\r\n if (not is_auto_high) and use_endpoints:\r\n ticks[-1] = 
upper\r\n\r\n return [tick for tick in ticks if tick >= bound_low and tick <= bound_high]\r\n\r\n\r\n#--------------------------------------------------------------------------------\r\n# Compute the best tick interval for a specified data range:\r\n#--------------------------------------------------------------------------------\r\n\r\n\r\ndef heckbert_interval(data_low, data_high, numticks=8):\r\n \"\"\"\r\n Returns a \"nice\" range and interval for a given data range and a preferred\r\n number of ticks. From Paul Heckbert's algorithm in Graphics Gems.\r\n \"\"\"\r\n range = _nice(data_high - data_low)\r\n d = _nice(range / (numticks - 1), round=True)\r\n graphmin = floor(data_low / d) * d\r\n graphmax = ceil(data_high / d) * d\r\n #nfrac = max(-floor(log10(d)), 0)\r\n return graphmin, graphmax, d\r\n\r\n\r\ndef _nice(x, round=False):\r\n \"\"\" if round is False, then use ceil(range) \"\"\"\r\n expv = floor(log10(x))\r\n f = x / pow(10, expv)\r\n if round:\r\n if f < 1.5:\r\n nf = 1.0\r\n elif f < 3.0:\r\n nf = 2.0\r\n elif f < 7.0:\r\n nf = 5.0\r\n else:\r\n nf = 10.0\r\n else:\r\n if f <= 1.0:\r\n nf = 1.0\r\n elif f <= 2.0:\r\n nf = 2.0\r\n elif f <= 5.0:\r\n nf = 5.0\r\n else:\r\n nf = 10.0\r\n return nf * pow(10, expv)\r\n\r\n\r\ndef auto_interval(data_low, data_high, max_ticks=9):\r\n \"\"\" Calculates the tick interval for a range.\r\n\r\n The boundaries for the data to be plotted on the axis are::\r\n\r\n data_bounds = (data_low,data_high)\r\n\r\n The function chooses the number of tick marks, which can be between\r\n 3 and max_ticks marks (including end points), and chooses tick intervals at\r\n 1, 2, 2.5, 5, 10, 20, ...\r\n\r\n Returns\r\n -------\r\n interval : float\r\n tick mark interval for axis\r\n \"\"\"\r\n #if data_high<data_low:\r\n #hold=data_high\r\n #data_high=data_low\r\n #data_low=hold\r\n range = float(data_high) - float(data_low)\r\n # We'll choose from between 2 and 8 tick marks.\r\n # Preference is given to more ticks:\r\n # Note reverse order and see kludge below...\r\n divisions = arange(max_ticks - 1, 2.0,\r\n -1.0) # for max_ticks=9, ( 7, 6, ..., 3 )\r\n\r\n # Calculate the intervals for the divisions:\r\n candidate_intervals = range / divisions\r\n\r\n # Get magnitudes and mantissas for each candidate:\r\n magnitudes = 10.0**floor(log10(candidate_intervals))\r\n mantissas = candidate_intervals / magnitudes\r\n\r\n # List of \"pleasing\" intervals between ticks on graph.\r\n # Only the first magnitude are listed, higher mags others are inferred:\r\n magic_intervals = array((1.0, 2.0, 2.5, 5.0, 10.0))\r\n\r\n # Calculate the absolute differences between the candidates\r\n # (with magnitude removed) and the magic intervals:\r\n differences = abs(magic_intervals[:, newaxis] - mantissas)\r\n\r\n # Find the division and magic interval combo that produce the\r\n # smallest differences:\r\n\r\n # KLUDGE: 'argsort' doesn't preserve the order of equal values,\r\n # so we subtract a small, index dependent amount from each difference\r\n # to force correct ordering.\r\n sh = shape(differences)\r\n small = 2.2e-16 * arange(sh[1]) * arange(sh[0])[:, newaxis]\r\n small = small[::-1, ::-1] #reverse the order\r\n differences = differences - small\r\n\r\n # ? Numeric should allow keyword \"axis\" ? 
comment out for now\r\n #best_mantissa = minimum.reduce(differences,axis=0)\r\n #best_magic = minimum.reduce(differences,axis=-1)\r\n best_mantissa = minimum.reduce(differences, 0)\r\n best_magic = minimum.reduce(differences, -1)\r\n magic_index = argsort(best_magic)[0]\r\n mantissa_index = argsort(best_mantissa)[0]\r\n\r\n # The best interval is the magic_interval multiplied by the magnitude\r\n # of the best mantissa:\r\n interval = magic_intervals[magic_index]\r\n magnitude = magnitudes[mantissa_index]\r\n result = interval * magnitude\r\n if result == 0.0:\r\n result = finfo(float).eps\r\n return result\r\n\r\n\r\n#--------------------------------------------------------------------------------\r\n# Compute the best tick interval length to achieve a specified number of tick\r\n# intervals:\r\n#--------------------------------------------------------------------------------\r\n\r\n\r\ndef tick_intervals(data_low, data_high, intervals):\r\n \"\"\" Computes the best tick interval length to achieve a specified number of\r\n tick intervals.\r\n\r\n Parameters\r\n ----------\r\n data_low, data_high : number\r\n The minimum and maximum values of the data along this axis.\r\n If any of the bound settings are 'auto' or 'fit', the axis\r\n traits are calculated automatically from these values.\r\n intervals : number\r\n The desired number of intervals\r\n\r\n Returns\r\n -------\r\n Returns a float indicating the tick interval length.\r\n \"\"\"\r\n range = float(data_high - data_low)\r\n if range == 0.0:\r\n range = 1.0\r\n interval = range / intervals\r\n factor = 10.0**floor(log10(interval))\r\n interval /= factor\r\n\r\n if interval < 2.0:\r\n interval = 2.0\r\n index = 0\r\n elif interval < 2.5:\r\n interval = 2.5\r\n index = 1\r\n elif interval < 5.0:\r\n interval = 5.0\r\n index = 2\r\n else:\r\n interval = 10.0\r\n index = 3\r\n\r\n while True:\r\n result = interval * factor\r\n if ((floor(data_low / result) * result) +\r\n (intervals * result) >= data_high):\r\n return result\r\n index = (index + 1) % 4\r\n interval *= (2.0, 1.25, 2.0, 2.0)[index]\r\n\r\n\r\ndef log_auto_ticks(data_low,\r\n data_high,\r\n bound_low,\r\n bound_high,\r\n tick_interval,\r\n use_endpoints=True):\r\n \"\"\"Like auto_ticks(), but for log scales.\"\"\"\r\n tick_goal = 15\r\n magic_numbers = [1, 2, 5]\r\n explicit_ticks = False\r\n\r\n if data_low <= 0.0:\r\n return []\r\n\r\n if tick_interval != 'auto':\r\n if tick_interval < 0:\r\n tick_goal = -tick_interval\r\n else:\r\n magic_numbers = [tick_interval]\r\n explicit_ticks = True\r\n\r\n if data_low > data_high:\r\n data_low, data_high = data_high, data_low\r\n\r\n log_low = log10(data_low)\r\n log_high = log10(data_high)\r\n log_interval = log_high - log_low\r\n\r\n if log_interval < 1.0:\r\n # If less than a factor of 10 separates the data, just use the normal\r\n # linear approach\r\n return auto_ticks(\r\n data_low,\r\n data_high,\r\n bound_low,\r\n bound_high,\r\n tick_interval,\r\n use_endpoints=False)\r\n\r\n elif log_interval < (tick_goal + 1) / 2 or explicit_ticks:\r\n # If there's enough space, try to put lines at the magic number multipliers\r\n # inside each power of ten\r\n\r\n # Try each interval to see how many ticks we get\r\n for interval in magic_numbers:\r\n ticklist = []\r\n for exp in range(int(floor(log_low)), int(ceil(log_high))):\r\n for multiplier in linspace(\r\n interval, 10.0, round(10.0 / interval), endpoint=1):\r\n tick = 10**exp * multiplier\r\n if tick >= data_low and tick <= data_high:\r\n ticklist.append(tick)\r\n if 
len(ticklist) < tick_goal + 3 or explicit_ticks:\r\n return ticklist\r\n else:\r\n # We put lines at every power of ten or less\r\n startlog = ceil(log_low)\r\n endlog = floor(log_high)\r\n interval = ceil((endlog - startlog) / 9.0)\r\n expticks = arange(startlog, endlog, interval)\r\n # There's no function that is like arange but inclusive, so\r\n # we have to check whether the endpoint should be included.\r\n if (endlog - startlog) % interval == 0.0:\r\n expticks = concatenate([expticks, [endlog]])\r\n return 10**expticks\r\n\r\n\r\n#-------------------------------------------------------------------------------\r\n# Compute the best lower and upper axis bounds for a range of data:\r\n#-------------------------------------------------------------------------------\r\n\r\n\r\ndef auto_bounds(data_low, data_high, tick_interval):\r\n \"\"\" Calculates appropriate upper and lower bounds for the axis from\r\n the data bounds and the given axis interval.\r\n\r\n The boundaries hit either exactly on the lower and upper values\r\n or on the tick mark just beyond the lower and upper values.\r\n \"\"\"\r\n return (calc_bound(data_low, tick_interval, False),\r\n calc_bound(data_high, tick_interval, True))\r\n\r\n\r\n#-------------------------------------------------------------------------------\r\n# Compute the best axis endpoint for a specified data value:\r\n#-------------------------------------------------------------------------------\r\n\r\n\r\ndef calc_bound(end_point, tick_interval, is_upper):\r\n \"\"\" Finds an axis end point that includes the value *end_point*.\r\n\r\n If the tick mark interval results in a tick mark hitting directly on the\r\n end point, *end_point* is returned. Otherwise, the location of the tick\r\n mark just past *end_point* is returned. The *is_upper* parameter\r\n specifies whether *end_point* is at the upper (True) or lower (False)\r\n end of the axis.\r\n \"\"\"\r\n quotient, remainder = divmod(end_point, tick_interval)\r\n if ((remainder == 0.0) or ((\r\n (tick_interval - remainder) / tick_interval) < 0.00001)):\r\n return end_point\r\n\r\n c1 = (quotient + 1.0) * tick_interval\r\n c2 = quotient * tick_interval\r\n if is_upper:\r\n return max(c1, c2)\r\n return min(c1, c2)\r\n" ]
[ [ "numpy.ceil", "numpy.concatenate", "numpy.floor", "numpy.argsort", "numpy.arange", "numpy.log10", "numpy.shape", "numpy.array", "numpy.minimum.reduce", "numpy.finfo" ] ]
slowy07/keras
[ "d3688b72924a4235598f0f80038de8c897f44799" ]
[ "keras/saving/saved_model/serialized_attributes.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Helper classes that list&validate all attributes to serialize to SavedModel.\n\"\"\"\n\nfrom keras.saving.saved_model import constants\nfrom keras.saving.saved_model import order_preserving_set as ops\nfrom keras.saving.saved_model import save_impl\nfrom keras.utils.generic_utils import LazyLoader\nimport tensorflow.compat.v2 as tf\n\n# TODO(b/134426265): Switch back to single-quotes to match the rest of the file\n# once the issue with copybara is fixed.\n# pylint:disable=g-inconsistent-quotes\nbase_layer = LazyLoader(\n \"base_layer\", globals(),\n \"keras.engine.base_layer\")\ntraining_lib = LazyLoader(\n \"training_lib\", globals(),\n \"keras.engine.training\")\nmetrics = LazyLoader(\"metrics\", globals(),\n \"keras.metrics\")\nrecurrent = LazyLoader(\n \"recurrent\", globals(),\n \"keras.layers.recurrent\")\n# pylint:enable=g-inconsistent-quotes\n\n\nclass SerializedAttributes(object):\n \"\"\"Class that tracks and validates all serialization attributes.\n\n Keras models contain many Python-defined components. For example, the\n trainable_variable property lists the model's trainable variables by\n recursively retrieving the trainable variables from each of the child layers.\n Another example is model.call, a python function that calls child layers and\n adds ops to the backend graph.\n\n Only Tensorflow checkpointable objects and functions can be serialized to\n SavedModel. Serializing a Keras model as-is results in a checkpointable object\n that does not resemble a Keras model at all. Thus, extra checkpointable\n objects and functions must be created during serialization.\n\n **Defining new serialized attributes**\n Child classes should be defined using:\n SerializedAttributes.with_attributes(\n 'name', checkpointable_objects=[...], functions=[...], copy_from=[...])\n This class is used to cache generated checkpointable objects and functions,\n ensuring that new objects and functions are generated a single time.\n\n **Usage during serialization**\n Each Layer/Model object should have a corresponding instance of\n SerializedAttributes. Create a new instance by calling\n `SerializedAttributes.new(obj)`. Objects and functions may be saved using\n `.set_and_validate_checkpointable_objects`/`.set_and_and_validate_functions`.\n The properties `.checkpointable_objects` and `.functions` returns the cached\n values.\n\n **Adding/changing attributes to save to SavedModel**\n 1. Change the call to `SerializedAttributes.with_attributes` in the correct\n class:\n - CommonEndpoints: Base attributes to be added during serialization. If\n these attributes are present in a Trackable object, it can be\n deserialized to a Keras Model.\n - LayerAttributes: Attributes to serialize for Layer objects.\n - ModelAttributes: Attributes to serialize for Model objects.\n 2. Update class docstring\n 3. 
Update arguments to any calls to `set_and_validate_*`. For example, if\n `call_raw_tensors` is added to the ModelAttributes function list, then\n a `call_raw_tensors` function should be passed to\n `set_and_validate_functions`.\n\n **Common endpoints vs other attributes**\n Only common endpoints are attached directly to the root object. Keras-specific\n attributes are saved to a separate trackable object with the name \"keras_api\".\n The number of objects attached to the root is limited because any naming\n conflicts will cause user code to break.\n\n Another reason is that this will only affect users who call\n `tf.saved_model.load` instead of `tf.keras.models.load_model`. These are\n advanced users who are likely to have defined their own tf.functions and\n trackable objects. The added Keras-specific attributes are kept out of the way\n in the \"keras_api\" namespace.\n\n Properties defined in this class may be used to filter out keras-specific\n attributes:\n - `functions_to_serialize`: Returns dict of functions to attach to the root\n object.\n - `checkpointable_objects_to_serialize`: Returns dict of objects to attach to\n the root object (including separate trackable object containing\n keras-specific attributes)\n\n All changes to the serialized attributes must be backwards-compatible, so\n attributes should not be removed or modified without sufficient justification.\n \"\"\"\n\n @staticmethod\n def with_attributes(\n name, checkpointable_objects=None, functions=None, copy_from=None):\n \"\"\"Creates a subclass with all attributes as specified in the arguments.\n\n Args:\n name: Name of subclass\n checkpointable_objects: List of checkpointable objects to be serialized\n in the SavedModel.\n functions: List of functions to be serialized in the SavedModel.\n copy_from: List of other SerializedAttributes subclasses. 
The returned\n class will copy checkpoint objects/functions from each subclass.\n\n Returns:\n Child class with attributes as defined in the `checkpointable_objects`\n and `functions` lists.\n \"\"\"\n checkpointable_objects = checkpointable_objects or []\n functions = functions or []\n\n if copy_from is not None:\n for cls in copy_from:\n checkpointable_objects.extend(cls.all_checkpointable_objects)\n functions.extend(cls.all_functions)\n\n # OrderPreservingSets are used here to guarantee serialization determinism\n # of Keras objects.\n classdict = {\n 'all_checkpointable_objects':\n ops.OrderPreservingSet(checkpointable_objects),\n 'all_functions':\n ops.OrderPreservingSet(functions),\n }\n return type(name, (SerializedAttributes,), classdict)\n\n @staticmethod\n def new(obj):\n \"\"\"Returns a new SerializedAttribute object.\"\"\"\n if isinstance(obj, training_lib.Model):\n return ModelAttributes()\n elif isinstance(obj, metrics.Metric):\n return MetricAttributes()\n elif isinstance(obj, recurrent.RNN):\n return RNNAttributes()\n elif isinstance(obj, base_layer.Layer):\n return LayerAttributes()\n else:\n raise TypeError('Internal error during serialization: Expected Keras '\n 'Layer object, got {} of type {}'.format(obj, type(obj)))\n\n def __init__(self):\n self._object_dict = {}\n self._function_dict = {}\n self._keras_trackable = tf.__internal__.tracking.AutoTrackable()\n\n @property\n def functions(self):\n \"\"\"Returns dictionary of all functions.\"\"\"\n return {key: value for key, value in self._function_dict.items()\n if value is not None}\n\n @property\n def checkpointable_objects(self):\n \"\"\"Returns dictionary of all checkpointable objects.\"\"\"\n return {key: value for key, value in self._object_dict.items()\n if value is not None}\n\n @property\n def functions_to_serialize(self):\n \"\"\"Returns functions to attach to the root object during serialization.\"\"\"\n functions = {}\n for key, v in self.functions.items():\n if key in CommonEndpoints.all_functions:\n functions[key] = (v.wrapped_call if isinstance(v, save_impl.LayerCall)\n else v)\n return functions\n\n @property\n def objects_to_serialize(self):\n \"\"\"Returns objects to attach to the root object during serialization.\"\"\"\n objects = {key: value for key, value in self.checkpointable_objects.items()\n if key in CommonEndpoints.all_checkpointable_objects}\n objects[constants.KERAS_ATTR] = self._keras_trackable\n return objects\n\n def set_and_validate_functions(self, function_dict):\n \"\"\"Saves function dictionary, and validates dictionary values.\"\"\"\n for key in self.all_functions:\n if key in function_dict:\n if (function_dict[key] is not None and # Not all functions are required\n not isinstance(function_dict[key],\n (tf.__internal__.function.Function,\n tf.types.experimental.ConcreteFunction,\n save_impl.LayerCall))):\n raise ValueError(\n 'Function dictionary contained a non-function object: {} (for key'\n ' {})'.format(function_dict[key], key))\n fn = function_dict[key]\n self._function_dict[key] = fn\n\n # Extract TensorFlow `Function` from LayerCall.\n tf_fn = fn.wrapped_call if isinstance(fn, save_impl.LayerCall) else fn\n setattr(self._keras_trackable, key, tf_fn)\n else:\n raise ValueError('Function {} missing from serialized function dict.'\n .format(key))\n return self.functions\n\n def set_and_validate_objects(self, object_dict):\n \"\"\"Saves objects to a dictionary, and validates the values.\"\"\"\n for key in self.all_checkpointable_objects:\n if key in object_dict:\n if not 
isinstance(object_dict[key], tf.__internal__.tracking.Trackable):\n raise ValueError(\n 'Object dictionary contained a non-trackable object: {} (for key'\n ' {})'.format(object_dict[key], key))\n self._object_dict[key] = object_dict[key]\n setattr(self._keras_trackable, key, object_dict[key])\n else:\n raise ValueError(\n 'Object {} missing from serialized object dict.'.format(key))\n return self.checkpointable_objects\n\n\nclass CommonEndpoints(SerializedAttributes.with_attributes(\n 'CommonEndpoints',\n checkpointable_objects=['variables', 'trainable_variables',\n 'regularization_losses'],\n functions=['__call__', 'call_and_return_all_conditional_losses',\n '_default_save_signature'])):\n \"\"\"Common endpoints shared by all models loadable by Keras.\n\n List of all attributes:\n variables: List of all variables in the model and its sublayers.\n trainable_variables: List of all trainable variables in the model and its\n sublayers.\n regularization_losses: List of all unconditional losses (losses not\n dependent on the inputs) in the model and its sublayers.\n __call__: Function that takes inputs and returns the outputs of the model\n call function.\n call_and_return_all_conditional_losses: Function that returns a tuple of\n (call function outputs, list of all losses that depend on the inputs).\n _default_save_signature: Traced model call function. This is only included\n if the top level exported object is a Keras model.\n \"\"\"\n\n\nclass LayerAttributes(SerializedAttributes.with_attributes(\n 'LayerAttributes',\n checkpointable_objects=['non_trainable_variables', 'layers', 'metrics',\n 'layer_regularization_losses', 'layer_metrics'],\n functions=['call_and_return_conditional_losses', 'activity_regularizer_fn'],\n copy_from=[CommonEndpoints]\n )):\n \"\"\"Layer checkpointable objects + functions that are saved to the SavedModel.\n\n List of all attributes:\n All attributes from CommonEndpoints\n non_trainable_variables: List of non-trainable variables in the layer and\n its sublayers.\n layers: List of all sublayers.\n metrics: List of all metrics in the layer and its sublayers.\n call_and_return_conditional_losses: Function that takes inputs and returns a\n tuple of (outputs of the call function, list of input-dependent losses).\n The list of losses excludes the activity regularizer function, which is\n separate to allow the deserialized Layer object to define a different\n activity regularizer.\n activity_regularizer_fn: Callable that returns the activity regularizer loss\n layer_regularization_losses: List of losses owned only by this layer.\n layer_metrics: List of metrics owned by this layer.\n \"\"\"\n\n\nclass ModelAttributes(SerializedAttributes.with_attributes(\n 'ModelAttributes',\n copy_from=[LayerAttributes])):\n \"\"\"Model checkpointable objects + functions that are saved to the SavedModel.\n\n List of all attributes:\n All attributes from LayerAttributes (including CommonEndpoints)\n \"\"\"\n # TODO(kathywu): Add attributes `compile_losses` and `compile_metrics`, which\n # list all losses and metrics defined by `model.compile`.\n\n\nclass MetricAttributes(\n SerializedAttributes.with_attributes(\n 'MetricAttributes',\n checkpointable_objects=['variables'],\n functions=[],\n )):\n \"\"\"Attributes that are added to Metric objects when saved to SavedModel.\n\n List of all attributes:\n variables: list of all variables\n \"\"\"\n pass\n\n\nclass RNNAttributes(SerializedAttributes.with_attributes(\n 'RNNAttributes',\n checkpointable_objects=['states'],\n 
copy_from=[LayerAttributes])):\n \"\"\"RNN checkpointable objects + functions that are saved to the SavedModel.\n\n List of all attributes:\n All attributes from LayerAttributes (including CommonEndpoints)\n states: List of state variables\n \"\"\"\n" ]
[ [ "tensorflow.compat.v2.__internal__.tracking.AutoTrackable" ] ]
jackromo/GSOCMcgillApplication
[ "769fdb2529008f7812a90ce7aba306e2bc630203" ]
[ "mcgill_app/graphs.py" ]
[ "\"\"\"\n.. module:: graphs\n :synopsis: All graph plotting and creation facilities.\n\n.. moduleauthor:: Jack Romo <[email protected]>\n\n\"\"\"\n\nfrom __future__ import division\nimport matplotlib.pyplot as plt\n\n\nclass FunctionsGraph(object):\n \"\"\"\n A graph that wraps around matplotlib.pyplot for plotting PlottableFunctions.\n \"\"\"\n\n def __init__(self, x_label=\"\", y_label=\"\", title=\"\"):\n \"\"\"\n :type x_label: str\n :param x_label: Label of x axis.\n :type y_label: str\n :param y_label: Label of y axis.\n :type title: str\n :param title: Title of graph displayed directly above.\n \"\"\"\n self.x_label = x_label\n self.y_label = y_label\n self.title = title\n self.functions = []\n\n def add_plotted_function(self, func, style=\"g-\", label=\"\"):\n \"\"\"\n Append a PlottedFunction to the graph, which will be drawn on the graph when plotted.\n\n :type func: PlottedFunction\n :param func: The function to be added to the graph.\n :type style: str\n :param style: The styling of the function's line on the graph. Must be in matplotlib style.\n :type label: str\n :param label: Name of function that will be put in legend of graph.\n :returns: Nothing.\n \"\"\"\n self.functions.append({\"function\": func,\n \"style\": style,\n \"label\": label})\n\n def plot(self, x_range=(0, 10), point_spacing=1.0, unit_factor_x=1.0, unit_factor_y=1.0):\n \"\"\"\n Plots graph of all functions across a specified interval.\n\n :type x_range: tuple\n :param x_range: A 2-tuple specifying lowest and highest x values on x-axis.\n :type point_spacing: float\n :param point_spacing: The space between x values of each plotted point on the graph.\n :type unit_factor_x: float\n :param unit_factor_x: Factor to multiply x values by to get into correct units on graph.\n :type unit_factor_y: float\n :param unit_factor_y: Factor to multiply x values by to get into correct units on graph.\n :returns: Nothing.\n \"\"\"\n for func_map in self.functions:\n function = func_map[\"function\"]\n xs, ys = function.get_xy_vals(x_range=x_range, point_spacing=point_spacing)\n plt.plot([x * unit_factor_x for x in xs],\n [y * unit_factor_y for y in ys], func_map[\"style\"], label=func_map[\"label\"])\n plt.legend()\n plt.xlabel(self.x_label)\n plt.ylabel(self.y_label)\n plt.suptitle(self.title, fontsize=12)\n plt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
jshi31/NAFAE
[ "3421070d966877bbeb33d2d9b26a9d755a178589" ]
[ "lib/model/transformer/Modules.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport numpy as np\nimport pdb\n\n__author__ = \"Yu-Hsiang Huang\"\n\nclass Linear(nn.Module):\n ''' Simple Linear layer with xavier init '''\n def __init__(self, d_in, d_out, bias=True):\n super(Linear, self).__init__()\n self.linear = nn.Linear(d_in, d_out, bias=bias)\n init.xavier_normal(self.linear.weight)\n\n def forward(self, x):\n return self.linear(x)\n\nclass Bottle(nn.Module):\n ''' Perform the reshape routine before and after an operation '''\n\n def forward(self, input):\n if len(input.size()) <= 2:\n return super(Bottle, self).forward(input)\n size = input.size()[:2]\n out = super(Bottle, self).forward(input.view(size[0]*size[1], -1))\n return out.view(size[0], size[1], -1)\n\nclass BottleLinear(Bottle, Linear):\n ''' Perform the reshape routine before and after a linear projection '''\n pass\n\nclass BottleSoftmax(Bottle, nn.Softmax):\n ''' Perform the reshape routine before and after a softmax operation'''\n pass\n\nclass LayerNormalization(nn.Module):\n ''' Layer normalization module '''\n\n def __init__(self, d_hid, eps=1e-3):\n super(LayerNormalization, self).__init__()\n\n self.eps = eps\n self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True)\n self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True)\n\n def forward(self, z):\n if z.size(1) == 1:\n return z\n\n mu = torch.mean(z, keepdim=True, dim=-1)\n sigma = torch.std(z, keepdim=True, dim=-1)\n ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)\n ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(ln_out)\n\n return ln_out\n\nclass BatchBottle(nn.Module):\n ''' Perform the reshape routine before and after an operation '''\n\n def forward(self, input):\n if len(input.size()) <= 2:\n return super(BatchBottle, self).forward(input)\n size = input.size()[1:]\n out = super(BatchBottle, self).forward(input.view(-1, size[0]*size[1]))\n return out.view(-1, size[0], size[1])\n\nclass BottleLayerNormalization(BatchBottle, LayerNormalization):\n ''' Perform the reshape routine before and after a layer normalization'''\n pass\n\nclass ScaledDotProductAttention(nn.Module):\n ''' Scaled Dot-Product Attention '''\n\n def __init__(self, d_model, attn_dropout=0.1):\n super(ScaledDotProductAttention, self).__init__()\n self.temper = np.power(d_model, 0.5)\n self.dropout = nn.Dropout(attn_dropout)\n self.softmax = BottleSoftmax()\n\n def forward(self, q, k, v, attn_mask=None):\n\n attn = torch.bmm(q, k.transpose(1, 2)) / self.temper\n if attn_mask is not None:\n\n assert attn_mask.size() == attn.size(), \\\n 'Attention mask shape {} mismatch ' \\\n 'with Attention logit tensor shape ' \\\n '{}.'.format(attn_mask.size(), attn.size())\n\n attn.data.masked_fill_(attn_mask, -float('inf'))\n\n attn = self.softmax(attn)\n attn = self.dropout(attn)\n output = torch.bmm(attn, v)\n\n return output, attn\n" ]
[ [ "torch.ones", "torch.nn.Linear", "torch.std", "torch.nn.init.xavier_normal", "numpy.power", "torch.zeros", "torch.bmm", "torch.nn.Dropout", "torch.mean" ] ]
jgollub1/tennis_match_prediction
[ "1ccf0ecd5ddb5d98da2d3610e4890fcab844dfcc" ]
[ "src/data_functions.py" ]
[ "import os\nimport sys\n\nsys.path.insert(0, './sackmann')\n\nimport re\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport elo_538 as elo\nfrom tennisMatchProbability import matchProb\nfrom processing_util import normalize_name\nfrom data_classes import stats_52, adj_stats_52, tny_52, commop_stats\nfrom collections import defaultdict\nfrom globals import COMMOP_START_YEAR, EPSILON\n\npd.options.mode.chained_assignment = None\n\n'''\nconcatenate original match dataframes from years\n(start_y, end_y)\n'''\ndef concat_data(start_y, end_y, tour):\n match_year_list = []\n for i in range(start_y, end_y+1):\n f_name = \"../match_data_formatted/{}_matches_{}.csv\".format(tour, i)\n try:\n match_year_list.append(pd.read_csv(f_name))\n except:\n print('could not find file for year: ', i)\n full_match_df = pd.concat(match_year_list, ignore_index = True)\n return full_match_df.sort_values(by=['tny_date','tny_name','match_num'], ascending=True).reset_index(drop=True)\n\n'''\nmatch data preprocessing\n'''\ndef format_match_df(df,tour,ret_strings=[],abd_strings=[]):\n cols = [u'tourney_id', u'tourney_name', u'surface', u'draw_size', u'tourney_date',\n u'match_num', u'winner_name', u'loser_name', u'score', u'best_of', u'w_svpt',\n u'w_1stWon', u'w_2ndWon', u'l_svpt', u'l_1stWon', u'l_2ndWon']\n df = df[cols]\n df = df.rename(columns={'winner_name':'w_name','loser_name':'l_name','tourney_id':'tny_id',\\\n 'tourney_name':'tny_name','tourney_date':'tny_date'})\n\n df['w_name'] = [normalize_name(x,tour) for x in df['w_name']]\n df['l_name'] = [normalize_name(x,tour) for x in df['l_name']]\n df['tny_name'] = ['Davis Cup' if 'Davis Cup' in s else s for s in df['tny_name']]\n df['tny_name'] = [s.replace('Australian Chps.','Australian Open').replace('Australian Open-2',\\\n 'Australian Open').replace('U.S. 
National Chps.','US Open') for s in df['tny_name']]\n df['is_gs'] = (df['tny_name'] == 'Australian Open') | (df['tny_name'] == 'Roland Garros') |\\\n (df['tny_name'] == 'Wimbledon') | (df['tny_name'] == 'US Open')\n\n # format dates\n df['tny_date'] = [datetime.datetime.strptime(str(x), \"%Y%m%d\").date() for x in df['tny_date']]\n df['match_year'] = [x.year for x in df['tny_date']]\n df['match_month'] = [x.month for x in df['tny_date']]\n df['match_year'] = df['match_year'] + (df['match_month'] == 12) # correct december start dates\n df['match_month'] = [1 if month==12 else month for month in df['match_month']] # to following year\n df['score'] = [re.sub(r\"[\\(\\[].*?[\\)\\]]\", \"\", str(s)) for s in df['score']] # str(s) fixes any nans\n df['score'] = ['RET' if 'RET' in s else s for s in df['score']]\n df['w_swon'], df['l_swon'] = df['w_1stWon']+df['w_2ndWon'], df['l_1stWon']+df['l_2ndWon']\n df['w_rwon'], df['l_rwon'] = df['l_svpt']-df['l_swon'], df['w_svpt']-df['w_swon']\n df['w_rpt'], df['l_rpt'] = df['l_svpt'], df['w_svpt']\n df.drop(['w_1stWon','w_2ndWon','l_1stWon','l_2ndWon'], axis=1, inplace=True)\n\n # remove matches involving a retirement\n abd_d, ret_d = set(abd_strings), set(ret_strings)\n df['score'] = ['ABN' if score.split(' ')[-1] in abd_d else score for score in df['score']]\n df['score'] = ['RET' if score in ret_d else score for score in df['score']]\n return df.loc[(df['score'] != 'ABN') & (df['score'] != 'RET')].reset_index(drop=True)\n\n'''\noriginal dataset labels columns by 'w_'/'l_'\nchange 'w'/'l' to 'p0','p1' (where p0 is the higher ranked player, according to Elo ratings)\n'''\n# TODO: refactor this into two functions\ndef change_labels(df, cols):\n # change w,l TO p0,p1\n for col in cols:\n df['p0'+col] = [df['l'+col][i] if df['winner'][i] else df['w'+col][i] for i in range(len(df))]\n df['p1'+col] = [df['w'+col][i] if df['winner'][i] else df['l'+col][i] for i in range(len(df))]\n\n # add s/r pct columns\n p_hat = np.sum([df['p0_52_swon'],df['p1_52_swon']])/np.sum([df['p0_52_svpt'],df['p1_52_svpt']])\n for label in ['p0','p1']:\n df[label+'_s_pct'] = [p_hat if x==0 else x for x in np.nan_to_num(df[label+'_52_swon']/df[label+'_52_svpt'])]\n df[label+'_r_pct'] = [1-p_hat if x==0 else x for x in np.nan_to_num(df[label+'_52_rwon']/df[label+'_52_rpt'])]\n df[label+'_sf_s_pct'] = [p_hat if x==0 else x for x in np.nan_to_num(df[label+'_sf_52_swon']/df[label+'_sf_52_svpt'])]\n df[label+'_sf_r_pct'] = [1-p_hat if x==0 else x for x in np.nan_to_num(df[label+'_sf_52_rwon']/df[label+'_sf_52_rpt'])]\n\n for label in ['w', 'l']:\n df.drop([label + col for col in cols], axis=1, inplace=True)\n\n df['tny_name'] = [s if s==s else 'Davis Cup' for s in df['tny_name']]\n return df\n\n'''\noriginal dataset labels columns by 'w_'/'l_'\nchange 'w'/'l' to 'p0','p1' (where p0 is the higher ranked player, according to Elo ratings)\n(without extra formatting)\n'''\ndef change_labels_v2(df, cols):\n # change w,l TO p0,p1\n for col in cols:\n df['p0'+col] = [df['l'+col][i] if df['winner'][i] else df['w'+col][i] for i in range(len(df))]\n df['p1'+col] = [df['w'+col][i] if df['winner'][i] else df['l'+col][i] for i in range(len(df))]\n\n for label in ['w', 'l']:\n df.drop([label + col for col in cols], axis=1, inplace=True)\n\n return df\n\n'''\nconfirm that match serve/return stats are not null\n'''\ndef validate(row, label):\n return row[label+'_swon']==row[label+'_swon'] and row[label+'_svpt']==row[label+'_svpt'] \\\n and row[label+'_rwon']==row[label+'_rwon'] and 
row[label+'_rpt']==row[label+'_rpt']\n\n'''\nfrom start_ind (a year before start_year), collect cumulative\n12-month s/r stats prior to each match\n'''\ndef get_current_52_stats(df, start_ind):\n players_stats = {}\n active_players = {}\n w_l = ['p0', 'p1']\n start_date = (df['match_year'][start_ind],df['match_month'][start_ind])\n avg_stats = stats_52(start_date)\n avg_stats.update(start_date,(6.4,10,3.6,10)) # set as prior so first row is not nan\n\n for i, row in df[start_ind:].iterrows():\n date = row['match_year'],row['match_month']\n avg_stats.set_month(date)\n for k,label in enumerate(w_l):\n if row[label+'_name'] not in players_stats:\n players_stats[row[label+'_name']] = stats_52(date)\n # store serving stats prior to match, update current month\n players_stats[row[label+'_name']].set_month(date)\n if validate(row, label):\n match_stats = (row[label+'_swon'],row[label+'_svpt'],row[w_l[1-k]+'_svpt']-\\\n row[w_l[1-k]+'_swon'],row[w_l[1-k]+'_svpt'])\n players_stats[row[label+'_name']].update(date,match_stats)\n avg_stats.update(date,match_stats)\n\n active_players[row[label+'_name']] = 1 # log active player\n\n # update every player to current month\n for player in active_players.keys():\n players_stats[player].set_month(date)\n\n players = active_players.keys()\n current_52_stats = [[player] + list(np.sum(players_stats[player].last_year,axis=0)) \\\n for player in players]\n # avg_52_stats = np.sum(avg_stats.last_year,axis=0)\n cols = ['player','52_swon','52_svpt','52_rwon','52_rpt']\n current_stats_df = pd.DataFrame(current_52_stats, columns=cols)\n current_stats_df['52_s_pct'] = current_stats_df['52_swon']/current_stats_df['52_svpt']\n current_stats_df['52_r_pct'] = current_stats_df['52_rwon']/current_stats_df['52_rpt']\n return current_stats_df[current_stats_df['52_svpt']>0] # return players active in past 12 months\n\n'''\ngenerate 12-month stats for Barnett-Clarke model\nas well as variations (adjusted, EM-normalized)\n'''\ndef generate_stats(df, start_ind):\n df = generate_52_stats(df,start_ind)\n print('generated 52 stats...')\n\n df = generate_52_adj_stats(df,start_ind)\n print('generated 52 adj stats...')\n\n df = generate_tny_stats(df,start_ind)\n print('generated tny stats...')\n\n df = generate_commop_stats(df, start_ind)\n print('generated commop stats...')\n\n cols = ['_name','_elo_538','_sf_elo_538', #'_elo','_sf_elo'\n '_swon', '_svpt', '_rwon', '_rpt',\n '_52_swon', '_52_svpt','_52_rwon','_52_rpt',\n '_sf_52_swon','_sf_52_svpt','_sf_52_rwon','_sf_52_rpt',\n '_52_s_adj','_52_r_adj']\n\n # of players p0, p1, p0 will always be the player with the first name alphabetically (since this is deterministic)\n # the 'winner' will be 1 when p0's name comes alphabetically last and 0 otherwise\n df['winner'] = df['w_name'] > df['l_name']\n df = change_labels(df, cols)\n df = change_labels_v2(df, ['_commop_s_pct', '_commop_r_pct'])\n\n df['elo_diff'] = df['p0_elo_538'] - df['p1_elo_538']\n df['sf_elo_diff'] = df['p0_sf_elo_538'] - df['p1_sf_elo_538']\n\n # # dataframe with only official matches\n # df = df[df['winner']!='None']\n # df = df.reset_index(drop=True)\n # cols = ['52_s_adj','52_r_adj']\n\n em_cols = ['s_pct', 'r_pct', 'sf_s_pct', 'sf_r_pct', '52_s_adj', '52_r_adj']\n df = generate_sr_pct(df)\n\n # FIX for correct em stat sample sizes\n df = df.loc[start_ind:].reset_index(drop=True)\n df = generate_em_stats(df, em_cols)\n return df\n\n'''\nadd s/r pct columns, replacing nan with overall avg\n'''\ndef generate_sr_pct(df):\n p_hat = 
np.sum([df['p0_52_swon'],df['p1_52_swon']])\n p_hat = p_hat/np.sum([df['p0_52_svpt'],df['p1_52_svpt']])\n for label in ['p0','p1']:\n # divide with np.nan_to_num and use p_hat as a placeholder when n=0\n df[label+'_s_pct'] = np.nan_to_num(df[label+'_52_swon']/df[label+'_52_svpt'])\n df[label+'_s_pct'] = df[label+'_s_pct'] + (p_hat) * (df[label+'_s_pct'] == 0)\n df[label+'_r_pct'] = np.nan_to_num(df[label+'_52_rwon']/df[label+'_52_rpt'])\n df[label+'_r_pct'] = df[label+'_r_pct'] + (1-p_hat)*(df[label+'_r_pct'] == 0)\n\n df[label+'_sf_s_pct'] = np.nan_to_num(df[label+'_sf_52_swon']/df[label+'_sf_52_svpt'])\n df[label+'_sf_s_pct'] = df[label+'_sf_s_pct'] + (p_hat) * (df[label+'_sf_s_pct'] == 0)\n df[label+'_sf_r_pct'] = np.nan_to_num(df[label+'_sf_52_rwon']/df[label+'_sf_52_rpt'])\n df[label+'_sf_r_pct'] = df[label+'_sf_r_pct'] + (1-p_hat)*(df[label+'_sf_r_pct'] == 0)\n\n # finally, generate the observed service percentages in each match\n df[label+'_s_pct_obsv'] = np.nan_to_num(df[label+'_swon']/df[label+'_svpt'])\n return df\n\ndef finalize_df(df):\n # generate serving probabilities for Barnett-Clarke model\n df['match_id'] = range(len(df))\n df['tny_stats'] = [df['avg_52_s'][i] if df['tny_stats'][i]==0 else df['tny_stats'][i] for i in range(len(df))]\n df['p0_s_kls'] = df['tny_stats']+(df['p0_s_pct']-df['avg_52_s']) - (df['p1_r_pct']-df['avg_52_r'])\n df['p1_s_kls'] = df['tny_stats']+(df['p1_s_pct']-df['avg_52_s']) - (df['p0_r_pct']-df['avg_52_r'])\n df['p0_s_kls_EM'] = df['tny_stats']+(df['p0_s_pct_EM']-df['avg_52_s']) - (df['p1_r_pct_EM']-df['avg_52_r'])\n df['p1_s_kls_EM'] = df['tny_stats']+(df['p1_s_pct_EM']-df['avg_52_s']) - (df['p0_r_pct_EM']-df['avg_52_r'])\n\n df['p0_s_sf_kls'] = df['tny_stats']+(df['p0_sf_s_pct']-df['sf_avg_52_s']) - (df['p1_sf_r_pct']-df['sf_avg_52_r'])\n df['p1_s_sf_kls'] = df['tny_stats']+(df['p1_sf_s_pct']-df['sf_avg_52_s']) - (df['p0_sf_r_pct']-df['sf_avg_52_r'])\n df['p0_s_sf_kls_EM'] = df['tny_stats']+(df['p0_sf_s_pct_EM']-df['sf_avg_52_s']) - (df['p1_sf_r_pct_EM']-df['sf_avg_52_r'])\n df['p1_s_sf_kls_EM'] = df['tny_stats']+(df['p1_sf_s_pct_EM']-df['sf_avg_52_s']) - (df['p0_sf_r_pct_EM']-df['sf_avg_52_r'])\n\n df['p0_s_adj_kls'] = df['tny_stats']+(df['p0_52_s_adj']) - (df['p1_52_r_adj'])\n df['p1_s_adj_kls'] = df['tny_stats']+(df['p1_52_s_adj']) - (df['p0_52_r_adj'])\n df['p0_s_adj_kls_EM'] = df['tny_stats']+(df['p0_52_s_adj_EM']) - (df['p1_52_r_adj_EM'])\n df['p1_s_adj_kls_EM'] = df['tny_stats']+(df['p1_52_s_adj_EM']) - (df['p0_52_r_adj_EM'])\n\n df['p0_s_commop_kls'] = df['tny_stats']+(df['p0_commop_s_pct'] - df['avg_52_s']) - (df['p1_commop_r_pct'] - df['avg_52_r'])\n df['p1_s_commop_kls'] = df['tny_stats']+(df['p1_commop_s_pct'] - df['avg_52_s']) - (df['p0_commop_r_pct'] - df['avg_52_r'])\n\n p_hat = np.sum([df['p0_52_swon'],df['p1_52_swon']])/np.sum([df['p0_52_svpt'],df['p1_52_svpt']])\n df['p0_s_baseline'] = p_hat\n df['p1_s_baseline'] = p_hat\n\n # generate match probabilities for Barnett-Clarke method, with or w/o EM estimators\n df['match_prob_kls'] = [matchProb(row['p0_s_kls'],1-row['p1_s_kls']) for i,row in df.iterrows()]\n df['match_prob_kls_EM'] = [matchProb(row['p0_s_kls_EM'],1-row['p1_s_kls_EM']) for i,row in df.iterrows()]\n df['match_prob_sf_kls'] = [matchProb(row['p0_s_sf_kls'],1-row['p1_s_sf_kls']) for i,row in df.iterrows()]\n df['match_prob_sf_kls_EM'] = [matchProb(row['p0_s_sf_kls_EM'],1-row['p1_s_sf_kls_EM']) for i,row in df.iterrows()]\n df['match_prob_adj_kls'] = [matchProb(row['p0_s_adj_kls'],1-row['p1_s_adj_kls']) for i,row in 
df.iterrows()]\n df['match_prob_adj_kls_EM'] = [matchProb(row['p0_s_adj_kls_EM'],1-row['p1_s_adj_kls_EM']) for i,row in df.iterrows()]\n df['match_prob_commop_kls'] = [matchProb(row['p0_s_commop_kls'],1-row['p1_s_commop_kls']) for i,row in df.iterrows()]\n df['match_prob_commop'] = [1 - df['w_commop_match_prob'][i] if df['winner'][i] else df['w_commop_match_prob'][i] for i in range(len(df))]\n\n # generate win probabilities from elo differences\n df['elo_prob'] = (1+10**(df['elo_diff']/-400.))**-1\n df['sf_elo_prob'] = [(1+10**(diff/-400.))**-1 for diff in df['sf_elo_diff']]\n\n # elo-induced serve percentages\n df = generate_bc_stats_elo_induced(df, 'elo',start_ind=0)\n return df\n\ndef get_start_ind(match_df, start_year):\n return match_df[match_df['match_year']>=start_year-1].index[0]\n\n'''\nreturns dataframe with up-to-date player stats through date of most recent match\n'''\ndef generate_df(tour, start_year, end_year, ret_strings, abd_strings, counts_538):\n print('start_year: ', start_year)\n print('end_year: ', end_year)\n match_df = concat_data(start_year, end_year, tour)\n print('match_df.shape before: ', match_df.shape)\n start_ind = match_df[match_df['match_year']>=start_year-1].index[0]\n print('match_df.shape: ', match_df.shape)\n match_df = generate_elo(match_df, counts_538)\n print('generated elo on match dataset...')\n\n match_df = generate_stats(match_df, start_ind) # 52, adj, tny, etc.\n match_df = finalize_df(match_df)\n match_df = match_df.reset_index(drop=True)\n print('finalized df...')\n return match_df\n\n'''\nreturns two dataframes\n1) contains up-to-date player stats through date of most recent match\n2) contains every match with elo/serve/return/etc stats\n'''\ndef generate_test_dfs(tour, start_year, end_year, ret_strings, abd_strings, counts_538):\n match_df = concat_data(start_year, end_year, tour)\n start_ind = match_df[match_df['match_year']>=start_year-1].index[0]\n match_df = generate_elo(match_df, counts_538)\n\n match_df = generate_52_stats(match_df, start_ind)\n match_df = generate_52_adj_stats(match_df, start_ind)\n match_df = generate_tny_stats(match_df, start_ind)\n match_df = generate_commop_stats(match_df, start_ind)\n # TODO: add generate_em_stats() right here\n\n return match_df\n\n'''\nreceives n x 2 array with columns 'w_name', 'l_name', 'is_gs'\n'''\ndef generate_elo_columns(arr, counts_538):\n # print('arr[0]: ', arr[0])\n # print('arr[1]: ', arr[1])\n # print('arr[0].dtype', arr[:, 0].dtype)\n # print('arr[1].dtype', arr[:, 1].dtype)\n # print('arr[:, :2]', arr[:, :2])\n # for s in arr:\n # if isinstance(s[1], bool):\n # print('s: ', s)\n # print('is: ', [s for s in arr if isinstance(s[0], bool))\n # players_set = np.unique(arr[:, :2].astype(str))\n player_names = arr[:, :2].flatten()\n players_set = np.where(player_names!=player_names, '', player_names).tolist()\n # players_set = list(set(list(np.concatenate(arr[:, 0], arr[:, 1]))))\n # player_count = len(players_set)\n # print('player_count: ', player_count)\n # initial_ratings = [elo.Rating() for _ in range(player_count)]\n # zipped = zip(\n # players_set,\n # [elo.Rating() for _ in range(player_count)]\n # )\n # # print('zipped: ', zipped)\n # players_elo = dict(zip(\n # players_set,\n # [elo.Rating() for _ in range(player_count)]\n # )) # can use default dict here?\n players_elo = {}\n for player in players_set:\n # print('player: ', player)\n players_elo[player] = elo.Rating()\n\n match_elos = np.zeros([arr.shape[0], 2])\n elo_obj = elo.Elo_Rater()\n\n # update player elo from every 
recorded match\n for i in range(arr.shape[0]):\n w_name, l_name = arr[i][:2]\n\n if w_name != w_name or l_name != l_name:\n match_elos[i] = np.nan, np.nan\n continue\n\n match_elos[i] = players_elo[w_name].value, players_elo[l_name].value\n elo_obj.rate_1vs1(players_elo[w_name], players_elo[l_name], arr[i][2], counts_538)\n\n return match_elos[:,0], match_elos[:,1]\n\ndef generate_surface_elo_columns(df, surfaces, counts_538):\n df['w_sf_elo_538'], df['l_sf_elo_538'] = df['w_elo_538'], df['l_elo_538']\n for surface in surfaces:\n surface_df = df[(df['surface'] == surface) & (df['w_name'] == df['w_name']) & (df['l_name'] == df['l_name'])]\n w_elo_columns, l_elo_columns = generate_elo_columns(np.array(surface_df[['w_name', 'l_name', 'is_gs']]), True)\n df.loc[df['surface'] == surface, 'w_sf_elo_538'] = w_elo_columns\n df.loc[df['surface'] == surface, 'l_sf_elo_538'] = l_elo_columns\n\n return df['w_sf_elo_538'], df['l_sf_elo_538']\n\n'''\nreceives n x 4 array with columns 'w_name', 'l_name', 'is_gs', 'Date'\n'''\ndef generateEloColumnsWithHistory(arr, counts_538):\n playerEloHistory = defaultdict(list)\n players_set = np.unique(arr[:, :2])\n players_elo = dict(zip(\n players_set,\n [elo.Rating() for __ in range(len(players_set))]\n )) # can use default dict here?\n\n match_elos = np.zeros([arr.shape[0], 2])\n elo_obj = elo.Elo_Rater()\n\n # update player elo from every recorded match\n for i in range(arr.shape[0]):\n w_name, l_name = arr[i][:2]\n isGrandSlam = arr[i][2]\n date = datetime.datetime.strptime(arr[i][3], '%Y-%m-%d')\n\n match_elos[i] = players_elo[w_name].value, players_elo[l_name].value\n elo_obj.rate_1vs1(players_elo[w_name], players_elo[l_name], 0, counts_538)\n\n playerEloHistory[w_name].append({ 'date': date, 'newElo': players_elo[w_name].value, 'won': 1 })\n playerEloHistory[l_name].append({ 'date': date, 'newElo': players_elo[l_name].value, 'won': 0 })\n\n return match_elos[:,0], match_elos[:,1], playerEloHistory, players_elo\n\n'''\nreturn match dataframe with each player's pre-match elo ratings\n'''\ndef generate_elo(df, counts_538=True):\n df['w_elo_538'], df['l_elo_538'] = generate_elo_columns(np.array(df[['w_name', 'l_name', 'is_gs']]), True)\n df['w_sf_elo_538'], df['l_sf_elo_538'] = generate_surface_elo_columns(df, ['Hard', 'Clay', 'Grass'], counts_538)\n return df\n\n # df['w_sf_elo_538'], df['l_sf_elo_538'] = df['w_elo_538'], df['l_elo_538']\n # for surface in ['Hard', 'Clay', 'Grass']:\n # surface_df = df[df['surface'] == surface]\n # w_elo_columns, l_elo_columns = generate_elo_columns(np.array(surface_df[['w_name', 'l_name', 'is_gs']]), True)\n # df.loc[df['surface'] == surface, 'w_sf_elo_538'] = w_elo_columns\n # df.loc[df['surface'] == surface, 'l_sf_elo_538'] = l_elo_columns\n\n # return df\n\n'''\nreplace nan values with overall average array value\n'''\ndef fill_nan_with_mean(arr):\n mean = np.nanmean(arr)\n arr[np.isnan(arr)] = mean\n return arr\n\n'''\ncollect 12-month s/r average performance by player\n'''\ndef generate_52_stats(df,start_ind):\n players_stats = {}\n start_date = (df['match_year'][start_ind],df['match_month'][start_ind])\n avg_stats = stats_52(start_date)\n # set as prior so first row is not nan\n avg_stats.update(start_date,(6.4,10,3.6,10))\n # array w/ 2x1 arrays for each player's 12-month serve/return performance\n match_52_stats = np.zeros([2,len(df),4])\n avg_52_stats = np.zeros([len(df),4]) # avg tour-wide stats for serve, return\n\n s_players_stats = {}\n s_avg_stats = {}\n for surface in ('Hard','Clay','Grass'):\n 
s_players_stats[surface] = {}\n s_avg_stats[surface] = stats_52((df['match_year'][0],df['match_month'][0]))\n s_avg_stats[surface].update(start_date,(6.4,10,3.6,10))\n s_match_52_stats = np.zeros([2,len(df),4])\n s_avg_52_stats = np.zeros([len(df),4])\n\n w_l = ['w','l']\n for i, row in df.loc[start_ind:].iterrows():\n surface = row['surface']\n date = row['match_year'],row['match_month']\n\n avg_stats.set_month(date)\n avg_52_stats[i] = np.sum(avg_stats.last_year,axis=0)\n for k,label in enumerate(w_l):\n if row[label+'_name'] not in players_stats:\n players_stats[row[label+'_name']] = stats_52(date)\n # store serving stats prior to match, update current month\n players_stats[row[label+'_name']].set_month(date)\n match_52_stats[k][i] = np.sum(players_stats[row[label+'_name']].last_year,axis=0) # all four stats per player\n # update serving stats if not null\n if validate(row, label):\n match_stats = (row[label+'_swon'],row[label+'_svpt'],row[w_l[1-k]+'_svpt']-\\\n row[w_l[1-k]+'_swon'],row[w_l[1-k]+'_svpt'])\n players_stats[row[label+'_name']].update(date,match_stats)\n avg_stats.update(date,match_stats)\n\n # repeat above process for surface-specific stats\n if surface not in ('Hard','Clay','Grass'):\n continue\n s_avg_stats[surface].set_month(date)\n s_avg_52_stats[i] = np.sum(s_avg_stats[surface].last_year,axis=0)\n for k,label in enumerate(w_l):\n if row[label+'_name'] not in s_players_stats[surface]:\n s_players_stats[surface][row[label+'_name']] = stats_52(date)\n\n # store serving stats prior to match, from current month\n s_players_stats[surface][row[label+'_name']].set_month(date)\n s_match_52_stats[k][i] = np.sum(s_players_stats[surface][row[label+'_name']].last_year,axis=0)\n # update serving stats if not null\n if validate(row, label):\n match_stats = (row[label+'_swon'],row[label+'_svpt'],row[w_l[1-k]+'_svpt']-\\\n row[w_l[1-k]+'_swon'],row[w_l[1-k]+'_svpt'])\n s_players_stats[surface][row[label+'_name']].update(date,match_stats)\n s_avg_stats[surface].update(date,match_stats)\n\n for k,label in enumerate(w_l):\n df[label+'_52_swon'] = match_52_stats[k][:,0]\n df[label+'_52_svpt'] = match_52_stats[k][:,1]\n df[label+'_52_rwon'] = match_52_stats[k][:,2]\n df[label+'_52_rpt'] = match_52_stats[k][:,3]\n df[label+'_sf_52_swon'] = s_match_52_stats[k][:,0]\n df[label+'_sf_52_svpt'] = s_match_52_stats[k][:,1]\n df[label+'_sf_52_rwon'] = s_match_52_stats[k][:,2]\n df[label+'_sf_52_rpt'] = s_match_52_stats[k][:,3]\n\n with np.errstate(divide='ignore', invalid='ignore'):\n df['avg_52_s'] = fill_nan_with_mean(np.divide(avg_52_stats[:,0],avg_52_stats[:,1]))\n df['avg_52_r'] = fill_nan_with_mean(np.divide(avg_52_stats[:,2],avg_52_stats[:,3]))\n df['sf_avg_52_s'] = fill_nan_with_mean(np.divide(s_avg_52_stats[:,0],s_avg_52_stats[:,1]))\n df['sf_avg_52_r'] = fill_nan_with_mean(np.divide(s_avg_52_stats[:,2],s_avg_52_stats[:,3]))\n return df\n\n'''\nEfron-Morris estimators for 52-week serve and return percentages\nCalculates B_i coefficients in terms of service points\nFeed any existing col where ['p0_'+col, 'p1_'+col] within df.columns\n# TODO: you should be passing in the full column suffix after 'p0_'/'p1_'\n'''\ndef generate_em_stats(df,cols):\n for col in cols:\n stat_history = np.concatenate([df['p0_'+col],df['p1_'+col]],axis=0)\n n = int(len(stat_history)/2)\n prefix = 'sf_52_' if 'sf' in col else '52_'\n suffix = 'svpt' if '_s_' in col else 'rpt'\n num_points = np.concatenate([df['p0_'+prefix+suffix],df['p1_'+prefix+suffix]])\n p_hat = np.mean(stat_history)\n sigma2_i = 
fill_nan_with_mean(np.divide(p_hat*(1-p_hat),num_points,where=num_points>0))\n tau2_hat = np.nanvar(stat_history)\n B_i = sigma2_i/(tau2_hat+sigma2_i)\n\n stat_history[stat_history!=stat_history] = p_hat\n df['p0_' + col + '_EM'] = df['p0_' + col]+B_i[:n] * (p_hat - df['p0_' + col])\n df['p1_' + col + '_EM'] = df['p1_' + col]+B_i[n:] * (p_hat - df['p1_' + col])\n print(col, p_hat)\n return df # ok if p_hats don't add up because they're avg of averages\n\n'''\nEfron-Morris estimators for 52-week serve and return percentages\nCalculates B_i coefficients in terms of service points\nFeed any existing col within df.columns\n'''\ndef generate_em_stats_current(df,cols):\n for col in cols:\n stat_history = df[col]\n num_points = df['52_svpt'] if col=='52_swon' else df['52_rpt']\n p_hat = np.mean(stat_history)\n sigma2_i = fill_nan_with_mean(np.divide(p_hat*(1-p_hat),num_points,where=num_points>0))\n tau2_hat = np.nanvar(stat_history)\n B_i = sigma2_i/(tau2_hat+sigma2_i)\n\n stat_history[stat_history!=stat_history] = p_hat\n df[col+'_EM'] = df[col]+B_i*(p_hat-df[col])\n print(col, p_hat)\n return df # ok if p_hats don't add up because they're avg of averages\n\n\n'''\nuse validate stats before calling statsClass.update() method\n'''\ndef is_valid(arr):\n return not np.isnan(arr).any()\n\n'''\ncollects 12-month s/r stats relative to historical opponents\ncolumns '52_s_adj','52_r_adj' represent how well a player\nperforms above average\n'''\ndef generate_52_adj_stats(df,start_ind=0):\n players_stats = {}\n match_52_stats = np.zeros([2,len(df),2]) # 2x1 arrays for x_i, x_j's 12-month s/r performance\n\n w_l = ['w','l']\n for i, row in df.loc[start_ind:].iterrows():\n surface = row['surface']\n date = row['match_year'],row['match_month']\n avg_52_s, avg_52_r = row['avg_52_s'],row['avg_52_r']\n match_stats = [[],[]]\n\n # add new players to the dictionary\n for k,label in enumerate(w_l):\n if row[label+'_name'] not in players_stats:\n players_stats[row[label+'_name']] = adj_stats_52(date)\n\n # store pre-match adj stats\n for k,label in enumerate(w_l):\n players_stats[row[label+'_name']].set_month(date)\n\n # fill in player's adjusted stats prior to start of match\n match_52_stats[k][i] = players_stats[row[label+'_name']].adj_sr\n # update serving stats if not null\n if validate(row, label):\n sv_stats = (row[label+'_swon'],row[label+'_svpt'],row[label+'_rwon'],row[label+'_rpt'])\n\n\n # TODO: this is the troublesome line... 
could be extracting nan value from opponent\n # TODO: also rewrite this so it's readable (plus with arrays not obvious at)\n opp_r_ablty = players_stats[row[w_l[1-k]+'_name']].adj_sr[1] + avg_52_r\n opp_s_ablty = players_stats[row[w_l[1-k]+'_name']].adj_sr[0] + avg_52_s\n opp_stats = (opp_r_ablty * row[label + '_svpt'], opp_s_ablty * row[label + '_rpt'])\n match_stats[k] = sv_stats + opp_stats\n\n # update players' adjusted scores based on pre-match adjusted ratings\n for k,label in enumerate(w_l):\n # if is_valid(match_stats):\n if validate(row, label) and is_valid(match_stats):\n players_stats[row[label+'_name']].update(date,match_stats[k])\n\n for k,label in enumerate(w_l):\n df[label+'_52_s_adj'] = match_52_stats[k][:,0]\n df[label+'_52_r_adj'] = match_52_stats[k][:,1]\n return df\n\n\n'''\ngenerate delta between two players relative to shared opponent\ndelta_i^AB = (spw(A, C_i) - (1 - rpw(A, C_i))) - (spw(B, C_i) - (1 - rpw(B, C_i)))\n'''\ndef generate_delta(p1_stats, p2_stats):\n p1_s_pct, p1_r_pct = p1_stats[0]/float(p1_stats[1]), p1_stats[2]/float(p1_stats[3])\n p2_s_pct, p2_r_pct = p2_stats[0]/float(p2_stats[1]), p2_stats[2]/float(p2_stats[3])\n return (p1_s_pct - (1 - p1_r_pct)) - (p2_s_pct - (1 - p2_r_pct))\n\n'''\nreturn true if total service/return points both greater than zero\n'''\ndef has_stats(last_year_stats):\n return last_year_stats[1] > 0 and last_year_stats[3] > 0\n\n'''\nget opponents who have played a match in the past 12 months (more than 0 points)\n'''\ndef get_opponents(player_d, player_name):\n historical_opponents = player_d[player_name].history.keys()\n return [opp for opp in historical_opponents if has_stats(player_d[player_name].history[opp])]\n\n'''\ncompute serve/return parameters, given their common opponent history\n'''\ndef generate_commop_params(player_d, player1, player2):\n p1_opponents, p2_opponents = get_opponents(player_d, player1), get_opponents(player_d, player2)\n common_opponents = np.intersect1d(p1_opponents, p2_opponents)\n if len(common_opponents) == 0:\n return [0]\n\n match_deltas = np.zeros(len(common_opponents))\n for i, comm_op in enumerate(common_opponents):\n p1_match_stats = player_d[player1].history[comm_op]\n p2_match_stats = player_d[player2].history[comm_op]\n comm_op_delta = generate_delta(p1_match_stats, p2_match_stats)\n match_deltas[i] = comm_op_delta\n if np.isnan(comm_op_delta):\n print('nan here: ', p1_match_stats, p2_match_stats, comm_op)\n\n overall_delta = np.mean(match_deltas)\n if np.isnan(overall_delta):\n print('nan, match_deltas: ', match_deltas)\n return match_deltas\n\n'''\ncollect historical s/r common-opponent performance by player\n'''\ndef generate_commop_stats(df, start_ind):\n player_d = {}\n match_52_stats = np.zeros([2,len(df), 2])\n match_probs = np.zeros([len(df)])\n\n w_l = ['w','l']\n for i, row in df.loc[start_ind:].iterrows():\n for k, label in enumerate(w_l):\n opponent_name = row[w_l[1-k]+'_name']\n if row[label+'_name'] not in player_d:\n player_d[row[label+'_name']] = commop_stats()\n\n if validate(row, label):\n match_stats = (row[label+'_swon'],row[label+'_svpt'],row[w_l[1-k]+'_svpt']-\\\n row[w_l[1-k]+'_swon'],row[w_l[1-k]+'_svpt'])\n player_d[row[label+'_name']].update(match_stats, opponent_name)\n\n # can compute common-opponent stats after current match stats inputted\n if row['match_year'] >= COMMOP_START_YEAR: # start at COMMOP_START_YEAR, computationally intensive\n match_deltas = generate_commop_params(player_d, row['w_name'], row['l_name'])\n overall_delta = np.mean(match_deltas)\n 
w_s_pct, w_r_pct = (.6 + overall_delta/2), (.4 + overall_delta/2)\n\n match_52_stats[0][i] = [w_s_pct, w_r_pct]\n match_52_stats[1][i] = [1 - w_r_pct, 1 - w_s_pct]\n\n iterated_match_probs = [\n np.mean([\n matchProb(.6 + match_delta, .4),\n matchProb(.6, .4 + match_delta)\n ])\n for match_delta in match_deltas\n ]\n match_probs[i] = np.mean(iterated_match_probs)\n\n for k,label in enumerate(w_l):\n df[label+'_commop_s_pct'] = match_52_stats[k][:,0]\n df[label+'_commop_r_pct'] = match_52_stats[k][:,1]\n\n df['w_commop_match_prob'] = match_probs\n return df\n\n\n'''\ncollect yearly tournament serve averages for 'f_av'\nin Barnett-Clarke equation\n'''\ndef generate_tny_stats(df,start_ind=0):\n tny_stats = {}\n tny_52_stats = np.zeros(len(df))\n for i, row in df.loc[start_ind:].iterrows():\n if row['tny_name']=='Davis Cup':\n continue\n\n year,t_id = row['tny_id'].split('-')\n year = int(year)\n match_stats = (row['w_swon']+row['l_swon'],row['w_svpt']+row['l_svpt'])\n # handle nan cases, provide tny_stats if possible\n if row['w_swon']!=row['w_swon']:\n if t_id in tny_stats:\n if year-1 in tny_stats[t_id].historical_avgs:\n swon,svpt = tny_stats[t_id].historical_avgs[year-1]\n tny_52_stats[i] = swon/float(svpt)\n continue\n # create new object if needed, then update\n elif t_id not in tny_stats:\n tny_stats[t_id] = tny_52(year)\n tny_52_stats[i] = tny_stats[t_id].update(year,match_stats)\n\n df['tny_stats'] = tny_52_stats\n return df\n\n'''\napproximate inverse elo-->s_pct calculator\n'''\ndef elo_induced_s(prob, s_total):\n s0 = s_total/2\n diff = s_total/4\n current_prob = .5\n\n # bisection search: move s0 by ever-halving steps until the implied match probability converges\n while abs(current_prob-prob) > EPSILON:\n if current_prob < prob:\n s0 += diff\n else:\n s0 -= diff\n diff /= 2\n current_prob = matchProb(s0,1-(s_total-s0))\n return s0, s_total-s0\n\n\n'''\nimportant to set s_total with EM-normalized percentages\n'''\ndef generate_bc_stats_elo_induced(df,col,start_ind=0):\n df['s_total'] = df['p0_s_kls_EM'] + df['p1_s_kls_EM']\n induced_s = np.zeros([len(df),2])\n for i, row in df.loc[start_ind:].iterrows():\n induced_s[i] = elo_induced_s(row[col+'_prob'],row['s_total'])\n df['p0_s_kls_' + col] = induced_s[:,0]\n df['p1_s_kls_' + col] = induced_s[:,1]\n\n del df['s_total']\n return df\n\ndef format_pbp_df(df,tour='atp'):\n df['w_name'] = np.where(df['winner'] == 0, df['server1'], df['server2'])\n df['l_name'] = np.where(df['winner'] == 0, df['server2'], df['server1'])\n df['w_name'] = [normalize_name(x,tour=tour) for x in df['w_name']]\n df['l_name'] = [normalize_name(x,tour=tour) for x in df['l_name']]\n df['date'] = pd.to_datetime(df['date'])\n df['match_year'] = [x.year for x in df['date']]\n df['match_month'] = [x.month for x in df['date']]\n df['date'] = [x.date() for x in df['date']]\n df['score'] = [re.sub(r\"[\\(\\[].*?[\\)\\]]\", \"\", s) for s in df['score']]\n return df\n\ndef connect_match_and_pbp_dfs(match_df,pbp_df,col_d,player_cols,start_year=2009):\n pbp_dict = {}; winner_dict = {}\n for i in range(len(pbp_df)):\n key = pbp_df['w_name'][i] +' ' + pbp_df['l_name'][i] + ' ' \\\n + str(pbp_df['match_year'][i]) + ' ' + pbp_df['score'][i]\n key = key+' '+str(pbp_df['match_month'][i]) if key in col_d else key\n if key in pbp_dict:\n continue\n pbp_dict[key] = pbp_df['pbp'][i]\n winner_dict[key] = pbp_df['winner'][i]\n\n # in case of a collision (about 10 cases), I only take the first match with that key\n c = 0\n pbps,winners = [],[]\n info = {}\n\n match_df = match_df[match_df['match_year']>=start_year]\n for i in match_df.index:\n key = match_df['w_name'][i] +' ' + 
match_df['l_name'][i] + ' ' \\\n +str(match_df['match_year'][i])+' '+match_df['score'][i]\n key = key+' '+str(match_df['match_month'][i]) if key in col_d else key\n if key in pbp_dict:\n c += 1\n pbps.append(pbp_dict[key])\n winners.append(winner_dict[key])\n if key in info:\n pbps[-1] = 'None'; winners[-1] = 'None'\n print('collision')\n print(key + ' ' + str(match_df['match_month'][i]))\n info[key] = 1\n else:\n pbps.append('None')\n # we'll just make 'winner' a random 0 or 1 for now\n winners.append(np.random.choice([0,1]))\n print(c)\n match_df['pbp'] = pbps\n match_df['winner'] = winners\n\n #df = match_df[match_df['pbp']!='NA']\n #cols = df.columns.drop(['loser_id','winner_id'])\n df = match_df[match_df.columns.drop(['loser_id','winner_id'])]\n df = df.reset_index(drop=True)\n\n # # change w,l TO p0,p1\n # for col in player_cols:\n # df['p0'+col] = [df['l'+col][i] if df['winner'][i] else df['w'+col][i] for i in range(len(df))]\n # df['p1'+col] = [df['w'+col][i] if df['winner'][i] else df['l'+col][i] for i in range(len(df))]\n\n # # add s/r pct columns\n # p_hat = np.sum([df['p0_52_swon'],df['p1_52_swon']])/np.sum([df['p0_52_svpt'],df['p1_52_svpt']])\n # for label in ['p0','p1']:\n # df[label+'_s_pct'] = [p_hat if x==0 else x for x in np.nan_to_num(df[label+'_52_swon']/df[label+'_52_svpt'])]\n # df[label+'_r_pct'] = [1-p_hat if x==0 else x for x in np.nan_to_num(df[label+'_52_rwon']/df[label+'_52_rpt'])]\n # df[label+'_sf_s_pct'] = [p_hat if x==0 else x for x in np.nan_to_num(df[label+'_sf_52_swon']/df[label+'_sf_52_svpt'])]\n # df[label+'_sf_r_pct'] = [1-p_hat if x==0 else x for x in np.nan_to_num(df[label+'_sf_52_rwon']/df[label+'_sf_52_rpt'])]\n\n # df['elo_diff'] = [df['p0_elo'][i] - df['p1_elo'][i] for i in range(len(df))]\n # df['sf_elo_diff'] = [df['p0_sf_elo'][i] - df['p1_sf_elo'][i] for i in range(len(df))]\n # df['tny_name'] = [s if s==s else 'Davis Cup' for s in df['tny_name']]\n return df\n" ]
[ [ "numpy.sum", "numpy.intersect1d", "numpy.nanvar", "numpy.nan_to_num", "numpy.nanmean", "numpy.random.choice", "pandas.to_datetime", "numpy.isnan", "numpy.where", "numpy.unique", "numpy.mean", "numpy.zeros", "pandas.read_csv", "pandas.concat", "numpy.divide", "pandas.DataFrame", "numpy.errstate", "numpy.array", "numpy.concatenate" ] ]
microsoft/aaai21-copy-that
[ "7dfb2ebabbbf1165a33c2430ef2f2571e487b4fd" ]
[ "model/tests/copyspan_seq2seq_synth_edits.py" ]
[ "import logging\r\nimport random\r\n\r\nimport numpy as np\r\n\r\nfrom dpu_utils.utils import run_and_debug, RichPath\r\n\r\nfrom data.representationviz import RepresentationsVisualizer\r\nfrom data.synthetic.charedits import get_dataset\r\nfrom editrepcomponents.alignededitencoder import AlignedEditTokensEmbedding\r\nfrom dpu_utils.ptutils import BaseComponent, ComponentTrainer\r\nfrom mlcomponents.seqdecoding.spancopydecoder import GruSpanCopyingDecoder\r\nfrom mlcomponents.seqencoder import BiGruSequenceEncoder\r\nfrom editrepcomponents.copyeditor import CopyEditor\r\nfrom mlcomponents.embeddings import TokenSequenceEmbedder\r\nfrom mlcomponents.seqdecoding import LuongAttention\r\n\r\nlogging.basicConfig(level=logging.INFO,\r\n format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')\r\n\r\n\r\ndef run():\r\n greedy_decoding = False\r\n\r\n np.random.seed(1)\r\n random.seed(1)\r\n dataset = get_dataset()\r\n logging.info('Generated %s synthetic datapoints.', len(dataset))\r\n\r\n training_set, validation_set = dataset[:int(.8 * len(dataset))], dataset[int(.8 * len(dataset)):]\r\n\r\n seq_embeddings = TokenSequenceEmbedder('SeqTokenEmbedder', hyperparameters={'max_seq_length': 12, 'dropout_rate':0, 'min_word_count_threshold': 1})\r\n input_sequence_encoder = BiGruSequenceEncoder('BiGruEncoder',\r\n token_embedder=seq_embeddings,\r\n hyperparameters={\r\n 'num_layers':2,\r\n 'hidden_size': 61,\r\n })\r\n\r\n attention = LuongAttention('StandardAttention',\r\n hyperparameters={'memories_hidden_dimension': input_sequence_encoder.output_states_size})\r\n copy_attention = LuongAttention('StandardAttention',\r\n hyperparameters={'memories_hidden_dimension': input_sequence_encoder.output_states_size})\r\n\r\n edit_token_embeddings = AlignedEditTokensEmbedding('EditEncoder', token_encoder=seq_embeddings)\r\n edit_encoder = BiGruSequenceEncoder('BiGruEditEncoder',\r\n token_embedder=edit_token_embeddings,\r\n hyperparameters={\r\n 'num_layers':3,\r\n })\r\n decoder = GruSpanCopyingDecoder('GruSpanCopyingDecoder',\r\n token_encoder=seq_embeddings,\r\n standard_attention=attention,\r\n copy_attention=copy_attention,\r\n hyperparameters={'initial_state_size': 244+192,\r\n 'memories_hidden_dimension': 122,\r\n 'dropout_rate':0,\r\n 'additional_inputs_size':64*3,\r\n 'max_memories_length': 12})\r\n\r\n model = CopyEditor('CopyEditor',\r\n input_sequence_encoder=input_sequence_encoder,\r\n edit_encoder=edit_encoder,\r\n output_sequence_decoder=decoder,\r\n learn_bidirectional_edits=True\r\n )\r\n\r\n save_path = RichPath.create('./testmodel-copyspan.pkl.gz')\r\n trainer = ComponentTrainer(model, save_path, max_num_epochs=50, minibatch_size=500)\r\n trainer.train(training_set, validation_set, patience=10)\r\n\r\n ## Try greedy decoding\r\n model = None\r\n model = BaseComponent.restore_model(save_path) # type: CopyEditor\r\n model.eval()\r\n all_data = [model.load_data_from_sample(d) for d in validation_set]\r\n ground_input_sequences = [d.input_sequence for d in validation_set]\r\n data_iter = iter(all_data)\r\n predictions = []\r\n representations = []\r\n is_full = True\r\n start_idx = 0\r\n while is_full:\r\n mb_data, is_full, num_elements = model.create_minibatch(data_iter, max_num_items=100)\r\n if num_elements > 0:\r\n if greedy_decoding:\r\n mb_predictions = [s for s in model.greedy_decode(input_sequences=mb_data['input_sequences'],\r\n aligned_edits=mb_data['aligned_edits'],\r\n ground_input_sequences=ground_input_sequences[\r\n start_idx:start_idx + num_elements])]\r\n else:\r\n 
mb_predictions = [s for s in model.beam_decode(input_sequences=mb_data['input_sequences'], aligned_edits=mb_data['aligned_edits'],\r\n ground_input_sequences=ground_input_sequences[start_idx:start_idx+num_elements])]\r\n predictions.extend(mb_predictions)\r\n start_idx += num_elements\r\n representations.extend(model.edit_encoder.get_summary(input_sequence_data=mb_data['aligned_edits']))\r\n if not is_full:\r\n break\r\n\r\n assert len(all_data) == len(predictions)\r\n\r\n num_errors_at_1 = 0\r\n num_errors_at_5 = 0\r\n for i, (datasample, predictions) in enumerate(zip(validation_set, predictions)):\r\n if predictions[0][0] != datasample.output_sequence:\r\n print(datasample, predictions)\r\n num_errors_at_1 += 1\r\n if not any(predictions[i][0] == datasample.output_sequence for i in range(len(predictions))):\r\n num_errors_at_5 += 1\r\n\r\n print(f'Matched @1 {100 * (1 - num_errors_at_1/len(validation_set))}% samples.')\r\n print(f'Matched @5 {100 * (1 - num_errors_at_5/len(validation_set))}% samples.')\r\n\r\n representations = np.array(representations)\r\n viz = RepresentationsVisualizer(labeler=lambda d:d.edit_type)\r\n viz.print_nearest_neighbors(validation_set, representations, num_items=20)\r\n # viz.plot_tsne(validation_set, representations, save_file='out.pdf')\r\n\r\n\r\nrun_and_debug(run, True)\r\n" ]
[ [ "numpy.array", "numpy.random.seed" ] ]
kvenkman/hummingbird
[ "b8ec670b3c90ec7e87d3ae4a2b268075bd5eae65" ]
[ "tests/test_sklearn_linear_converter.py" ]
[ "\"\"\"\nTests sklearn linear classifiers (LinearRegression, LogisticRegression, SGDClassifier, LogisticRegressionCV) converters.\n\"\"\"\nimport unittest\nimport warnings\n\nimport numpy as np\nimport torch\nfrom sklearn.linear_model import LinearRegression, LogisticRegression, SGDClassifier, LogisticRegressionCV\nfrom sklearn import datasets\n\nimport hummingbird.ml\nfrom hummingbird.ml._utils import tvm_installed\nfrom hummingbird.ml import constants\n\n\nclass TestSklearnLinearClassifiers(unittest.TestCase):\n\n # LogisticRegression test function to be parameterized\n def _test_logistic_regression(self, num_classes, solver=\"liblinear\", multi_class=\"auto\", labels_shift=0):\n if num_classes > 2:\n model = LogisticRegression(solver=solver, multi_class=multi_class, fit_intercept=True)\n else:\n model = LogisticRegression(solver=\"liblinear\", fit_intercept=True)\n\n np.random.seed(0)\n X = np.random.rand(100, 200)\n X = np.array(X, dtype=np.float32)\n y = np.random.randint(num_classes, size=100) + labels_shift\n\n model.fit(X, y)\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n np.testing.assert_allclose(model.predict_proba(X), torch_model.predict_proba(X), rtol=1e-6, atol=1e-6)\n\n # LogisticRegression binary\n def test_logistic_regression_bi(self):\n self._test_logistic_regression(2)\n\n # LogisticRegression multiclass with auto\n def test_logistic_regression_multi_auto(self):\n self._test_logistic_regression(3)\n\n # LogisticRegression with class labels shifted\n def test_logistic_regression_shifted_classes(self):\n self._test_logistic_regression(3, labels_shift=2)\n\n # LogisticRegression with multi+ovr\n def test_logistic_regression_multi_ovr(self):\n self._test_logistic_regression(3, multi_class=\"ovr\")\n\n # LogisticRegression with multi+multinomial+sag\n def test_logistic_regression_multi_multin_sag(self):\n warnings.filterwarnings(\"ignore\")\n # this will not converge due to small test size\n self._test_logistic_regression(3, multi_class=\"multinomial\", solver=\"sag\")\n\n # LogisticRegression binary lbfgs\n def test_logistic_regression_bi_lbfgs(self):\n warnings.filterwarnings(\"ignore\")\n # this will not converge due to small test size\n self._test_logistic_regression(2, solver=\"lbfgs\")\n\n # LogisticRegression with multi+lbfgs\n def test_logistic_regression_multi_lbfgs(self):\n warnings.filterwarnings(\"ignore\")\n # this will not converge due to small test size\n self._test_logistic_regression(3, solver=\"lbfgs\")\n\n # LogisticRegression with multi+multinomial+lbfgs\n def test_logistic_regression_multi_multin_lbfgs(self):\n warnings.filterwarnings(\"ignore\")\n # this will not converge due to small test size\n self._test_logistic_regression(3, multi_class=\"multinomial\", solver=\"lbfgs\")\n\n # LogisticRegression with multi+ovr+lbfgs\n def test_logistic_regression_multi_ovr_lbfgs(self):\n warnings.filterwarnings(\"ignore\")\n # this will not converge due to small test size\n self._test_logistic_regression(3, multi_class=\"ovr\", solver=\"lbfgs\")\n\n # LinearRegression test function to be parameterized\n def _test_linear_regression(self, y_input):\n model = LinearRegression()\n\n np.random.seed(0)\n X = np.random.rand(100, 200)\n X = np.array(X, dtype=np.float32)\n y = y_input\n\n model.fit(X, y)\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n np.testing.assert_allclose(model.predict(X), torch_model.predict(X), rtol=1e-6, atol=1e-6)\n\n # 
LinearRegression with ints\n def test_linear_regression_int(self):\n np.random.seed(0)\n self._test_linear_regression(np.random.randint(2, size=100))\n\n # LinearRegression with floats\n def test_linear_regression_float(self):\n np.random.seed(0)\n self._test_linear_regression(np.random.rand(100))\n\n # LogisticRegressionCV test function to be parameterized\n def _test_logistic_regression_cv(self, num_classes, solver=\"liblinear\", multi_class=\"auto\", labels_shift=0):\n if num_classes > 2:\n model = LogisticRegressionCV(solver=solver, multi_class=multi_class, fit_intercept=True)\n else:\n model = LogisticRegressionCV(solver=\"liblinear\", fit_intercept=True)\n\n np.random.seed(0)\n X = np.random.rand(100, 200)\n X = np.array(X, dtype=np.float32)\n y = np.random.randint(num_classes, size=100) + labels_shift\n\n model.fit(X, y)\n torch_model = hummingbird.ml.convert(model, \"torch\")\n self.assertTrue(torch_model is not None)\n np.testing.assert_allclose(model.predict_proba(X), torch_model.predict_proba(X), rtol=1e-6, atol=1e-6)\n\n # LogisticRegressionCV with 2 classes\n def test_logistic_regression_cv_bi(self):\n self._test_logistic_regression_cv(2)\n\n # LogisticRegressionCV with 3 classes\n def test_logistic_regression_cv_multi(self):\n self._test_logistic_regression_cv(3)\n\n # LogisticRegressionCV with shifted classes\n def test_logistic_regression_cv_shifted_classes(self):\n self._test_logistic_regression_cv(3, labels_shift=2)\n\n # LogisticRegressionCV with multi+ovr\n def test_logistic_regression_cv_multi_ovr(self):\n self._test_logistic_regression_cv(3, multi_class=\"ovr\")\n\n # LogisticRegressionCV with multi+multinomial\n def test_logistic_regression_cv_multi_multin(self):\n warnings.filterwarnings(\"ignore\")\n # this will not converge due to small test size\n self._test_logistic_regression_cv(3, multi_class=\"multinomial\", solver=\"sag\")\n\n # SGDClassifier test function to be parameterized\n def _test_sgd_classifier(self, num_classes):\n\n model = SGDClassifier(loss=\"log\")\n\n np.random.seed(0)\n X = np.random.rand(100, 200)\n X = np.array(X, dtype=np.float32)\n y = np.random.randint(num_classes, size=100)\n\n model.fit(X, y)\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n self.assertTrue(torch_model is not None)\n np.testing.assert_allclose(model.predict(X), torch_model.predict(X), rtol=1e-6, atol=1e-6)\n\n # SGDClassifier with 2 classes\n def test_sgd_classifier_bi(self):\n self._test_sgd_classifier(2)\n\n # SGDClassifier with 3 classes\n def test_sgd_classifier_multi(self):\n self._test_sgd_classifier(3)\n\n # Failure Cases\n def test_sklearn_linear_model_raises_wrong_type(self):\n warnings.filterwarnings(\"ignore\")\n np.random.seed(0)\n X = np.random.rand(100, 200)\n X = np.array(X, dtype=np.float32)\n y = np.random.randint(3, size=100).astype(np.float32) # y must be int, not float, should error\n model = SGDClassifier().fit(X, y)\n self.assertRaises(RuntimeError, hummingbird.ml.convert, model, \"torch\")\n\n # Float 64 data tests\n def test_float64_linear_regression(self):\n model = LinearRegression()\n\n np.random.seed(0)\n X = np.random.rand(100, 200)\n y = np.random.randint(2, size=100)\n\n model.fit(X, y)\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n np.testing.assert_allclose(model.predict(X), torch_model.predict(X), rtol=1e-6, atol=1e-6)\n\n def test_float64_sgd_classifier(self):\n\n model = SGDClassifier(loss=\"log\")\n\n np.random.seed(0)\n num_classes = 3\n X = np.random.rand(100, 
200)\n y = np.random.randint(num_classes, size=100)\n\n model.fit(X, y)\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n self.assertTrue(torch_model is not None)\n np.testing.assert_allclose(model.predict(X), torch_model.predict(X), rtol=1e-6, atol=1e-6)\n\n # Test TorchScript backend.\n def test_logistic_regression_ts(self):\n\n model = LogisticRegression(solver=\"liblinear\")\n\n data = datasets.load_iris()\n X, y = data.data, data.target\n X = X.astype(np.float32)\n\n model.fit(X, y)\n\n ts_model = hummingbird.ml.convert(model, \"torch.jit\", X)\n self.assertTrue(ts_model is not None)\n np.testing.assert_allclose(model.predict(X), ts_model.predict(X), rtol=1e-6, atol=1e-6)\n np.testing.assert_allclose(model.predict_proba(X), ts_model.predict_proba(X), rtol=1e-6, atol=1e-6)\n\n # Test TVM backends.\n @unittest.skipIf(not (tvm_installed()), reason=\"TVM tests require TVM\")\n def test_sgd_classifier_tvm(self):\n\n model = SGDClassifier(loss=\"log\")\n\n np.random.seed(0)\n num_classes = 3\n X = np.random.rand(100, 200)\n X = np.array(X, dtype=np.float32)\n y = np.random.randint(num_classes, size=100)\n\n model.fit(X, y)\n\n tvm_model = hummingbird.ml.convert(model, \"tvm\", X)\n self.assertTrue(tvm_model is not None)\n np.testing.assert_allclose(model.predict(X), tvm_model.predict(X), rtol=1e-6, atol=1e-6)\n np.testing.assert_allclose(model.predict_proba(X), tvm_model.predict_proba(X), rtol=1e-6, atol=1e-6)\n\n @unittest.skipIf(not (tvm_installed()), reason=\"TVM tests require TVM\")\n def test_lr_tvm(self):\n\n model = LinearRegression()\n\n np.random.seed(0)\n num_classes = 1000\n X = np.random.rand(100, 200)\n X = np.array(X, dtype=np.float32)\n y = np.random.randint(num_classes, size=100)\n\n model.fit(X, y)\n\n tvm_model = hummingbird.ml.convert(model, \"tvm\", X, extra_config={constants.TVM_MAX_FUSE_DEPTH: 30})\n self.assertTrue(tvm_model is not None)\n\n np.testing.assert_allclose(model.predict(X), tvm_model.predict(X), rtol=1e-6, atol=1e-3)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "sklearn.linear_model.SGDClassifier", "sklearn.linear_model.LinearRegression", "numpy.random.seed", "sklearn.linear_model.LogisticRegressionCV", "numpy.random.rand", "sklearn.linear_model.LogisticRegression", "numpy.array", "numpy.random.randint", "sklearn.datasets.load_iris" ] ]
paminco/paminco
[ "2b8da7cc83adde3c72af5150cf6d7294ff6fd29e" ]
[ "paminco/net/tests/test_cost.py" ]
[ "import pytest\nimport numpy as np\n\nfrom itertools import zip_longest\n\nfrom paminco.net import load_sioux\nfrom paminco.net._data_gas import temporary_gas_files\nfrom paminco.net._data_examples import (NET_SIMPLE_POLYNOMIAL,\n NET_ELECTRICAL_PIECEWISE)\nfrom paminco.net.network import Network\nfrom paminco.net.cost import EquidistantInterpolationRule, PiecewiseQuadraticCost, SymbolicCost, SimplePolynomial, BreakpointsInterpolationRule, EdgeCostInterpolation\nfrom paminco.algo.mca import MCAInterpolationRule\n\n\ndef test_costfunc_traffic():\n net = load_sioux()\n fft = net.cost.coefficients[:, 0]\n a = net.cost.coefficients[:, 4]\n coeffs = {\"a\": a, \"fft\": fft}\n F = \"x*fft + a/5 * x**5\"\n f = \"fft + a*x**4\"\n f1 = \"4*a*x**3\"\n f2 = \"12*a*x**2\"\n sc = SymbolicCost(coeffs, F, f, f1, f2, shared=net.shared)\n net.integrate_cost()\n for d in range(4):\n for i in range(5):\n x = np.random.random(net.m) * 100\n polyval = net.cost(x, d=d)\n symval = sc(x, d=d)\n assert np.allclose(polyval, symval)\n for i in range(5):\n x = np.random.random(net.m) * 1000\n polyval = net.cost(x, d=d)\n symval = sc(x, d=d)\n assert np.allclose(polyval, symval)\n for i in range(5):\n x = np.random.random(net.m) * 10000\n polyval = net.cost(x, d=d)\n symval = sc(x, d=d)\n assert np.allclose(polyval, symval)\n\n\ndef test_costfunc_gas():\n # Open Gas40 network\n with temporary_gas_files(\"gas40\") as tmpfiles:\n gas40 = Network.from_gaslib(*tmpfiles)\n F = \"beta * x * abs(x)\"\n f = \"2 * beta * abs(x)\"\n f1 = \"2 * beta * x / abs(x)\"\n f2 = \"0\"\n beta = gas40.cost.coefficients[:, 2]\n sc = SymbolicCost({\"beta\": beta}, F, f, f1, f2, shared=gas40.shared)\n for d in range(4):\n for _ in range(5):\n x = np.random.random(gas40.m) * 100\n polyval = gas40.cost(x, d=d)\n symval = sc(x, d=d)\n assert np.allclose(polyval, symval)\n for _ in range(5):\n x = np.random.random(gas40.m) * 1000\n polyval = gas40.cost(x, d=d)\n symval = sc(x, d=d)\n assert np.allclose(polyval, symval)\n for _ in range(5):\n x = np.random.random(gas40.m) * 10000\n polyval = gas40.cost(x, d=d)\n symval = sc(x, d=d)\n assert np.allclose(polyval, symval)\n\n\[email protected](\"d\", [1, 2, 3])\ndef test_derivation_integration(d):\n net = load_sioux()\n c2 = net.cost.integrate(d=d)\n c3 = c2.differentiate(d=d)\n f = np.random.random(net.m) * 10000\n assert np.allclose(c3(f), net.cost(f))\n assert np.allclose(c3(f, d=1), net.cost(f, d=1))\n \n # TODO-PW: gas networks\n\n\ndef test_polycost_exact():\n \"\"\"\n Tests the SimplePolynomial and PolynomialCost classes against\n precalculated values of the polynomials (1+x)^i, i = 1, ..., 10\n for x = -5, ..., 5\n \"\"\"\n target_vals = np.array([[-4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6],\n [16, 9, 4, 1, 0, 1, 4, 9, 16, 25, 36],\n [-64, -27, -8, -1, 0, 1, 8, 27, 64, 125, 216],\n [256, 81, 16, 1, 0, 1, 16, 81, 256, 625, 1296],\n [-1024, -243, -32, -1, 0, 1, 32, 243, 1024, 3125, 7776],\n [4096, 729, 64, 1, 0, 1, 64, 729, 4096, 15625, 46656],\n [-16384, -2187, -128, -1, 0, 1, 128, 2187, 16384, 78125, 279936],\n [65536, 6561, 256, 1, 0, 1, 256, 6561, 65536, 390625, 1679616],\n [-262144, -19683, -512, -1, 0, 1, 512, 19683, 262144, 1953125, 10077696],\n [1048576, 59049, 1024, 1, 0, 1, 1024, 59049, 1048576, 9765625, 60466176]]).T\n\n coeff_raw_data = [[1, 1],\n [1, 2, 1],\n [1, 3, 3, 1],\n [1, 4, 6, 4, 1],\n [1, 5, 10, 10, 5, 1],\n [1, 6, 15, 20, 15, 6, 1],\n [1, 7, 21, 35, 35, 21, 7, 1],\n [1, 8, 28, 56, 70, 56, 28, 8, 1],\n [1, 9, 36, 84, 126, 126, 84, 36, 9, 1],\n [1, 10, 45, 120, 210, 252, 210, 
120, 45, 10, 1]]\n\n coefficients = np.array(list(zip_longest(*coeff_raw_data, fillvalue=0))).T\n signed = [False for _ in range(len(coefficients))]\n\n m = len(coefficients)\n m_edges = [[str(e), str(e + 1)] for e in range(m)]\n\n dummy_net = Network(m_edges, cost_data=(coefficients, signed))\n polynomials = [SimplePolynomial(coefficients[i], signed[i]) for i in range(len(coefficients))]\n\n for i, x in enumerate(range(-5, 6)):\n poly_result = np.array([p(x) for p in polynomials])\n assert np.array_equal(poly_result, dummy_net.cost(x))\n assert np.array_equal(poly_result, target_vals[i])\n\n\ndef test_signed_polycost():\n \"\"\"Test signed polycost/simple polynomials against precalculated values.\"\"\"\n target_vals = np.array([[335, 169, 71, 23, 7, 5, 11, 39, 107, 233, 435],\n [315, 156, 63, 18, 3, 0, 9, 42, 117, 252, 465],\n [-315, -156, -63, -18, -3, 0, 9, 42, 117, 252, 465]]).T\n\n coefficients = np.array([[5, 1, 2, 3], [0, 3, 3, 3], [0, 3, 3, 3]])\n signed = [True, True, False]\n\n m = len(coefficients)\n m_edges = [[str(e), str(e + 1)] for e in range(m)]\n\n dummy_net = Network(m_edges, cost_data=(coefficients, signed))\n polynomials = [SimplePolynomial(coefficients[i], signed[i]) for i in range(len(coefficients))]\n\n for i, x in enumerate(range(-5, 6)):\n poly_result = np.array([p(x) for p in polynomials])\n assert np.array_equal(poly_result, dummy_net.cost(x))\n assert np.array_equal(poly_result, target_vals[i])\n\n\ndef numerical_function_compare(f1, f2, a, b, k, exact=False, x_shape=None):\n \"\"\"Compare two functions f1 and f2 by checking values numerically.\n \n Inserts ``k`` values between ``a`` and ``b`` (including both) into the\n functions ``f1`` and ``f2``.\n If ``exact`` is set to True, the values must match exactly, otherwise\n numpy.isclose is used. 
If ``x_shape`` is set to something else than\n None, instead of a real value x from [a, b] a numpy array of shape\n ``x_shape`` with constant value x is inserted into f1 and f2.\n \"\"\"\n step = (b - a) / (k - 1)\n xs = np.arange(a, b + step, step)\n for x in xs:\n if x_shape is not None:\n x = np.full(x_shape, x)\n if exact:\n if not np.array_equal(f1(x), f2(x)):\n return False\n else:\n if not np.allclose(f1(x), f2(x)):\n return False\n else:\n if exact:\n if not f1(x) == f2(x):\n return False\n else:\n if not np.isclose(f1(x), f2(x)):\n return False\n return True\n\n\ndef test_piecewise_quadratic_cost():\n net = Network.from_xml(NET_ELECTRICAL_PIECEWISE)\n\n F = [lambda x: 0.5 * x * x if x < 3 else 2.5 * x * x - 12 * x + 18,\n lambda x: 0.5 * x * x if x < 2 else 1.5 * x * x - 4 * x + 4,\n lambda x: 0.5 * x * x if x < 1 else 2 * x * x - 3 * x + 1.5]\n\n f = [lambda x: x if x < 3 else 5 * x - 12,\n lambda x: x if x < 2 else 3 * x - 4,\n lambda x: x if x < 1 else 4 * x - 3]\n\n def target_cost(x): return np.array([F[i](x[i]) for i in range(net.m)])\n def target_der(x): return np.array([f[i](x[i]) for i in range(net.m)])\n\n for i in range(net.m):\n assert numerical_function_compare(F[i], net.cost[i], 0, 10, 1000)\n def der(x): return net.cost[i](x, d=1)\n assert numerical_function_compare(f[i], der, 0, 10, 1000)\n\n assert numerical_function_compare(target_cost, net.cost, 0, 10, 1000, x_shape=net.m)\n def der(x): return net.cost(x, d=1)\n assert numerical_function_compare(target_der, der, 0, 10, 1000, x_shape=net.m)\n\n\[email protected](\n \"coefficients, smoothness\",\n [\n (\n np.array([[-np.inf, -np.inf, np.inf, np.inf],\n [4, 3, 1, 0],\n [2, 7, -1, 1],\n [np.inf, np.inf, np.inf, 42]]),\n [True, True, False]\n ),\n (\n np.array([[4, 3, 1, 0],\n [2, 7, -1, 1]]),\n [True, True, False]\n ),\n (\n np.array([[4, 3, 1, 0],\n [2, 7, -1, 1],\n [3, 1, 7, 2]]),\n [True, False, False]\n ),\n (\n np.array([[4, 3, 1, 0],\n [2, 7, -1, 1],\n [3, 1, 42, 2]]),\n [False, False, False]\n ),\n (\n np.array([[-np.inf, -np.inf, np.inf, -np.inf],\n [2, 2, 2, -1],\n [2, 2, 2, 1],\n [np.inf, np.inf, np.inf, 8]]),\n [True, True, True]\n )\n ]\n)\ndef test_piecewise_quadratic_single_edge_smoothness(coefficients, smoothness, margin=0):\n ei = np.full(len(coefficients), 0)\n pwq = PiecewiseQuadraticCost((coefficients, ei))\n for k, s in enumerate(smoothness):\n assert pwq.is_smooth(k=k, margin=margin) == s\n\n\ndef test_breakpoints_interpolation():\n net = Network.from_xml(NET_SIMPLE_POLYNOMIAL)\n\n x_max = 5000\n bps = np.unique(np.random.randint(-x_max + 1, x_max, 100))\n\n bp_targets = []\n\n # Create target breakpoints for all edges\n # By selecting the appropriate breakpoints from bp\n # and adding possible artificial breakpoints\n for e in range(net.m):\n l, u = net.edges.lb[e], net.edges.ub[e]\n pre = np.array([-np.inf, max(l, -x_max)])\n selected_bps = bps[(bps > l) & (bps < u)]\n post = np.array([]) if u == np.inf else np.array([u])\n bp_targets.append(np.concatenate([pre, selected_bps, post]))\n \n irule = BreakpointsInterpolationRule(bps)\n pwqc = net.cost.interpolate(irule, x_max=x_max)\n\n # Get the breakpoints from the interpolated cost\n df = pwqc._ec.to_df()\n for i in range(len(bp_targets)):\n bp = np.array((df[df[\"edge\"] == i][\"tau\"]))\n assert len(bp) == len(bp_targets[i])\n assert np.array_equal(bp, np.array(bp_targets[i]))\n\n\ndef test_equidistant_interpolation():\n net = Network.from_xml(NET_SIMPLE_POLYNOMIAL)\n\n x_max = 5000\n delta = 10\n\n irule = 
EquidistantInterpolationRule(delta)\n pwqc = net.cost.interpolate(irule, x_max=x_max)\n df = pwqc._ec.to_df()\n\n # Check breakpoints by testing if they equal arange\n for i in range(net.m):\n bp = np.array((df[df[\"edge\"] == i][\"tau\"]))\n lo, up = net.edges.lb[i], net.edges.ub[i]\n lo = max(lo, -x_max)\n up = min(up, x_max)\n tbp = np.arange(lo, up + 1, delta)\n tbp = np.concatenate([np.array([-np.inf]), tbp])\n assert np.array_equal(bp, tbp)\n\n\ndef test_mca_interpolation_rule():\n net = Network.from_xml(NET_SIMPLE_POLYNOMIAL)\n\n bp_targets = [[-np.inf, 0, 2, 8, 26, 80, 242, 728, 2186, 6560, 19682,\n 59048, 177146, 531440, 1594322, 4782968, 14348906],\n [-np.inf, 0, 2, 6.47213578e+00,\n 1.95700045e+01, 5.87610787e+01, 1.76300253e+02, 5.28906430e+02,\n 1.58672118e+03, 4.76016418e+03, 1.42804927e+04, 4.28414783e+04,\n 1.28524435e+05, 3.85573305e+05, 1.15671991e+06, 3.47015974e+06,\n 1.04104792e+07, 3.12314377e+07],\n [-np.inf, -1.43489060e+07, -4.78296867e+06, -1.59432289e+06,\n -5.31440963e+05, -1.77146988e+05, -5.90489959e+04, -1.96829986e+04,\n -6.56099949e+03, -2.18699968e+03, -7.28999435e+02, -2.42998440e+02,\n -8.09953646e+01, -2.69861071e+01, -8.95827470e+00, -2.87339972e+00,\n -5.70660096e-01, 0.00000000e+00, 2.00000000e+00, 6.47213578e+00,\n 1.95700045e+01, 5.87610787e+01, 1.76300253e+02, 5.28906430e+02,\n 1.58672118e+03, 4.76016418e+03, 1.42804927e+04, 4.28414783e+04,\n 1.28524435e+05, 3.85573305e+05, 1.15671991e+06, 3.47015974e+06,\n 1.04104792e+07, 3.12314377e+07],\n [-np.inf, -1000, -3.33332333e+02, -1.11107778e+02,\n -3.70269251e+01, -1.23152862e+01, -4.02349000e+00, -1.07989912e+00,\n 0.00000000e+00, 2.00000000e+00, 6.47213578e+00, 1.95700045e+01,\n 5.87610787e+01, 1.76300253e+02, 5.28906430e+02, 1.58672118e+03,\n 4.76016418e+03, 1.42804927e+04, 4.28414783e+04, 1.28524435e+05,\n 3.85573305e+05, 1.15671991e+06, 3.47015974e+06, 1.04104792e+07,\n 3.12314377e+07],\n [-np.inf, -1.43489060e+07, -4.78296867e+06, -1.59432289e+06,\n -5.31440963e+05, -1.77146988e+05, -5.90489959e+04, -1.96829986e+04,\n -6.56099949e+03, -2.18699968e+03, -7.28999435e+02, -2.42998440e+02,\n -8.09953646e+01, -2.69861071e+01, -8.95827470e+00, -2.87339972e+00,\n -5.70660096e-01, 0.00000000e+00, 2.00000000e+00, 6.47213578e+00,\n 1.95700045e+01, 5.87610787e+01, 1.76300253e+02, 5.28906430e+02,\n 1000],\n [-np.inf, -1000, -3.33332333e+02, -1.11107778e+02,\n -3.70269251e+01, -1.23152862e+01, -4.02349000e+00, -1.07989912e+00,\n 0.00000000e+00, 2.00000000e+00, 6.47213578e+00,\n 1.95700045e+01, 5.87610787e+01, 1.76300253e+02, 5.28906430e+02,\n 1000]]\n exact = [0]\n\n x_max = 14348906\n\n rule = MCAInterpolationRule(2, 3 * net.m * x_max, net.m, x_max)\n pwqc = net.cost.interpolate(rule)\n # Get the breakpoints from the interpolated cost\n df = pwqc._ec.to_df()\n for i in range(len(bp_targets)):\n bp = np.array((df[df[\"edge\"] == i][\"tau\"]))\n assert len(bp) == len(bp_targets[i])\n if i in exact:\n assert np.array_equal(bp, np.array(bp_targets[i]))\n else:\n assert np.allclose(bp, np.array(bp_targets[i]))\n\n\[email protected](\"rng\", [42, 4242, 424242, 0, 123])\ndef test_edge_interpolation(rng):\n \"\"\"Test the computation of the computation of piecewise quadratic pieces.\n \n Creates random polynomials and interpolates them with equidistant steps.\n Checks consistency by testing that\n a) the derivative of the cost function coincides with the linear interpolation\n 2 * a * x + b at the breakpoints\n b) the linear interpolation is continuous\n\n Does not check the actual function but only the 
interpolation of\n the derivative. In particular, no test of the offsets.\n \"\"\"\n rng = np.random.default_rng(rng)\n degree = rng.integers(3, 6)\n coeffs = rng.integers(-10, 10, degree + 1)\n ec = SimplePolynomial(coeffs)\n step = 10\n irule = EquidistantInterpolationRule(step)\n eci = EdgeCostInterpolation(0, ec, irule, 1000 + step, 0, 1000 + step)\n coeff = eci.interpolate()\n a = 2 * coeff[1:-1, 0]\n b = coeff[1:-1, 1]\n tau = coeff[1:-1, 3]\n for i, t in enumerate(tau):\n assert ec.ddx(t) == a[i] * t + b[i]\n if i > 0:\n assert a[i] * t + b[i] == a[i - 1] * t + b[i - 1]\n" ]
[ [ "numpy.allclose", "numpy.random.default_rng", "numpy.arange", "numpy.random.random", "numpy.array_equal", "numpy.array", "numpy.concatenate", "numpy.random.randint", "numpy.full" ] ]
duboviy/misc
[ "4cd8cbcf12fc29dd2f12699fbd2f3dd738b5e4b5" ]
[ "hist.py" ]
[ "\"\"\" Horizontal histogram plotting function. \"\"\"\n\nfrom __future__ import absolute_import\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef horizontal_hist(items, title=None, axis_label=None, color=None, height=10, width=20, reverse=False):\n \"\"\"\n Plots a histogram of values and frequencies.\n\n Arguments:\n items (iterable[any]) => Example, [1, 2, 3, 1, 2]\n title (Optional[str]) => Example, \"Resulting histogram\".\n axis_label (Optional[str]) => Example, \"y-axis\".\n color (Optional[str]) => Default: matplotlib's default plot color, a royal blue\n height (Optional[int]) => Default: 10\n width (Optional[int]) => Default: 20\n reverse (Optional[bool]) => From top to bottom in order of decreasing frequency or not.\n\n Returns:\n None, however a matplotlib figure should be produced.\n \"\"\"\n\n unique_items, item_counts = np.unique(items, return_counts=True)\n item_counts, unique_items = zip(*sorted(zip(item_counts, unique_items), reverse=reverse))\n\n pos = np.arange(len(unique_items)) + 0.5\n plt.figure(figsize=(width, height))\n plt.barh(pos, item_counts, align='center', color=color)\n plt.yticks(pos, unique_items)\n plt.xlabel('Frequency')\n plt.ylabel(axis_label) if axis_label else None\n plt.title(title) if title else None\n\n plt.show()\n\n\nif __name__ == '__main__':\n items = range(1, 10) * 100 + range(11, 20) * 50 + range(21, 30) * 25\n horizontal_hist(items, title=\"Resulting histogram\")\n" ]
[ [ "numpy.unique", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.barh", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.yticks", "matplotlib.pyplot.xlabel" ] ]
takaratruong/emergent-generalization
[ "20de15ee6514dba48b48c76d8cd9289d62966932" ]
[ "code/train.py" ]
[ "\"\"\"\nTrain an RNN decoder to make binary predictions;\nthen train an RNN language model to generate sequences\n\"\"\"\n\n\nimport contextlib\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport models\nimport util\nimport data\nimport os\nimport vis\nimport emergence\n\nimport pandas as pd\nimport io_util\n\n# Logging\nimport logging\n\nlogging.basicConfig(\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\n level=logging.INFO,\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n)\n\n\ndef convert_lang_to_numeric(lang, lang_length, pad_val=-1, skip_sos_eos=True):\n \"\"\"\n Convert lang to numeric, with custom padding, for later language analysis\n \"\"\"\n lang_i = lang.argmax(2)\n for i, length in enumerate(lang_length):\n\n if skip_sos_eos:\n # zero out EOS\n lang_i[i, length - 1 :] = pad_val\n else:\n lang_i[i, length:] = pad_val\n\n # shave off SOS, ending EOS if present\n if skip_sos_eos:\n lang_i = lang_i[:, 1:-1]\n\n return lang_i\n\n\ndef get_true_lang(batch, dataset, args, join=True):\n spk_inp, spk_y, lis_inp, lis_y, true_lang, md, idx = batch\n true_lang_text = dataset.to_text(true_lang, join=join)\n return true_lang_text\n\n\ndef get_positive_examples(inp, y):\n \"\"\"\n inp -> batch_size x n_examples x feat_size\n y -> batch_size x y\n\n output\n \"\"\"\n where_zero = np.where(y.sum(1) == 0)[0]\n y[where_zero] = 1\n occur_rows, occur_cols = np.where(y)\n row_indices, occur_col_indices = np.unique(occur_rows, return_index=True)\n assert (row_indices == np.arange(len(row_indices))).all()\n assert len(occur_col_indices) == len(y)\n col_indices = occur_cols[occur_col_indices]\n sel = inp[row_indices, col_indices]\n return sel\n\n\ndef subsample(items, idx):\n return [items[i] for i in idx]\n\n\ndef compute_lang_metrics(\n all_lang,\n dataset,\n args,\n attrs=None,\n reprs=None,\n attrs_numeric=None,\n toks=None,\n max_analysis_length=1000,\n):\n lang_metrics = {}\n if all_lang.shape[0] > max_analysis_length:\n idx = np.random.choice(\n all_lang.shape[0], size=max_analysis_length, replace=False\n )\n\n all_lang = all_lang.iloc[idx].reset_index()\n\n if attrs is not None:\n attrs = subsample(attrs, idx)\n if toks is not None:\n toks = subsample(toks, idx)\n if reprs is not None:\n reprs = subsample(reprs, idx)\n\n # topographic similarity between ground truth language and tokens\n # only do it if the ground truth language is meaningful\n if dataset.name == \"shapeworld\":\n langts = emergence.topsim(\n all_lang[\"true_lang\"], toks, meaning_distance_fn=\"edit\"\n )\n lang_metrics[\"langts\"] = langts\n\n if dataset.name == \"shapeworld\":\n\n def compute_hd(tl1, tl2):\n # Remove SOS, EOS\n tl1 = \" \".join(tl1[1:-1])\n tl2 = \" \".join(tl2[1:-1])\n return dataset.concept_distance(tl1, tl2)\n\n elif dataset.name == \"cub\":\n\n def compute_hd(tl1, tl2):\n tl1 = int(tl1[1])\n tl2 = int(tl2[1])\n return dataset.concept_distance(tl1, tl2)\n\n if dataset.concept_distances is not None:\n hd = emergence.topsim(\n all_lang[\"true_lang\"], toks, meaning_distance_fn=compute_hd\n )\n lang_metrics[\"hausdorff\"] = hd\n\n if attrs is not None:\n # topographic similarity between meanings and tokens\n ts = emergence.topsim(\n attrs, toks, meaning_distance_fn=dataset.meaning_distance_fn\n )\n lang_metrics[\"ts\"] = ts\n\n # topographic similarity between reprs and attributes\n # For random sets later, worth disentangling prototype repr from\n # individual inputs repr\n reprts = emergence.topsim(\n attrs,\n reprs,\n 
meaning_distance_fn=dataset.meaning_distance_fn,\n message_distance_fn=\"euclidean\",\n )\n lang_metrics[\"reprts\"] = reprts\n\n return lang_metrics\n\n\ndef compute_metrics_by_md(all_lang, md_vocab=None):\n metrics_by_md = {}\n per_md_acc = all_lang[[\"md\", \"acc\"]].groupby(\"md\").mean()\n for i, md_row in per_md_acc.iterrows():\n if md_vocab is None:\n md_name = str(md_row.name)\n else:\n md_name = md_vocab[\"i2w\"][md_row.name]\n md_key = f\"acc_md_{md_name}\"\n metrics_by_md[md_key] = md_row[\"acc\"]\n return metrics_by_md\n\n\ndef log_epoch_summary(epoch, split, metrics):\n logging.info(\n \"Epoch {}\\t{} {}\".format(\n epoch,\n split.upper(),\n \" \".join(\"{}: {:.4f}\".format(m, v) for m, v in metrics.items()),\n )\n )\n\n\ndef log_epoch_progress(epoch, batch_i, batch_size, dataloader, stats):\n meter_str = \" \".join(f\"{k}: {v.avg:.3f}\" for k, v in stats.meters.items())\n data_i = batch_i * batch_size\n data_total = len(dataloader.dataset)\n pct = round(100 * batch_i / len(dataloader))\n logging.info(f\"Epoch {epoch} [{data_i}/{data_total} ({pct}%)] {meter_str}\")\n\n\ndef init_metrics():\n \"\"\"\n Initialize the metrics for this training run. This is a defaultdict, so\n metrics not specified here can just be appended to/assigned to during\n training.\n Returns\n -------\n metrics : `collections.defaultdict`\n All training metrics\n \"\"\"\n metrics = {}\n metrics[\"best_acc\"] = 0.0\n metrics[\"best_val_acc\"] = 0.0\n metrics[\"best_val_same_acc\"] = 0.0\n metrics[\"best_loss\"] = float(\"inf\")\n metrics[\"best_epoch\"] = 0\n return metrics\n\n\ndef run(\n split,\n epoch,\n pair,\n optimizer,\n dataloaders,\n args,\n random_state=None,\n force_no_train=False,\n):\n \"\"\"\n Run the model for a single epoch.\n\n Parameters\n ----------\n split : ``str``\n The dataloader split to use. 
Also determines model behavior if e.g.\n ``split == 'train'`` then model will be in train mode/optimizer will be\n run.\n epoch : ``int``\n current epoch\n model : ``torch.nn.Module``\n the model you are training/evaling\n optimizer : ``torch.nn.optim.Optimizer``\n the optimizer\n criterion : ``torch.nn.loss``\n the loss function\n dataloaders : ``dict[str, torch.utils.data.DataLoader]``\n Dictionary of dataloaders whose keys are the names of the ``split``s\n and whose values are the corresponding dataloaders\n args : ``argparse.Namespace``\n Arguments for this experiment run\n random_state : ``np.random.RandomState``\n The numpy random state in case anything stochastic happens during the\n run\n\n Returns\n -------\n metrics : ``dict[str, float]``\n Metrics from this run; keys are statistics and values are their average\n values across the batches\n \"\"\"\n training = (split == \"train\") and not force_no_train\n dataloader = dataloaders[split]\n torch.set_grad_enabled(training)\n pair.train(mode=training)\n\n stats = util.Statistics()\n\n all_lang = []\n all_toks = [] # language, unjoined text form, ragged\n # FIXME - make this one class\n if dataloader.dataset.name == \"cub\":\n all_attrs = []\n all_reprs = [] # representations\n else:\n all_attrs = None\n all_reprs = None\n\n if training:\n optimizer.zero_grad()\n this_epoch_eps = max(0.0, args.eps - (epoch * args.eps_anneal))\n this_epoch_uniform_weight = max(\n 0.0, args.uniform_weight - (epoch * args.uniform_weight_anneal)\n )\n this_epoch_softmax_temp = max(\n 1.0, args.softmax_temp - (epoch * args.softmax_temp_anneal)\n )\n else:\n this_epoch_eps = 0.0\n this_epoch_uniform_weight = 0.0\n this_epoch_softmax_temp = 1.0\n\n for batch_i, batch in enumerate(dataloader):\n spk_inp, spk_y, lis_inp, lis_y, true_lang, md, idx = batch\n batch_size = spk_inp.shape[0]\n\n # Determine what's input\n if dataloader.dataset.name == \"shapeworld\":\n spk_inp = spk_inp.float() / 255\n lis_inp = lis_inp.float() / 255\n else:\n spk_inp = spk_inp.float()\n lis_inp = lis_inp.float()\n\n spk_y = spk_y.float()\n lis_y = lis_y.float()\n\n if args.cuda:\n spk_inp = spk_inp.cuda()\n spk_y = spk_y.cuda()\n lis_inp = lis_inp.cuda()\n lis_y = lis_y.cuda()\n\n if args.listener_only:\n lis_scores = pair.listener(lis_inp, None)\n elif args.copy_listener:\n speaker_emb = pair.speaker(spk_inp, spk_y)\n lis_scores = pair.listener(lis_inp, speaker_emb)\n else:\n (lang, lang_length), states = pair.speaker(\n spk_inp,\n spk_y,\n max_len=args.max_lang_length,\n eps=this_epoch_eps,\n softmax_temp=this_epoch_softmax_temp,\n uniform_weight=this_epoch_uniform_weight,\n )\n lis_scores = pair.listener(lis_inp, lang, lang_length)\n\n # Evaluate loss and accuracy\n if args.reference_game_xent:\n # Take only 0th listener score + after midpoint. 
Then do cross\n # entropy\n assert lis_scores.shape[1] % 2 == 0\n midp = lis_scores.shape[1] // 2\n lis_scores_xent = torch.cat((lis_scores[:, :1], lis_scores[:, midp:]), 1)\n zeros = torch.zeros(batch_size, dtype=torch.int64, device=lis_scores.device)\n this_loss = pair.xent_criterion(lis_scores_xent, zeros)\n lis_pred = lis_scores_xent.argmax(1)\n per_game_acc = (lis_pred == 0).float().cpu().numpy()\n this_acc = per_game_acc.mean()\n else:\n this_loss = pair.bce_criterion(lis_scores, lis_y)\n lis_pred = (lis_scores > 0).float()\n per_game_acc = (lis_pred == lis_y).float().mean(1).cpu().numpy()\n this_acc = per_game_acc.mean()\n\n # Save language\n if args.use_lang:\n lang_i = lang.argmax(2)\n lang_text_unjoined = util.to_emergent_text(lang_i)\n lang_text = [\" \".join(toks) for toks in lang_text_unjoined]\n else:\n lang_text_unjoined = [[\"N/A\"] for _ in range(batch_size)]\n lang_text = [\"N/A\" for _ in range(batch_size)]\n true_lang_text = get_true_lang(batch, dataloader.dataset, args, join=False)\n true_lang_text_joined = [\" \".join(t) for t in true_lang_text]\n\n # Game difficulty/other metadata indicator\n all_lang.extend(zip(lang_text, true_lang_text, per_game_acc, md.numpy()))\n\n # Get attributes\n all_toks.extend(lang_text_unjoined)\n if dataloader.dataset.name == \"cub\":\n attrs = md.numpy()[:, 1:]\n all_attrs.extend(attrs)\n all_reprs.extend(states.detach().cpu().numpy())\n\n if args.joint_training:\n # Also train speaker on classification task\n spk_scores = pair.speaker.classify_from_states(states, lis_inp)\n spk_loss = pair.bce_criterion(spk_scores, lis_y)\n spk_pred = (spk_scores > 0).float()\n spk_per_game_acc = (spk_pred == lis_y).float().mean(1).cpu().numpy()\n spk_acc = spk_per_game_acc.mean()\n stats.update(spk_loss=spk_loss, spk_acc=spk_acc)\n comb_loss = this_loss + args.joint_training_lambda * spk_loss\n else:\n comb_loss = this_loss\n\n if training:\n comb_loss.backward()\n\n if (batch_i + 1) % args.accum_steps == 0:\n torch.nn.utils.clip_grad_norm_(pair.parameters(), args.clip)\n optimizer.step()\n optimizer.zero_grad()\n backpropped = True\n else:\n backpropped = False\n\n if batch_i % args.log_interval == 0:\n log_epoch_progress(epoch, batch_i, batch_size, dataloader, stats)\n\n stats.update(\n loss=this_loss, acc=this_acc, batch_size=batch_size, combined_loss=comb_loss\n )\n\n if training and not backpropped:\n torch.nn.utils.clip_grad_norm_(pair.parameters(), args.clip)\n optimizer.step()\n optimizer.zero_grad()\n\n # Compute metrics + collect generation language\n metrics = stats.averages()\n all_lang = pd.DataFrame.from_records(\n all_lang,\n columns=[\"lang\", \"true_lang\", \"acc\", \"md\"],\n )\n\n if args.use_lang:\n # Compute emergent communication statistics\n # TODO - this should generally be a \"meaning preprocess\" function\n if dataloader.dataset.name == \"cub\":\n attrs_numeric = dataloader.dataset.attr_to_numeric(all_attrs)\n else:\n attrs_numeric = None\n\n lang_metrics = compute_lang_metrics(\n all_lang,\n dataloader.dataset,\n args,\n attrs=all_attrs,\n reprs=all_reprs,\n attrs_numeric=attrs_numeric,\n toks=all_toks,\n )\n metrics.update(lang_metrics)\n\n if dataloader.dataset.name == \"shapeworld\":\n by_md_metrics = compute_metrics_by_md(\n all_lang, md_vocab=dataloader.dataset.metadata_vocab\n )\n metrics.update(by_md_metrics)\n\n log_epoch_summary(epoch, split, metrics)\n\n if args.vis:\n vis.report(\n spk_inp.cpu(),\n spk_y.cpu(),\n lis_inp.cpu(),\n lis_y.cpu(),\n dataloader.dataset,\n epoch,\n split,\n {\"speaker\": lang_text},\n 
true_lang_text_joined,\n {\"speaker\": lis_pred},\n exp_dir=os.path.join(\"exp\", args.name),\n )\n\n clean_language(all_lang)\n return metrics, all_lang\n\n\ndef clean_language(all_lang_df):\n def clean_lang(lang):\n # Startswith/endswith\n if lang.startswith(\"<s>\"):\n lang = lang[3:]\n if lang.endswith(\"</s>\"):\n lang = lang[:-4]\n return lang\n\n def clean_true_lang(true_lang):\n return \" \".join(true_lang[1:-1])\n\n all_lang_df[\"lang\"] = all_lang_df[\"lang\"].apply(clean_lang)\n all_lang_df[\"true_lang\"] = all_lang_df[\"true_lang\"].apply(clean_true_lang)\n\n\nif __name__ == \"__main__\":\n args = io_util.parse_args()\n\n exp_dir = os.path.join(\"exp\", args.name)\n os.makedirs(exp_dir, exist_ok=True)\n util.save_args(args, exp_dir)\n\n dataloaders = data.loader.load_dataloaders(args)\n model_config = models.builder.build_models(dataloaders, args)\n this_game_type = data.util.get_game_type(args)\n\n run_args = (model_config[\"pair\"], model_config[\"optimizer\"], dataloaders, args)\n\n all_metrics = []\n metrics = init_metrics()\n for epoch in range(args.epochs):\n # No reset on epoch 0, but reset after epoch 2, epoch 4, etc\n if (\n args.listener_reset_interval > 0\n and (epoch % args.listener_reset_interval) == 0\n ):\n logging.info(f\"Resetting listener at epoch {epoch}\")\n model_config[\"pair\"].listener.reset_parameters()\n\n metrics[\"epoch\"] = epoch\n\n # Train\n train_metrics, lang = run(\"train\", epoch, *run_args)\n util.update_with_prefix(metrics, train_metrics, \"train\")\n\n # Eval across seen/unseen splits, and all game configurations\n for game_type in [\"ref\", \"setref\", \"concept\"]:\n if args.no_cross_eval and game_type != this_game_type:\n continue\n for split in [\"val\", \"test\"]:\n split_metrics = defaultdict(list)\n\n for split_type in [\"\", \"_same\"]:\n sname = f\"{split}{split_type}_{game_type}\"\n if sname in dataloaders:\n eval_metrics, eval_lang = run(sname, epoch, *run_args)\n util.update_with_prefix(metrics, eval_metrics, sname)\n if this_game_type == game_type:\n # Default\n util.update_with_prefix(\n metrics, eval_metrics, f\"{split}{split_type}\"\n )\n\n for metric, value in eval_metrics.items():\n split_metrics[metric].append(value)\n\n if sname == f\"test_{this_game_type}\":\n # Store + concatenate test language\n lang = pd.concat((lang, eval_lang), axis=0)\n\n # Average across seen and novel\n split_metrics = {k: np.mean(v) for k, v in split_metrics.items()}\n util.update_with_prefix(\n metrics, split_metrics, f\"{split}_avg_{game_type}\"\n )\n if this_game_type == game_type:\n # Default\n util.update_with_prefix(metrics, split_metrics, f\"{split}_avg\")\n\n # model_config['scheduler'].step(metrics[\"val_avg_loss\"])\n\n # Use validation accuracy to choose the best model.\n is_best = metrics[\"val_avg_acc\"] > metrics[\"best_acc\"]\n if is_best:\n metrics[\"best_acc\"] = metrics[\"val_avg_acc\"]\n metrics[\"best_loss\"] = metrics[\"val_avg_loss\"]\n metrics[\"best_epoch\"] = epoch\n if args.use_lang:\n lang.to_csv(os.path.join(exp_dir, \"best_lang.csv\"), index=False)\n # Save the model\n model_fname = os.path.join(exp_dir, \"best_model.pt\")\n torch.save(model_config[\"pair\"].state_dict(), model_fname)\n\n if epoch % args.save_interval == 0:\n model_fname = os.path.join(exp_dir, f\"{epoch}_model.pt\")\n torch.save(model_config[\"pair\"].state_dict(), model_fname)\n if args.use_lang:\n lang.to_csv(os.path.join(exp_dir, f\"{epoch}_lang.csv\"), index=False)\n\n # Additionally track best for splits separately\n metrics[\"best_val_acc\"] 
= max(metrics[\"best_val_acc\"], metrics[\"val_acc\"])\n if \"val_same_acc\" in metrics:\n metrics[\"best_val_same_acc\"] = max(\n metrics[\"best_val_same_acc\"], metrics[\"val_same_acc\"]\n )\n\n all_metrics.append(metrics.copy())\n\n if args.wandb:\n import wandb\n\n wandb.log(metrics)\n\n pd.DataFrame(all_metrics).to_csv(\n os.path.join(exp_dir, \"metrics.csv\"), index=False\n )\n" ]
[ [ "torch.set_grad_enabled", "pandas.DataFrame", "numpy.random.choice", "pandas.DataFrame.from_records", "pandas.concat", "torch.zeros", "numpy.where", "torch.cat", "numpy.unique", "numpy.mean" ] ]
NEISSproject/tf_neiss
[ "50df6153d0d1f5d471dd9ec0bc52617805001f79" ]
[ "model_fn/model_fn_nlp/util_nlp/graphs_pos.py" ]
[ "import tensorflow as tf\nimport numpy as np\n\nfrom model_fn.graph_base import GraphBase\nfrom model_fn.model_fn_nlp.util_nlp.attention import Selfattention, MultiHeadAttention\nfrom model_fn.model_fn_nlp.util_nlp.transformer import EncoderLayer, Encoder, Decoder\nimport model_fn.model_fn_nlp.util_nlp.graphs_bert_lm as bert_graphs\nimport model_fn.util_model_fn.optimizer as optimizers\n\n\ndef create_padding_mask(seq):\n seq_mask = tf.cast(tf.sequence_mask(seq), tf.int32)\n masked = tf.cast(tf.math.equal(seq_mask, 0), tf.float32)\n\n # add extra dimensions to add the padding\n # to the attention logits.\n return masked[:, tf.newaxis, tf.newaxis, :]\n\n\ndef get_angles(pos, i, d_model):\n angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))\n return pos * angle_rates\n\n\ndef positional_encoding(position, d_model):\n angle_rads = get_angles(np.arange(position)[:, np.newaxis],\n np.arange(d_model)[np.newaxis, :],\n d_model)\n\n # apply sin to even indices in the array; 2i\n angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])\n\n # apply cos to odd indices in the array; 2i+1\n angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])\n\n pos_encoding = angle_rads[np.newaxis, ...]\n\n return tf.cast(pos_encoding, dtype=tf.float32)\n\n\nclass KerasGraphFF3(GraphBase):\n def __init__(self, params):\n super(KerasGraphFF3, self).__init__(params)\n self._flags = params['flags']\n self.vocab_tag_size = params['num_tags']\n # declare graph_params and update from dict --graph_params\n self.graph_params[\"ff_hidden_1\"] = 128\n self.graph_params[\"ff_hidden_2\"] = 128\n self.graph_params[\"ff_hidden_3\"] = 128\n # initilize keras layer\n self._tracked_layers[\"ff_layer_1\"] = tf.keras.layers.Dense(self.graph_params[\"ff_hidden_1\"],\n activation=tf.nn.leaky_relu, name=\"ff_layer_1\")\n self._tracked_layers[\"ff_layer_2\"] = tf.keras.layers.Dense(self.graph_params[\"ff_hidden_2\"],\n activation=tf.nn.leaky_relu, name=\"ff_layer_2\")\n self._tracked_layers[\"ff_layer_3\"] = tf.keras.layers.Dense(self.graph_params[\"ff_hidden_3\"],\n activation=tf.nn.leaky_relu, name=\"ff_layer_3\")\n self._tracked_layers[\"last_layer\"] = tf.keras.layers.Dense(self.vocab_tag_size, activation=None,\n name=\"last_layer\")\n self._tracked_layers[\"softmax\"] = tf.keras.layers.Softmax()\n\n @tf.function\n def call(self, inputs, training=None, mask=None):\n sentence = inputs[\"sentence\"]\n sentencelength = inputs[\"sentencelength\"]\n # connect keras layers\n ff_layer_1_out = self._tracked_layers[\"ff_layer_1\"](sentence)\n ff_layer_2_out = self._tracked_layers[\"ff_layer_2\"](ff_layer_1_out)\n ff_layer_3_out = self._tracked_layers[\"ff_layer_3\"](ff_layer_2_out)\n logits = self._tracked_layers[\"last_layer\"](ff_layer_3_out)\n pred_ids = tf.argmax(input=logits, axis=2, output_type=tf.int32)\n probabilities = self._tracked_layers[\"softmax\"](logits)\n self._graph_out = {\"pred_ids\": pred_ids, 'probabilities': probabilities, 'logits': logits,\n \"sentencelength\": sentencelength}\n return self._graph_out\n\n\nclass SelfAtt(GraphBase):\n def __init__(self, params):\n super(SelfAtt, self).__init__(params)\n self._flags = params['flags']\n self.vocab_tag_size = params['num_tags']\n # declare graph_params and update from dict --graph_params\n self.graph_params[\"self_att_num_dims\"] = 128\n # initilize keras layer\n self._tracked_layers[\"self_attention\"] = Selfattention(params, 300, self.graph_params[\"self_att_num_dims\"], 0.5)\n self._tracked_layers[\"last_layer\"] = tf.keras.layers.Dense(self.vocab_tag_size, 
activation=None,\n name=\"last_layer\")\n self._tracked_layers[\"softmax\"] = tf.keras.layers.Softmax()\n\n @tf.function\n def call(self, inputs, training=None, mask=None):\n sentence = inputs[\"sentence\"]\n sentencelength = inputs[\"sentencelength\"]\n # connect keras layers\n self_att_out = self._tracked_layers[\"self_attention\"](inputs=sentence, training=training)\n logits = self._tracked_layers[\"last_layer\"](self_att_out)\n pred_ids = tf.argmax(input=logits, axis=2, output_type=tf.int32)\n probabilities = self._tracked_layers[\"softmax\"](logits)\n self._graph_out = {\"pred_ids\": pred_ids, 'probabilities': probabilities, 'logits': logits,\n \"sentencelength\": sentencelength}\n return self._graph_out\n\n\nclass MultiheadAtt(GraphBase):\n def __init__(self, params):\n super(MultiheadAtt, self).__init__(params)\n self._flags = params['flags']\n self.vocab_tag_size = params['num_tags']\n self.embed_dim = 300\n self.num_heads = 10\n self.max_seq_len = 200\n self.pos_encoding = positional_encoding(self.max_seq_len,\n self.embed_dim)\n\n # initilize keras layer\n self._tracked_layers[\"multihead_attention\"] = MultiHeadAttention(self.embed_dim, self.num_heads)\n self._tracked_layers[\"last_layer\"] = tf.keras.layers.Dense(self.vocab_tag_size, activation=None,\n name=\"last_layer\")\n self._tracked_layers[\"softmax\"] = tf.keras.layers.Softmax()\n\n def call(self, inputs, training=None, mask=None):\n sentence = inputs[\"sentence\"]\n sentencelength = inputs[\"sentencelength\"]\n sentencelength = sentencelength[:, 0]\n # add pos encoding\n # max_batch_seq_len = tf.shape(sentence)[1]\n # sentence += self.pos_encoding[:, :max_batch_seq_len, :]\n\n # connect keras layers\n mask = create_padding_mask(sentencelength)\n multihead_att_out, attention_weights = self._tracked_layers[\"multihead_attention\"](\n {'q': sentence, 'k': sentence, 'v': sentence, 'mask': mask})\n logits = self._tracked_layers[\"last_layer\"](multihead_att_out)\n pred_ids = tf.argmax(input=logits, axis=2, output_type=tf.int32)\n probabilities = self._tracked_layers[\"softmax\"](logits)\n self._graph_out = {\"pred_ids\": pred_ids, 'probabilities': probabilities, 'logits': logits,\n \"sentencelength\": sentencelength}\n return self._graph_out\n\n\nclass EncoderLayerAlone(GraphBase):\n def __init__(self, params):\n super(EncoderLayerAlone, self).__init__(params)\n self._flags = params['flags']\n self.vocab_tag_size = params['num_tags']\n self.embed_dim = 300\n self.num_heads = 10\n self.max_seq_len = 200\n self.pos_encoding = positional_encoding(self.max_seq_len,\n self.embed_dim)\n\n # initilize keras layer\n self._tracked_layers[\"encoder_layer\"] = EncoderLayer(self.embed_dim, self.num_heads, self.embed_dim)\n self._tracked_layers[\"last_layer\"] = tf.keras.layers.Dense(self.vocab_tag_size, activation=None,\n name=\"last_layer\")\n self._tracked_layers[\"softmax\"] = tf.keras.layers.Softmax()\n\n def call(self, inputs, training=None, mask=None):\n sentence = inputs[\"sentence\"]\n sentencelength = inputs[\"sentencelength\"]\n sentencelength = sentencelength[:, 0]\n # add pos encoding\n # max_batch_seq_len = tf.shape(sentence)[1]\n # sentence += self.pos_encoding[:, :max_batch_seq_len, :]\n\n # connect keras layers\n mask = create_padding_mask(sentencelength)\n enc_lay_out = self._tracked_layers[\"encoder_layer\"]({'x': sentence, 'mask': mask}, training)\n logits = self._tracked_layers[\"last_layer\"](enc_lay_out)\n pred_ids = tf.argmax(input=logits, axis=2, output_type=tf.int32)\n probabilities = 
self._tracked_layers[\"softmax\"](logits)\n self._graph_out = {\"pred_ids\": pred_ids, 'probabilities': probabilities, 'logits': logits,\n \"sentencelength\": sentencelength}\n return self._graph_out\n\n\nclass EncoderFull(GraphBase):\n def __init__(self, params):\n super(EncoderFull, self).__init__(params)\n self._flags = params['flags']\n self._num_layers = 8\n self._d_model = 128\n self._num_heads = 8\n self._dff = 512\n self._input_vocab_size = params['tok_size'] + 2\n self._target_vocab_size = params['num_tags'] + 2\n self._pe_input = 300\n self._rate = 0.1\n\n # initilize keras layer\n self._tracked_layers[\"encoder\"] = Encoder(self._num_layers, self._d_model, self._num_heads, self._dff,\n self._input_vocab_size, self._pe_input, self._rate)\n self._tracked_layers[\"last_layer\"] = tf.keras.layers.Dense(self._target_vocab_size, activation=None,\n name=\"last_layer\")\n self._tracked_layers[\"softmax\"] = tf.keras.layers.Softmax()\n\n def call(self, inputs, training=None, mask=None):\n sentence = inputs[\"sentence\"]\n sentencelength = inputs[\"sentencelength\"]\n sentencelength = sentencelength[:, 0]\n # add pos encoding\n #max_batch_seq_len = tf.shape(sentence)[1]\n #sentence += self.pos_encoding[:, :max_batch_seq_len, :]\n\n # connect keras layers\n mask = self.create_padding_mask_trans(sentence)\n enc_lay_out = self._tracked_layers[\"encoder\"]({'x': sentence, 'mask': mask}, training)\n logits = self._tracked_layers[\"last_layer\"](enc_lay_out)\n max=tf.reduce_max(sentencelength)\n logits=logits[:,:max]\n pred_ids = tf.argmax(input=logits, axis=2, output_type=tf.int32)\n probabilities = self._tracked_layers[\"softmax\"](logits)\n self._graph_out = {\"pred_ids\": pred_ids, 'probabilities': probabilities, 'logits': logits,\n \"sentencelength\": sentencelength}\n\n return self._graph_out\n\n def create_padding_mask_trans(self, seq):\n seq = tf.cast(tf.math.equal(seq, 0), tf.float32)\n\n # add extra dimensions to add the padding\n # to the attention logits.\n return seq[:, tf.newaxis, tf.newaxis, :]\n\n\nclass Transformer(GraphBase):\n def __init__(self, params):\n super(Transformer, self).__init__(params)\n self._flags = params['flags']\n self._num_layers = 4\n self._d_model = 128\n self._num_heads = 8\n self._dff = 512\n self._input_vocab_size = params['tok_size'] + 2\n self._target_vocab_size = params['num_tags'] + 2\n self._pe_input = 300\n self._pe_target = 300\n self._rate = 0.1\n\n self._tracked_layers[\"encoder\"] = Encoder(self._num_layers, self._d_model, self._num_heads, self._dff,\n self._input_vocab_size, self._pe_input, self._rate)\n\n self._tracked_layers[\"decoder\"] = Decoder(self._num_layers, self._d_model, self._num_heads, self._dff,\n self._target_vocab_size, self._pe_target, self._rate)\n\n self._tracked_layers[\"last_layer\"] = tf.keras.layers.Dense(self._target_vocab_size)\n self._tracked_layers[\"softmax\"] = tf.keras.layers.Softmax()\n\n def call(self, inputs, training=None, mask=None):\n inp = inputs[\"sentence\"]\n sentencelength = inputs[\"sentencelength\"]\n sentencelength = sentencelength[:, 0]\n tar = inputs[\"tar_inp\"]\n\n enc_padding_mask, look_ahead_mask, dec_padding_mask = self.create_masks(inp, tar)\n\n enc_output = self._tracked_layers[\"encoder\"]({'x': inp, 'mask': enc_padding_mask},\n training) # (batch_size, inp_seq_len, d_model)\n\n # dec_output.shape == (batch_size, tar_seq_len, d_model)\n dec_output, attention_weights = self._tracked_layers[\"decoder\"]({\n 'tar': tar, 'enc_output': enc_output, 'look_ahead_mask': look_ahead_mask, 
'padding_mask': dec_padding_mask},\n training)\n\n final_output = self._tracked_layers[\"last_layer\"](dec_output) # (batch_size, tar_seq_len, target_vocab_size)\n pred_ids = tf.argmax(input=final_output, axis=2, output_type=tf.int32)\n probabilities = self._tracked_layers[\"softmax\"](final_output)\n self._graph_out = {\"pred_ids\": pred_ids, 'probabilities': probabilities, 'logits': final_output,\n \"sentencelength\": sentencelength, \"attention_weights\": attention_weights}\n return self._graph_out\n\n def create_masks(self, inp, tar):\n # Encoder padding mask\n enc_padding_mask = self.create_padding_mask_trans(inp)\n\n # Used in the 2nd attention block in the decoder.\n # This padding mask is used to mask the encoder outputs.\n dec_padding_mask = self.create_padding_mask_trans(inp)\n\n # Used in the 1st attention block in the decoder.\n # It is used to pad and mask future tokens in the input received by\n # the decoder.\n look_ahead_mask = self.create_look_ahead_mask(tf.shape(tar)[1])\n dec_target_padding_mask = self.create_padding_mask_trans(tar)\n combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)\n\n return enc_padding_mask, combined_mask, dec_padding_mask\n\n def create_look_ahead_mask(self, size):\n mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)\n return mask # (seq_len, seq_len)\n\n def create_padding_mask_trans(self, seq):\n seq = tf.cast(tf.math.equal(seq, 0), tf.float32)\n\n # add extra dimensions to add the padding\n # to the attention logits.\n return seq[:, tf.newaxis, tf.newaxis, :]\n\nclass POSwithMiniBERT(GraphBase):\n def set_optimizer(self,params):\n \"\"\"set a custom optimizer using --optimizer, --optimizer_params,\n - overwrite if you need something different; set self.optimizer = tf.keras.optimizer._class_object\"\"\"\n\n get_optimizer = getattr(optimizers, params['flags'].optimizer)\n self.custom_optimizer = get_optimizer(params)\n self.bert_optimizer = self.custom_optimizer.get_keras_optimizer()\n #self.custom_optimizer.print_params()\n\n def __init__(self, params):\n super(POSwithMiniBERT, self).__init__(params)\n self._flags = params['flags']\n self._target_vocab_size = params['num_tags'] + 2\n self._pretrained_bert = getattr(bert_graphs, params['flags'].bert_graph)(params)\n self.set_optimizer(params)\n #self._pretrained_bert.load_weights(tf.train.latest_checkpoint(self._flags.bert_dir))\n checkpoint_obj = tf.train.Checkpoint(step=self._pretrained_bert.global_step, optimizer=self.bert_optimizer,\n model=self._pretrained_bert)\n\n if tf.train.get_checkpoint_state(self._flags.bert_dir):\n print(\"restore bert_model from bert checkpoint: {}\".format(self._flags.bert_dir))\n checkpoint_obj.restore(tf.train.latest_checkpoint(self._flags.bert_dir)).expect_partial()\n\n self._tracked_layers[\"last_layer\"] = tf.keras.layers.Dense(self._target_vocab_size, activation=None,\n name=\"last_layer\")\n self._tracked_layers[\"softmax\"] = tf.keras.layers.Softmax()\n\n def call(self, inputs, training=None, mask=None):\n sentencelength = inputs[\"sentencelength\"]\n sentencelength = sentencelength[:, 0]\n inputs[\"masked_index\"]=None\n bert_graph_out=self._pretrained_bert(inputs)\n del inputs[\"masked_index\"]\n\n final_output = self._tracked_layers[\"last_layer\"](bert_graph_out[\"enc_output\"]) # (batch_size, tar_seq_len, target_vocab_size)\n pred_ids = tf.argmax(input=final_output, axis=2, output_type=tf.int32)\n probabilities = self._tracked_layers[\"softmax\"](final_output)\n self._graph_out = {\"pred_ids\": pred_ids, 'probabilities': 
probabilities, 'logits': final_output,\n \"sentencelength\": sentencelength}\n return self._graph_out\n" ]
[ [ "tensorflow.math.equal", "tensorflow.reduce_max", "tensorflow.shape", "tensorflow.train.get_checkpoint_state", "tensorflow.ones", "tensorflow.keras.layers.Softmax", "numpy.cos", "numpy.float32", "numpy.arange", "tensorflow.cast", "tensorflow.train.latest_checkpoint", "tensorflow.sequence_mask", "tensorflow.train.Checkpoint", "tensorflow.keras.layers.Dense", "tensorflow.argmax", "numpy.sin", "tensorflow.maximum" ] ]
Twizwei/maskrcnn_detector
[ "095584f813acb40c937672ff5b63603d40095a2a" ]
[ "build/lib.linux-x86_64-3.6/maskrcnn_benchmark/engine/inference.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport datetime\nimport logging\nimport tempfile\nimport time\nimport os\nimport json\nfrom collections import OrderedDict\n\nimport torch\n\nfrom tqdm import tqdm\n\nfrom ..utils.comm import is_main_process\nfrom ..utils.comm import scatter_gather\nfrom ..utils.comm import synchronize\n\n\ndef compute_on_dataset(model, data_loader, device):\n model.eval()\n results_dict = {}\n cpu_device = torch.device(\"cpu\")\n for i, batch in tqdm(enumerate(data_loader)):\n images, targets, image_ids = batch\n images = images.to(device)\n with torch.no_grad():\n output = model(images)\n output = [o.to(cpu_device) for o in output]\n results_dict.update(\n {img_id: result for img_id, result in zip(image_ids, output)}\n )\n return results_dict\n\n\ndef _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):\n all_predictions = scatter_gather(predictions_per_gpu)\n if not is_main_process():\n return\n # merge the list of dicts\n predictions = {}\n for p in all_predictions:\n predictions.update(p)\n # convert a dict where the key is the index in a list\n image_ids = list(sorted(predictions.keys()))\n if len(image_ids) != image_ids[-1] + 1:\n logger = logging.getLogger(\"maskrcnn_benchmark.inference\")\n logger.warning(\n \"Number of images that were gathered from multiple processes is not \"\n \"a contiguous set. Some images might be missing from the evaluation\"\n )\n\n # convert to a list\n predictions = [predictions[i] for i in image_ids]\n return predictions\n\n\ndef save_as_bdd_format(preds, path, name, img_names, return_pred=False):\n preds_bdd = [] \n for j in range(len(preds)):\n pred = preds[j]\n pred_bdd = {\n 'name': img_names[j],\n 'labels': []\n }\n boxes = pred.bbox.numpy().tolist()\n labels = pred.get_field('labels').numpy().tolist()\n scores = pred.get_field('scores').numpy().tolist()\n for i in range(len(boxes)):\n pred_bdd['labels'] += [{\n 'category': labels[i],\n 'box2d': {\n 'x1': boxes[i][0],\n 'y1': boxes[i][1],\n 'x2': boxes[i][2],\n 'y2': boxes[i][3]\n },\n 'score': scores[i]\n }]\n preds_bdd += [pred_bdd]\n path = os.path.join(path, '{}.json'.format(name))\n with open(path, 'w') as f:\n json.dump(preds_bdd, f)\n if return_pred:\n return pred_bdd\n\n\ndef inference(\n model,\n data_loader,\n iou_types=(\"bbox\",),\n box_only=False,\n device=\"cuda\",\n expected_results=(),\n expected_results_sigma_tol=4,\n output_folder=None,\n name=\"predictions\",\n return_pred=False\n):\n\n # convert to a torch.device for efficiency\n device = torch.device(device)\n num_devices = (\n torch.distributed.deprecated.get_world_size()\n if torch.distributed.deprecated.is_initialized()\n else 1\n )\n logger = logging.getLogger(\"maskrcnn_benchmark.inference\")\n dataset = data_loader.dataset\n logger.info(\"Start evaluation on {} images\".format(len(dataset)))\n start_time = time.time()\n predictions = compute_on_dataset(model, data_loader, device)\n # wait for all processes to complete before measuring the time\n synchronize()\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=total_time))\n logger.info(\n \"Total inference time: {} ({} s / img per device, on {} devices)\".format(\n total_time_str, total_time * num_devices / len(dataset), num_devices\n )\n )\n\n predictions = _accumulate_predictions_from_multiple_gpus(predictions)\n if not is_main_process():\n return\n\n if output_folder:\n det_path = os.path.join(output_folder, \"detections\")\n if not os.path.exists(det_path):\n 
os.makedirs(det_path)\n save_as_bdd_format(predictions, det_path, name, dataset.image_paths, return_pred)\n \n return\n" ]
[ [ "torch.distributed.deprecated.get_world_size", "torch.no_grad", "torch.device", "torch.distributed.deprecated.is_initialized" ] ]
pyrooka/pathfinder
[ "c6226e1f02ef8471ddc42e1c19afde39dbb8c4ec" ]
[ "path_finder.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n PathFinder\n A QGIS plugin\n Find the shortest path between two points in a raster image.\n -------------------\n begin : 2017-09-20\n git sha : $Format:%H$\n copyright : (C) 2017 by BN\n email : [email protected]\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n\"\"\"\n# Default Python packages\nimport os.path\nfrom time import time\nfrom random import randint\n\n# PyQt packages\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import QAction, QIcon, QColor\n\n# Initialize Qt resources from file resources.py\nimport resources\n\n# Import the code for the dialog\nfrom path_finder_dialog import PathFinderDialog\n\n# Packages by QGIS\nimport numpy as np\nfrom osgeo import gdal\nfrom qgis.core import *\nfrom qgis.utils import iface\nfrom qgis.gui import QgsMapTool\n\n# My packages\nimport pyastar\n\n\nclass PathFinder:\n \"\"\"QGIS Plugin Implementation.\"\"\"\n\n def __init__(self, iface):\n \"\"\"Constructor.\n\n :param iface: An interface instance that will be passed to this class\n which provides the hook by which you can manipulate the QGIS\n application at run time.\n :type iface: QgsInterface\n \"\"\"\n # Save reference to the QGIS interface\n self.iface = iface\n # initialize plugin directory\n self.plugin_dir = os.path.dirname(__file__)\n # initialize locale\n locale = QSettings().value('locale/userLocale')[0:2]\n locale_path = os.path.join(\n self.plugin_dir,\n 'i18n',\n 'PathFinder_{}.qm'.format(locale))\n\n if os.path.exists(locale_path):\n self.translator = QTranslator()\n self.translator.load(locale_path)\n\n if qVersion() > '4.3.3':\n QCoreApplication.installTranslator(self.translator)\n\n\n # Declare instance attributes\n self.actions = []\n self.menu = self.tr(u'&Path finder')\n # TODO: We are going to let the user set this up in a future iteration\n self.toolbar = self.iface.addToolBar(u'PathFinder')\n self.toolbar.setObjectName(u'PathFinder')\n\n # noinspection PyMethodMayBeStatic\n def tr(self, message):\n \"\"\"Get the translation for a string using Qt translation API.\n\n We implement this ourselves since we do not inherit QObject.\n\n :param message: String for translation.\n :type message: str, QString\n\n :returns: Translated version of message.\n :rtype: QString\n \"\"\"\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate('PathFinder', message)\n\n\n def add_action(\n self,\n icon_path,\n text,\n callback,\n enabled_flag=True,\n add_to_menu=True,\n add_to_toolbar=True,\n status_tip=None,\n whats_this=None,\n parent=None):\n \"\"\"Add a toolbar icon to the toolbar.\n\n :param icon_path: Path to the icon for this action. Can be a resource\n path (e.g. 
':/plugins/foo/bar.png') or a normal file system path.\n :type icon_path: str\n\n :param text: Text that should be shown in menu items for this action.\n :type text: str\n\n :param callback: Function to be called when the action is triggered.\n :type callback: function\n\n :param enabled_flag: A flag indicating if the action should be enabled\n by default. Defaults to True.\n :type enabled_flag: bool\n\n :param add_to_menu: Flag indicating whether the action should also\n be added to the menu. Defaults to True.\n :type add_to_menu: bool\n\n :param add_to_toolbar: Flag indicating whether the action should also\n be added to the toolbar. Defaults to True.\n :type add_to_toolbar: bool\n\n :param status_tip: Optional text to show in a popup when mouse pointer\n hovers over the action.\n :type status_tip: str\n\n :param parent: Parent widget for the new action. Defaults None.\n :type parent: QWidget\n\n :param whats_this: Optional text to show in the status bar when the\n mouse pointer hovers over the action.\n\n :returns: The action that was created. Note that the action is also\n added to self.actions list.\n :rtype: QAction\n \"\"\"\n\n # Create the dialog (after translation) and keep reference\n self.dlg = PathFinderDialog()\n\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n\n if status_tip is not None:\n action.setStatusTip(status_tip)\n\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n\n if add_to_toolbar:\n self.toolbar.addAction(action)\n\n if add_to_menu:\n self.iface.addPluginToRasterMenu(\n self.menu,\n action)\n\n self.actions.append(action)\n\n return action\n\n def initGui(self):\n \"\"\"Create the menu entries and toolbar icons inside the QGIS GUI.\"\"\"\n\n icon_path = ':/plugins/PathFinder/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Find path'),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n\n def unload(self):\n \"\"\"Removes the plugin menu item and icon from QGIS GUI.\"\"\"\n for action in self.actions:\n self.iface.removePluginRasterMenu(\n self.tr(u'&Path finder'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar\n\n\n def run(self):\n \"\"\"Run method that performs all the real work\"\"\"\n\n # Init the gui.\n self.init_combobox()\n self.canvas = self.iface.mapCanvas()\n self.mouse_click = MySelectorTool(self.canvas, self.click_callback)\n iface.mapCanvas().setMapTool(self.mouse_click)\n # Try to disconnect the previous connection.\n try:\n self.dlg.pushButton_2.clicked.disconnect()\n self.dlg.pushButton.clicked.disconnect()\n self.dlg.checkBox.stateChanged.disconnect()\n self.dlg.comboBox.currentIndexChanged.disconnect()\n except:\n # In case of error don't do anything yet.\n pass\n\n self.dlg.pushButton_2.clicked.connect(self.clear_coordinates)\n self.dlg.pushButton.clicked.connect(self.find_path)\n self.dlg.checkBox.stateChanged.connect(self.checkbox_state_change_callback)\n self.dlg.comboBox.currentIndexChanged.connect(self.combobox_index_change_callback)\n\n\n # Show the dialog.\n self.dlg.show()\n # Run the dialog event loop.\n result = self.dlg.exec_()\n # See if OK was pressed.\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass\n\n\n def init_combobox(self):\n \"\"\"\n Initialize the comobobox with the layer of the project.\n \"\"\"\n\n # Get all the layers from the interface.\n all_layers = 
self.iface.legendInterface().layers()\n self.layers = []\n layers_list = []\n\n for layer in all_layers:\n # If the layer is a raster layer,\n if layer.type() == QgsMapLayer.RasterLayer:\n # add it to the lists.\n self.layers.append(layer)\n layers_list.append(layer.name())\n # Clear all previous layer.\n self.dlg.comboBox.clear()\n # Add the layer names to the combobox.\n self.dlg.comboBox.addItems(layers_list)\n\n # Trigger the combobox index change event to refres the bands in their combobox.\n if len(self.layers) > 0:\n self.dlg.comboBox.setCurrentIndex(0)\n self.combobox_index_change_callback(0)\n\n\n def combobox_index_change_callback(self, index):\n \"\"\"\n Callback to handle index change in the combobox.\n \"\"\"\n\n layer = self.layers[index]\n band_count = layer.bandCount()\n\n # Clear all previous item\n self.dlg.comboBox_2.clear()\n # Add the number as string to the combobox.\n self.dlg.comboBox_2.addItems([str(i) for i in range(1, band_count + 1)])\n\n\n def checkbox_state_change_callback(self, state):\n \"\"\"\n Callback function to handle checkbox checkstate changes.\n \"\"\"\n\n # Unchecked.\n if state == 0:\n self.dlg.comboBox_2.show()\n self.dlg.lineEdit_6.hide()\n # Checked.\n else:\n self.dlg.comboBox_2.hide()\n self.dlg.lineEdit_6.show()\n\n\n def click_callback(self, coordinates):\n \"\"\"\n Callback function to handle mouse clicks.\n coordinates (QgsPoint): point with the coordinates\n \"\"\"\n\n # If both of the START lineEdit fields are empty put the coordinates into them.\n if not self.dlg.lineEdit_2.text() and not self.dlg.lineEdit_3.text():\n self.dlg.lineEdit_2.setText(str(coordinates.x()))\n self.dlg.lineEdit_3.setText(str(coordinates.y()))\n # If both of the END lineEdit fields are empty put the coordinates into them.\n elif not self.dlg.lineEdit_4.text() and not self.dlg.lineEdit_5.text():\n self.dlg.lineEdit_4.setText(str(coordinates.x()))\n self.dlg.lineEdit_5.setText(str(coordinates.y()))\n\n\n def validation(self):\n \"\"\"\n Before call the find path method, validate the given inputs.\n return (tuple): (true | false, message)\n \"\"\"\n\n current_index = self.dlg.comboBox.currentIndex()\n\n # Check the layer first. Is valid raster layer?\n if current_index < 0:\n return (False, 'No layer selected.')\n\n # Check the layer type. 
Now I only load raster layers, but I left this here.\n if self.layers[current_index].type() != QgsMapLayer.RasterLayer:\n return (False, 'The selected layer is not raster.')\n\n if self.dlg.checkBox.isChecked():\n # Check the band number.\n if self.dlg.lineEdit_6.text() == '':\n return (False, 'No band number provided.')\n\n try:\n int(self.dlg.lineEdit_6.text())\n except:\n return (False, 'Invalid band number.')\n\n # Check the value limit.\n if self.dlg.lineEdit.text() == '':\n return (False, 'No value provided.')\n\n try:\n float(self.dlg.lineEdit.text())\n except:\n return (False, 'Invalid value.')\n\n # Now the coordinates.\n if self.dlg.lineEdit_2.text() == '' or self.dlg.lineEdit_3.text() == '':\n return (False, 'No start coordinates.')\n\n try:\n float(self.dlg.lineEdit_2.text())\n float(self.dlg.lineEdit_3.text())\n except:\n return (False, 'Invalid start coordinates.')\n\n if self.dlg.lineEdit_4.text() == '' or self.dlg.lineEdit_5.text() == '':\n return (False, 'No end coordinates.')\n\n try:\n float(self.dlg.lineEdit_4.text())\n float(self.dlg.lineEdit_5.text())\n except:\n return (False, 'Invalid end coordinates.')\n\n return (True,)\n\n\n def find_path(self):\n # First validate all the datas.\n validation_result = self.validation()\n if not validation_result[0]:\n QgsMessageLog.logMessage(validation_result[1])\n return\n\n # Index of the currently selected layer.\n selected_layer_index = self.dlg.comboBox.currentIndex()\n # Current layer object.\n layer = self.layers[selected_layer_index]\n # Data provider object of the layer.\n provider = layer.dataProvider()\n # Open the layer from its uri (basically it's a path for the file) with update acces.\n raster = gdal.Open(str(provider.dataSourceUri()), gdal.GA_Update)\n\n # Set the transform matrix for this instance from the opened raster layer.\n self.geo_transform_matrix = raster.GetGeoTransform()\n\n # Set the pixel size in the CRS of the raster.\n self.raster_size_x = layer.rasterUnitsPerPixelX()\n self.raster_size_y = layer.rasterUnitsPerPixelY()\n\n # Set the CRS from our raster.\n self.crs = layer.crs()\n\n # Get the band number.\n if self.dlg.checkBox.isChecked():\n band_number = int(self.dlg.lineEdit_6.text())\n else:\n band_number = self.dlg.comboBox_2.currentIndex() + 1\n\n # The band which contains the information.\n band = raster.GetRasterBand(band_number)\n\n # The max value.\n max_value = float(self.dlg.lineEdit.text())\n\n # Try to read the given band.\n try:\n band_array = band.ReadAsArray()\n except:\n QgsMessageLog.logMessage('Invalid band number.')\n return\n\n # Get the starting and ending coordinates. Numpy is working (default) with row ordered arrays,\n # while QGIS the opposite.\n start_coordinates = self.get_pixel_coordinates(float(self.dlg.lineEdit_2.text()), float(self.dlg.lineEdit_3.text()))\n start_coordinates_np = start_coordinates[::-1]\n end_coordinates = self.get_pixel_coordinates(float(self.dlg.lineEdit_4.text()), float(self.dlg.lineEdit_5.text()))\n end_coordinates_np = end_coordinates[::-1]\n\n # Check the coordinates aren't \"wall\" pixels.\n if band_array[start_coordinates_np[0], start_coordinates_np[1]] >= max_value:\n QgsMessageLog.logMessage('The starting coordinates are above the maximum value.')\n return\n if band_array[end_coordinates_np[0], end_coordinates_np[1]] >= max_value:\n QgsMessageLog.logMessage('The ending coordinates are above the maximum value.')\n return\n\n # Create copy from the band. 
To secure our raster and don't overwrite accidentally.\n grid = np.copy(band_array)\n\n # Process the grid for the algorithm.\n # Inf = wall\n # 1 = free area\n grid[band_array >= max_value] = np.inf\n grid[band_array < max_value] = 1\n\n # Get the time for measure the algorithm.\n t0 = time()\n\n # Get the path finally!\n path = pyastar.astar_path(grid, start_coordinates_np, end_coordinates_np)\n\n # Duration of the algorithm running time.\n duration = time() - t0\n\n # If no path found.\n if not len(path):\n QgsMessageLog.logMessage('No path found!')\n return\n\n QgsMessageLog.logMessage('Path found in %.6fs.' % duration)\n QgsMessageLog.logMessage('Steps: ' + str(len(path)))\n\n # Create the vector layer from the path and get the length of the created line.\n path_length = self.create_vector_layer(path)\n\n # Type of the CRS map unit.\n unit_type = QgsUnitTypes.encodeUnit(self.crs.mapUnits())\n\n self.dlg.label_10.setText('Path length: %.1f %s' % (path_length, unit_type))\n\n # Reload the layers into the comobox.\n self.init_combobox()\n\n return\n\n\n def create_vector_layer(self, path):\n \"\"\"\n Create vector layer from the given path.\n path (list): list of the coordinates\n return (float): length of the created line\n \"\"\"\n\n # New QGIS Vector layer with the base (raster) layer's CRS.\n vector_layer = QgsVectorLayer('LineString?crs=' + self.crs.toWkt(), 'Path', 'memory')\n # Get the layer renderer and the feature symbol.\n symbol = vector_layer.rendererV2().symbols2(QgsRenderContext())[0]\n # Set the feature (line) width.\n symbol.setWidth(1.0)\n # Now set the color of the feature (line) random.\n symbol.setColor(QColor.fromRgb(randint(0,255), randint(0,255), randint(0,255)))\n\n # Get the layer provider.\n provider = vector_layer.dataProvider()\n\n # Enable layer edit.\n vector_layer.startEditing()\n\n # Line features created from the path (list of QGIS points).\n points = []\n\n # Iterates over the points in a path.\n for point in path:\n # Get the CRS coordinates of the point,\n crs_coordinate = self.get_crs_coordinates(point[1], point[0])\n # then create a QGIS point from it.\n points.append(QgsPoint(crs_coordinate[0], crs_coordinate[1]))\n\n # New QGIS feature.\n line = QgsFeature()\n\n # Set feature (line) geometry from the points.\n line.setGeometry(QgsGeometry.fromPolyline(points))\n\n # Add the features to the vector (provider).\n provider.addFeatures([line])\n\n # Commit (\"save\") changes to the vector layer.\n vector_layer.commitChanges()\n\n # Then add it to the project.\n QgsMapLayerRegistry.instance().addMapLayer(vector_layer)\n\n return line.geometry().length()\n\n\n def get_pixel_coordinates(self, x, y):\n \"\"\"\n This method calculates the image pixel coordinate for a real location.\n x (double): x coordinate\n y (double): y coordinate\n return (list): [x, y]\n \"\"\"\n\n\n if (not self.geo_transform_matrix):\n QgsMessageLog.logMessage('No geo transform matrix.')\n return [0, 0]\n\n # Honestly I just copied this block from the QGIS Python Programming Cookbook. I didn't dig into deeper,\n # but more or less it's trivial. 
It uses parameters from the georeferencing information of the raster.\n ul_x = self.geo_transform_matrix[0]\n ul_y = self.geo_transform_matrix[3]\n x_dist = self.geo_transform_matrix[1]\n y_dist = self.geo_transform_matrix[5]\n rtn_x = self.geo_transform_matrix[2]\n rtn_y = self.geo_transform_matrix[4]\n # Calculate the pixel X,Y.\n pixel_x = int((x - ul_x) / x_dist)\n pixel_y = int((y - ul_y) / y_dist)\n\n return [pixel_x, pixel_y]\n\n\n def get_crs_coordinates(self, x, y):\n \"\"\"\n This method calculates the CRS coordinate for a pixel location.\n x (int): x coordinate\n y (int): y coordinate\n return (list): [x, y]\n \"\"\"\n\n if (not self.geo_transform_matrix):\n QgsMessageLog.logMessage('No geo transform matrix.')\n return [0, 0]\n\n # Get parameters from georeferencing.\n ul_x = self.geo_transform_matrix[0]\n ul_y = self.geo_transform_matrix[3]\n x_dist = self.geo_transform_matrix[1]\n y_dist = self.geo_transform_matrix[5]\n rtn_x = self.geo_transform_matrix[2]\n rtn_y = self.geo_transform_matrix[4]\n # Calculate the CRS X,Y.\n crs_x = (x * x_dist) + ul_x\n crs_y = (y * y_dist) + ul_y\n\n # Now the coordinates are in the topleft (?) corner if the pixels so I move them to the center.\n # May not work for your raster and CRS. In this case please contact me! (:\n crs_x += self.raster_size_x / 2\n crs_y -= self.raster_size_y / 2\n\n return [crs_x, crs_y]\n\n\n def clear_coordinates(self):\n \"\"\"\n Clear all coordinates field (START(X,Y) END(X,Y)) on the GUI.\n \"\"\"\n\n self.dlg.lineEdit_2.setText('')\n self.dlg.lineEdit_3.setText('')\n self.dlg.lineEdit_4.setText('')\n self.dlg.lineEdit_5.setText('')\n\n\n def get_eucl_dist(self, point_1, point_2):\n \"\"\"\n Calculate the distance between two points.\n point_1 (list/tuple): first point coordinates (x, y)\n point_2 (list/tuple): second point coordinates (x, y)\n return (float): the calculated distance\n \"\"\"\n\n return ((point_1[0] - point_2[0])**2 + (point_1[1] - point_2[1])**2)**0.5\n\n\nclass MySelectorTool(QgsMapTool):\n \"\"\"\n Inherits the QGIS identify tool (tool is deactivated once another tool takes focus)\n \"\"\"\n def __init__(self, canvas, callback):\n QgsMapTool.__init__(self, canvas)\n self.canvas = canvas\n self.click_event = pyqtSignal()\n self.callback = callback\n\n\n def canvasReleaseEvent(self, mouseEvent):\n \"\"\"\n Mouse click events.\n \"\"\"\n\n # Coordinates.\n x = mouseEvent.pos().x()\n y = mouseEvent.pos().y()\n # Transform the coordinates into map coordinates (CRS).\n point = self.canvas.getCoordinateTransform().toMapCoordinates(x, y)\n # Call the callback function.\n self.callback(point)\n" ]
[ [ "numpy.copy" ] ]
llbxg/NIST-SP-800-22
[ "7e82243643b62fdc07cbe5f40d540b0a16a4372a" ]
[ "tests/t_05_binary_matrix_rank_test.py" ]
[ "import math\n\nimport numpy as np\nimport scipy.special as sc\n\nfrom tests.src.utils import split_list, __print\nfrom tests.src.rakn import rank\n\n# .5 Binary Matrix Rank Test\ndef binary_matrix_rank_test(key, n, M=32, Q=32, b_print=True):\n if n < 38912:\n __print(b_print, '{:40} : Error. Need at least 38,912 bits. Got {}.' .format('binary matrix rank test', n))\n return [0], False\n\n N=n//(M*Q)\n split_key=list(split_list(key,M*Q))\n\n if len(split_key[-1]) != len(split_key[0]):\n split_key=split_key[0:-1]\n\n f_split_key= list(map(lambda x : np.reshape(np.array(x),[M,Q]), split_key))\n\n ranks = list(map(lambda x: rank(list(x)),f_split_key))\n\n full_rank = M\n\n FM =ranks.count(full_rank)\n FM1=ranks.count(full_rank-1)\n NFMM1=N - FM -FM1\n\n chi_squared_obs = (FM-0.2888*N)**2/(0.2888*N) + (FM1-0.5776*N)**2/(0.5776*N)+(NFMM1-0.1336*N)**2/(0.1336*N)\n p=math.e**(-chi_squared_obs/2)\n\n b = (p >= 0.01)\n\n __print(b_print, '{:40} : {:.3f} -> {}'.format('binary matrix rank test',p,b))\n\n return [p],b" ]
[ [ "numpy.array" ] ]
usamaahsan93/Perceptron
[ "27dbcefe39f5bd61f30a66887abd810bb59c9b2c" ]
[ "myPerceptron.py" ]
[ "import numpy as np\nfrom numpy.random import randn\n\n#This function is taken from Dr Fayyaz ul Amir Afsar Minhas (Github User: foxtrotmike)\ndef getExamples(n=100,d=2):\n \"\"\"\n Generates n d-dimensional normally distributed examples of each class \n The mean of the positive class is [1] and for the negative class it is [-1]\n DO NOT CHANGE THIS FUNCTION\n \"\"\"\n Xp = randn(n,d)+1 #generate n examples of the positive class\n #Xp[:,0]=Xp[:,0]+1\n Xn = randn(n,d)-1 #generate n examples of the negative class\n #Xn[:,0]=Xn[:,0]-1\n X = np.vstack((Xp,Xn)) #Stack the examples together to a single matrix\n Y = np.array([+1]*n+[-1]*n) #Associate Labels\n return (X,Y) \n\n\n\nw=getExamples()\n\ndata=w[0]\nlabel=w[1]\nlr=0.01\n\n#Checking the loop execution\ncount=0\nok=False\nmaxRun=0\n\n#Seperating data into weights and bias\nw=np.random.random(data.shape[1])\nb=np.random.random()\n\n#Perceptron Loop\nwhile not ok and maxRun<=1000:\n maxRun+=1\n count=0\n for i in range(len(data)):\n \n #Counting on all the example if satisfied by y*f(x) >=1 is all true then we have found the weights of perceptron\n #The code then breaks\n if label[i]*(w.T.dot(data[i])+b) >=1:\n count+=1\n \n #Else weights are updated\n else:\n w=w+lr*label[i]*data[i]\n b=b+lr*label[i]\n \n if count==len(data):\n ok=True\n \n#Printing weights and bias \nprint(w,b)\n\n\n#############################################################\nprint('NOW TESTING')\nl=[]\nfor i in range(len(data)):\n l.append(np.sign(w.T.dot(data[i])+b)==np.sign(label[i]))\n\nprint('ACCURACY : ',l.count(True)/len(l)) \n" ]
[ [ "numpy.vstack", "numpy.sign", "numpy.random.randn", "numpy.random.random", "numpy.array" ] ]
barbagroup/pygbe_validation_paper
[ "ef826a8e956a817f919c4357a08aa8f675a910a4" ]
[ "repro_packs/rockstuhl/repro_exec_files/scripts/cext_wave_prism_34K_SE.py" ]
[ "import numpy\nimport time\nimport sys\nimport os\nfrom argparse import ArgumentParser\n\nimport pygbe\nfrom pygbe.util.read_data import read_fields\nfrom pygbe.main import main\n\nfrom cext_wavelength_scanning import create_diel_list, Cext_wave_scan, Cext_analytical\n\n\ndef read_inputs(args):\n \"\"\"\n Parse command-line arguments to read arguments in main.\n \"\"\"\n\n parser = ArgumentParser(description='Read path where input files are located')\n parser.add_argument('-if',\n '--infiles',\n type=str,\n help=\"Absolute path where input files are located (downloaded from zenodo)\")\n \n return parser.parse_args(args)\n \n\n\ndef main(argv=sys.argv):\n\n argv=sys.argv\n args = read_inputs(argv[1:])\n\n in_files_path = args.infiles\n\n #Import surface data\n wave_s, diel_rs, diel_is = numpy.loadtxt('../dielectric_data/4H-SIC_permittivity_10-12_microns.csv', skiprows=1, unpack=True)\n\n air_diel = [1. + 1j*0.] * len(wave_s)\n #Creating dielectric list first dielectric outside, then inside\n diel_list = [list(eps) for eps in zip(air_diel, diel_rs + 1j*diel_is)]\n\n #Set enviornment variable for PyGBe\n folder_path = in_files_path + 'prism6720x26880x3280_SE'\n full_path = os.path.abspath(folder_path)+'/'\n os.environ['PYGBE_PROBLEM_FOLDER'] = full_path\n\n #Creating dictionary field. We will modify the 'E' key in the for loop.\n field_dict_pillars = read_fields(full_path + 'prism_34K.config')\n\n\n #Calculate Cext(lambda) for pillars' surface\n tic_ss = time.time()\n e_field = -1.\n wave, Cext_pillars = Cext_wave_scan(e_field, wave_s, diel_list, field_dict_pillars, full_path) \n toc_ss = time.time()\n\n\n\n numpy.savetxt('../results_data/prism_SE_LE_res/'+'prism_34K_short_edge'+'10-20microns.txt', \n list(zip(wave, Cext_pillars)),\n fmt = '%.9f %.9f', \n header = 'lambda [Ang], Cext [nm^2]') \n\n time_simulation = (toc_ss - tic_ss) \n\n with open('../results_data/prism_SE_LE_res/Time_'+'prism_34K_short_edge'+'.txt', 'w') as f:\n print('time_total: {} s'.format(time_simulation), file=f)\n\nif __name__ == \"__main__\":\n main(sys.argv)\n" ]
[ [ "numpy.loadtxt" ] ]
khdlr/PyQtImageViewer
[ "0f41c10684915bd6ed8db6ce5fb5a783d7bc5889" ]
[ "ViewGeoTIFF.py" ]
[ "#!/usr/bin/env python\n\"\"\" ViewGeoTIFF.py: PyQt image viewer widget for a QPixmap in a QGraphicsView scene with mouse zooming and panning.\n\n\"\"\"\n\nimport os.path\ntry:\n from PyQt5.QtCore import Qt, QRectF, pyqtSignal, QT_VERSION_STR\n from PyQt5.QtGui import QImage, QPixmap, QPainterPath\n from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QFileDialog\nexcept ImportError:\n try:\n from PyQt4.QtCore import Qt, QRectF, pyqtSignal, QT_VERSION_STR\n from PyQt4.QtGui import QGraphicsView, QGraphicsScene, QImage, QPixmap, QPainterPath, QFileDialog\n except ImportError:\n raise ImportError(\"ViewGeoTIFF: Requires PyQt5 or PyQt4.\")\nimport rasterio\nimport numpy as np\nfrom pathlib import Path\n\n__author__ = \"Konrad Heidler <[email protected]>, Marcel Goldschen-Ohm <[email protected]>\"\n__version__ = '0.1.0'\n\n\nclass ViewGeoTIFF(QGraphicsView):\n \"\"\" PyQt image viewer widget for a QPixmap in a QGraphicsView scene with mouse zooming and panning.\n\n Displays a QImage or QPixmap (QImage is internally converted to a QPixmap).\n To display any other image format, you must first convert it to a QImage or QPixmap.\n\n Some useful image format conversion utilities:\n qimage2ndarray: NumPy ndarray <==> QImage (https://github.com/hmeine/qimage2ndarray)\n ImageQt: PIL Image <==> QImage (https://github.com/python-pillow/Pillow/blob/master/PIL/ImageQt.py)\n\n Mouse interaction:\n Left mouse button drag: Pan image.\n Right mouse button drag: Zoom box.\n Right mouse button doubleclick: Zoom to show entire image.\n \"\"\"\n\n # Mouse button signals emit image scene (x, y) coordinates.\n # !!! For image (row, column) matrix indexing, row = y and column = x.\n leftMouseButtonPressed = pyqtSignal(float, float)\n rightMouseButtonPressed = pyqtSignal(float, float)\n leftMouseButtonReleased = pyqtSignal(float, float)\n rightMouseButtonReleased = pyqtSignal(float, float)\n leftMouseButtonDoubleClicked = pyqtSignal(float, float)\n rightMouseButtonDoubleClicked = pyqtSignal(float, float)\n\n def __init__(self, app):\n QGraphicsView.__init__(self)\n self.app = app\n\n # Image is displayed as a QPixmap in a QGraphicsScene attached to this QGraphicsView.\n self.scene = QGraphicsScene()\n self.setScene(self.scene)\n\n # Store a local handle to the scene's current image pixmap.\n self._pixmapHandle = None\n\n # Image aspect ratio mode.\n # !!! ONLY applies to full image. 
Aspect ratio is always ignored when zooming.\n # Qt.IgnoreAspectRatio: Scale image to fit viewport.\n # Qt.KeepAspectRatio: Scale image to fit inside viewport, preserving aspect ratio.\n # Qt.KeepAspectRatioByExpanding: Scale image to fill the viewport, preserving aspect ratio.\n self.aspectRatioMode = Qt.KeepAspectRatio\n\n # Scroll bar behaviour.\n # Qt.ScrollBarAlwaysOff: Never shows a scroll bar.\n # Qt.ScrollBarAlwaysOn: Always shows a scroll bar.\n # Qt.ScrollBarAsNeeded: Shows a scroll bar only when zoomed.\n self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)\n self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)\n\n # Stack of QRectF zoom boxes in scene coordinates.\n self.zoomStack = []\n\n # Flags for enabling/disabling mouse interaction.\n self.canZoom = True\n self.canPan = True\n\n # Path of current file\n self.currentFile = None\n\n\n def hasImage(self):\n \"\"\" Returns whether or not the scene contains an image pixmap.\n \"\"\"\n return self._pixmapHandle is not None\n\n def clearImage(self):\n \"\"\" Removes the current image pixmap from the scene if it exists.\n \"\"\"\n if self.hasImage():\n self.scene.removeItem(self._pixmapHandle)\n self._pixmapHandle = None\n\n def pixmap(self):\n \"\"\" Returns the scene's current image pixmap as a QPixmap, or else None if no image exists.\n :rtype: QPixmap | None\n \"\"\"\n if self.hasImage():\n return self._pixmapHandle.pixmap()\n return None\n\n def image(self):\n \"\"\" Returns the scene's current image pixmap as a QImage, or else None if no image exists.\n :rtype: QImage | None\n \"\"\"\n if self.hasImage():\n return self._pixmapHandle.pixmap().toImage()\n return None\n\n def setImage(self, image):\n \"\"\" Set the scene's current image pixmap to the input QImage or QPixmap.\n Raises a RuntimeError if the input image has type other than QImage or QPixmap.\n :type image: QImage | QPixmap\n \"\"\"\n if type(image) is QPixmap:\n pixmap = image\n elif type(image) is QImage:\n pixmap = QPixmap.fromImage(image)\n else:\n raise RuntimeError(\"ImageViewer.setImage: Argument must be a QImage or QPixmap.\")\n if self.hasImage():\n self._pixmapHandle.setPixmap(pixmap)\n else:\n self._pixmapHandle = self.scene.addPixmap(pixmap)\n self.setSceneRect(QRectF(pixmap.rect())) # Set scene size to image size.\n self.updateViewer()\n\n def loadImageFromFile(self, fileName=None):\n \"\"\" Load an image from file.\n Without any arguments, loadImageFromFile() will popup a file dialog to choose the image file.\n With a fileName argument, loadImageFromFile(fileName) will attempt to load the specified image file directly.\n \"\"\"\n if fileName is None:\n if QT_VERSION_STR[0] == '4':\n fileName = QFileDialog.getOpenFileName(self, \"Open image file.\")\n elif QT_VERSION_STR[0] == '5':\n fileName, _ = QFileDialog.getOpenFileName(self, \"Open image file.\")\n self.currentFile = Path(fileName)\n self.updateFolderListing()\n self.loadImage()\n\n def loadImage(self):\n if self.currentFile.exists():\n with rasterio.open(self.currentFile) as raster:\n r = raster.read(4)\n g = raster.read(3)\n b = raster.read(2)\n\n img = np.stack([r, g, b], axis=-1)\n img = (255 * np.clip(img / 3000, 0, 1)).astype(np.uint8)\n\n height, width = img.shape[:2]\n image = QImage(img, width, height, 3*width, QImage.Format_RGB888)\n\n self.setImage(image)\n self.setWindowTitle(f'{self.currentFile.name} ({self.folderIndex+1} / {len(self.folderListing)})')\n else:\n print(f\"Trying to load Image at {self.currentFile}, which doesn't exist!\")\n\n\n def updateFolderListing(self):\n 
self.folderListing = list(sorted(self.currentFile.parent.glob('*.tif')))\n self.folderIndex = self.folderListing.index(self.currentFile)\n\n def updateViewer(self):\n \"\"\" Show current zoom (if showing entire image, apply current aspect ratio mode).\n \"\"\"\n if not self.hasImage():\n return\n if len(self.zoomStack) and self.sceneRect().contains(self.zoomStack[-1]):\n self.fitInView(self.zoomStack[-1], Qt.IgnoreAspectRatio) # Show zoomed rect (ignore aspect ratio).\n else:\n self.zoomStack = [] # Clear the zoom stack (in case we got here because of an invalid zoom).\n self.fitInView(self.sceneRect(), self.aspectRatioMode) # Show entire image (use current aspect ratio mode).\n\n def resizeEvent(self, event):\n \"\"\" Maintain current zoom on resize.\n \"\"\"\n self.updateViewer()\n\n def mousePressEvent(self, event):\n \"\"\" Start mouse pan or zoom mode.\n \"\"\"\n scenePos = self.mapToScene(event.pos())\n if event.button() == Qt.LeftButton:\n if self.canPan:\n self.setDragMode(QGraphicsView.ScrollHandDrag)\n self.leftMouseButtonPressed.emit(scenePos.x(), scenePos.y())\n elif event.button() == Qt.RightButton:\n if self.canZoom:\n self.setDragMode(QGraphicsView.RubberBandDrag)\n self.rightMouseButtonPressed.emit(scenePos.x(), scenePos.y())\n QGraphicsView.mousePressEvent(self, event)\n\n def mouseReleaseEvent(self, event):\n \"\"\" Stop mouse pan or zoom mode (apply zoom if valid).\n \"\"\"\n QGraphicsView.mouseReleaseEvent(self, event)\n scenePos = self.mapToScene(event.pos())\n if event.button() == Qt.LeftButton:\n self.setDragMode(QGraphicsView.NoDrag)\n self.leftMouseButtonReleased.emit(scenePos.x(), scenePos.y())\n elif event.button() == Qt.RightButton:\n if self.canZoom:\n viewBBox = self.zoomStack[-1] if len(self.zoomStack) else self.sceneRect()\n selectionBBox = self.scene.selectionArea().boundingRect().intersected(viewBBox)\n self.scene.setSelectionArea(QPainterPath()) # Clear current selection area.\n if selectionBBox.isValid() and (selectionBBox != viewBBox):\n self.zoomStack.append(selectionBBox)\n self.updateViewer()\n self.setDragMode(QGraphicsView.NoDrag)\n self.rightMouseButtonReleased.emit(scenePos.x(), scenePos.y())\n\n def mouseDoubleClickEvent(self, event):\n \"\"\" Show entire image.\n \"\"\"\n scenePos = self.mapToScene(event.pos())\n if event.button() == Qt.LeftButton:\n self.leftMouseButtonDoubleClicked.emit(scenePos.x(), scenePos.y())\n elif event.button() == Qt.RightButton:\n if self.canZoom:\n self.zoomStack = [] # Clear zoom stack.\n self.updateViewer()\n self.rightMouseButtonDoubleClicked.emit(scenePos.x(), scenePos.y())\n QGraphicsView.mouseDoubleClickEvent(self, event)\n\n def closeEvent(self, event):\n self.app.quit()\n\n def nextImage(self):\n self.folderIndex = (self.folderIndex + 1) % len(self.folderListing)\n self.currentFile = self.folderListing[self.folderIndex]\n self.loadImage()\n\n def previousImage(self):\n self.folderIndex -= 1\n if self.folderIndex < 0:\n self.folderIndex += len(self.folderListing)\n self.currentFile = self.folderListing[self.folderIndex]\n self.loadImage()\n\n def keyPressEvent(self, event):\n if event.key() == Qt.Key_Right:\n self.nextImage()\n elif event.key() == Qt.Key_Left:\n self.previousImage()\n event.accept()\n\n\nif __name__ == '__main__':\n import sys\n try:\n from PyQt5.QtWidgets import QApplication\n except ImportError:\n\n try:\n from PyQt4.QtGui import QApplication\n except ImportError:\n raise ImportError(\"ViewGeoTIFF: Requires PyQt5 or PyQt4.\")\n print('Using Qt ' + QT_VERSION_STR)\n\n def handleLeftClick(x, 
y):\n row = int(y)\n column = int(x)\n print(\"Clicked on image pixel (row=\"+str(row)+\", column=\"+str(column)+\")\")\n\n # Create the application.\n app = QApplication(sys.argv)\n\n # Create image viewer and load an image file to display.\n viewer = ViewGeoTIFF(app)\n\n filename = None\n if len(sys.argv) > 1:\n filename = sys.argv[1]\n\n viewer.loadImageFromFile(filename)\n # Handle left mouse clicks with custom slot.\n viewer.leftMouseButtonPressed.connect(handleLeftClick)\n\n # Show viewer and run application.\n viewer.show()\n sys.exit(app.exec_())\n" ]
[ [ "numpy.stack", "numpy.clip" ] ]
tongni1975/stackup-workshops
[ "d83f1d5adcc0b133b10e22d1db295020af967bac" ]
[ "pi-pytorch/tutorials/rnn/data.py" ]
[ "import numpy as np\nimport math, random\n\n# Generate a noisy multi-sin wave \ndef sine_2(X, signal_freq=60.):\n return (np.sin(2 * np.pi * (X) / signal_freq) + np.sin(4 * np.pi * (X) / signal_freq)) / 2.0\n\ndef noisy(Y, noise_range=(-0.05, 0.05)):\n noise = np.random.uniform(noise_range[0], noise_range[1], size=Y.shape)\n return Y + noise\n\ndef sample(sample_size):\n random_offset = random.randint(0, sample_size)\n X = np.arange(sample_size)\n Y = noisy(sine_2(X + random_offset))\n return Y" ]
[ [ "numpy.random.uniform", "numpy.sin", "numpy.arange" ] ]
victorfariassb/ministros_mencao_estado
[ "cf490a09aef646d68a399700a9a7b6cdfef18f54" ]
[ "contagem_palavras_especificas.py" ]
[ "import pandas as pd\r\n\r\nestados = ['acre|ac', 'alagoas|al', 'amapá|ap', 'amazonas|am', 'bahia|ba', 'ceará|ce', 'espírito santo|es', 'goiás|go', 'maranhão|ma', 'mato grosso|mt', 'mato grosso do sul|ms', 'goiás|go',\r\n 'maranhão|ma', 'minas gerais|mg', 'pará|pa', 'paraíba|pb', 'paraná|pr', 'pernambuco|pe', 'piauí|pi', 'rio de janeiro|rj', 'rio grande do norte|rn', 'rio grande do sul|rs',\r\n 'rondônia|ro', 'roraima|rr', 'santa catarina|sc', 'são paulo|sp', 'sergipe|se', 'tocantins|to', 'distrito federal|df']\r\n\r\ndef contagem_palavras_especificas(arquivo, palavras):\r\n \"\"\"Conta como mais de um caso os termos tenham sido mencionados no mesmo tweet. Além disso, Mato Grosso conta os dados de MS\"\"\"\r\n dados = {}\r\n df = pd.read_csv(arquivo)\r\n df = df[['date', 'tweet']]\r\n df['count'] = 1\r\n df['tweet'] = df['tweet'].str.lower()\r\n for palavra in palavras:\r\n termo = df.loc[df['tweet'].str.contains(fr\"\\b({palavra})\\b\")].sum()\r\n termo['estado'] = palavra\r\n dados[f'{termo[\"estado\"]}'] = termo['count']\r\n for i in sorted(dados, key= dados.get, reverse=True):\r\n print(i, dados[i])\r\n #contagem.to_csv(novo_doc)\r\n\r\nprint('Tarcisio')\r\ncontagem_palavras_especificas('tarcisio.csv', estados)\r\nprint('\\n')\r\nprint('onyx')\r\ncontagem_palavras_especificas('onyx.csv', estados)\r\nprint('\\n')\r\n\r\nprint('marinho')\r\ncontagem_palavras_especificas('marinho.csv', estados)\r\nprint('\\n')\r\n\r\nprint('TerezaCrisMS')\r\ncontagem_palavras_especificas('tereza.csv', estados)\r\nprint('\\n')\r\nprint('andersongtorres')\r\ncontagem_palavras_especificas('torres.csv', estados)\r\nprint('\\n')\r\nprint('João Roma')\r\ncontagem_palavras_especificas('joao_roma.csv', estados)\r\nprint('\\n')\r\nprint('fabiofaria')\r\ncontagem_palavras_especificas('fabiofaria.csv', estados)\r\n" ]
[ [ "pandas.read_csv" ] ]
tkf/matplotlib
[ "5b90a27aeda308a7dcbf70d5cc7a0612b3bb41e5" ]
[ "lib/matplotlib/tests/test_transforms.py" ]
[ "from __future__ import print_function\nfrom nose.tools import assert_equal\nfrom numpy.testing import assert_almost_equal\nfrom matplotlib.transforms import Affine2D, BlendedGenericTransform\nfrom matplotlib.path import Path\nfrom matplotlib.scale import LogScale\nfrom matplotlib.testing.decorators import cleanup\nimport numpy as np\n\nimport matplotlib.transforms as mtrans\nimport matplotlib.pyplot as plt\n\n\n\n@cleanup\ndef test_non_affine_caching():\n class AssertingNonAffineTransform(mtrans.Transform):\n \"\"\"\n This transform raises an assertion error when called when it\n shouldn't be and self.raise_on_transform is True.\n\n \"\"\"\n input_dims = output_dims = 2\n is_affine = False\n def __init__(self, *args, **kwargs):\n mtrans.Transform.__init__(self, *args, **kwargs)\n self.raise_on_transform = False\n self.underlying_transform = mtrans.Affine2D().scale(10, 10)\n\n def transform_path_non_affine(self, path):\n if self.raise_on_transform:\n assert False, ('Invalidated affine part of transform '\n 'unnecessarily.')\n return self.underlying_transform.transform_path(path)\n transform_path = transform_path_non_affine\n\n def transform_non_affine(self, path):\n if self.raise_on_transform:\n assert False, ('Invalidated affine part of transform '\n 'unnecessarily.')\n return self.underlying_transform.transform(path)\n transform = transform_non_affine\n\n my_trans = AssertingNonAffineTransform()\n ax = plt.axes()\n plt.plot(range(10), transform=my_trans + ax.transData)\n plt.draw()\n # enable the transform to raise an exception if it's non-affine transform\n # method is triggered again.\n my_trans.raise_on_transform = True\n ax.transAxes.invalidate()\n plt.draw()\n\n\ndef test_Affine2D_from_values():\n points = [ [0,0],\n [10,20],\n [-1,0],\n ]\n\n t = Affine2D.from_values(1,0,0,0,0,0)\n actual = t.transform(points)\n expected = np.array( [[0,0],[10,0],[-1,0]] )\n assert_almost_equal(actual,expected)\n\n t = Affine2D.from_values(0,2,0,0,0,0)\n actual = t.transform(points)\n expected = np.array( [[0,0],[0,20],[0,-2]] )\n assert_almost_equal(actual,expected)\n\n t = Affine2D.from_values(0,0,3,0,0,0)\n actual = t.transform(points)\n expected = np.array( [[0,0],[60,0],[0,0]] )\n assert_almost_equal(actual,expected)\n\n t = Affine2D.from_values(0,0,0,4,0,0)\n actual = t.transform(points)\n expected = np.array( [[0,0],[0,80],[0,0]] )\n assert_almost_equal(actual,expected)\n\n t = Affine2D.from_values(0,0,0,0,5,0)\n actual = t.transform(points)\n expected = np.array( [[5,0],[5,0],[5,0]] )\n assert_almost_equal(actual,expected)\n\n t = Affine2D.from_values(0,0,0,0,0,6)\n actual = t.transform(points)\n expected = np.array( [[0,6],[0,6],[0,6]] )\n assert_almost_equal(actual,expected)\n\ndef test_clipping_of_log():\n # issue 804\n M,L,C = Path.MOVETO, Path.LINETO, Path.CLOSEPOLY\n points = [ (0.2, -99), (0.4, -99), (0.4, 20), (0.2, 20), (0.2, -99) ]\n codes = [ M, L, L, L, C ]\n path = Path(points, codes)\n\n # something like this happens in plotting logarithmic histograms\n trans = BlendedGenericTransform(Affine2D(),\n LogScale.Log10Transform('clip'))\n tpath = trans.transform_path_non_affine(path)\n result = tpath.iter_segments(trans.get_affine(),\n clip=(0, 0, 100, 100),\n simplify=False)\n\n tpoints, tcodes = zip(*result)\n # Because y coordinate -99 is outside the clip zone, the first\n # line segment is effectively removed. 
That means that the closepoly\n # operation must be replaced by a move to the first point.\n assert np.allclose(tcodes, [ M, M, L, L, L ])\n assert np.allclose(tpoints[-1], tpoints[0])\n" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.allclose", "matplotlib.pyplot.draw", "matplotlib.transforms.Affine2D", "matplotlib.pyplot.axes", "matplotlib.scale.LogScale.Log10Transform", "matplotlib.path.Path", "numpy.array", "matplotlib.transforms.Transform.__init__", "matplotlib.transforms.Affine2D.from_values" ] ]
PartumSomnia/bns_ppr_tools
[ "b02bab870bb54171bc0d0cd7e07bfb50e978e7dd" ]
[ "module_ejecta/ejecta_formulas.py" ]
[ "\nimport numpy as np\n\nclass FORMULAS:\n\n @staticmethod\n def vinf(eninf):\n return np.sqrt(2. * eninf)\n\n @staticmethod\n def vinf_bern(eninf, enthalpy):\n return np.sqrt(2.*(enthalpy*(eninf + 1.) - 1.))\n\n @staticmethod\n def vel(w_lorentz):\n return np.sqrt(1. - 1. / (w_lorentz**2))\n\n @staticmethod\n def get_tau(rho, vel, radius, lrho_b):\n\n rho_b = 10 ** lrho_b\n tau_0 = 0.5 * 2.71828182845904523536 * (radius / vel) * (0.004925794970773136) # in ms\n tau_b = tau_0 * ((rho/rho_b) ** (1.0 / 3.0))\n return tau_b # ms\n\n @staticmethod\n def enthalpy(eps, press, rho):\n return 1 + eps + (press / rho)\n" ]
[ [ "numpy.sqrt" ] ]
mjirik/wsicolorfilter
[ "85f8f3705d21065781d49d0b700ab162e9063870" ]
[ "wsicolorfilter/svm_filter.py" ]
[ "import pickle as plk\n\nfrom sklearn import svm\n\nfrom wsicolorfilter.filter import Filter\n\n\nclass SvmFilter(Filter):\n \"\"\"Filter which assign each pixel to the nearest centroid of the model.\"\"\"\n\n def create_model(self):\n return svm.LinearSVC()\n\n def train_model(self, x, y):\n # train model\n self.model.fit(x, y)\n\n def predict(self, img):\n orig_shape = img.shape\n\n # bitmap to string of pixels\n img = img.reshape((orig_shape[0] * orig_shape[1], orig_shape[2]))\n\n filter_mask = self.model.predict(img)\n filter_mask = filter_mask.reshape(orig_shape[0], orig_shape[1])\n filter_mask -= 1 # normalize output\n\n return filter_mask\n\n def load_model(self, file_name='svm_filter.npy'):\n with open(file_name, 'rb') as file:\n self.model = plk.load(file)\n\n def save_model(self, file_name='svm_filter.npy'):\n with open(file_name, 'wb') as file:\n plk.dump(self.model, file)\n" ]
[ [ "sklearn.svm.LinearSVC" ] ]
idzol/battlesnake
[ "3d94e70143d4d5f1cffd79fe6b84b59e1cb4864b" ]
[ "_tests/test_interrupt.py" ]
[ "from typing import List, Dict\n\nimport math\nimport operator\nfrom operator import add\n\nimport random as rand\nimport numpy as np\n# import pandas as pd\n# import random as rand\nimport copy as copy\n\nimport time as time\nfrom logClass import log\n\nimport constants as CONST\nimport functions as fn\n\nfrom snakeClass import snake\nfrom boardClass import board\n\nclass snakeTest(snake):\n pass \n\n\nclass boardTest(board):\n\n def isRoutePointv2(self, start, turn, eating={}, path=[], enemy=False):\n # Start - check route points from start location\n # Turn - adjust for future turn state \n # Eating - adjust for past / future eating \n # Path - check past path points for collision\n # Enemy - ignore markov threat when predicting enemy moves\n\n w = self.width\n h = self.height\n\n step = copy.copy(start)\n # Get step\n dy = step[0]\n dx = step[1]\n\n # Get markov \n \n t = min(turn, CONST.lookAheadEnemy - 1)\n markov = copy.copy(self.markovs[t])\n\n # Get tails \n trails = copy.copy(self.trailsSnake)\n board = np.zeros([w, h], np.intc)\n\n for sid in trails:\n # Adjust trails for each snake based on eating\n if sid in eating.keys(): \n board += np.where(trails[sid], trails[sid] + eating[sid], trails[sid]) \n\n else: \n board += trails[sid]\n \n # print(\"DEBUG\", dx, dy, t, step, path)\n # Route logic \n if (self.inBounds(step)):\n if ( # Our prediction logic \n (((markov[dy, dx] < CONST.pointThreshold) or \n (t >= board[dy, dx] and \n markov[dy, dx] >= CONST.routeThreshold)) and \n not (step in path)) or \n # Enemy prediction logic \n (enemy and \n t >= board[dy, dx] and \n not (step in path)) \n ):\n\n return (True, copy.copy(markov[dy, dx]))\n \n return (False, CONST.pointThreshold)\n\n\n def getEnemyFuture(self, snakes, turns=2):\n\n sid_us = self.getIdentity()\n for sid in snakes:\n \n snake = snakes[sid]\n head = snake.getHead()\n paths = [] \n\n enemy = False \n if sid_us != snake.getId():\n enemy = True \n \n for dirn in CONST.directions:\n # initial step in each direction \n step = list(map(add, head, CONST.directionMap[dirn]))\n found, point = self.isRoutePointv2(step, 0, {}, [], enemy)\n if (found): \n paths.append(copy.copy([step]))\n\n # print(\"FUTURE\", head, paths)\n\n for turn in range(0, turns - 1):\n # for N-1 turns look in each direction for each path\n paths_new = []\n for path in paths: \n for dirn in CONST.directions:\n \n head_n = path[-1] \n step = list(map(add, head_n, CONST.directionMap[dirn]))\n found, point = self.isRoutePointv2(step, turn)\n route = path + [step]\n if (found): \n # New path found \n paths_new.append(copy.copy(route))\n\n # Concatenate new paths \n paths = paths + paths_new\n\n # print(\"SNAKE\", snake.getId(), paths)\n snake.setNextSteps(paths)\n\n\n # def isRoutePoint_simulate(step, turn, us, them):\n # if (them):\n # simulate = np.zeros([h, w], np.intc) \n # for t in them: \n # simulate[t[1],t[0]] = CONST.routeThreshold\n\n\n def simulateBoard_basic(snakes):\n # Set enemy step (outside fxn) \n # Simulate step \n pass \n\n # check if enemy head / our head on same square \n # if we are larger, win \n\n\ndef enemyStrategy(bo, snakes): \n # (1) select possible paths for enemy\n future = 2 \n dirn_avoid = []\n \n paths = bo.getEnemyFuture(snakes, future)\n # O(dirs * future * snakes)\n\n oursteps = us.getNextSteps()\n \n sid_us = bo.getIdentity()\n length_us = snakes[sid_us].getLength()\n steps_us = snakes[sid_us].getNextSteps()\n \n for sid in snakes:\n if sid != sid_us: \n snake = snakes[sid]\n length = snake.getLength()\n steps = 
snake.getNextSteps()\n\n \n for ourstep in steps_us:\n # Iterate through our steps \n for step in steps:\n # Check against enemy steps \n safe = True\n reason = \"\"\n \n if len(step) == len(ourstep):\n # Look for same turn (same length)\n\n # print(\"STEPS us:%s them:%s\" % (ourstep, step)) \n \n if step == ourstep:\n if length > length_us:\n # Check for collision (we are same / smaller) \n safe = False \n reason = \"head on collision\"\n\n else: \n\n # Check moves available \n turn = len(step)\n start = ourstep[-1]\n future = ourstep + step \n future.remove(start)\n\n found = bo.findEmptySpace(start, future, turn)\n \n # print(\"FOUND start:%s turn:%s future%s found %s\" % (start, turn, future, found)) \n \n # No paths\n if (not found):\n safe = False \n reason = \"no path for us\"\n \n \n if (not safe):\n # One of the enemy steps is not safe for our step. Abandon all paths in that direction (overcautious)\n # TODO: less aggressive if we see a path out\n\n print(\"STEP sid:%s us:%s them:%s reason:%s\" % (sid, ourstep, step, reason))\n for s in steps_us:\n if ourstep[0] in s: \n steps_us.remove(s)\n dirn_avoid.append(ourstep[0])\n # dy = ourstep[0][0]\n # dx = ourstep[0][1]\n # self.markov[dy,dx] = CONST.routeThreshold\n\n snakes[sid_us].setNextSteps(steps_us)\n # print(\"FINAL PATHS\", dirn_avoid, steps_us)\n # return directions to avoid (mark as not reachable)\n return dirn_avoid\n\n\n# def enemyStrategy_chance(self):\n # (2) if our path = 100% (ie. single path)\n # and enemy head < us head distance\n # (stay off wall -- leave space for retreat / exit)\n\n # (3) logic that says they are 1x square from wall \n # if each point in probability cloud \n # then enclosed drops to zero .. \n # or intersects with our path @ 100% \n\n\ndata = {'game': {'id': '609d47ca-e773-49f9-b3f4-2c52afa4a05c', 'ruleset': {'name': 'solo', 'version': 'v1.0.22', 'settings': {'foodSpawnChance': 15, 'minimumFood': 1, 'hazardDamagePerTurn': 0, 'royale': {'shrinkEveryNTurns': 0}, 'squad': {'allowBodyCollisions': False, 'sharedElimination': False, 'sharedHealth': False, 'sharedLength': False}}}, 'timeout': 500, 'source': ''}, 'turn': 4, 'board': {'height': 11, 'width': 11, 'snakes': [], 'food': [], 'hazards': []}, 'you': {'id': 'gs_VYFDmY6qCM6MH6KJ7jKKybg3', 'name': 'idzol-dev', 'latency': '326', 'health': 96, 'body': [{'x': 1, 'y': 9}, {'x': 1, 'y': 8}, {'x': 1, 'y': 7}], 'head': {'x': 1, 'y': 9}, 'length': 3, 'shout': '', 'squad': ''}}\n\nlogger = log()\n\nus = snakeTest()\nus2 = snakeTest()\nthem = snakeTest()\n\nus.setHead([0,6])\nus.setBody([[0,7], [0,8], [0,9], [0,10], [1,10]])\nus.setHistory(us.getBody())\n\nus2.setHead([1, 4])\nus2.setBody([[1, 5], [2, 5], [3, 5], [3, 4]])\nus2.setHistory(us2.getBody())\n\nthem.setHead([7,7])\nthem.setBody([[7,6],[7,5],[6,5],[6,4],[7,4],[7,3]])\nthem.setHistory(them.getBody())\n\nsn_body = us.getBody()\nenemy_body = them.getBody()\n\nus.setType('us')\nus.setId('ourSnek')\nus2.setId('enemySnek1')\nthem.setId('enemySnek2')\n\nallSnakes = {\n us.getId():us,\n us2.getId():us2,\n them.getId():them\n}\n\n# foods = [[6,6]]\nfoods = [[2,2], [4,4], [6,6]]\n\nbo = boardTest()\nbo.setIdentity(us.getId())\n\nCONST.minProbability = 1\n\nlogger.timer('== Update Boards ==')\nbo.updateBoards(data, us, allSnakes, foods) \nbo.updateChance(allSnakes, foods)\nbo.updateMarkov(us, allSnakes, foods)\nbo.updateDijkstra(us)\nlogger.timer('== Finish Boards ==')\n\n\n#bo.getEnemyFuture(allSnakes, 2)\n# print(\"US\", us.getNextSteps())\n# print(\"US2\", us2.getNextSteps())\n# print(\"ENEMY\", 
them.getNextSteps())\n\nroute = enemyStrategy(bo, allSnakes)\nprint(\"ROUTE\", route)\n\n\nlogger.timer('== Finish Strategy ==')\n\n# print(bo.markovs[0])\nprint(bo.combine)\n\ntargets = [[0,0], [2,0], [3,9], [9,1]]\n\nlogger.timer('== Finish Paths ==')\n\n" ]
[ [ "numpy.where", "numpy.zeros" ] ]
travers-rhodes/deepmind-research
[ "59bace8e09b31686f1a4d4bd642c47388bc230fb" ]
[ "iodine/main.py" ]
[ "# Lint as: python3\n# Copyright 2019 Deepmind Technologies Limited.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# pylint: disable=g-importing-member, g-multiple-import, g-import-not-at-top\n# pylint: disable=protected-access, g-bad-import-order, missing-docstring\n# pylint: disable=unused-variable, invalid-name, no-value-for-parameter\n\nfrom copy import deepcopy\nimport os.path\nimport warnings\nfrom absl import logging\nimport numpy as np\nfrom sacred import Experiment, SETTINGS\n\n# Ignore all tensorflow deprecation warnings\nlogging._warn_preinit_stderr = 0\nwarnings.filterwarnings(\"ignore\", module=\".*tensorflow.*\")\nimport tensorflow.compat.v1 as tf\n\ntf.logging.set_verbosity(tf.logging.ERROR)\nimport sonnet as snt\nfrom sacred.stflow import LogFileWriter\nfrom iodine.modules import utils\nfrom iodine import configurations\n\nSETTINGS.CONFIG.READ_ONLY_CONFIG = False\n\nex = Experiment(\"iodine\")\n\n\[email protected]\ndef default_config():\n continue_run = False # set to continue experiment from an existing checkpoint\n checkpoint_dir = (\"checkpoints/iodine\"\n ) # if continue_run is False, \"_{run_id}\" will be appended\n save_summaries_steps = 10\n save_checkpoint_steps = 1000\n\n n_z = 64 # number of latent dimensions\n num_components = 7 # number of components (K)\n num_iters = 5\n\n learn_rate = 0.001\n batch_size = 4\n stop_after_steps = int(1e6)\n\n # Details for the dataset, model and optimizer are left empty here.\n # They can be found in the configurations for individual datasets,\n # which are provided in configurations.py and added as named configs.\n data = {} # Dataset details will go here\n model = {} # Model details will go here\n optimizer = {} # Optimizer details will go here\n\n\nex.named_config(configurations.clevr6)\nex.named_config(configurations.multi_dsprites)\nex.named_config(configurations.tetrominoes)\nex.named_config(configurations.dots)\n\n\[email protected]\ndef build(identifier, _config):\n config_copy = deepcopy(_config[identifier])\n return utils.build(config_copy, identifier=identifier)\n\n\ndef get_train_step(model, dataset, optimizer):\n loss, scalars, _ = model(dataset(\"train\"))\n global_step = tf.train.get_or_create_global_step()\n grads = optimizer.compute_gradients(loss)\n gradients, variables = zip(*grads)\n global_norm = tf.global_norm(gradients)\n gradients, global_norm = tf.clip_by_global_norm(\n gradients, 5.0, use_norm=global_norm)\n grads = zip(gradients, variables)\n train_op = optimizer.apply_gradients(grads, global_step=global_step)\n\n with tf.control_dependencies([train_op]):\n overview = model.get_overview_images(dataset(\"summary\"))\n scalars[\"debug/global_grad_norm\"] = global_norm\n\n summaries = {\n k: tf.summary.scalar(k, v) for k, v in scalars.items()\n }\n summaries.update(\n {k: tf.summary.image(k, v) for k, v in overview.items()})\n\n return tf.identity(global_step), scalars, train_op\n\n\[email protected]\ndef get_checkpoint_dir(continue_run, checkpoint_dir, _run, _log):\n if continue_run:\n assert 
os.path.exists(checkpoint_dir)\n _log.info(\"Continuing run from checkpoint at {}\".format(checkpoint_dir))\n return checkpoint_dir\n\n run_id = _run._id\n if run_id is None: # then no observer was added that provided an _id\n if not _run.unobserved:\n _log.warning(\n \"No run_id given or provided by an Observer. (Re-)using run_id=1.\")\n run_id = 1\n checkpoint_dir = checkpoint_dir + \"_{run_id}\".format(run_id=run_id)\n _log.info(\n \"Starting a new run using checkpoint dir: '{}'\".format(checkpoint_dir))\n return checkpoint_dir\n\n\[email protected]\ndef get_session(chkp_dir, loss, stop_after_steps, save_summaries_steps,\n save_checkpoint_steps):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n hooks = [\n tf.train.StopAtStepHook(last_step=stop_after_steps),\n tf.train.NanTensorHook(loss),\n ]\n\n return tf.train.MonitoredTrainingSession(\n hooks=hooks,\n config=config,\n checkpoint_dir=chkp_dir,\n save_summaries_steps=save_summaries_steps,\n save_checkpoint_steps=save_checkpoint_steps,\n )\n\n\[email protected](unobserved=True)\ndef load_checkpoint(use_placeholder=False, session=None):\n dataset = build(\"data\")\n model = build(\"model\")\n if use_placeholder:\n inputs = dataset.get_placeholders()\n else:\n inputs = dataset()\n\n info = model.eval(inputs)\n if session is None:\n session = tf.Session()\n saver = tf.train.Saver()\n checkpoint_dir = get_checkpoint_dir()\n checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)\n saver.restore(session, checkpoint_file)\n\n print('Successfully restored Checkpoint \"{}\"'.format(checkpoint_file))\n # print variables\n variables = tf.global_variables() + tf.local_variables()\n for row in snt.format_variables(variables, join_lines=False):\n print(row)\n\n return {\n \"session\": session,\n \"model\": model,\n \"info\": info,\n \"inputs\": inputs,\n \"dataset\": dataset,\n }\n\n\[email protected]\n@LogFileWriter(ex)\ndef main(save_summaries_steps):\n checkpoint_dir = get_checkpoint_dir()\n\n dataset = build(\"data\")\n model = build(\"model\")\n optimizer = build(\"optimizer\")\n gstep, train_step_exports, train_op = get_train_step(model, dataset,\n optimizer)\n\n loss = []\n with get_session(checkpoint_dir, train_step_exports[\"loss/total\"]) as sess:\n while not sess.should_stop():\n out = sess.run({\n \"step\": gstep,\n \"loss\": train_step_exports[\"loss/total\"],\n \"train\": train_op,\n })\n loss.append(out[\"loss\"])\n step = out[\"step\"]\n if step % save_summaries_steps == 0:\n mean_loss = np.mean(loss)\n ex.log_scalar(\"loss\", mean_loss, step)\n print(\"{step:>6d} Loss: {loss: >12.2f}\".format(\n step=step, loss=mean_loss))\n loss = []\n" ]
[ [ "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.summary.scalar", "tensorflow.compat.v1.ConfigProto", "tensorflow.compat.v1.global_norm", "tensorflow.compat.v1.train.MonitoredTrainingSession", "tensorflow.compat.v1.train.StopAtStepHook", "tensorflow.compat.v1.train.NanTensorHook", "tensorflow.compat.v1.train.latest_checkpoint", "tensorflow.compat.v1.identity", "tensorflow.compat.v1.global_variables", "tensorflow.compat.v1.control_dependencies", "tensorflow.compat.v1.summary.image", "tensorflow.compat.v1.local_variables", "tensorflow.compat.v1.clip_by_global_norm", "tensorflow.compat.v1.train.get_or_create_global_step", "numpy.mean", "tensorflow.compat.v1.logging.set_verbosity" ] ]
ypark234/openmc
[ "571ed3b6ab449e555fc9c8452106425d26b19bd3" ]
[ "openmc/volume.py" ]
[ "from collections import OrderedDict\nfrom collections.abc import Iterable, Mapping\nfrom numbers import Real, Integral\nfrom xml.etree import ElementTree as ET\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport h5py\nfrom uncertainties import ufloat\n\nimport openmc\nimport openmc.checkvalue as cv\n\n_VERSION_VOLUME = 1\n\n\nclass VolumeCalculation:\n \"\"\"Stochastic volume calculation specifications and results.\n\n Parameters\n ----------\n domains : Iterable of openmc.Cell, openmc.Material, or openmc.Universe\n Domains to find volumes of\n samples : int\n Number of samples used to generate volume estimates\n lower_left : Iterable of float\n Lower-left coordinates of bounding box used to sample points. If this\n argument is not supplied, an attempt is made to automatically determine\n a bounding box.\n upper_right : Iterable of float\n Upper-right coordinates of bounding box used to sample points. If this\n argument is not supplied, an attempt is made to automatically determine\n a bounding box.\n\n Attributes\n ----------\n ids : Iterable of int\n IDs of domains to find volumes of\n domain_type : {'cell', 'material', 'universe'}\n Type of each domain\n samples : int\n Number of samples used to generate volume estimates\n lower_left : Iterable of float\n Lower-left coordinates of bounding box used to sample points\n upper_right : Iterable of float\n Upper-right coordinates of bounding box used to sample points\n threshold : float\n Threshold for the maximum standard deviation of volume in the calculation\n atoms : dict\n Dictionary mapping unique IDs of domains to a mapping of nuclides to\n total number of atoms for each nuclide present in the domain. For\n example, {10: {'U235': 1.0e22, 'U238': 5.0e22, ...}}.\n atoms_dataframe : pandas.DataFrame\n DataFrame showing the estimated number of atoms for each nuclide present\n in each domain specified.\n volumes : dict\n Dictionary mapping unique IDs of domains to estimated volumes in cm^3.\n threshold : float\n Threshold for the maxmimum standard deviation of volumes.\n trigger_type : {'variance', 'std_dev', 'rel_err'}\n Value type used to halt volume calculation\n iterations : int\n Number of iterations over samples (for calculations with a trigger).\n\n \"\"\"\n def __init__(self, domains, samples, lower_left=None, upper_right=None):\n self._atoms = {}\n self._volumes = {}\n self._threshold = None\n self._trigger_type = None\n self._iterations = None\n\n cv.check_type('domains', domains, Iterable,\n (openmc.Cell, openmc.Material, openmc.Universe))\n if isinstance(domains[0], openmc.Cell):\n self._domain_type = 'cell'\n elif isinstance(domains[0], openmc.Material):\n self._domain_type = 'material'\n elif isinstance(domains[0], openmc.Universe):\n self._domain_type = 'universe'\n self.ids = [d.id for d in domains]\n\n self.samples = samples\n\n if lower_left is not None:\n if upper_right is None:\n raise ValueError('Both lower-left and upper-right coordinates '\n 'should be specified')\n\n # For cell domains, try to compute bounding box and make sure\n # user-specified one is valid\n if self.domain_type == 'cell':\n for c in domains:\n ll, ur = c.bounding_box\n if np.any(np.isinf(ll)) or np.any(np.isinf(ur)):\n continue\n if (np.any(np.asarray(lower_left) > ll) or\n np.any(np.asarray(upper_right) < ur)):\n warnings.warn(\n \"Specified bounding box is smaller than computed \"\n \"bounding box for cell {}. 
Volume calculation may \"\n \"be incorrect!\".format(c.id))\n\n self.lower_left = lower_left\n self.upper_right = upper_right\n else:\n if self.domain_type == 'cell':\n ll, ur = openmc.Union(c.region for c in domains).bounding_box\n if np.any(np.isinf(ll)) or np.any(np.isinf(ur)):\n raise ValueError('Could not automatically determine bounding '\n 'box for stochastic volume calculation.')\n else:\n self.lower_left = ll\n self.upper_right = ur\n else:\n raise ValueError('Could not automatically determine bounding box '\n 'for stochastic volume calculation.')\n\n @property\n def ids(self):\n return self._ids\n\n @property\n def samples(self):\n return self._samples\n\n @property\n def lower_left(self):\n return self._lower_left\n\n @property\n def upper_right(self):\n return self._upper_right\n\n @property\n def threshold(self):\n return self._threshold\n\n @property\n def trigger_type(self):\n return self._trigger_type\n\n @property\n def iterations(self):\n return self._iterations\n\n @property\n def domain_type(self):\n return self._domain_type\n\n @property\n def atoms(self):\n return self._atoms\n\n @property\n def volumes(self):\n return self._volumes\n\n @property\n def atoms_dataframe(self):\n items = []\n columns = [self.domain_type.capitalize(), 'Nuclide', 'Atoms']\n for uid, atoms_dict in self.atoms.items():\n for name, atoms in atoms_dict.items():\n items.append((uid, name, atoms))\n\n return pd.DataFrame.from_records(items, columns=columns)\n\n @ids.setter\n def ids(self, ids):\n cv.check_type('domain IDs', ids, Iterable, Real)\n self._ids = ids\n\n @samples.setter\n def samples(self, samples):\n cv.check_type('number of samples', samples, Integral)\n cv.check_greater_than('number of samples', samples, 0)\n self._samples = samples\n\n @lower_left.setter\n def lower_left(self, lower_left):\n name = 'lower-left bounding box coordinates'\n cv.check_type(name, lower_left, Iterable, Real)\n cv.check_length(name, lower_left, 3)\n self._lower_left = lower_left\n\n @upper_right.setter\n def upper_right(self, upper_right):\n name = 'upper-right bounding box coordinates'\n cv.check_type(name, upper_right, Iterable, Real)\n cv.check_length(name, upper_right, 3)\n self._upper_right = upper_right\n\n @threshold.setter\n def threshold(self, threshold):\n name = 'volume std. dev. 
threshold'\n cv.check_type(name, threshold, Real)\n cv.check_greater_than(name, threshold, 0.0)\n self._threshold = threshold\n\n @trigger_type.setter\n def trigger_type(self, trigger_type):\n cv.check_value('tally trigger type', trigger_type,\n ('variance', 'std_dev', 'rel_err'))\n self._trigger_type = trigger_type\n\n @iterations.setter\n def iterations(self, iterations):\n name = 'volume calculation iterations'\n cv.check_type(name, iterations, Integral)\n cv.check_greater_than(name, iterations, 0)\n self._iterations = iterations\n\n @volumes.setter\n def volumes(self, volumes):\n cv.check_type('volumes', volumes, Mapping)\n self._volumes = volumes\n\n @atoms.setter\n def atoms(self, atoms):\n cv.check_type('atoms', atoms, Mapping)\n self._atoms = atoms\n\n def set_trigger(self, threshold, trigger_type):\n \"\"\"Set a trigger on the volume calculation\n\n Parameters\n ----------\n threshold : float\n Threshold for the maximum standard deviation of volumes\n trigger_type : {'variance', 'std_dev', 'rel_err'}\n Value type used to halt volume calculation\n \"\"\"\n self.trigger_type = trigger_type\n self.threshold = threshold\n\n @classmethod\n def from_hdf5(cls, filename):\n \"\"\"Load stochastic volume calculation results from HDF5 file.\n\n Parameters\n ----------\n filename : str\n Path to volume.h5 file\n\n Returns\n -------\n openmc.VolumeCalculation\n Results of the stochastic volume calculation\n\n \"\"\"\n with h5py.File(filename, 'r') as f:\n cv.check_filetype_version(f, \"volume\", _VERSION_VOLUME)\n\n domain_type = f.attrs['domain_type'].decode()\n samples = f.attrs['samples']\n lower_left = f.attrs['lower_left']\n upper_right = f.attrs['upper_right']\n\n threshold = f.attrs.get('threshold')\n trigger_type = f.attrs.get('trigger_type')\n iterations = f.attrs.get('iterations', 1)\n\n volumes = {}\n atoms = {}\n ids = []\n for obj_name in f:\n if obj_name.startswith('domain_'):\n domain_id = int(obj_name[7:])\n ids.append(domain_id)\n group = f[obj_name]\n volume = ufloat(*group['volume'][()])\n volumes[domain_id] = volume\n nucnames = group['nuclides'][()]\n atoms_ = group['atoms'][()]\n atom_dict = OrderedDict()\n for name_i, atoms_i in zip(nucnames, atoms_):\n atom_dict[name_i.decode()] = ufloat(*atoms_i)\n atoms[domain_id] = atom_dict\n\n # Instantiate some throw-away domains that are used by the constructor\n # to assign IDs\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', openmc.IDWarning)\n if domain_type == 'cell':\n domains = [openmc.Cell(uid) for uid in ids]\n elif domain_type == 'material':\n domains = [openmc.Material(uid) for uid in ids]\n elif domain_type == 'universe':\n domains = [openmc.Universe(uid) for uid in ids]\n\n # Instantiate the class and assign results\n vol = cls(domains, samples, lower_left, upper_right)\n\n if trigger_type is not None:\n vol.set_trigger(threshold, trigger_type.decode())\n\n vol.iterations = iterations\n vol.volumes = volumes\n vol.atoms = atoms\n return vol\n\n def load_results(self, filename):\n \"\"\"Load stochastic volume calculation results from an HDF5 file.\n\n Parameters\n ----------\n filename : str\n Path to volume.h5 file\n\n \"\"\"\n results = type(self).from_hdf5(filename)\n\n # Make sure properties match\n assert self.ids == results.ids\n assert np.all(self.lower_left == results.lower_left)\n assert np.all(self.upper_right == results.upper_right)\n\n # Copy results\n self.volumes = results.volumes\n self.atoms = results.atoms\n\n def to_xml_element(self):\n \"\"\"Return XML representation of the volume 
calculation\n\n Returns\n -------\n element : xml.etree.ElementTree.Element\n XML element containing volume calculation data\n\n \"\"\"\n element = ET.Element(\"volume_calc\")\n dt_elem = ET.SubElement(element, \"domain_type\")\n dt_elem.text = self.domain_type\n id_elem = ET.SubElement(element, \"domain_ids\")\n id_elem.text = ' '.join(str(uid) for uid in self.ids)\n samples_elem = ET.SubElement(element, \"samples\")\n samples_elem.text = str(self.samples)\n ll_elem = ET.SubElement(element, \"lower_left\")\n ll_elem.text = ' '.join(str(x) for x in self.lower_left)\n ur_elem = ET.SubElement(element, \"upper_right\")\n ur_elem.text = ' '.join(str(x) for x in self.upper_right)\n if self.threshold:\n trigger_elem = ET.SubElement(element, \"threshold\")\n trigger_elem.set(\"type\", self.trigger_type)\n trigger_elem.set(\"threshold\", str(self.threshold))\n return element\n" ]
[ [ "pandas.DataFrame.from_records", "numpy.all", "numpy.asarray", "numpy.isinf" ] ]
neevparikh/lwm
[ "ec8d27f6c011a732aa58ae04cba66a5bac68f8f8" ]
[ "atari/dqn/prepare_obs.py" ]
[ "import torch\n\n\ndef prepare_obs(obs, done, fstack):\n assert obs.dtype == torch.uint8\n assert obs.shape[2] == 1\n\n if fstack > 1:\n obs = stack_frames(obs, fstack)\n done_stacked = stack_frames(done, fstack)\n obs = obs * obs_mask(done_stacked)\n return obs.float() / 128 - 1\n\n\ndef stack_frames(x, stack=4):\n \"\"\"\n Args:\n x: [steps + stack - 1, batch, 1, ...] - flat trajectory with prefix = stack - 1\n Returns:\n [steps, batch, stack, ...] - each step (dim 0) includes stack of frames (dim 2)\n \"\"\"\n shape = (x.shape[0] - stack + 1, x.shape[1], stack, *x.shape[3:])\n y = torch.empty(shape, dtype=x.dtype, device=x.device)\n for i in range(stack):\n y[:, :, i] = x[i : shape[0] + i, :, 0]\n return y\n\n\ndef obs_mask(done):\n \"\"\"\n mask to zero out observations in 4-frame stack when done = 1\n \"\"\"\n mask = 1 - done[:, :, 1:]\n for i in reversed(range(mask.shape[2] - 1)):\n mask[:, :, i] *= mask[:, :, i + 1]\n mask = torch.cat([mask, torch.ones_like(mask[:, :, -1:])], 2)\n mask = mask[..., None, None]\n return mask\n" ]
[ [ "torch.empty", "torch.ones_like" ] ]
ssense-ai/gitpod-heroku-python-ai-1
[ "9d33cf2d8bcced448affcf39f86abde291e7e0b3" ]
[ "build_model.py" ]
[ "import numpy as np\nimport pandas as pd\n\n#データ分割用\nfrom sklearn.model_selection import train_test_split\n\n#LightGBM\nimport lightgbm as lgb\n\n#pickle\nimport pickle\n\n#データ読み込み\ndf_train = pd.read_csv(\"train.csv\")\ndf_test = pd.read_csv(\"test.csv\")\n\n#データ結合\ndf_train[\"TrainFlag\"] = True\ndf_test[\"TrainFlag\"] = False\n\ndf_all = df_train.append(df_test)\ndf_all.index = df_all[\"Id\"]\ndf_all.drop(\"Id\", axis = 1, inplace = True)\n\n#ダミー変数化\ndf_all = pd.get_dummies(df_all, drop_first=True)\n\n#df_allを訓練データとテストデータに再度分ける\ndf_train = df_all[df_all[\"TrainFlag\"] == True]\ndf_train = df_train.drop([\"TrainFlag\"], axis = 1)\n\ndf_test = df_all[df_all[\"TrainFlag\"] == False]\ndf_test = df_test.drop([\"TrainFlag\"], axis = 1)\ndf_test = df_test.drop([\"SalePrice\"], axis = 1)\n\n#データ分割\ny = df_train[\"SalePrice\"].values\nX = df_train.drop(\"SalePrice\", axis=1).values\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1234)\n\n#LGBMのデータ作成\nlgb_train = lgb.Dataset(X_train, y_train)\nlgb_eval = lgb.Dataset(X_test, y_test)\n\n#パラメータ設定\nparams = {\n # 回帰問題\n 'random_state':1234, 'verbose':0,\n # 学習用の指標 (RMSE)\n 'metrics': 'rmse',\n }\nnum_round = 100\n\n#モデル訓練\nmodel = lgb.train(params, lgb_train, num_boost_round = num_round)\n\n#モデルを保存\nwith open('lgb_model.pickle', mode='wb') as fp:\n pickle.dump(model, fp)\n" ]
[ [ "pandas.read_csv", "sklearn.model_selection.train_test_split", "pandas.get_dummies" ] ]
Aletechdev/mutil
[ "30339f409723a2288c0e3575d466529555961305" ]
[ "test_gene.py" ]
[ "import pandas as pd\nfrom gene import get_gene_bnum\n\ndf = pd.DataFrame.from_dict(\n {'OBJECT_ID': ['ECK120000001'],\n 'OBJECT_SYNONYM_NAME': ['b4053'],\n 'OS_INTERNAL_COMMENT': [None],\n 'KEY_ID_ORG': ['ECK12']}, orient=\"columns\")\n\nassert(get_gene_bnum(\"ECK120000001\", df) == \"b4053\")\n\nprint(\"DONE\")" ]
[ [ "pandas.DataFrame.from_dict" ] ]
amolmore3171/flaskoythonheatmapapp
[ "f80e62d8bed0e352b8a6bd5241839f0695573752" ]
[ "GDO_events.py" ]
[ "from __future__ import print_function\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport pandas as pd\nimport io\nfrom flask import Flask, make_response\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n\n\napp = Flask(__name__)\n\n\[email protected]('/plot.png')\ndef home_page():\n chicagoland_agg_datafile='data/Chicagoland_agg.csv'\n Chicagoland_agg = pd.read_csv(chicagoland_agg_datafile, names=['county', 'date_str', 'num_events_by_county', 'num_devices_by_county','avg_events_by_county'], skiprows=1)\n\n\n fig, ax = plt.subplots(figsize=(22,8))\n for c in list(Chicagoland_agg.county.unique()):\n ax.plot(Chicagoland_agg[Chicagoland_agg['county']==c]['avg_events_by_county'], marker='.', linestyle='-', linewidth=0.5, label=c)\n\n ax.set_ylabel('Avg. # Events')\n ax.set_title('GDO Open and Close Events: Chicagoland, Jan - Apr 2020')\n plt.legend()\n #-------------------------------------------\n # Set x-axis major ticks to weekly interval, on Mondays\n ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=mdates.MONDAY))\n # Format x-tick labels as 3-letter month name and day number\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))\n #--------------------------------------------\n\n ax.set_ylim((3,14))\n ax.set_yticks(range(3,14))\n\n ax.grid('x', ls=':')\n\n\n plt.axvline(\"2020-03-16\", color=\"blue\", linestyle='--', linewidth=0.5)\n plt.axvline(\"2020-03-21\", color=\"red\", linestyle='--', linewidth=0.5)\n\n\n canvas = FigureCanvas(fig)\n output = io.BytesIO()\n canvas.print_png(output)\n response = make_response(output.getvalue())\n response.mimetype = 'image/png'\n return response\n\n\nif __name__ == '__main__':\n print(__doc__)\n app.run()\n" ]
[ [ "matplotlib.pyplot.axvline", "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.dates.DateFormatter", "matplotlib.pyplot.subplots", "matplotlib.dates.WeekdayLocator", "matplotlib.backends.backend_agg.FigureCanvasAgg" ] ]
dekelmeirom/pathologylab
[ "262b0bd9cb9233bc960671c2d674cf895b228f39" ]
[ "algo/PDL1Net/cell_count.py" ]
[ "import cv2\nimport numpy as np\nimport copy\nimport algo.mrcnn.visualize_pdl1 as vis_pdl1\n\nclass_names = {\"INFLAMMATION\": 1, \"NEGATIVE\": 2, \"POSITIVE\": 3, \"OTHER\": 4}\n\n\ndef gamma_correction(img, gammas):\n \"\"\"\n apply gamma correction on the given image.\n allow different gamma for each color channel\n :param img: image in BGR color format\n :param gammas: array of gamma to use for each channel (in RGB order)\n :return: corrected image\n \"\"\"\n # assume the format of the image is BGR, but the gammas are in RGB\n img[:, :, 0] = (((img[:, :, 0] / 255) ** gammas[2]) * 255)\n img[:, :, 1] = (((img[:, :, 1] / 255) ** gammas[1]) * 255)\n img[:, :, 2] = (((img[:, :, 2] / 255) ** gammas[0]) * 255)\n return img\n\n\ndef hue_nuclei_masking(img, min_hue, max_hue):\n \"\"\"\n mask the image's nuclei by hue limits\n :param img: the image to apply thr masking to\n :param min_hue: the minimum hue to consider as nuclei\n :param max_hue: the maximum hue to consider as nuclei\n :return: mask of the filtered image\n \"\"\"\n hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n hsv_img = cv2.cvtColor(hsv_img, cv2.COLOR_RGB2HSV)\n\n hue_mask = np.logical_and(min_hue < hsv_img[:, :, 0], hsv_img[:, :, 0] < max_hue)\n return hue_mask\n\n\ndef morphological_correction(mask, kernel_size=4):\n # create the negative of the mask\n negative_mask = np.ones(mask.shape)\n negative_mask = negative_mask - mask\n # close operation\n kernel_close = np.ones((kernel_size, kernel_size), np.uint8)\n negative_mask = cv2.morphologyEx(negative_mask.astype('uint8'), cv2.MORPH_CLOSE, kernel_close)\n # return to the non-negative mask\n mask = np.ones(negative_mask.shape)\n mask = mask - negative_mask\n return mask\n\n\ndef morphological_correction_big(mask, kernel_size=5):\n # close operation\n kernel_close = np.ones((kernel_size, kernel_size), np.uint8)\n mask = cv2.morphologyEx(mask.astype('uint8'), cv2.MORPH_CLOSE, kernel_close)\n\n return mask\n\n\ndef find_contours(mask):\n # uncomment this line and comment the line after if using later version of openCV\n #_, cnts, _ = cv2.findContours(mask.astype('uint8'), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n cnts, _ = cv2.findContours(mask.astype('uint8'), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n single_pixel = []\n for cnt in cnts:\n if cnt.shape[0] <= 1:\n single_pixel.append(cnt)\n return cnts, single_pixel\n\n\ndef count_nucleus(img, img_class, img_masks, img_class_ids):\n working_img = copy.deepcopy(img)\n gammas = []\n if img_class == class_names[\"POSITIVE\"]:\n gammas = [2, 1.6, 1.3] # RGB\n mask = vis_pdl1.get_class_mask(3, img_masks, img_class_ids)\n elif img_class == class_names[\"NEGATIVE\"]:\n gammas = [2.2, 1.6, 1.3] # RGB\n mask = vis_pdl1.get_class_mask(2, img_masks, img_class_ids)\n else:\n mask = np.zeros((1024, 1024))\n # create the 3d mask\n temp_mask = np.broadcast_to(mask, (3, mask.shape[0], mask.shape[1]))\n mask_3d = np.zeros((mask.shape[0], mask.shape[1], 3))\n for i in range(3):\n mask_3d[:, :, i] = temp_mask[i, :, :]\n # apply the mask\n working_img = working_img * mask_3d\n working_img = working_img.astype('uint8')\n\n working_img = gamma_correction(working_img, gammas)\n\n if img_class == class_names[\"POSITIVE\"]:\n hue_min = 70\n hue_max = 175\n elif img_class == class_names[\"NEGATIVE\"]:\n hue_min = 50\n hue_max = 175\n else:\n hue_min = 70\n hue_max = 175\n mask = hue_nuclei_masking(working_img, hue_min, hue_max)\n\n if img_class == class_names[\"POSITIVE\"]:\n kernel_size = 4\n elif img_class == class_names[\"NEGATIVE\"]:\n kernel_size = 
4\n mask_after_morph = morphological_correction(mask, kernel_size)\n cnts, single_pixel = find_contours(mask_after_morph)\n if len(cnts) > 40: # on high number of cells - do not use morphological operation\n if img_class == class_names[\"POSITIVE\"]:\n kernel_size = 5\n elif img_class == class_names[\"NEGATIVE\"]:\n kernel_size = 5\n mask_after_morph = morphological_correction_big(mask, kernel_size)\n cnts, single_pixel = find_contours(mask_after_morph)\n return len(cnts) - len(single_pixel), mask_after_morph\n" ]
[ [ "numpy.broadcast_to", "numpy.ones", "numpy.logical_and", "numpy.zeros" ] ]
eulerlab/QDSpy
[ "29f0fd118cca7b86925f3a0187f64f0f2560aedc" ]
[ "QDSpy_core_presenter.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nQDSpy module - interprets and presents compiled stimuli\n\n'Presenter' \n Presents a compiled stimulus. \n This class is a graphics API independent.\n \nCopyright (c) 2013-2016 Thomas Euler\nDistributed under the terms of the GNU General Public License (GPL)\n\"\"\"\n# ---------------------------------------------------------------------\n__author__ \t= \"[email protected]\"\n\nimport numpy as np\nimport QDSpy_global as glo\nimport QDSpy_stim as stm\nimport QDSpy_stim_movie as mov\nimport QDSpy_stim_video as vid\nimport QDSpy_stim_draw as drw\nimport QDSpy_stim_support as ssp\nimport QDSpy_core_support as csp\nimport QDSpy_core_shader as csh\nimport QDSpy_config as cfg\nimport QDSpy_multiprocessing as mpr\nimport Devices.digital_io as dio\n\nglobal Clock\nClock = csp.defaultClock\n\n# ---------------------------------------------------------------------\n# Adjust global parameters depending on command line arguments\n#\nargs = cfg.getParsedArgv()\n\nglobal QDSpy_verbose\nQDSpy_verbose = args.verbose\nif QDSpy_verbose:\n import pylab\n \n# =====================================================================\n#\n# ---------------------------------------------------------------------\nclass Presenter:\n \"\"\" Presenter class\n \"\"\"\n def __init__(self, _Stage, _IO, _Conf, _View, _View2=None, _LCr=[]):\n # Initializing\n #\n self.Stage = _Stage\n self.IO = _IO\n self.Conf = _Conf\n self.View = _View\n self.LCr = _LCr\n self.ShManager = csh.ShaderManager(self.Conf)\n self.reset()\n\n self.dtFr_meas_s = self.Stage.dtFr_s\n self.dtFr_thres_s = self.dtFr_meas_s +self.Conf.maxDtTr_ms /1000.0\n\n # Prepare recording of stimulus presentation, if requested\n # \n if self.Conf.recordStim:\n self.View.prepareGrabStim()\n \n # Define event handler(s)\n #\n self.View.setOnKeyboardHandler(self.onKeyboard)\n self.View.setOnDrawHandler(self.onDraw)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n def reset(self):\n # Reset presenter\n #\n self.Stim = None\n self.isReady = False\n self.nFr = 0\n self.is1FrOfSce = False\n self.iSc = 0\n self.isNextSce = True\n self.isFirstSce = True\n\n self.isEnd = False\n self.isIdle = True\n self.isUserAbort = False\n\n self.isRunFromGUI = False\n self.Sync = None\n \n self.IO_portOut = dio.devConst.NONE\n self.IO_maskMark = 0\n self.IO_isMarkSet = False\n\n self.nFrTotal = 0\n self.tFrRel_s = 0.0\n self.tFrRelOff_s = 0.0\n self.avFrDur_s = 0.0\n self.nRendTotal = 0\n self.avPresDur_s = 0.0\n self.avRendDur_s = 0.0\n self.rendDur_s = 0.0\n self.tFr = 0.0\n self.tStart = 0.0\n\n if glo.QDSpy_frRateStatsBufferLen > 0:\n self.dataDtFr = np.zeros(glo.QDSpy_frRateStatsBufferLen, dtype=float)\n else:\n self.dataDtFr = []\n self.dataDtFrLen = 0\n self.dataDtFrOver = False\n self.nDroppedFr = 0\n\n self.isInLoop = False\n self.nLoopRepeats = 0\n self.iFirstLoopSc = -1\n\n self.vertTr = np.array([], dtype=np.int) # temporary vertex arrays\n self.iVertTr = np.array([], dtype=np.int) # temporary index arrays\n self.vRGBTr = np.array([], dtype=np.uint8) # temporary RGBA arrays\n self.vRGBTr2 = np.array([], dtype=np.uint8) \n\n self.currShObjIDs = [] # list, IDs of current shader-enabled objects\n self.prevShObjIDs = [] # list, IDs of previously shown shader-enabled\n # objects\n self.prevObjIDs = [] # list, previously shown objects (all)\n self.ShProgList = [] # list, available shader programs\n # (ready to bind)\n self.MovieList = [] # list, movie class objects\n self.MovieCtrlList= [] # list, 
movie control class objects\n self.VideoList = [] # list, video class objects\n self.VideoCtrlList= [] # list, video control class objects\n \n self.markerVert, self.antiMarkerVert = drw.marker2vert(self.Stage, self.Conf)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n def onKeyboard(self, _key, _x, _y):\n if not(self.isRunFromGUI) and _key in glo.QDSpy_KEY_KillPresent:\n self.isUserAbort = True\n self.stop()\n\n # --------------------------------------------------------------------\n # Scene rendering function\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n def renderSce(self, _iSc, _nSc):\n # Renders the indexed scene\n #\n if self.Conf.isTrackTime:\n t0 = Clock.getTime_s()\n sc = self.Stim.SceList[_iSc]\n drawn = True\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n if sc[stm.SC_field_type] == stm.StimSceType.beginLoop:\n # Begin of a loop\n #\n self.isInLoop = True\n self.nLoopRepeats = sc[stm.SC_field_nLoopTrials] -1\n self.iFirstLoopSc = _iSc +1\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n elif sc[stm.SC_field_type] == stm.StimSceType.endLoop:\n # End of a loop\n #\n if (not(self.isInLoop) or (self.nLoopRepeats < 0)):\n pass\n if self.nLoopRepeats > 0:\n self.iSc = self.iFirstLoopSc -1\n self.nLoopRepeats -= 1\n else:\n self.isInLoop = False\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n elif sc[stm.SC_field_type] == stm.StimSceType.clearSce:\n # Clear scene\n #\n self.View.clear()\n if self.Stage.useScrOvl:\n drawn = False\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n elif sc[stm.SC_field_type] == stm.StimSceType.changeBkgCol:\n # Change background color\n #\n self.View.clear(sc[stm.SC_field_BkgRGB])\n \n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n elif sc[stm.SC_field_type] == stm.StimSceType.sendCommandToLCr:\n # Change LED currents\n #\n _params = sc[stm.SC_field_LCrParams]\n if ((_params[0] == stm.StimLCrCmd.setLEDCurrents) and\n (_params[1] >= 0) and (_params[1] < len(self.LCr))):\n self.LCr[_params[1]].setLEDCurrents(_params[2])\n \n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n elif sc[stm.SC_field_type] == stm.StimSceType.logUserParams:\n # Write user parameters to the log file\n #\n _userParams = sc[stm.SC_field_userParams]\n _userParams.update(stimFileName=self.Stim.fileName)\n # **************************************\n # **************************************\n # TODO: Copy also external stimulus files (containing large \n # lists or data structures) to the log directory\n # **************************************\n # **************************************\n ssp.Log.write(\"DATA\", _userParams.__str__())\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n elif sc[stm.SC_field_type] == stm.StimSceType.changeShParams:\n # Change shader parameters\n #\n _ShID = sc[stm.SC_field_ShID][0]\n _iSh = self.Stim.ShDict[_ShID]\n self.Stim.ShList[_iSh][stm.SH_field_Params] = sc[stm.SC_field_ShParams]\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n elif sc[stm.SC_field_type] == stm.StimSceType.changeObjShader:\n # Change object shader\n #\n for i in range(len(sc[stm.SC_field_IDs])):\n _ObID = sc[stm.SC_field_IDs][i]\n _iObj = self.Stim.ObjDict[_ObID]\n _ShID = sc[stm.SC_field_ShIDs][i]\n if _ShID < 0:\n _iSh = -1\n else:\n _iSh = self.Stim.ShDict[_ShID]\n self.Stim.ObjList[_iObj][stm.SO_field_shProgIndex] = _iSh\n \"\"\"\n 
SO_field_shProgIndex\n\n newSce = [StimSceType.changeObjShader, -1, self.nSce, False,\n _IDs, _shIDs]\n SC_field_IDs = 4\n SC_field_ShIDs = 5\n\n #self.currIVShObjGr[0] = shd.ShaderBindGroup(shd.getStandardShader(), 0)\n #self.currIVShObjGr[2] = shd.ShaderBindGroup(shd.getStandardShader(), 2)\n\n newShader = [StimObjType.shader, _shID,\n _shType, csh.SH_defaultParams[_shType], _shCode]\n self.ShDict[_shID] = len(self.ShList)\n self.ShList.append(newShader)\n self.ShProgList\n\n SH_field_type = 0\n SH_field_ID = 1\n SH_field_shaderType = 2\n SH_field_Params = 3\n SH_field_shaderCode = 4\n\n newBox = [StimObjType.box, _ID,\n (float(_dx_um), float(_dy_um)),\n\t \t\t SO_defaultFgRGB, SO_defaultAlpha, SO_default_RGBAByVert,\n _enShader, -1]\n \"\"\"\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n elif sc[stm.SC_field_type] == stm.StimSceType.startMovie:\n # Start running movie ...\n #\n # Get movie object via movie ID\n #\n _MovID = sc[stm.SC_field_IDs][0]\n _iMov = self.Stim.MovDict[_MovID]\n movOb = self.MovieList[_iMov]\n\n # Create a new movie control object; this internally creates a\n # pyglet sprite, with the requested presentation properties\n #\n mCtOb = mov.MovieCtrl(sc[stm.SC_field_MovSeq], _MovID, _Movie=movOb)\n mCtOb.iScr = sc[stm.SC_field_MovScreen]\n mCtOb.setSpriteProperties(sc[stm.SC_field_posXY], \n sc[stm.SC_field_magXY],\n sc[stm.SC_field_rot], \n sc[stm.SC_field_MovTrans])\n\n # Add the move control object to the list of active movies; remove\n # previous one, if still present\n #\n iMC = 0\n while iMC < len(self.MovieCtrlList):\n if self.MovieCtrlList[iMC][3] == _MovID:\n temp = self.MovieCtrlList.pop(iMC)\n temp[0].kill()\n else:\n iMC += 1\n\n self.MovieCtrlList.append([mCtOb, _iSc, self.nFrTotal, _MovID])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n elif sc[stm.SC_field_type] == stm.StimSceType.startVideo:\n # Start running video ...\n #\n # Get video object via video ID\n #\n _VidID = sc[stm.SC_field_IDs][0]\n _iVid = self.Stim.VidDict[_VidID]\n vidOb = self.VideoList[_iVid]\n\n # Create a new movie control object; this internally creates a\n # pyglet sprite, with the requested presentation properties\n #\n vCtOb = vid.VideoCtrl(_Video=vidOb)\n vCtOb.iScr = sc[stm.SC_field_VidScreen]\n vCtOb.setSpriteProperties(sc[stm.SC_field_posXY], \n sc[stm.SC_field_magXY],\n sc[stm.SC_field_rot], \n sc[stm.SC_field_VidTrans])\n\n # Add the video control object to the list of active videos; remove\n # previous one, if still present\n #\n iVC = 0\n while iVC < len(self.VideoCtrlList):\n if self.VideoCtrlList[iVC][3] == _VidID:\n temp = self.VideoCtrlList.pop(iVC)\n temp[0].kill()\n else:\n iVC += 1\n \n self.VideoCtrlList.append([vCtOb, _iSc, self.nFrTotal, _VidID])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n elif sc[stm.SC_field_type] == stm.StimSceType.renderSce:\n # Render objects in scene\n #\n self.View.clear()\n \n if self.Stim.cScOList[_iSc][0] >= 0:\n \n if self.is1FrOfSce: # _________________________________________\n # First frame of a new scene: Get index of object drawing list of\n # this scene and then get vertex data (or reuse previous data, if\n # nothing has changed)\n #\n iODr, ObjNewMask, ObjIDs, ObjPosXY, ObjRot = self.Stim.cScOList[_iSc]\n nObjs = len(self.Stim.cODr_tr_iVert[iODr])\n ObjNewMask = np.array(ObjNewMask)\n\n # Generate pyglet Groups to bind shaders to objects, if required\n #\n self.Batch.delete_shader_handles()\n\n for iObj, ObjID in enumerate(ObjIDs):\n if ObjID < 
0:\n continue\n iObjList = self.Stim.ObjDict[ObjID]\n iSh = self.Stim.ObjList[iObjList][stm.SO_field_shProgIndex]\n if iSh >= 0:\n # Create Group object referencing to requested shader and set\n # shader parameters (uniforms)\n #\n shPar = self.Stim.ShList[iSh][stm.SH_field_Params]\n shType = self.Stim.ShList[iSh][stm.SH_field_shaderType]\n self.Batch.add_shader_handle(ObjID, self.ShProgList[iSh], shType)\n x = ObjPosXY[iObj][0] +self.Stage.dxScr\n y = ObjPosXY[iObj][1] +self.Stage.dyScr\n a_rad = (ObjRot[iObj]+90.0)*np.pi/180.0\n self.tFrRelOff_s = self.tFrRel_s\n self.Batch.set_shader_time(ObjID, 0.0)\n self.Batch.set_shader_parameters(ObjID, [x,y], a_rad, shPar)\n \n else:\n # No shader\n #\n self.Batch.add_shader_handle(ObjID)\n\n # Check if vertex list(s) need(s) to be updated or re-created\n #\n # self.Stim.cODr_tr_xxx[iODr][iObj][0] := SC_vertDataChanged or not\n # self.Stim.cODr_tr_xxx[iODr][iObj][1] := Object ID or -1\n # self.Stim.cODr_tr_xxx[iODr][iObj][2] := numpy array with data\n #\n # Kill vertex data of previous shader-enabled objects\n #\n self.currShObjIDs = []\n self.Batch.delete_shader_object_data()\n\n for iObj in range(nObjs):\n self.iVertTr = self.Stim.cODr_tr_iVert[iODr][iObj][2]\n self.vertTr = self.Stim.cODr_tr_vertCoord[iODr][iObj][2]\n self.vRGBATr = self.Stim.cODr_tr_vertRGBA[iODr][iObj][2]\n self.vRGBATr2 = self.Stim.cODr_tr_vertRGBA2[iODr][iObj][2]\n\n if iObj == 0:\n # Not shader-enabled objects ...\n #\n if ObjNewMask[iObj] == stm.SC_ObjNewAll:\n self.Batch.replace_object_data(self.iVertTr, self.vertTr, \n self.vRGBATr, self.vRGBATr2)\n else:\n if (ObjNewMask[iObj] & stm.SC_ObjNewiVer) > 0:\n self.Batch.replace_object_data_indices(self.iVertTr)\n if (ObjNewMask[iObj] & stm.SC_ObjNewVer) > 0:\n self.Batch.replace_object_data_vertices(self.vertTr)\n if (ObjNewMask[iObj] & stm.SC_ObjNewRGBA) > 0:\n self.Batch.replace_object_data_colors(self.vRGBATr, \n self.vRGBATr2)\n\n else:\n # For each shader-enabled object ...\n #\n self.currShObjIDs.append(ObjIDs[iObj])\n self.Batch.add_shader_object_data(ObjIDs[iObj], self.iVertTr, \n self.vertTr, self.vRGBATr,\n self.vRGBATr2)\n\n self.prevShObjIDs = self.currShObjIDs\n self.prevObjIDs = ObjIDs\n\n else: # ______________________________________________________\n # Not first frame of a new scene, just update the shader\n # parameters, if any, ...\n #\n self.Batch.set_shader_time_all(self.tFrRel_s -self.tFrRelOff_s)\n\n # Indicate that the batch needs to be drawn\n #\n drawn = False\n\n\n if (_nSc > 0) and (not drawn\\\n or (len(self.MovieCtrlList) > 0) or (len(self.VideoCtrlList) > 0)):\n # Keep movie control objects updated: Advance or kill, if finished\n #\n iMC = 0\n while iMC < len(self.MovieCtrlList):\n mCtOb, iScWhenStarted, iFrWhenStarted, ID = self.MovieCtrlList[iMC]\n if iScWhenStarted == _iSc:\n # Don't start playing the movie if we are still in the no-duration\n # scene that started the movie\n #\n '''\n ssp.Log.write(\"DEBUG\", \"Movie #{0} ID{1} ready, _iSc={2} iFr={3}\"\n .format(iMC, mCtOb.ID, _iSc, self.nFrTotal))\n '''\n iMC += 1\n continue\n\n res = mCtOb.getNextFrIndex()\n if res < 0:\n '''\n ssp.Log.write(\"DEBUG\", \"Movie #{0} ID{1} last, _iSc={1} nFr={2}\"\n .format(iMC, mCtOb.ID, _iSc, self.nFrTotal -iFrWhenStarted))\n '''\n mCtOb, iScWhenStarted, iFrWhenStarted, ID = self.MovieCtrlList.pop(iMC)\n mCtOb.kill()\n\n else:\n mCtOb.setSpriteBatch(self.Batch)\n iMC += 1\n \n # Keep video control objects updated: Advance or kill, if finished\n #\n iVC = 0\n while iVC < len(self.VideoCtrlList):\n vCtOb, 
iScWhenStarted, iFrWhenStarted, ID = self.VideoCtrlList[iVC]\n if iScWhenStarted == _iSc:\n # Don't start playing the video if we are still in the no-duration\n # scene that started the video\n #\n iVC += 1\n continue\n\n res = vCtOb.getNextFrIndex()\n if res < 0:\n vCtOb, iScWhenStarted, iFrWhenStarted, ID = self.VideoCtrlList.pop(iVC)\n vCtOb.kill()\n\n else:\n vCtOb.setSpriteBatch(self.Batch)\n iVC += 1\n \n # Draw current triangle vertices, acknowledging the scaling and\n # rotation of the current display (stage settings)\n #\n self.Batch.draw(self.Stage, self.View, \n sc[stm.SC_field_type] == stm.StimSceType.clearSce)\n \n # Show marker, if requested and present in the current scene\n #\n if self.Conf.markShowOnScr:\n if sc[stm.SC_field_marker]: \n self.Batch.add_rect_data(self.markerVert)\n else:\n self.Batch.add_rect_data(self.antiMarkerVert)\n \n # Track rendering timing, if requested\n #\n if self.Conf.isTrackTime:\n self.rendDur_s = Clock.getTime_s() -t0\n self.avRendDur_s += self.rendDur_s\n self.nRendTotal += 1\n\n\n # --------------------------------------------------------------------\n # Frame refresh handler\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n def onDraw(self):\n # Check if stimulus is defined\n #\n if self.Stim == None:\n self.View.clear()\n self.isEnd = True\n\n if self.isIdle:\n # Stimulus has already ended; nothing to do ...\n #\n ssp.Log.write(\"DEBUG\", \"Presenter.onDraw(), isIdle=True\")\n self.finish()\n return\n\n # If run from GUI (as server) ...\n #\n if self.isRunFromGUI:\n # Check if GUI requests abort ...\n #\n if self.Sync.Request.value in [mpr.CANCELING, mpr.TERMINATING]:\n self.Sync.setStateSafe(mpr.CANCELING)\n self.isUserAbort = True\n self.stop()\n \n if self.Sync.pipeSrv.poll():\n data = self.Sync.pipeSrv.recv()\n if data[0] == mpr.PipeValType.toSrv_changedStage:\n # Stage properties were adjusted by the user, reflect immediately\n # in the stimulus presentation\n #\n self.Stage.scalX_umPerPix = data[1][\"scalX_umPerPix\"]\n self.Stage.scalY_umPerPix = data[1][\"scalY_umPerPix\"]\n self.Stage.centOffX_pix = data[1][\"centOffX_pix\"]\n self.Stage.centOffY_pix = data[1][\"centOffY_pix\"]\n self.Stage.rot_angle = data[1][\"rot_angle\"]\n self.Stage.dxScr12 = data[1][\"dxScr12\"]\n self.Stage.dyScr12 = data[1][\"dyScr12\"]\n self.Stage.offXScr1_pix = data[1][\"offXScr1_pix\"]\n self.Stage.offYScr1_pix = data[1][\"offYScr1_pix\"]\n self.Stage.offXScr2_pix = data[1][\"offXScr2_pix\"]\n self.Stage.offYScr2_pix = data[1][\"offYScr2_pix\"]\n \n if data[0] == mpr.PipeValType.toSrv_changedLEDs:\n # User changed LED currents and/or toggled LEDs, notify \n # lightcrafter immediately\n #\n self.Stage.LEDs = data[1][0] \n self.Stage.isLEDSeqEnabled = data[1][1]\n self.Stage.sendLEDChangesToLCr(self.Conf)\n\n if data[0] == mpr.PipeValType.toSrv_setIODevPins:\n # User pressed a user button, change IO device pins accordingly\n # \n csp.setIODevicePin(self.IO, data[1][0], data[1][1], data[1][2])\n\n # Render scene\n #\n while self.isNextSce and not self.isEnd:\n # Load next scene ...\n #\n if not self.isFirstSce:\n # Increase scene index and check for end of stimulus ...\n #\n self.iSc += 1\n self.isEnd = self.iSc >= len(self.Stim.SceList)\n if self.isEnd:\n break\n\n else:\n #\t... 
except it is the first scene\n #\n self.isFirstSce = False\n self.tStart = Clock.getTime_s()\n\n self.nFr = self.Stim.cScDurList[self.iSc]\n self.is1FrOfSce = True\n if self.nFr <= 0:\n # Scene w/o duration, handle immediately\n #\n self.renderSce(self.iSc, self.nFr)\n self.isNextSce = True\n\n else:\n # Scene has a duration, handle it below\n #\n self.isNextSce = False\n\n if self.isEnd:\n # No more scenes to display or aborted by user,\n # in any case, end presentation\n #\n isDone = (self.iSc >= len(self.Stim.SceList))\n ssp.Log.write(\"ok\", \"Done\" if isDone else \"Aborted by user\")\n ssp.Log.write(\"DATA\", {\"stimFileName\": self.Stim.fileName, \n \"stimState\": \"FINISHED\" if isDone else \"ABORTED\"}\n .__str__())\n \n self.Stim = None\n self.isIdle = True\n return\n\n if self.nFr > 0:\n # Scene has a duration, handle it ...\n #\n self.renderSce(self.iSc, self.nFr)\n self.nFr -= 1\n self.is1FrOfSce = False\n self.isNextSce = (self.nFr == 0)\n\n # Determine if marker should be shown ...\n # ************\n # TODO: first read port to be able to set/clear only the needed pin\n # ************\n isMaskChanged = False\n if self.IO is not None:\n if self.Stim.cScMarkList[self.iSc] > 0:\n # ...\n maskMark = self.IO_maskMark\n isMaskChanged = True\n self.IO_isMarkSet = True\n else:\n if self.IO_isMarkSet:\n # ...\n maskMark = 0\n isMaskChanged = True \n self.IO_isMarkSet = False\n\n # Flip display buffer ...\n #\n t1 = Clock.getTime_s()\n self.View.present()\n self.avPresDur_s += Clock.getTime_s() -t1\n \n # ****************************\n # ****************************\n # ****************************\n # ****************************\n # ****************************\n '''\n if self.isRunFromGUI and not(self.View.Renderer.pil_img_data is None):\n print(\"******************\", len(self.View.Renderer.pil_img_data))\n #self.Sync.Frame.value = self.View.Renderer.pil_img_data\n ''' \n # **************************** \n # ****************************\n # ****************************\n # ****************************\n \n # Send marker signal, if needed\n #\n if isMaskChanged:\n self.IO.writeDPort(self.IO_portOut, maskMark)\n\n # Record stimulus presentation, if requested\n #\n if self.Conf.recordStim:\n self.View.grabStimFrame()\n \n # Keep track of refresh duration\n #\n if self.Conf.isTrackTime:\n if self.nFrTotal == 0:\n self.tFr = Clock.getTime_s()\n else:\n t0 = Clock.getTime_s()\n dt = t0 -self.tFr\n self.avFrDur_s += dt\n self.tFr = t0\n self.dataDtFr[self.dataDtFrLen] = dt\n self.dataDtFrLen += 1\n if self.dataDtFrLen >= self.dataDtFr.size:\n self.dataDtFrOver = True\n self.dataDtFrLen = 0\n \"\"\"\n if (self.Conf.isWarnFrDrop and\n (abs(dt -self.dtFr_meas_s) > self.Conf.maxDtTr_ms/1000.0)):\n \"\"\" \n if self.Conf.isWarnFrDrop and (dt > self.dtFr_thres_s):\n self.nDroppedFr += 1\n ssp.Log.write(\"WARNING\", \"dt of frame #{0} was {1:.3f} ms\"\n .format(self.nFrTotal, dt *1000.0))\n\n self.nFrTotal += 1\n self.tFrRel_s = self.nFrTotal*self.Stage.dtFr_s\n\n\n # --------------------------------------------------------------------\n def run(self):\n # Runs the OpenGL/pyglet event loop, thereby presenting any loaded stimulus\n #\n if not self.isReady:\n return\n \n if self.Stim is None:\n ssp.Log.write(\"ok\", \"Ready.\")\n return\n\n # Start stimulus ...\n #\n self.isEnd = False\n ssp.Log.write(\"ok\", \"Running...\")\n ssp.Log.write(\"DATA\", {\"stimFileName\": self.Stim.fileName, \n \"stimState\": \"STARTED\",\n \"stimMD5\": self.Stim.md5Str}.__str__()) \n self.Stage.logData()\n 
self.View.startRenderingLoop(self) \n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n def stop(self):\n # Signals event loop to stop\n #\n self.isEnd = True\n ssp.Log.write(\"DEBUG\", \"Presenter.stop()\")\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n def finish(self):\n # Finish presentation\n #\n if not self.isReady:\n return\n \n if self.isUserAbort:\n # Clear screen\n #\n self.View.clear()\n self.View.present()\n ssp.Log.write(\"ABORT\", \"User aborted program\")\n \n else:\n ssp.Log.write(\"ok\", \"Program finished\")\n\n # Log timing information\n #\n if self.Conf.isTrackTime:\n self.avRendDur_s = self.avRendDur_s /self.nRendTotal\n self.avPresDur_s = self.avPresDur_s /self.nRendTotal\n self.avFrDur_s = self.avFrDur_s /self.nFrTotal\n ssp.Log.write(\"INFO\", \"{0:.3f} ms/frame ({1:.3f} Hz), rendering: \"\n \"{2:.3f} ms/frame ({3} frames in total)\"\n .format(self.avFrDur_s*1000.0, 1/self.avFrDur_s,\n self.avRendDur_s*1000.0, self.nFrTotal))\n ssp.Log.write(\"INFO\", \"presenting: {0:.3f} ms/frame\"\n .format(self.avPresDur_s*1000.0))\n\n if glo.QDSpy_frRateStatsBufferLen > 0:\n if not self.dataDtFrOver:\n data = self.dataDtFr[:self.dataDtFrLen]\n else:\n data = self.dataDtFr\n else:\n data = np.array(self.dataDtFr)\n av = data.mean() *1000.0\n std = data.std() *1000.0\n ssp.Log.write(\"INFO\", \"{0:.3f} +/- {1:.3f} ms/frame (over the last {2}\"\n \" frames) = {3:.3} Hz\"\n .format(av, std, len(data), 1000.0/av))\n if self.nDroppedFr > 0:\n pcDrFr = 100.0*self.nDroppedFr/self.nFrTotal\n ssp.Log.write(\"WARNING\", \"{0} frames dropped (={1:.3f} %)\"\n .format(self.nDroppedFr, pcDrFr))\n\n ssp.Log.write(\"DATA\", {\"avgFreq_Hz\": 1/self.avFrDur_s, \n \"nFrames\": self.nFrTotal,\n \"nDroppedFrames\": self.nDroppedFr}\n .__str__())\n\n if QDSpy_verbose:\n # Generate a plot ...\n #\n pylab.title(\"Timing\")\n pylab.subplot(2,1,1)\n pylab.plot(list(range(len(data))), data*1000, \"-\")\n xArr = [0, len(data)-1]\n dtFr_ms = self.dtFr_meas_s*1000\n yMin = dtFr_ms +self.Conf.maxDtTr_ms\n yMax = dtFr_ms -self.Conf.maxDtTr_ms\n pylab.plot(xArr, [yMin, yMin], \"k--\")\n pylab.plot(xArr, [yMax, yMax], \"k--\")\n pylab.ylabel(\"frame duration [ms]\")\n pylab.xlabel(\"frame #\")\n pylab.xlim([10,len(data)])\n pylab.ylim([dtFr_ms-10,dtFr_ms+10])\n pylab.subplot(2,1,2)\n n, bins, pat = pylab.hist(data*1000, 100, histtype=\"bar\", rwidth=0.9)\n pylab.setp(pat, 'facecolor', 'b', 'alpha', 0.75)\n #pylab.xlim([dtFr_ms-5,dtFr_ms+5])\n pylab.ylabel(\"# frames\")\n pylab.xlabel(\"frame duration [ms]\")\n pylab.tight_layout()\n pylab.show()\n\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n def prepare(self, _Stim, _Sync=None):\n # Prepare a stimulus, to be started with the run() function\n #\n self.reset()\n self.Stim = _Stim\n self.isReady = True\n\n if _Sync is not None:\n self.isRunFromGUI = True\n self.Sync = _Sync\n\n if self.Stim is None:\n self.isReady = False\n \n else: \n # Setup digital I/O, if used\n # \n if self.IO is not None:\n self.IO_portOut = self.IO.getPortFromStr(self.Conf.DIOportOut)\n self.IO_maskMark = 0x01 << self.Conf.DIOpinMarker\n \n # Load and generate shader(s), if any\n #\n self.ShProgList = []\n if not glo.QDSpy_loadShadersOnce:\n self.ShManager = csh.ShaderManager(self.Conf)\n \n if len(self.Stim.ShList) > 0:\n for iSh, Sh in enumerate(self.Stim.ShList):\n shType = Sh[stm.SH_field_shaderType]\n if shType in self.ShManager.getShaderTypes():\n # Create shader program\n #\n shader = 
self.ShManager.createShader(shType)\n if shader is not None:\n self.ShProgList.append(shader)\n else: \n self.isReady = False\n ssp.Log.write(\"ERROR\", \"Stimulus '{0}' uses shader '{1}' that \"\n \"could not be compiled\"\n .format(_Stim.nameStr, shType))\n else:\n # A shader that is not in the shader folder is required\n #\n self.isReady = False \n ssp.Log.write(\"ERROR\", \"Stimulus '{0}' uses shader '{1}' that \"\n \"cannot be found\".format(_Stim.nameStr, shType))\n\n # Load movie files, if any\n #\n self.MovieList = []\n if len(self.Stim.MovList) > 0:\n for Mov in self.Stim.MovList:\n movOb = mov.Movie(self.Conf)\n res = movOb.load(self.Conf.pathStim +Mov[stm.SM_field_movieFName])\n if res == stm.StimErrC.ok:\n # Add movie class object to list\n #\n self.MovieList.append(movOb)\n\n else:\n # The movie file(s) could not be loaded\n #\n self.isReady = False\n ssp.Log.write(\"ERROR\", \"Stimulus '{0}' uses movie '{1}' that \"\n \"cannot be found\".format(\n _Stim.nameStr,Mov[stm.SM_field_movieFName]))\n\n # Load videos, if any\n #\n self.VideoList = []\n if len(self.Stim.VidList) > 0:\n for Vid in self.Stim.VidList:\n vidOb = vid.Video(self.Conf)\n res = vidOb.load(self.Conf.pathStim +Vid[stm.SV_field_videoFName])\n if res == stm.StimErrC.ok:\n # Add video class object to list\n #\n self.VideoList.append(vidOb)\n\n else:\n # The video file(s) could not be loaded\n #\n self.isReady = False\n ssp.Log.write(\"ERROR\", \"Stimulus '{0}' uses video '{1}' that \"\n \"cannot be found\".format(\n _Stim.nameStr, Vid[stm.SV_field_videoFName]))\n \n # Create batch object for rendering objects\n #\n self.Batch = self.View.createBatch(_isScrOvl=self.Stage.useScrOvl)\n self.Batch.set_shader_manager(self.ShManager)\n \n if self.isReady:\n ssp.Log.write(\"ok\", \"Stimulus '{0}' prepared\".format(_Stim.nameStr))\n\n# ---------------------------------------------------------------------\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
yihui-lai/coffea
[ "351cc727845ab83a8e31a193dc06e534bedb97fe" ]
[ "coffea/jetmet_tools/CorrectedMETFactory.py" ]
[ "from coffea.jetmet_tools.JECStack import JECStack\nimport awkward\nimport numpy\nimport warnings\nfrom copy import copy\n\n\nclass CorrectedMETFactory(object):\n\n def __init__(self, name_map):\n if 'xMETRaw' not in name_map or name_map['xMETRaw'] is None:\n warnings.warn('There is no name mapping for xMETRaw,'\n ' CorrectedMETFactory will assume that <object>.x is the raw x component!')\n name_map['xMETRaw'] = name_map['METx'] + '_raw'\n self.treat_pt_as_raw = 'ptRaw' not in name_map\n\n if 'yMETRaw' not in name_map or name_map['yMETRaw'] is None:\n warnings.warn('There is no name mapping for yMETRaw,'\n ' CorrectedMETFactory will assume that <object>.y is the raw y component!')\n name_map['yMETRaw'] = name_map['METy'] + '_raw'\n\n self.name_map = name_map\n\n def build(self, MET, corrected_jets, lazy_cache):\n if lazy_cache is None:\n raise Exception('CorrectedMETFactory requires an awkward-array cache to function correctly.')\n if (not isinstance(MET, awkward.highlevel.Array) or\n not isinstance(corrected_jets, awkward.highlevel.Array)):\n raise Exception(\"'MET' and 'corrected_jets' must be an awkward array of some kind!\")\n\n out = copy(MET)\n\n form = out[self.name_map['METpt']].layout.form\n length = len(out)\n\n orig_jets = copy(corrected_jets)\n orig_jets[self.name_map['JetPt']] = orig_jets[self.name_map['ptRaw']]\n orig_jets[self.name_map['JetMass']] = orig_jets[self.name_map['massRaw']]\n\n out['x_orig'] = getattr(out, self.name_map['METx'])\n out['y_orig'] = getattr(out, self.name_map['METy'])\n\n out[self.name_map['METpt'] + '_orig'] = out[self.name_map['METpt']]\n out[self.name_map['METphi'] + '_orig'] = out[self.name_map['METphi']]\n\n def corrected_met_cartesian(met, rawJets, corrJets, dim):\n return met[f'{dim}_orig'] - awkward.sum(getattr(rawJets, dim) - getattr(corrJets, dim), axis=-1)\n\n def corrected_met_cartesian_unc(met, rawJets, corrJets, dimMET, dimJets):\n return getattr(met, dimMET) - awkward.sum(getattr(rawJets, dimJets) - getattr(corrJets, dimJets), axis=-1)\n\n out['corrected_met_x'] = awkward.virtual(\n corrected_met_cartesian,\n args=(out, orig_jets, corrected_jets, self.name_map['JETx']),\n length=length, form=form, cache=lazy_cache\n )\n out['corrected_met_y'] = awkward.virtual(\n corrected_met_cartesian,\n args=(out, orig_jets, corrected_jets, self.name_map['JETy']),\n length=length, form=form, cache=lazy_cache\n )\n\n out[self.name_map['METpt']] = awkward.virtual(\n lambda met: numpy.hypot(met['corrected_met_x'], met['corrected_met_y']),\n args=(out, ),\n length=length,\n form=form,\n cache=lazy_cache\n )\n out[self.name_map['METphi']] = awkward.virtual(\n lambda met: numpy.arctan2(met['corrected_met_y'], met['corrected_met_x']),\n args=(out, ),\n length=length,\n form=form,\n cache=lazy_cache\n )\n\n def make_unclustered_variant(themet, op, deltaX, deltaY):\n variant = copy(themet)\n variant['corrected_met_x'] = awkward.virtual(\n lambda met: op(met['corrected_met_x'], met[f'{deltaX}']),\n args=(out, ),\n length=length,\n form=form,\n cache=lazy_cache\n )\n variant['corrected_met_y'] = awkward.virtual(\n lambda met: op(met['corrected_met_y'], met[f'{deltaY}']),\n args=(out, ),\n length=length,\n form=form,\n cache=lazy_cache\n )\n variant[self.name_map['METpt']] = awkward.virtual(\n lambda met: numpy.hypot(met['corrected_met_x'], met['corrected_met_y']),\n args=(variant, ),\n length=length,\n form=form,\n cache=lazy_cache\n )\n variant[self.name_map['METphi']] = awkward.virtual(\n lambda met: numpy.arctan2(met['corrected_met_y'], met['corrected_met_x']),\n args=(variant, ),\n 
length=length,\n form=form,\n cache=lazy_cache\n )\n return variant\n\n unclus_up = make_unclustered_variant(MET, lambda x, y: x + y,\n self.name_map['UnClusteredEnergyDeltaX'],\n self.name_map['UnClusteredEnergyDeltaY'])\n unclus_down = make_unclustered_variant(MET, lambda x, y: x - y,\n self.name_map['UnClusteredEnergyDeltaX'],\n self.name_map['UnClusteredEnergyDeltaY'])\n out['MET_UnclusteredEnergy'] = awkward.zip({'up': unclus_up, 'down': unclus_down},\n depth_limit=1,\n with_name='METSystematic')\n\n def make_variant(name, variation):\n variant = copy(MET)\n variant['corrected_met_x'] = awkward.virtual(\n corrected_met_cartesian_unc,\n args=(out, orig_jets, variation, self.name_map['METx'], self.name_map['JETx']),\n length=length, form=form, cache=lazy_cache\n )\n variant['corrected_met_y'] = awkward.virtual(\n corrected_met_cartesian_unc,\n args=(out, orig_jets, variation, self.name_map['METy'], self.name_map['JETy']),\n length=length, form=form, cache=lazy_cache\n )\n variant[self.name_map['METpt']] = awkward.virtual(\n lambda met: numpy.hypot(met['corrected_met_x'], met['corrected_met_y']),\n args=(variant, ),\n length=length,\n form=form,\n cache=lazy_cache\n )\n variant[self.name_map['METphi']] = awkward.virtual(\n lambda met: numpy.arctan2(met['corrected_met_y'], met['corrected_met_x']),\n args=(variant, ),\n length=length,\n form=form,\n cache=lazy_cache\n )\n return variant\n\n for unc in filter(lambda x: x.startswith(('JER', 'JES')), awkward.fields(corrected_jets)):\n up = make_variant(unc, corrected_jets[unc].up)\n down = make_variant(unc, corrected_jets[unc].down)\n out[unc] = awkward.zip({'up': up, 'down': down},\n depth_limit=1,\n with_name='METSystematic')\n return out\n\n def uncertainties(self):\n return ['MET_UnclusteredEnergy']\n" ]
[ [ "numpy.arctan2", "numpy.hypot" ] ]
vishalbelsare/classifications
[ "e16dbc9b625ff7e233be30bfb3d432f7b026facd" ]
[ "product/HS/IntlAtlas/clean.py" ]
[ "import pandas as pd\nimport sys\n\nsys.path.append(\"../../..\")\nfrom classification import (\n Hierarchy,\n repeated_table_to_parent_id_table,\n parent_code_table_to_parent_id_table,\n spread_out_entries,\n sort_by_code_and_level,\n Classification,\n)\n\n\ndef get_hs_services(file=\"./in/Services_Hierarchy.csv\"):\n services = pd.read_csv(file, encoding=\"utf-8\", dtype=\"str\")\n # Spread out services similarly to each set of exports but buffered further\n service_starts = {\"section\": 10, \"2digit\": 400, \"4digit\": 4000, \"6digit\": 11000}\n return spread_out_entries(services, service_starts, h)\n\n\nif __name__ == \"__main__\":\n names = pd.read_table(\n \"./in/HS92_Atlas_Names.tsv\", encoding=\"utf-8\", dtype={\"code\": str}\n )\n\n hierarchy = pd.read_table(\n \"./in/HS92_Atlas_Hierarchy.tsv\", encoding=\"utf-8\", dtype=\"str\"\n )\n\n fields = {\"section\": [], \"2digit\": [], \"4digit\": [], \"6digit\": []}\n\n h = Hierarchy([\"section\", \"2digit\", \"4digit\", \"6digit\"])\n parent_code_table = repeated_table_to_parent_id_table(hierarchy, h, fields)\n parent_code_table = parent_code_table.merge(names, on=[\"code\", \"level\"])\n\n # Sort by level order (not necessarily alphabetical)\n parent_code_table = sort_by_code_and_level(parent_code_table, h)\n\n parent_id_table = parent_code_table_to_parent_id_table(parent_code_table, h)\n parent_id_table[\"name\"] = parent_id_table.name_en\n\n parent_id_table = parent_id_table[\n [\n \"code\",\n \"name\",\n \"level\",\n \"name_en\",\n \"name_es\",\n \"name_short_en\",\n \"name_short_es\",\n \"parent_id\",\n ]\n ]\n\n # Decide what id each level should start from\n # Put ample space between each range of ids\n level_starts = {\"section\": 0, \"2digit\": 100, \"4digit\": 650, \"6digit\": 5000}\n parent_id_table = spread_out_entries(parent_id_table, level_starts, h)\n\n # Append services to table\n services = get_hs_services()\n\n # Append to main table and sort on combined spread out indices\n parent_id_table = parent_id_table.append(services).sort_index()\n\n # Hidden products (e.g., garbage, scrap metal)\n hidden = pd.read_csv(\"./in/hidden_products.csv\", dtype={\"code\": str})\n ## Is shown == Not hidden\n parent_id_table[\"is_shown\"] = (~parent_id_table.code.isin(hidden.code)).astype(int)\n\n # Natural Resources\n nat_resources = pd.read_csv(\"./in/natural_resources.csv\", dtype={\"code\": str})\n parent_id_table[\"natural_resource\"] = (\n parent_id_table.code.isin(nat_resources.code)\n ).astype(int)\n\n c = Classification(parent_id_table, h)\n\n c.to_csv(\"out/hs92_atlas.csv\")\n c.to_stata(\"out/hs92_atlas.dta\")\n" ]
[ [ "pandas.read_csv", "pandas.read_table" ] ]
zhouzach/spark
[ "ad77b400da4089a2de74394e2b8aed813633025a" ]
[ "python/pyspark/ml/clustering.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport sys\nimport warnings\n\nfrom pyspark import since, keyword_only\nfrom pyspark.ml.util import *\nfrom pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper\nfrom pyspark.ml.param.shared import *\nfrom pyspark.ml.common import inherit_doc\nfrom pyspark.sql import DataFrame\n\n__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',\n 'KMeans', 'KMeansModel',\n 'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',\n 'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']\n\n\nclass ClusteringSummary(JavaWrapper):\n \"\"\"\n Clustering results for a given model.\n\n .. versionadded:: 2.1.0\n \"\"\"\n\n @property\n @since(\"2.1.0\")\n def predictionCol(self):\n \"\"\"\n Name for column of predicted clusters in `predictions`.\n \"\"\"\n return self._call_java(\"predictionCol\")\n\n @property\n @since(\"2.1.0\")\n def predictions(self):\n \"\"\"\n DataFrame produced by the model's `transform` method.\n \"\"\"\n return self._call_java(\"predictions\")\n\n @property\n @since(\"2.1.0\")\n def featuresCol(self):\n \"\"\"\n Name for column of features in `predictions`.\n \"\"\"\n return self._call_java(\"featuresCol\")\n\n @property\n @since(\"2.1.0\")\n def k(self):\n \"\"\"\n The number of clusters the model was trained with.\n \"\"\"\n return self._call_java(\"k\")\n\n @property\n @since(\"2.1.0\")\n def cluster(self):\n \"\"\"\n DataFrame of predicted cluster centers for each training data point.\n \"\"\"\n return self._call_java(\"cluster\")\n\n @property\n @since(\"2.1.0\")\n def clusterSizes(self):\n \"\"\"\n Size of (number of data points in) each cluster.\n \"\"\"\n return self._call_java(\"clusterSizes\")\n\n @property\n @since(\"2.4.0\")\n def numIter(self):\n \"\"\"\n Number of iterations.\n \"\"\"\n return self._call_java(\"numIter\")\n\n\n@inherit_doc\nclass _GaussianMixtureParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,\n HasProbabilityCol, HasTol, HasAggregationDepth):\n \"\"\"\n Params for :py:class:`GaussianMixture` and :py:class:`GaussianMixtureModel`.\n\n .. versionadded:: 3.0.0\n \"\"\"\n\n k = Param(Params._dummy(), \"k\", \"Number of independent Gaussians in the mixture model. \" +\n \"Must be > 1.\", typeConverter=TypeConverters.toInt)\n\n @since(\"2.0.0\")\n def getK(self):\n \"\"\"\n Gets the value of `k`\n \"\"\"\n return self.getOrDefault(self.k)\n\n\nclass GaussianMixtureModel(JavaModel, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable,\n HasTrainingSummary):\n \"\"\"\n Model fitted by GaussianMixture.\n\n .. 
versionadded:: 2.0.0\n \"\"\"\n\n @since(\"3.0.0\")\n def setFeaturesCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`featuresCol`.\n \"\"\"\n return self._set(featuresCol=value)\n\n @since(\"3.0.0\")\n def setPredictionCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`predictionCol`.\n \"\"\"\n return self._set(predictionCol=value)\n\n @since(\"3.0.0\")\n def setProbabilityCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`probabilityCol`.\n \"\"\"\n return self._set(probabilityCol=value)\n\n @property\n @since(\"2.0.0\")\n def weights(self):\n \"\"\"\n Weight for each Gaussian distribution in the mixture.\n This is a multinomial probability distribution over the k Gaussians,\n where weights[i] is the weight for Gaussian i, and weights sum to 1.\n \"\"\"\n return self._call_java(\"weights\")\n\n @property\n @since(\"3.0.0\")\n def gaussians(self):\n \"\"\"\n Array of :py:class:`MultivariateGaussian` where gaussians[i] represents\n the Multivariate Gaussian (Normal) Distribution for Gaussian i\n \"\"\"\n return self._call_java(\"gaussians\")\n\n @property\n @since(\"2.0.0\")\n def gaussiansDF(self):\n \"\"\"\n Retrieve Gaussian distributions as a DataFrame.\n Each row represents a Gaussian Distribution.\n The DataFrame has two columns: mean (Vector) and cov (Matrix).\n \"\"\"\n return self._call_java(\"gaussiansDF\")\n\n @property\n @since(\"2.1.0\")\n def summary(self):\n \"\"\"\n Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the\n training set. An exception is thrown if no summary exists.\n \"\"\"\n if self.hasSummary:\n return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary)\n else:\n raise RuntimeError(\"No training summary available for this %s\" %\n self.__class__.__name__)\n\n @since(\"3.0.0\")\n def predict(self, value):\n \"\"\"\n Predict label for the given features.\n \"\"\"\n return self._call_java(\"predict\", value)\n\n @since(\"3.0.0\")\n def predictProbability(self, value):\n \"\"\"\n Predict probability for the given features.\n \"\"\"\n return self._call_java(\"predictProbability\", value)\n\n\n@inherit_doc\nclass GaussianMixture(JavaEstimator, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable):\n \"\"\"\n GaussianMixture clustering.\n This class performs expectation maximization for multivariate Gaussian\n Mixture Models (GMMs). A GMM represents a composite distribution of\n independent Gaussian distributions with associated \"mixing\" weights\n specifying each's contribution to the composite.\n\n Given a set of sample points, this class will maximize the log-likelihood\n for a mixture of k Gaussians, iterating until the log-likelihood changes by\n less than convergenceTol, or until it has reached the max number of iterations.\n While this process is generally guaranteed to converge, it is not guaranteed\n to find a global optimum.\n\n .. note:: For high-dimensional data (with many features), this algorithm may perform poorly.\n This is due to high-dimensional data (a) making it difficult to cluster at all\n (based on statistical/theoretical arguments) and (b) numerical issues with\n Gaussian distributions.\n\n >>> from pyspark.ml.linalg import Vectors\n\n >>> data = [(Vectors.dense([-0.1, -0.05 ]),),\n ... (Vectors.dense([-0.01, -0.1]),),\n ... (Vectors.dense([0.9, 0.8]),),\n ... (Vectors.dense([0.75, 0.935]),),\n ... (Vectors.dense([-0.83, -0.68]),),\n ... 
(Vectors.dense([-0.91, -0.76]),)]\n >>> df = spark.createDataFrame(data, [\"features\"])\n >>> gm = GaussianMixture(k=3, tol=0.0001, seed=10)\n >>> gm.getMaxIter()\n 100\n >>> gm.setMaxIter(10)\n GaussianMixture...\n >>> gm.getMaxIter()\n 10\n >>> model = gm.fit(df)\n >>> model.getAggregationDepth()\n 2\n >>> model.getFeaturesCol()\n 'features'\n >>> model.setPredictionCol(\"newPrediction\")\n GaussianMixtureModel...\n >>> model.predict(df.head().features)\n 2\n >>> model.predictProbability(df.head().features)\n DenseVector([0.0, 0.4736, 0.5264])\n >>> model.hasSummary\n True\n >>> summary = model.summary\n >>> summary.k\n 3\n >>> summary.clusterSizes\n [2, 2, 2]\n >>> summary.logLikelihood\n 8.14636...\n >>> weights = model.weights\n >>> len(weights)\n 3\n >>> gaussians = model.gaussians\n >>> len(gaussians)\n 3\n >>> model.gaussiansDF.select(\"mean\").head()\n Row(mean=DenseVector([0.825, 0.8675]))\n >>> model.gaussiansDF.select(\"cov\").head()\n Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))\n >>> transformed = model.transform(df).select(\"features\", \"newPrediction\")\n >>> rows = transformed.collect()\n >>> rows[4].newPrediction == rows[5].newPrediction\n True\n >>> rows[2].newPrediction == rows[3].newPrediction\n True\n >>> gmm_path = temp_path + \"/gmm\"\n >>> gm.save(gmm_path)\n >>> gm2 = GaussianMixture.load(gmm_path)\n >>> gm2.getK()\n 3\n >>> model_path = temp_path + \"/gmm_model\"\n >>> model.save(model_path)\n >>> model2 = GaussianMixtureModel.load(model_path)\n >>> model2.hasSummary\n False\n >>> model2.weights == model.weights\n True\n >>> model2.gaussians == model.gaussians\n True\n >>> model2.gaussiansDF.select(\"mean\").head()\n Row(mean=DenseVector([0.825, 0.8675]))\n >>> model2.gaussiansDF.select(\"cov\").head()\n Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))\n\n .. 
versionadded:: 2.0.0\n \"\"\"\n\n @keyword_only\n def __init__(self, featuresCol=\"features\", predictionCol=\"prediction\", k=2,\n probabilityCol=\"probability\", tol=0.01, maxIter=100, seed=None,\n aggregationDepth=2):\n \"\"\"\n __init__(self, featuresCol=\"features\", predictionCol=\"prediction\", k=2, \\\n probabilityCol=\"probability\", tol=0.01, maxIter=100, seed=None, \\\n aggregationDepth=2)\n \"\"\"\n super(GaussianMixture, self).__init__()\n self._java_obj = self._new_java_obj(\"org.apache.spark.ml.clustering.GaussianMixture\",\n self.uid)\n self._setDefault(k=2, tol=0.01, maxIter=100, aggregationDepth=2)\n kwargs = self._input_kwargs\n self.setParams(**kwargs)\n\n def _create_model(self, java_model):\n return GaussianMixtureModel(java_model)\n\n @keyword_only\n @since(\"2.0.0\")\n def setParams(self, featuresCol=\"features\", predictionCol=\"prediction\", k=2,\n probabilityCol=\"probability\", tol=0.01, maxIter=100, seed=None,\n aggregationDepth=2):\n \"\"\"\n setParams(self, featuresCol=\"features\", predictionCol=\"prediction\", k=2, \\\n probabilityCol=\"probability\", tol=0.01, maxIter=100, seed=None, \\\n aggregationDepth=2)\n\n Sets params for GaussianMixture.\n \"\"\"\n kwargs = self._input_kwargs\n return self._set(**kwargs)\n\n @since(\"2.0.0\")\n def setK(self, value):\n \"\"\"\n Sets the value of :py:attr:`k`.\n \"\"\"\n return self._set(k=value)\n\n @since(\"2.0.0\")\n def setMaxIter(self, value):\n \"\"\"\n Sets the value of :py:attr:`maxIter`.\n \"\"\"\n return self._set(maxIter=value)\n\n @since(\"2.0.0\")\n def setFeaturesCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`featuresCol`.\n \"\"\"\n return self._set(featuresCol=value)\n\n @since(\"2.0.0\")\n def setPredictionCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`predictionCol`.\n \"\"\"\n return self._set(predictionCol=value)\n\n @since(\"2.0.0\")\n def setProbabilityCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`probabilityCol`.\n \"\"\"\n return self._set(probabilityCol=value)\n\n @since(\"2.0.0\")\n def setSeed(self, value):\n \"\"\"\n Sets the value of :py:attr:`seed`.\n \"\"\"\n return self._set(seed=value)\n\n @since(\"2.0.0\")\n def setTol(self, value):\n \"\"\"\n Sets the value of :py:attr:`tol`.\n \"\"\"\n return self._set(tol=value)\n\n @since(\"3.0.0\")\n def setAggregationDepth(self, value):\n \"\"\"\n Sets the value of :py:attr:`aggregationDepth`.\n \"\"\"\n return self._set(aggregationDepth=value)\n\n\nclass GaussianMixtureSummary(ClusteringSummary):\n \"\"\"\n Gaussian mixture clustering results for a given model.\n\n .. versionadded:: 2.1.0\n \"\"\"\n\n @property\n @since(\"2.1.0\")\n def probabilityCol(self):\n \"\"\"\n Name for column of predicted probability of each cluster in `predictions`.\n \"\"\"\n return self._call_java(\"probabilityCol\")\n\n @property\n @since(\"2.1.0\")\n def probability(self):\n \"\"\"\n DataFrame of probabilities of each cluster for each training data point.\n \"\"\"\n return self._call_java(\"probability\")\n\n @property\n @since(\"2.2.0\")\n def logLikelihood(self):\n \"\"\"\n Total log-likelihood for this model on the given data.\n \"\"\"\n return self._call_java(\"logLikelihood\")\n\n\nclass KMeansSummary(ClusteringSummary):\n \"\"\"\n Summary of KMeans.\n\n .. versionadded:: 2.1.0\n \"\"\"\n\n @property\n @since(\"2.4.0\")\n def trainingCost(self):\n \"\"\"\n K-means cost (sum of squared distances to the nearest centroid for all points in the\n training dataset). 
This is equivalent to sklearn's inertia.\n \"\"\"\n return self._call_java(\"trainingCost\")\n\n\n@inherit_doc\nclass _KMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol, HasTol,\n HasDistanceMeasure, HasWeightCol):\n \"\"\"\n Params for :py:class:`KMeans` and :py:class:`KMeansModel`.\n\n .. versionadded:: 3.0.0\n \"\"\"\n\n k = Param(Params._dummy(), \"k\", \"The number of clusters to create. Must be > 1.\",\n typeConverter=TypeConverters.toInt)\n initMode = Param(Params._dummy(), \"initMode\",\n \"The initialization algorithm. This can be either \\\"random\\\" to \" +\n \"choose random points as initial cluster centers, or \\\"k-means||\\\" \" +\n \"to use a parallel variant of k-means++\",\n typeConverter=TypeConverters.toString)\n initSteps = Param(Params._dummy(), \"initSteps\", \"The number of steps for k-means|| \" +\n \"initialization mode. Must be > 0.\", typeConverter=TypeConverters.toInt)\n\n @since(\"1.5.0\")\n def getK(self):\n \"\"\"\n Gets the value of `k`\n \"\"\"\n return self.getOrDefault(self.k)\n\n @since(\"1.5.0\")\n def getInitMode(self):\n \"\"\"\n Gets the value of `initMode`\n \"\"\"\n return self.getOrDefault(self.initMode)\n\n @since(\"1.5.0\")\n def getInitSteps(self):\n \"\"\"\n Gets the value of `initSteps`\n \"\"\"\n return self.getOrDefault(self.initSteps)\n\n\nclass KMeansModel(JavaModel, _KMeansParams, GeneralJavaMLWritable, JavaMLReadable,\n HasTrainingSummary):\n \"\"\"\n Model fitted by KMeans.\n\n .. versionadded:: 1.5.0\n \"\"\"\n\n @since(\"3.0.0\")\n def setFeaturesCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`featuresCol`.\n \"\"\"\n return self._set(featuresCol=value)\n\n @since(\"3.0.0\")\n def setPredictionCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`predictionCol`.\n \"\"\"\n return self._set(predictionCol=value)\n\n @since(\"1.5.0\")\n def clusterCenters(self):\n \"\"\"Get the cluster centers, represented as a list of NumPy arrays.\"\"\"\n return [c.toArray() for c in self._call_java(\"clusterCenters\")]\n\n @property\n @since(\"2.1.0\")\n def summary(self):\n \"\"\"\n Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the\n training set. An exception is thrown if no summary exists.\n \"\"\"\n if self.hasSummary:\n return KMeansSummary(super(KMeansModel, self).summary)\n else:\n raise RuntimeError(\"No training summary available for this %s\" %\n self.__class__.__name__)\n\n @since(\"3.0.0\")\n def predict(self, value):\n \"\"\"\n Predict label for the given features.\n \"\"\"\n return self._call_java(\"predict\", value)\n\n\n@inherit_doc\nclass KMeans(JavaEstimator, _KMeansParams, JavaMLWritable, JavaMLReadable):\n \"\"\"\n K-means clustering with a k-means++ like initialization mode\n (the k-means|| algorithm by Bahmani et al).\n\n >>> from pyspark.ml.linalg import Vectors\n >>> data = [(Vectors.dense([0.0, 0.0]), 2.0), (Vectors.dense([1.0, 1.0]), 2.0),\n ... 
(Vectors.dense([9.0, 8.0]), 2.0), (Vectors.dense([8.0, 9.0]), 2.0)]\n >>> df = spark.createDataFrame(data, [\"features\", \"weighCol\"])\n >>> kmeans = KMeans(k=2)\n >>> kmeans.setSeed(1)\n KMeans...\n >>> kmeans.setWeightCol(\"weighCol\")\n KMeans...\n >>> kmeans.setMaxIter(10)\n KMeans...\n >>> kmeans.getMaxIter()\n 10\n >>> kmeans.clear(kmeans.maxIter)\n >>> model = kmeans.fit(df)\n >>> model.getDistanceMeasure()\n 'euclidean'\n >>> model.setPredictionCol(\"newPrediction\")\n KMeansModel...\n >>> model.predict(df.head().features)\n 0\n >>> centers = model.clusterCenters()\n >>> len(centers)\n 2\n >>> transformed = model.transform(df).select(\"features\", \"newPrediction\")\n >>> rows = transformed.collect()\n >>> rows[0].newPrediction == rows[1].newPrediction\n True\n >>> rows[2].newPrediction == rows[3].newPrediction\n True\n >>> model.hasSummary\n True\n >>> summary = model.summary\n >>> summary.k\n 2\n >>> summary.clusterSizes\n [2, 2]\n >>> summary.trainingCost\n 4.0\n >>> kmeans_path = temp_path + \"/kmeans\"\n >>> kmeans.save(kmeans_path)\n >>> kmeans2 = KMeans.load(kmeans_path)\n >>> kmeans2.getK()\n 2\n >>> model_path = temp_path + \"/kmeans_model\"\n >>> model.save(model_path)\n >>> model2 = KMeansModel.load(model_path)\n >>> model2.hasSummary\n False\n >>> model.clusterCenters()[0] == model2.clusterCenters()[0]\n array([ True, True], dtype=bool)\n >>> model.clusterCenters()[1] == model2.clusterCenters()[1]\n array([ True, True], dtype=bool)\n\n .. versionadded:: 1.5.0\n \"\"\"\n\n @keyword_only\n def __init__(self, featuresCol=\"features\", predictionCol=\"prediction\", k=2,\n initMode=\"k-means||\", initSteps=2, tol=1e-4, maxIter=20, seed=None,\n distanceMeasure=\"euclidean\", weightCol=None):\n \"\"\"\n __init__(self, featuresCol=\"features\", predictionCol=\"prediction\", k=2, \\\n initMode=\"k-means||\", initSteps=2, tol=1e-4, maxIter=20, seed=None, \\\n distanceMeasure=\"euclidean\", weightCol=None)\n \"\"\"\n super(KMeans, self).__init__()\n self._java_obj = self._new_java_obj(\"org.apache.spark.ml.clustering.KMeans\", self.uid)\n self._setDefault(k=2, initMode=\"k-means||\", initSteps=2, tol=1e-4, maxIter=20,\n distanceMeasure=\"euclidean\")\n kwargs = self._input_kwargs\n self.setParams(**kwargs)\n\n def _create_model(self, java_model):\n return KMeansModel(java_model)\n\n @keyword_only\n @since(\"1.5.0\")\n def setParams(self, featuresCol=\"features\", predictionCol=\"prediction\", k=2,\n initMode=\"k-means||\", initSteps=2, tol=1e-4, maxIter=20, seed=None,\n distanceMeasure=\"euclidean\", weightCol=None):\n \"\"\"\n setParams(self, featuresCol=\"features\", predictionCol=\"prediction\", k=2, \\\n initMode=\"k-means||\", initSteps=2, tol=1e-4, maxIter=20, seed=None, \\\n distanceMeasure=\"euclidean\", weightCol=None)\n\n Sets params for KMeans.\n \"\"\"\n kwargs = self._input_kwargs\n return self._set(**kwargs)\n\n @since(\"1.5.0\")\n def setK(self, value):\n \"\"\"\n Sets the value of :py:attr:`k`.\n \"\"\"\n return self._set(k=value)\n\n @since(\"1.5.0\")\n def setInitMode(self, value):\n \"\"\"\n Sets the value of :py:attr:`initMode`.\n \"\"\"\n return self._set(initMode=value)\n\n @since(\"1.5.0\")\n def setInitSteps(self, value):\n \"\"\"\n Sets the value of :py:attr:`initSteps`.\n \"\"\"\n return self._set(initSteps=value)\n\n @since(\"2.4.0\")\n def setDistanceMeasure(self, value):\n \"\"\"\n Sets the value of :py:attr:`distanceMeasure`.\n \"\"\"\n return self._set(distanceMeasure=value)\n\n @since(\"1.5.0\")\n def setMaxIter(self, value):\n \"\"\"\n Sets the 
value of :py:attr:`maxIter`.\n \"\"\"\n return self._set(maxIter=value)\n\n @since(\"1.5.0\")\n def setFeaturesCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`featuresCol`.\n \"\"\"\n return self._set(featuresCol=value)\n\n @since(\"1.5.0\")\n def setPredictionCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`predictionCol`.\n \"\"\"\n return self._set(predictionCol=value)\n\n @since(\"1.5.0\")\n def setSeed(self, value):\n \"\"\"\n Sets the value of :py:attr:`seed`.\n \"\"\"\n return self._set(seed=value)\n\n @since(\"1.5.0\")\n def setTol(self, value):\n \"\"\"\n Sets the value of :py:attr:`tol`.\n \"\"\"\n return self._set(tol=value)\n\n @since(\"3.0.0\")\n def setWeightCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`weightCol`.\n \"\"\"\n return self._set(weightCol=value)\n\n\n@inherit_doc\nclass _BisectingKMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,\n HasDistanceMeasure):\n \"\"\"\n Params for :py:class:`BisectingKMeans` and :py:class:`BisectingKMeansModel`.\n\n .. versionadded:: 3.0.0\n \"\"\"\n\n k = Param(Params._dummy(), \"k\", \"The desired number of leaf clusters. Must be > 1.\",\n typeConverter=TypeConverters.toInt)\n minDivisibleClusterSize = Param(Params._dummy(), \"minDivisibleClusterSize\",\n \"The minimum number of points (if >= 1.0) or the minimum \" +\n \"proportion of points (if < 1.0) of a divisible cluster.\",\n typeConverter=TypeConverters.toFloat)\n\n @since(\"2.0.0\")\n def getK(self):\n \"\"\"\n Gets the value of `k` or its default value.\n \"\"\"\n return self.getOrDefault(self.k)\n\n @since(\"2.0.0\")\n def getMinDivisibleClusterSize(self):\n \"\"\"\n Gets the value of `minDivisibleClusterSize` or its default value.\n \"\"\"\n return self.getOrDefault(self.minDivisibleClusterSize)\n\n\nclass BisectingKMeansModel(JavaModel, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable,\n HasTrainingSummary):\n \"\"\"\n Model fitted by BisectingKMeans.\n\n .. versionadded:: 2.0.0\n \"\"\"\n\n @since(\"3.0.0\")\n def setFeaturesCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`featuresCol`.\n \"\"\"\n return self._set(featuresCol=value)\n\n @since(\"3.0.0\")\n def setPredictionCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`predictionCol`.\n \"\"\"\n return self._set(predictionCol=value)\n\n @since(\"2.0.0\")\n def clusterCenters(self):\n \"\"\"Get the cluster centers, represented as a list of NumPy arrays.\"\"\"\n return [c.toArray() for c in self._call_java(\"clusterCenters\")]\n\n @since(\"2.0.0\")\n def computeCost(self, dataset):\n \"\"\"\n Computes the sum of squared distances between the input points\n and their corresponding cluster centers.\n\n .. note:: Deprecated in 3.0.0. It will be removed in future versions. Use\n ClusteringEvaluator instead. You can also get the cost on the training dataset in the\n summary.\n \"\"\"\n warnings.warn(\"Deprecated in 3.0.0. It will be removed in future versions. Use \"\n \"ClusteringEvaluator instead. You can also get the cost on the training \"\n \"dataset in the summary.\", DeprecationWarning)\n return self._call_java(\"computeCost\", dataset)\n\n @property\n @since(\"2.1.0\")\n def summary(self):\n \"\"\"\n Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the\n training set. 
An exception is thrown if no summary exists.\n \"\"\"\n if self.hasSummary:\n return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)\n else:\n raise RuntimeError(\"No training summary available for this %s\" %\n self.__class__.__name__)\n\n @since(\"3.0.0\")\n def predict(self, value):\n \"\"\"\n Predict label for the given features.\n \"\"\"\n return self._call_java(\"predict\", value)\n\n\n@inherit_doc\nclass BisectingKMeans(JavaEstimator, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable):\n \"\"\"\n A bisecting k-means algorithm based on the paper \"A comparison of document clustering\n techniques\" by Steinbach, Karypis, and Kumar, with modification to fit Spark.\n The algorithm starts from a single cluster that contains all points.\n Iteratively it finds divisible clusters on the bottom level and bisects each of them using\n k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.\n The bisecting steps of clusters on the same level are grouped together to increase parallelism.\n If bisecting all divisible clusters on the bottom level would result in more than `k` leaf\n clusters, larger clusters get higher priority.\n\n >>> from pyspark.ml.linalg import Vectors\n >>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),\n ... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]\n >>> df = spark.createDataFrame(data, [\"features\"])\n >>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)\n >>> bkm.setMaxIter(10)\n BisectingKMeans...\n >>> bkm.getMaxIter()\n 10\n >>> bkm.clear(bkm.maxIter)\n >>> bkm.setSeed(1)\n BisectingKMeans...\n >>> bkm.getSeed()\n 1\n >>> bkm.clear(bkm.seed)\n >>> model = bkm.fit(df)\n >>> model.getMaxIter()\n 20\n >>> model.setPredictionCol(\"newPrediction\")\n BisectingKMeansModel...\n >>> model.predict(df.head().features)\n 0\n >>> centers = model.clusterCenters()\n >>> len(centers)\n 2\n >>> model.computeCost(df)\n 2.0\n >>> model.hasSummary\n True\n >>> summary = model.summary\n >>> summary.k\n 2\n >>> summary.clusterSizes\n [2, 2]\n >>> summary.trainingCost\n 2.000...\n >>> transformed = model.transform(df).select(\"features\", \"newPrediction\")\n >>> rows = transformed.collect()\n >>> rows[0].newPrediction == rows[1].newPrediction\n True\n >>> rows[2].newPrediction == rows[3].newPrediction\n True\n >>> bkm_path = temp_path + \"/bkm\"\n >>> bkm.save(bkm_path)\n >>> bkm2 = BisectingKMeans.load(bkm_path)\n >>> bkm2.getK()\n 2\n >>> bkm2.getDistanceMeasure()\n 'euclidean'\n >>> model_path = temp_path + \"/bkm_model\"\n >>> model.save(model_path)\n >>> model2 = BisectingKMeansModel.load(model_path)\n >>> model2.hasSummary\n False\n >>> model.clusterCenters()[0] == model2.clusterCenters()[0]\n array([ True, True], dtype=bool)\n >>> model.clusterCenters()[1] == model2.clusterCenters()[1]\n array([ True, True], dtype=bool)\n\n .. 
versionadded:: 2.0.0\n \"\"\"\n\n @keyword_only\n def __init__(self, featuresCol=\"features\", predictionCol=\"prediction\", maxIter=20,\n seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure=\"euclidean\"):\n \"\"\"\n __init__(self, featuresCol=\"features\", predictionCol=\"prediction\", maxIter=20, \\\n seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure=\"euclidean\")\n \"\"\"\n super(BisectingKMeans, self).__init__()\n self._java_obj = self._new_java_obj(\"org.apache.spark.ml.clustering.BisectingKMeans\",\n self.uid)\n self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)\n kwargs = self._input_kwargs\n self.setParams(**kwargs)\n\n @keyword_only\n @since(\"2.0.0\")\n def setParams(self, featuresCol=\"features\", predictionCol=\"prediction\", maxIter=20,\n seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure=\"euclidean\"):\n \"\"\"\n setParams(self, featuresCol=\"features\", predictionCol=\"prediction\", maxIter=20, \\\n seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure=\"euclidean\")\n Sets params for BisectingKMeans.\n \"\"\"\n kwargs = self._input_kwargs\n return self._set(**kwargs)\n\n @since(\"2.0.0\")\n def setK(self, value):\n \"\"\"\n Sets the value of :py:attr:`k`.\n \"\"\"\n return self._set(k=value)\n\n @since(\"2.0.0\")\n def setMinDivisibleClusterSize(self, value):\n \"\"\"\n Sets the value of :py:attr:`minDivisibleClusterSize`.\n \"\"\"\n return self._set(minDivisibleClusterSize=value)\n\n @since(\"2.4.0\")\n def setDistanceMeasure(self, value):\n \"\"\"\n Sets the value of :py:attr:`distanceMeasure`.\n \"\"\"\n return self._set(distanceMeasure=value)\n\n @since(\"2.0.0\")\n def setMaxIter(self, value):\n \"\"\"\n Sets the value of :py:attr:`maxIter`.\n \"\"\"\n return self._set(maxIter=value)\n\n @since(\"2.0.0\")\n def setFeaturesCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`featuresCol`.\n \"\"\"\n return self._set(featuresCol=value)\n\n @since(\"2.0.0\")\n def setPredictionCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`predictionCol`.\n \"\"\"\n return self._set(predictionCol=value)\n\n @since(\"2.0.0\")\n def setSeed(self, value):\n \"\"\"\n Sets the value of :py:attr:`seed`.\n \"\"\"\n return self._set(seed=value)\n\n def _create_model(self, java_model):\n return BisectingKMeansModel(java_model)\n\n\nclass BisectingKMeansSummary(ClusteringSummary):\n \"\"\"\n Bisecting KMeans clustering results for a given model.\n\n .. versionadded:: 2.1.0\n \"\"\"\n\n @property\n @since(\"3.0.0\")\n def trainingCost(self):\n \"\"\"\n Sum of squared distances to the nearest centroid for all points in the training dataset.\n This is equivalent to sklearn's inertia.\n \"\"\"\n return self._call_java(\"trainingCost\")\n\n\n@inherit_doc\nclass _LDAParams(HasMaxIter, HasFeaturesCol, HasSeed, HasCheckpointInterval):\n \"\"\"\n Params for :py:class:`LDA` and :py:class:`LDAModel`.\n\n .. versionadded:: 3.0.0\n \"\"\"\n\n k = Param(Params._dummy(), \"k\", \"The number of topics (clusters) to infer. Must be > 1.\",\n typeConverter=TypeConverters.toInt)\n optimizer = Param(Params._dummy(), \"optimizer\",\n \"Optimizer or inference algorithm used to estimate the LDA model. 
\"\n \"Supported: online, em\", typeConverter=TypeConverters.toString)\n learningOffset = Param(Params._dummy(), \"learningOffset\",\n \"A (positive) learning parameter that downweights early iterations.\"\n \" Larger values make early iterations count less\",\n typeConverter=TypeConverters.toFloat)\n learningDecay = Param(Params._dummy(), \"learningDecay\", \"Learning rate, set as an \"\n \"exponential decay rate. This should be between (0.5, 1.0] to \"\n \"guarantee asymptotic convergence.\", typeConverter=TypeConverters.toFloat)\n subsamplingRate = Param(Params._dummy(), \"subsamplingRate\",\n \"Fraction of the corpus to be sampled and used in each iteration \"\n \"of mini-batch gradient descent, in range (0, 1].\",\n typeConverter=TypeConverters.toFloat)\n optimizeDocConcentration = Param(Params._dummy(), \"optimizeDocConcentration\",\n \"Indicates whether the docConcentration (Dirichlet parameter \"\n \"for document-topic distribution) will be optimized during \"\n \"training.\", typeConverter=TypeConverters.toBoolean)\n docConcentration = Param(Params._dummy(), \"docConcentration\",\n \"Concentration parameter (commonly named \\\"alpha\\\") for the \"\n \"prior placed on documents' distributions over topics (\\\"theta\\\").\",\n typeConverter=TypeConverters.toListFloat)\n topicConcentration = Param(Params._dummy(), \"topicConcentration\",\n \"Concentration parameter (commonly named \\\"beta\\\" or \\\"eta\\\") for \"\n \"the prior placed on topics' distributions over terms.\",\n typeConverter=TypeConverters.toFloat)\n topicDistributionCol = Param(Params._dummy(), \"topicDistributionCol\",\n \"Output column with estimates of the topic mixture distribution \"\n \"for each document (often called \\\"theta\\\" in the literature). \"\n \"Returns a vector of zeros for an empty document.\",\n typeConverter=TypeConverters.toString)\n keepLastCheckpoint = Param(Params._dummy(), \"keepLastCheckpoint\",\n \"(For EM optimizer) If using checkpointing, this indicates whether\"\n \" to keep the last checkpoint. If false, then the checkpoint will be\"\n \" deleted. 
Deleting the checkpoint can cause failures if a data\"\n \" partition is lost, so set this bit with care.\",\n TypeConverters.toBoolean)\n\n @since(\"2.0.0\")\n def getK(self):\n \"\"\"\n Gets the value of :py:attr:`k` or its default value.\n \"\"\"\n return self.getOrDefault(self.k)\n\n @since(\"2.0.0\")\n def getOptimizer(self):\n \"\"\"\n Gets the value of :py:attr:`optimizer` or its default value.\n \"\"\"\n return self.getOrDefault(self.optimizer)\n\n @since(\"2.0.0\")\n def getLearningOffset(self):\n \"\"\"\n Gets the value of :py:attr:`learningOffset` or its default value.\n \"\"\"\n return self.getOrDefault(self.learningOffset)\n\n @since(\"2.0.0\")\n def getLearningDecay(self):\n \"\"\"\n Gets the value of :py:attr:`learningDecay` or its default value.\n \"\"\"\n return self.getOrDefault(self.learningDecay)\n\n @since(\"2.0.0\")\n def getSubsamplingRate(self):\n \"\"\"\n Gets the value of :py:attr:`subsamplingRate` or its default value.\n \"\"\"\n return self.getOrDefault(self.subsamplingRate)\n\n @since(\"2.0.0\")\n def getOptimizeDocConcentration(self):\n \"\"\"\n Gets the value of :py:attr:`optimizeDocConcentration` or its default value.\n \"\"\"\n return self.getOrDefault(self.optimizeDocConcentration)\n\n @since(\"2.0.0\")\n def getDocConcentration(self):\n \"\"\"\n Gets the value of :py:attr:`docConcentration` or its default value.\n \"\"\"\n return self.getOrDefault(self.docConcentration)\n\n @since(\"2.0.0\")\n def getTopicConcentration(self):\n \"\"\"\n Gets the value of :py:attr:`topicConcentration` or its default value.\n \"\"\"\n return self.getOrDefault(self.topicConcentration)\n\n @since(\"2.0.0\")\n def getTopicDistributionCol(self):\n \"\"\"\n Gets the value of :py:attr:`topicDistributionCol` or its default value.\n \"\"\"\n return self.getOrDefault(self.topicDistributionCol)\n\n @since(\"2.0.0\")\n def getKeepLastCheckpoint(self):\n \"\"\"\n Gets the value of :py:attr:`keepLastCheckpoint` or its default value.\n \"\"\"\n return self.getOrDefault(self.keepLastCheckpoint)\n\n\n@inherit_doc\nclass LDAModel(JavaModel, _LDAParams):\n \"\"\"\n Latent Dirichlet Allocation (LDA) model.\n This abstraction allows for different underlying representations,\n including local and distributed data structures.\n\n .. 
versionadded:: 2.0.0\n \"\"\"\n\n @since(\"3.0.0\")\n def setFeaturesCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`featuresCol`.\n \"\"\"\n return self._set(featuresCol=value)\n\n @since(\"3.0.0\")\n def setSeed(self, value):\n \"\"\"\n Sets the value of :py:attr:`seed`.\n \"\"\"\n return self._set(seed=value)\n\n @since(\"3.0.0\")\n def setTopicDistributionCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`topicDistributionCol`.\n\n >>> algo = LDA().setTopicDistributionCol(\"topicDistributionCol\")\n >>> algo.getTopicDistributionCol()\n 'topicDistributionCol'\n \"\"\"\n return self._set(topicDistributionCol=value)\n\n @since(\"2.0.0\")\n def isDistributed(self):\n \"\"\"\n Indicates whether this instance is of type DistributedLDAModel\n \"\"\"\n return self._call_java(\"isDistributed\")\n\n @since(\"2.0.0\")\n def vocabSize(self):\n \"\"\"Vocabulary size (number of terms or words in the vocabulary)\"\"\"\n return self._call_java(\"vocabSize\")\n\n @since(\"2.0.0\")\n def topicsMatrix(self):\n \"\"\"\n Inferred topics, where each topic is represented by a distribution over terms.\n This is a matrix of size vocabSize x k, where each column is a topic.\n No guarantees are given about the ordering of the topics.\n\n WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by\n the Expectation-Maximization (\"em\") `optimizer`, then this method could involve\n collecting a large amount of data to the driver (on the order of vocabSize x k).\n \"\"\"\n return self._call_java(\"topicsMatrix\")\n\n @since(\"2.0.0\")\n def logLikelihood(self, dataset):\n \"\"\"\n Calculates a lower bound on the log likelihood of the entire corpus.\n See Equation (16) in the Online LDA paper (Hoffman et al., 2010).\n\n WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when\n :py:attr:`optimizer` is set to \"em\"), this involves collecting a large\n :py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.\n \"\"\"\n return self._call_java(\"logLikelihood\", dataset)\n\n @since(\"2.0.0\")\n def logPerplexity(self, dataset):\n \"\"\"\n Calculate an upper bound on perplexity. (Lower is better.)\n See Equation (16) in the Online LDA paper (Hoffman et al., 2010).\n\n WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when\n :py:attr:`optimizer` is set to \"em\"), this involves collecting a large\n :py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.\n \"\"\"\n return self._call_java(\"logPerplexity\", dataset)\n\n @since(\"2.0.0\")\n def describeTopics(self, maxTermsPerTopic=10):\n \"\"\"\n Return the topics described by their top-weighted terms.\n \"\"\"\n return self._call_java(\"describeTopics\", maxTermsPerTopic)\n\n @since(\"2.0.0\")\n def estimatedDocConcentration(self):\n \"\"\"\n Value for :py:attr:`LDA.docConcentration` estimated from data.\n If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,\n then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.\n \"\"\"\n return self._call_java(\"estimatedDocConcentration\")\n\n\n@inherit_doc\nclass DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):\n \"\"\"\n Distributed model fitted by :py:class:`LDA`.\n This type of model is currently only produced by Expectation-Maximization (EM).\n\n This model stores the inferred topics, the full training dataset, and the topic distribution\n for each training document.\n\n .. 
versionadded:: 2.0.0\n \"\"\"\n\n @since(\"2.0.0\")\n def toLocal(self):\n \"\"\"\n Convert this distributed model to a local representation. This discards info about the\n training dataset.\n\n WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.\n \"\"\"\n model = LocalLDAModel(self._call_java(\"toLocal\"))\n\n # SPARK-10931: Temporary fix to be removed once LDAModel defines Params\n model._create_params_from_java()\n model._transfer_params_from_java()\n\n return model\n\n @since(\"2.0.0\")\n def trainingLogLikelihood(self):\n \"\"\"\n Log likelihood of the observed tokens in the training set,\n given the current parameter estimates:\n log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)\n\n Notes:\n - This excludes the prior; for that, use :py:func:`logPrior`.\n - Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given\n the hyperparameters.\n - This is computed from the topic distributions computed during training. If you call\n :py:func:`logLikelihood` on the same training dataset, the topic distributions\n will be computed again, possibly giving different results.\n \"\"\"\n return self._call_java(\"trainingLogLikelihood\")\n\n @since(\"2.0.0\")\n def logPrior(self):\n \"\"\"\n Log probability of the current parameter estimate:\n log P(topics, topic distributions for docs | alpha, eta)\n \"\"\"\n return self._call_java(\"logPrior\")\n\n @since(\"2.0.0\")\n def getCheckpointFiles(self):\n \"\"\"\n If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may\n be saved checkpoint files. This method is provided so that users can manage those files.\n\n .. note:: Removing the checkpoints can cause failures if a partition is lost and is needed\n by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up\n the checkpoints when this model and derivative data go out of scope.\n\n :return: List of checkpoint files from training\n \"\"\"\n return self._call_java(\"getCheckpointFiles\")\n\n\n@inherit_doc\nclass LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):\n \"\"\"\n Local (non-distributed) model fitted by :py:class:`LDA`.\n This model stores the inferred topics only; it does not store info about the training dataset.\n\n .. versionadded:: 2.0.0\n \"\"\"\n pass\n\n\n@inherit_doc\nclass LDA(JavaEstimator, _LDAParams, JavaMLReadable, JavaMLWritable):\n \"\"\"\n Latent Dirichlet Allocation (LDA), a topic model designed for text documents.\n\n Terminology:\n\n - \"term\" = \"word\": an element of the vocabulary\n - \"token\": instance of a term appearing in a document\n - \"topic\": multinomial distribution over terms representing some concept\n - \"document\": one piece of text, corresponding to one row in the input data\n\n Original LDA paper (journal version):\n Blei, Ng, and Jordan. \"Latent Dirichlet Allocation.\" JMLR, 2003.\n\n Input data (featuresCol):\n LDA is given a collection of documents as input data, via the featuresCol parameter.\n Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the\n count for the corresponding term (word) in the document. Feature transformers such as\n :py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`\n can be useful for converting text to word count vectors.\n\n >>> from pyspark.ml.linalg import Vectors, SparseVector\n >>> from pyspark.ml.clustering import LDA\n >>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],\n ... 
[2, SparseVector(2, {0: 1.0})],], [\"id\", \"features\"])\n >>> lda = LDA(k=2, seed=1, optimizer=\"em\")\n >>> lda.setMaxIter(10)\n LDA...\n >>> lda.getMaxIter()\n 10\n >>> lda.clear(lda.maxIter)\n >>> model = lda.fit(df)\n >>> model.setSeed(1)\n DistributedLDAModel...\n >>> model.getTopicDistributionCol()\n 'topicDistribution'\n >>> model.isDistributed()\n True\n >>> localModel = model.toLocal()\n >>> localModel.isDistributed()\n False\n >>> model.vocabSize()\n 2\n >>> model.describeTopics().show()\n +-----+-----------+--------------------+\n |topic|termIndices| termWeights|\n +-----+-----------+--------------------+\n | 0| [1, 0]|[0.50401530077160...|\n | 1| [0, 1]|[0.50401530077160...|\n +-----+-----------+--------------------+\n ...\n >>> model.topicsMatrix()\n DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)\n >>> lda_path = temp_path + \"/lda\"\n >>> lda.save(lda_path)\n >>> sameLDA = LDA.load(lda_path)\n >>> distributed_model_path = temp_path + \"/lda_distributed_model\"\n >>> model.save(distributed_model_path)\n >>> sameModel = DistributedLDAModel.load(distributed_model_path)\n >>> local_model_path = temp_path + \"/lda_local_model\"\n >>> localModel.save(local_model_path)\n >>> sameLocalModel = LocalLDAModel.load(local_model_path)\n\n .. versionadded:: 2.0.0\n \"\"\"\n\n @keyword_only\n def __init__(self, featuresCol=\"features\", maxIter=20, seed=None, checkpointInterval=10,\n k=10, optimizer=\"online\", learningOffset=1024.0, learningDecay=0.51,\n subsamplingRate=0.05, optimizeDocConcentration=True,\n docConcentration=None, topicConcentration=None,\n topicDistributionCol=\"topicDistribution\", keepLastCheckpoint=True):\n \"\"\"\n __init__(self, featuresCol=\"features\", maxIter=20, seed=None, checkpointInterval=10,\\\n k=10, optimizer=\"online\", learningOffset=1024.0, learningDecay=0.51,\\\n subsamplingRate=0.05, optimizeDocConcentration=True,\\\n docConcentration=None, topicConcentration=None,\\\n topicDistributionCol=\"topicDistribution\", keepLastCheckpoint=True)\n \"\"\"\n super(LDA, self).__init__()\n self._java_obj = self._new_java_obj(\"org.apache.spark.ml.clustering.LDA\", self.uid)\n self._setDefault(maxIter=20, checkpointInterval=10,\n k=10, optimizer=\"online\", learningOffset=1024.0, learningDecay=0.51,\n subsamplingRate=0.05, optimizeDocConcentration=True,\n topicDistributionCol=\"topicDistribution\", keepLastCheckpoint=True)\n kwargs = self._input_kwargs\n self.setParams(**kwargs)\n\n def _create_model(self, java_model):\n if self.getOptimizer() == \"em\":\n return DistributedLDAModel(java_model)\n else:\n return LocalLDAModel(java_model)\n\n @keyword_only\n @since(\"2.0.0\")\n def setParams(self, featuresCol=\"features\", maxIter=20, seed=None, checkpointInterval=10,\n k=10, optimizer=\"online\", learningOffset=1024.0, learningDecay=0.51,\n subsamplingRate=0.05, optimizeDocConcentration=True,\n docConcentration=None, topicConcentration=None,\n topicDistributionCol=\"topicDistribution\", keepLastCheckpoint=True):\n \"\"\"\n setParams(self, featuresCol=\"features\", maxIter=20, seed=None, checkpointInterval=10,\\\n k=10, optimizer=\"online\", learningOffset=1024.0, learningDecay=0.51,\\\n subsamplingRate=0.05, optimizeDocConcentration=True,\\\n docConcentration=None, topicConcentration=None,\\\n topicDistributionCol=\"topicDistribution\", keepLastCheckpoint=True)\n\n Sets params for LDA.\n \"\"\"\n kwargs = self._input_kwargs\n return self._set(**kwargs)\n\n @since(\"2.0.0\")\n def setCheckpointInterval(self, value):\n \"\"\"\n Sets the value of 
:py:attr:`checkpointInterval`.\n \"\"\"\n return self._set(checkpointInterval=value)\n\n @since(\"2.0.0\")\n def setSeed(self, value):\n \"\"\"\n Sets the value of :py:attr:`seed`.\n \"\"\"\n return self._set(seed=value)\n\n @since(\"2.0.0\")\n def setK(self, value):\n \"\"\"\n Sets the value of :py:attr:`k`.\n\n >>> algo = LDA().setK(10)\n >>> algo.getK()\n 10\n \"\"\"\n return self._set(k=value)\n\n @since(\"2.0.0\")\n def setOptimizer(self, value):\n \"\"\"\n Sets the value of :py:attr:`optimizer`.\n Currently only support 'em' and 'online'.\n\n >>> algo = LDA().setOptimizer(\"em\")\n >>> algo.getOptimizer()\n 'em'\n \"\"\"\n return self._set(optimizer=value)\n\n @since(\"2.0.0\")\n def setLearningOffset(self, value):\n \"\"\"\n Sets the value of :py:attr:`learningOffset`.\n\n >>> algo = LDA().setLearningOffset(100)\n >>> algo.getLearningOffset()\n 100.0\n \"\"\"\n return self._set(learningOffset=value)\n\n @since(\"2.0.0\")\n def setLearningDecay(self, value):\n \"\"\"\n Sets the value of :py:attr:`learningDecay`.\n\n >>> algo = LDA().setLearningDecay(0.1)\n >>> algo.getLearningDecay()\n 0.1...\n \"\"\"\n return self._set(learningDecay=value)\n\n @since(\"2.0.0\")\n def setSubsamplingRate(self, value):\n \"\"\"\n Sets the value of :py:attr:`subsamplingRate`.\n\n >>> algo = LDA().setSubsamplingRate(0.1)\n >>> algo.getSubsamplingRate()\n 0.1...\n \"\"\"\n return self._set(subsamplingRate=value)\n\n @since(\"2.0.0\")\n def setOptimizeDocConcentration(self, value):\n \"\"\"\n Sets the value of :py:attr:`optimizeDocConcentration`.\n\n >>> algo = LDA().setOptimizeDocConcentration(True)\n >>> algo.getOptimizeDocConcentration()\n True\n \"\"\"\n return self._set(optimizeDocConcentration=value)\n\n @since(\"2.0.0\")\n def setDocConcentration(self, value):\n \"\"\"\n Sets the value of :py:attr:`docConcentration`.\n\n >>> algo = LDA().setDocConcentration([0.1, 0.2])\n >>> algo.getDocConcentration()\n [0.1..., 0.2...]\n \"\"\"\n return self._set(docConcentration=value)\n\n @since(\"2.0.0\")\n def setTopicConcentration(self, value):\n \"\"\"\n Sets the value of :py:attr:`topicConcentration`.\n\n >>> algo = LDA().setTopicConcentration(0.5)\n >>> algo.getTopicConcentration()\n 0.5...\n \"\"\"\n return self._set(topicConcentration=value)\n\n @since(\"2.0.0\")\n def setTopicDistributionCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`topicDistributionCol`.\n\n >>> algo = LDA().setTopicDistributionCol(\"topicDistributionCol\")\n >>> algo.getTopicDistributionCol()\n 'topicDistributionCol'\n \"\"\"\n return self._set(topicDistributionCol=value)\n\n @since(\"2.0.0\")\n def setKeepLastCheckpoint(self, value):\n \"\"\"\n Sets the value of :py:attr:`keepLastCheckpoint`.\n\n >>> algo = LDA().setKeepLastCheckpoint(False)\n >>> algo.getKeepLastCheckpoint()\n False\n \"\"\"\n return self._set(keepLastCheckpoint=value)\n\n @since(\"2.0.0\")\n def setMaxIter(self, value):\n \"\"\"\n Sets the value of :py:attr:`maxIter`.\n \"\"\"\n return self._set(maxIter=value)\n\n @since(\"2.0.0\")\n def setFeaturesCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`featuresCol`.\n \"\"\"\n return self._set(featuresCol=value)\n\n\n@inherit_doc\nclass _PowerIterationClusteringParams(HasMaxIter, HasWeightCol):\n \"\"\"\n Params for :py:class:`PowerIterationClustering`.\n\n .. versionadded:: 3.0.0\n \"\"\"\n\n k = Param(Params._dummy(), \"k\",\n \"The number of clusters to create. Must be > 1.\",\n typeConverter=TypeConverters.toInt)\n initMode = Param(Params._dummy(), \"initMode\",\n \"The initialization algorithm. 
This can be either \" +\n \"'random' to use a random vector as vertex properties, or 'degree' to use \" +\n \"a normalized sum of similarities with other vertices. Supported options: \" +\n \"'random' and 'degree'.\",\n typeConverter=TypeConverters.toString)\n srcCol = Param(Params._dummy(), \"srcCol\",\n \"Name of the input column for source vertex IDs.\",\n typeConverter=TypeConverters.toString)\n dstCol = Param(Params._dummy(), \"dstCol\",\n \"Name of the input column for destination vertex IDs.\",\n typeConverter=TypeConverters.toString)\n\n @since(\"2.4.0\")\n def getK(self):\n \"\"\"\n Gets the value of :py:attr:`k` or its default value.\n \"\"\"\n return self.getOrDefault(self.k)\n\n @since(\"2.4.0\")\n def getInitMode(self):\n \"\"\"\n Gets the value of :py:attr:`initMode` or its default value.\n \"\"\"\n return self.getOrDefault(self.initMode)\n\n @since(\"2.4.0\")\n def getSrcCol(self):\n \"\"\"\n Gets the value of :py:attr:`srcCol` or its default value.\n \"\"\"\n return self.getOrDefault(self.srcCol)\n\n @since(\"2.4.0\")\n def getDstCol(self):\n \"\"\"\n Gets the value of :py:attr:`dstCol` or its default value.\n \"\"\"\n return self.getOrDefault(self.dstCol)\n\n\n@inherit_doc\nclass PowerIterationClustering(_PowerIterationClusteringParams, JavaParams, JavaMLReadable,\n JavaMLWritable):\n \"\"\"\n Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by\n `Lin and Cohen <http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf>`_. From the\n abstract: PIC finds a very low-dimensional embedding of a dataset using truncated power\n iteration on a normalized pair-wise similarity matrix of the data.\n\n This class is not yet an Estimator/Transformer, use :py:func:`assignClusters` method\n to run the PowerIterationClustering algorithm.\n\n .. seealso:: `Wikipedia on Spectral clustering\n <http://en.wikipedia.org/wiki/Spectral_clustering>`_\n\n >>> data = [(1, 0, 0.5),\n ... (2, 0, 0.5), (2, 1, 0.7),\n ... (3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9),\n ... (4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1),\n ... (5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]\n >>> df = spark.createDataFrame(data).toDF(\"src\", \"dst\", \"weight\").repartition(1)\n >>> pic = PowerIterationClustering(k=2, weightCol=\"weight\")\n >>> pic.setMaxIter(40)\n PowerIterationClustering...\n >>> assignments = pic.assignClusters(df)\n >>> assignments.sort(assignments.id).show(truncate=False)\n +---+-------+\n |id |cluster|\n +---+-------+\n |0 |0 |\n |1 |0 |\n |2 |0 |\n |3 |0 |\n |4 |0 |\n |5 |1 |\n +---+-------+\n ...\n >>> pic_path = temp_path + \"/pic\"\n >>> pic.save(pic_path)\n >>> pic2 = PowerIterationClustering.load(pic_path)\n >>> pic2.getK()\n 2\n >>> pic2.getMaxIter()\n 40\n\n .. 
versionadded:: 2.4.0\n \"\"\"\n\n @keyword_only\n def __init__(self, k=2, maxIter=20, initMode=\"random\", srcCol=\"src\", dstCol=\"dst\",\n weightCol=None):\n \"\"\"\n __init__(self, k=2, maxIter=20, initMode=\"random\", srcCol=\"src\", dstCol=\"dst\",\\\n weightCol=None)\n \"\"\"\n super(PowerIterationClustering, self).__init__()\n self._java_obj = self._new_java_obj(\n \"org.apache.spark.ml.clustering.PowerIterationClustering\", self.uid)\n self._setDefault(k=2, maxIter=20, initMode=\"random\", srcCol=\"src\", dstCol=\"dst\")\n kwargs = self._input_kwargs\n self.setParams(**kwargs)\n\n @keyword_only\n @since(\"2.4.0\")\n def setParams(self, k=2, maxIter=20, initMode=\"random\", srcCol=\"src\", dstCol=\"dst\",\n weightCol=None):\n \"\"\"\n setParams(self, k=2, maxIter=20, initMode=\"random\", srcCol=\"src\", dstCol=\"dst\",\\\n weightCol=None)\n Sets params for PowerIterationClustering.\n \"\"\"\n kwargs = self._input_kwargs\n return self._set(**kwargs)\n\n @since(\"2.4.0\")\n def setK(self, value):\n \"\"\"\n Sets the value of :py:attr:`k`.\n \"\"\"\n return self._set(k=value)\n\n @since(\"2.4.0\")\n def setInitMode(self, value):\n \"\"\"\n Sets the value of :py:attr:`initMode`.\n \"\"\"\n return self._set(initMode=value)\n\n @since(\"2.4.0\")\n def setSrcCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`srcCol`.\n \"\"\"\n return self._set(srcCol=value)\n\n @since(\"2.4.0\")\n def setDstCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`dstCol`.\n \"\"\"\n return self._set(dstCol=value)\n\n @since(\"2.4.0\")\n def setMaxIter(self, value):\n \"\"\"\n Sets the value of :py:attr:`maxIter`.\n \"\"\"\n return self._set(maxIter=value)\n\n @since(\"2.4.0\")\n def setWeightCol(self, value):\n \"\"\"\n Sets the value of :py:attr:`weightCol`.\n \"\"\"\n return self._set(weightCol=value)\n\n @since(\"2.4.0\")\n def assignClusters(self, dataset):\n \"\"\"\n Run the PIC algorithm and returns a cluster assignment for each input vertex.\n\n :param dataset:\n A dataset with columns src, dst, weight representing the affinity matrix,\n which is the matrix A in the PIC paper. Suppose the src column value is i,\n the dst column value is j, the weight column value is similarity s,,ij,,\n which must be nonnegative. This is a symmetric matrix and hence\n s,,ij,, = s,,ji,,. For any (i, j) with nonzero similarity, there should be\n either (i, j, s,,ij,,) or (j, i, s,,ji,,) in the input. Rows with i = j are\n ignored, because we assume s,,ij,, = 0.0.\n\n :return:\n A dataset that contains columns of vertex id and the corresponding cluster for\n the id. The schema of it will be:\n - id: Long\n - cluster: Int\n\n .. 
versionadded:: 2.4.0\n \"\"\"\n self._transfer_params_to_java()\n jdf = self._java_obj.assignClusters(dataset._jdf)\n return DataFrame(jdf, dataset.sql_ctx)\n\n\nif __name__ == \"__main__\":\n import doctest\n import numpy\n import pyspark.ml.clustering\n from pyspark.sql import SparkSession\n try:\n # Numpy 1.14+ changed it's string format.\n numpy.set_printoptions(legacy='1.13')\n except TypeError:\n pass\n globs = pyspark.ml.clustering.__dict__.copy()\n # The small batch size here ensures that we see multiple batches,\n # even in these small test examples:\n spark = SparkSession.builder\\\n .master(\"local[2]\")\\\n .appName(\"ml.clustering tests\")\\\n .getOrCreate()\n sc = spark.sparkContext\n globs['sc'] = sc\n globs['spark'] = spark\n import tempfile\n temp_path = tempfile.mkdtemp()\n globs['temp_path'] = temp_path\n try:\n (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)\n spark.stop()\n finally:\n from shutil import rmtree\n try:\n rmtree(temp_path)\n except OSError:\n pass\n if failure_count:\n sys.exit(-1)\n" ]
[ [ "numpy.set_printoptions" ] ]
atomicoo/EnhanceIMG
[ "8c009fbb6c5461ff6d7f30bdacec72232639c7f2" ]
[ "awegan/options/base_options.py" ]
[ "import argparse\nimport os, sys\nfrom abc import ABC, abstractmethod\n\nimport torch\nimport models\nimport datasets\n\n\nclass BaseOptions(ABC):\n \"\"\"This class is an abstract base class (ABC) for options.\n To create a subclass, you need to implement the following five functions:\n -- <__init__>: initialize the class; first call BaseOptions.__init__(self, opt).\n -- <initialize>: initialize the option's arguments.\n -- <parse>: parse the option's arguments.\n \"\"\"\n def __init__(self):\n pass\n\n @abstractmethod\n def initialize(self, parser):\n pass\n\n @abstractmethod\n def parse(self):\n pass\n\n\nclass BasicOptions(BaseOptions):\n \"\"\"This class defines options used during both training and test time.\n\n It also implements several helper functions such as parsing, printing, and saving the options.\n It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.\n \"\"\"\n\n def __init__(self):\n \"\"\"Reset the class; indicates the class hasn't been initailized\"\"\"\n self.initialized = False\n\n def initialize(self, parser):\n \"\"\"Define the common options that are used in both training and test.\"\"\"\n # basic parameters\n parser.add_argument('--data_root', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')\n parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')\n parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')\n parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')\n # model parameters\n parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | recyc_gan]')\n parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')\n parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')\n parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')\n parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')\n parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. 
n_layers allows you to specify the layers in the discriminator')\n parser.add_argument('--netG', type=str, default='resnet_9bs', help='specify generator architecture [resnet_9bs | resnet_6bs | unet_256 | unet_128]')\n parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')\n parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')\n parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')\n parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')\n parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')\n # vgg parameters for perceptrual loss\n parser.add_argument('--vgg', type=float, default=0, help='use perceptrual loss')\n parser.add_argument('--vgg_mean', action='store_true', help='substract mean in vgg loss')\n parser.add_argument('--vgg_choose', type=str, default='relu5_3', help='choose layer for vgg')\n parser.add_argument('--no_vgg_instance', action='store_true', help='vgg instance normalization')\n parser.add_argument('--vgg_maxpooling', action='store_true', help='normalize attention map')\n # dataset parameters\n parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | degraded]')\n parser.add_argument('--degraded_mode', type=str, default='colorization', help='chooses how datasets are loaded. [colorization | super_resolution | denoising | restoration]')\n parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')\n parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')\n parser.add_argument('--num_threads', default=0 if sys.platform.startswith('win') else 4, type=int, help='# threads for loading data')\n parser.add_argument('--batch_size', type=int, default=1, help='input batch size')\n parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')\n parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')\n parser.add_argument('--max_dataset_size', type=int, default=float(\"inf\"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')\n parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')\n parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')\n parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')\n # additional parameters\n parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')\n parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? 
if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')\n parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')\n parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')\n\n self.initialized = True\n return parser\n\n def gather_options(self):\n \"\"\"Initialize our parser with basic options(only once).\n \"\"\"\n if not self.initialized: # check if it has been initialized\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser = self.initialize(parser)\n\n # get the basic options\n opt, _ = parser.parse_known_args()\n\n # modify model-related parser options\n model_name = opt.model\n model_option_setter = models.get_option_setter(model_name)\n parser = model_option_setter(parser, self.is_train)\n opt, _ = parser.parse_known_args() # parse again with new defaults\n\n # modify dataset-related parser options\n dataset_name = opt.dataset_mode\n dataset_option_setter = datasets.get_option_setter(dataset_name)\n parser = dataset_option_setter(parser, self.is_train)\n opt, _ = parser.parse_known_args() # parse again with new defaults\n\n # save and return the parser\n self.parser = parser\n return parser.parse_args()\n\n def print_options(self, opt):\n \"\"\"Print and save options\n\n It will print both current options and default values(if different).\n It will save options into a text file / [checkpoints_dir] / opt.txt\n \"\"\"\n message = ''\n message += '----------------- Options ---------------\\n'\n for k, v in sorted(vars(opt).items()):\n comment = ''\n default = self.parser.get_default(k)\n if v != default:\n comment = '\\t[default: %s]' % str(default)\n message += '{:>25}: {:<30}{}\\n'.format(str(k), str(v), comment)\n message += '----------------- End -------------------'\n print(message)\n\n # save to the disk\n expr_dir = os.path.join(opt.checkpoints_dir, opt.name)\n os.makedirs(expr_dir, exist_ok=True)\n file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))\n with open(file_name, 'wt') as opt_file:\n opt_file.write(message)\n opt_file.write('\\n')\n\n def parse(self):\n \"\"\"Parse our options, create checkpoints directory suffix, and set up gpu device.\"\"\"\n opt = self.gather_options()\n opt.is_train = self.is_train # train or test\n\n # process opt.suffix\n if opt.suffix:\n suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''\n opt.name = opt.name + suffix\n\n self.print_options(opt)\n\n # set gpu ids\n str_ids = opt.gpu_ids.split(',')\n opt.gpu_ids = []\n for str_id in str_ids:\n id = int(str_id)\n if id >= 0:\n opt.gpu_ids.append(id)\n if len(opt.gpu_ids) > 0:\n torch.cuda.set_device(opt.gpu_ids[0])\n\n self.opt = opt\n return self.opt\n" ]
[ [ "torch.cuda.set_device" ] ]
zhunzhong/audio-sync-kit
[ "abe826334ef4cf0a3e6809877584b6aa243140d7" ]
[ "audio_sync/cli.py" ]
[ "# Copyright 2016 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"CLI to measure latencies between two audio signals.\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport datetime\nimport json\nimport logging\nimport math\nimport sys\nimport wave\n\nimport audio_sync\nfrom audio_sync import analyzer\nfrom audio_sync import plot\nimport numpy\n\n\nEXIT_CODE_UNKNOWN_ERROR = 255\nEXIT_CODE_ARGS_PARSE_ERROR = 127\nEXIT_CODE_SUCCESS = 0\nEXIT_CODE_LATENCIES_ABOVE_THRESHOLD = 1\nEXIT_CODE_DROPOUTS_DETECTED = 2\n\nVERY_LARGE_LATENCY_USEC = 10000\n\nSECS_IN_MSEC = 1000\nSECS_IN_USEC = 1000000\n\n\ndef ParseArgs(args):\n \"\"\"Helper function for command line parameter parsing.\n\n Args:\n args: (list of str) arguments passed to CLI.\n\n Returns:\n The parsed parameters.\n \"\"\"\n parser = argparse.ArgumentParser(description='Measure latency.')\n parser.add_argument('--debug', default=False, action='store_true',\n help='Enable debug output.')\n parser.add_argument('ref_wav_path',\n help='Path to the reference .wav file.')\n parser.add_argument('act_wav_path',\n help='Path to the actual .wav file.')\n parser.add_argument('--period', type=float, default=0.1,\n help='Fundamental period of audio files (secs).')\n parser.add_argument('--pulse_length', type=float, default=0.002,\n help='Duration of pulse in audio files (secs).')\n parser.add_argument('--dropout_threshold', type=float, default=0.3,\n help=('Dropout threshold, every peak below will be '\n 'interpreted as dropout. Range: [0.0, 1.0]'))\n parser.add_argument('--silence_threshold', type=float, default=0.05,\n help=('Silence threshold, every value below will be '\n 'interpreted as silence. Range: [0.0, 1.0]'))\n parser.add_argument('--min_silence_length', type=float, default=0.005,\n help=('Minimum length of silence (secs). 
Silences '\n 'below this duration will be ignored.'))\n parser.add_argument('--parsable_output', default=False, action='store_true',\n help='Print latencies and dropouts as a JSON string.')\n parser.add_argument('--print_stats', default=False, action='store_true',\n help='Print latencies stats (max, min, and average).')\n parser.add_argument('--print_percentiles', default=False, action='store_true',\n help='Print latency percentiles.')\n parser.add_argument('--plot_timeline', default=False, action='store_true',\n help=('Plot the conditions in a timeline.'))\n parser.add_argument('--latency_threshold', type=float, default=0.001,\n help=('Latencies equal or greater than this threshold '\n '(secs) are considered excessive.'))\n parser.add_argument('--plot_ascii_graph', default=False, action='store_true',\n help=('Plots all latencies as ASCII art.'))\n parser.add_argument('--start_time', default='00:00:00',\n help=('hh:mm:ss of when playback started.'))\n parser.add_argument('--dots_per_msec', type=int, default='10',\n help=('How many ASCII dots are used per msec of '\n 'latency.'))\n return parser.parse_args(args)\n\n\ndef GetStats(latencies):\n \"\"\"Gets latency stats.\n\n Args:\n latencies: (list) list of 2-tuples (<time>, <latency>).\n\n Returns:\n A 3-tuple:\n Element 1: (float) max latency in seconds.\n Element 2: (float) min latency in seconds.\n Element 3: (float) mean latency in seconds.\n \"\"\"\n values = [d for _, d in latencies if not math.isnan(d)]\n if values:\n # The max, min, and avg should be based on absolute values (otherwise,\n # we could report that -0.1 is greater than -0.2, which is misleading),\n # but we still need to show the signed value so users can tell if the\n # signal was ahead or behind.\n return (max(values, key=abs),\n min(values, key=abs),\n numpy.mean(values))\n else:\n return float('NaN'), float('NaN'), float('NaN')\n\n\ndef CalculatePercentiles(latencies, percentiles=(0, 50, 75, 90, 95, 99, 100)):\n \"\"\"Calculates the latency percentiles.\n\n Args:\n latencies: (list) list of 2-tuples (<time>, <latency>).\n percentiles: (tuple) tuple containing the percentiles to calculate.\n\n Returns:\n A list of the form [(<percentile>, abs(<value>)), ...] for each of\n the percentiles requested.\n \"\"\"\n values = [d for _, d in latencies if not math.isnan(d)]\n if values:\n vals = numpy.percentile(sorted([abs(v) for v in values]),\n percentiles).tolist()\n return zip(percentiles, vals)\n else:\n return zip(percentiles, (float('NaN'),) * 7)\n\n\ndef _Print(message):\n \"\"\"Prints |message| to standard output.\"\"\"\n print(message)\n\n\ndef _PlotResults(\n duration_secs, latencies, dropouts, num_ticks=5, num_dots=70,\n latency_threshold_secs=0.001):\n \"\"\"Plots the results in a text timeline.\"\"\"\n duration_secs = float(duration_secs)\n\n conditions_timeline = plot.GetConditionsInTimeframe(\n latencies, dropouts, duration_secs, num_dots, latency_threshold_secs)\n\n output = (\n 'Timeline:\\n'\n '%s\\n\\n'\n '< = Act more than %.3f secs behind ref\\n'\n '> = Act more than %.3f secs ahead of ref\\n'\n 'o = Dropout\\n'\n '. 
= %.3f secs\\n') % (\n plot.GetPlotString(conditions_timeline, duration_secs, num_ticks),\n latency_threshold_secs,\n latency_threshold_secs,\n duration_secs / num_dots\n )\n print(output)\n\n\ndef _PlotAsciiGraph(\n latencies, start_time, dots_per_msec=10, latency_threshold_secs=0.001):\n \"\"\"Plots all latencies with timestamp in an ASCII timeline.\n\n Args:\n latencies: (list) list of 2-tuples (<time>, <latency>).\n start_time: (datetime) time the capture of the .wav files started.\n dots_per_msec: (int) How many ASCII dots to use per msec of latency.\n latency_threshold_secs: (float) latencies equal or greater than this\n threshold are considered excessive and are marked with a '*'.\n \"\"\"\n if dots_per_msec < 0:\n raise ValueError('Invalid dots_per_msec %d.' % dots_per_msec)\n\n if latency_threshold_secs < 0:\n raise ValueError('Invalid latency_threshold_secs %d.' % (\n latency_threshold_secs))\n\n threshold_in_dots = int(latency_threshold_secs * SECS_IN_MSEC * dots_per_msec)\n for latency in latencies:\n if math.isnan(latency[1]):\n usecs = VERY_LARGE_LATENCY_USEC\n else:\n usecs = SECS_IN_USEC * latency[1]\n msecs = usecs / 1000\n dots_total = int(abs(msecs) * dots_per_msec)\n dots_below_threshold = min(dots_total, threshold_in_dots)\n filler_spaces_until_thresh = threshold_in_dots - dots_below_threshold\n out_str = '.'*dots_below_threshold + ' '*filler_spaces_until_thresh + '|'\n if dots_total > threshold_in_dots:\n dots_above_thresh = dots_total - threshold_in_dots\n out_str += '*' * dots_above_thresh\n total_secs = int(latency[0])\n time_h = (int)(total_secs / 60)\n time_m = (int)(total_secs % 60)\n t = datetime.timedelta(seconds=total_secs)\n print((start_time + t).strftime('%H:%M:%S'),\n '%2.2d:%2.2d > %+4.4d %s' % (time_h, time_m, usecs, out_str))\n\n values = [d for _, d in latencies if not math.isnan(d)]\n if values:\n avg = numpy.mean(values)\n print(\"\\navg[%d]=%.6f\\n\" % (len(values), avg))\n\n\ndef _PrintPercentiles(percentiles):\n \"\"\"Prints the percentiles to standard output.\"\"\"\n output = '\\n'.join([\n '%d%%: %.6f' % p for p in percentiles])\n _Print('Percentiles (secs):\\n' + output)\n\n\ndef _GetWaveDurationSecs(wav_path):\n \"\"\"Gets the duration in secs of the WAV file.\"\"\"\n wav = wave.open(wav_path)\n try:\n return wav.getnframes() / (wav.getnchannels() * wav.getframerate())\n finally:\n wav.close()\n\n\ndef _Main(args):\n \"\"\"Parses options and shows results.\"\"\"\n try:\n args = ParseArgs(args)\n except SystemExit:\n sys.exit(EXIT_CODE_ARGS_PARSE_ERROR)\n\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n\n try:\n settings = analyzer.AnalysisSettings(\n args.period, args.pulse_length, args.dropout_threshold,\n args.silence_threshold, args.min_silence_length)\n latencies, dropouts = audio_sync.AnalyzeAudios(\n args.ref_wav_path, args.act_wav_path, settings)\n print(latencies)\n max_latency, min_latency, avg_latency = GetStats(latencies)\n\n if args.parsable_output:\n _Print(json.dumps({'latencies': latencies, 'dropouts': dropouts}))\n else:\n if args.plot_ascii_graph:\n try:\n start_time = datetime.datetime.strptime(args.start_time, \"%H:%M:%S\")\n except ValueError:\n sys.exit(EXIT_CODE_ARGS_PARSE_ERROR)\n _PlotAsciiGraph(latencies, start_time, dots_per_msec=args.dots_per_msec,\n latency_threshold_secs=args.latency_threshold)\n duration_secs = _GetWaveDurationSecs(args.ref_wav_path)\n if args.plot_timeline:\n _PlotResults(duration_secs, latencies, dropouts,\n latency_threshold_secs=args.latency_threshold)\n if args.print_stats:\n 
_Print('Max latency: %f secs' % max_latency)\n _Print('Min latency: %f secs' % min_latency)\n _Print('Mean latency: %f secs\\n' % avg_latency)\n if args.print_percentiles:\n percentiles = CalculatePercentiles(latencies)\n _PrintPercentiles(percentiles)\n\n if abs(max_latency) >= args.latency_threshold:\n sys.exit(EXIT_CODE_LATENCIES_ABOVE_THRESHOLD)\n elif dropouts:\n sys.exit(EXIT_CODE_DROPOUTS_DETECTED)\n else:\n sys.exit(EXIT_CODE_SUCCESS)\n except Exception: # pylint: disable=broad-except\n logging.exception('')\n sys.exit(EXIT_CODE_UNKNOWN_ERROR)\n\n\ndef main():\n _Main(sys.argv[1:])\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.mean" ] ]
dhill2522/OPTIONS
[ "f5058e39bef204a53991b275d79f2d7223ff2d61" ]
[ "Class1_Eq.py" ]
[ "\"\"\"\r\nCreated on Mon Nov 05 03:52:36 2018\r\n@author: Paul\r\n\"\"\"\r\n\r\n### Boiler-Plate ###\r\nfrom threading import Thread\r\nimport matplotlib.pylab as plt\r\nimport numpy as np\r\nimport scipy as sp\r\nfrom numpy import random\r\nimport time\r\n\r\nfrom Func import *\r\nfrom iapws97 import _PSat_T\r\n\r\n\r\n###############################################################################\r\n\"\"\"\"\"\"\"\"\" All PERCS Equipment Classes \"\"\"\"\"\"\"\"\" ###########################\r\n###############################################################################\r\n\r\nclass Pipe: # Pipes 716 & 717\r\n def __init__(self,len_):\r\n self.Di = 0.5 # ft\r\n self.len = len_ # ft\r\n self.cost = 0.0 # $\r\n def calc_Pipe(self):\r\n self.cost = 50.0*self.len # $\r\n\r\nclass Support: # PERCS Tank Support Structure\r\n def __init__(self,R_tank,H_tank,elev_tank):\r\n self.R = R_tank # ft\r\n self.H = H_tank # ft\r\n self.elev = elev_tank # ft\r\n self.cost = 0.0 # $\r\n def calc_Support(self):\r\n profile = np.pi*self.R**2.0 # ft^2\r\n cost_per_sqft = 30 # $/ft^2\r\n elev_factor = 0.0628*self.elev + 1.0\r\n self.cost = elev_factor * profile * cost_per_sqft\r\n\r\nclass HX: # Fake Heat Exchanger\r\n def __init__(self):\r\n self.P = 155.132 # bar (hot leg pressure)\r\n self.n = 0 # (number of tubes)\r\n self.A = 0.0 # m^2 (heat transfer SA)\r\n self.cost = 0.0 # $\r\n def calc_HX(self):\r\n # Calculate Cost\r\n K = np.array([4.1884,-0.2503,0.1974])\r\n if self.A <= 1000:\r\n # Purchase Cost\r\n C_p0 = 10.0**(K[0]+K[1]*np.log10(self.A)+K[2]*np.log10(self.A)**2.0)\r\n else:\r\n C_p0 = 11665.8957777+152.0393955*self.A\r\n P_g = self.P-1.0 # barg\r\n if P_g < 5.0: \r\n C = np.array([0.0,0.0,0.0])\r\n else:\r\n C = np.array([0.03881,-0.11272,0.08183])\r\n # Pressure Factor\r\n F_P = 10.0**(C[0]+C[1]*np.log10(P_g)+C[2]*np.log10(P_g)**2.0)\r\n F_M = 2.75 # Material Factor (Stainless Steel)\r\n B_1 = 1.63\r\n B_2 = 1.66\r\n self.cost = (553.9/397.0)*C_p0*(B_1 + B_2*F_M*F_P)\r\n\r\nclass Tank: # PERCS Tank\r\n def __init__(self,R_,H_):\r\n self.P = 1.703 # bar\r\n self.R = R_ # ft\r\n self.D = self.R*2/3.28084 # m\r\n self.H = H_ # ft\r\n self.A = np.pi * (self.R**2.0) * self.H # ft^3\r\n self.A = self.A / 35.3147 # m^3\r\n self.cost = 0.0\r\n def calc_Tank(self):\r\n # Calcuate Cost\r\n K = np.array([3.4974,0.4485,0.1074])\r\n if self.A <= 520:\r\n C_p0 = 10.0**(K[0]+K[1]*np.log10(self.A)+K[2]*np.log10(self.A)**2.0) # Purchase Cost\r\n else:\r\n C_p0 = 637.3687*self.A-9491.036783\r\n P_g = self.P-1.0 # barg\r\n F_P = ((P_g+1.0)*self.D/(2.0*(850.0-0.6*(P_g+1.0)))+0.00315)/0.0063 # Pressure Factor\r\n F_M = 3.12 # Material Factor (SS)\r\n B_1 = 2.25\r\n B_2 = 1.82\r\n self.cost = (553.9/397.0)*C_p0*(B_1 + B_2*F_M*F_P)\r\n\r\nclass Chemical: # MgCO3 within PERCS Tank\r\n def __init__(self,ID):\r\n self.mass = 0.0 # kg\r\n self.ID = ID\r\n self.chem_costs = np.array((24.0,0.0)) # $/kg\r\n self.cost = 0.0\r\n def calc_Chemical(self):\r\n self.cost = self.mass * self.chem_costs[self.ID]\r\n\r\n\r\n###############################################################################\r\n\"\"\"\"\"\"\"\"\" All PCS Equipment Classes \"\"\"\"\"\"\"\"\" #############################\r\n###############################################################################\r\n\r\nclass Stream:\r\n def __init__(self,P,T,mdot,x):\r\n self.y = 1\r\n self.P = P\r\n self.T = T\r\n self.mdot = mdot\r\n self.x = x\r\n\r\nclass Turbine:\r\n def __init__(self,Pin,Tin,mdot,x_in,Pout):\r\n self.y = 1\r\n self.Pin = Pin\r\n 
self.Tin = Tin\r\n self.mdot = mdot\r\n self.x_in = x_in\r\n self.Pout = Pout\r\n self.Tout = 0.0\r\n self.x_out = 0.0\r\n self.W = 0.0\r\n self.eff = 0.915\r\n self.cost = 0.0\r\n def calc_Turb(self):\r\n # If turbine does exist\r\n if self.y == 1:\r\n # If the incoming steam is saturated or superheated\r\n if self.x_in >= 1.0:\r\n Hin = h_pT(self.Pin,self.Tin)\r\n # If the incoming stream is two-phase\r\n else:\r\n Hin = h_Tx(self.Tin,self.x_in)\r\n # Calculate the Power Generated (W)\r\n Sin = S_ph(self.Pin,Hin)\r\n S_out_id = Sin\r\n H_out_id = h_pS(self.Pout,S_out_id)\r\n DH_id = Hin - H_out_id\r\n DH_real = self.eff*DH_id\r\n self.W = self.mdot*DH_real/1000 # MW\r\n # Calculate the outlet properties\r\n H_out = Hin - DH_real\r\n self.Tout = T_ph(self.Pout,H_out)\r\n self.x_out = x_ph(self.Pout,H_out)\r\n # Calcuate Cost\r\n A = self.W * 1000 # kW\r\n K = np.array([2.7051,1.4398,-0.1776])\r\n if A <= 9800:\r\n # Purchase Cost\r\n C_p0 = 10.0**(K[0]+K[1]*np.log10(A)+K[2]*np.log10(A)**2.0)\r\n else:\r\n C_p0 = 410763.708588+0.87078286*A\r\n F_P = 0.0 # Pressure Factor\r\n F_M = 0.0 # Material Factor (Stainless Steel)\r\n B_1 = 1.0\r\n B_2 = 0.0\r\n self.cost = (553.9/397.0)*C_p0*(B_1 + B_2*F_M*F_P)\r\n # If turbine does not exist\r\n else:\r\n self.cost = 0.0\r\n self.W = 0.0\r\n\r\nclass Pump:\r\n def __init__(self,Pin,Tin,mdot,Pout):\r\n self.y = 1\r\n self.Pin = Pin\r\n self.Tin = Tin\r\n self.mdot = mdot\r\n self.Pout = Pout\r\n self.Tout = 0.0\r\n self.W = 0.0\r\n self.eff = 0.85\r\n self.cost = 0.0\r\n def calc_Pump(self):\r\n # If pump does exist\r\n if self.y == 1:\r\n if self.Pin > _PSat_T(self.Tin+273.15)*10.0:\r\n Hin = h_pT(self.Pin,self.Tin)\r\n else:\r\n Hin = h_Tx(self.Tin,0.0)\r\n # Calculate the work requirement (W)\r\n Sin = S_ph(self.Pin,Hin)\r\n S_out_id = Sin\r\n H_out_id = h_pS(self.Pout,S_out_id)\r\n DH_id = H_out_id - Hin\r\n DH_real = DH_id / self.eff\r\n self.W = self.mdot*DH_real/1000 # MW\r\n # Calculate the outlet properties\r\n H_out = Hin + DH_real\r\n self.Tout = T_ph(self.Pout,H_out)\r\n # Calcuate Cost\r\n A = self.W * 1000 # kW\r\n K = np.array([3.3892,0.0536,0.1538])\r\n if A <= 300:\r\n # Purchase Cost\r\n C_p0 = 10.0**(K[0]+K[1]*np.log10(A)+K[2]*np.log10(A)**2.0)\r\n else:\r\n C_p0 = 5371.29236+79.50315*A\r\n P_g = self.Pout - 1.0 # barg\r\n C = np.zeros(3)\r\n if P_g >= 10.0:\r\n C = np.array([-0.3935,0.3957,-0.00226])\r\n # Pressure Factor\r\n F_P = 10.0**(C[0]+C[1]*np.log10(P_g)+C[2]*np.log10(P_g)**2.0)\r\n F_M = 2.28 # Material Factor (Stainless Steel)\r\n B_1 = 1.89\r\n B_2 = 1.35\r\n self.cost = (553.9/397.0)*C_p0*(B_1 + B_2*F_M*F_P)\r\n # If pump does not exist\r\n else:\r\n self.cost = 0.0\r\n self.W = 0.0\r\n\r\nclass PHX: # Primary Heat Exchanger (Steam Generator)\r\n def __init__(self,Tout):\r\n self.y = 1\r\n # Hot stream vars (Default)\r\n self.P_hot = 160.71 # bar\r\n self.Tin_hot = 328.56 # deg C\r\n self.Tout_hot = 296.51 # deg C\r\n self.Q_th = 750.0e3 # Thermal power transferred, in kW\r\n # Cold stream vars\r\n self.Pin = 64. 
# bar\r\n self.Tin = 0.0\r\n self.Pout = self.Pin # Zero pressure drop\r\n self.Tout = Tout # Optimized param\r\n self.xout = 0.0\r\n # Overall vars\r\n self.mdot = 0.0\r\n self.cost = 0.0\r\n def calc_PHX(self):\r\n # Calculate Tcold_in\r\n Hout = h_pT(self.Pout,self.Tout)\r\n Hin = Hout - self.Q_th/self.mdot\r\n self.Tin = T_ph(self.Pin,Hin)\r\n # Calculate xout (quality leaving phx)\r\n self.xout = x_ph(self.Pout,Hout)\r\n # Find required heat-transfer Area, A\r\n # Log mean temperature difference\r\n DT_1 = self.Tin_hot-self.Tout\r\n DT_2 = self.Tout_hot-self.Tin\r\n self.DT_lm = (DT_2-DT_1)/np.log(DT_2/DT_1)\r\n # HX calculations\r\n self.F = 1. # for phase change\r\n U = 5 #kW/m^2*K, back-calculated from B&W params\r\n self.A = self.Q_th / (self.F*U*self.DT_lm) # m^2\r\n # Calculate Cost\r\n K = np.array([4.1884,-0.2503,0.1974])\r\n if self.A <= 1000:\r\n # Purchase Cost\r\n C_p0 = 10.0**(K[0]+K[1]*np.log10(self.A)+K[2]*np.log10(self.A)**2.0)\r\n else:\r\n C_p0 = 11665.8957777+152.0393955*self.A\r\n P_g = self.P_hot-1.0 # barg\r\n if P_g < 5.0: \r\n C = np.array([0.0,0.0,0.0])\r\n else:\r\n C = np.array([0.03881,-0.11272,0.08183])\r\n # Pressure Factor\r\n F_P = 10.0**(C[0]+C[1]*np.log10(P_g)+C[2]*np.log10(P_g)**2.0)\r\n F_M = 2.75 # Material Factor (Stainless Steel)\r\n B_1 = 1.63\r\n B_2 = 1.66\r\n self.cost = (553.9/397.0)*C_p0*(B_1 + B_2*F_M*F_P)\r\n\r\nclass Reheater:\r\n \"\"\"\r\n 1 = Reheat Stream (Hot side)\r\n 2 = Residual Steam (Cold side)\r\n \"\"\"\r\n def __init__(self,ID,Pin1,Tin1,mdot1,x_in1,Pin2,Tin2,mdot2,Satd_in2):\r\n self.y = 1\r\n # Hot stream vars\r\n self.ID = ID\r\n self.Pin1 = Pin1\r\n self.Tin1 = Tin1\r\n self.mdot1 = mdot1\r\n self.x_in1 = x_in1\r\n self.Satd_in1 = False\r\n self.Tout1 = 0.0\r\n self.Pout1 = 0.0\r\n self.x_out1 = 0.0\r\n # Cold stream vars\r\n self.Pin2 = Pin2\r\n self.Tin2 = Tin2\r\n self.mdot2 = mdot2\r\n self.Satd_in2 = Satd_in2\r\n self.s_lim = 0 # Fake ID\r\n self.Tout2 = 0.0\r\n self.Pout2 = 0.0\r\n # Overall vars\r\n self.DT_lm = 0.0\r\n self.F = 0.0\r\n self.A = 0.0\r\n self.q = 0.0 # kW\r\n self.pinch = False\r\n self.cost = 0.0\r\n def calc_RH(self):\r\n self.pinch = False\r\n # If reheater does exist\r\n if self.y == 1:\r\n # Check for a pinch point\r\n if (self.Tin1 - self.Tin2) <= 10.0:\r\n self.pinch = True\r\n self.A = 0.0\r\n self.q = 0.0\r\n self.Pout1 = self.Pin1\r\n self.Tout1 = self.Tin1\r\n self.x_out1 = self.x_in1\r\n self.Pout2 = self.Pin2\r\n self.Tout2 = self.Tin2\r\n # Proceed with calcs if no pinch point\r\n else:\r\n # For now, ignore pressure drops in the RHs\r\n self.Pout1 = self.Pin1\r\n self.Pout2 = self.Pin2\r\n # Calcs if limiting stream is 1\r\n To1 = self.Tin2 + 10.0\r\n Hin1 = 0.0 # Fake value\r\n if self.Satd_in1 == False and self.x_in1 == 0.0:\r\n Hin1 = h_pT(self.Pin1,self.Tin1)\r\n elif self.Satd_in1 == True:\r\n Hin1 = h_Tx(self.Tin1,self.x_in1)\r\n elif 0.0 < self.x_in1 < 1.0:\r\n Hin1 = h_Tx(self.Tin1, self.x_in1)\r\n elif self.Satd_in1 == False and self.x_in1 == 1.0:\r\n Hin1 = h_pT(self.Pin1,self.Tin1)\r\n Ho1 = h_pT(self.Pout1,To1)\r\n q1 = self.mdot1*(Hin1-Ho1)\r\n # Calcs if limiting stream is 2\r\n To2 = self.Tin1 - 10.0\r\n if self.Satd_in2 == True:\r\n Hin2 = h_Tx(self.Tin2,1.0)\r\n else:\r\n Hin2 = h_pT(self.Pin2,self.Tin2)\r\n Ho2 = h_pT(self.Pout2,To2)\r\n q2 = self.mdot2*(Ho2-Hin2)\r\n # Determine which stream is actually limiting\r\n if q1 < q2:\r\n self.s_lim = 1\r\n else:\r\n self.s_lim = 2\r\n # If limiting stream is 1:\r\n if self.s_lim == 1:\r\n self.Tout1 = To1\r\n self.q = 
q1\r\n # Apply q to Turbine stream, find the new Tout2\r\n DH_2 = self.q / self.mdot2\r\n Hout2 = Hin2 + DH_2\r\n self.Tout2 = T_ph(self.Pout2,Hout2)\r\n # If limiting stream is 2:\r\n if self.s_lim == 2:\r\n self.Tout2 = To2\r\n self.q = q2\r\n # Apply q to Reheat stream, find the new Tout1 and x_out1\r\n DH_1 = self.q / self.mdot1\r\n Hout1 = Hin1 - DH_1\r\n self.Tout1 = T_ph(self.Pout1,Hout1)\r\n self.x_out1 = x_ph(self.Pout1,Hout1)\r\n # If no pinch point, Calc the Cost\r\n if self.pinch == False:\r\n # Find required heat-transfer Area, A\r\n DT_1 = self.Tin1-self.Tout2\r\n DT_2 = self.Tout1-self.Tin2\r\n self.DT_lm = (DT_2-DT_1)/np.log(DT_2/DT_1)\r\n if self.x_in1 > 0.0:\r\n self.F = 1.0\r\n else:\r\n R = (self.Tin1-self.Tout1)/(self.Tout2-self.Tin2)\r\n P = (self.Tout2-self.Tin2)/(self.Tin1-self.Tin2)\r\n inside = (2.0-P*(R+1.0-np.sqrt(R**2.0+1.0)))/(2.0-P*(R+1.0+np.sqrt(R**2.0+1.0)))\r\n self.F = np.sqrt(R**2.0+1.0)/(R-1.0)*np.log((1.0-P)/(1.0-P*R))/np.log(inside)\r\n \"\"\" U = 1 kW/m^2*K until changed on 2.17.17 \"\"\"\r\n U = 3 # kW/m^2*K\r\n self.A = self.q / (self.F*U*self.DT_lm) # m^2\r\n # Calcuate Cost\r\n K = np.array([4.1884,-0.2503,0.1974])\r\n if self.A <= 1000:\r\n # Purchase Cost\r\n C_p0 = 10.0**(K[0]+K[1]*np.log10(self.A)+K[2]*np.log10(self.A)**2.0)\r\n else:\r\n C_p0 = 11665.8957777+152.0393955*self.A\r\n P_g = self.Pin1-1.0 # barg\r\n if P_g < 5.0: \r\n C = np.array([0.0,0.0,0.0])\r\n else:\r\n C = np.array([0.03881,-0.11272,0.08183])\r\n # Pressure Factor\r\n F_P = 10.0**(C[0]+C[1]*np.log10(P_g)+C[2]*np.log10(P_g)**2.0)\r\n F_M = 2.75 # Material Factor (Stainless Steel)\r\n B_1 = 1.63\r\n B_2 = 1.66\r\n self.cost = (553.9/397.0)*C_p0*(B_1 + B_2*F_M*F_P)\r\n # If there was a pinch point, enact a penalty in the cost\r\n elif self.pinch == True:\r\n self.cost = 15.0e9 / 5.0\r\n # If reheater does not exist\r\n else:\r\n self.cost = 0.0\r\n\r\nclass MS: # Moisture Separator\r\n \"\"\"\r\n V = vapor outlet\r\n L = liquid outlet\r\n \"\"\"\r\n def __init__(self,Pin,Tin,mdot,x_in):\r\n self.y = 1\r\n self.P = Pin\r\n self.T = Tin\r\n self.mdot = mdot\r\n self.x_in = x_in\r\n self.mdot_V = 0.0\r\n self.mdot_L = 0.0\r\n self.D = 0.0 # m\r\n self.V = 0.0 # m^3\r\n self.cost = 0.0\r\n def calc_MS(self):\r\n self.mdot_V = self.mdot * self.x_in\r\n self.mdot_L = self.mdot * (1-self.x_in)\r\n # If MS does exist\r\n if self.y == 1:\r\n # Find the volume required, A\r\n rho_steam = rhoV_P(self.P) # kg/m^3\r\n rho_water = rhoL_P(self.P) # kg/m^3\r\n res_time = 60.0 # sec\r\n A = res_time*(self.mdot_V/rho_steam+self.mdot_L/rho_water) # m^3\r\n self.V = A\r\n # Calcuate Cost\r\n K = np.array([3.4974,0.4485,0.1074])\r\n if A <= 520:\r\n # Purchase Cost\r\n C_p0 = 10.0**(K[0]+K[1]*np.log10(A)+K[2]*np.log10(A)**2.0)\r\n else:\r\n C_p0 = 637.3687*A-9491.036783\r\n P_g = self.P-1 # barg\r\n self.D = A/100.0 # m\r\n # Pressure Factor\r\n F_P = ((P_g+1.0)*self.D/(2.0*(850.0-0.6*(P_g+1.0)))+0.00315)/0.0063\r\n F_M = 3.12 # Material Factor (Stainless Steel)\r\n B_1 = 2.25\r\n B_2 = 1.82\r\n self.cost = C_p0*(B_1 + B_2*F_M*F_P)\r\n # If MS does not exist\r\n else:\r\n self.cost = 0.0\r\n\r\nclass Condenser:\r\n def __init__(self,Pin,Tin,mdot,x_in):\r\n # Initialize vars\r\n self.y = 1\r\n self.Pin = Pin\r\n self.Tin = Tin\r\n self.mdot = mdot\r\n self.x_in = x_in\r\n self.q = 0.0\r\n self.Pout = 0.0\r\n self.Tout = 0.0\r\n self.x_out = 0.0\r\n self.A = 0.0\r\n self.cost = 0.0\r\n def calc_Condenser(self):\r\n # Calculate Q across HX\r\n Hin = h_Tx(self.Tin,self.x_in)\r\n self.Pout = 
self.Pin\r\n self.Tout = self.Tin\r\n Hout = h_Tx(self.Tout,self.x_out)\r\n DH = Hin - Hout\r\n self.q = self.mdot * DH / 1000 # MW\r\n # Find required heat-transfer Area, A\r\n DT_1 = self.Tin-30.0\r\n DT_2 = self.Tout-25.0\r\n DT_lm = (DT_2-DT_1)/np.log(DT_2/DT_1)\r\n R = (self.Tin-self.Tout)/(30.0-25.0)\r\n P = (30.0-25.0)/(self.Tin-25.0)\r\n inside = (2.0-P*(R+1.0-np.sqrt(R**2.0+1.0)))/(2.0-P*(R+1.0+np.sqrt(R**2.0+1.0)))\r\n F = np.sqrt(R**2.0+1.0)/(R-1.0)*np.log((1.0-P)/(1.0-P*R))/np.log(inside)\r\n U = 1 # kW/m^2*K\r\n self.A = self.q / (F*U*DT_lm) # m^2\r\n # Calcuate Cost\r\n K = np.array([4.8306,-0.8509,0.3187])\r\n if self.A <= 1000:\r\n # Purchase Cost\r\n C_p0 = 10.0**(K[0]+K[1]*np.log10(self.A)+K[2]*np.log10(self.A)**2.0)\r\n else:\r\n C_p0 = 146.21105*self.A-6225.7924\r\n P_g = self.Pin-1.0 # barg\r\n F_P = 0.0\r\n # Pressure Factor\r\n if P_g < 5.0: \r\n C = np.array([0.0,0.0,0.0])\r\n F_P = 1.0\r\n else:\r\n C = np.array([0.03881,-0.11272,0.08183])\r\n F_P = 10.0**(C[0]+C[1]*np.log10(P_g)+C[2]*np.log10(P_g)**2.0)\r\n F_M = 2.75 # Material Factor (Stainless Steel)\r\n B_1 = 1.63\r\n B_2 = 1.66\r\n # If condenser does exist, which it should...\r\n if self.y == 1:\r\n self.cost = (553.9/397.0)*C_p0*(B_1 + B_2*F_M*F_P)\r\n else:\r\n self.cost = 0.0\r\n\r\nclass FWH: # Feedwater Heater\r\n def __init__(self,Pin1,Tin1,mdot1,x_in1,Pin2,Tin2,mdot2):\r\n # Initialize vars\r\n self.y = 1\r\n self.Pin1 = Pin1\r\n self.Tin1 = Tin1\r\n self.mdot1 = mdot1\r\n self.x_in1 = x_in1\r\n self.Pin2 = Pin2\r\n self.Tin2 = Tin2\r\n self.mdot2 = mdot2\r\n self.Pout = 0.0\r\n self.Tout = 0.0\r\n self.mdot = 0.0\r\n self.x_out = 0.0\r\n self.D = 0.0 # m\r\n self.V = 0.0 # m^3\r\n self.cost = 0.0\r\n def calc_FWH(self):\r\n # If FWH does exist\r\n if self.y == 1:\r\n # Calculate the outlet Enthalpy\r\n self.Pout = self.Pin2\r\n self.mdot = self.mdot1 + self.mdot2\r\n #''' Added 0.005 degC to Tin1, b/c of a once-occuring issue with IAPWS97 '''\r\n Hin1 = h_Tx(self.Tin1,self.x_in1)\r\n x1 = self.mdot1 / self.mdot\r\n Hin2 = h_pT(self.Pin2,self.Tin2)\r\n x2 = self.mdot2 / self.mdot\r\n Hout = x1*Hin1 + x2*Hin2\r\n self.Tout = T_ph(self.Pout,Hout)\r\n self.x_out = x_ph(self.Pout,Hout)\r\n # Find the volume required, A\r\n rho_water = 10**3.0 # kg/m^3\r\n res_time = 60.0 # sec\r\n self.V = res_time*(self.mdot/rho_water) # m^3\r\n A = self.V\r\n # Calcuate Cost\r\n K = np.array([3.4974,0.4485,0.1074])\r\n if A <= 520:\r\n # Purchase Cost\r\n C_p0 = 10.0**(K[0]+K[1]*np.log10(A)+K[2]*np.log10(A)**2.0)\r\n else:\r\n C_p0 = 637.3687*A-9491.036783\r\n P_g = self.Pout-1 # barg\r\n self.D = A/100.0 # m\r\n # Pressure Factor\r\n F_P = ((P_g+1.0)*self.D/(2.0*(850.0-0.6*(P_g+1.0)))+0.00315)/0.0063\r\n F_M = 3.12 # Material Factor (Stainless Steel)\r\n B_1 = 2.25\r\n B_2 = 1.82\r\n self.cost = (553.9/397.0)*C_p0*(B_1 + B_2*F_M*F_P)\r\n # If FWH does not exist\r\n else:\r\n self.cost = 0.0\r\n\r\n\r\n###############################################################################\r\n\"\"\"\"\"\"\"\"\" All Core Loop Equipment Classes \"\"\"\"\"\"\"\"\" #######################\r\n###############################################################################\r\n\r\nclass Pump_rcp:\r\n def __init__(self,W,P_out):\r\n self.Pout = P_out # bar\r\n self.W_th = W # MW\r\n self.eff = 0.85\r\n self.cost = 0.0\r\n def calc_Pump(self):\r\n # Calcuate Cost\r\n A = self.W_th * 1000 # kW\r\n K = np.array([3.3892,0.0536,0.1538])\r\n if A <= 300:\r\n # Purchase Cost\r\n C_p0 = 10.0**(K[0]+K[1]*np.log10(A)+K[2]*np.log10(A)**2.0)\r\n 
else:\r\n C_p0 = 5371.29236+79.50315*A\r\n P_g = self.Pout - 1.0 # barg\r\n C = np.zeros(3)\r\n if P_g >= 10.0:\r\n C = np.array([-0.3935,0.3957,-0.00226])\r\n # Pressure Factor\r\n F_P = 10.0**(C[0]+C[1]*np.log10(P_g)+C[2]*np.log10(P_g)**2.0)\r\n F_M = 2.28 # Material Factor (Stainless Steel)\r\n B_1 = 1.89\r\n B_2 = 1.35\r\n self.cost = (553.9/397.0)*C_p0*(B_1 + B_2*F_M*F_P)" ]
[ [ "numpy.sqrt", "numpy.zeros", "numpy.log", "numpy.log10", "numpy.array" ] ]
inessus/ai-skills
[ "527f32d49887f06eee357c83bb6a9a21edc69bc5" ]
[ "src/model/pytorch/21-RL/DeepRL-Tutorials/agents/Quantile_Rainbow.py" ]
[ "import numpy as np\n\nimport torch\n\nfrom agents.DQN import Model as DQN_Agent\nfrom networks.network_bodies import SimpleBody, AtariBody\nfrom networks.networks import DuelingQRDQN\nfrom utils.ReplayMemory import PrioritizedReplayMemory\n\nclass Model(DQN_Agent):\n def __init__(self, static_policy=False, env=None, config=None):\n self.num_quantiles = config.QUANTILES\n self.cumulative_density = torch.tensor((2 * np.arange(self.num_quantiles) + 1) / (2.0 * self.num_quantiles), device=config.device, dtype=torch.float) \n self.quantile_weight = 1.0 / self.num_quantiles\n\n super(Model, self).__init__(static_policy, env, config)\n\n self.nsteps=max(self.nsteps, 3)\n \n \n def declare_networks(self):\n self.model = DuelingQRDQN(self.env.observation_space.shape, self.env.action_space.n, noisy=True, sigma_init=self.sigma_init, quantiles=self.num_quantiles)\n self.target_model = DuelingQRDQN(self.env.observation_space.shape, self.env.action_space.n, noisy=True, sigma_init=self.sigma_init, quantiles=self.num_quantiles)\n\n def declare_memory(self):\n self.memory = PrioritizedReplayMemory(self.experience_replay_size, self.priority_alpha, self.priority_beta_start, self.priority_beta_frames)\n\n def next_distribution(self, batch_vars):\n batch_state, batch_action, batch_reward, non_final_next_states, non_final_mask, empty_next_state_values, indices, weights = batch_vars\n\n with torch.no_grad():\n quantiles_next = torch.zeros((self.batch_size, self.num_quantiles), device=self.device, dtype=torch.float)\n if not empty_next_state_values:\n self.target_model.sample_noise()\n max_next_action = self.get_max_next_state_action(non_final_next_states)\n quantiles_next[non_final_mask] = self.target_model(non_final_next_states).gather(1, max_next_action).squeeze(dim=1)\n\n quantiles_next = batch_reward + ((self.gamma**self.nsteps)*quantiles_next)\n\n return quantiles_next\n \n def compute_loss(self, batch_vars):\n batch_state, batch_action, batch_reward, non_final_next_states, non_final_mask, empty_next_state_values, indices, weights = batch_vars\n\n batch_action = batch_action.unsqueeze(dim=-1).expand(-1, -1, self.num_quantiles)\n\n self.model.sample_noise()\n quantiles = self.model(batch_state)\n quantiles = quantiles.gather(1, batch_action).squeeze(1)\n\n quantiles_next = self.next_distribution(batch_vars)\n \n diff = quantiles_next.t().unsqueeze(-1) - quantiles.unsqueeze(0)\n\n loss = self.huber(diff) * torch.abs(self.cumulative_density.view(1, -1) - (diff < 0).to(torch.float))\n loss = loss.transpose(0,1)\n self.memory.update_priorities(indices, loss.detach().mean(1).sum(-1).abs().cpu().numpy().tolist())\n loss = loss * weights.view(self.batch_size, 1, 1)\n loss = loss.mean(1).sum(-1).mean()\n\n return loss\n\n def get_action(self, s, eps):\n with torch.no_grad():\n X = torch.tensor([s], device=self.device, dtype=torch.float) \n self.model.sample_noise()\n a = (self.model(X) * self.quantile_weight).sum(dim=2).max(dim=1)[1]\n return a.item()\n\n def get_max_next_state_action(self, next_states):\n next_dist = self.model(next_states) * self.quantile_weight\n return next_dist.sum(dim=2).max(1)[1].view(next_states.size(0), 1, 1).expand(-1, -1, self.num_quantiles)" ]
[ [ "torch.zeros", "torch.no_grad", "torch.tensor", "numpy.arange" ] ]
pedronarloch/jMetalPy_phD
[ "c16a31a65c23a203d439f33a4d99668982e7c25b" ]
[ "jmetal/problem/singleobjective/CEC2013LSGO.py" ]
[ "from cec2013lsgo.cec2013 import Benchmark\nimport numpy as np\nfrom jmetal.core.problem import FloatProblem, S\n\n\nclass CEC2013LSGO(FloatProblem):\n\n def __init__(self, function_type: int = 0, number_of_variables: int = 1000):\n\n super(CEC2013LSGO, self).__init__()\n self.number_of_objectives = 1\n self.number_of_constraints = 0\n self.number_of_variables = number_of_variables\n\n self.obj_directions = [self.MINIMIZE]\n self.obj_labels = ['Fitness']\n\n self.function_type = function_type\n\n self.benchmark = Benchmark()\n\n info = self.benchmark.get_info(self.function_type)\n self.lower_bound = [info['lower'] for _ in range(self.number_of_variables)]\n self.upper_bound = [info['upper'] for _ in range(self.number_of_variables)]\n self.evaluator = self.benchmark.get_function(self.function_type)\n\n def evaluate(self, solution: S) -> S:\n sol = np.array(solution.variables)\n solution.objectives[0] = self.evaluator(sol)\n\n return solution\n\n def get_name(self) -> str:\n return \"CEC_2013LSGO_F\"+str(self.function_type)\n" ]
[ [ "numpy.array" ] ]
ahmedshahin9/melanoma.1.0
[ "db6e458ae7376993c0d3fccfe56b7e88b6f936f0" ]
[ "batches.py" ]
[ "import os\nfrom scipy import misc\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage.util import img_as_bool\n\n# this script creates batches from the dataset\n# batch size: 224 * 224 * 3\n# we save the batch and its ground truth in two separate folders \"batches\" , \"batches_ground\n\n\npath = \"/home/ahmed/melanoma/ISBI2016_ISIC_Part1_Training_Data\"\nlisting = sorted(os.listdir(path))\nk = 0\nj = 0\ny = []\nwhile (k < 900):\n cur_img_id = np.random.randint(0,900 - k)\n cur_img = listing[cur_img_id]\n x_img = misc.imread(path + \"/\" + cur_img)\n y_img = img_as_bool(misc.imread(path.replace(\"ISBI2016_ISIC_Part1_Training_Data\",\"ISBI2016_ISIC_Part1_Training_GroundTruth\") + \"/\" + cur_img.replace(\".jpg\",\"_Segmentation.png\")))\n # now we have chosen an image\n # get fore and back vectors for the image\n\n fore_idx = np.where(y_img == True)\n back_idx = np.where(y_img == False)\n # in the fore, pick 55 random elements\n i_f = 0\n i_b = 0\n a0 = fore_idx[0]\n a1 = fore_idx[1]\n b0 = back_idx[0]\n b1 = back_idx[1]\n \n while ((i_f < 55) and (len(a0) > 0)):\n k_fore = np.random.randint(0,len(a0))\n x_f = a0[k_fore]\n y_f = a1[k_fore]\n if (x_f >= 112) and (y_f >= 112) and (x_f+112 < y_img.shape[0]) and (y_f+112 < y_img.shape[1]):\n misc.imsave('/home/ahmed/melanoma/batches/{2}_fore_{0}_batch_{1}.jpg'.format(cur_img.replace(\".jpg\",\"\").replace(\"ISIC_\",\"\"),i_f, j),x_img[x_f-112: x_f+112, y_f-112:y_f+112,:])\n misc.imsave('/home/ahmed/melanoma/batches_ground/{2}_fore_{0}_batch_{1}_mask.jpg'.format(cur_img.replace(\".jpg\",\"\").replace(\"ISIC_\",\"\"),i_f,j),y_img[x_f-112: x_f+112, y_f-112:y_f+112])\n u = x_img[x_f-112: x_f+112, y_f-112:y_f+112,:]\n if (u.shape[0] != 224) or (u.shape[1] != 224):\n print(\"ERROR\")\n i_f += 1\n j += 1\n y.append(1)\n\n a0 = np.delete(a0,k_fore)\n a1 = np.delete(a1,k_fore)\n# print(len(a0))\n\n # the same thing with the back\n while ((i_b < 55) and (len(b0) > 0)):\n k_back = np.random.randint(0,len(b0))\n x_b = b0[k_back]\n y_b = b1[k_back]\n if (x_b >= 112) and (y_b >= 112) and (x_b+112 < y_img.shape[0]) and (y_b+112 < y_img.shape[1]):\n misc.imsave('/home/ahmed/melanoma/batches/{2}_back_{0}_batch_{1}.jpg'.format(cur_img.replace(\".jpg\",\"\").replace(\"ISIC_\",\"\"),i_b,j),x_img[x_b-112: x_b+112, y_b-112:y_b+112,:])\n misc.imsave('/home/ahmed/melanoma/batches_ground/{2}_back_{0}_batch_{1}_mask.jpg'.format(cur_img.replace(\".jpg\",\"\").replace(\"ISIC_\",\"\"),i_b,j),y_img[x_b-112: x_b+112, y_b-112:y_b+112])\n n = x_img[x_b-112: x_b+112, y_b-112:y_b+112,:]\n if (n.shape[0] != 224) or (n.shape[1] != 224):\n print(\"ERROR\")\n i_b += 1\n j += 1\n y.append(0)\n b0 = np.delete(b0,k_back)\n b1 = np.delete(b1,k_back)\n# print(len(b0))\n print(k)\n k += 1\n" ]
[ [ "numpy.where", "numpy.random.randint", "scipy.misc.imread", "numpy.delete" ] ]
jacobboney/featuretools
[ "679aa0c9a3985942ce5278f56353b44c41958907" ]
[ "featuretools/demo/weather.py" ]
[ "import pandas as pd\n\nimport featuretools as ft\n\n\ndef load_weather(nrows=None,\n return_single_table=False):\n '''\n Load the Australian daily-min-temperatures weather dataset.\n\n Args:\n\n nrows (int): Passed to nrows in ``pd.read_csv``.\n return_single_table (bool): Exit the function early and return a dataframe.\n\n '''\n filename = \"daily-min-temperatures.csv\"\n print('Downloading data ...')\n url = \"https://api.featurelabs.com/datasets/{}?library=featuretools&version={}\".format(filename, ft.__version__)\n data = pd.read_csv(url, index_col=None, nrows=nrows)\n if return_single_table:\n return data\n es = make_es(data)\n return es\n\n\ndef make_es(data):\n es = ft.EntitySet('Weather Data')\n\n es.add_dataframe(data,\n dataframe_name='temperatures',\n index='id',\n make_index=True,\n time_index='Date')\n return es\n" ]
[ [ "pandas.read_csv" ] ]
kbschliep/pycroscopy
[ "4f18e7b453aca496e611603616112c1a1a524beb" ]
[ "pycroscopy/io/translators/time_series.py" ]
[ "\"\"\"\nCreated on Feb 9, 2016\n\n@author: Chris Smith\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import, unicode_literals\n\nimport os\n\nimport numpy as np\nfrom skimage.measure import block_reduce\nimport h5py\n\nfrom .df_utils.dm_utils import read_dm3\nfrom pyUSID.io.image import read_image\nfrom pyUSID.io.translator import Translator, generate_dummy_main_parms\nfrom pyUSID.io.write_utils import Dimension, calc_chunks\nfrom pyUSID.io.hdf_utils import get_h5_obj_refs, link_as_main, write_main_dataset, \\\n write_simple_attrs, create_indexed_group\n\n\nclass MovieTranslator(Translator):\n \"\"\"\n Translate Pytchography data from a set of images to an HDF5 file\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(MovieTranslator, self).__init__(*args, **kwargs)\n\n self.rebin = False\n self.bin_factor = 1\n self.h5_file = None\n self.binning_func = self.__no_bin\n self.bin_func = None\n self.image_ext = None\n\n def translate(self, h5_path, image_path, bin_factor=None, bin_func=np.mean, start_image=0, image_type='.tif'):\n \"\"\"\n Basic method that adds Movie data to existing hdf5 file\n\n Parameters\n ----------------\n h5_path : str\n Absolute path to where the HDF5 file should be located\n image_path : str\n Absolute path to folder holding the image files\n bin_factor : array_like of uint, optional\n Downsampling factor for each dimension. Default is None.\n bin_func : callable, optional\n Function which will be called to calculate the return value\n of each block. Function must implement an axis parameter,\n i.e. numpy.mean. Ignored if bin_factor is None. Default is\n numpy.mean.\n start_image : int, optional\n Integer denoting which image in the file path should be considered the starting\n point. Default is 0, start with the first image on the list.\n image_type : str, optional\n File extension of images to load. Used to filter out other files in the same\n directory. Default .tif\n\n Returns\n ----------\n h5_main : h5py.Dataset\n HDF5 Dataset object that contains the flattened images\n\n \"\"\"\n self.image_ext = image_type\n\n image_path = os.path.abspath(image_path)\n h5_path = os.path.abspath(h5_path)\n \n if os.path.exists(h5_path):\n os.remove(h5_path)\n\n self.h5_file = h5py.File(h5_path, 'w')\n \n '''\n Get the list of all files with the provided extension and the number of files in the list\n '''\n if os.path.isfile(image_path):\n file_list, image_parms = read_dm3(image_path)\n usize = image_parms['SuperScan-Height']\n vsize = image_parms['SuperScan-Width']\n data_type = file_list.dtype.type\n num_images = file_list.shape[0] - start_image\n\n else:\n file_list = self._parse_file_path(image_path, image_type)\n\n # Set up the basic parameters associated with this set of images\n (usize, vsize), data_type, image_parms = self._getimagesize(os.path.join(image_path, file_list[0]))\n\n num_images = len(file_list) - start_image\n\n '''\n Check if a bin_factor is given. 
Set up binning objects if it is.\n '''\n if bin_factor is not None:\n self.rebin = True\n if isinstance(bin_factor, int):\n self.bin_factor = (bin_factor, bin_factor)\n elif len(bin_factor) == 2:\n self.bin_factor = tuple(bin_factor)\n else:\n raise ValueError('Input parameter `bin_factor` must be a length 2 array_like or an integer.\\n' +\n '{} was given.'.format(bin_factor))\n usize = int(usize / self.bin_factor[0])\n vsize = int(vsize / self.bin_factor[1])\n self.binning_func = block_reduce\n self.bin_func = bin_func\n data_type = np.float32\n\n h5_main, h5_mean_spec, h5_ronch = self._setupH5(usize, vsize, np.float32, num_images, image_parms)\n\n self._read_data(file_list[start_image:],\n h5_main, h5_mean_spec, h5_ronch, image_path)\n\n return h5_main\n\n def _read_data(self, image_stack, h5_main, h5_mean_spec, h5_ronch, image_path):\n \"\"\"\n Iterates over the images in `file_list`, reading each image and downsampling if\n reqeusted, and writes the flattened image to file. Also builds the Mean_Ronchigram\n and the Spectroscopic_Mean datasets at the same time.\n\n Parameters\n ----------\n image_stack : list of str\n List of all files in `image_path` that will be read\n h5_main : h5py.Dataset\n Dataset which will hold the Ronchigrams\n h5_mean_spec : h5py.Dataset\n Dataset which will hold the Spectroscopic Mean\n h5_ronch : h5py.Dataset\n Dataset which will hold the Mean Ronchigram\n image_path : str\n Absolute file path to the directory which hold the images\n\n Returns\n -------\n None\n \"\"\"\n\n mean_ronch = np.zeros(h5_ronch.shape, dtype=np.float32)\n\n num_files = len(image_stack)\n\n if os.path.isfile(image_path):\n self.__save_dm3_frames(image_stack, h5_main, h5_mean_spec, h5_ronch, mean_ronch, num_files)\n else:\n self.__read_image_files(image_stack, h5_main, h5_mean_spec, h5_ronch, image_path, mean_ronch, num_files)\n\n def __save_dm3_frames(self, image_stack, h5_main, h5_mean_spec, h5_ronch, mean_ronch, num_frames):\n \"\"\"\n\n :param image_stack:\n :param h5_main:\n :param h5_mean_spec:\n :param h5_ronch:\n :param mean_ronch:\n :param num_frames:\n :return:\n \"\"\"\n for iframe, thisframe in enumerate(image_stack):\n selected = (iframe + 1) % round(num_frames / 16) == 0\n if selected:\n print('Processing file...{}% - reading: {}'.format(round(100 * iframe / num_frames), iframe))\n image = self.binning_func(thisframe, self.bin_factor, self.bin_func).flatten()\n h5_main[:, iframe] = image\n\n h5_mean_spec[iframe] = np.mean(image)\n\n mean_ronch += image\n\n self.h5_file.flush()\n\n h5_ronch[:] = mean_ronch / num_frames\n self.h5_file.flush()\n\n def __read_image_files(self, image_stack, h5_main, h5_mean_spec, h5_ronch, image_path, mean_ronch, num_files):\n \"\"\"\n Read each image from `file_list` and save it in `h5_main`.\n\n Parameters\n ----------\n image_stack:\n :param h5_main:\n :param h5_mean_spec:\n :param h5_ronch:\n :param image_path:\n :param mean_ronch:\n :param num_files:\n :return:\n \"\"\"\n for ifile, thisfile in enumerate(image_stack):\n\n selected = (ifile + 1) % round(num_files / 16) == 0\n if selected:\n print('Processing file...{}% - reading: {}'.format(round(100 * ifile / num_files), thisfile))\n\n image = read_image(os.path.join(image_path, thisfile), greyscale=True)\n image = self.binning_func(image, self.bin_factor, self.bin_func)\n image = image.flatten()\n h5_main[:, ifile] = image\n\n h5_mean_spec[ifile] = np.mean(image)\n\n mean_ronch += image\n\n self.h5_file.flush()\n h5_ronch[:] = mean_ronch / num_files\n self.h5_file.flush()\n\n 
@staticmethod\n def downSampRoncVec(ronch_vec, binning_factor):\n \"\"\"\n Downsample the image by taking the mean over nearby values\n\n Parameters\n ----------\n ronch_vec : ndarray\n Image data\n binning_factor : int\n factor to reduce the size of the image by\n\n Returns\n -------\n ronc_mat3_mean : ndarray\n Flattened downsampled image\n \"\"\"\n ccd_pix = int(np.sqrt(ronch_vec.size))\n ronc_mat = ronch_vec.reshape(ccd_pix, ccd_pix)\n ronc_mat2 = ronc_mat.reshape(ccd_pix, ccd_pix / binning_factor, binning_factor)\n ronc_mat2_mean = ronc_mat2.mean(2) # take the mean along the 3rd dimension\n ronc_mat3 = ronc_mat2_mean.reshape(ccd_pix / binning_factor, binning_factor, -1)\n ronc_mat3_mean = ronc_mat3.mean(1)\n\n return ronc_mat3_mean.reshape(-1)\n\n @staticmethod\n def _parse_file_path(path, ftype='all'):\n \"\"\"\n Returns a list of all files in the directory given by path\n \n Parameters\n ---------------\n path : string / unicode\n absolute path to directory containing files\n ftype : this file types to return in file_list. (optional. Default is all) \n \n Returns\n ----------\n file_list : list of strings\n names of all files in directory located at path\n numfiles : unsigned int\n number of files in file_list\n \"\"\"\n\n # Get all files in directory\n file_list = os.listdir(path)\n\n # If no file type specified, return full list\n if ftype == 'all':\n return file_list\n\n # Remove files of type other than the request ftype from the list\n new_file_list = []\n for this_thing in file_list:\n # Make sure it's really a file\n if not os.path.isfile(os.path.join(path, this_thing)):\n continue\n\n split = os.path.splitext(this_thing)\n ext = split[1]\n if ext == ftype:\n new_file_list.append(os.path.join(path, this_thing))\n\n return new_file_list\n\n @staticmethod\n def _getimagesize(image):\n \"\"\"\n Returns the x and y size of the image in pixels\n \n Parameters\n ------------\n image : string / unicode\n absolute path to the image file\n \n Returns\n -----------\n (size, tmp.dtype) : Tuple \n \n size : unsigned integer\n x and y dimenstions of image\n dtype : data type\n Datatype of the image\n \"\"\"\n tmp, parms = read_image(image, get_parms=True)\n size = tmp.shape\n\n return size, tmp.dtype.type, parms\n\n def _setupH5(self, usize, vsize, data_type, num_images, main_parms):\n \"\"\"\n Setup the HDF5 file in which to store the data including creating\n the Position and Spectroscopic datasets\n\n Parameters\n ----------\n usize : int\n Number of pixel columns in the images\n vsize : int\n Number of pixel rows in the images\n data_type : type\n Data type to save image as\n num_images : int\n Number of images in the movie\n main_parms : dict\n\n\n Returns\n -------\n h5_main : h5py.Dataset\n HDF5 Dataset that the images will be written into\n h5_mean_spec : h5py.Dataset\n HDF5 Dataset that the mean over all positions will be written\n into\n h5_ronch : h5py.Dataset\n HDF5 Dateset that the mean over all Spectroscopic steps will be\n written into\n \"\"\"\n num_pixels = usize * vsize\n\n root_parms = generate_dummy_main_parms()\n root_parms['data_type'] = 'PtychographyData'\n\n main_parms['num_images'] = num_images\n main_parms['image_size_u'] = usize\n main_parms['image_size_v'] = vsize\n main_parms['num_pixels'] = num_pixels\n main_parms['translator'] = 'Movie'\n\n # Create the hdf5 data Group\n write_simple_attrs(self.h5_file, root_parms)\n meas_grp = create_indexed_group(self.h5_file, 'Measurement')\n write_simple_attrs(meas_grp, main_parms)\n chan_grp = 
create_indexed_group(meas_grp, 'Channel')\n\n # Build the Position and Spectroscopic Datasets\n spec_dim = Dimension('Time', 's', np.arange(num_images))\n pos_dims = [Dimension('X', 'a.u.', np.arange(usize)), Dimension('Y', 'a.u.', np.arange(vsize))]\n\n ds_chunking = calc_chunks([num_pixels, num_images],\n data_type(0).itemsize,\n unit_chunks=(num_pixels, 1))\n\n # Allocate space for Main_Data and Pixel averaged Data\n h5_main = write_main_dataset(chan_grp, (num_pixels, num_images), 'Raw_Data',\n 'Intensity', 'a.u.',\n pos_dims, spec_dim,\n chunks=ds_chunking, dtype=data_type)\n h5_ronch = meas_grp.create_dataset('Mean_Ronchigram',\n data=np.zeros(num_pixels, dtype=np.float32),\n dtype=np.float32)\n h5_mean_spec = meas_grp.create_dataset('Spectroscopic_Mean',\n data=np.zeros(num_images, dtype=np.float32),\n dtype=np.float32)\n\n self.h5_file.flush()\n\n return h5_main, h5_mean_spec, h5_ronch\n\n @staticmethod\n def __no_bin(image, *args, **kwargs):\n \"\"\"\n Does absolutely nothing to the image. Exists so that we can have\n a bin function to call whether we actually rebin the image or not.\n\n Parameters\n ----------\n image : ndarray\n Image\n args:\n Argument list\n kwargs:\n Keyword argument list\n\n Returns\n -------\n image : ndarray\n The input image\n \"\"\"\n return image\n" ]
[ [ "numpy.mean", "numpy.arange", "numpy.sqrt", "numpy.zeros" ] ]
UKPLab/conll2019-snopes-experiments
[ "102f4a05cfba781036bd3a7b06022246e53765ad" ]
[ "src/rte_pac/feature_extaction/average_word_embedding.py" ]
[ "from gensim.models import KeyedVectors\nimport numpy as np\nimport nltk\n\n\n# model = KeyedVectors.load_word2vec_format('data/GoogleNews-vectors-negative300.bin',binary=True)\n#\n# vecab = model.vocab.keys()\n# print(len(vecab))\n# vector = model.get_vector('This')\n# print(type(vector))\n# print(vector.shape)\n\n\ndef load_model(filepath):\n return KeyedVectors.load_word2vec_format(filepath, binary=True)\n\n\ndef average_word_embedding(model, vocab, tokens):\n vectors = []\n count = 0\n\n for word in tokens:\n if word in vocab:\n count += 1\n vectors.append(model.get_vector(word))\n else:\n continue\n numpy_vectors = np.reshape(np.array(vectors), newshape=(count, 300))\n\n return np.sum(numpy_vectors, axis=0) / count if count != 0 else np.zeros((1, 300))\n\n\ndef get_word_embedding_features(texts):\n model = load_model('data/GoogleNews-vectors-negative300.bin')\n vocab = model.vocab.keys()\n embedding_features = np.zeros(shape=(len(texts), 300))\n\n for idx, text in enumerate(texts):\n tokens = nltk.word_tokenize(text, language='english')\n embedding_features[idx] = average_word_embedding(model, vocab, tokens)\n\n return embedding_features\n\n\ntexts = ['I love somebody!']\n" ]
[ [ "numpy.array", "numpy.sum", "numpy.zeros" ] ]
RimeT/p3_radio
[ "3d522a4356c62255cd93c6d74eb388a2e474dd00" ]
[ "radiomics/get_test_features.py" ]
[ "import argparse\n\nimport pandas as pd\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # debug\n parser.add_argument('feature_csv', help='feature csv file')\n parser.add_argument('tags_csv', help='tags csv. extract dataset=1 samples')\n parser.add_argument('out_csv', help='feature csv file')\n\n args = parser.parse_args()\n\n feat_df = pd.read_csv(args.feature_csv)\n tags = pd.read_csv(args.tags_csv)\n feat_cols = list(feat_df.columns)\n merged = pd.merge(feat_df, tags, on=['mask'])\n merged = merged[merged['dataset'] == 1]\n merged.to_csv(args.out_csv, columns=feat_cols, index=None)\n" ]
[ [ "pandas.read_csv", "pandas.merge" ] ]
eldarbaykiev/magtess-inversion-python
[ "e775fb0393c00eff9871cfa3e40c5784a89f3e5e" ]
[ "gmi_create_design_matrix.py" ]
[ "def main(dr):\n\n #**************** TESTING PARAMS (WOULD BE REMOVED)*******#\n TRUNCATE = True\n #**************** ---------------------------------*******#\n\n\n\n import gmi_misc\n #**************** PRINT HEADER ***************************#\n gmi_misc.print_header()\n print (\"Script no. 3: Creation of design matrices\")\n #**************** ------------ ***************************#\n\n\n #**************** GET WORKING DIRECTORY ******************#\n import os\n old_cwd = os.getcwd()\n gmi_misc.info('Current directory: '+ old_cwd)\n\n try:\n os.chdir(dr)\n except:\n gmi_misc.error('CAN NOT OPEN WORKING DIRECTORY '+ dr + ', ABORTING...')\n\n gmi_misc.info('WORKING DIRECTORY: '+ os.getcwd())\n #**************** --------------------- ******************#\n\n\n\n #**************** read parameters from file **************#\n import gmi_config\n gmi_config.read_config()\n #**************** ------------------------- **************#\n\n\n\n #************ check if previous stages were launched *****#\n import gmi_hash\n stages = [0,0,0]\n stages, dictionary = gmi_hash.read_dict('checksums.npy')\n\n if __name__ == '__main__':\n err = 0\n if stages[0] == -1:\n err += 1\n gmi_misc.warning('model.magtess was changed after the run of Script 1, restart Script no. 1 first! ABORTING...')\n elif stages[0] == 0:\n err += 1\n gmi_misc.warning('model.magtess was changed after the run of Script 1, restart Script no. 1 first! ABORTING...')\n else:\n pass\n\n if stages[1] == -1:\n err += 1\n gmi_misc.warning('Folder model was changed after the run of Script 2, restart Script no. 2 first! ABORTING...')\n elif stages[1] == 0:\n err += 1\n gmi_misc.warning('Folder model was changed after the run of Script 2, restart Script no. 2 first! ABORTING...')\n else:\n pass\n\n if err > 0:\n gmi_misc.error('CHECKSUM FAILED, ABORTING!')\n\n #**************** --------------------- ******************#\n\n\n\n #**************** CREATE DESIGN MATRICES *****************#\n import os\n import glob\n\n os.chdir('model')\n coefflist = glob.glob(\"*.coeff\")\n os.chdir('..')\n\n n_tess = len(coefflist)\n if n_tess == 0:\n gmi_misc.error(\"NO CALCULATED SH MODELS OF EACH TESSEROID'S MAGNETIC FIELD\")\n exit(-1)\n\n if gmi_config.MULTIPLICATOR != 1.0:\n gmi_misc.warning(\"NOTE: SUSCEPTIBILITY OF EACH TESSEROID IS MULTIPLIED BY \"+ str(gmi_config.MULTIPLICATOR))\n\n import pyshtools\n import numpy as np\n\n coeff_filename = 'model/tess_n' + str(0) + '.coeff'\n\n b = gmi_misc.read_coeffs_from_text_file(coeff_filename, gmi_config.N_MIN_CUTOFF)\n n_vals = len(b)\n\n gmi_misc.message('Assemblying design matrices...')\n from tqdm import tqdm\n A = np.zeros((n_tess, n_vals))\n A_ufilt = np.zeros((n_tess, n_vals))\n\n\n\n\n\n #if __name__ == '__main__':\n for i in tqdm(range(n_tess)):\n coeff_filename = 'model/tess_n' + str(i) + '.coeff'\n\n b = gmi_misc.read_coeffs_from_text_file(coeff_filename, gmi_config.N_MIN_CUTOFF)\n b_ufilt = gmi_misc.read_coeffs_from_text_file(coeff_filename, 0)\n A[i, :] = b[:]\n A_ufilt[i, :] = b_ufilt[:]\n '''\n else:\n from PyQt5 import QtWidgets\n\n app = QtWidgets.QApplication.instance()\n if app is None:\n # if it does not exist then a QApplication is created\n app = QtWidgets.QApplication([])\n\n from progress_bar import ProgressBar\n pb = ProgressBar()\n\n for i in range(n_tess):\n coeff_filename = 'model/tess_n' + str(i) + '.coeff'\n\n b = gmi_misc.read_coeffs_from_text_file(coeff_filename, gmi_config.N_MIN_CUTOFF)\n b_ufilt = gmi_misc.read_coeffs_from_text_file(coeff_filename, 0)\n A[i, :] = b[:]\n 
A_ufilt[i, :] = b_ufilt[:]\n\n pb.setValue(((i + 1) / n_tess) * 100)\n app.processEvents()\n\n pb.close()\n '''\n\n gmi_misc.ok('...done')\n\n #**************** SAVE MATRICES *****************#\n\n np.save('design_matrix_shcoeff', A)\n np.save('design_matrix_ufilt_shcoeff', A_ufilt)\n\n #**************** ------------- *****************#\n\n\n\n #**************** WRITE MD5 PARAMS **************#\n import hashlib\n SHAhash = hashlib.md5()\n\n f1 = open('design_matrix_shcoeff.npy', 'rb')\n while 1:\n # Read file in as little chunks\n buf = f1.read(4096)\n if not buf : break\n SHAhash.update(hashlib.md5(buf).hexdigest().encode('utf-8'))\n f1.close()\n\n\n f2 = open('design_matrix_ufilt_shcoeff.npy', 'rb')\n while 1:\n # Read file in as little chunks\n buf = f2.read(4096)\n if not buf : break\n SHAhash.update(hashlib.md5(buf).hexdigest().encode('utf-8'))\n f2.close()\n\n dictionary['stage3'] = SHAhash.hexdigest()\n dictionary['stage4'] = ''\n np.save('checksums.npy', dictionary)\n #**************** ---------------- **************#\n\n\n\n\n #**************** RETURN BACK TO INITIAL PATH ***#\n os.chdir(old_cwd)\n\n #**************** --------------------------- ***#\n\n\nif __name__ == '__main__':\n #**************** GET WORKING DIRECTORY ******************#\n\n WORKING_DIR = ''\n import sys\n if len(sys.argv) == 1:\n WORKING_DIR = ''\n\n WORKING_DIR = sys.argv[1]\n\n #**************** --------------------- ******************#\n main(WORKING_DIR)\n" ]
[ [ "numpy.save", "numpy.zeros" ] ]
Unbabel/caption
[ "90725dbf5bc3809e0364d20d0837c58968ceb2b1" ]
[ "caption/models/utils.py" ]
[ "# -*- coding: utf-8 -*-\nimport torch\nfrom caption.tokenizers import TextEncoderBase\n\n\ndef mask_fill(\n fill_value: float,\n tokens: torch.tensor,\n embeddings: torch.tensor,\n padding_index: int,\n) -> torch.tensor:\n \"\"\"\n Function that masks embeddings representing padded elements.\n :param fill_value: the value to fill the embeddings belonging to padded tokens.\n :param tokens: The input sequences [bsz x seq_len].\n :param embeddings: word embeddings [bsz x seq_len x hiddens].\n :param padding_index: Index of the padding token.\n \"\"\"\n padding_mask = tokens.eq(padding_index).unsqueeze(-1)\n return embeddings.float().masked_fill_(padding_mask, fill_value).type_as(embeddings)\n\n\ndef mask_tokens(\n inputs: torch.tensor,\n tokenizer: TextEncoderBase,\n mlm_probability: float = 0.15,\n ignore_index: int = -100,\n):\n \"\"\" Mask tokens function from Hugging Face that prepares masked tokens inputs/labels for \n masked language modeling.\n\n :param inputs: Input tensor to be masked.\n :param tokenizer: COMET text encoder.\n :param mlm_probability: Probability of masking a token (default: 15%).\n :param ignore_index: Specifies a target value that is ignored and does not contribute to \n the input gradient (default: -100).\n\n Returns:\n - Tuple with input to the model and the target.\n \"\"\"\n if tokenizer.mask_index is None:\n raise ValueError(\n \"This tokenizer does not have a mask token which is necessary for masked language\"\n \"modeling. Remove the --mlm flag if you want to use this tokenizer.\"\n )\n\n labels = inputs.clone()\n probability_matrix = torch.full(labels.shape, mlm_probability)\n special_tokens_mask = [\n tokenizer.get_special_tokens_mask(val) for val in labels.tolist()\n ]\n probability_matrix.masked_fill_(\n torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0\n )\n padding_mask = labels.eq(tokenizer.padding_index)\n probability_matrix.masked_fill_(padding_mask, value=0.0)\n\n masked_indices = torch.bernoulli(probability_matrix).bool()\n labels[~masked_indices] = ignore_index # We only compute loss on masked tokens\n\n # 80% of the time, we replace masked input tokens with ([MASK])\n indices_replaced = (\n torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices\n )\n\n inputs[indices_replaced] = tokenizer.mask_index\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = (\n torch.bernoulli(torch.full(labels.shape, 0.5)).bool()\n & masked_indices\n & ~indices_replaced\n )\n random_words = torch.randint(tokenizer.vocab_size, labels.shape, dtype=torch.long)\n inputs[indices_random] = random_words[indices_random]\n\n # The rest of the time (10% of the time) we keep the masked input tokens unchanged\n return inputs, labels\n" ]
[ [ "torch.tensor", "torch.full", "torch.randint", "torch.bernoulli" ] ]
leidian977/bert
[ "d7a54fce83a5678777a02bc50176e7fa527d7f9f" ]
[ "tokenization_test.py" ]
[ "# coding=utf-8\r\n# Copyright 2018 The Google AI Language Team Authors.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport tempfile\r\n\r\nimport tokenization\r\nimport tensorflow as tf\r\n\r\n\r\nclass TokenizationTest(tf.test.TestCase):\r\n\r\n def test_full_tokenizer(self):\r\n vocab_tokens = [\r\n \"[UNK]\", \"[CLS]\", \"[SEP]\", \"want\", \"##want\", \"##ed\", \"wa\", \"un\", \"runn\",\r\n \"##ing\", \",\"\r\n ]\r\n with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:\r\n vocab_writer.write(\"\".join([x + \"\\n\" for x in vocab_tokens]))\r\n\r\n vocab_file = vocab_writer.name\r\n\r\n tokenizer = tokenization.FullTokenizer(vocab_file)\r\n os.unlink(vocab_file)\r\n\r\n tokens = tokenizer.tokenize(u\"UNwant\\u00E9d,running\")\r\n self.assertAllEqual(tokens, [\"un\", \"##want\", \"##ed\", \",\", \"runn\", \"##ing\"])\r\n\r\n self.assertAllEqual(\r\n tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])\r\n\r\n def test_chinese(self):\r\n tokenizer = tokenization.BasicTokenizer()\r\n\r\n self.assertAllEqual(\r\n tokenizer.tokenize(u\"ah\\u535A\\u63A8zz\"),\r\n [u\"ah\", u\"\\u535A\", u\"\\u63A8\", u\"zz\"])\r\n\r\n def test_basic_tokenizer_lower(self):\r\n tokenizer = tokenization.BasicTokenizer(do_lower_case=True)\r\n\r\n self.assertAllEqual(\r\n tokenizer.tokenize(u\" \\tHeLLo!how \\n Are yoU? \"),\r\n [\"hello\", \"!\", \"how\", \"are\", \"you\", \"?\"])\r\n self.assertAllEqual(tokenizer.tokenize(u\"H\\u00E9llo\"), [\"hello\"])\r\n\r\n def test_basic_tokenizer_no_lower(self):\r\n tokenizer = tokenization.BasicTokenizer(do_lower_case=False)\r\n\r\n self.assertAllEqual(\r\n tokenizer.tokenize(u\" \\tHeLLo!how \\n Are yoU? 
\"),\r\n [\"HeLLo\", \"!\", \"how\", \"Are\", \"yoU\", \"?\"])\r\n\r\n def test_wordpiece_tokenizer(self):\r\n vocab_tokens = [\r\n \"[UNK]\", \"[CLS]\", \"[SEP]\", \"want\", \"##want\", \"##ed\", \"wa\", \"un\", \"runn\",\r\n \"##ing\"\r\n ]\r\n\r\n vocab = {}\r\n for (i, token) in enumerate(vocab_tokens):\r\n vocab[token] = i\r\n tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)\r\n\r\n self.assertAllEqual(tokenizer.tokenize(\"\"), [])\r\n\r\n self.assertAllEqual(\r\n tokenizer.tokenize(\"unwanted running\"),\r\n [\"un\", \"##want\", \"##ed\", \"runn\", \"##ing\"])\r\n\r\n self.assertAllEqual(\r\n tokenizer.tokenize(\"unwantedX running\"), [\"[UNK]\", \"runn\", \"##ing\"])\r\n\r\n def test_convert_tokens_to_ids(self):\r\n vocab_tokens = [\r\n \"[UNK]\", \"[CLS]\", \"[SEP]\", \"want\", \"##want\", \"##ed\", \"wa\", \"un\", \"runn\",\r\n \"##ing\"\r\n ]\r\n\r\n vocab = {}\r\n for (i, token) in enumerate(vocab_tokens):\r\n vocab[token] = i\r\n\r\n self.assertAllEqual(\r\n tokenization.convert_tokens_to_ids(\r\n vocab, [\"un\", \"##want\", \"##ed\", \"runn\", \"##ing\"]), [7, 4, 5, 8, 9])\r\n\r\n def test_is_whitespace(self):\r\n self.assertTrue(tokenization._is_whitespace(u\" \"))\r\n self.assertTrue(tokenization._is_whitespace(u\"\\t\"))\r\n self.assertTrue(tokenization._is_whitespace(u\"\\r\"))\r\n self.assertTrue(tokenization._is_whitespace(u\"\\n\"))\r\n self.assertTrue(tokenization._is_whitespace(u\"\\u00A0\"))\r\n\r\n self.assertFalse(tokenization._is_whitespace(u\"A\"))\r\n self.assertFalse(tokenization._is_whitespace(u\"-\"))\r\n\r\n def test_is_control(self):\r\n self.assertTrue(tokenization._is_control(u\"\\u0005\"))\r\n\r\n self.assertFalse(tokenization._is_control(u\"A\"))\r\n self.assertFalse(tokenization._is_control(u\" \"))\r\n self.assertFalse(tokenization._is_control(u\"\\t\"))\r\n self.assertFalse(tokenization._is_control(u\"\\r\"))\r\n\r\n def test_is_punctuation(self):\r\n self.assertTrue(tokenization._is_punctuation(u\"-\"))\r\n self.assertTrue(tokenization._is_punctuation(u\"$\"))\r\n self.assertTrue(tokenization._is_punctuation(u\"`\"))\r\n self.assertTrue(tokenization._is_punctuation(u\".\"))\r\n\r\n self.assertFalse(tokenization._is_punctuation(u\"A\"))\r\n self.assertFalse(tokenization._is_punctuation(u\" \"))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n tf.test.main()\r\n" ]
[ [ "tensorflow.test.main" ] ]
GeoscienceAustralia/uncoverml
[ "672914377afa4ad1c069fcd4845bc45f80132e36" ]
[ "tests/test_cubist.py" ]
[ "\nimport numpy as np\nfrom sklearn.metrics import r2_score\n\nfrom uncoverml.cubist import Cubist, MultiCubist\n\n# Declare some test data taken from the boston houses dataset\nx = np.array([\n [0.006, 18.00, 2.310, 0.5380, 6.5750, 65.20, 4.0900, 1, 296.0, 15.30],\n [0.027, 0.00, 7.070, 0.4690, 6.4210, 78.90, 4.9671, 2, 242.0, 17.80],\n [0.027, 0.00, 7.070, 0.4690, 7.1850, 61.10, 4.9671, 2, 242.0, 17.80],\n [0.032, 0.00, 2.180, 0.4580, 6.9980, 45.80, 6.0622, 3, 222.0, 18.70],\n [0.069, 0.00, 2.180, 0.4580, 7.1470, 54.20, 6.0622, 3, 222.0, 18.70],\n [0.029, 0.00, 2.180, 0.4580, 6.4300, 58.70, 6.0622, 3, 222.0, 18.70],\n [0.088, 12.50, 7.870, 0.5240, 6.0120, 66.60, 5.5605, 5, 311.0, 15.20],\n [0.144, 12.50, 7.870, 0.5240, 6.1720, 96.10, 5.9505, 5, 311.0, 15.20],\n [0.211, 12.50, 7.870, 0.5240, 5.6310, 100.00, 6.0821, 5, 311.0, 15.20],\n [0.170, 12.50, 7.870, 0.5240, 6.0040, 85.90, 6.5921, 5, 311.0, 15.20],\n [0.224, 12.50, 7.870, 0.5240, 6.3770, 94.30, 6.3467, 5, 311.0, 15.20],\n [0.117, 12.50, 7.870, 0.5240, 6.0090, 82.90, 6.2267, 5, 311.0, 15.20],\n [0.093, 12.50, 7.870, 0.5240, 5.8890, 39.00, 5.4509, 5, 311.0, 15.20],\n [0.629, 0.00, 8.140, 0.5380, 5.9490, 61.80, 4.7075, 4, 307.0, 21.00],\n [0.637, 0.00, 8.140, 0.5380, 6.0960, 84.50, 4.4619, 4, 307.0, 21.00]])\n\ny = np.array([24.00, 21.60, 34.70, 33.40, 36.20, 28.70, 22.90, 27.10,\n 16.50, 18.90, 15.00, 18.90, 21.70, 20.40, 18.2])\n\n\ndef test_correct_range():\n\n # Fit the data\n predictor = Cubist(print_output=False,\n sampling=90, seed=0, committee_members=2)\n predictor.fit(x, y)\n\n # Predict the output\n y_pred = predictor.predict(x)\n\n # Assert that the true y is similar to the prediction\n score = r2_score(y, y_pred)\n assert 0.68 < score < 0.8\n\n\ndef test_correct_range_with_sampling():\n\n # Fit the data\n predictor = Cubist(print_output=False,\n sampling=90, seed=10, committee_members=2)\n predictor.fit(x, y)\n\n # Predict the output\n y_pred = predictor.predict(x)\n\n # Assert that the true y is similar to the prediction\n score = r2_score(y, y_pred)\n assert 0.68 < score < 0.73\n\n\ndef test_multicubist():\n predictor = MultiCubist(print_output=False,\n trees=5,\n sampling=90,\n seed=1,\n neighbors=1)\n predictor.fit(x, y)\n\n # Predict the output\n y_pred = predictor.predict(x)\n\n # Assert that the true y is similar to the prediction\n score = r2_score(y, y_pred)\n assert 0.5 < score < 0.8\n\n\ndef test_multicibist_mpi(mpisync):\n \"\"\"\n run this with something like:\n \"mpirun -np 4 py.test ../tests/test_cubist.py::test_multicubist_mpi\"\n\n \"\"\"\n\n predictor = MultiCubist(trees=10,\n sampling=60,\n seed=1,\n neighbors=1,\n committee_members=5,\n parallel=True)\n predictor.fit(x, y)\n\n # Predict the output\n y_pred_p = predictor.predict(x)\n\n score = r2_score(y, y_pred_p)\n\n assert 0.5 < score < 0.8\n" ]
[ [ "numpy.array", "sklearn.metrics.r2_score" ] ]
Edinburgh-Genome-Foundry/Taskpacker
[ "151b581e3b64c6f462e177a5b8b2ff3457529ad0" ]
[ "tests/test_basics.py" ]
[ "\"\"\"Basic tests.\n\nAnd I mean reeeaaaally basic, I'm just making sure the main example runs here.\nThat's because the project is still experimental and \"expected behavior\" is\na very fluid concept at this time.\n\"\"\"\nimport os\nimport matplotlib\nmatplotlib.use('Agg')\nfrom taskpacker import (tasks_from_spreadsheet,\n resources_from_spreadsheet,\n schedule_processes_series,\n plot_tasks_dependency_graph,\n plot_schedule, Task, Resource,\n numberjack_scheduler)\n\nimport matplotlib.cm as cm\n\n\ndef test_dna_assembly_example(tmpdir):\n\n spreadsheet_path = os.path.join('examples', 'examples_data',\n \"dna_assembly.xls\")\n\n colors = (cm.Paired(0.21 * i % 1.0) for i in range(30))\n\n resources = resources_from_spreadsheet(\n spreadsheet_path=spreadsheet_path, sheetname=\"resources\")\n\n processes = [\n tasks_from_spreadsheet(spreadsheet_path=spreadsheet_path,\n sheetname=\"process\",\n resources_dict=resources,\n tasks_color=next(colors),\n task_name_prefix=\"WU%d_\" % (i + 1))\n for i in range(5)\n ]\n\n print(\"NOW OPTIMIZING THE SCHEDULE, BE PATIENT...\")\n new_processes = schedule_processes_series(\n processes, est_process_duration=5000, time_limit=6)\n\n # PLOT THE TASKS DEPENDENCY TREE\n ax = plot_tasks_dependency_graph(processes[0])\n ax.set_title(\"PLAN OF A WORK UNIT\")\n ax.figure.savefig(\"basic_example_work_unit.pdf\", bbox_inches=\"tight\")\n\n # PLOT THE OPTIMIZED SCHEDULE\n ax = plot_schedule([t for process in new_processes for t in process])\n ax.figure.set_size_inches((8, 5))\n ax.set_xlabel(\"time (min)\")\n ax.figure.savefig(os.path.join(str(tmpdir),\n \"basic_example_schedule.png\"),\n bbox_inches=\"tight\")\n\n\ndef test_alice_and_bob():\n\n alice = Resource(\"Alice\", capacity=2)\n bob = Resource(\"Bob\", capacity=1)\n\n clean_scalpels = Task(\"Clean the scalpels\", resources=[bob], duration=20,\n color=\"white\")\n visit_plants = Task(\"Visit the plants\", resources=[alice], duration=60,\n color=\"yellow\")\n cook_hamsters = Task(\"Cook the hamsters\", resources=[alice], duration=30,\n color=\"red\")\n dice_hamsters = Task(\"Dice the hamsters\", resources=[bob], duration=40,\n color=\"blue\", follows=[cook_hamsters, clean_scalpels])\n feed_gremlins = Task(\"Feed the gremlins\", resources=[alice, bob],\n duration=50,\n color=\"orange\", follows=[dice_hamsters])\n\n all_tasks = [clean_scalpels, visit_plants, cook_hamsters, dice_hamsters,\n feed_gremlins]\n scheduled_tasks = numberjack_scheduler(all_tasks)\n" ]
[ [ "matplotlib.use", "matplotlib.cm.Paired" ] ]
sand-ci/AlarmsAndAlerts
[ "37b783cbb22bb7d01532e3e1427fd18098717095" ]
[ "ps-throughput.py" ]
[ "import threading\nimport time\nimport datetime\nimport pandas as pd\nfrom functools import reduce, wraps\nfrom datetime import datetime, timedelta\nimport numpy as np\nfrom scipy.stats import zscore\n\nimport utils.queries as qrs\nimport utils.helpers as hp\nfrom data_objects.NodesMetaData import NodesMetaData\n\nfrom alarms import alarms\n\n\n\ndef fixMissingMetadata(rowDf, idx):\n metadf = NodesMetaData(idx, dateFrom, dateTo).df\n df1 = pd.merge(metadf[['host', 'ip', 'site']], rowDf[[\n 'src', 'hash']], left_on='ip', right_on='src', how='right')\n df2 = pd.merge(metadf[['host', 'ip', 'site']], rowDf[[\n 'dest', 'hash']], left_on='ip', right_on='dest', how='right')\n df = pd.merge(df1, df2, on=['hash'], suffixes=(\n '_src', '_dest'), how='inner')\n df = df[df.duplicated(subset=['hash']) == False]\n\n df = df.drop(columns=['ip_src', 'ip_dest'])\n df = pd.merge(rowDf, df, on=['hash', 'src', 'dest'], how='left')\n \n return df.rename(columns={'site_src': 'src_site', 'site_dest': 'dest_site'})\n\n\ndef queryData(idx, dateFrom, dateTo):\n data = []\n # query in portions since ES does not allow aggregations with more than 10000 bins\n intv = int(hp.CalcMinutes4Period(dateFrom, dateTo)/60)\n time_list = hp.GetTimeRanges(dateFrom, dateTo, intv)\n for i in range(len(time_list)-1):\n data.extend(qrs.query4Avg(idx, time_list[i], time_list[i+1]))\n\n return data\n\n\ndef getStats(df, threshold):\n# metaDf = fixMissingMetadata(df, 'ps_throughput')\n metaDf = df.copy()\n # convert to MB\n metaDf['value'] = round(metaDf['value']*1e-6)\n \n # split the data in 3 days\n sitesDf = metaDf.groupby(['src_site', 'dest_site',pd.Grouper(key='dt', freq='4d')])['value'].mean().to_frame().reset_index()\n \n # get the statistics\n sitesDf['z'] = sitesDf.groupby(['src_site','dest_site'])['value'].apply(lambda x: round((x - x.mean())/x.std(),2))\n stdDf = sitesDf.groupby(['src_site','dest_site'])['value'].apply(lambda x: x.std()).to_frame().reset_index().rename(columns={'value':'std'})\n stdDf['mean'] = sitesDf.groupby(['src_site','dest_site'])['value'].apply(lambda x: x.mean()).values\n\n sitesDf = pd.merge(sitesDf, stdDf, left_on=['src_site','dest_site'], right_on=['src_site','dest_site'], how='left')\n\n # get the % change with respect to the average for the period\n sitesDf['%change'] = round(((sitesDf['value'] - sitesDf['mean'])/sitesDf['mean'])*100)\n\n # grap the last 3 days period\n last3days = pd.to_datetime(max(sitesDf.dt.unique())).strftime(\"%Y-%m-%d\")\n\n # return only sites having significant drop in values in the most recent period\n return sitesDf[((sitesDf['z']<=-threshold)|(sitesDf['z']>=threshold))&(sitesDf['dt']==last3days)].rename(columns={'value':'last3days_avg'}).round(2)\n\n\ndef createAlarms(alarmsDf, alarmType, minCount=5):\n # we aim for exposing a single site which shows significant change in throughput from/to 5 (default value) other sites in total\n # below we find the total count of unique sites related to a single site name\n src_cnt = alarmsDf[['src_site']].value_counts().to_frame().reset_index().rename(columns={0:'cnt', 'src_site': 'site'})\n dest_cnt = alarmsDf[['dest_site']].value_counts().to_frame().reset_index().rename(columns={0:'cnt', 'dest_site': 'site'})\n cntDf = pd.concat([src_cnt, dest_cnt]).groupby(['site']).sum().reset_index()\n\n # create the alarm objects\n alarmOnPair = alarms('Networking', 'Perfsonar', alarmType)\n alarmOnMulty = alarms('Networking', 'Perfsonar', f'{alarmType} from/to multiple sites')\n\n rows2Delete = []\n\n for site in 
cntDf[cntDf['cnt']>=minCount]['site'].values:\n\n subset = alarmsDf[(alarmsDf['src_site']==site)|(alarmsDf['dest_site']==site)]\n \n # build the lists of values\n src_sites, dest_sites, src_change, dest_change = [],[],[],[]\n for idx, row in subset.iterrows():\n if row['src_site'] != site:\n src_sites.append(row['src_site'])\n src_change.append(row['%change'])\n if row['dest_site'] != site:\n dest_sites.append(row['dest_site'])\n dest_change.append(row['%change'])\n \n # create the alarm source content\n doc = {'dest_sites':dest_sites, 'dest_change':dest_change, 'src_sites':src_sites, 'src_change':src_change}\n doc['site'] = site\n\n # send the alarm with the proper message\n alarmOnMulty.addAlarm(body=f'{alarmType} from/to multiple sites', tags=[site], source=doc)\n rows2Delete.extend(subset.index.values)\n\n # delete the rows for which alarms were created\n alarmsDf = alarmsDf.drop(rows2Delete)\n\n # The rest will be send as 'regular' src-dest alarms\n for doc in alarmsDf[(alarmsDf['%change']<=-50)|(alarmsDf['%change']>=50)][['src_site', 'dest_site', 'last3days_avg', '%change']].to_dict('records'):\n alarmOnPair.addAlarm(body=alarmType, tags=[doc['src_site'], doc['dest_site']], source=doc)\n\n\nnow = datetime.utcnow()\ndateTo = datetime.strftime(now, '%Y-%m-%d %H:%M')\ndateFrom = datetime.strftime(now - timedelta(days=21), '%Y-%m-%d %H:%M')\n\n# get the data\nrowDf = pd.DataFrame(queryData('ps_throughput', dateFrom, dateTo))\nrowDf['dt'] = pd.to_datetime(rowDf['from'], unit='ms')\n\n# calculate the statistics\nstatsDf = getStats(rowDf, 1.9)\n\n# Bandwidth decreased\ncreateAlarms(statsDf[(statsDf['z']<=-1.9)&(statsDf['%change']!=100)], 'Bandwidth decreased')\n# Bandwidth recovery\ncreateAlarms(statsDf[(statsDf['z']>=1.9)], 'Bandwidth increased')" ]
[ [ "pandas.Grouper", "pandas.to_datetime", "pandas.merge", "pandas.concat" ] ]
davtoh/RRTools
[ "6dde2d4622719d9031bf21ffbf7723231a0e2003" ]
[ "tests/GUI_tests/Ex_pyplot2.py" ]
[ "\"\"\"Demo of how to pop up plots asynchronously using separate processes.\"\"\"\nfrom __future__ import print_function\n# https://gist.github.com/dwf/1222883\nfrom multiprocessing import Process\nimport time\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef demo():\n i = 0\n processes = []\n while True:\n i += 1\n s = time.time()\n while time.time() - s < 5:\n print('HA', end=' ')\n sys.stdout.flush()\n def do_something():\n figno = i\n f = plt.figure()\n # Normally this will always be \"Figure 1\" since it's the first\n # figure created by this process. So do something about it.\n f.canvas.set_window_title('My stupid plot number %d' % i)\n arr = np.random.uniform(size=(50, 50))\n plt.imshow(arr)\n plt.show()\n p = Process(None, do_something)\n processes.append(p) # May want to do other things with objects\n p.start()\n\nif __name__ == \"__main__\":\n demo()" ]
[ [ "numpy.random.uniform", "matplotlib.pyplot.figure", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show" ] ]
OsbornHu/tensorflow-ml
[ "56c3051e7085a919a603481709b63e4a6614192a" ]
[ "chapter02/demo_2.8.py" ]
[ "#!/usr/bin/python2.7\n# -*- coding:utf-8 -*-\n\n# Author: NetworkRanger\n# Date: 2018/11/4 下午12:04\n\n# 2.8 TensorFlow 实现创建张量\n\n# 1. 导入相应的工具库,初始化计算图\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets\nimport tensorflow as tf\nsess = tf.Session()\n\n# 2. 导入iris数据集,根据目标数据是否为山鸢尾将其转换成1或者0。由于iris数据集将山鸢尾标记为0,我们将其从0置为1,同时把其他物种标记为0\niris = datasets.load_iris()\nbinary_target = np.array([1. if x==0 else 0. for x in iris.target])\niris_2d = np.array([[x[2], x[3]] for x in iris.data])\n\n# 3. 声明变量训练大小,数据占位符和模型变量\nbatch_size = 20\nx1_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)\nx2_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)\ny_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)\nA = tf.Variable(tf.random_normal(shape=[1, 1]))\nb = tf.Variable(tf.random_normal(shape=[1, 1]))\n\n# 4. 定义线性模型\nmy_mult = tf.matmul(x2_data, A)\nmy_add = tf.add(my_mult, b)\nmy_output = tf.subtract(x1_data, my_add)\n\n# 5. 增加TensorFlow 的 sigmoid 交叉熵损失函数 sigmoid_cross_entropy_with_logits()\nxentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=my_output, labels=y_target)\n\n# 6. 声明优化器方法,最小化交叉熵损失\nmy_opt = tf.train.GradientDescentOptimizer(0.05)\ntrain_step = my_opt.minimize(xentropy)\n\n# 7. 创建一个变量初始化操作,然后让TensorFlow执行它\ninit = tf.initialize_all_variables()\nsess.run(init)\n\n# 8. 现在迭代100次训练线性模型\nfor i in range(1000):\n rand_index = np.random.choice(len(iris_2d), size=batch_size)\n rand_x = iris_2d[rand_index]\n rand_x1 = np.array([[x[0]] for x in rand_x])\n rand_x2 = np.array([[x[1]] for x in rand_x])\n rand_y = np.array([[y] for y in binary_target[rand_index]])\n sess.run(train_step, feed_dict={x1_data: rand_x1, x2_data: rand_x2, y_target: rand_y})\n if (i+1)%200 == 0:\n print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ', b = ' + str(sess.run(b)))\n\n \"\"\"\n Step #200 A = [[8.574331]], b = [[-3.5213127]]\n Step #400 A = [[10.120312]], b = [[-4.6367807]]\n Step #600 A = [[11.085849]], b = [[-5.303544]]\n Step #800 A = [[11.831396]], b = [[-5.835288]]\n Step #1000 A = [[12.395876]], b = [[-6.260936]]\n \"\"\"\n\n# 9. 下面的命令抽取模型变量并绘图\n[[slope]] = sess.run(A)\n[[intercept]] = sess.run(b)\nx = np.linspace(0, 3, num=50)\nablineValues = []\nfor i in x:\n ablineValues.append(slope+intercept)\n\nsetosa_x = [a[1] for i,a in enumerate(iris_2d) if binary_target[i] == 1]\nsetosa_y = [a[0] for i,a in enumerate(iris_2d) if binary_target[i] == 1]\nnon_setosa_x = [a[1] for i,a in enumerate(iris_2d) if binary_target[i] == 0]\nnon_setosa_y = [a[0] for i,a in enumerate(iris_2d) if binary_target[i] == 0]\nplt.plot(setosa_x, setosa_y, 'rx', ms=10, mew=2, label='setosa')\nplt.plot(non_setosa_x, non_setosa_y, 'ro', label='Non-setosa')\nplt.plot(x, ablineValues, 'b-')\nplt.xlim([0.0, 2.7])\nplt.ylim([0.0, 2.7])\nplt.suptitle('Linear Separator For I.setosa', fontsize=20)\nplt.xlabel('Petal Length')\nplt.ylabel('Petal Width')\nplt.legend(loc='lower right')\nplt.show()" ]
[ [ "tensorflow.initialize_all_variables", "tensorflow.matmul", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "tensorflow.random_normal", "sklearn.datasets.load_iris", "matplotlib.pyplot.xlim", "matplotlib.pyplot.suptitle", "numpy.linspace", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.subtract", "tensorflow.Session", "matplotlib.pyplot.ylim", "tensorflow.placeholder", "matplotlib.pyplot.legend", "tensorflow.add", "matplotlib.pyplot.show", "tensorflow.train.GradientDescentOptimizer", "numpy.array", "matplotlib.pyplot.xlabel" ] ]
goan15910/ConvDet
[ "6404622cc9d0c8e8b756260c4979b6842b2d0cb0" ]
[ "src/dataset/imdb.py" ]
[ "# Author: Bichen Wu ([email protected]) 08/25/2016\n\n\"\"\"The data base wrapper class\"\"\"\n\nimport os\nimport random\nimport shutil\n\nfrom PIL import Image, ImageFont, ImageDraw\nimport cv2\nimport numpy as np\nfrom utils.util import iou, batch_iou, drift_dist, recolor, scale_trans, rand_flip\n\nclass imdb(object):\n \"\"\"Image database.\"\"\"\n\n def __init__(self, name, mc):\n self._name = name\n self._classes = []\n self._image_set = []\n self._image_idx = []\n self._data_root_path = []\n self._rois = {}\n self.mc = mc\n\n # batch reader\n self._perm_idx = None\n self._cur_idx = 0\n\n @property\n def name(self):\n return self._name\n\n @property\n def classes(self):\n return self._classes\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def image_idx(self):\n return self._image_idx\n\n @property\n def image_set(self):\n return self._image_set\n\n @property\n def data_root_path(self):\n return self._data_root_path\n\n @property\n def year(self):\n return self._year\n\n def _shuffle_image_idx(self):\n self._perm_idx = [self._image_idx[i] for i in\n np.random.permutation(np.arange(len(self._image_idx)))]\n self._cur_idx = 0\n\n def read_image_batch(self, shuffle=True):\n \"\"\"Only Read a batch of images\n Args:\n shuffle: whether or not to shuffle the dataset\n Returns:\n images: length batch_size list of arrays [height, width, 3]\n \"\"\"\n mc = self.mc\n if shuffle:\n if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):\n self._shuffle_image_idx()\n batch_idx = self._perm_idx[self._cur_idx:self._cur_idx+mc.BATCH_SIZE]\n self._cur_idx += mc.BATCH_SIZE\n else:\n if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):\n batch_idx = self._image_idx[self._cur_idx:] \\\n + self._image_idx[:self._cur_idx + mc.BATCH_SIZE-len(self._image_idx)]\n self._cur_idx += mc.BATCH_SIZE - len(self._image_idx)\n else:\n batch_idx = self._image_idx[self._cur_idx:self._cur_idx+mc.BATCH_SIZE]\n self._cur_idx += mc.BATCH_SIZE\n\n images, scales = [], []\n for i in batch_idx:\n im = cv2.imread(self._image_path_at(i))\n if mc.SUB_BGR_MEANS:\n im = im.astype(np.float32, copy=False)\n im -= mc.BGR_MEANS\n orig_h, orig_w, _ = [float(v) for v in im.shape]\n im = cv2.resize(im, (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT))\n x_scale = mc.IMAGE_WIDTH/orig_w\n y_scale = mc.IMAGE_HEIGHT/orig_h\n images.append(im)\n scales.append((x_scale, y_scale))\n\n return images, scales\n\n def read_batch(self, shuffle=True):\n \"\"\"Read a batch of image and bounding box annotations.\n Args:\n shuffle: whether or not to shuffle the dataset\n Returns:\n image_per_batch: images. Shape: batch_size x width x height x [b, g, r]\n label_per_batch: labels. Shape: batch_size x object_num\n delta_per_batch: bounding box deltas. Shape: batch_size x object_num x \n [dx ,dy, dw, dh]\n aidx_per_batch: index of anchors that are responsible for prediction.\n Shape: batch_size x object_num\n bbox_per_batch: scaled bounding boxes. 
Shape: batch_size x object_num x \n [cx, cy, w, h]\n \"\"\"\n mc = self.mc\n\n if shuffle:\n if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):\n self._shuffle_image_idx()\n batch_idx = self._perm_idx[self._cur_idx:self._cur_idx+mc.BATCH_SIZE]\n self._cur_idx += mc.BATCH_SIZE\n else:\n if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):\n batch_idx = self._image_idx[self._cur_idx:] \\\n + self._image_idx[:self._cur_idx + mc.BATCH_SIZE-len(self._image_idx)]\n self._cur_idx += mc.BATCH_SIZE - len(self._image_idx)\n else:\n batch_idx = self._image_idx[self._cur_idx:self._cur_idx+mc.BATCH_SIZE]\n self._cur_idx += mc.BATCH_SIZE\n\n image_per_batch = []\n label_per_batch = []\n bbox_per_batch = []\n delta_per_batch = []\n aidx_per_batch = []\n if mc.DEBUG_MODE:\n avg_ious = 0.\n num_objects = 0.\n max_iou = 0.0\n min_iou = 1.0\n num_zero_iou_obj = 0\n\n for idx in batch_idx:\n # load the image\n im = cv2.imread(self._image_path_at(idx))\n orig_h, orig_w, _ = [float(v) for v in im.shape]\n\n # load annotations\n label_this_batch = np.array([b[4] for b in self._rois[idx][:]])\n gt_bbox = np.array([[b[0], b[1], b[2], b[3]] for b in self._rois[idx][:]])\n\n if mc.DATA_AUGMENTATION:\n assert mc.DATA_AUG_TYPE in ['SQT', 'YOLO'], \\\n 'Invalid augmentation type: {}'.format(mc.DATA_AUG_TYPE)\n if mc.DATA_AUG_TYPE == 'SQT':\n im, gt_bbox = drift_dist(im, gt_bbox, mc, orig_h, orig_w)\n elif mc.DATA_AUG_TYPE == 'YOLO':\n if np.random.randint(2) > 0.5:\n im, gt_bbox, label_this_batch = scale_trans(im, gt_bbox, label_this_batch)\n im = recolor(im)\n im, gt_bbox = rand_flip(im, gt_bbox, orig_w)\n\n # Remove BGR bias\n if mc.SUB_BGR_MEANS:\n im = im.astype(np.float32, copy=False)\n im -= mc.BGR_MEANS\n #im = im.astype(np.uint8, copy=False)\n\n label_per_batch.append(label_this_batch.tolist())\n\n # scale image\n im = cv2.resize(im, (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT))\n image_per_batch.append(im)\n\n # scale annotation\n x_scale = mc.IMAGE_WIDTH/orig_w\n y_scale = mc.IMAGE_HEIGHT/orig_h\n gt_bbox[:, 0::2] = gt_bbox[:, 0::2]*x_scale\n gt_bbox[:, 1::2] = gt_bbox[:, 1::2]*y_scale\n bbox_per_batch.append(gt_bbox)\n\n aidx_per_image, delta_per_image = [], []\n aidx_set = set()\n for i in range(len(gt_bbox)):\n overlaps = batch_iou(mc.ANCHOR_BOX, gt_bbox[i])\n\n aidx = len(mc.ANCHOR_BOX)\n for ov_idx in np.argsort(overlaps)[::-1]:\n if overlaps[ov_idx] <= 0:\n if mc.DEBUG_MODE:\n min_iou = min(overlaps[ov_idx], min_iou)\n num_objects += 1\n num_zero_iou_obj += 1\n break\n if ov_idx not in aidx_set:\n aidx_set.add(ov_idx)\n aidx = ov_idx\n if mc.DEBUG_MODE:\n max_iou = max(overlaps[ov_idx], max_iou)\n min_iou = min(overlaps[ov_idx], min_iou)\n avg_ious += overlaps[ov_idx]\n num_objects += 1\n break\n\n if aidx == len(mc.ANCHOR_BOX): \n # even the largeset available overlap is 0, thus, choose one with the\n # smallest square distance\n dist = np.sum(np.square(gt_bbox[i] - mc.ANCHOR_BOX), axis=1)\n for dist_idx in np.argsort(dist):\n if dist_idx not in aidx_set:\n aidx_set.add(dist_idx)\n aidx = dist_idx\n break\n \n box_cx, box_cy, box_w, box_h = gt_bbox[i]\n delta = [0]*4\n delta[0] = (box_cx - mc.ANCHOR_BOX[aidx][0])/box_w\n delta[1] = (box_cy - mc.ANCHOR_BOX[aidx][1])/box_h\n delta[2] = np.log(box_w/mc.ANCHOR_BOX[aidx][2])\n delta[3] = np.log(box_h/mc.ANCHOR_BOX[aidx][3])\n\n aidx_per_image.append(aidx)\n delta_per_image.append(delta)\n\n delta_per_batch.append(delta_per_image)\n aidx_per_batch.append(aidx_per_image)\n\n if mc.DEBUG_MODE:\n print ('max iou: {}'.format(max_iou))\n print ('min iou: 
{}'.format(min_iou))\n print ('avg iou: {}'.format(avg_ious/num_objects))\n print ('number of objects: {}'.format(num_objects))\n print ('number of objects with 0 iou: {}'.format(num_zero_iou_obj))\n\n return image_per_batch, label_per_batch, delta_per_batch, \\\n aidx_per_batch, bbox_per_batch\n\n def evaluate_detections(self):\n raise NotImplementedError\n\n def visualize_detections(\n self, image_dir, image_format, det_error_file, output_image_dir,\n num_det_per_type=10):\n\n # load detections\n with open(det_error_file) as f:\n lines = f.readlines()\n random.shuffle(lines)\n f.close()\n\n dets_per_type = {}\n for line in lines:\n obj = line.strip().split(' ')\n error_type = obj[1]\n if error_type not in dets_per_type:\n dets_per_type[error_type] = [{\n 'im_idx':obj[0], \n 'bbox':[float(obj[2]), float(obj[3]), float(obj[4]), float(obj[5])],\n 'class':obj[6],\n 'score': float(obj[7])\n }]\n else:\n dets_per_type[error_type].append({\n 'im_idx':obj[0], \n 'bbox':[float(obj[2]), float(obj[3]), float(obj[4]), float(obj[5])],\n 'class':obj[6],\n 'score': float(obj[7])\n })\n\n out_ims = []\n # Randomly select some detections and plot them\n COLOR = (200, 200, 0)\n for error_type, dets in dets_per_type.iteritems():\n det_im_dir = os.path.join(output_image_dir, error_type)\n if os.path.exists(det_im_dir):\n shutil.rmtree(det_im_dir)\n os.makedirs(det_im_dir)\n\n for i in range(min(num_det_per_type, len(dets))):\n det = dets[i]\n im = Image.open(\n os.path.join(image_dir, det['im_idx']+image_format))\n draw = ImageDraw.Draw(im)\n draw.rectangle(det['bbox'], outline=COLOR)\n draw.text((det['bbox'][0], det['bbox'][1]), \n '{:s} ({:.2f})'.format(det['class'], det['score']),\n fill=COLOR)\n out_im_path = os.path.join(det_im_dir, str(i)+image_format)\n im.save(out_im_path)\n im = np.array(im)\n out_ims.append(im[:,:,::-1]) # RGB to BGR\n return out_ims\n\n" ]
[ [ "numpy.argsort", "numpy.log", "numpy.array", "numpy.square", "numpy.random.randint" ] ]
MaajidKhan/ONNX-1.6.0-OP-Library
[ "df26621fa225485849853f5e11180600be71d11d" ]
[ "operators/sign.py" ]
[ "#sign\n\nimport onnx\nfrom onnx import helper\nfrom onnx import numpy_helper\nfrom onnx import AttributeProto, TensorProto, GraphProto\nimport numpy as np\nfrom Compare_output import compare\n\n# Create the inputs (ValueInfoProto)\nx = helper.make_tensor_value_info('x', TensorProto.FLOAT, [11,])\n\n\n# Create one output (ValueInfoProto)\ny = helper.make_tensor_value_info('y', TensorProto.FLOAT, [11,])\n\n# Create a node (NodeProto)\nnode_def = helper.make_node(\n 'Sign',\n inputs=['x'],\n outputs=['y'],\n)\n\n# Create the graph (GraphProto)\ngraph_def = helper.make_graph(\n [node_def],\n 'test-model',\n [x],\n [y],\n)\n\n# Create the model (ModelProto)\nmodel_def = helper.make_model(graph_def, producer_name='onnx-sign')\nprint('The model is:\\n{}'.format(model_def))\nonnx.checker.check_model(model_def)\nprint('The model is checked!')\n\n# Save the ONNX model\nimport os\npath = os.getcwd()\nnew_model_path = os.path.join(path, '../onnx_generated_models/sign.onnx')\nonnx.save(model_def, new_model_path)\nprint('The model is saved.')\n\n\n# Preprocessing: load the ONNX model (Loading an already exisisting model)\nmodel_path1 = os.path.join(path, '../onnx_generated_models/sign.onnx')\nonnx_model1 = onnx.load(model_path1)\nprint('The model is:\\n{}'.format(onnx_model1))\n\n\nx = np.array(range(-5, 6)).astype(np.float32)\ny_actual = np.sign(x)\n\n#Running the model using ONNX Runtime\nimport onnxruntime as rt\nimport numpy\nsess = rt.InferenceSession(\"../onnx_generated_models/sign.onnx\")\ninput_name = sess.get_inputs()[0].name\nlabel_name = sess.get_outputs()[0].name\n\ny_pred = sess.run(\n [label_name], {input_name: x.astype(numpy.float32),\n })\n\nprint(\"The predicted output for the operation: sign\")\nprint(y_pred)\n\ny_pred = np.asarray(y_pred) #converting list into an array\nprint(y_pred.shape)\n\ny_pred = np.squeeze(y_pred, axis=0)\nprint(y_pred.shape)\n\ncompare(y_actual, y_pred)" ]
[ [ "numpy.sign", "numpy.asarray", "numpy.squeeze" ] ]
OniOniOn-/maplestory_dpm_calc
[ "fbe824f01ab8e8210b174dd9db8295da80c267cd" ]
[ "statistics/optimization_hint.py" ]
[ "import argparse\n\nimport pandas as pd\nfrom dpmModule.character.characterKernel import JobGenerator\nfrom dpmModule.character.characterTemplate import TemplateGenerator\nfrom dpmModule.jobs import jobMap\nfrom dpmModule.kernel import core\nfrom dpmModule.status.ability import Ability_grade\n\nfrom .loader import load_data\nfrom .preset import get_preset\nfrom .saver import save_data\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\"Optimization hint argument\")\n parser.add_argument(\n \"--id\", type=str, help=\"Target preset id to calculate statistics\"\n )\n parser.add_argument(\"--ulevel\", type=int, default=8000)\n parser.add_argument(\"--cdr\", type=int, default=0)\n parser.add_argument(\"--time\", type=int, default=1800)\n parser.add_argument(\"--task\", default=\"dpm\")\n parser.add_argument(\"--calc\", action=\"store_true\")\n\n return parser.parse_args()\n\n\ndef armor_percent_to_float(num: float):\n return (100 - num) / 100\n\n\ndef armor_float_to_percent(num: float):\n return 100 - num * 100\n\n\ndef get_modifier(args) -> core.CharacterModifier:\n preset = get_preset(args.id)\n gen: JobGenerator = jobMap[preset.job].JobGenerator()\n target, weapon_stat = TemplateGenerator().get_template_and_weapon_stat(gen, str(args.ulevel), args.cdr)\n v_builder = core.AlwaysMaximumVBuilder()\n graph = gen.package(\n target,\n v_builder,\n options=preset.options,\n ulevel=args.ulevel,\n weaponstat=weapon_stat,\n ability_grade=Ability_grade(4, 1),\n )\n return graph.get_default_buff_modifier()\n\n\ndef optimization_hint(args, df: pd.DataFrame):\n buff_modifier = get_modifier(args)\n\n df = df[[\"name\", \"deal\", \"mdf\"]]\n df = df.loc[df[\"deal\"] > 0]\n deal_total = df[\"deal\"].sum()\n\n df[\"crit_damage\"] = df[\"mdf\"].apply(lambda x: x[\"crit_damage\"])\n df[\"pdamage\"] = df[\"mdf\"].apply(lambda x: x[\"pdamage\"])\n df[\"boss_pdamage\"] = df[\"mdf\"].apply(lambda x: x[\"boss_pdamage\"])\n df[\"armor_ignore\"] = df[\"mdf\"].apply(lambda x: x[\"armor_ignore\"])\n df[\"patt\"] = df[\"mdf\"].apply(lambda x: x[\"patt\"])\n grouped = df.groupby([\"name\"])\n\n df = pd.DataFrame()\n df[\"share\"] = grouped[\"deal\"].sum() / deal_total\n df[\"crit_damage\"] = grouped[\"crit_damage\"].mean()\n df[\"pdamage\"] = grouped[\"pdamage\"].mean()\n df[\"boss_pdamage\"] = grouped[\"boss_pdamage\"].mean()\n df[\"armor_ignore\"] = grouped[\"armor_ignore\"].mean()\n df[\"patt\"] = grouped[\"patt\"].mean()\n\n print(df)\n\n crit_damage = (df[\"crit_damage\"] * df[\"share\"]).sum()\n pdamage = (df[\"pdamage\"] * df[\"share\"]).sum()\n boss_pdamage = (df[\"boss_pdamage\"] * df[\"share\"]).sum()\n armor_ignore = (df[\"armor_ignore\"] * df[\"share\"]).sum()\n patt = (df[\"patt\"] * df[\"share\"]).sum()\n\n print(\n {\n \"crit_damage\": crit_damage - buff_modifier.crit_damage,\n \"pdamage\": pdamage - buff_modifier.pdamage,\n \"boss_pdamage\": boss_pdamage - buff_modifier.boss_pdamage,\n \"armor_ignore\": armor_float_to_percent(\n armor_percent_to_float(armor_ignore)\n / armor_percent_to_float(buff_modifier.armor_ignore)\n / armor_percent_to_float(20)\n ),\n \"patt\": patt - buff_modifier.patt,\n }\n )\n\n\nif __name__ == \"__main__\":\n args = get_args()\n if args.calc:\n data = save_data(args)\n else:\n data = load_data(args)\n optimization_hint(args, data)\n" ]
[ [ "pandas.DataFrame" ] ]
zeroized/DeepRec-torch
[ "2957f65501243107284f3a43735b77b3e89ce684" ]
[ "model/wrapper/base.py" ]
[ "import torch\nfrom torch.utils.data import DataLoader\n\nfrom util.log_util import create_file_console_logger\nfrom util.train import config_path, split_dataset, train_model\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nclass BaseModel:\n def __init__(self):\n self.loader_args = None\n self.model = None\n self.job_name = ''\n self.device = torch.device('cpu')\n self.logger = None,\n self.tb_writer = None,\n self.ckpt_dir = None\n self.log_path = None,\n self.model_path = None\n self.ckpt_interval = -1\n\n def config_training(self, write_log_file=True, log_path=None,\n save_ckpt=True, ckpt_dir=None, ckpt_interval=None,\n save_model=True, model_path=None,\n write_tb=True, tb_dir=None):\n self.logger, self.tb_writer, self.ckpt_dir, self.log_path, self.model_path = \\\n config_path(self.job_name, self.device, write_log_file, log_path, save_ckpt, ckpt_dir, save_model,\n model_path, write_tb, tb_dir)\n if save_ckpt:\n self.ckpt_interval = ckpt_interval\n\n def config_tensorboard(self, write_tb=False, tb_dir=None):\n if write_tb:\n self.tb_writer = SummaryWriter(log_dir=tb_dir)\n\n def config_logger(self, write_log_file=False, log_path=None):\n if write_log_file:\n self.logger = create_file_console_logger(log_path, name=self.job_name)\n\n def config_ckpt(self, save_ckpt=False, ckpt_dir=None, ckpt_interval=None):\n if save_ckpt:\n self.ckpt_dir = ckpt_dir\n self.ckpt_interval = ckpt_interval\n\n def config_model_saving(self, save_model=False, model_path=None):\n if save_model:\n self.model_path = model_path\n\n def config_loader_meta(self, **kwargs):\n self.loader_args = kwargs\n\n def _train(self, dataset, loss_func, optimizer=None, epochs=2, val_size=0):\n if not optimizer:\n optimizer = torch.optim.SGD(params=self.model.parameters(), lr=1e-3)\n if val_size <= 0:\n train_loader = DataLoader(dataset, **self.loader_args)\n val_loader = None\n else:\n train_set, val_set = split_dataset(dataset, val_size)\n train_loader = DataLoader(train_set, **self.loader_args)\n val_loader = DataLoader(val_set, batch_size=self.loader_args['batch_size'])\n self.model.train()\n train_model(self.model, train_loader, loss_func, optimizer, val_loader, epochs,\n self.logger, self.tb_writer, self.ckpt_dir, self.ckpt_interval, self.model_path)\n\n def train(self, **kwargs):\n raise NotImplementedError\n\n def eval(self, **kwargs):\n raise NotImplementedError\n" ]
[ [ "torch.utils.tensorboard.SummaryWriter", "torch.utils.data.DataLoader", "torch.device" ] ]
Manza12/kornia
[ "580bbbffc771470445de27a7957d970b5a606172" ]
[ "kornia/augmentation/functional/functional3d.py" ]
[ "from typing import Tuple, List, Union, Dict, cast, Optional\n\nimport torch\n\nimport kornia as K\nfrom kornia.constants import Resample, BorderType, pi\nfrom kornia.geometry.transform.affwarp import _compute_rotation_matrix3d, _compute_tensor_center3d\nfrom kornia.geometry.transform.projwarp import warp_affine3d\nfrom kornia.geometry import (\n crop_by_boxes3d,\n warp_perspective3d,\n get_perspective_transform3d,\n rotate3d,\n get_affine_matrix3d,\n deg2rad,\n)\nfrom kornia.enhance import equalize3d\n\nfrom .. import random_generator as rg\nfrom ..utils import _validate_input3d\nfrom kornia.filters import motion_blur3d\n\nfrom .__temp__ import __deprecation_warning, _deprecation_wrapper\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef apply_hflip3d(input: torch.Tensor) -> torch.Tensor:\n r\"\"\"Apply horizontal flip on a 3D tensor volume or a batch of tensors volumes with given random parameters.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n\n Returns:\n torch.Tensor: Horizontal flipped input with shape :math:`(*, C, D, H, W)`.\n \"\"\"\n\n return torch.flip(input, [-1])\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef compute_hflip_transformation3d(input: torch.Tensor) -> torch.Tensor:\n r\"\"\"Compute the horizontal flip transformation matrix :math: `(*, 4, 4)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n\n Returns:\n torch.Tensor: Horizontal flip transformation matrix :math: `(*, 4, 4)`.\n \"\"\"\n\n w: int = input.shape[-1]\n flip_mat: torch.Tensor = torch.tensor([[-1, 0, 0, w - 1], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n\n return flip_mat.repeat(input.size(0), 1, 1).to(input)\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef apply_vflip3d(input: torch.Tensor) -> torch.Tensor:\n r\"\"\"Apply vertical flip on a 3D tensor volume or a batch of tensors volumes with given random parameters.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n\n Returns:\n torch.Tensor: Vertical flipped input with shape :math:`(*, C, D, H, W)`.\n \"\"\"\n\n return torch.flip(input, [-2])\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef compute_vflip_transformation3d(input: torch.Tensor) -> torch.Tensor:\n r\"\"\"Compute the veritical flip transformation matrix :math: `(*, 4, 4)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n\n Returns:\n torch.Tensor: The vertical flip transformation matrix :math: `(*, 4, 4)`.\n \"\"\"\n\n h: int = input.shape[-2]\n flip_mat: torch.Tensor = torch.tensor([[1, 0, 0, 0], [0, -1, 0, h - 1], [0, 0, 1, 0], [0, 0, 0, 1]])\n\n return flip_mat.repeat(input.size(0), 1, 1).to(input)\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef apply_dflip3d(input: torch.Tensor) -> torch.Tensor:\n r\"\"\"Apply depthical flip on a 3D tensor volume or a batch of tensors volumes with given random parameters.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n\n Returns:\n torch.Tensor: Depthical flipped input with shape :math:`(*, C, D, H, W)`.\n \"\"\"\n\n return torch.flip(input, [-3])\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef compute_intensity_transformation3d(input: torch.Tensor):\n r\"\"\"Compute the identity matrix :math: `(*, 4, 4)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n\n Returns:\n torch.Tensor: Identity matrix :math: `(*, 4, 4)`.\n \"\"\"\n identity: torch.Tensor 
= torch.eye(4, device=input.device, dtype=input.dtype).repeat(input.shape[0], 1, 1)\n return identity\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef compute_dflip_transformation3d(input: torch.Tensor) -> torch.Tensor:\n r\"\"\"Compute the depthical flip transformation matrix :math: `(*, 4, 4)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n\n Returns:\n torch.Tensor: Depthical flip transformation matrix :math: `(*, 4, 4)`.\n \"\"\"\n\n d: int = input.shape[-3]\n flip_mat: torch.Tensor = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, d - 1], [0, 0, 0, 1]])\n\n return flip_mat.repeat(input.size(0), 1, 1).to(input)\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef apply_affine3d(\n input: torch.Tensor, params: Dict[str, torch.Tensor], flags: Dict[str, torch.Tensor]\n) -> torch.Tensor:\n r\"\"\"Random affine transformation of the image keeping center invariant.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n params (Dict[str, torch.Tensor]):\n - params['angles']: Degrees of rotation with the shape of :math: `(*, 3)` for yaw, pitch, roll.\n - params['translations']: Horizontal, vertical and depthical translations (dx,dy,dz).\n - params['center']: Rotation center (x,y,z).\n - params['scale']: Isotropic scaling params.\n - params['sxy']: Shear param toward x-y-axis.\n - params['sxz']: Shear param toward x-z-axis.\n - params['syx']: Shear param toward y-x-axis.\n - params['syz']: Shear param toward y-z-axis.\n - params['szx']: Shear param toward z-x-axis.\n - params['szy']: Shear param toward z-y-axis.\n flags (Dict[str, torch.Tensor]):\n - params['resample']: Integer tensor. NEAREST = 0, BILINEAR = 1.\n - params['align_corners']: Boolean tensor.\n\n Returns:\n torch.Tensor: Affine transfromed input with shape :math:`(*, C, D, H, W)`.\n \"\"\"\n\n # arrange input data\n x_data: torch.Tensor = input.view(-1, *input.shape[-4:])\n\n depth, height, width = x_data.shape[-3:]\n\n # concatenate transforms\n transform: torch.Tensor = compute_affine_transformation3d(input, params)\n\n resample_name: str = Resample(flags['resample'].item()).name.lower()\n align_corners: bool = cast(bool, flags['align_corners'].item())\n\n out_data: torch.Tensor = warp_affine3d(\n x_data, transform[:, :3, :], (depth, height, width), resample_name, align_corners=align_corners\n )\n return out_data.view_as(input)\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef compute_affine_transformation3d(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Compute the affine transformation matrix :math: `(*, 4, 4)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n params (Dict[str, torch.Tensor]):\n - params['angles']: Degrees of rotation with the shape of :math: `(*, 3)` for yaw, pitch, roll.\n - params['translations']: Horizontal, vertical and depthical translations (dx,dy,dz).\n - params['center']: Rotation center (x,y,z).\n - params['scale']: Isotropic scaling params.\n - params['sxy']: Shear param toward x-y-axis.\n - params['sxz']: Shear param toward x-z-axis.\n - params['syx']: Shear param toward y-x-axis.\n - params['syz']: Shear param toward y-z-axis.\n - params['szx']: Shear param toward z-x-axis.\n - params['szy']: Shear param toward z-y-axis.\n\n Returns:\n torch.Tensor: The affine transformation matrix :math: `(*, 4, 4)`.\n \"\"\"\n transform = get_affine_matrix3d(\n params['translations'],\n params['center'],\n params['scale'],\n 
params['angles'],\n deg2rad(params['sxy']),\n deg2rad(params['sxz']),\n deg2rad(params['syx']),\n deg2rad(params['syz']),\n deg2rad(params['szx']),\n deg2rad(params['szy']),\n ).to(input)\n return transform\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef apply_rotation3d(\n input: torch.Tensor, params: Dict[str, torch.Tensor], flags: Dict[str, torch.Tensor]\n) -> torch.Tensor:\n r\"\"\"Rotate a tensor image or a batch of tensor images a random amount of degrees.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n params (Dict[str, torch.Tensor]):\n - params['degrees']: degree to be applied.\n flags (Dict[str, torch.Tensor]):\n - params['resample']: Integer tensor. NEAREST = 0, BILINEAR = 1.\n - params['align_corners']: Boolean tensor.\n\n Returns:\n torch.Tensor: The cropped input.\n \"\"\"\n yaw: torch.Tensor = params[\"yaw\"].to(input)\n pitch: torch.Tensor = params[\"pitch\"].to(input)\n roll: torch.Tensor = params[\"roll\"].to(input)\n\n resample_mode: str = Resample(flags['resample'].item()).name.lower()\n align_corners: bool = cast(bool, flags['align_corners'].item())\n\n transformed: torch.Tensor = rotate3d(input, yaw, pitch, roll, mode=resample_mode, align_corners=align_corners)\n\n return transformed\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef compute_rotate_tranformation3d(input: torch.Tensor, params: Dict[str, torch.Tensor]):\n r\"\"\"Compute the rotation transformation matrix :math: `(*, 4, 4)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n params (Dict[str, torch.Tensor]):\n - params['yaw']: degree to be applied.\n - params['pitch']: degree to be applied.\n - params['roll']: degree to be applied.\n\n Returns:\n torch.Tensor: The rotation transformation matrix :math: `(*, 4, 4)`.\n \"\"\"\n yaw: torch.Tensor = params[\"yaw\"].to(input)\n pitch: torch.Tensor = params[\"pitch\"].to(input)\n roll: torch.Tensor = params[\"roll\"].to(input)\n\n center: torch.Tensor = _compute_tensor_center3d(input)\n rotation_mat: torch.Tensor = _compute_rotation_matrix3d(yaw, pitch, roll, center.expand(yaw.shape[0], -1))\n\n # rotation_mat is B x 3 x 4 and we need a B x 4 x 4 matrix\n trans_mat: torch.Tensor = torch.eye(4, device=input.device, dtype=input.dtype).repeat(input.shape[0], 1, 1)\n trans_mat[:, 0] = rotation_mat[:, 0]\n trans_mat[:, 1] = rotation_mat[:, 1]\n trans_mat[:, 2] = rotation_mat[:, 2]\n\n return trans_mat\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef apply_motion_blur3d(\n input: torch.Tensor, params: Dict[str, torch.Tensor], flags: Dict[str, torch.Tensor]\n) -> torch.Tensor:\n r\"\"\"Perform motion blur on an image.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n params (Dict[str, torch.Tensor]):\n - params['ksize_factor']: motion kernel width and height (odd and positive).\n - params['angle_factor']: yaw, pitch and roll range of the motion blur in degrees :math:`(B, 3)`.\n - params['direction_factor']: forward/backward direction of the motion blur.\n Lower values towards -1.0 will point the motion blur towards the back (with\n angle provided via angle), while higher values towards 1.0 will point the motion\n blur forward. A value of 0.0 leads to a uniformly (but still angled) motion blur.\n flags (Dict[str, torch.Tensor]):\n - flags['border_type']: the padding mode to be applied before convolving.\n CONSTANT = 0, REFLECT = 1, REPLICATE = 2, CIRCULAR = 3. 
Default: BorderType.CONSTANT.\n\n Returns:\n torch.Tensor: adjusted image tensor with shape :math:`(*, C, D, H, W)`.\n \"\"\"\n\n kernel_size: int = cast(int, params['ksize_factor'].unique().item())\n angle = params['angle_factor']\n direction = params['direction_factor']\n border_type: str = cast(str, BorderType(flags['border_type'].item()).name.lower())\n mode: str = cast(str, Resample(flags['interpolation'].item()).name.lower())\n\n return motion_blur3d(input, kernel_size, angle, direction, border_type, mode)\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef apply_crop3d(input: torch.Tensor, params: Dict[str, torch.Tensor], flags: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Apply cropping by src bounding box and dst bounding box.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n params (Dict[str, torch.Tensor]):\n - params['src']: The applied cropping src matrix :math: `(*, 8, 3)`.\n - params['dst']: The applied cropping dst matrix :math: `(*, 8, 3)`.\n flags (Dict[str, torch.Tensor]):\n - params['interpolation']: Integer tensor. NEAREST = 0, BILINEAR = 1.\n - params['align_corners']: Boolean tensor.\n\n Returns:\n torch.Tensor: The cropped input.\n\n Note:\n BBox order: front-top-left, front-top-right, front-bottom-right, front-bottom-left, back-top-left,\n back-top-right, back-bottom-right, back-bottom-left. The coordinates must be in x, y, z order.\n \"\"\"\n\n resample_mode: str = Resample.get(flags['interpolation'].item()).name.lower() # type: ignore\n align_corners: bool = cast(bool, flags['align_corners'].item())\n\n return crop_by_boxes3d(input, params['src'], params['dst'], resample_mode, align_corners=align_corners)\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef compute_crop_transformation3d(\n input: torch.Tensor, params: Dict[str, torch.Tensor], flags: Dict[str, torch.Tensor]\n) -> torch.Tensor:\n r\"\"\"Compute the cropping transformation matrix :math: `(*, 4, 4)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n params (Dict[str, torch.Tensor]):\n - params['src']: The applied cropping src matrix :math: `(*, 8, 3)`.\n - params['dst']: The applied cropping dst matrix :math: `(*, 8, 3)`.\n\n Returns:\n torch.Tensor: The cropping transformation matrix :math: `(*, 4, 4)`.\n \"\"\"\n transform: torch.Tensor = get_perspective_transform3d(params['src'].to(input.dtype), params['dst'].to(input.dtype))\n transform = transform.expand(input.shape[0], -1, -1).to(input)\n return transform\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef apply_perspective3d(\n input: torch.Tensor, params: Dict[str, torch.Tensor], flags: Dict[str, torch.Tensor]\n) -> torch.Tensor:\n r\"\"\"Perform perspective transform of the given torch.Tensor or batch of tensors.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n params (Dict[str, torch.Tensor]):\n - params['start_points']: Tensor containing [top-left, top-right, bottom-right,\n bottom-left] of the original image with shape Bx8x3.\n - params['end_points']: Tensor containing [top-left, top-right, bottom-right,\n bottom-left] of the transformed image with shape Bx8x3.\n flags (Dict[str, torch.Tensor]):\n - params['interpolation']: Integer tensor. 
NEAREST = 0, BILINEAR = 1.\n - params['align_corners']: Boolean tensor.\n\n Returns:\n torch.Tensor: Perspectively transformed tensor with shape :math:`(*, C, D, H, W)`.\n \"\"\"\n\n _, _, depth, height, width = input.shape\n\n # compute the homography between the input points\n transform: torch.Tensor = compute_perspective_transformation3d(input, params)\n\n out_data: torch.Tensor = input.clone()\n\n # apply the computed transform\n depth, height, width = input.shape[-3:]\n resample_name: str = Resample(flags['interpolation'].item()).name.lower()\n align_corners: bool = cast(bool, flags['align_corners'].item())\n\n out_data = warp_perspective3d(\n input, transform, (depth, height, width), flags=resample_name, align_corners=align_corners\n )\n\n return out_data.view_as(input)\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef compute_perspective_transformation3d(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Compute the perspective transformation matrix :math: `(*, 4, 4)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n params (Dict[str, torch.Tensor]):\n - params['start_points']: Tensor containing [top-left, top-right, bottom-right,\n bottom-left] of the original image with shape Bx8x3.\n - params['end_points']: Tensor containing [top-left, top-right, bottom-right,\n bottom-left] of the transformed image with shape Bx8x3.\n\n Returns:\n torch.Tensor: The perspective transformation matrix :math: `(*, 4, 4)`\n \"\"\"\n perspective_transform: torch.Tensor = get_perspective_transform3d(params['start_points'], params['end_points']).to(\n input\n )\n\n transform: torch.Tensor = K.eye_like(4, input)\n\n transform = perspective_transform\n\n return transform\n\n\n@_deprecation_wrapper\n@_validate_input3d\ndef apply_equalize3d(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Equalize a tensor volume or a batch of tensor volumes with given random parameters.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.\n params (Dict[str, torch.Tensor]): shall be empty.\n\n Returns:\n torch.Tensor: Equalized input with shape :math:`(*, C, D, H, W)`.\n \"\"\"\n\n return equalize3d(input)\n" ]
[ [ "torch.eye", "torch.tensor", "torch.flip" ] ]
christophcc/xarray
[ "132733a917171fcb1f269406eb9e6668cbb7e376" ]
[ "xarray/coding/variables.py" ]
[ "\"\"\"Coders for individual Variable objects.\"\"\"\nimport warnings\nfrom functools import partial\nfrom typing import Any, Hashable\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..core import dtypes, duck_array_ops, indexing\nfrom ..core.pycompat import dask_array_type\nfrom ..core.utils import equivalent\nfrom ..core.variable import Variable\n\n\nclass SerializationWarning(RuntimeWarning):\n \"\"\"Warnings about encoding/decoding issues in serialization.\"\"\"\n\n\nclass VariableCoder:\n \"\"\"Base class for encoding and decoding transformations on variables.\n\n We use coders for transforming variables between xarray's data model and\n a format suitable for serialization. For example, coders apply CF\n conventions for how data should be represented in netCDF files.\n\n Subclasses should implement encode() and decode(), which should satisfy\n the identity ``coder.decode(coder.encode(variable)) == variable``. If any\n options are necessary, they should be implemented as arguments to the\n __init__ method.\n\n The optional name argument to encode() and decode() exists solely for the\n sake of better error messages, and should correspond to the name of\n variables in the underlying store.\n \"\"\"\n\n def encode(\n self, variable: Variable, name: Hashable = None\n ) -> Variable: # pragma: no cover\n \"\"\"Convert an encoded variable to a decoded variable\n \"\"\"\n raise NotImplementedError()\n\n def decode(\n self, variable: Variable, name: Hashable = None\n ) -> Variable: # pragma: no cover\n \"\"\"Convert an decoded variable to a encoded variable\n \"\"\"\n raise NotImplementedError()\n\n\nclass _ElementwiseFunctionArray(indexing.ExplicitlyIndexedNDArrayMixin):\n \"\"\"Lazily computed array holding values of elemwise-function.\n\n Do not construct this object directly: call lazy_elemwise_func instead.\n\n Values are computed upon indexing or coercion to a NumPy array.\n \"\"\"\n\n def __init__(self, array, func, dtype):\n assert not isinstance(array, dask_array_type)\n self.array = indexing.as_indexable(array)\n self.func = func\n self._dtype = dtype\n\n @property\n def dtype(self):\n return np.dtype(self._dtype)\n\n def __getitem__(self, key):\n return type(self)(self.array[key], self.func, self.dtype)\n\n def __array__(self, dtype=None):\n return self.func(self.array)\n\n def __repr__(self):\n return \"%s(%r, func=%r, dtype=%r)\" % (\n type(self).__name__,\n self.array,\n self.func,\n self.dtype,\n )\n\n\ndef lazy_elemwise_func(array, func, dtype):\n \"\"\"Lazily apply an element-wise function to an array.\n\n Parameters\n ----------\n array : any valid value of Variable._data\n func : callable\n Function to apply to indexed slices of an array. For use with dask,\n this should be a pickle-able object.\n dtype : coercible to np.dtype\n Dtype for the result of this function.\n\n Returns\n -------\n Either a dask.array.Array or _ElementwiseFunctionArray.\n \"\"\"\n if isinstance(array, dask_array_type):\n return array.map_blocks(func, dtype=dtype)\n else:\n return _ElementwiseFunctionArray(array, func, dtype)\n\n\ndef unpack_for_encoding(var):\n return var.dims, var.data, var.attrs.copy(), var.encoding.copy()\n\n\ndef unpack_for_decoding(var):\n return var.dims, var._data, var.attrs.copy(), var.encoding.copy()\n\n\ndef safe_setitem(dest, key, value, name=None):\n if key in dest:\n var_str = \" on variable {!r}\".format(name) if name else \"\"\n raise ValueError(\n \"failed to prevent overwriting existing key {} in attrs{}. 
\"\n \"This is probably an encoding field used by xarray to describe \"\n \"how a variable is serialized. To proceed, remove this key from \"\n \"the variable's attributes manually.\".format(key, var_str)\n )\n dest[key] = value\n\n\ndef pop_to(source, dest, key, name=None):\n \"\"\"\n A convenience function which pops a key k from source to dest.\n None values are not passed on. If k already exists in dest an\n error is raised.\n \"\"\"\n value = source.pop(key, None)\n if value is not None:\n safe_setitem(dest, key, value, name=name)\n return value\n\n\ndef _apply_mask(\n data: np.ndarray, encoded_fill_values: list, decoded_fill_value: Any, dtype: Any\n) -> np.ndarray:\n \"\"\"Mask all matching values in a NumPy arrays.\"\"\"\n data = np.asarray(data, dtype=dtype)\n condition = False\n for fv in encoded_fill_values:\n condition |= data == fv\n return np.where(condition, decoded_fill_value, data)\n\n\nclass CFMaskCoder(VariableCoder):\n \"\"\"Mask or unmask fill values according to CF conventions.\"\"\"\n\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n fv = encoding.get(\"_FillValue\")\n mv = encoding.get(\"missing_value\")\n\n if fv is not None and mv is not None and not equivalent(fv, mv):\n raise ValueError(\n \"Variable {!r} has multiple fill values {}. \"\n \"Cannot encode data. \".format(name, [fv, mv])\n )\n\n if fv is not None:\n fill_value = pop_to(encoding, attrs, \"_FillValue\", name=name)\n if not pd.isnull(fill_value):\n data = duck_array_ops.fillna(data, fill_value)\n\n if mv is not None:\n fill_value = pop_to(encoding, attrs, \"missing_value\", name=name)\n if not pd.isnull(fill_value) and fv is None:\n data = duck_array_ops.fillna(data, fill_value)\n\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n raw_fill_values = [\n pop_to(attrs, encoding, attr, name=name)\n for attr in (\"missing_value\", \"_FillValue\")\n ]\n if raw_fill_values:\n encoded_fill_values = {\n fv\n for option in raw_fill_values\n for fv in np.ravel(option)\n if not pd.isnull(fv)\n }\n\n if len(encoded_fill_values) > 1:\n warnings.warn(\n \"variable {!r} has multiple fill values {}, \"\n \"decoding all values to NaN.\".format(name, encoded_fill_values),\n SerializationWarning,\n stacklevel=3,\n )\n\n dtype, decoded_fill_value = dtypes.maybe_promote(data.dtype)\n\n if encoded_fill_values:\n transform = partial(\n _apply_mask,\n encoded_fill_values=encoded_fill_values,\n decoded_fill_value=decoded_fill_value,\n dtype=dtype,\n )\n data = lazy_elemwise_func(data, transform, dtype)\n\n return Variable(dims, data, attrs, encoding)\n\n\ndef _scale_offset_decoding(data, scale_factor, add_offset, dtype):\n data = np.array(data, dtype=dtype, copy=True)\n if scale_factor is not None:\n data *= scale_factor\n if add_offset is not None:\n data += add_offset\n return data\n\n\ndef _choose_float_dtype(dtype, has_offset):\n \"\"\"Return a float dtype that can losslessly represent `dtype` values.\"\"\"\n # Keep float32 as-is. 
Upcast half-precision to single-precision,\n # because float16 is \"intended for storage but not computation\"\n if dtype.itemsize <= 4 and np.issubdtype(dtype, np.floating):\n return np.float32\n # float32 can exactly represent all integers up to 24 bits\n if dtype.itemsize <= 2 and np.issubdtype(dtype, np.integer):\n # A scale factor is entirely safe (vanishing into the mantissa),\n # but a large integer offset could lead to loss of precision.\n # Sensitivity analysis can be tricky, so we just use a float64\n # if there's any offset at all - better unoptimised than wrong!\n if not has_offset:\n return np.float32\n # For all other types and circumstances, we just use float64.\n # (safe because eg. complex numbers are not supported in NetCDF)\n return np.float64\n\n\nclass CFScaleOffsetCoder(VariableCoder):\n \"\"\"Scale and offset variables according to CF conventions.\n\n Follows the formula:\n decode_values = encoded_values * scale_factor + add_offset\n \"\"\"\n\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n if \"scale_factor\" in encoding or \"add_offset\" in encoding:\n dtype = _choose_float_dtype(data.dtype, \"add_offset\" in encoding)\n data = data.astype(dtype=dtype, copy=True)\n if \"add_offset\" in encoding:\n data -= pop_to(encoding, attrs, \"add_offset\", name=name)\n if \"scale_factor\" in encoding:\n data /= pop_to(encoding, attrs, \"scale_factor\", name=name)\n\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"scale_factor\" in attrs or \"add_offset\" in attrs:\n scale_factor = pop_to(attrs, encoding, \"scale_factor\", name=name)\n add_offset = pop_to(attrs, encoding, \"add_offset\", name=name)\n dtype = _choose_float_dtype(data.dtype, \"add_offset\" in attrs)\n transform = partial(\n _scale_offset_decoding,\n scale_factor=scale_factor,\n add_offset=add_offset,\n dtype=dtype,\n )\n data = lazy_elemwise_func(data, transform, dtype)\n\n return Variable(dims, data, attrs, encoding)\n\n\nclass UnsignedIntegerCoder(VariableCoder):\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n # from netCDF best practices\n # https://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html\n # \"_Unsigned = \"true\" to indicate that\n # integer data should be treated as unsigned\"\n if encoding.get(\"_Unsigned\", \"false\") == \"true\":\n pop_to(encoding, attrs, \"_Unsigned\")\n signed_dtype = np.dtype(\"i%s\" % data.dtype.itemsize)\n if \"_FillValue\" in attrs:\n new_fill = signed_dtype.type(attrs[\"_FillValue\"])\n attrs[\"_FillValue\"] = new_fill\n data = duck_array_ops.around(data).astype(signed_dtype)\n\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"_Unsigned\" in attrs:\n unsigned = pop_to(attrs, encoding, \"_Unsigned\")\n\n if data.dtype.kind == \"i\":\n if unsigned == \"true\":\n unsigned_dtype = np.dtype(\"u%s\" % data.dtype.itemsize)\n transform = partial(np.asarray, dtype=unsigned_dtype)\n data = lazy_elemwise_func(data, transform, unsigned_dtype)\n if \"_FillValue\" in attrs:\n new_fill = unsigned_dtype.type(attrs[\"_FillValue\"])\n attrs[\"_FillValue\"] = new_fill\n else:\n warnings.warn(\n \"variable %r has _Unsigned attribute but is not \"\n \"of integer type. 
Ignoring attribute.\" % name,\n SerializationWarning,\n stacklevel=3,\n )\n\n return Variable(dims, data, attrs, encoding)\n" ]
[ [ "numpy.dtype", "numpy.issubdtype", "numpy.asarray", "numpy.ravel", "numpy.array", "pandas.isnull", "numpy.where" ] ]
dixit-dude7/LDAM-DRW
[ "6366f4756d3ac0c6b6db784b7f20e16066967ed4" ]
[ "utils.py" ]
[ "import torch\r\nimport shutil\r\nimport os\r\nimport numpy as np\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.utils.multiclass import unique_labels\r\n\r\nclass ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):\r\n\r\n def __init__(self, dataset, indices=None, num_samples=None):\r\n \r\n # if indices is not provided, \r\n # all elements in the dataset will be considered\r\n self.indices = list(range(len(dataset))) \\\r\n if indices is None else indices\r\n \r\n # if num_samples is not provided, \r\n # draw `len(indices)` samples in each iteration\r\n self.num_samples = len(self.indices) \\\r\n if num_samples is None else num_samples\r\n \r\n # distribution of classes in the dataset \r\n label_to_count = [0] * len(np.unique(dataset.targets))\r\n for idx in self.indices:\r\n label = self._get_label(dataset, idx)\r\n label_to_count[label] += 1\r\n \r\n beta = 0.9999\r\n effective_num = 1.0 - np.power(beta, label_to_count)\r\n per_cls_weights = (1.0 - beta) / np.array(effective_num)\r\n\r\n # weight for each sample\r\n weights = [per_cls_weights[self._get_label(dataset, idx)]\r\n for idx in self.indices]\r\n self.weights = torch.DoubleTensor(weights)\r\n \r\n def _get_label(self, dataset, idx):\r\n return dataset.targets[idx]\r\n \r\n def __iter__(self):\r\n return iter(torch.multinomial(self.weights, self.num_samples, replacement=True).tolist())\r\n\r\n def __len__(self):\r\n return self.num_samples\r\n\r\ndef calc_confusion_mat(val_loader, model, args):\r\n \r\n model.eval()\r\n all_preds = []\r\n all_targets = []\r\n with torch.no_grad():\r\n for i, (input, target) in enumerate(val_loader):\r\n if args.gpu is not None:\r\n input = input.cuda(args.gpu, non_blocking=True)\r\n target = target.cuda(args.gpu, non_blocking=True)\r\n\r\n # compute output\r\n output = model(input)\r\n _, pred = torch.max(output, 1)\r\n all_preds.extend(pred.cpu().numpy())\r\n all_targets.extend(target.cpu().numpy())\r\n cf = confusion_matrix(all_targets, all_preds).astype(float)\r\n\r\n cls_cnt = cf.sum(axis=1)\r\n cls_hit = np.diag(cf)\r\n\r\n cls_acc = cls_hit / cls_cnt\r\n\r\n print('Class Accuracy : ')\r\n print(cls_acc)\r\n classes = [str(x) for x in args.cls_num_list]\r\n plot_confusion_matrix(all_targets, all_preds, classes)\r\n plt.savefig(os.path.join(args.root_log, args.store_name, 'confusion_matrix.png'))\r\n\r\ndef plot_confusion_matrix(y_true, y_pred, classes,\r\n normalize=False,\r\n title=None,\r\n cmap=plt.cm.Blues):\r\n \r\n if not title:\r\n if normalize:\r\n title = 'Normalized confusion matrix'\r\n else:\r\n title = 'Confusion matrix, without normalization'\r\n\r\n # Compute confusion matrix\r\n cm = confusion_matrix(y_true, y_pred)\r\n \r\n fig, ax = plt.subplots()\r\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\r\n ax.figure.colorbar(im, ax=ax)\r\n # We want to show all ticks...\r\n ax.set(xticks=np.arange(cm.shape[1]),\r\n yticks=np.arange(cm.shape[0]),\r\n # ... 
and label them with the respective list entries\r\n xticklabels=classes, yticklabels=classes,\r\n title=title,\r\n ylabel='True label',\r\n xlabel='Predicted label')\r\n\r\n # Rotate the tick labels and set their alignment.\r\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\r\n rotation_mode=\"anchor\")\r\n\r\n # Loop over data dimensions and create text annotations.\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i in range(cm.shape[0]):\r\n for j in range(cm.shape[1]):\r\n ax.text(j, i, format(cm[i, j], fmt),\r\n ha=\"center\", va=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n fig.tight_layout()\r\n return ax\r\n\r\ndef prepare_folders(args):\r\n \r\n folders_util = [args.root_log, args.root_model,\r\n os.path.join(args.root_log, args.store_name),\r\n os.path.join(args.root_model, args.store_name)]\r\n for folder in folders_util:\r\n if not os.path.exists(folder):\r\n print('creating folder ' + folder)\r\n os.mkdir(folder)\r\n\r\ndef save_checkpoint(args, state, is_best):\r\n \r\n filename = '%s/%s/ckpt.pth.tar' % (args.root_model, args.store_name)\r\n torch.save(state, filename)\r\n if is_best:\r\n shutil.copyfile(filename, filename.replace('pth.tar', 'best.pth.tar'))\r\n\r\n\r\nclass AverageMeter(object):\r\n \r\n def __init__(self, name, fmt=':f'):\r\n self.name = name\r\n self.fmt = fmt\r\n self.reset()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r\n\r\n def __str__(self):\r\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\r\n return fmtstr.format(**self.__dict__)\r\n\r\n\r\ndef accuracy(output, target, topk=(1,)):\r\n \r\n with torch.no_grad():\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res" ]
[ [ "numpy.diag", "torch.save", "torch.no_grad", "torch.multinomial", "matplotlib.pyplot.subplots", "sklearn.metrics.confusion_matrix", "torch.DoubleTensor", "numpy.arange", "numpy.power", "torch.max", "matplotlib.use", "numpy.array", "numpy.unique" ] ]
mzhaoshuai/RMI
[ "10a40cdbeb58bdd1bd7125fde73b48b12f9452c7" ]
[ "losses/normal_loss.py" ]
[ "#coding=utf-8\n\n\"\"\"\nImplementation of some commonly used losses.\n\"\"\"\n\n# python 2.X, 3.X compatibility\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\n#import os\n#import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BCECrossEntropyLoss(nn.Module):\n\t\"\"\"\n\tsigmoid with binary cross entropy loss.\n\tconsider the multiclass task as multi binary classification problem.\n\tone-vs-rest way.\n\tSUM over the channel.\n\t\"\"\"\n\tdef __init__(self,\n\t\t\t\t\tnum_classes=21,\n\t\t\t\t\tignore_index=255):\n\t\tsuper(BCECrossEntropyLoss, self).__init__()\n\t\tself.num_classes = num_classes\n\t\tself.ignore_index = ignore_index\n\n\tdef forward(self, logits_4D, labels_4D):\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tlogits_4D \t:\t[N, C, H, W], dtype=float32\n\t\t\tlabels_4D \t:\t[N, H, W], dtype=long\n\t\t\"\"\"\n\t\tlabel_flat = labels_4D.view(-1).requires_grad_(False)\n\t\tlabel_mask_flat = label_flat < self.num_classes\n\t\tonehot_label_flat = F.one_hot(label_flat * label_mask_flat.long(), num_classes=self.num_classes).float()\n\t\tonehot_label_flat = onehot_label_flat.requires_grad_(False)\n\t\tlogits_flat = logits_4D.permute(0, 2, 3, 1).contiguous().view([-1, self.num_classes])\n\n\t\t# binary loss, multiplied by the not_ignore_mask\n\t\tlabel_mask_flat = label_mask_flat.float()\n\t\tvalid_pixels = torch.sum(label_mask_flat)\n\t\tbinary_loss = F.binary_cross_entropy_with_logits(logits_flat,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttarget=onehot_label_flat,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tweight=label_mask_flat.unsqueeze(dim=1),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treduction='sum')\n\t\tbce_loss = torch.div(binary_loss, valid_pixels + 1.0)\n\t\treturn bce_loss\n" ]
[ [ "torch.sum", "torch.div" ] ]
Learning-and-Intelligent-Systems/predicators
[ "0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e" ]
[ "src/envs/painting.py" ]
[ "\"\"\"Painting domain, which allows for two different grasps on an object (side or\ntop).\n\nSide grasping allows for placing into the shelf, and top grasping allows\nfor placing into the box. The box has a lid which may need to be opened;\nthis lid is NOT modeled by any of the given predicates.\n\"\"\"\n\nfrom typing import Any, ClassVar, Dict, List, Optional, Sequence, Set, Tuple, \\\n Union\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom gym.spaces import Box\nfrom matplotlib import patches\n\nfrom predicators.src import utils\nfrom predicators.src.envs import BaseEnv\nfrom predicators.src.settings import CFG\nfrom predicators.src.structs import Action, Array, GroundAtom, Object, \\\n ParameterizedOption, Predicate, State, Task, Type\n\n\nclass PaintingEnv(BaseEnv):\n \"\"\"Painting domain.\"\"\"\n # Parameters that aren't important enough to need to clog up settings.py\n table_lb: ClassVar[float] = -10.1\n table_ub: ClassVar[float] = -1.0\n table_height: ClassVar[float] = 0.2\n table_x: ClassVar[float] = 1.65\n shelf_l: ClassVar[float] = 2.0 # shelf length\n shelf_lb: ClassVar[float] = 1.\n shelf_ub: ClassVar[float] = shelf_lb + shelf_l - 0.05\n shelf_x: ClassVar[float] = 1.65\n shelf_y: ClassVar[float] = (shelf_lb + shelf_ub) / 2.0\n box_s: ClassVar[float] = 0.8 # side length\n box_y: ClassVar[float] = 0.5 # y coordinate\n box_lb: ClassVar[float] = box_y - box_s / 10\n box_ub: ClassVar[float] = box_y + box_s / 10\n box_x: ClassVar[float] = 1.65\n env_lb: ClassVar[float] = min(table_lb, shelf_lb, box_lb)\n env_ub: ClassVar[float] = max(table_ub, shelf_ub, box_ub)\n obj_height: ClassVar[float] = 0.13\n obj_radius: ClassVar[float] = 0.03\n obj_x: ClassVar[float] = 1.65\n obj_z: ClassVar[float] = table_height + obj_height / 2\n pick_tol: ClassVar[float] = 1e-2\n color_tol: ClassVar[float] = 1e-2\n wetness_tol: ClassVar[float] = 0.5\n dirtiness_tol: ClassVar[float] = 0.5\n open_fingers: ClassVar[float] = 0.8\n top_grasp_thresh: ClassVar[float] = 0.5 + 1e-2\n side_grasp_thresh: ClassVar[float] = 0.5 - 1e-2\n robot_x: ClassVar[float] = table_x - 0.5\n nextto_thresh: ClassVar[float] = 1.0\n on_table_height_tol: ClassVar[float] = 5e-02\n\n def __init__(self) -> None:\n super().__init__()\n # Types\n self._obj_type = Type(\"obj\", [\n \"pose_x\", \"pose_y\", \"pose_z\", \"dirtiness\", \"wetness\", \"color\",\n \"grasp\", \"held\"\n ])\n self._box_type = Type(\"box\", [\"pose_x\", \"pose_y\", \"color\"])\n self._lid_type = Type(\"lid\", [\"is_open\"])\n self._shelf_type = Type(\"shelf\", [\"pose_x\", \"pose_y\", \"color\"])\n self._robot_type = Type(\"robot\", [\"pose_x\", \"pose_y\", \"fingers\"])\n # Predicates\n self._InBox = Predicate(\"InBox\", [self._obj_type, self._box_type],\n self._InBox_holds)\n self._InShelf = Predicate(\"InShelf\",\n [self._obj_type, self._shelf_type],\n self._InShelf_holds)\n self._IsBoxColor = Predicate(\"IsBoxColor\",\n [self._obj_type, self._box_type],\n self._IsBoxColor_holds)\n self._IsShelfColor = Predicate(\"IsShelfColor\",\n [self._obj_type, self._shelf_type],\n self._IsShelfColor_holds)\n self._GripperOpen = Predicate(\"GripperOpen\", [self._robot_type],\n self._GripperOpen_holds)\n self._OnTable = Predicate(\"OnTable\", [self._obj_type],\n self._OnTable_holds)\n self._NotOnTable = Predicate(\"NotOnTable\", [self._obj_type],\n self._NotOnTable_holds)\n self._HoldingTop = Predicate(\"HoldingTop\", [self._obj_type],\n self._HoldingTop_holds)\n self._HoldingSide = Predicate(\"HoldingSide\", [self._obj_type],\n 
self._HoldingSide_holds)\n self._Holding = Predicate(\"Holding\", [self._obj_type],\n self._Holding_holds)\n self._IsWet = Predicate(\"IsWet\", [self._obj_type], self._IsWet_holds)\n self._IsDry = Predicate(\"IsDry\", [self._obj_type], self._IsDry_holds)\n self._IsDirty = Predicate(\"IsDirty\", [self._obj_type],\n self._IsDirty_holds)\n self._IsClean = Predicate(\"IsClean\", [self._obj_type],\n self._IsClean_holds)\n # Options\n self._Pick = utils.SingletonParameterizedOption(\n # variables: [robot, object to pick]\n # params: [grasp]\n \"Pick\",\n self._Pick_policy,\n types=[self._robot_type, self._obj_type],\n params_space=Box(np.array([-0.01], dtype=np.float32),\n np.array([1.01], dtype=np.float32)))\n self._Wash = utils.SingletonParameterizedOption(\n # variables: [robot]\n # params: []\n \"Wash\",\n self._Wash_policy,\n types=[self._robot_type])\n self._Dry = utils.SingletonParameterizedOption(\n # variables: [robot]\n # params: []\n \"Dry\",\n self._Dry_policy,\n types=[self._robot_type])\n self._Paint = utils.SingletonParameterizedOption(\n # variables: [robot]\n # params: [new color]\n \"Paint\",\n self._Paint_policy,\n types=[self._robot_type],\n params_space=Box(-0.01, 1.01, (1, )))\n self._Place = utils.SingletonParameterizedOption(\n # variables: [robot]\n # params: [absolute x, absolute y, absolute z]\n \"Place\",\n self._Place_policy,\n types=[self._robot_type],\n params_space=Box(\n np.array([self.obj_x - 1e-2, self.env_lb, self.obj_z - 1e-2],\n dtype=np.float32),\n np.array([self.obj_x + 1e-2, self.env_ub, self.obj_z + 1e-2],\n dtype=np.float32)))\n self._OpenLid = utils.SingletonParameterizedOption(\n # variables: [robot, lid]\n # params: []\n \"OpenLid\",\n self._OpenLid_policy,\n types=[self._robot_type, self._lid_type])\n # Static objects (always exist no matter the settings).\n self._box = Object(\"receptacle_box\", self._box_type)\n self._lid = Object(\"box_lid\", self._lid_type)\n self._shelf = Object(\"receptacle_shelf\", self._shelf_type)\n self._robot = Object(\"robby\", self._robot_type)\n\n @classmethod\n def get_name(cls) -> str:\n return \"painting\"\n\n def simulate(self, state: State, action: Action) -> State:\n assert self.action_space.contains(action.arr)\n arr = action.arr\n # Infer which transition function to follow\n wash_affinity = 0 if arr[5] > 0.5 else abs(arr[5] - 0.5)\n dry_affinity = 0 if arr[6] > 0.5 else abs(arr[6] - 0.5)\n paint_affinity = min(abs(arr[7] - state.get(self._box, \"color\")),\n abs(arr[7] - state.get(self._shelf, \"color\")))\n affinities = [\n (abs(1 - arr[4]), self._transition_pick_or_openlid),\n (wash_affinity, self._transition_wash),\n (dry_affinity, self._transition_dry),\n (paint_affinity, self._transition_paint),\n (abs(-1 - arr[4]), self._transition_place),\n ]\n _, transition_fn = min(affinities, key=lambda item: item[0])\n return transition_fn(state, action)\n\n def _transition_pick_or_openlid(self, state: State,\n action: Action) -> State:\n x, y, z, grasp = action.arr[:4]\n next_state = state.copy()\n # Open lid\n if self.box_lb < y < self.box_ub:\n next_state.set(self._lid, \"is_open\", 1.0)\n return next_state\n held_obj = self._get_held_object(state)\n # Cannot pick if already holding something\n if held_obj is not None:\n return next_state\n # Cannot pick if object pose not on table\n if not self.table_lb < y < self.table_ub:\n return next_state\n # Cannot pick if grasp is invalid\n if self.side_grasp_thresh < grasp < self.top_grasp_thresh:\n return next_state\n # Check if some object is close enough to (x, y, 
z)\n target_obj = self._get_object_at_xyz(state, x, y, z)\n if target_obj is None:\n return next_state\n # Execute pick\n next_state.set(self._robot, \"fingers\", 0.0)\n next_state.set(target_obj, \"grasp\", grasp)\n next_state.set(target_obj, \"held\", 1.0)\n return next_state\n\n def _transition_wash(self, state: State, action: Action) -> State:\n target_wetness = action.arr[5]\n next_state = state.copy()\n held_obj = self._get_held_object(state)\n # Can only wash if holding obj\n if held_obj is None:\n return next_state\n # Execute wash\n cur_dirtiness = state.get(held_obj, \"dirtiness\")\n next_dirtiness = max(cur_dirtiness - target_wetness, 0.0)\n next_state.set(held_obj, \"wetness\", target_wetness)\n next_state.set(held_obj, \"dirtiness\", next_dirtiness)\n return next_state\n\n def _transition_dry(self, state: State, action: Action) -> State:\n target_wetness = max(1.0 - action.arr[6], 0.0)\n next_state = state.copy()\n held_obj = self._get_held_object(state)\n # Can only dry if holding obj\n if held_obj is None:\n return next_state\n # Execute dry\n next_state.set(held_obj, \"wetness\", target_wetness)\n return next_state\n\n def _transition_paint(self, state: State, action: Action) -> State:\n color = action.arr[7]\n next_state = state.copy()\n # Can only paint if holding obj\n held_obj = self._get_held_object(state)\n if held_obj is None:\n return next_state\n # Can only paint if dry and clean\n if state.get(held_obj, \"dirtiness\") > self.dirtiness_tol or \\\n state.get(held_obj, \"wetness\") > self.wetness_tol:\n return next_state\n # Execute paint\n next_state.set(held_obj, \"color\", color)\n return next_state\n\n def _transition_place(self, state: State, action: Action) -> State:\n # Action args are target pose for held obj\n x, y, z = action.arr[:3]\n next_state = state.copy()\n # Can only place if holding obj\n held_obj = self._get_held_object(state)\n if held_obj is None:\n return next_state\n # Detect table vs shelf vs box place\n if self.table_lb < y < self.table_ub:\n receptacle = \"table\"\n elif self.shelf_lb < y < self.shelf_ub:\n receptacle = \"shelf\"\n elif self.box_lb < y < self.box_ub:\n receptacle = \"box\"\n else:\n # Cannot place outside of table, shelf, or box\n return next_state\n if receptacle == \"box\" and state.get(self._lid, \"is_open\") < 0.5:\n # Cannot place in box if lid is not open\n raise utils.EnvironmentFailure(\"Box lid is closed.\",\n {\"offending_objects\": {self._lid}})\n # Detect top grasp vs side grasp\n grasp = state.get(held_obj, \"grasp\")\n if grasp > self.top_grasp_thresh:\n top_or_side = \"top\"\n elif grasp < self.side_grasp_thresh:\n top_or_side = \"side\"\n # Can only place in shelf if side grasping, box if top grasping. 
If the\n # receptacle is table, we don't care what kind of grasp it is.\n if receptacle == \"shelf\" and top_or_side != \"side\":\n return next_state\n if receptacle == \"box\" and top_or_side != \"top\":\n return next_state\n # Detect collisions\n collider = self._get_object_at_xyz(state, x, y, z)\n if receptacle == \"table\" and \\\n collider is not None and \\\n collider != held_obj:\n return next_state\n # Execute place\n next_state.set(self._robot, \"fingers\", 1.0)\n next_state.set(held_obj, \"pose_x\", x)\n next_state.set(held_obj, \"pose_y\", y)\n if self._update_z_poses:\n if receptacle == \"table\" and np.allclose(\n z,\n self.table_height + self.obj_height / 2,\n rtol=self.on_table_height_tol):\n # If placing on table, snap the object to the correct z\n # position as long as the place location is close enough\n # (measured by rtol) to the correct table height. This\n # is necessary for learned samplers to have any hope of\n # placing objects on the table.\n next_state.set(held_obj, \"pose_z\",\n self.table_height + self.obj_height / 2)\n else:\n next_state.set(held_obj, \"pose_z\", z)\n next_state.set(held_obj, \"grasp\", 0.5)\n next_state.set(held_obj, \"held\", 0.0)\n return next_state\n\n @property\n def _num_objects_train(self) -> List[int]:\n return CFG.painting_num_objs_train\n\n @property\n def _num_objects_test(self) -> List[int]:\n return CFG.painting_num_objs_test\n\n def _generate_train_tasks(self) -> List[Task]:\n return self._get_tasks(num_tasks=CFG.num_train_tasks,\n num_objs_lst=self._num_objects_train,\n rng=self._train_rng)\n\n def _generate_test_tasks(self) -> List[Task]:\n return self._get_tasks(num_tasks=CFG.num_test_tasks,\n num_objs_lst=self._num_objects_test,\n rng=self._test_rng)\n\n @property\n def predicates(self) -> Set[Predicate]:\n return {\n self._InBox, self._InShelf, self._IsBoxColor, self._IsShelfColor,\n self._GripperOpen, self._OnTable, self._NotOnTable,\n self._HoldingTop, self._HoldingSide, self._Holding, self._IsWet,\n self._IsDry, self._IsDirty, self._IsClean\n }\n\n @property\n def goal_predicates(self) -> Set[Predicate]:\n return {\n self._InBox, self._InShelf, self._IsBoxColor, self._IsShelfColor\n }\n\n @property\n def types(self) -> Set[Type]:\n return {\n self._obj_type, self._box_type, self._lid_type, self._shelf_type,\n self._robot_type\n }\n\n @property\n def options(self) -> Set[ParameterizedOption]:\n return {\n self._Pick, self._Wash, self._Dry, self._Paint, self._Place,\n self._OpenLid\n }\n\n @property\n def action_space(self) -> Box:\n # Actions are 8-dimensional vectors:\n # [x, y, z, grasp, pickplace, water level, heat level, color]\n # Note that pickplace is 1 for pick, -1 for place, and 0 otherwise,\n # while grasp, water level, heat level, and color are in [0, 1].\n # We set the lower bound for z to 0.0, rather than self.obj_z - 1e-2,\n # because in RepeatedNextToPainting, we use this dimension to check\n # affinity of the move action\n lowers = np.array(\n [self.obj_x - 1e-2, self.env_lb, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0],\n dtype=np.float32)\n uppers = np.array([\n self.obj_x + 1e-2, self.env_ub, self.obj_z + 1e-2, 1.0, 1.0, 1.0,\n 1.0, 1.0\n ],\n dtype=np.float32)\n return Box(lowers, uppers)\n\n def render_state_plt(\n self,\n state: State,\n task: Task,\n action: Optional[Action] = None,\n caption: Optional[str] = None) -> matplotlib.figure.Figure:\n fig, ax = plt.subplots(1, 1)\n objs = [o for o in state if o.is_instance(self._obj_type)]\n denom = (self.env_ub - self.env_lb)\n # The factor of \"2\" here should 
actually be 0.5, but this\n # makes the objects too small, so we'll let it be bigger.\n # Don't be alarmed if objects seem to be intersecting in\n # the resulting videos.\n r = 2 * self.obj_radius / denom\n h = 2 * self.obj_height / denom\n z = (self.obj_z - self.env_lb) / denom\n # Draw box\n box_color = state.get(self._box, \"color\")\n box_lower = (self.box_lb - self.obj_radius - self.env_lb) / denom\n box_upper = (self.box_ub + self.obj_radius - self.env_lb) / denom\n rect = plt.Rectangle((box_lower, z - h),\n box_upper - box_lower,\n 2 * h,\n facecolor=[box_color, 0, 0],\n alpha=0.25)\n ax.add_patch(rect)\n # Draw box lid\n if state.get(self._lid, \"is_open\") < 0.5:\n plt.plot([box_lower, box_upper], [z + h, z + h],\n color=[box_color, 0, 0])\n # Draw shelf\n shelf_color = state.get(self._shelf, \"color\")\n shelf_lower = (self.shelf_lb - self.obj_radius - self.env_lb) / denom\n shelf_upper = (self.shelf_ub + self.obj_radius - self.env_lb) / denom\n rect = plt.Rectangle((shelf_lower, z - h),\n shelf_upper - shelf_lower,\n 2 * h,\n facecolor=[shelf_color, 0, 0],\n alpha=0.25)\n ax.add_patch(rect)\n # Draw objects\n held_obj = self._get_held_object(state)\n for obj in sorted(objs):\n x = state.get(obj, \"pose_x\")\n y = state.get(obj, \"pose_y\")\n z = state.get(obj, \"pose_z\")\n facecolor: Union[None, str, List[Any]] = None\n if state.get(obj, \"wetness\") > self.wetness_tol and \\\n state.get(obj, \"dirtiness\") < self.dirtiness_tol:\n # wet and clean\n facecolor = \"blue\"\n elif state.get(obj, \"wetness\") < self.wetness_tol and \\\n state.get(obj, \"dirtiness\") > self.dirtiness_tol:\n # dry and dirty\n facecolor = \"green\"\n elif state.get(obj, \"wetness\") < self.wetness_tol and \\\n state.get(obj, \"dirtiness\") < self.dirtiness_tol:\n # dry and clean\n facecolor = \"cyan\"\n obj_color = state.get(obj, \"color\")\n if obj_color > 0:\n facecolor = [obj_color, 0, 0]\n if held_obj == obj:\n assert state.get(self._robot, \"fingers\") < self.open_fingers\n grasp = state.get(held_obj, \"grasp\")\n assert grasp < self.side_grasp_thresh or \\\n grasp > self.top_grasp_thresh\n edgecolor = (\"yellow\"\n if grasp < self.side_grasp_thresh else \"orange\")\n else:\n edgecolor = \"gray\"\n # Normalize poses to [0, 1]\n x = (x - self.env_lb) / denom\n y = (y - self.env_lb) / denom\n z = (z - self.env_lb) / denom\n # Plot as rectangle\n rect = patches.Rectangle((y - r, z - h),\n 2 * r,\n 2 * h,\n zorder=-x,\n linewidth=1,\n edgecolor=edgecolor,\n facecolor=facecolor)\n ax.add_patch(rect)\n ax.set_xlim(-0.1, 1.1)\n ax.set_ylim(0.6, 1.0)\n title = (\"blue = wet+clean, green = dry+dirty, cyan = dry+clean;\\n\"\n \"yellow border = side grasp, orange border = top grasp\")\n if caption is not None:\n title += f\";\\n{caption}\"\n plt.suptitle(title, fontsize=12, wrap=True)\n plt.tight_layout()\n return fig\n\n @property\n def _max_objs_in_goal(self) -> int:\n return CFG.painting_max_objs_in_goal\n\n @property\n def _update_z_poses(self) -> bool:\n return False\n\n def _get_tasks(self, num_tasks: int, num_objs_lst: List[int],\n rng: np.random.Generator) -> List[Task]:\n tasks = []\n for i in range(num_tasks):\n num_objs = num_objs_lst[i % len(num_objs_lst)]\n data = {}\n # Initialize robot pos with open fingers\n robot_init_y = rng.uniform(self.table_lb, self.table_ub)\n data[self._robot] = np.array([self.robot_x, robot_init_y, 1.0],\n dtype=np.float32)\n # Sample distinct colors for shelf and box\n color1 = rng.uniform(0.2, 0.4)\n color2 = rng.uniform(0.6, 1.0)\n if rng.choice(2):\n box_color, 
shelf_color = color1, color2\n else:\n shelf_color, box_color = color1, color2\n # Create box, lid, and shelf objects\n lid_is_open = int(rng.uniform() < CFG.painting_lid_open_prob)\n data[self._box] = np.array([self.box_x, self.box_y, box_color],\n dtype=np.float32)\n data[self._lid] = np.array([lid_is_open], dtype=np.float32)\n data[self._shelf] = np.array(\n [self.shelf_x, self.shelf_y, shelf_color], dtype=np.float32)\n # Create moveable objects and goal\n objs = []\n obj_poses: List[Tuple[float, float, float]] = []\n goal = set()\n assert CFG.painting_goal_receptacles in (\"box_and_shelf\", \"box\",\n \"shelf\")\n if CFG.painting_goal_receptacles == \"shelf\":\n # No box; all max_objs_in_goal objects must go in the shelf\n num_objs_in_shelf = self._max_objs_in_goal\n else:\n # The last object is destined for the box, so the remaining\n # (max_objs_in_goal - 1) objects must go in the shelf\n num_objs_in_shelf = self._max_objs_in_goal - 1\n for j in range(num_objs):\n obj = Object(f\"obj{j}\", self._obj_type)\n objs.append(obj)\n pose = self._sample_initial_object_pose(obj_poses, rng)\n obj_poses.append(pose)\n # Start out wet and clean, dry and dirty, or dry and clean\n choice = rng.choice(3)\n if choice == 0:\n wetness = 0.0\n dirtiness = rng.uniform(0.5, 1.)\n elif choice == 1:\n wetness = rng.uniform(0.5, 1.)\n dirtiness = 0.0\n else:\n wetness = 0.0\n dirtiness = 0.0\n color = 0.0\n grasp = 0.5\n held = 0.0\n data[obj] = np.array([\n pose[0], pose[1], pose[2], dirtiness, wetness, color,\n grasp, held\n ],\n dtype=np.float32)\n if CFG.painting_goal_receptacles in (\n \"box_and_shelf\", \"box\") and j == num_objs - 1:\n # This object must go in the box\n # NOTE: the box can only fit one object\n goal.add(GroundAtom(self._InBox, [obj, self._box]))\n goal.add(GroundAtom(self._IsBoxColor, [obj, self._box]))\n elif CFG.painting_goal_receptacles in (\n \"box_and_shelf\", \"shelf\") and j < num_objs_in_shelf:\n # This object must go in the shelf\n # NOTE: any number of objects can fit in the shelf\n goal.add(GroundAtom(self._InShelf, [obj, self._shelf]))\n goal.add(GroundAtom(self._IsShelfColor,\n [obj, self._shelf]))\n assert len(goal) <= 2 * self._max_objs_in_goal\n state = State(data)\n # Sometimes start out holding an object, possibly with the wrong\n # grip, so that we'll have to put it on the table and regrasp\n if rng.uniform() < CFG.painting_initial_holding_prob:\n grasp = rng.choice([0.0, 1.0])\n target_obj = objs[rng.choice(len(objs))]\n state.set(self._robot, \"fingers\", 0.0)\n state.set(target_obj, \"grasp\", grasp)\n state.set(target_obj, \"held\", 1.0)\n state.set(target_obj, \"pose_y\",\n state.get(self._robot, \"pose_y\"))\n if self._update_z_poses:\n state.set(target_obj, \"pose_z\",\n state.get(target_obj, \"pose_z\") + 1.0)\n tasks.append(Task(state, goal))\n return tasks\n\n def _sample_initial_object_pose(\n self, existing_poses: List[Tuple[float, float, float]],\n rng: np.random.Generator) -> Tuple[float, float, float]:\n existing_ys = [p[1] for p in existing_poses]\n while True:\n this_y = rng.uniform(self.table_lb, self.table_ub)\n if all(\n abs(this_y - other_y) > 3.5 * self.obj_radius\n for other_y in existing_ys):\n return (self.obj_x, this_y,\n self.table_height + self.obj_height / 2)\n\n def _Pick_policy(self, state: State, memory: Dict,\n objects: Sequence[Object], params: Array) -> Action:\n del memory # unused\n _, obj = objects\n obj_x = state.get(obj, \"pose_x\")\n obj_y = state.get(obj, \"pose_y\")\n obj_z = state.get(obj, \"pose_z\")\n grasp, = params\n 
arr = np.array([obj_x, obj_y, obj_z, grasp, 1.0, 0.0, 0.0, 0.0],\n dtype=np.float32)\n # The grasp could cause the action to go out of bounds, so we clip\n # it back into the bounds for safety.\n arr = np.clip(arr, self.action_space.low, self.action_space.high)\n return Action(arr)\n\n def _Wash_policy(self, state: State, memory: Dict,\n objects: Sequence[Object], params: Array) -> Action:\n del state, memory, objects, params # unused\n arr = np.array(\n [self.obj_x, self.table_lb, self.obj_z, 0.0, 0.0, 1.0, 0.0, 0.0],\n dtype=np.float32)\n return Action(arr)\n\n def _Dry_policy(self, state: State, memory: Dict,\n objects: Sequence[Object], params: Array) -> Action:\n del state, memory, objects, params # unused\n arr = np.array(\n [self.obj_x, self.table_lb, self.obj_z, 0.0, 0.0, 0.0, 1.0, 0.0],\n dtype=np.float32)\n return Action(arr)\n\n def _Paint_policy(self, state: State, memory: Dict,\n objects: Sequence[Object], params: Array) -> Action:\n del state, memory, objects # unused\n new_color, = params\n new_color = min(max(new_color, 0.0), 1.0)\n arr = np.array([\n self.obj_x, self.table_lb, self.obj_z, 0.0, 0.0, 0.0, 0.0,\n new_color\n ],\n dtype=np.float32)\n return Action(arr)\n\n @staticmethod\n def _Place_policy(state: State, memory: Dict, objects: Sequence[Object],\n params: Array) -> Action:\n del state, memory, objects # unused\n x, y, z = params\n arr = np.array([x, y, z, 0.5, -1.0, 0.0, 0.0, 0.0], dtype=np.float32)\n return Action(arr)\n\n def _OpenLid_policy(self, state: State, memory: Dict,\n objects: Sequence[Object], params: Array) -> Action:\n del state, memory, objects, params # unused\n arr = np.array([\n self.obj_x, (self.box_lb + self.box_ub) / 2, self.obj_z, 0.0, 1.0,\n 0.0, 0.0, 0.0\n ],\n dtype=np.float32)\n return Action(arr)\n\n def _InBox_holds(self, state: State, objects: Sequence[Object]) -> bool:\n obj, _ = objects\n # If the object is held, not yet in box\n if self._obj_is_held(state, obj):\n return False\n # Check pose of object\n obj_y = state.get(obj, \"pose_y\")\n return self.box_lb < obj_y < self.box_ub\n\n def _InShelf_holds(self, state: State, objects: Sequence[Object]) -> bool:\n obj, _ = objects\n # If the object is held, not yet in shelf\n if self._obj_is_held(state, obj):\n return False\n # Check pose of object\n obj_y = state.get(obj, \"pose_y\")\n return self.shelf_lb < obj_y < self.shelf_ub\n\n def _IsBoxColor_holds(self, state: State,\n objects: Sequence[Object]) -> bool:\n obj, box = objects\n return abs(state.get(obj, \"color\") -\n state.get(box, \"color\")) < self.color_tol\n\n def _IsShelfColor_holds(self, state: State,\n objects: Sequence[Object]) -> bool:\n obj, shelf = objects\n return abs(state.get(obj, \"color\") -\n state.get(shelf, \"color\")) < self.color_tol\n\n def _GripperOpen_holds(self, state: State,\n objects: Sequence[Object]) -> bool:\n robot, = objects\n fingers = state.get(robot, \"fingers\")\n return fingers >= self.open_fingers\n\n def _OnTable_holds(self, state: State, objects: Sequence[Object]) -> bool:\n obj, = objects\n obj_y = state.get(obj, \"pose_y\")\n if not self.table_lb < obj_y < self.table_ub:\n return False\n # Note that obj_z is not updated in this class, but it may be updated\n # by subclasses by overriding self._update_z_poses.\n obj_z = state.get(obj, \"pose_z\")\n if not np.allclose(obj_z, self.table_height + self.obj_height / 2):\n assert self._update_z_poses\n return False\n return True\n\n def _NotOnTable_holds(self, state: State,\n objects: Sequence[Object]) -> bool:\n return not 
self._OnTable_holds(state, objects)\n\n def _HoldingTop_holds(self, state: State,\n objects: Sequence[Object]) -> bool:\n obj, = objects\n grasp = state.get(obj, \"grasp\")\n return grasp > self.top_grasp_thresh\n\n def _HoldingSide_holds(self, state: State,\n objects: Sequence[Object]) -> bool:\n obj, = objects\n grasp = state.get(obj, \"grasp\")\n return grasp < self.side_grasp_thresh\n\n def _Holding_holds(self, state: State, objects: Sequence[Object]) -> bool:\n obj, = objects\n return self._obj_is_held(state, obj)\n\n def _IsWet_holds(self, state: State, objects: Sequence[Object]) -> bool:\n obj, = objects\n return state.get(obj, \"wetness\") > self.wetness_tol\n\n def _IsDry_holds(self, state: State, objects: Sequence[Object]) -> bool:\n obj, = objects\n return not self._IsWet_holds(state, [obj])\n\n def _IsDirty_holds(self, state: State, objects: Sequence[Object]) -> bool:\n obj, = objects\n return state.get(obj, \"dirtiness\") > self.dirtiness_tol\n\n def _IsClean_holds(self, state: State, objects: Sequence[Object]) -> bool:\n obj, = objects\n return not self._IsDirty_holds(state, [obj])\n\n def _get_held_object(self, state: State) -> Optional[Object]:\n for obj in state:\n if obj.type != self._obj_type:\n continue\n if self._obj_is_held(state, obj):\n return obj\n return None\n\n def _obj_is_held(self, state: State, obj: Object) -> bool:\n # These two pieces of information are redundant. We include\n # the \"held\" feature only because it allows the Holding\n # predicate to be expressed with a single inequality.\n # Either feature can be used to implement this method.\n grasp = state.get(obj, \"grasp\")\n held_feat = state.get(obj, \"held\")\n is_held = (grasp > self.top_grasp_thresh\n or grasp < self.side_grasp_thresh)\n assert is_held == (held_feat > 0.5) # ensure redundancy\n return is_held\n\n def _get_object_at_xyz(self, state: State, x: float, y: float,\n z: float) -> Optional[Object]:\n target_obj = None\n for obj in state:\n if obj.type != self._obj_type:\n continue\n if np.allclose([x, y, z], [\n state.get(obj, \"pose_x\"),\n state.get(obj, \"pose_y\"),\n state.get(obj, \"pose_z\")\n ],\n atol=self.pick_tol):\n target_obj = obj\n return target_obj\n" ]
[ [ "numpy.allclose", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplots", "numpy.clip", "matplotlib.patches.Rectangle", "matplotlib.pyplot.suptitle", "numpy.array", "matplotlib.pyplot.plot", "matplotlib.pyplot.Rectangle" ] ]
leggitta/mne-python
[ "11fb55c41b7c2cc800eb3406d9e44cabf00fc027" ]
[ "mne/fixes.py" ]
[ "\"\"\"Compatibility fixes for older version of python, numpy and scipy\n\nIf you add content to this file, please give the version of the package\nat which the fixe is no longer needed.\n\n# XXX : copied from scikit-learn\n\n\"\"\"\n# Authors: Emmanuelle Gouillart <[email protected]>\n# Gael Varoquaux <[email protected]>\n# Fabian Pedregosa <[email protected]>\n# Lars Buitinck <[email protected]>\n# License: BSD\n\nfrom __future__ import division\nimport collections\nfrom operator import itemgetter\nimport inspect\n\nimport warnings\nimport numpy as np\nimport scipy\nfrom scipy import linalg, sparse\nfrom math import ceil, log\nfrom numpy.fft import irfft\nfrom distutils.version import LooseVersion\nfrom functools import partial\nfrom .externals import six\nfrom .externals.six.moves import copyreg\nfrom gzip import GzipFile\n\n\n###############################################################################\n# Misc\n\nclass gzip_open(GzipFile): # python2.6 doesn't have context managing\n\n def __enter__(self):\n if hasattr(GzipFile, '__enter__'):\n return GzipFile.__enter__(self)\n else:\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n if hasattr(GzipFile, '__exit__'):\n return GzipFile.__exit__(self, exc_type, exc_value, traceback)\n else:\n return self.close()\n\n\nclass _Counter(collections.defaultdict):\n \"\"\"Partial replacement for Python 2.7 collections.Counter.\"\"\"\n def __init__(self, iterable=(), **kwargs):\n super(_Counter, self).__init__(int, **kwargs)\n self.update(iterable)\n\n def most_common(self):\n return sorted(six.iteritems(self), key=itemgetter(1), reverse=True)\n\n def update(self, other):\n \"\"\"Adds counts for elements in other\"\"\"\n if isinstance(other, self.__class__):\n for x, n in six.iteritems(other):\n self[x] += n\n else:\n for x in other:\n self[x] += 1\n\ntry:\n Counter = collections.Counter\nexcept AttributeError:\n Counter = _Counter\n\n\ndef _unique(ar, return_index=False, return_inverse=False):\n \"\"\"A replacement for the np.unique that appeared in numpy 1.4.\n\n While np.unique existed long before, keyword return_inverse was\n only added in 1.4.\n \"\"\"\n try:\n ar = ar.flatten()\n except AttributeError:\n if not return_inverse and not return_index:\n items = sorted(set(ar))\n return np.asarray(items)\n else:\n ar = np.asarray(ar).flatten()\n\n if ar.size == 0:\n if return_inverse and return_index:\n return ar, np.empty(0, np.bool), np.empty(0, np.bool)\n elif return_inverse or return_index:\n return ar, np.empty(0, np.bool)\n else:\n return ar\n\n if return_inverse or return_index:\n perm = ar.argsort()\n aux = ar[perm]\n flag = np.concatenate(([True], aux[1:] != aux[:-1]))\n if return_inverse:\n iflag = np.cumsum(flag) - 1\n iperm = perm.argsort()\n if return_index:\n return aux[flag], perm[flag], iflag[iperm]\n else:\n return aux[flag], iflag[iperm]\n else:\n return aux[flag], perm[flag]\n\n else:\n ar.sort()\n flag = np.concatenate(([True], ar[1:] != ar[:-1]))\n return ar[flag]\n\nif LooseVersion(np.__version__) < LooseVersion('1.5'):\n unique = _unique\nelse:\n unique = np.unique\n\n\ndef _bincount(X, weights=None, minlength=None):\n \"\"\"Replacing np.bincount in numpy < 1.6 to provide minlength.\"\"\"\n result = np.bincount(X, weights)\n if minlength is None or len(result) >= minlength:\n return result\n out = np.zeros(minlength, np.int)\n out[:len(result)] = result\n return out\n\nif LooseVersion(np.__version__) < LooseVersion('1.6'):\n bincount = _bincount\nelse:\n bincount = np.bincount\n\n\ndef 
_copysign(x1, x2):\n \"\"\"Slow replacement for np.copysign, which was introduced in numpy 1.4\"\"\"\n return np.abs(x1) * np.sign(x2)\n\nif not hasattr(np, 'copysign'):\n copysign = _copysign\nelse:\n copysign = np.copysign\n\n\ndef _in1d(ar1, ar2, assume_unique=False, invert=False):\n \"\"\"Replacement for in1d that is provided for numpy >= 1.4\"\"\"\n # Ravel both arrays, behavior for the first array could be different\n ar1 = np.asarray(ar1).ravel()\n ar2 = np.asarray(ar2).ravel()\n\n # This code is significantly faster when the condition is satisfied.\n if len(ar2) < 10 * len(ar1) ** 0.145:\n if invert:\n mask = np.ones(len(ar1), dtype=np.bool)\n for a in ar2:\n mask &= (ar1 != a)\n else:\n mask = np.zeros(len(ar1), dtype=np.bool)\n for a in ar2:\n mask |= (ar1 == a)\n return mask\n\n # Otherwise use sorting\n if not assume_unique:\n ar1, rev_idx = unique(ar1, return_inverse=True)\n ar2 = np.unique(ar2)\n\n ar = np.concatenate((ar1, ar2))\n # We need this to be a stable sort, so always use 'mergesort'\n # here. The values from the first array should always come before\n # the values from the second array.\n order = ar.argsort(kind='mergesort')\n sar = ar[order]\n if invert:\n bool_ar = (sar[1:] != sar[:-1])\n else:\n bool_ar = (sar[1:] == sar[:-1])\n flag = np.concatenate((bool_ar, [invert]))\n indx = order.argsort(kind='mergesort')[:len(ar1)]\n\n if assume_unique:\n return flag[indx]\n else:\n return flag[indx][rev_idx]\n\n\nif not hasattr(np, 'in1d') or LooseVersion(np.__version__) < '1.8':\n in1d = _in1d\nelse:\n in1d = np.in1d\n\n\ndef _digitize(x, bins, right=False):\n \"\"\"Replacement for digitize with right kwarg (numpy < 1.7).\n\n Notes\n -----\n This fix is only meant for integer arrays. If ``right==True`` but either\n ``x`` or ``bins`` are of a different type, a NotImplementedError will be\n raised.\n \"\"\"\n if right:\n x = np.asarray(x)\n bins = np.asarray(bins)\n if (x.dtype.kind not in 'ui') or (bins.dtype.kind not in 'ui'):\n raise NotImplementedError(\"Only implemented for integer input\")\n return np.digitize(x - 1e-5, bins)\n else:\n return np.digitize(x, bins)\n\nif LooseVersion(np.__version__) < LooseVersion('1.7'):\n digitize = _digitize\nelse:\n digitize = np.digitize\n\n\ndef _tril_indices(n, k=0):\n \"\"\"Replacement for tril_indices that is provided for numpy >= 1.4\"\"\"\n mask = np.greater_equal(np.subtract.outer(np.arange(n), np.arange(n)), -k)\n indices = np.where(mask)\n\n return indices\n\nif not hasattr(np, 'tril_indices'):\n tril_indices = _tril_indices\nelse:\n tril_indices = np.tril_indices\n\n\ndef _unravel_index(indices, dims):\n \"\"\"Add support for multiple indices in unravel_index that is provided\n for numpy >= 1.4\"\"\"\n indices_arr = np.asarray(indices)\n if indices_arr.size == 1:\n return np.unravel_index(indices, dims)\n else:\n if indices_arr.ndim != 1:\n raise ValueError('indices should be one dimensional')\n\n ndims = len(dims)\n unraveled_coords = np.empty((indices_arr.size, ndims), dtype=np.int)\n for coord, idx in zip(unraveled_coords, indices_arr):\n coord[:] = np.unravel_index(idx, dims)\n return tuple(unraveled_coords.T)\n\n\nif LooseVersion(np.__version__) < LooseVersion('1.4'):\n unravel_index = _unravel_index\nelse:\n unravel_index = np.unravel_index\n\n\ndef _qr_economic_old(A, **kwargs):\n \"\"\"\n Compat function for the QR-decomposition in economic mode\n Scipy 0.9 changed the keyword econ=True to mode='economic'\n \"\"\"\n with warnings.catch_warnings(record=True):\n return linalg.qr(A, econ=True, **kwargs)\n\n\ndef 
_qr_economic_new(A, **kwargs):\n return linalg.qr(A, mode='economic', **kwargs)\n\n\nif LooseVersion(scipy.__version__) < LooseVersion('0.9'):\n qr_economic = _qr_economic_old\nelse:\n qr_economic = _qr_economic_new\n\n\ndef savemat(file_name, mdict, oned_as=\"column\", **kwargs):\n \"\"\"MATLAB-format output routine that is compatible with SciPy 0.7's.\n\n 0.7.2 (or .1?) added the oned_as keyword arg with 'column' as the default\n value. It issues a warning if this is not provided, stating that \"This will\n change to 'row' in future versions.\"\n \"\"\"\n import scipy.io\n try:\n return scipy.io.savemat(file_name, mdict, oned_as=oned_as, **kwargs)\n except TypeError:\n return scipy.io.savemat(file_name, mdict, **kwargs)\n\nif hasattr(np, 'count_nonzero'):\n from numpy import count_nonzero\nelse:\n def count_nonzero(X):\n return len(np.flatnonzero(X))\n\n# little dance to see if np.copy has an 'order' keyword argument\nif 'order' in inspect.getargspec(np.copy)[0]:\n def safe_copy(X):\n # Copy, but keep the order\n return np.copy(X, order='K')\nelse:\n # Before an 'order' argument was introduced, numpy wouldn't muck with\n # the ordering\n safe_copy = np.copy\n\n\ndef _meshgrid(*xi, **kwargs):\n \"\"\"\n Return coordinate matrices from coordinate vectors.\n Make N-D coordinate arrays for vectorized evaluations of\n N-D scalar/vector fields over N-D grids, given\n one-dimensional coordinate arrays x1, x2,..., xn.\n .. versionchanged:: 1.9\n 1-D and 0-D cases are allowed.\n Parameters\n ----------\n x1, x2,..., xn : array_like\n 1-D arrays representing the coordinates of a grid.\n indexing : {'xy', 'ij'}, optional\n Cartesian ('xy', default) or matrix ('ij') indexing of output.\n See Notes for more details.\n .. versionadded:: 1.7.0\n sparse : bool, optional\n If True a sparse grid is returned in order to conserve memory.\n Default is False.\n .. versionadded:: 1.7.0\n copy : bool, optional\n If False, a view into the original arrays are returned in order to\n conserve memory. Default is True. Please note that\n ``sparse=False, copy=False`` will likely return non-contiguous\n arrays. Furthermore, more than one element of a broadcast array\n may refer to a single memory location. If you need to write to the\n arrays, make copies first.\n .. 
versionadded:: 1.7.0\n Returns\n -------\n X1, X2,..., XN : ndarray\n For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,\n return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'\n or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'\n with the elements of `xi` repeated to fill the matrix along\n the first dimension for `x1`, the second for `x2` and so on.\n \"\"\"\n ndim = len(xi)\n\n copy_ = kwargs.pop('copy', True)\n sparse = kwargs.pop('sparse', False)\n indexing = kwargs.pop('indexing', 'xy')\n\n if kwargs:\n raise TypeError(\"meshgrid() got an unexpected keyword argument '%s'\"\n % (list(kwargs)[0],))\n\n if indexing not in ['xy', 'ij']:\n raise ValueError(\n \"Valid values for `indexing` are 'xy' and 'ij'.\")\n\n s0 = (1,) * ndim\n output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])\n for i, x in enumerate(xi)]\n\n shape = [x.size for x in output]\n\n if indexing == 'xy' and ndim > 1:\n # switch first and second axis\n output[0].shape = (1, -1) + (1,) * (ndim - 2)\n output[1].shape = (-1, 1) + (1,) * (ndim - 2)\n shape[0], shape[1] = shape[1], shape[0]\n\n if sparse:\n if copy_:\n return [x.copy() for x in output]\n else:\n return output\n else:\n # Return the full N-D matrix (not only the 1-D vector)\n if copy_:\n mult_fact = np.ones(shape, dtype=int)\n return [x * mult_fact for x in output]\n else:\n return np.broadcast_arrays(*output)\n\nif LooseVersion(np.__version__) < LooseVersion('1.7'):\n meshgrid = _meshgrid\nelse:\n meshgrid = np.meshgrid\n\n\n###############################################################################\n# Back porting firwin2 for older scipy\n\n# Original version of firwin2 from scipy ticket #457, submitted by \"tash\".\n#\n# Rewritten by Warren Weckesser, 2010.\n\n\ndef _firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):\n \"\"\"FIR filter design using the window method.\n\n From the given frequencies `freq` and corresponding gains `gain`,\n this function constructs an FIR filter with linear phase and\n (approximately) the given frequency response.\n\n Parameters\n ----------\n numtaps : int\n The number of taps in the FIR filter. `numtaps` must be less than\n `nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,\n then `numtaps` must be odd.\n\n freq : array-like, 1D\n The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being\n Nyquist. The Nyquist frequency can be redefined with the argument\n `nyq`.\n\n The values in `freq` must be nondecreasing. A value can be repeated\n once to implement a discontinuity. The first value in `freq` must\n be 0, and the last value must be `nyq`.\n\n gain : array-like\n The filter gains at the frequency sampling points.\n\n nfreqs : int, optional\n The size of the interpolation mesh used to construct the filter.\n For most efficient behavior, this should be a power of 2 plus 1\n (e.g, 129, 257, etc). The default is one more than the smallest\n power of 2 that is not less than `numtaps`. `nfreqs` must be greater\n than `numtaps`.\n\n window : string or (string, float) or float, or None, optional\n Window function to use. Default is \"hamming\". See\n `scipy.signal.get_window` for the complete list of possible values.\n If None, no window function is applied.\n\n nyq : float\n Nyquist frequency. 
Each frequency in `freq` must be between 0 and\n `nyq` (inclusive).\n\n Returns\n -------\n taps : numpy 1D array of length `numtaps`\n The filter coefficients of the FIR filter.\n\n Examples\n --------\n A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and\n that decreases linearly on [0.5, 1.0] from 1 to 0:\n\n >>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) # doctest: +SKIP\n >>> print(taps[72:78]) # doctest: +SKIP\n [-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]\n\n See also\n --------\n scipy.signal.firwin\n\n Notes\n -----\n\n From the given set of frequencies and gains, the desired response is\n constructed in the frequency domain. The inverse FFT is applied to the\n desired response to create the associated convolution kernel, and the\n first `numtaps` coefficients of this kernel, scaled by `window`, are\n returned.\n\n The FIR filter will have linear phase. The filter is Type I if `numtaps`\n is odd and Type II if `numtaps` is even. Because Type II filters always\n have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`\n is not zero.\n\n .. versionadded:: 0.9.0\n\n References\n ----------\n .. [1] Oppenheim, A. V. and Schafer, R. W., \"Discrete-Time Signal\n Processing\", Prentice-Hall, Englewood Cliffs, New Jersey (1989).\n (See, for example, Section 7.4.)\n\n .. [2] Smith, Steven W., \"The Scientist and Engineer's Guide to Digital\n Signal Processing\", Ch. 17. http://www.dspguide.com/ch17/1.htm\n\n \"\"\"\n\n if len(freq) != len(gain):\n raise ValueError('freq and gain must be of same length.')\n\n if nfreqs is not None and numtaps >= nfreqs:\n raise ValueError('ntaps must be less than nfreqs, but firwin2 was '\n 'called with ntaps=%d and nfreqs=%s'\n % (numtaps, nfreqs))\n\n if freq[0] != 0 or freq[-1] != nyq:\n raise ValueError('freq must start with 0 and end with `nyq`.')\n d = np.diff(freq)\n if (d < 0).any():\n raise ValueError('The values in freq must be nondecreasing.')\n d2 = d[:-1] + d[1:]\n if (d2 == 0).any():\n raise ValueError('A value in freq must not occur more than twice.')\n\n if numtaps % 2 == 0 and gain[-1] != 0.0:\n raise ValueError(\"A filter with an even number of coefficients must \"\n \"have zero gain at the Nyquist rate.\")\n\n if nfreqs is None:\n nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))\n\n # Tweak any repeated values in freq so that interp works.\n eps = np.finfo(float).eps\n for k in range(len(freq)):\n if k < len(freq) - 1 and freq[k] == freq[k + 1]:\n freq[k] = freq[k] - eps\n freq[k + 1] = freq[k + 1] + eps\n\n # Linearly interpolate the desired response on a uniform mesh `x`.\n x = np.linspace(0.0, nyq, nfreqs)\n fx = np.interp(x, freq, gain)\n\n # Adjust the phases of the coefficients so that the first `ntaps` of the\n # inverse FFT are the desired filter coefficients.\n shift = np.exp(-(numtaps - 1) / 2. 
* 1.j * np.pi * x / nyq)\n fx2 = fx * shift\n\n # Use irfft to compute the inverse FFT.\n out_full = irfft(fx2)\n\n if window is not None:\n # Create the window to apply to the filter coefficients.\n from scipy.signal.signaltools import get_window\n wind = get_window(window, numtaps, fftbins=False)\n else:\n wind = 1\n\n # Keep only the first `numtaps` coefficients in `out`, and multiply by\n # the window.\n out = out_full[:numtaps] * wind\n\n return out\n\n\ndef get_firwin2():\n \"\"\"Helper to get firwin2\"\"\"\n try:\n from scipy.signal import firwin2\n except ImportError:\n firwin2 = _firwin2\n return firwin2\n\n\ndef _filtfilt(*args, **kwargs):\n \"\"\"wrap filtfilt, excluding padding arguments\"\"\"\n from scipy.signal import filtfilt\n # cut out filter args\n if len(args) > 4:\n args = args[:4]\n if 'padlen' in kwargs:\n del kwargs['padlen']\n return filtfilt(*args, **kwargs)\n\n\ndef get_filtfilt():\n \"\"\"Helper to get filtfilt from scipy\"\"\"\n from scipy.signal import filtfilt\n\n if 'padlen' in inspect.getargspec(filtfilt)[0]:\n return filtfilt\n\n return _filtfilt\n\n\n###############################################################################\n# Back porting matrix_rank for numpy < 1.7\n\n\ndef _matrix_rank(M, tol=None):\n \"\"\" Return matrix rank of array using SVD method\n\n Rank of the array is the number of SVD singular values of the array that\n are greater than `tol`.\n\n Parameters\n ----------\n M : {(M,), (M, N)} array_like\n array of <=2 dimensions\n tol : {None, float}, optional\n threshold below which SVD values are considered zero. If `tol` is\n None, and ``S`` is an array with singular values for `M`, and\n ``eps`` is the epsilon value for datatype of ``S``, then `tol` is\n set to ``S.max() * max(M.shape) * eps``.\n\n Notes\n -----\n The default threshold to detect rank deficiency is a test on the magnitude\n of the singular values of `M`. By default, we identify singular values less\n than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with\n the symbols defined above). This is the algorithm MATLAB uses [1]. It also\n appears in *Numerical recipes* in the discussion of SVD solutions for\n linear least squares [2].\n\n This default threshold is designed to detect rank deficiency accounting\n for the numerical errors of the SVD computation. Imagine that there is a\n column in `M` that is an exact (in floating point) linear combination of\n other columns in `M`. Computing the SVD on `M` will not produce a\n singular value exactly equal to 0 in general: any difference of the\n smallest SVD value from 0 will be caused by numerical imprecision in the\n calculation of the SVD. Our threshold for small SVD values takes this\n numerical imprecision into account, and the default threshold will detect\n such numerical rank deficiency. The threshold may declare a matrix `M`\n rank deficient even if the linear combination of some columns of `M` is\n not exactly equal to another column of `M` but only numerically very\n close to another column of `M`.\n\n We chose our default threshold because it is in wide use. Other\n thresholds are possible. For example, elsewhere in the 2007 edition of\n *Numerical recipes* there is an alternative threshold of ``S.max() *\n np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe\n this threshold as being based on \"expected roundoff error\" (p 71).\n\n The thresholds above deal with floating point roundoff error in the\n calculation of the SVD. 
However, you may have more information about the\n sources of error in `M` that would make you consider other tolerance\n values to detect *effective* rank deficiency. The most useful measure of\n the tolerance depends on the operations you intend to use on your matrix.\n For example, if your data come from uncertain measurements with\n uncertainties greater than floating point epsilon, choosing a tolerance\n near that uncertainty may be preferable. The tolerance may be absolute if\n the uncertainties are absolute rather than relative.\n\n References\n ----------\n .. [1] MATLAB reference documentation, \"Rank\"\n http://www.mathworks.com/help/techdoc/ref/rank.html\n .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,\n \"Numerical Recipes (3rd edition)\", Cambridge University Press, 2007,\n page 795.\n\n Examples\n --------\n >>> from numpy.linalg import matrix_rank\n >>> matrix_rank(np.eye(4)) # Full rank matrix\n 4\n >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix\n >>> matrix_rank(I)\n 3\n >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0\n 1\n >>> matrix_rank(np.zeros((4,)))\n 0\n \"\"\"\n M = np.asarray(M)\n if M.ndim > 2:\n raise TypeError('array should have 2 or fewer dimensions')\n if M.ndim < 2:\n return np.int(not all(M == 0))\n S = np.linalg.svd(M, compute_uv=False)\n if tol is None:\n tol = S.max() * np.max(M.shape) * np.finfo(S.dtype).eps\n return np.sum(S > tol)\n\nif LooseVersion(np.__version__) > '1.7.1':\n from numpy.linalg import matrix_rank\nelse:\n matrix_rank = _matrix_rank\n\n\ndef _reconstruct_partial(func, args, kwargs):\n \"\"\"Helper to pickle partial functions\"\"\"\n return partial(func, *args, **(kwargs or {}))\n\n\ndef _reduce_partial(p):\n \"\"\"Helper to pickle partial functions\"\"\"\n return _reconstruct_partial, (p.func, p.args, p.keywords)\n\n# This adds pickling functionality to older Python 2.6\n# Please always import partial from here.\ncopyreg.pickle(partial, _reduce_partial)\n\n\ndef normalize_colors(vmin, vmax, clip=False):\n \"\"\"Helper to handle matplotlib API\"\"\"\n import matplotlib.pyplot as plt\n try:\n return plt.Normalize(vmin, vmax, clip=clip)\n except AttributeError:\n return plt.normalize(vmin, vmax, clip=clip)\n\n\ndef assert_true(expr, msg='False is not True'):\n \"\"\"Fake assert_true without message\"\"\"\n if not expr:\n raise AssertionError(msg)\n\n\ndef assert_is(expr1, expr2, msg=None):\n \"\"\"Fake assert_is without message\"\"\"\n assert_true(expr1 is expr2, msg)\n\n\ndef assert_is_not(expr1, expr2, msg=None):\n \"\"\"Fake assert_is_not without message\"\"\"\n assert_true(expr1 is not expr2, msg)\n\n\ndef _sparse_block_diag(mats, format=None, dtype=None):\n \"\"\"An implementation of scipy.sparse.block_diag since old versions of\n scipy don't have it. Forms a sparse matrix by stacking matrices in block\n diagonal form.\n\n Parameters\n ----------\n mats : list of matrices\n Input matrices.\n format : str, optional\n The sparse format of the result (e.g. \"csr\"). If not given, the\n matrix is returned in \"coo\" format.\n dtype : dtype specifier, optional\n The data-type of the output matrix. 
If not given, the dtype is\n determined from that of blocks.\n\n Returns\n -------\n res : sparse matrix\n \"\"\"\n nmat = len(mats)\n rows = []\n for ia, a in enumerate(mats):\n row = [None] * nmat\n row[ia] = a\n rows.append(row)\n return sparse.bmat(rows, format=format, dtype=dtype)\n\ntry:\n from scipy.sparse import block_diag as sparse_block_diag\nexcept Exception:\n sparse_block_diag = _sparse_block_diag\n\n\ndef _isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):\n \"\"\"\n Returns a boolean array where two arrays are element-wise equal within a\n tolerance.\n\n The tolerance values are positive, typically very small numbers. The\n relative difference (`rtol` * abs(`b`)) and the absolute difference\n `atol` are added together to compare against the absolute difference\n between `a` and `b`.\n\n Parameters\n ----------\n a, b : array_like\n Input arrays to compare.\n rtol : float\n The relative tolerance parameter (see Notes).\n atol : float\n The absolute tolerance parameter (see Notes).\n equal_nan : bool\n Whether to compare NaN's as equal. If True, NaN's in `a` will be\n considered equal to NaN's in `b` in the output array.\n\n Returns\n -------\n y : array_like\n Returns a boolean array of where `a` and `b` are equal within the\n given tolerance. If both `a` and `b` are scalars, returns a single\n boolean value.\n\n See Also\n --------\n allclose\n\n Notes\n -----\n .. versionadded:: 1.7.0\n\n For finite values, isclose uses the following equation to test whether\n two floating point values are equivalent.\n\n absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))\n\n The above equation is not symmetric in `a` and `b`, so that\n `isclose(a, b)` might be different from `isclose(b, a)` in\n some rare cases.\n\n Examples\n --------\n >>> isclose([1e10,1e-7], [1.00001e10,1e-8])\n array([ True, False], dtype=bool)\n >>> isclose([1e10,1e-8], [1.00001e10,1e-9])\n array([ True, True], dtype=bool)\n >>> isclose([1e10,1e-8], [1.0001e10,1e-9])\n array([False, True], dtype=bool)\n >>> isclose([1.0, np.nan], [1.0, np.nan])\n array([ True, False], dtype=bool)\n >>> isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)\n array([ True, True], dtype=bool)\n \"\"\"\n def within_tol(x, y, atol, rtol):\n with np.errstate(invalid='ignore'):\n result = np.less_equal(abs(x - y), atol + rtol * abs(y))\n if np.isscalar(a) and np.isscalar(b):\n result = bool(result)\n return result\n\n x = np.array(a, copy=False, subok=True, ndmin=1)\n y = np.array(b, copy=False, subok=True, ndmin=1)\n\n # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).\n # This will cause casting of x later. Also, make sure to allow subclasses\n # (e.g., for numpy.ma).\n dt = np.core.multiarray.result_type(y, 1.)\n y = np.array(y, dtype=dt, copy=False, subok=True)\n\n xfin = np.isfinite(x)\n yfin = np.isfinite(y)\n if all(xfin) and all(yfin):\n return within_tol(x, y, atol, rtol)\n else:\n finite = xfin & yfin\n cond = np.zeros_like(finite, subok=True)\n # Because we're using boolean indexing, x & y must be the same shape.\n # Ideally, we'd just do x, y = broadcast_arrays(x, y). 
It's in\n # lib.stride_tricks, though, so we can't import it here.\n x = x * np.ones_like(cond)\n y = y * np.ones_like(cond)\n # Avoid subtraction with infinite/nan values...\n cond[finite] = within_tol(x[finite], y[finite], atol, rtol)\n # Check for equality of infinite values...\n cond[~finite] = (x[~finite] == y[~finite])\n if equal_nan:\n # Make NaN == NaN\n both_nan = np.isnan(x) & np.isnan(y)\n cond[both_nan] = both_nan[both_nan]\n return cond\n\n\nif LooseVersion(np.__version__) < LooseVersion('1.7'):\n isclose = _isclose\nelse:\n isclose = np.isclose\n" ]
[ [ "numpy.sum", "numpy.ones", "numpy.diff", "numpy.core.multiarray.result_type", "numpy.fft.irfft", "numpy.ones_like", "numpy.asarray", "numpy.copy", "numpy.isscalar", "scipy.signal.signaltools.get_window", "numpy.isfinite", "numpy.unravel_index", "numpy.abs", "scipy.io.savemat", "numpy.isnan", "numpy.where", "numpy.linspace", "numpy.unique", "numpy.flatnonzero", "numpy.bincount", "numpy.zeros", "numpy.broadcast_arrays", "numpy.asanyarray", "scipy.signal.filtfilt", "numpy.arange", "numpy.max", "numpy.finfo", "numpy.zeros_like", "numpy.sign", "numpy.empty", "numpy.interp", "numpy.cumsum", "matplotlib.pyplot.Normalize", "numpy.exp", "numpy.linalg.svd", "numpy.errstate", "scipy.linalg.qr", "numpy.array", "numpy.concatenate", "matplotlib.pyplot.normalize", "numpy.digitize" ] ]
minhhn2910/conga2022
[ "81ad2fb9c0055c332f8f305b2ea409b6577003f4" ]
[ "train-cifar10/models/resnet_posit.py" ]
[ "'''ResNet in PyTorch.\n\nFor Pre-activation ResNet, see 'preact_resnet.py'.\n\nReference:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, quant, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(\n in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,\n stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.quant=quant()\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes,\n kernel_size=1, stride=stride, bias=False),\n quant(),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n \n out = F.relu(self.bn1(self.conv1(x)))\n out = self.quant(out)\n out = self.bn2(self.conv2(out))\n out = self.quant(out)\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,\n stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion *\n planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, quant, num_classes=10):\n super(ResNet, self).__init__()\n self.in_planes = 64\n self.quant = quant()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3,\n stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, quant, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, quant, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, quant, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, quant, num_blocks[3], stride=2)\n self.linear = nn.Linear(512*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, quant, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, quant, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.quant(out)\n out = self.layer1(out)\n out = self.quant(out)\n out = self.layer2(out)\n out = self.quant(out)\n out = self.layer3(out)\n out = self.quant(out)\n out = self.layer4(out)\n out = self.quant(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n \n out = self.linear(out)\n return out\n\n\ndef ResNet18Posit(quant):\n return 
ResNet(BasicBlock, [2, 2, 2, 2], quant)\n\n\n\n# NOTE: these variants would also need a quant argument, and Bottleneck\n# does not currently accept one.\n# def ResNet34():\n# return ResNet(BasicBlock, [3, 4, 6, 3])\n\n\n# def ResNet50():\n# return ResNet(Bottleneck, [3, 4, 6, 3])\n\n\n# def ResNet101():\n# return ResNet(Bottleneck, [3, 4, 23, 3])\n\n\n# def ResNet152():\n# return ResNet(Bottleneck, [3, 8, 36, 3])\n\n\ndef test():\n # smoke test; nn.Identity acts as a no-op quant module\n net = ResNet18Posit(nn.Identity)\n y = net(torch.randn(1, 3, 32, 32))\n print(y.size())\n\n# test()\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.Linear", "torch.nn.functional.avg_pool2d", "torch.randn", "torch.nn.functional.relu", "torch.nn.Conv2d", "torch.nn.Sequential" ] ]
laiguokun/fairseq
[ "6c01c91aac81eb2e3173add4463dfa45c404ffa5" ]
[ "models/protected_multihead_attention.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport math\n\nimport torch\nfrom torch import nn\nfrom torch.nn import Parameter\nimport torch.nn.functional as F\n\nfrom fairseq import utils\nfrom fairseq.modules.learned_positional_embedding import LearnedPositionalEmbedding\n\n# Adapted from faiserq/modules/multihead_attention to deal with local attention\n# Local attetion masking in combination with padding masking can lead to\n# all -Inf attention rows. This version detects and corrects this situation\nclass ProtectedMultiheadAttention(nn.Module):\n \"\"\"Multi-headed attention.\n\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n\n def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim ** -0.5\n\n self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))\n if bias:\n self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))\n else:\n self.register_parameter('in_proj_bias', None)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n if add_bias_kv:\n self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))\n self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))\n else:\n self.bias_k = self.bias_v = None\n\n self.add_zero_attn = add_zero_attn\n\n self.reset_parameters()\n\n self.onnx_trace = False\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def reset_parameters(self):\n nn.init.xavier_uniform_(self.in_proj_weight)\n nn.init.xavier_uniform_(self.out_proj.weight)\n if self.in_proj_bias is not None:\n nn.init.constant_(self.in_proj_bias, 0.)\n nn.init.constant_(self.out_proj.bias, 0.)\n if self.bias_k is not None:\n nn.init.xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n nn.init.xavier_normal_(self.bias_v)\n\n def forward(self, query, key, value, key_padding_mask=None, incremental_state=None,\n need_weights=True, static_kv=False, attn_mask=None):\n \"\"\"Input shape: Time x Batch x Channel\n\n Self-attention can be implemented by passing in the same arguments for\n query, key and value. Timesteps can be masked by supplying a T x T mask in the\n `attn_mask` argument. 
Padding elements can be excluded from\n the key by passing a binary ByteTensor (`key_padding_mask`) with shape:\n batch x src_len, where padding elements are indicated by 1s.\n \"\"\"\n\n qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()\n kv_same = key.data_ptr() == value.data_ptr()\n\n tgt_len, bsz, embed_dim = query.size()\n assert embed_dim == self.embed_dim\n assert list(query.size()) == [tgt_len, bsz, embed_dim]\n assert key.size() == value.size()\n\n if incremental_state is not None:\n saved_state = self._get_input_buffer(incremental_state)\n if 'prev_key' in saved_state:\n # previous time steps are cached - no need to recompute\n # key and value if they are static\n if static_kv:\n assert kv_same and not qkv_same\n key = value = None\n else:\n saved_state = None\n\n if qkv_same:\n # self-attention\n q, k, v = self.in_proj_qkv(query)\n elif kv_same:\n # encoder-decoder attention\n q = self.in_proj_q(query)\n if key is None:\n assert value is None\n k = v = None\n else:\n k, v = self.in_proj_kv(key)\n else:\n q = self.in_proj_q(query)\n k = self.in_proj_k(key)\n v = self.in_proj_v(value)\n\n q *= self.scaling\n\n if self.bias_k is not None:\n assert self.bias_v is not None\n k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])\n v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])\n if attn_mask is not None:\n attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)\n\n q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if k is not None:\n k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if v is not None:\n v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n\n if saved_state is not None:\n # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)\n if 'prev_key' in saved_state:\n prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n k = prev_key\n else:\n k = torch.cat((prev_key, k), dim=1)\n if 'prev_value' in saved_state:\n prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n v = prev_value\n else:\n v = torch.cat((prev_value, v), dim=1)\n saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim)\n\n self._set_input_buffer(incremental_state, saved_state)\n\n src_len = k.size(1)\n\n if key_padding_mask is not None:\n assert key_padding_mask.size(0) == bsz\n assert key_padding_mask.size(1) == src_len\n\n if self.add_zero_attn:\n src_len += 1\n k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)\n v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)\n if attn_mask is not None:\n attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)\n\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]\n\n if attn_mask is not None:\n attn_mask = attn_mask.unsqueeze(0)\n if self.onnx_trace:\n attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)\n attn_weights += attn_mask\n\n if key_padding_mask is not None:\n # 
don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n if self.onnx_trace:\n attn_weights = torch.where(\n key_padding_mask.unsqueeze(1).unsqueeze(2),\n torch.Tensor([float(\"-Inf\")]),\n attn_weights.float()\n ).type_as(attn_weights)\n else:\n attn_weights = attn_weights.float().masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2),\n float('-inf'),\n ).type_as(attn_weights) # FP16 support: cast to float and back\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n all_inf = torch.isinf(attn_weights).all(dim=-1)\n if all_inf.any():\n attn_weights = attn_weights.float().masked_fill(\n all_inf.unsqueeze(-1),\n 0,\n ).type_as(attn_weights) # FP16 support: cast to float and back\n\n\n attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(attn_weights)\n attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)\n\n attn = torch.bmm(attn_weights, v)\n assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]\n if (self.onnx_trace and attn.size(1) == 1):\n # when ONNX tracing a single decoder step (sequence length == 1)\n # the transpose is a no-op copy before view, thus unnecessary\n attn = attn.contiguous().view(tgt_len, bsz, embed_dim)\n else:\n attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn = self.out_proj(attn)\n if need_weights:\n # average attention weights over heads\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.sum(dim=1) / self.num_heads\n else:\n attn_weights = None\n\n return attn, attn_weights\n\n def in_proj_qkv(self, query):\n return self._in_proj(query).chunk(3, dim=-1)\n\n def in_proj_kv(self, key):\n return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)\n\n def in_proj_q(self, query):\n return self._in_proj(query, end=self.embed_dim)\n\n def in_proj_k(self, key):\n return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)\n\n def in_proj_v(self, value):\n return self._in_proj(value, start=2 * self.embed_dim)\n\n def _in_proj(self, input, start=0, end=None):\n weight = self.in_proj_weight\n bias = self.in_proj_bias\n weight = weight[start:end, :]\n if bias is not None:\n bias = bias[start:end]\n return F.linear(input, weight, bias)\n\n def reorder_incremental_state(self, incremental_state, new_order):\n \"\"\"Reorder buffered internal state (for incremental generation).\"\"\"\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n for k in input_buffer.keys():\n input_buffer[k] = input_buffer[k].index_select(0, new_order)\n self._set_input_buffer(incremental_state, input_buffer)\n\n def _get_input_buffer(self, incremental_state):\n return utils.get_incremental_state(\n self,\n incremental_state,\n 'attn_state',\n ) or {}\n\n def _set_input_buffer(self, incremental_state, buffer):\n utils.set_incremental_state(\n self,\n incremental_state,\n 'attn_state',\n buffer,\n )\n\ndef ProtectedPositionalEmbedding(\n num_embeddings: int,\n embedding_dim: int,\n padding_idx: int,\n learned: bool = False,\n):\n if learned:\n # if padding_idx is specified then offset the embedding ids by\n # this index and adjust num_embeddings appropriately\n # TODO: The right place for this offset would be inside\n # LearnedPositionalEmbedding. 
Move this there for a cleaner implementation.\n if padding_idx is not None:\n num_embeddings = num_embeddings + padding_idx + 1\n m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\n if padding_idx is not None:\n nn.init.constant_(m.weight[padding_idx], 0)\n else:\n m = SinusoidalPositionalEmbedding(\n embedding_dim, padding_idx, init_size=num_embeddings + padding_idx + 1,\n )\n return m\n\n\n# class SinusoidalPositionalEmbedding(nn.Module):\n# \"\"\"This module produces sinusoidal positional embeddings of any length.\n#\n# Padding symbols are ignored.\n# \"\"\"\n#\n# def __init__(self, embedding_dim, padding_idx, init_size=1024):\n# super().__init__()\n# self.embedding_dim = embedding_dim\n# self.padding_idx = padding_idx\n# self.weights = SinusoidalPositionalEmbedding.get_embedding(\n# init_size,\n# embedding_dim,\n# )\n# self.onnx_trace = False\n# self.register_buffer('_float_tensor', torch.FloatTensor(1))\n#\n# def prepare_for_onnx_export_(self):\n# self.onnx_trace = True\n#\n# @staticmethod\n# def get_embedding(num_embeddings, embedding_dim):\n# \"\"\"Build sinusoidal embeddings.\n#\n# This matches the implementation in tensor2tensor, but differs slightly\n# from the description in Section 3.5 of \"Attention Is All You Need\".\n# \"\"\"\n# assert embedding_dim % 2 == 0\n# pos_seq = torch.arange(0, num_embeddings, 1.0, dtype=torch.float)\n# freq_seq = torch.arange(0, embedding_dim, 2.0, dtype=torch.float)\n# inv_freq = 1. / (10000 ** (freq_seq / embedding_dim))\n# sinusoidal_inp = torch.einsum(\"i, d-> id\", pos_seq, inv_freq)\n# emb = torch.cat([torch.sin(sinusoidal_inp), torch.cos(sinusoidal_inp)], -1)\n# return emb\n#\n# def forward(self, input_, incremental_state=None, timestep=None, **kwargs):\n# \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n# bsz, seq_len = torch.onnx.operators.shape_as_tensor(input_)\n# if (seq_len > self.weights.size(0)):\n# self.weights = SinusoidalPositionalEmbedding.get_embedding(\n# seq_len,\n# embedding_dim,\n# )\n# self.weights = self.weights.to(self._float_tensor)\n# if incremental_state is not None:\n# # positions is the same for every token when decoding a single step\n# pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len\n# if self.onnx_trace:\n# return self.weights.index_select(index=pos, dim=0).unsqueeze(1).repeat(bsz, 1, 1)\n# return self.weights[pos, :].expand(bsz, 1, -1)\n#\n# weights = self.weights\n# #get positions\n# mask = input_.ne(self.padding_idx)\n# positions = mask.cumsum(-1) - 1\n# positions = positions * mask.long()\n# positions = positions.view(-1, )\n# weights = torch.index_select(weights, 0, positions)\n# weights = weights.view(bsz, seq_len, -1)\n# weights = weights * mask.unsqueeze(-1).type_as(weights)\n# return weights\n#\n# def max_positions(self):\n# \"\"\"Maximum number of supported positions.\"\"\"\n# return int(1e5) # an arbitrary large number\n\n\nclass SinusoidalPositionalEmbedding(nn.Module):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\n\n Padding symbols are ignored.\n \"\"\"\n\n def __init__(self, embedding_dim, padding_idx, init_size=1024):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.padding_idx = padding_idx\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n init_size,\n embedding_dim,\n )\n self.onnx_trace = False\n self.register_buffer('_float_tensor', torch.FloatTensor(1))\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = 
True\n\n @staticmethod\n def get_embedding(num_embeddings, embedding_dim):\n \"\"\"Build sinusoidal embeddings.\n\n This matches the implementation in tensor2tensor, but differs slightly\n from the description in Section 3.5 of \"Attention Is All You Need\".\n \"\"\"\n half_dim = embedding_dim // 2\n # emb = math.log(10000) / (half_dim - 1)\n emb = math.log(10000) / half_dim\n emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)\n emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)\n if embedding_dim % 2 == 1:\n # zero pad\n emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)\n return emb\n\n def forward(self, input, incremental_state=None, timestep=None, **kwargs):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bsz, seq_len = torch.onnx.operators.shape_as_tensor(input)\n max_pos = seq_len\n if self.weights is None or max_pos > self.weights.size(0):\n # recompute/expand embeddings if needed\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n max_pos,\n self.embedding_dim,\n )\n self.weights = self.weights.to(self._float_tensor)\n\n if incremental_state is not None:\n # positions is the same for every token when decoding a single step\n pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len\n if self.onnx_trace:\n return self.weights.index_select(index=-1 + pos, dim=0).unsqueeze(1).repeat(bsz, 1, 1)\n return self.weights[-1 + pos, :].expand(bsz, 1, -1)\n\n positions = make_positions(input, self.padding_idx, onnx_trace=self.onnx_trace)\n if self.onnx_trace:\n flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))\n embedding_shape = torch.cat((bsz.view(1), seq_len.view(1), torch.LongTensor([-1])))\n embeddings = torch.onnx.operators.reshape_from_tensor_shape(flat_embeddings, embedding_shape)\n return embeddings\n return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()\n\n def max_positions(self):\n \"\"\"Maximum number of supported positions.\"\"\"\n return int(1e5) # an arbitrary large number\n\n\ndef make_positions(tensor, padding_idx, onnx_trace=False):\n \"\"\"Replace non-padding symbols with their position numbers.\n\n Position numbers begin at padding_idx+1. Padding symbols are ignored.\n \"\"\"\n # The series of casts and type-conversions here are carefully\n # balanced to both work with ONNX export and XLA. In particular XLA\n # prefers ints, cumsum defaults to output longs, and ONNX doesn't know\n # how to handle the dtype kwarg in cumsum.\n mask = tensor.ne(padding_idx).int()\n return (\n (torch.cumsum(mask, dim=1) - 1).type_as(mask) * mask\n ).long()\n\n\n" ]
[ [ "torch.nn.init.xavier_uniform_", "torch.cumsum", "torch.bmm", "torch.cat", "torch.onnx.operators.reshape_from_tensor_shape", "torch.nn.functional.dropout", "torch.nn.init.xavier_normal_", "torch.cos", "torch.onnx.operators.shape_as_tensor", "torch.nn.init.normal_", "torch.isinf", "torch.sin", "torch.arange", "torch.Tensor", "torch.FloatTensor", "torch.nn.Linear", "torch.nn.init.constant_", "torch.nn.functional.linear", "torch.zeros", "torch.LongTensor" ] ]
Tiamat-Tech/npms
[ "2d1bce8c98b0f24aa69273975c52b2fbdb101c29" ]
[ "npms/datasets/sdf_dataset.py" ]
[ "from __future__ import division\nimport sys\nfrom torch.utils.data import Dataset\nimport os\nimport numpy as np\nimport pickle\nimport imp\nimport trimesh\nimport torch\nimport json\nfrom tqdm import tqdm\nfrom timeit import default_timer as timer\n\nfrom utils.gaps_utils import read_pts_file\n\n\nclass SDFDataset(Dataset):\n\n def __init__(\n self, \n data_dir='data', \n labels_json='labels.json',\n batch_size=64, \n num_workers=12, \n sample_info={}, \n cache_data=True,\n **kwargs\n ):\n\n ###################################################################################################\n # SDF\n ###################################################################################################\n sdf_samples_info = sample_info['sdf']\n self.sdf_samples_types = sdf_samples_info['types']\n self.num_points_sdf = sdf_samples_info['num_points']\n percentages = [p for p in self.sdf_samples_types.values()]\n\n assert sum(percentages) > 0.999 and sum(percentages) <= 1.0, sum(percentages)\n \n if self.num_points_sdf > 0:\n self.sdf_samples_types = {k: int(v * self.num_points_sdf) for k, v in self.sdf_samples_types.items()}\n assert sum(self.sdf_samples_types.values()) == self.num_points_sdf\n\n print()\n print(\"num sdf samples\", self.num_points_sdf)\n print()\n \n ###################################################################################################\n # Flow\n ###################################################################################################\n sample_flow_info = sample_info['flow']\n self.num_points_flow = np.array(sample_flow_info['num_points'])\n\n self.num_flow_samples_list = []\n if self.num_points_flow > 0:\n self.sample_flow_dist = np.array(sample_flow_info['dist']) / len(sample_flow_info['dist'])\n self.sample_flow_sigmas = np.array(sample_flow_info['sigmas'])\n \n assert np.sum(self.sample_flow_dist) == 1\n assert np.any(self.sample_flow_dist < 0) == False\n assert len(self.sample_flow_dist) == len(self.sample_flow_sigmas)\n\n self.num_flow_samples_list = np.rint(self.sample_flow_dist * self.num_points_flow).astype(np.uint32)\n assert np.all(self.num_flow_samples_list == self.num_flow_samples_list[0]), f\"num_samples: {self.num_flow_samples_list}\"\n self.num_flow_samples_per_sigma = self.num_flow_samples_list[0]\n\n print()\n print(\"num_samples\", self.num_flow_samples_list)\n print(\"num_samples per sigma\", self.num_flow_samples_per_sigma)\n print()\n\n self.max_samples_per_sigma = 100000\n\n self.num_samples_per_shape = {\n 'sdf': self.num_points_sdf,\n 'flow': self.num_points_flow,\n }\n \n self.data_dir = data_dir\n self.labels_json = labels_json\n self.labels_tpose_json = os.path.join(os.path.dirname(labels_json), \"labels_tpose.json\")\n\n self.cache_data = cache_data\n self.cache = []\n self.cache_tpose = []\n\n # Load labels\n self._load()\n\n self.batch_size = batch_size\n self.num_workers = num_workers\n\n # Preload data\n if self.cache_data:\n print(\"Preloading cached data ...\")\n\n for index in tqdm(range(len(self.labels))):\n data = self.labels[index]\n data_dict = self._load_sample(data, is_tpose=False)\n self.cache.append(data_dict)\n\n # T-Poses\n for index in range(len(self.labels_tpose)):\n data = self.labels_tpose[index]\n data_dict = self._load_sample(data, is_tpose=True)\n self.cache_tpose.append(data_dict)\n\n print(\"Loaded cached data ...\")\n\n def _load(self):\n with open(self.labels_json) as f:\n self.labels = json.loads(f.read())\n\n with open(self.labels_tpose_json) as f:\n self.labels_tpose = json.loads(f.read())\n \n 
self.num_identities = len(self.labels_tpose)\n\n def __len__(self):\n return len(self.labels)\n\n def get_num_identities(self):\n return self.num_identities\n\n def get_num_samples_per_shape(self):\n return self.num_samples_per_shape\n\n def _load_sample(self, data, is_tpose):\n if 'dataset' in data:\n shape_path = os.path.join(self.data_dir, data['dataset'], data['identity_name'], data['animation_name'], data['sample_id'])\n else:\n shape_path = os.path.join(self.data_dir, data['identity_name'], data['animation_name'], data['sample_id'])\n\n # BOUNDARY\n points_sdf_dict = {}\n\n if is_tpose and self.num_points_sdf > 0:\n for i, sdf_samples_type in enumerate(self.sdf_samples_types):\n ext = 'pts' if sdf_samples_type == \"surface\" else 'sdf' \n sdf_samples_path = shape_path + f'/samples_{sdf_samples_type}.{ext}'\n\n # Load data from disk\n sdf_points = read_pts_file(sdf_samples_path)\n\n if ext == 'pts':\n sdf_points = sdf_points[:, :3]\n \n points_sdf_dict[sdf_samples_type] = sdf_points\n\n # FLOW\n points_flow = None\n\n for i in range(len(self.num_flow_samples_list)):\n # points\n flow_samples_path = shape_path + '/flow_samples_{}.npz'.format(self.sample_flow_sigmas[i])\n flow_samples_npz = np.load(flow_samples_path)\n flow_sample_points = flow_samples_npz['points'][None, ...]\n\n if points_flow is None:\n points_flow = flow_sample_points\n else:\n points_flow = np.concatenate((points_flow, flow_sample_points), axis=0) # factor, 100k, 3\n\n return {\n 'points_sdf_dict': points_sdf_dict,\n 'points_flow': np.array(points_flow, dtype=np.float32), \n 'path': shape_path,\n 'identity_id': data['identity_id']\n }\n\n def _subsample(self, data_dict, subsample_indices_list, is_tpose):\n points_sdf = []\n points_flow = []\n\n # SDF samples\n if is_tpose and self.num_points_sdf > 0:\n points_sdf_dict = data_dict['points_sdf_dict']\n\n points_sdf = prepare_samples(points_sdf_dict, self.sdf_samples_types)\n \n assert points_sdf.shape[0] == self.num_points_sdf, f\"{points_sdf.shape[0]} vs {self.num_points_sdf}\"\n\n # Flow samples\n for i in range(len(self.num_flow_samples_list)): # sample type\n\n flow_sample_points = data_dict['points_flow'][i]\n\n subsample_indices = subsample_indices_list[i]\n\n points_flow.extend(flow_sample_points[subsample_indices])\n\n assert len(points_flow) == self.num_points_flow, f\"{len(points_flow)} vs {self.num_points_flow}\"\n\n return {\n 'points_sdf': np.array(points_sdf, dtype=np.float32),\n 'points_flow': np.array(points_flow, dtype=np.float32),\n 'path': data_dict['path'],\n 'identity_id': data_dict['identity_id']\n }\n\n def _get_identity_id(self, d):\n identity_id = d['identity_id']\n assert identity_id < self.num_identities, f\"Identity {identity_id} is not defined in labels_tpose.json\"\n return identity_id\n\n def __getitem__(self, idx):\n\n if self.cache_data:\n data_dict = self.cache[idx]\n data_ref_dict = self.cache_tpose[self._get_identity_id(data_dict)] \n\n else:\n data = self.labels[idx]\n data_ref = self.labels_tpose[self._get_identity_id(data)]\n\n # Load samples\n data_dict = self._load_sample(data, is_tpose=False)\n data_ref_dict = self._load_sample(data_ref, is_tpose=True)\n \n # Sample random indices for each sample type\n subsample_flow_indices_list = []\n for i, num in enumerate(self.num_flow_samples_list): # sample type\n subsample_indices = np.random.randint(0, self.max_samples_per_sigma, num)\n subsample_flow_indices_list.append(subsample_indices)\n\n # Subsample\n data_ref_dict = self._subsample(data_ref_dict, subsample_flow_indices_list, 
is_tpose=True)\n data_dict = self._subsample(data_dict, subsample_flow_indices_list, is_tpose=False)\n\n return {\n 'ref': data_ref_dict,\n 'cur': data_dict,\n 'idx': idx,\n 'identity_id': data_dict['identity_id'],\n }\n\n def get_loader(self, shuffle=True):\n\n assert self.batch_size <= len(self), f\"batch size ({self.batch_size}) > len dataset ({len(self)})\" \n\n return torch.utils.data.DataLoader(\n self, \n batch_size=self.batch_size, \n num_workers=self.num_workers, \n shuffle=shuffle,\n worker_init_fn=self.worker_init_fn,\n pin_memory=True,\n drop_last=True\n )\n\n def get_batch_size(self):\n return self.batch_size\n\n def worker_init_fn(self, worker_id):\n random_data = os.urandom(4)\n base_seed = int.from_bytes(random_data, byteorder=\"big\")\n np.random.seed(base_seed + worker_id)\n\n\ndef prepare_samples(sample_data, N_target_dict):\n\n ##################################################################################################\n # Surface\n ##################################################################################################\n surface_sdf_samples = np.empty((0, 4))\n\n # Subsample\n if N_target_dict['surface'] > 0:\n surface_sdf_samples = sample_data['surface']\n N = N_target_dict['surface']\n assert surface_sdf_samples.shape[0] > N\n surface_idxs = np.random.permutation(surface_sdf_samples.shape[0])[:N]\n surface_sdf_samples = surface_sdf_samples[surface_idxs, :]\n assert surface_sdf_samples.shape[1] == 3\n \n # Generate gt sdf for surface points (all have 0 sdf values)\n surface_sdf = np.zeros((surface_sdf_samples.shape[0], 1), dtype=np.float32)\n surface_sdf_samples = np.concatenate((surface_sdf_samples, surface_sdf), axis=1)\n\n ##################################################################################################\n # Near Surface Sampled\n ##################################################################################################\n nss_sdf_samples = np.empty((0, 4))\n \n # Subsample\n if N_target_dict['near'] > 0:\n nss_sdf_samples = sample_data['near']\n N = N_target_dict['near']\n assert nss_sdf_samples.shape[0] > N\n nss_idxs = np.random.permutation(nss_sdf_samples.shape[0])[:N]\n nss_sdf_samples = nss_sdf_samples[nss_idxs, :]\n assert nss_sdf_samples.shape[1] == 4\n\n # If you want to visualize:\n # print(nss_sdf_samples.shape)\n # import open3d as o3d\n # pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(nss_sdf_samples[:, :3]))\n # pcd.paint_uniform_color([1, 0, 0])\n # o3d.visualization.draw_geometries([pcd])\n\n ##################################################################################################\n # Uniform\n ##################################################################################################\n uniform_sdf_samples = np.empty((0, 4))\n \n # Subsample\n if N_target_dict['uniform'] > 0:\n uniform_sdf_samples = sample_data['uniform']\n N = N_target_dict['uniform']\n assert uniform_sdf_samples.shape[0] > N\n uniform_idxs = np.random.permutation(uniform_sdf_samples.shape[0])[:N]\n uniform_sdf_samples = uniform_sdf_samples[uniform_idxs, :]\n assert uniform_sdf_samples.shape[1] == 4\n\n # If you want to visualize:\n # print(uniform_sdf_samples.shape)\n # pcd2 = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(uniform_sdf_samples[:, :3]))\n # pcd2.paint_uniform_color([0, 1, 0])\n # o3d.visualization.draw_geometries([pcd, pcd2])\n\n ##################################################################################################\n 
##################################################################################################\n # Concatenate\n ##################################################################################################\n ##################################################################################################\n all_sdf_samples = np.concatenate((nss_sdf_samples, surface_sdf_samples, uniform_sdf_samples), axis=0)\n\n return all_sdf_samples" ]
[ [ "numpy.rint", "torch.utils.data.DataLoader", "numpy.load", "numpy.sum", "numpy.empty", "numpy.zeros", "numpy.random.permutation", "numpy.any", "numpy.random.seed", "numpy.all", "numpy.array", "numpy.concatenate", "numpy.random.randint" ] ]
enricorotundo/alibi
[ "190d7960630221813f704817ee48cc5af46a9e07" ]
[ "alibi/explainers/anchor_base.py" ]
[ "import copy\nimport logging\nimport numpy as np\nfrom collections import defaultdict, namedtuple\nfrom functools import partial\nfrom typing import Callable, Tuple, Set, Dict, List\n\nfrom alibi.utils.distributed import ActorPool, RAY_INSTALLED\nfrom alibi.utils.distributions import kl_bernoulli\n\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO: Discuss logging strategy\n\nclass AnchorBaseBeam:\n\n def __init__(self, samplers: List[Callable], **kwargs) -> None:\n \"\"\"\n Parameters\n ---------\n samplers\n Objects that can be called with args (result, n_samples) tuple to draw samples.\n \"\"\"\n\n self.sample_fcn = samplers[0]\n self.samplers = None # type: List[Callable]\n # Initial size (in batches) of data/raw data samples cache.\n self.sample_cache_size = kwargs.get('sample_cache_size', 10000)\n # when only the max of self.margin or batch size remain emptpy, the cache is\n # extended to accommodate an additional sample_cache_size batches.\n self.margin = kwargs.get('cache_margin', 1000)\n\n def _init_state(self, batch_size: int, coverage_data: np.ndarray) -> None:\n \"\"\"\n Initialises the object state, which is used to compute result precisions & precision bounds\n and provide metadata for explanation objects.\n\n Parameters\n ----------\n batch_size\n See anchor_beam method.\n coverage_data\n See _get_coverage_samples method.\n sample_cache_size\n See anchor_beam method.\n \"\"\"\n\n prealloc_size = batch_size * self.sample_cache_size\n # t_ indicates that the attribute is a dictionary with entries for each anchor\n self.state = {\n 't_coverage': defaultdict(lambda: 0.), # anchors' coverage\n 't_coverage_idx': defaultdict(set), # index of anchors in coverage set\n 't_covered_true': defaultdict(None), # samples with same pred as instance where t_ applies\n 't_covered_false': defaultdict(None), # samples with dif pred to instance where t_ applies\n 't_idx': defaultdict(set), # row idx in sample cache where the anchors apply\n 't_nsamples': defaultdict(lambda: 0.), # total number of samples drawn for the anchors\n 't_order': defaultdict(list), # anchors are sorted to avoid exploring permutations\n # this is the order in which anchors were found\n 't_positives': defaultdict(lambda: 0.), # nb of samples where result pred = pred on instance\n 'prealloc_size': prealloc_size, # samples caches size\n 'data': np.zeros((prealloc_size, coverage_data.shape[1]), coverage_data.dtype), # samples caches\n 'labels': np.zeros(prealloc_size, ), # clf pred labels on raw_data\n 'current_idx': 0,\n 'n_features': coverage_data.shape[1], # data set dim after encoding\n 'coverage_data': coverage_data, # coverage data\n } # type: dict\n self.state['t_order'][()] = () # Trivial order for the empty result\n\n @staticmethod\n def _sort(x: tuple, allow_duplicates=False) -> tuple:\n \"\"\"\n Sorts a tuple, optionally removing duplicates.\n\n Parameters\n ----------\n x:\n Tuple to be sorted.\n allow_duplicates:\n If True, duplicate entries are kept\n\n Returns\n -------\n A sorted tuple.\n \"\"\"\n\n if allow_duplicates:\n return tuple(sorted(x))\n\n return tuple(sorted(set(x)))\n\n @staticmethod\n def dup_bernoulli(p: np.ndarray, level: np.ndarray, n_iter: int = 17) -> np.ndarray:\n \"\"\"\n Update upper precision bound for a candidate anchors dependent on the KL-divergence.\n\n Parameters\n ----------\n p\n Precision of candidate anchors.\n level\n beta / nb of samples for each result.\n n_iter\n Number of iterations during lower bound update.\n\n Returns\n -------\n Updated upper precision bounds 
array.\n \"\"\"\n # TODO: where does 17x sampling come from?\n lm = p.copy()\n um = np.minimum(np.minimum(p + np.sqrt(level / 2.), 1.0), 1.0)\n\n # Perform bisection algorithm to find the largest qm s.t. kl divergence is > level\n for j in range(1, n_iter):\n qm = (um + lm) / 2.\n kl_gt_idx = kl_bernoulli(p, qm) > level\n kl_lt_idx = np.logical_not(kl_gt_idx)\n um[kl_gt_idx] = qm[kl_gt_idx]\n lm[kl_lt_idx] = qm[kl_lt_idx]\n\n return um\n\n @staticmethod\n def dlow_bernoulli(p: np.ndarray, level: np.ndarray, n_iter: int = 17) -> np.ndarray:\n \"\"\"\n Update lower precision bound for a candidate anchors dependent on the KL-divergence.\n\n Parameters\n ----------\n p\n Precision of candidate anchors.\n level\n beta / nb of samples for each result.\n n_iter\n Number of iterations during lower bound update.\n\n Returns\n -------\n Updated lower precision bounds array.\n \"\"\"\n\n um = p.copy()\n lm = np.clip(p - np.sqrt(level / 2.), 0.0, 1.0) # lower bound\n\n # Perform bisection algorithm to find the smallest qm s.t. kl divergence is > level\n for _ in range(1, n_iter):\n qm = (um + lm) / 2.\n kl_gt_idx = kl_bernoulli(p, qm) > level\n kl_lt_idx = np.logical_not(kl_gt_idx)\n lm[kl_gt_idx] = qm[kl_gt_idx]\n um[kl_lt_idx] = qm[kl_lt_idx]\n\n return lm\n\n @staticmethod\n def compute_beta(n_features: int, t: int, delta: float) -> float:\n \"\"\"\n Parameters\n ----------\n n_features\n Number of candidate anchors.\n t\n Iteration number.\n delta\n\n Returns\n -------\n Level used to update upper and lower precision bounds.\n \"\"\"\n # TODO: where do magic numbers come from?\n alpha = 1.1\n k = 405.5\n temp = np.log(k * n_features * (t ** alpha) / delta)\n\n return temp + np.log(temp)\n\n def _get_coverage_samples(self, coverage_samples: int, samplers: List[Callable] = None) -> np.ndarray:\n \"\"\"\n Draws samples uniformly at random from the training set.\n\n Parameters\n ---------\n coverage_samples\n See anchor_beam method.\n samplers\n See __init__ method.\n\n Returns\n -------\n coverage_data\n binarised samples, where 1 indicates the feature has same value/is in same beam as\n instance to be explained. Used to determine, e.g., which samples an result applies to.\n \"\"\"\n\n [coverage_data] = self.sample_fcn((0, ()), coverage_samples, compute_labels=False)\n\n return coverage_data\n\n def select_critical_arms(self, means: np.ndarray, ub: np.ndarray, lb: np.ndarray, n_samples: np.ndarray,\n delta: float, top_n: int, t: int): # type: ignore\n \"\"\"\n Determines a set of two anchors by updating the upper bound for low emprical precision anchors and\n the lower bound for anchors with high empirical precision.\n\n Parameters\n ----------\n means\n Empirical mean result precisions.\n ub\n Upper bound on result precisions.\n lb\n Lower bound on result precisions.\n n_samples\n The number of samples drawn for each candidate result.\n delta\n Confidence budget, candidate anchors have close to optimal precisions with prob. 
1 - delta.\n top_n\n Number of arms to be selected.\n t\n Iteration number.\n\n Returns\n -------\n Upper and lower precision bound indices.\n \"\"\"\n\n crit_arms = namedtuple('crit_arms', ['ut', 'lt'])\n\n sorted_means = np.argsort(means) # ascending sort of result candidates by precision\n beta = self.compute_beta(len(means), t, delta)\n\n # J = the beam width top result candidates with highest precision\n # not_J = the rest\n J = sorted_means[-top_n:]\n not_J = sorted_means[:-top_n]\n\n # update upper bound for lowest precision result candidates\n ub[not_J] = self.dup_bernoulli(means[not_J], beta / n_samples[not_J])\n # update lower bound for highest precision result candidates\n lb[J] = self.dlow_bernoulli(means[J], beta / n_samples[J])\n\n # for the low precision result candidates, compute the upper precision bound and keep the index ...\n # ... of the result candidate with the highest upper precision value -> ut\n # for the high precision result candidates, compute the lower precision bound and keep the index ...\n # ... of the result candidate with the lowest lower precision value -> lt\n ut = not_J[np.argmax(ub[not_J])]\n lt = J[np.argmin(lb[J])]\n\n return crit_arms._make((ut, lt))\n\n def kllucb(self, anchors: list, init_stats: dict, epsilon: float, delta: float, batch_size: int, top_n: int,\n verbose: bool = False, verbose_every: int = 1) -> np.ndarray:\n \"\"\"\n Implements the KL-LUCB algorithm (Kaufmann and Kalyanakrishnan, 2013).\n\n Parameters\n ----------\n anchors:\n A list of anchors from which two critical anchors are selected (see Kaufmann and Kalyanakrishnan, 2013).\n init_stats\n Dictionary with lists containing nb of samples used and where sample predictions equal the desired label.\n epsilon\n Precision bound tolerance for convergence.\n delta\n Used to compute beta.\n batch_size\n Number of samples.\n top_n\n Min of beam width size or number of candidate anchors.\n verbose\n Whether to print intermediate output.\n verbose_every\n Whether to print intermediate output every verbose_every steps.\n\n Returns\n -------\n Indices of best result options. Number of indices equals min of beam width or nb of candidate anchors.\n \"\"\"\n\n # n_features equals to the nb of candidate anchors\n n_features = len(anchors)\n\n # arrays for total number of samples & positives (# samples where prediction equals desired label)\n n_samples, positives = init_stats['n_samples'], init_stats['positives']\n anchors_to_sample, anchors_idx = [], []\n for f in np.where(n_samples == 0)[0]:\n anchors_to_sample.append(anchors[f])\n anchors_idx.append(f)\n\n if anchors_idx:\n pos, total = self.draw_samples(anchors_to_sample, 1)\n positives[anchors_idx] += pos\n n_samples[anchors_idx] += total\n\n if n_features == top_n: # return all options b/c of beam search width\n return np.arange(n_features)\n\n # update the upper and lower precision bounds until the difference between the best upper ...\n # ... precision bound of the low precision anchors and the worst lower precision bound of the high ...\n # ... 
precision anchors is smaller than eps\n means = positives / n_samples # fraction sample predictions equal to desired label\n ub, lb = np.zeros(n_samples.shape), np.zeros(n_samples.shape)\n t = 1\n crit_a_idx = self.select_critical_arms(means, ub, lb, n_samples, delta, top_n, t)\n B = ub[crit_a_idx.ut] - lb[crit_a_idx.lt]\n verbose_count = 0\n\n while B > epsilon:\n\n verbose_count += 1\n if verbose and verbose_count % verbose_every == 0:\n ut, lt = crit_a_idx\n print('Best: %d (mean:%.10f, n: %d, lb:%.4f)' %\n (lt, means[lt], n_samples[lt], lb[lt]), end=' ')\n print('Worst: %d (mean:%.4f, n: %d, ub:%.4f)' %\n (ut, means[ut], n_samples[ut], ub[ut]), end=' ')\n print('B = %.2f' % B)\n\n # draw samples for each critical result, update anchors' mean, upper and lower\n # bound precision estimate\n selected_anchors = [anchors[idx] for idx in crit_a_idx]\n pos, total = self.draw_samples(selected_anchors, batch_size)\n idx = list(crit_a_idx)\n positives[idx] += pos\n n_samples[idx] += total\n means = positives / n_samples\n t += 1\n crit_a_idx = self.select_critical_arms(means, ub, lb, n_samples, delta, top_n, t)\n B = ub[crit_a_idx.ut] - lb[crit_a_idx.lt]\n sorted_means = np.argsort(means)\n\n return sorted_means[-top_n:]\n\n def draw_samples(self, anchors: list, batch_size: int) -> Tuple[tuple, tuple]:\n \"\"\"\n Parameters\n ----------\n anchors\n Anchors on which samples are conditioned.\n batch_size\n The number of samples drawn for each result.\n\n Returns\n -------\n A tuple of positive samples (for which prediction matches desired label)\n and a tuple of total number of samples drawn.\n \"\"\"\n\n for anchor in anchors:\n if anchor not in self.state['t_order']:\n self.state['t_order'][anchor] = list(anchor)\n\n sample_stats, pos, total = [], (), () # type: List, Tuple, Tuple\n samples_iter = [self.sample_fcn((i, tuple(self.state['t_order'][anchor])), num_samples=batch_size)\n for i, anchor in enumerate(anchors)]\n for samples, anchor in zip(samples_iter, anchors):\n covered_true, covered_false, labels, *additionals, _ = samples\n sample_stats.append(self.update_state(covered_true, covered_false, labels, additionals, anchor))\n pos, total = list(zip(*sample_stats))\n\n return pos, total\n\n def propose_anchors(self, previous_best: list) -> list:\n \"\"\"\n Parameters\n ----------\n previous_best\n List with tuples of result candidates.\n\n\n Returns\n -------\n List with tuples of candidate anchors with additional metadata.\n \"\"\"\n\n # compute some variables used later on\n state = self.state\n all_features = range(state['n_features'])\n coverage_data = state['coverage_data']\n current_idx = state['current_idx']\n data = state['data'][:current_idx]\n labels = state['labels'][:current_idx]\n\n # initially, every feature separately is an result\n if len(previous_best) == 0:\n tuples = [(x,) for x in all_features]\n for x in tuples:\n pres = data[:, x[0]].nonzero()[0] # Select samples whose feat value is = to the result value\n state['t_idx'][x] = set(pres)\n state['t_nsamples'][x] = float(len(pres))\n state['t_positives'][x] = float(labels[pres].sum())\n state['t_order'][x].append(x[0])\n state['t_coverage_idx'][x] = set(coverage_data[:, x[0]].nonzero()[0])\n state['t_coverage'][x] = (float(len(state['t_coverage_idx'][x])) / coverage_data.shape[0])\n return tuples\n\n # create new anchors: add a feature to every result in current best\n new_tuples = set() # type: Set[tuple]\n for f in all_features:\n for t in previous_best:\n new_t = self._sort(t + (f,), allow_duplicates=False)\n if 
len(new_t) != len(t) + 1: # Avoid repeating the same feature ...\n continue\n if new_t not in new_tuples:\n new_tuples.add(new_t)\n state['t_order'][new_t] = copy.deepcopy(state['t_order'][t])\n state['t_order'][new_t].append(f)\n state['t_coverage_idx'][new_t] = (state['t_coverage_idx'][t].intersection(\n state['t_coverage_idx'][(f,)])\n )\n state['t_coverage'][new_t] = (float(len(state['t_coverage_idx'][new_t])) / coverage_data.shape[0])\n t_idx = np.array(list(state['t_idx'][t])) # indices of samples where the len-1 result applies\n t_data = state['data'][t_idx]\n present = np.where(t_data[:, f] == 1)[0]\n state['t_idx'][new_t] = set(t_idx[present]) # indices of samples where the proposed result applies\n idx_list = list(state['t_idx'][new_t])\n state['t_nsamples'][new_t] = float(len(idx_list))\n state['t_positives'][new_t] = np.sum(state['labels'][idx_list])\n\n return list(new_tuples)\n\n def update_state(self, covered_true: np.ndarray, covered_false: np.ndarray, labels: np.ndarray,\n samples: tuple, anchor: tuple) -> Tuple[int, int]:\n \"\"\"\n Updates the explainer state (see __init__ for full state definition).\n\n Parameters\n ----------\n\n covered_true\n Examples where the result applies and the prediction is the same as on\n the instance to be explained.\n covered_false\n Examples where the result applies and the prediction is the different to\n the instance to be explained.\n samples\n A tuple containing discretized data, coverage and the result sampled.\n labels\n An array indicating whether the prediction on the sample matches the label\n of the instance to be explained.\n anchor\n The result to be updated.\n\n Returns\n -------\n A tuple containing the number of instances equals desired label of observation\n to be explained the total number of instances sampled, and the result that was sampled\n \"\"\"\n\n # data = binary matrix where 1 means a feature has the same value as the feature in the result\n data, coverage = samples\n n_samples = data.shape[0]\n\n current_idx = self.state['current_idx']\n idxs = range(current_idx, current_idx + n_samples)\n self.state['t_idx'][anchor].update(idxs)\n self.state['t_nsamples'][anchor] += n_samples\n self.state['t_positives'][anchor] += labels.sum()\n if coverage > -1:\n self.state['t_coverage'][anchor] = coverage\n self.state['t_covered_true'][anchor] = covered_true\n self.state['t_covered_false'][anchor] = covered_false\n self.state['data'][idxs] = data\n self.state['labels'][idxs] = labels\n self.state['current_idx'] += n_samples\n\n if self.state['current_idx'] >= self.state['data'].shape[0] - max(self.margin, n_samples):\n prealloc_size = self.state['prealloc_size']\n self.state['data'] = np.vstack(\n (self.state['data'], np.zeros((prealloc_size, data.shape[1]), data.dtype))\n )\n self.state['labels'] = np.hstack(\n (self.state['labels'], np.zeros(prealloc_size, labels.dtype))\n )\n\n return labels.sum(), data.shape[0]\n\n def get_init_stats(self, anchors: list, coverages=False) -> dict:\n \"\"\"\n Finds the number of samples already drawn for each result in anchors, their\n comparisons with the instance to be explained and, optionally, coverage.\n\n Parameters\n ----------\n anchors\n Candidate anchors.\n coverages\n If True, the statistics returned contain the coverage of the specified anchors.\n\n Returns\n -------\n Dictionary with lists containing nb of samples used and where sample predictions equal\n the desired label.\n \"\"\"\n\n def array_factory(size: tuple):\n return lambda: np.zeros(size)\n\n state = self.state\n 
stats = defaultdict(array_factory((len(anchors),))) # type: Dict[str, np.ndarray]\n for i, anchor in enumerate(anchors):\n stats['n_samples'][i] = state['t_nsamples'][anchor]\n stats['positives'][i] = state['t_positives'][anchor]\n if coverages:\n stats['coverages'][i] = state['t_coverage'][anchor]\n\n return stats\n\n def get_anchor_metadata(self, features: tuple, success, batch_size: int = 100) -> dict:\n \"\"\"\n Given the features contained in a result, it retrieves metadata such as the precision and\n coverage of the result and partial anchors and examples where the result/partial anchors\n apply and yield the same prediction as on the instance to be explained (covered_true)\n or a different prediction (covered_false).\n\n Parameters\n ----------\n features\n Sorted indices of features in result.\n success\n Indicates whether an anchor satisfying precision threshold was met or not.\n batch_size\n Number of samples among which positive and negative examples for partial anchors are\n selected if partial anchors have not already been explicitly sampled.\n\n Returns\n -------\n Anchor dictionary with result features and additional metadata.\n :param success:\n \"\"\"\n\n state = self.state\n anchor = {'feature': [], 'mean': [], 'precision': [], 'coverage': [], 'examples': [],\n 'all_precision': 0, 'num_preds': state['data'].shape[0], 'success': success} # type: dict\n current_t = tuple() # type: tuple\n # draw pos and negative example where partial result applies if not sampled during search\n to_resample, to_resample_idx = [], []\n for f in state['t_order'][features]:\n current_t = self._sort(current_t + (f,), allow_duplicates=False)\n mean = (state['t_positives'][current_t] / state['t_nsamples'][current_t])\n anchor['feature'].append(f)\n anchor['mean'].append(mean)\n anchor['precision'].append(mean)\n anchor['coverage'].append(state['t_coverage'][current_t])\n\n # add examples where result does or does not hold\n if current_t in state['t_covered_true']:\n exs = {\n 'covered_true': state['t_covered_true'][current_t],\n 'covered_false': state['t_covered_false'][current_t],\n 'uncovered_true': np.array([]),\n 'uncovered_false': np.array([]),\n }\n anchor['examples'].append(exs)\n else:\n to_resample.append(current_t)\n # sampling process relies on ordering\n state['t_order'][current_t] = list(current_t)\n to_resample_idx.append(len(anchor['examples']))\n anchor['examples'].append('placeholder')\n # if the anchor was not sampled, the coverage is not estimated\n anchor['coverage'][-1] = 'placeholder'\n\n # If partial anchors have not been sampled, resample to find examples\n if to_resample:\n\n _, _ = self.draw_samples(to_resample, batch_size)\n\n while to_resample:\n feats, example_idx = to_resample.pop(), to_resample_idx.pop()\n anchor['examples'][example_idx] = {\n 'covered_true': state['t_covered_true'][feats],\n 'covered_false': state['t_covered_false'][feats],\n 'uncovered_true': np.array([]),\n 'uncovered_false': np.array([]),\n }\n # update result with true coverage\n anchor['coverage'][example_idx] = state['t_coverage'][feats]\n\n return anchor\n\n @staticmethod\n def to_sample(means: np.ndarray, ubs: np.ndarray, lbs: np.ndarray, desired_confidence: float, epsilon_stop: float):\n \"\"\"\n Given an array of mean result precisions and their upper and lower bounds, determines for which anchors\n more samples need to be drawn in order to estimate the anchors precision with desired_confidence and error\n tolerance.\n\n Parameters\n ----------\n means:\n Mean precisions (each element 
represents a different result).\n ubs:\n Precisions' upper bounds (each element represents a different result).\n lbs:\n Precisions' lower bounds (each element represents a different result).\n desired_confidence:\n Desired level of confidence for precision estimation.\n epsilon_stop:\n Tolerance around desired precision.\n\n Returns\n -------\n Boolean array indicating whether more samples are to be drawn for that particular result.\n \"\"\"\n\n return ((means >= desired_confidence) & (lbs < desired_confidence - epsilon_stop)) | \\\n ((means < desired_confidence) & (ubs >= desired_confidence + epsilon_stop))\n\n def anchor_beam(self, delta: float = 0.05, epsilon: float = 0.1, desired_confidence: float = 1.,\n beam_size: int = 1, epsilon_stop: float = 0.05, min_samples_start: int = 100,\n max_anchor_size: int = None, stop_on_first: bool = False, batch_size: int = 100,\n coverage_samples: int = 10000, verbose: bool = False, verbose_every: int = 1,\n **kwargs) -> dict:\n\n \"\"\"\n Uses the KL-LUCB algorithm (Kaufmann and Kalyanakrishnan, 2013) together with additional sampling to search\n feature sets (anchors) that guarantee the prediction made by a classifier model. The search is greedy if\n beam_size=1. Otherwise, at each of the max_anchor_size steps, beam_size solutions are explored. By construction,\n solutions found have high precision (defined as the expected of number of times the classifier makes the same\n prediction when queried with the feature subset combined with arbitrary samples drawn from a noise distribution)\n The algorithm maximises the coverage of the solution found - the frequency of occurrence of records containing\n the feature subset in set of samples.\n\n Parameters\n ----------\n delta\n Used to compute beta.\n epsilon\n Precision bound tolerance for convergence.\n desired_confidence\n Desired level of precision (tau in paper).\n beam_size\n Beam width.\n epsilon_stop\n Confidence bound margin around desired precision.\n min_samples_start\n Min number of initial samples.\n max_anchor_size\n Max number of features in result.\n stop_on_first\n Stop on first valid result found.\n coverage_samples\n Number of samples from which to build a coverage set.\n batch_size\n Number of samples used for an arm evaluation.\n verbose\n Whether to print intermediate LUCB & anchor selection output.\n verbose_every\n Print intermediate output every verbose_every steps.\n\n Returns\n -------\n Explanation dictionary containing anchors with metadata like coverage and precision\n and examples.\n \"\"\"\n\n # Select coverage set and initialise object state\n coverage_data = self._get_coverage_samples(\n coverage_samples,\n samplers=self.samplers,\n )\n self._init_state(batch_size, coverage_data)\n\n # sample by default 1 or min_samples_start more random value(s)\n (pos,), (total,) = self.draw_samples([()], min_samples_start)\n\n # mean = fraction of labels sampled data that equals the label of the instance to be explained, ...\n # ... equivalent to prec(A) in paper (eq.2)\n mean = np.array([pos / total])\n beta = np.log(1. 
/ delta)\n # lower bound on mean precision\n lb = self.dlow_bernoulli(mean, np.array([beta / total]))\n\n # if lower precision bound below tau with margin eps, keep sampling data until lb is high enough ...\n # or mean falls below precision threshold\n while mean > desired_confidence and lb < desired_confidence - epsilon:\n (n_pos,), (n_total,) = self.draw_samples([()], batch_size)\n pos += n_pos\n total += n_total\n mean = np.array([pos / total])\n lb = self.dlow_bernoulli(mean, np.array([beta / total]))\n\n # if prec_lb(A) > tau for A=() then the empty result satisfies the constraints ...\n if lb > desired_confidence:\n return {\n 'feature': [],\n 'mean': [],\n 'num_preds': total,\n 'precision': [],\n 'coverage': [],\n 'examples': [],\n 'all_precision': mean,\n 'success': True,\n }\n\n current_size, best_coverage = 1, -1\n best_of_size = {0: []} # type: Dict[int, list]\n best_anchor = ()\n\n if max_anchor_size is None:\n max_anchor_size = self.state['n_features']\n\n # find best result using beam search\n while current_size <= max_anchor_size:\n\n # create new candidate anchors by adding features to current best anchors\n anchors = self.propose_anchors(best_of_size[current_size - 1])\n # goal is to max coverage given precision constraint P(prec(A) > tau) > 1 - delta (eq.4)\n # so keep tuples with higher coverage than current best coverage\n anchors = [anchor for anchor in anchors if self.state['t_coverage'][anchor] > best_coverage]\n\n # if no better coverage found with added features -> break\n if len(anchors) == 0:\n break\n\n # for each result, get initial nb of samples used and prec(A)\n stats = self.get_init_stats(anchors)\n\n # apply KL-LUCB and return result options (nb of options = beam width) in the form of indices\n candidate_anchors = self.kllucb(\n anchors,\n stats,\n epsilon,\n delta,\n batch_size,\n min(beam_size, len(anchors)),\n verbose=verbose,\n verbose_every=verbose_every,\n )\n # store best anchors for the given result size (nb of features in the result)\n best_of_size[current_size] = [anchors[index] for index in candidate_anchors]\n # for each candidate result:\n # update precision, lower and upper bounds until precision constraints are met\n # update best result if coverage is larger than current best coverage\n stats = self.get_init_stats(best_of_size[current_size], coverages=True)\n positives, n_samples = stats['positives'], stats['n_samples']\n beta = np.log(1. 
/ (delta / (1 + (beam_size - 1) * self.state['n_features'])))\n kl_constraints = beta / n_samples\n means = stats['positives'] / stats['n_samples']\n lbs = self.dlow_bernoulli(means, kl_constraints)\n ubs = self.dup_bernoulli(means, kl_constraints)\n\n if verbose:\n print('Best of size ', current_size, ':')\n for i, mean, lb, ub in zip(candidate_anchors, means, lbs, ubs):\n print(i, mean, lb, ub)\n\n # draw samples to ensure result meets precision criteria\n continue_sampling = self.to_sample(means, ubs, lbs, desired_confidence, epsilon_stop)\n while continue_sampling.any():\n selected_anchors = [anchors[idx] for idx in candidate_anchors[continue_sampling]]\n pos, total = self.draw_samples(selected_anchors, batch_size)\n positives[continue_sampling] += pos\n n_samples[continue_sampling] += total\n means[continue_sampling] = positives[continue_sampling]/n_samples[continue_sampling]\n kl_constraints[continue_sampling] = beta / n_samples[continue_sampling]\n lbs[continue_sampling] = self.dlow_bernoulli(\n means[continue_sampling],\n kl_constraints[continue_sampling],\n )\n ubs[continue_sampling] = self.dup_bernoulli(\n means[continue_sampling],\n kl_constraints[continue_sampling],\n )\n continue_sampling = self.to_sample(means, ubs, lbs, desired_confidence, epsilon_stop)\n\n # anchors who meet the precision setting and have better coverage than the best anchors so far\n coverages = stats['coverages']\n valid_anchors = (means >= desired_confidence) & (lbs > desired_confidence - epsilon_stop)\n better_anchors = (valid_anchors & (coverages > best_coverage)).nonzero()[0]\n\n if verbose:\n for i, valid, mean, lb, ub, coverage in \\\n zip(candidate_anchors, valid_anchors, means, lbs, ubs, coverages):\n t = anchors[i]\n print(\n '%s mean = %.2f lb = %.2f ub = %.2f coverage: %.2f n: %d' %\n (t, mean, lb, ub, coverage, self.state['t_nsamples'][t]))\n if valid:\n print(\n 'Found eligible result ', t,\n 'Coverage:', coverage,\n 'Is best?', coverage > best_coverage,\n )\n\n if better_anchors.size > 0:\n best_anchor_idx = better_anchors[np.argmax(coverages[better_anchors])]\n best_coverage = coverages[best_anchor_idx]\n best_anchor = anchors[candidate_anchors[best_anchor_idx]]\n if best_coverage == 1. or stop_on_first:\n break\n\n current_size += 1\n\n # if no result is found, choose highest precision of best result candidate from every round\n if not best_anchor:\n success = False # indicates the method has not found an anchor\n logger.warning('Could not find an result satisfying the {} precision constraint. 
Now returning '\n 'the best non-eligible result.'.format(desired_confidence))\n anchors = []\n for i in range(0, current_size):\n anchors.extend(best_of_size[i])\n stats = self.get_init_stats(anchors)\n candidate_anchors = self.kllucb(\n anchors,\n stats,\n epsilon,\n delta,\n batch_size,\n 1, # beam size\n verbose=verbose,\n )\n best_anchor = anchors[candidate_anchors[0]]\n else:\n success = True\n\n return self.get_anchor_metadata(best_anchor, success, batch_size=batch_size)\n\n\nclass DistributedAnchorBaseBeam(AnchorBaseBeam):\n\n if RAY_INSTALLED:\n import ray\n ray = ray\n\n def __init__(self, samplers: List[Callable], **kwargs) -> None:\n\n super().__init__(samplers)\n self.chunksize = kwargs.get('chunksize', 1)\n self.sample_fcn = lambda actor, anchor, n_samples, compute_labels=True:\\\n actor.__call__.remote(anchor,\n n_samples,\n compute_labels=compute_labels)\n self.pool = ActorPool(samplers)\n self.samplers = samplers\n\n def _get_coverage_samples(self, coverage_samples: int, samplers: List[Callable] = None) -> np.ndarray:\n \"\"\"\n Sends a request for a coverage set to process running sampling tasks.\n\n Parameters\n ----------\n See superclass implementation.\n\n Returns\n -------\n See superclass implementation.\n \"\"\"\n\n [coverage_data] = DistributedAnchorBaseBeam.ray.get(\n self.sample_fcn(samplers[0], (0, ()), coverage_samples, compute_labels=False)\n )\n\n return coverage_data\n\n def draw_samples(self, anchors: list, batch_size: int) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Distributes sampling requests among processes running sampling tasks.\n\n Parameters\n ----------\n See superclass implementation.\n\n Returns\n -------\n Same outputs as superclass but of different types.\n \"\"\"\n\n # partial anchors not generated by propose_anchors are not in the order dictionary\n for anchor in anchors:\n if anchor not in self.state['t_order']:\n self.state['t_order'][anchor] = list(anchor)\n\n pos, total = np.zeros((len(anchors),)), np.zeros((len(anchors),))\n order_map = [(i, tuple(self.state['t_order'][anchor])) for i, anchor in enumerate(anchors)]\n samples_iter = self.pool.map_unordered(\n partial(self.sample_fcn, n_samples=batch_size),\n order_map,\n self.chunksize,\n )\n for samples_batch in samples_iter:\n for samples in samples_batch:\n covered_true, covered_false, labels, *additionals, anchor_idx = samples\n positives, n_samples = self.update_state(\n covered_true,\n covered_false,\n labels,\n additionals,\n anchors[anchor_idx],\n )\n # return statistics in the same order as the requests\n pos[anchor_idx], total[anchor_idx] = positives, n_samples\n\n return pos, total\n" ]
[ [ "numpy.sqrt", "numpy.sum", "numpy.zeros", "numpy.argmin", "numpy.argsort", "numpy.argmax", "numpy.logical_not", "numpy.arange", "numpy.log", "numpy.array", "numpy.where" ] ]
peteseibel/retention-data-pipeline
[ "9839d2b900f77722fffb762772697e422e7ec8fb" ]
[ "retention_data_pipeline/dao/edw.py" ]
[ "import os\nimport pyodbc\nimport pandas\nfrom django.conf import settings\n\nDB = \"UWSDBDataStore\"\n\n\ndef get_day1_enrollments(year, quarter):\n \"\"\"\n Returns a list of student system_keys enrolled on day one and EOP status\n \"\"\"\n campus = 0\n db_query = \"\"\"\n SELECT *\n FROM (\n SELECT\n CASE WHEN mm_spcl_program IN(1, 2, 13, 14, 16, 17, 31, 32, 33)\n THEN CAST(1 AS BIT)\n ELSE CAST(0 AS BIT)\n END AS eop_student,\n (mm.mm_year*10 + mm.mm_qtr) as yrq,\n ROW_NUMBER() OVER\n (PARTITION BY mm.mm_system_key ORDER BY mm.mm_system_key) AS rn,\n mm_system_key, mm.mm_year, mm.mm_qtr, mm_deg_level, mm_major_abbr\n FROM\n sec.sr_mini_master mm\n INNER JOIN sec.sr_mini_master_deg_program deg\n ON deg.mm_student_no = mm.mm_student_no\n AND deg.mm_year = mm.mm_year\n AND deg.mm_qtr = mm.mm_qtr\n WHERE\n mm.mm_year = {}\n AND mm.mm_qtr = {}\n AND mm.mm_proc_ind = 2\n AND deg.mm_branch = {}) as a\n WHERE a.rn = 1\n \"\"\".format(\n year, quarter, campus\n )\n results = _run_query(DB, db_query)\n return results\n\n\ndef get_ts_courses(year, quarter):\n db_query = \"\"\"\n SELECT\n ts_year,\n ts_quarter,\n course_no,\n dept_abbrev,\n section_id,\n sln\n FROM\n sec.time_schedule\n WHERE\n ts_year = {}\n AND ts_quarter = {}\n \"\"\".format(\n year, quarter\n )\n results = _run_query(DB, db_query)\n return results\n\n\ndef get_registrations(year, quarter):\n db_query = \"\"\"\n SELECT\n system_key,\n regis_yr,\n regis_qtr,\n sln\n FROM\n sec.registration_courses\n WHERE\n regis_yr = {}\n AND regis_qtr = {}\n AND request_status in ('A', 'C', 'R')\n \"\"\".format(\n year, quarter\n )\n results = _run_query(DB, db_query)\n return results\n\n\ndef get_student_metadata():\n db_query = \"\"\"\n SELECT\n system_key,\n uw_netid,\n student_no,\n student_name_lowc\n FROM\n sec.student_1\n \"\"\"\n results = _run_query(DB, db_query)\n return results\n\n\ndef get_international_students():\n db_query = \"\"\"\n SELECT\n SDBSrcSystemKey,\n InternationalStudentInd\n FROM EDWPresentation.sec.dimStudent\n WHERE\n InternationalStudentInd = 'Y'\n \"\"\"\n results = _run_query(DB, db_query)\n return results\n\n\ndef get_majors(year, quarter):\n\n db_query = \"\"\"\n #TODO: Determine relationship w/ mini_maser and write query\n \"\"\".format(\n year, quarter\n )\n results = _run_query(DB, db_query)\n return results\n\n\ndef _run_query(database, query):\n os.environ[\"FREETDSCONF\"] = \"db_config/freetds.conf\"\n os.environ[\"ODBCSYSINI\"] = \"db_config\"\n\n password = getattr(settings, \"EDW_PASSWORD\")\n user = getattr(settings, \"EDW_USER\")\n server = getattr(settings, \"EDW_SERVER\")\n constring = (\n \"Driver={FreeTDS};\"\n f\"SERVERNAME={server};\"\n f\"Database={database};\"\n \"Port=1433;\"\n \"TDS_Version=7.2;\"\n f\"UID={user};\"\n f\"PWD={password}\"\n )\n con = pyodbc.connect(constring)\n df = pandas.read_sql(query, con)\n return df\n" ]
[ [ "pandas.read_sql" ] ]
patelajaychh/Hierarchical-Localization
[ "d3f155d0587376a6fd0395ea36125016160fa448" ]
[ "hloc/localize_inloc.py" ]
[ "import argparse\nfrom pathlib import Path\nimport numpy as np\nimport h5py\nfrom scipy.io import loadmat\nimport torch\nfrom tqdm import tqdm\nimport logging\nimport pickle\nimport cv2\nimport pycolmap\n\nfrom .utils.parsers import parse_retrieval, names_to_pair\n\n\ndef interpolate_scan(scan, kp):\n h, w, c = scan.shape\n kp = kp / np.array([[w-1, h-1]]) * 2 - 1\n assert np.all(kp > -1) and np.all(kp < 1)\n scan = torch.from_numpy(scan).permute(2, 0, 1)[None]\n kp = torch.from_numpy(kp)[None, None]\n grid_sample = torch.nn.functional.grid_sample\n\n # To maximize the number of points that have depth:\n # do bilinear interpolation first and then nearest for the remaining points\n interp_lin = grid_sample(\n scan, kp, align_corners=True, mode='bilinear')[0, :, 0]\n interp_nn = torch.nn.functional.grid_sample(\n scan, kp, align_corners=True, mode='nearest')[0, :, 0]\n interp = torch.where(torch.isnan(interp_lin), interp_nn, interp_lin)\n valid = ~torch.any(torch.isnan(interp), 0)\n\n kp3d = interp.T.numpy()\n valid = valid.numpy()\n return kp3d, valid\n\n\ndef get_scan_pose(dataset_dir, rpath):\n split_image_rpath = rpath.split('/')\n floor_name = split_image_rpath[-3]\n scan_id = split_image_rpath[-2]\n image_name = split_image_rpath[-1]\n building_name = image_name[:3]\n\n path = Path(\n dataset_dir, 'database/alignments', floor_name,\n f'transformations/{building_name}_trans_{scan_id}.txt')\n with open(path) as f:\n raw_lines = f.readlines()\n\n P_after_GICP = np.array([\n np.fromstring(raw_lines[7], sep=' '),\n np.fromstring(raw_lines[8], sep=' '),\n np.fromstring(raw_lines[9], sep=' '),\n np.fromstring(raw_lines[10], sep=' ')\n ])\n\n return P_after_GICP\n\n\ndef pose_from_cluster(dataset_dir, q, retrieved, feature_file, match_file,\n skip=None):\n height, width = cv2.imread(str(dataset_dir / q)).shape[:2]\n cx = .5 * width\n cy = .5 * height\n focal_length = 4032. * 28. 
/ 36.\n\n all_mkpq = []\n all_mkpr = []\n all_mkp3d = []\n all_indices = []\n kpq = feature_file[q]['keypoints'].__array__()\n num_matches = 0\n\n for i, r in enumerate(retrieved):\n kpr = feature_file[r]['keypoints'].__array__()\n pair = names_to_pair(q, r)\n m = match_file[pair]['matches0'].__array__()\n v = (m > -1)\n\n if skip and (np.count_nonzero(v) < skip):\n continue\n\n mkpq, mkpr = kpq[v], kpr[m[v]]\n num_matches += len(mkpq)\n\n scan_r = loadmat(Path(dataset_dir, r + '.mat'))[\"XYZcut\"]\n mkp3d, valid = interpolate_scan(scan_r, mkpr)\n Tr = get_scan_pose(dataset_dir, r)\n mkp3d = (Tr[:3, :3] @ mkp3d.T + Tr[:3, -1:]).T\n\n all_mkpq.append(mkpq[valid])\n all_mkpr.append(mkpr[valid])\n all_mkp3d.append(mkp3d[valid])\n all_indices.append(np.full(np.count_nonzero(valid), i))\n\n all_mkpq = np.concatenate(all_mkpq, 0)\n all_mkpr = np.concatenate(all_mkpr, 0)\n all_mkp3d = np.concatenate(all_mkp3d, 0)\n all_indices = np.concatenate(all_indices, 0)\n\n cfg = {\n 'model': 'SIMPLE_PINHOLE',\n 'width': width,\n 'height': height,\n 'params': [focal_length, cx, cy]\n }\n ret = pycolmap.absolute_pose_estimation(\n all_mkpq, all_mkp3d, cfg, 48.00)\n ret['cfg'] = cfg\n return ret, all_mkpq, all_mkpr, all_mkp3d, all_indices, num_matches\n\n\ndef main(dataset_dir, retrieval, features, matches, results,\n skip_matches=None):\n\n assert retrieval.exists(), retrieval\n assert features.exists(), features\n assert matches.exists(), matches\n\n retrieval_dict = parse_retrieval(retrieval)\n queries = list(retrieval_dict.keys())\n\n feature_file = h5py.File(features, 'r')\n match_file = h5py.File(matches, 'r')\n\n poses = {}\n logs = {\n 'features': features,\n 'matches': matches,\n 'retrieval': retrieval,\n 'loc': {},\n }\n logging.info('Starting localization...')\n for q in tqdm(queries):\n db = retrieval_dict[q]\n ret, mkpq, mkpr, mkp3d, indices, num_matches = pose_from_cluster(\n dataset_dir, q, db, feature_file, match_file, skip_matches)\n\n poses[q] = (ret['qvec'], ret['tvec'])\n logs['loc'][q] = {\n 'db': db,\n 'PnP_ret': ret,\n 'keypoints_query': mkpq,\n 'keypoints_db': mkpr,\n '3d_points': mkp3d,\n 'indices_db': indices,\n 'num_matches': num_matches,\n }\n\n logging.info(f'Writing poses to {results}...')\n with open(results, 'w') as f:\n for q in queries:\n qvec, tvec = poses[q]\n qvec = ' '.join(map(str, qvec))\n tvec = ' '.join(map(str, tvec))\n name = q.split(\"/\")[-1]\n f.write(f'{name} {qvec} {tvec}\\n')\n\n logs_path = f'{results}_logs.pkl'\n logging.info(f'Writing logs to {logs_path}...')\n with open(logs_path, 'wb') as f:\n pickle.dump(logs, f)\n logging.info('Done!')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_dir', type=Path, required=True)\n parser.add_argument('--retrieval', type=Path, required=True)\n parser.add_argument('--features', type=Path, required=True)\n parser.add_argument('--matches', type=Path, required=True)\n parser.add_argument('--results', type=Path, required=True)\n parser.add_argument('--skip_matches', type=int)\n args = parser.parse_args()\n main(**args.__dict__)\n" ]
[ [ "numpy.count_nonzero", "numpy.all", "torch.from_numpy", "torch.nn.functional.grid_sample", "numpy.array", "torch.isnan", "numpy.concatenate", "numpy.fromstring" ] ]
czhao39/xacc-vqe
[ "4ad1d9308794e28c37772b7ea29cd3923388168a" ]
[ "examples/general/scipy_minimization_with_xacc.py" ]
[ "import numpy as np\nimport pyxacc as xacc\nfrom pyxacc import InstructionParameter\nimport pyxaccvqe as vqe\nfrom pyxaccvqe import PauliOperator\nfrom scipy.optimize import minimize\n\nxacc.Initialize()\n\n# Construct the First Quantized 2x2 and 3x3 Hamiltonians\nhamiltonian3x3 = PauliOperator(7.7658547225) + PauliOperator({0:'X'}, -2.143303525) + \\\n PauliOperator({0:'X', 1:'X'}, -3.91311896) + PauliOperator({0:'X', 1:'Z'}, -2.143303525) + \\\n PauliOperator({0:'Y',1:'Y'}, -3.91311896) + PauliOperator({0:'Z'}, 1.6408547224999999) + \\\n PauliOperator({0:'Z',1:'Z'}, -7.9841452775) + PauliOperator({1:'Z'}, -1.8591452775000001)\n \nprint('\\nH_{3x3} = ', hamiltonian3x3)\n\n# Create the 3x3 Ansatz\nansatz3x3 = xacc.gate.GateFunction('statePrep', ['theta0','theta1'])\nansatz3x3.add(xacc.gate.create('Ry',[0],['theta0']))\nansatz3x3.add(xacc.gate.create('Ry',[1],['theta1']))\nprint('3x3 Ansatz = \\n', ansatz3x3.toString('q'))\n\nqpu = xacc.getAccelerator('tnqvm')\n\ndef energy(params):\n ir = hamiltonian3x3.toXACCIR()\n kernels = ir.getKernels()\n qubits = qpu.createBuffer('q',2)\n energy = 0.0\n for k in kernels:\n val = 0\n coeff = k.getParameter(0)\n if k.nInstructions() > 0:\n evaledAnsatz = vqe.AnsatzEvaluator.evaluate(ansatz3x3, 2, np.asarray(params))\n k.insertInstruction(0, evaledAnsatz)\n qpu.execute(qubits, k)\n exp = qubits.getExpectationValueZ()\n qubits.resetBuffer()\n val = coeff * exp\n else:\n val = coeff\n energy += val\n return energy.real\n\nprint('XACC Diagonalize: ', vqe.execute(hamiltonian3x3, **{}).energy) #'task':'vqe', 'ansatz':ansatz3x3}).energy)\nprint('XACC Nelder-Mead: ', vqe.execute(hamiltonian3x3, **{'task':'vqe', 'vqe-params':'.7,.2', 'ansatz':ansatz3x3}).energy)\nprint('XACC SciPy Minimze:\\n', minimize(energy, [0,0]))\n\n\n\nfrom bayes_opt import BayesianOptimization\n\ndef neg_energy(x, y):\n return -1*energy([x,y])\n\nbo = BayesianOptimization(neg_energy,\n {'x': (0, 1), 'y': (0, 1)})\n\nbo.maximize(init_points=10, n_iter=10, kappa=2)\n\n#{'max': {'max_val': 2.035286440109728, 'max_params': {'x': 0.6745710475274774, 'y': 0.2651970328890534}},\n#print(bo.res)\n\nprint('\\nMin Energy = ', -1.0*bo.res['max']['max_val'], ' at ', bo.res['max']['max_params'])\n" ]
[ [ "scipy.optimize.minimize", "numpy.asarray" ] ]
t-kaichi/hyperspoof
[ "6effdf03be8489ba74154a12416c69948681aa51" ]
[ "train.components.py" ]
[ "import os\r\nimport time\r\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\r\nfrom absl import app\r\nfrom absl import flags\r\nfrom albumentations import (\r\n Compose, HorizontalFlip, RandomBrightness,RandomContrast,\r\n ShiftScaleRotate, ToFloat, VerticalFlip)\r\n\r\nfrom models import build_seg_model, build_pixel_mlp_class_model\r\nfrom utils import reset_tf, set_seed\r\nfrom VegetableSequence import VegetableDataset, VegetableSequence\r\nimport myFlags\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nFLAGS = flags.FLAGS\r\n\r\ndef main(argv):\r\n reset_tf(FLAGS.device)\r\n set_seed()\r\n\r\n isSeg = FLAGS.isSeg # train segmentation model\r\n\r\n # data structure\r\n ds_info = VegetableDataset(FLAGS.data_path)\r\n dim = ds_info.hsi_dims\r\n input_shape = (224, 224, dim) if isSeg else (FLAGS.nb_pixels, dim)\r\n\r\n # Experiment name\r\n experiment_title = \"HSSD\" \r\n experiment_title += '-seg' if isSeg else '-pix_class'\r\n experiment_title += '-%d' % time.time()\r\n logdir = os.path.join(FLAGS.log_root, experiment_title)\r\n os.makedirs(logdir)\r\n print(\"logdir: \", logdir)\r\n\r\n # augmentation\r\n if isSeg:\r\n AUGMENTATIONS_TRAIN = Compose([\r\n HorizontalFlip(p=0.5),\r\n VerticalFlip(p=0.2),\r\n RandomContrast(limit=0.001, p=0.5),\r\n RandomBrightness(limit=0.001, p=0.5),\r\n ShiftScaleRotate(\r\n shift_limit=0.3, scale_limit=0.9,\r\n rotate_limit=30, border_mode=4, p=0.8),# cv2.BORDER_REFLECT_101\r\n ToFloat(max_value=1024)\r\n ])\r\n else:\r\n AUGMENTATIONS_TRAIN = Compose([\r\n RandomContrast(limit=0.001, p=0.5),\r\n RandomBrightness(limit=0.001, p=0.5),\r\n ToFloat(max_value=1024)\r\n ])\r\n AUGMENTATIONS_TEST = AUGMENTATIONS_TRAIN\r\n\r\n # loading dataset\r\n train_gen = VegetableSequence(FLAGS.batch_size, instance_ids=[1, 2, 3],\r\n sample_ids=[1,2], dataset=ds_info, isSeg=isSeg,\r\n nb_pixels=FLAGS.nb_pixels,augmentations=AUGMENTATIONS_TRAIN)\r\n valid_gen = VegetableSequence(FLAGS.batch_size, instance_ids=[4],\r\n sample_ids=[1,2], dataset=ds_info, isSeg=isSeg,\r\n nb_pixels=FLAGS.nb_pixels,augmentations=AUGMENTATIONS_TEST,\r\n random_state=2021)\r\n\r\n # building a model\r\n if isSeg:\r\n model = build_seg_model(input_shape=input_shape)\r\n else:\r\n model = build_pixel_mlp_class_model(\r\n nb_classes=ds_info.object_categories, input_shape=input_shape,\r\n loss_weight=FLAGS.loss_weight)\r\n\r\n # callbacks\r\n checkpoint = ModelCheckpoint(logdir + \"/best.weights.hdf5\", monitor='val_loss',\r\n save_best_only=True, save_weights_only=True,\r\n mode='auto', save_freq=\"epoch\")\r\n early_stopping = EarlyStopping(monitor=\"val_loss\", patience=FLAGS.patience)\r\n callbacks = [checkpoint, early_stopping]\r\n \r\n model.fit(train_gen, epochs=FLAGS.epochs,validation_data=valid_gen,\r\n validation_steps=len(valid_gen), callbacks=callbacks)\r\n\r\nif __name__ == \"__main__\":\r\n app.run(main)\r\n" ]
[ [ "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.callbacks.EarlyStopping" ] ]
Gael-de-Sailly/flopy
[ "4104cf5e6a35e2a1fd6183442962ae5cb258fa7a" ]
[ "autotest/t016_test.py" ]
[ "import os\nimport flopy\nimport numpy as np\n\n\ntpth = os.path.abspath(os.path.join('temp', 't016'))\nif not os.path.isdir(tpth):\n os.makedirs(tpth)\n\n\nexe_name = 'mfusg'\nv = flopy.which(exe_name)\n\nrun = True\nif v is None:\n run = False\n\n\ndef test_usg_disu_load():\n\n pthusgtest = os.path.join('..', 'examples', 'data', 'mfusg_test',\n '01A_nestedgrid_nognc')\n fname = os.path.join(pthusgtest, 'flow.disu')\n assert os.path.isfile(fname), 'disu file not found {}'.format(fname)\n\n # Create the model\n m = flopy.modflow.Modflow(modelname='usgload', verbose=True)\n\n # Load the disu file\n disu = flopy.modflow.ModflowDisU.load(fname, m)\n assert isinstance(disu, flopy.modflow.ModflowDisU)\n\n # Change where model files are written\n model_ws = tpth\n m.model_ws = model_ws\n\n # Write the disu file\n disu.write_file()\n assert os.path.isfile(os.path.join(model_ws,\n '{}.{}'.format(m.name,\n m.disu.extension[0])))\n\n # Load disu file\n disu2 = flopy.modflow.ModflowDisU.load(fname, m)\n for (key1, value1), (key2, value2) in zip(disu2.__dict__.items(),\n disu.__dict__.items()):\n if isinstance(value1, flopy.utils.Util2d) or isinstance(value1, flopy.utils.Util3d):\n assert np.array_equal(value1.array, value2.array)\n else:\n assert value1 == value2\n\n return\n\n\ndef test_usg_sms_load():\n\n pthusgtest = os.path.join('..', 'examples', 'data', 'mfusg_test',\n '01A_nestedgrid_nognc')\n fname = os.path.join(pthusgtest, 'flow.sms')\n assert os.path.isfile(fname), 'sms file not found {}'.format(fname)\n\n # Create the model\n m = flopy.modflow.Modflow(modelname='usgload', verbose=True)\n\n # Load the sms file\n sms = flopy.modflow.ModflowSms.load(fname, m)\n assert isinstance(sms, flopy.modflow.ModflowSms)\n\n # Change where model files are written\n model_ws = tpth\n m.model_ws = model_ws\n\n # Write the sms file\n sms.write_file()\n assert os.path.isfile(os.path.join(model_ws,\n '{}.{}'.format(m.name,\n m.sms.extension[0])))\n\n # Load sms file\n sms2 = flopy.modflow.ModflowSms.load(fname, m)\n for (key1, value1), (key2, value2) in zip(sms2.__dict__.items(),\n sms.__dict__.items()):\n assert value1 == value2, 'key1 {}, value 1 {} != key2 {} value 2 {}'.format(key1, value1, key2, value2)\n\n return\n\n\ndef test_usg_model():\n mf = flopy.modflow.Modflow(version='mfusg', structured=True,\n model_ws=tpth, modelname='simple',\n exe_name=v)\n dis = flopy.modflow.ModflowDis(mf, nlay=1, nrow=11, ncol=11)\n bas = flopy.modflow.ModflowBas(mf)\n lpf = flopy.modflow.ModflowLpf(mf)\n wel = flopy.modflow.ModflowWel(mf, stress_period_data={0: [[0, 5, 5, -1.]]})\n ghb = flopy.modflow.ModflowGhb(mf,\n stress_period_data={\n 0: [[0, 0, 0, 1.0, 1000.],\n [0, 9, 9, 0.0, 1000.], ]})\n oc = flopy.modflow.ModflowOc(mf)\n sms = flopy.modflow.ModflowSms(mf, options='complex')\n\n # run with defaults\n mf.write_input()\n if run:\n success, buff = mf.run_model()\n assert success\n\n # try different complexity options; all should run successfully\n for complexity in ['simple', 'moderate', 'complex']:\n print('testing MFUSG with sms complexity: ' + complexity)\n sms = flopy.modflow.ModflowSms(mf, options=complexity)\n sms.write_file()\n if run:\n success, buff = mf.run_model()\n assert success\n\n\nif __name__ == '__main__':\n test_usg_disu_load()\n test_usg_sms_load()\n test_usg_model()\n" ]
[ [ "numpy.array_equal" ] ]
iliasprc/CVPR21Chal-SLR
[ "9d0c9a593d2c4bbfd69eff040b84e9f0538740fb" ]
[ "Conv3D/Sign_Isolated_Conv3D_hha_clip_mask.py" ]
[ "import os\nimport sys\nfrom datetime import datetime\nimport logging\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader, random_split\nfrom torch.utils.tensorboard import SummaryWriter\nimport torchvision.transforms as transforms\nfrom models.Conv3D import r2plus1d_18\nfrom dataset_sign_clip import Sign_Isolated\nfrom train import train_epoch\nfrom validation_clip import val_epoch\nfrom collections import OrderedDict\n\nclass LabelSmoothingCrossEntropy(nn.Module):\n def __init__(self):\n super(LabelSmoothingCrossEntropy, self).__init__()\n def forward(self, x, target, smoothing=0.1):\n confidence = 1. - smoothing\n logprobs = F.log_softmax(x, dim=-1)\n nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))\n nll_loss = nll_loss.squeeze(1)\n smooth_loss = -logprobs.mean(dim=-1)\n loss = confidence * nll_loss + smoothing * smooth_loss\n return loss.mean()\n\n# Path setting\nexp_name = 'hha_final'\ndata_path = \"../data/train_hha_2_mask\"\ndata_path2 = \"../data/val_hha_2_mask\"\nlabel_train_path = \"data/train_labels.csv\"\nlabel_val_path = \"data/val_gt.csv\"\nmodel_path = \"checkpoint/{}\".format(exp_name)\nif not os.path.exists(model_path):\n os.mkdir(model_path)\nif not os.path.exists(os.path.join('results', exp_name)):\n os.mkdir(os.path.join('results', exp_name))\nlog_path = \"log/sign_resnet2d+1_{}_{:%Y-%m-%d_%H-%M-%S}.log\".format(exp_name, datetime.now())\nsum_path = \"runs/sign_resnet2d+1_{}_{:%Y-%m-%d_%H-%M-%S}\".format(exp_name, datetime.now())\nphase = 'Train'\n# Log to file & tensorboard writer\nlogging.basicConfig(level=logging.INFO, format='%(message)s', handlers=[logging.FileHandler(log_path), logging.StreamHandler()])\nlogger = logging.getLogger('SLR')\nlogger.info('Logging to file...')\nwriter = SummaryWriter(sum_path)\n\n# Use specific gpus\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0,1,2,3,4,5,6,7\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0,1,2,3\"\n# Device setting\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# Hyperparams\nnum_classes = 226 #100\nepochs = 100\n# batch_size = 16\nbatch_size = 48\nlearning_rate = 1e-3 #1e-3 Train 1e-4 Finetune\nweight_decay = 1e-4\nlog_interval = 80\nsample_size = 128\nsample_duration = 32\nattention = False\ndrop_p = 0.0\nhidden1, hidden2 = 512, 256\n\ndef get_lr(optimizer):\n for param_group in optimizer.param_groups:\n return param_group['lr']\n\n# Train with 3DCNN\nif __name__ == '__main__':\n # Load data\n transform = transforms.Compose([transforms.Resize([sample_size, sample_size]),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5], std=[0.5])])\n train_set = Sign_Isolated(data_path=data_path, label_path=label_train_path, frames=sample_duration,\n num_classes=num_classes, train=True, transform=transform)\n val_set = Sign_Isolated(data_path=data_path2, label_path=label_val_path, frames=sample_duration,\n num_classes=num_classes, train=False, transform=transform)\n logger.info(\"Dataset samples: {}\".format(len(train_set)+len(val_set)))\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=48, pin_memory=True)\n val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=48, pin_memory=True)\n # Create model\n model = r2plus1d_18(pretrained=True, num_classes=226)\n # load pretrained\n # checkpoint = torch.load('pretrained/slr_resnet2d+1_epoch016.pth')\n # new_state_dict = OrderedDict()\n # for k, v in checkpoint.items():\n # name = k[7:] # 
remove 'module.'\n # new_state_dict[name]=v\n # model.load_state_dict(new_state_dict)\n # if phase == 'Train':\n # model.fc1 = nn.Linear(model.fc1.in_features, num_classes)\n print(model)\n \n model = model.to(device)\n # Run the model parallelly\n if torch.cuda.device_count() > 1:\n logger.info(\"Using {} GPUs\".format(torch.cuda.device_count()))\n model = nn.DataParallel(model)\n # Create loss criterion & optimizer\n # criterion = nn.CrossEntropyLoss()\n criterion = LabelSmoothingCrossEntropy()\n optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, threshold=0.0001)\n\n # Start training\n \n if phase == 'Train':\n logger.info(\"Training Started\".center(60, '#'))\n for epoch in range(epochs):\n print('lr: ', get_lr(optimizer))\n # Train the model\n train_epoch(model, criterion, optimizer, train_loader, device, epoch, logger, log_interval, writer)\n\n # Validate the model\n val_loss = val_epoch(model, criterion, val_loader, device, epoch, logger, writer)\n scheduler.step(val_loss)\n \n # Save model\n torch.save(model.state_dict(), os.path.join(model_path, \"sign_resnet2d+1_epoch{:03d}.pth\".format(epoch+1)))\n logger.info(\"Epoch {} Model Saved\".format(epoch+1).center(60, '#'))\n elif phase == 'Test':\n logger.info(\"Testing Started\".center(60, '#'))\n val_loss = val_epoch(model, criterion, val_loader, device, 0, logger, writer, phase=phase, exp_name=exp_name)\n\n logger.info(\"Finished\".center(60, '#'))\n" ]
[ [ "torch.utils.data.DataLoader", "torch.nn.functional.log_softmax", "torch.cuda.device_count", "torch.cuda.is_available", "torch.utils.tensorboard.SummaryWriter", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.nn.DataParallel" ] ]
MouseHu/emdqn
[ "ba907e959f21dd0b5a17117accccae9c82a79a3b" ]
[ "baselines/deepq/experiments/atari/train_modelbased.py" ]
[ "import argparse\nimport gym\nimport numpy as np\nimport os\nimport tensorflow as tf\nimport tempfile\nimport time\n\nimport sys\n\ncwd = os.getcwd()\ncwd = '/'.join(cwd.split('/')[:-4])\ntemp = sys.path\ntemp.append('')\ntemp[1:] = temp[0:-1]\ntemp[0] = cwd\nprint(sys.path)\n\nfrom baselines.deepq.dqn_utils import *\nimport baselines.common.tf_util as U\nimport datetime\nfrom baselines import logger\nfrom baselines import deepq\nfrom baselines.deepq.replay_buffer import ReplayBufferContra, PrioritizedReplayBuffer\nfrom baselines.common.misc_util import (\n boolean_flag,\n pickle_load,\n pretty_eta,\n relatively_safe_pickle_dump,\n set_global_seeds,\n RunningAvg,\n SimpleMonitor\n)\nfrom baselines.common.schedules import LinearSchedule, PiecewiseSchedule\n# when updating this to non-deperecated ones, it is important to\n# copy over LazyFrames\nfrom baselines.common.atari_wrappers_deprecated import wrap_dqn\nfrom baselines.common.azure_utils import Container\nfrom baselines.deepq.experiments.atari.model import contrastive_model, rp_model, modelbased_model\n# from baselines.deepq.experiments.atari.lru_knn_ucb import LRU_KNN_UCB\nfrom baselines.deepq.experiments.atari.lru_knn_ucb import LRU_KNN_UCB\nfrom baselines.common.atari_lib import create_atari_environment\n\n\n# from gym.wrappers.monitoring.video_recorder import VideoRecorder\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"DQN experiments for Atari games\")\n # Environment test deployment\n parser.add_argument(\"--env\", type=str, default=\"Pong\", help=\"name of the game\")\n parser.add_argument(\"--seed\", type=int, default=int(time.time()), help=\"which seed to use\")\n parser.add_argument(\"--gamma\", type=int, default=0.99, help=\"which seed to use\")\n # Core DQN parameters\n parser.add_argument(\"--mode\", type=str, default=\"max\", help=\"mode of episodic memory\")\n parser.add_argument(\"--replay-buffer-size\", type=int, default=int(1e5), help=\"replay buffer size\")\n parser.add_argument(\"--lr\", type=float, default=1e-4, help=\"learning rate for Adam optimizer\")\n parser.add_argument(\"--num-steps\", type=int, default=int(5e6),\n help=\"total number of steps to run the environment for\")\n parser.add_argument(\"--negative-samples\", type=int, default=10, help=\"numbers for negative samples\")\n parser.add_argument(\"--batch-size\", type=int, default=16,\n help=\"number of transitions to optimize at the same time\")\n parser.add_argument(\"--learning-freq\", type=int, default=16,\n help=\"number of iterations between every optimization step\")\n parser.add_argument(\"--target-update-freq\", type=int, default=10000,\n help=\"number of iterations between every target network update\")\n parser.add_argument(\"--knn\", type=int, default=11, help=\"number of k nearest neighbours\")\n parser.add_argument(\"--end_training\", type=int, default=2e5, help=\"number of pretrain steps\")\n parser.add_argument('--map_config', type=str,\n help='The map and config you want to run in MonsterKong.',\n default='../../../ple/configs/config_ppo_mk_large.py')\n # Bells and whistles\n # Checkpointing\n parser.add_argument(\"--save-dir\", type=str, default=None,\n help=\"directory in which training state and model should be saved.\")\n parser.add_argument(\"--save-azure-container\", type=str, default=None,\n help=\"It present data will saved/loaded from Azure. 
Should be in format ACCOUNT_NAME:ACCOUNT_KEY:CONTAINER\")\n parser.add_argument(\"--save-freq\", type=int, default=1e6,\n help=\"save model once every time this many iterations are completed\")\n parser.add_argument(\"--latent_dim\", type=int, default=32,\n help=\"latent_dim\")\n parser.add_argument(\"--video_path\", type=str, default=\"./videos\",\n help=\"video path\")\n parser.add_argument(\"--comment\", type=str, default=datetime.datetime.now().strftime(\"%I-%M_%B-%d-%Y\"),\n help=\"discription for this experiment\")\n parser.add_argument(\"--log_dir\", type=str, default=\"./tflogs\",\n help=\"directory in which training state and model should be saved.\")\n boolean_flag(parser, \"load-on-start\", default=True,\n help=\"if true and model was previously saved then training will be resumed\")\n\n boolean_flag(parser, \"learning\", default=False,\n help=\"if true and model was continued learned\")\n\n boolean_flag(parser, \"exploration\", default=False,\n help=\"if true and model was continued learned\")\n\n boolean_flag(parser, \"ucb\", default=False, help=\"whether or not to use ucb exploration\")\n boolean_flag(parser, \"rp\", default=False, help=\"whether or not to use random projection\")\n # EMDQN\n boolean_flag(parser, \"train-latent\", default=False, help=\"whether or not to further train latent\")\n return parser.parse_args()\n\n\ndef make_env(game_name):\n env = gym.make(game_name + \"NoFrameskip-v4\")\n monitored_env = SimpleMonitor(env) # puts rewards and number of steps in info, before environment is wrapped\n env = wrap_dqn(\n monitored_env) # applies a bunch of modification to simplify the observation space (downsample, make b/w)\n return env, monitored_env\n\n\ndef maybe_save_model(savedir, container, state):\n \"\"\"This function checkpoints the model and state of the training algorithm.\"\"\"\n if savedir is None:\n return\n start_time = time.time()\n model_dir = \"model-{}\".format(state[\"num_iters\"])\n U.save_state(os.path.join(savedir, model_dir, \"saved\"))\n if container is not None:\n container.put(os.path.join(savedir, model_dir), model_dir)\n relatively_safe_pickle_dump(state, os.path.join(savedir, 'training_state.pkl.zip'), compression=True)\n if container is not None:\n container.put(os.path.join(savedir, 'training_state.pkl.zip'), 'training_state.pkl.zip')\n relatively_safe_pickle_dump(state[\"monitor_state\"], os.path.join(savedir, 'monitor_state.pkl'))\n if container is not None:\n container.put(os.path.join(savedir, 'monitor_state.pkl'), 'monitor_state.pkl')\n logger.log(\"Saved model in {} seconds\\n\".format(time.time() - start_time))\n\n\ndef maybe_load_model(savedir, container):\n \"\"\"Load model if present at the specified path.\"\"\"\n if savedir is None:\n return\n\n state_path = os.path.join(os.path.join(savedir, 'training_state.pkl.zip'))\n if container is not None:\n logger.log(\"Attempting to download model from Azure\")\n found_model = container.get(savedir, 'training_state.pkl.zip')\n else:\n found_model = os.path.exists(state_path)\n if found_model:\n state = pickle_load(state_path, compression=True)\n model_dir = \"model-{}\".format(state[\"num_iters\"])\n if container is not None:\n container.get(savedir, model_dir)\n U.load_state(os.path.join(savedir, model_dir, \"saved\"))\n logger.log(\"Loaded models checkpoint at {} iterations\".format(state[\"num_iters\"]))\n return state\n\n\nif __name__ == '__main__':\n args = parse_args()\n if args.train_latent:\n print(\"Training latent\")\n # Parse savedir and azure container.\n savedir = 
args.save_dir\n if args.save_azure_container is not None:\n account_name, account_key, container_name = args.save_azure_container.split(\":\")\n container = Container(account_name=account_name,\n account_key=account_key,\n container_name=container_name,\n maybe_create=True)\n if savedir is None:\n # Careful! This will not get cleaned up. Docker spoils the developers.\n savedir = tempfile.TemporaryDirectory().name\n else:\n container = None\n # Create and seed the env.\n # env, monitored_env = make_env(args.env)\n if args.env == \"MK\":\n\n import imp\n\n try:\n map_config_file = args.map_config\n map_config = imp.load_source('map_config', map_config_file).map_config\n except Exception as e:\n sys.exit(str(e) + '\\n'\n + 'map_config import error. File not exist or map_config not specified')\n from gym.envs.registration import register\n\n register(\n id='MonsterKong-v0',\n entry_point='baselines.ple.gym_env.monsterkong:MonsterKongEnv',\n kwargs={'map_config': map_config},\n )\n\n env = gym.make('MonsterKong-v0')\n env = ProcessFrame(env)\n else:\n env = create_atari_environment(args.env, sticky_actions=False)\n if args.seed > 0:\n set_global_seeds(args.seed)\n env.unwrapped.seed(args.seed)\n print(\"obs shape\", env.observation_space.shape)\n # env = GIFRecorder(video_path=args.video_path + \"/{}/\".format(args.comment), record_video=True, env=env)\n subdir = (datetime.datetime.now()).strftime(\"%m-%d-%Y-%H:%M:%S\") + \" \" + args.comment\n tf_writer = tf.summary.FileWriter(os.path.join(args.log_dir, subdir), tf.get_default_graph())\n value_summary = tf.Summary()\n qec_summary = tf.Summary()\n value_summary.value.add(tag='discount_reward_mean')\n value_summary.value.add(tag='non_discount_reward_mean')\n # value_summary.value.add(tag='episode')\n\n qec_summary.value.add(tag='qec_mean')\n qec_summary.value.add(tag='qec_fount')\n value_summary.value.add(tag='steps')\n value_summary.value.add(tag='episodes')\n\n with U.make_session(4) as sess:\n # EMDQN\n buffer_size = 1000000\n ec_buffer = LRU_KNN_UCB(buffer_size, args.latent_dim, 'game', mode=args.mode)\n\n # rng = np.random.RandomState(123456) # deterministic, erase 123456 for stochastic\n # rp = rng.normal(loc=0, scale=1. 
/ np.sqrt(latent_dim), size=(latent_dim, input_dim))\n qecwatch = []\n update_counter = 0\n qec_found = 0\n sequence = []\n\n tfout = open(\n './results/result_%s_contrast_%s' % (args.env, args.comment), 'w+')\n\n\n def act(ob, stochastic=0, update_eps=-1):\n global eps, qecwatch, qec_found, num_iters\n # print(ob.shape)\n\n z = z_func(ob)\n # next_z, rs = model_func(np.tile(ob, [env.action_space.n, 1, 1, 1]), actions)\n z = np.array(z).reshape((args.latent_dim))\n if update_eps >= 0:\n eps = update_eps\n if np.random.random() < max(stochastic, eps):\n action = np.random.randint(0, env.action_space.n)\n # print(eps,env.action_space.n,action)\n return action, z\n else:\n # print(eps,stochastic,np.random.rand(0, 1))\n # qs = np.zeros(env.action_space.n)\n actions = np.arange(env.action_space.n)\n next_zs, rs = model_func(np.tile(z, [env.action_space.n, 1]), actions)\n vs = ec_buffer.knn_value(next_zs[0], args.knn)\n qs = args.gamma * np.array(vs) + np.array(rs)\n optimistic_q = qs\n q_max = np.max(optimistic_q)\n # print(\"optimistic q\", optimistic_q.shape, np.where(optimistic_q == q_max))\n max_action = np.where(optimistic_q == q_max)[0]\n # print(max_action)\n action_selected = np.random.randint(0, len(max_action))\n # print(\"ec\",eps,np.argmax(q),q)\n return max_action[action_selected], z\n\n\n def update_kdtree():\n ec_buffer.update_kdtree()\n\n\n def update_ec(sequence):\n _, _, acts, _ = list(zip(*sequence))\n # print(np.bincount(acts))\n Rtd = 0.\n Rtds = [0]\n for seq in reversed(sequence):\n s, z, a, r = seq\n # z = s.flatten()\n # z = np.dot(rp, s.flatten())\n Rtd = r + args.gamma * Rtd\n Rtds.append(Rtd)\n z = np.array(z).reshape((args.latent_dim))\n v, _ = ec_buffer.peek(z, Rtd, True)\n if v == None: # new action\n ec_buffer.add(z, Rtd)\n return Rtds\n\n\n # Create training graph and replay buffer\n z_func, model_func, train = deepq.build_train_modelbased(\n make_obs_ph=lambda name: U.Uint8Input(env.observation_space.shape, name=name),\n net_func=rp_model if args.rp else contrastive_model,\n model_func=modelbased_model,\n num_actions=env.action_space.n,\n optimizer=tf.train.AdamOptimizer(learning_rate=args.lr, epsilon=1e-4),\n gamma=args.gamma,\n grad_norm_clipping=10,\n )\n\n approximate_num_iters = args.num_steps\n if args.ucb:\n exploration = PiecewiseSchedule([\n (0, 1),\n (2e4, 1),\n ], outside_value=0.01)\n else:\n exploration = PiecewiseSchedule([\n (0, 1.0),\n (args.end_training, 1.0),\n # (args.end_training+1, 1.0),\n # (args.end_training+1, 0.005),\n (args.end_training + 100000, 0.01),\n # (approximate_num_iters / 5, 0.1),\n # (approximate_num_iters / 3, 0.01)\n ], outside_value=0.01)\n\n replay_buffer = ReplayBufferContra(args.replay_buffer_size, K=args.negative_samples)\n\n U.initialize()\n num_iters = 0\n num_episodes = 0\n non_discount_return = [0.0]\n discount_return = [0.0]\n # Load the model\n state = maybe_load_model(savedir, container)\n # if state is not None:\n # num_iters, replay_buffer = state[\"num_iters\"], state[\"replay_buffer\"],\n # monitored_env.set_state(state[\"monitor_state\"])\n\n start_time, start_steps = time.time(), 0\n steps_per_iter = RunningAvg(0.999)\n iteration_time_est = RunningAvg(0.999)\n obs = env.reset()\n print_flag = True\n # Main trianing loop\n train_time = 0\n act_time = 0\n env_time = 0\n update_time = 0\n cur_time = time.time()\n while True:\n num_iters += 1\n # Take action and store transition in the replay buffer.\n action, z = \\\n act(np.array(obs)[None], update_eps=exploration.value(num_iters))\n act_time += time.time() 
- cur_time\n cur_time = time.time()\n new_obs, rew, done, info = env.step(action)\n env_time += time.time() - cur_time\n cur_time = time.time()\n # if num_episodes % 40 == 39:\n # env.record = True\n non_discount_return[-1] += rew\n discount_return[-1] += rew * args.gamma ** (num_iters - start_steps)\n # EMDQN\n sequence.append([obs, z, action, np.clip(rew, -1, 1)])\n replay_buffer.add(obs, action, rew, new_obs, float(done))\n obs = new_obs\n if done:\n # print((num_iters - start_steps), args.gamma ** (num_iters - start_steps))\n num_episodes += 1\n # EMDQN\n if num_iters >= args.end_training:\n update_ec(sequence)\n update_time += time.time() - cur_time\n cur_time = time.time()\n\n if print_flag:\n print(info)\n print_flag = False\n\n obs = env.reset()\n non_discount_return.append(0.0)\n discount_return.append(0.0)\n\n if num_iters % args.learning_freq == 0 and len(replay_buffer) > args.batch_size * (\n args.negative_samples + 1) and (\n num_iters < args.end_training or args.learning):\n # train vae\n obses_t, actions, rewards, obses_tp1, dones, obses_neg = replay_buffer.sample(args.batch_size, False)\n obses_t_c, actions_c, rewards_c, obses_tp1_c, dones_c, obses_neg_c = replay_buffer.sample(args.batch_size)\n inputs = [[1],obses_t_c,obses_tp1_c,obses_neg_c,obses_t, obses_tp1, rewards,actions]\n # inputs = [obses_t, obses_tp1, rewards, actions]\n total_errors, summary = train(*inputs)\n tf_writer.add_summary(summary, global_step=num_iters)\n # tf_writer.add_summary(summary,global_step=info[\"steps\"])\n # Update target network.\n train_time += time.time() - cur_time\n cur_time = time.time()\n if num_iters % args.target_update_freq == 0 and num_iters > args.end_training: # NOTE: why not 10000?\n update_kdtree()\n if start_time is not None:\n steps_per_iter.update(1)\n iteration_time_est.update(time.time() - start_time)\n start_time = time.time()\n value_summary.value[2].simple_value = num_iters\n\n # Save the model and training state.\n '''\n if num_iters > 0 and (num_iters % args.save_freq == 0 or info[\"steps\"] > args.num_steps):\n maybe_save_model(savedir, container, {\n 'replay_buffer': replay_buffer,\n 'num_iters': num_iters,\n 'monitor_state': monitored_env.get_state()\n })\n '''\n\n if num_iters > args.num_steps:\n break\n\n if done:\n return_len = min(len(non_discount_return) - 1, 100)\n sequence = []\n steps_left = args.num_steps - num_iters\n completion = np.round(num_iters / args.num_steps, 2)\n\n logger.record_tabular(\"% completion\", completion)\n # logger.record_tabular(\"steps\", info[\"steps\"])\n logger.record_tabular(\"iters\", num_iters)\n # logger.record_tabular(\"episodes\", info[0][\"episode\"])\n logger.record_tabular(\"reward\", np.mean(non_discount_return[-return_len - 1:-1]))\n logger.record_tabular(\"discount reward\", np.mean(discount_return[-return_len - 1:-1]))\n logger.record_tabular(\"num episode\", num_episodes)\n # logger.record_tabular(\"qec_mean\", np.mean(qecwatch))\n # logger.record_tabular(\"qec_proportion\", qec_found / (num_iters - start_steps))\n logger.record_tabular(\"update time\", update_time)\n logger.record_tabular(\"train time\", train_time)\n logger.record_tabular(\"act_time\", act_time)\n logger.record_tabular(\"env_time\", env_time)\n value_summary.value[0].simple_value = np.mean(discount_return[-return_len - 1:-1])\n value_summary.value[1].simple_value = np.mean(non_discount_return[-return_len - 1:-1])\n value_summary.value[3].simple_value = num_episodes\n # qec_summary.value[0].simple_value = np.mean(qecwatch)\n # 
qec_summary.value[1].simple_value = qec_found / (num_iters - start_steps)\n\n # if return_len > 1:\n # # np.mean(np.mean(episodic_return[-return_mean + 1:-1]))\n # tfout.write(\"%d, %.2f\\n\" % (num_iters, int(np.mean(discount_return[-return_len - 1:-1]))))\n # tfout.flush()\n logger.record_tabular(\"exploration\", exploration.value(num_iters))\n fps_estimate = (float(steps_per_iter) / (float(iteration_time_est) + 1e-6)\n if steps_per_iter._value is not None else 1 / (float(iteration_time_est) + 1e-6))\n logger.dump_tabular()\n logger.log()\n logger.log(\"ETA: \" + pretty_eta(int(steps_left / fps_estimate)))\n logger.log()\n\n start_steps = num_iters\n # qecwatch = []\n # qec_found = 0\n total_steps = num_iters - args.end_training\n tf_writer.add_summary(value_summary, global_step=total_steps)\n # tf_writer.add_summary(qec_summary, global_step=total_steps)\n cur_time = time.time()\n" ]
[ [ "numpy.tile", "numpy.mean", "tensorflow.train.AdamOptimizer", "numpy.random.random", "numpy.arange", "numpy.max", "numpy.clip", "tensorflow.get_default_graph", "numpy.round", "numpy.where", "numpy.random.randint", "numpy.array", "tensorflow.Summary" ] ]
inspire-group/ml_defense
[ "e7e8944d617885389a013061c320fa3553e779f0" ]
[ "lib/utils/DCA.py" ]
[ "\"\"\"\nDCA class performs Discriminant Correlation Analysis (DCA). It can be used as\na dimensionality reduction algorithm. Usage is similar to sklearn's\npreprocessing classes such as PCA.\n(Code from Thee Chanyaswad ([email protected]))\n\"\"\"\n\nimport numpy as np\nimport scipy\nfrom sklearn.metrics import pairwise\nfrom sklearn import preprocessing\n\n#------------------------------------------------------------------------------#\n\n\nclass DCA:\n\n def __init__(self, rho=None, rho_p=None, n_components=None):\n\n self.n_components = n_components\n self.rho = rho\n self.rho_p = rho_p\n\n def fit(self, X, y):\n\n (self._Sw, self._Sb) = self._get_Smatrices(X, y)\n\n if self.rho == None:\n s0 = np.linalg.eigvalsh(self._Sw)\n self.rho = 0.02 * np.max(s0)\n if self.rho_p == None:\n self.rho_p = 0.1 * self.rho\n\n pSw = self._Sw + self.rho * np.eye(self._Sw.shape[0])\n pSbar = self._Sb + self._Sw + \\\n (self.rho_p + self.rho) * np.eye(self._Sw.shape[0])\n (s1, vr) = scipy.linalg.eigh(\n pSbar, pSw, overwrite_a=True, overwrite_b=True)\n s1 = s1[::-1] # re-order from large to small\n Wdca = vr.T[::-1]\n self.eigVal = s1\n self.allComponents = Wdca\n if self.n_components:\n self.components = Wdca[0:self.n_components]\n else:\n self.components = Wdca\n\n def transform(self, X, dim=None):\n\n if dim == None:\n X_trans = np.inner(self.components, X)\n else:\n X_trans = np.inner(self.allComponents[0:dim], X)\n return X_trans.T\n\n def inverse_transform(self, Xreduced, projMatrix=None, dim=None):\n\n if projMatrix is None:\n if dim is None:\n W = self.components\n else:\n W = self.allComponents[0:dim]\n else:\n W = projMatrix\n # W = PxM where P<M\n foo = np.inner(W, W)\n bar = np.linalg.solve(foo.T, W)\n Xhat = np.inner(Xreduced, bar.T)\n return Xhat\n\n def _get_Smatrices(self, X, y):\n\n Sb = np.zeros((X.shape[1], X.shape[1]))\n\n S = np.inner(X.T, X.T)\n N = len(X)\n mu = np.mean(X, axis=0)\n classLabels = np.unique(y)\n for label in classLabels:\n classIdx = np.argwhere(y == label).T[0]\n Nl = len(classIdx)\n xL = X[classIdx]\n muL = np.mean(xL, axis=0)\n muLbar = muL - mu\n Sb = Sb + Nl * np.outer(muLbar, muLbar)\n\n Sbar = S - N * np.outer(mu, mu)\n Sw = Sbar - Sb\n self.mean_ = mu\n\n return (Sw, Sb)\n" ]
[ [ "numpy.eye", "numpy.linalg.solve", "numpy.zeros", "numpy.argwhere", "numpy.linalg.eigvalsh", "scipy.linalg.eigh", "numpy.max", "numpy.inner", "numpy.outer", "numpy.unique", "numpy.mean" ] ]
jairideout/scikit-bio
[ "81a1ce5acb434603c537f832caee64a76db19190" ]
[ "skbio/diversity/alpha/_ace.py" ]
[ "# ----------------------------------------------------------------------------\n# Copyright (c) 2013--, scikit-bio development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\n\nfrom skbio.diversity.alpha._base import _validate_counts_vector\nfrom skbio.util._decorator import experimental\n\n\n@experimental(as_of=\"0.4.0\")\ndef ace(counts, rare_threshold=10):\n r\"\"\"Calculate the ACE metric (Abundance-based Coverage Estimator).\n\n The ACE metric is defined as:\n\n .. math::\n\n S_{ace}=S_{abund}+\\frac{S_{rare}}{C_{ace}}+\n \\frac{F_1}{C_{ace}}\\gamma^2_{ace}\n\n where :math:`S_{abund}` is the number of abundant OTUs (with more than\n `rare_threshold` individuals) when all samples are pooled,\n :math:`S_{rare}` is the number of rare OTUs (with less than or equal to\n `rare_threshold` individuals) when all samples are pooled, :math:`C_{ace}`\n is the sample abundance coverage estimator, :math:`F_1` is the frequency of\n singletons, and :math:`\\gamma^2_{ace}` is the estimated coefficient of\n variation for rare OTUs.\n\n The estimated coefficient of variation is defined as (assuming\n `rare_threshold` is 10, the default):\n\n .. math::\n\n \\gamma^2_{ace}=max\\left[\\frac{S_{rare}}{C_{ace}}\n \\frac{\\sum^{10}_{i=1}{{i\\left(i-1\\right)}}F_i}\n {\\left(N_{rare}\\right)\\left(N_{rare}-1\\right)} -1,0\\right]\n\n Parameters\n ----------\n counts : 1-D array_like, int\n Vector of counts.\n rare_threshold : int, optional\n Threshold at which an OTU containing as many or fewer individuals will\n be considered rare.\n\n Returns\n -------\n double\n Computed ACE metric.\n\n Raises\n ------\n ValueError\n If every rare OTU is a singleton.\n\n Notes\n -----\n ACE was first introduced in [1]_ and [2]_. The implementation here is based\n on the description given in the EstimateS manual [3]_.\n\n If no rare OTUs exist, returns the number of abundant OTUs. The default\n value of 10 for `rare_threshold` is based on [4]_.\n\n If `counts` contains zeros, indicating OTUs which are known to exist in the\n environment but did not appear in the sample, they will be ignored for the\n purpose of calculating the number of rare OTUs.\n\n References\n ----------\n .. [1] Chao, A. & S.-M Lee. 1992 Estimating the number of classes via\n sample coverage. Journal of the American Statistical Association 87,\n 210-217.\n .. [2] Chao, A., M.-C. Ma, & M. C. K. Yang. 1993. Stopping rules and\n estimation for recapture debugging with unequal failure rates.\n Biometrika 80, 193-201.\n .. [3] http://viceroy.eeb.uconn.edu/estimates/\n .. [4] Chao, A., W.-H. Hwang, Y.-C. Chen, and C.-Y. Kuo. 2000. Estimating\n the number of shared species in two communities. Statistica Sinica\n 10:227-246.\n\n \"\"\"\n counts = _validate_counts_vector(counts)\n freq_counts = np.bincount(counts)\n s_rare = _otus_rare(freq_counts, rare_threshold)\n singles = freq_counts[1]\n\n if singles > 0 and singles == s_rare:\n raise ValueError(\"The only rare OTUs are singletons, so the ACE \"\n \"metric is undefined. 
EstimateS suggests using \"\n \"bias-corrected Chao1 instead.\")\n\n s_abun = _otus_abundant(freq_counts, rare_threshold)\n if s_rare == 0:\n return s_abun\n\n n_rare = _number_rare(freq_counts, rare_threshold)\n c_ace = 1 - singles / n_rare\n\n top = s_rare * _number_rare(freq_counts, rare_threshold, gamma=True)\n bottom = c_ace * n_rare * (n_rare - 1)\n gamma_ace = (top / bottom) - 1\n\n if gamma_ace < 0:\n gamma_ace = 0\n\n return s_abun + (s_rare / c_ace) + ((singles / c_ace) * gamma_ace)\n\n\ndef _otus_rare(freq_counts, rare_threshold):\n \"\"\"Count number of rare OTUs.\"\"\"\n return freq_counts[1:rare_threshold + 1].sum()\n\n\ndef _otus_abundant(freq_counts, rare_threshold):\n \"\"\"Count number of abundant OTUs.\"\"\"\n return freq_counts[rare_threshold + 1:].sum()\n\n\ndef _number_rare(freq_counts, rare_threshold, gamma=False):\n \"\"\"Return number of individuals in rare OTUs.\n\n ``gamma=True`` generates the ``n_rare`` used for the variation coefficient.\n\n \"\"\"\n n_rare = 0\n\n if gamma:\n for i, j in enumerate(freq_counts[:rare_threshold + 1]):\n n_rare = n_rare + (i * j) * (i - 1)\n else:\n for i, j in enumerate(freq_counts[:rare_threshold + 1]):\n n_rare = n_rare + (i * j)\n\n return n_rare\n" ]
[ [ "numpy.bincount" ] ]
ccharp/dplyPY
[ "681af27b6ca595bec88e1a9b98ea90c9ac848f1b" ]
[ "dplypy/test/test_arrange.py" ]
[ "import pandas as pd\nimport numpy as np\n\nfrom dplypy.dplyframe import DplyFrame\nfrom dplypy.pipeline import arrange\n\n\ndef test_arrange():\n pandas_df = pd.DataFrame(\n data=[[5, 1, 0], [20, 2, 2], [0, 8, 8], [np.nan, 7, 9], [10, 7, 5], [15, 4, 3]],\n columns=[\"col1\", \"col2\", \"col3\"],\n index=[1, 3, 5, 7, 9, 11],\n )\n df = DplyFrame(pandas_df)\n\n output1 = df + arrange(by=\"col1\")\n expected1 = pandas_df.sort_values(by=\"col1\")\n pd.testing.assert_frame_equal(output1.pandas_df, expected1)\n\n try:\n df + arrange(by=1)\n except KeyError:\n pass\n else:\n raise AssertionError(\"KeyError was not raised\")\n\n output2 = df + arrange(by=[\"col1\", \"col2\"], ascending=False)\n expected2 = pandas_df.sort_values(by=[\"col1\", \"col2\"], ascending=False)\n pd.testing.assert_frame_equal(output2.pandas_df, expected2)\n\n try:\n df + arrange(by=[\"col1\", \"col4\"])\n except KeyError:\n pass\n else:\n raise AssertionError(\"KeyError was not raised\")\n\n output3 = df + arrange(by=[\"col1\"], ascending=False)\n expected3 = pandas_df.sort_values(by=[\"col1\"], ascending=False)\n pd.testing.assert_frame_equal(output3.pandas_df, expected3)\n\n output4 = df + arrange(by=\"col1\", axis=\"index\")\n expected4 = pandas_df.sort_values(by=\"col1\", axis=\"index\")\n pd.testing.assert_frame_equal(output4.pandas_df, expected4)\n\n output5 = df + arrange(by=1, axis=1)\n expected5 = pandas_df.sort_values(by=1, axis=1)\n pd.testing.assert_frame_equal(output5.pandas_df, expected5)\n\n output6 = df + arrange(by=[1, 3], axis=\"columns\", ascending=[True, False])\n expected6 = pandas_df.sort_values(\n by=[1, 3], axis=\"columns\", ascending=[True, False]\n )\n pd.testing.assert_frame_equal(output6.pandas_df, expected6)\n" ]
[ [ "pandas.DataFrame", "pandas.testing.assert_frame_equal" ] ]
padma-g/data
[ "b65e4e04a759ecc5b0b4df67e8cc290b0ddcadff" ]
[ "scripts/fbi/crime/preprocess.py" ]
[ "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport csv\nimport io\nimport ssl\nimport urllib.request\nimport sys\nimport requests\nimport re\nimport os\nimport common_util as cu\n\nimport pandas as pd\nimport logging\nimport geocode_cities\n\n# Years that FBI doesn't public arson data.\n# 2019, 2018, 2017\n# The FBI does not publish arson data unless it receives data from either the agency or the state for all 12 months of the calendar year.\n\n_FIELDS_IN_CRIME_FILE = 15\n_POPULATION_INDEX = 3\n_YEAR_INDEX = 0\n_STATE_INDEX = 1\n_CITY_INDEX = 2\n_DUMMY_RAPE_INDEX = 7\n\n_CRIME_FIELDS = [\n 'Year',\n 'State',\n 'City',\n 'Population',\n # Violent Crimes\n 'Violent',\n 'ViolentMurderAndNonNegligentManslaughter',\n 'ViolentRape',\n 'Rape2',\n 'ViolentRobbery',\n 'ViolentAggravatedAssault',\n # Property Crimes\n 'Property',\n 'PropertyBurglary',\n 'PropertyLarcenyTheft',\n 'PropertyMotorVehicleTheft',\n # Arson\n 'PropertyArson',\n]\n\nGEO_CODE = 'Geocode'\nTOTAL = 'Total'\n\nOUTPUT_COLUMNS = [\n 'Year', 'GeoId', 'Count_CriminalActivities_ViolentCrime',\n 'Count_CriminalActivities_MurderAndNonNegligentManslaughter',\n 'Count_CriminalActivities_ForcibleRape', 'Count_CriminalActivities_Robbery',\n 'Count_CriminalActivities_AggravatedAssault',\n 'Count_CriminalActivities_PropertyCrime',\n 'Count_CriminalActivities_Burglary',\n 'Count_CriminalActivities_LarcenyTheft',\n 'Count_CriminalActivities_MotorVehicleTheft',\n 'Count_CriminalActivities_Arson', 'Count_CriminalActivities_CombinedCrime'\n]\n\n# From 2013-2016, the FBI reported statistics for two different definitions of rape before fully transitioning to the current definition in 2017.\n# We add a dummy column after it (so allyears have two Rape columns).\nYEARS_WITH_TWO_RAPE_COLUMNS = {'2013', '2014', '2015', '2016'}\n\nYEAR_TO_URL = {\n '2019':\n 'https://ucr.fbi.gov/crime-in-the-u.s/2019/crime-in-the-u.s.-2019/tables/table-8/table-8.xls',\n '2018':\n 'https://ucr.fbi.gov/crime-in-the-u.s/2018/crime-in-the-u.s.-2018/tables/table-8/table-8.xls',\n '2017':\n 'https://ucr.fbi.gov/crime-in-the-u.s/2017/crime-in-the-u.s.-2017/tables/table-8/table-8.xls',\n '2016':\n 'https://ucr.fbi.gov/crime-in-the-u.s/2016/crime-in-the-u.s.-2016/tables/table-6/table-6.xls',\n '2015':\n 'https://ucr.fbi.gov/crime-in-the-u.s/2015/crime-in-the-u.s.-2015/tables/table-8/table_8_offenses_known_to_law_enforcement_by_state_by_city_2015.xls',\n '2014':\n 'https://ucr.fbi.gov/crime-in-the-u.s/2014/crime-in-the-u.s.-2014/tables/table-8/Table_8_Offenses_Known_to_Law_Enforcement_by_State_by_City_2014.xls',\n '2013':\n 'https://ucr.fbi.gov/crime-in-the-u.s/2013/crime-in-the-u.s.-2013/tables/table-8/table_8_offenses_known_to_law_enforcement_by_state_by_city_2013.xls',\n '2012':\n 'https://ucr.fbi.gov/crime-in-the-u.s/2012/crime-in-the-u.s.-2012/tables/8tabledatadecpdf/table_8_offenses_known_to_law_enforcement_by_state_by_city_2012.xls',\n '2011':\n 
'https://ucr.fbi.gov/crime-in-the-u.s/2011/crime-in-the-u.s.-2011/tables/table_8_offenses_known_to_law_enforcement_by_state_by_city_2011.xls',\n # Sanity check 2008-2010 don't have duplicate city state.\n '2010':\n 'https://ucr.fbi.gov/crime-in-the-u.s/2010/crime-in-the-u.s.-2010/tables/10tbl08.xls',\n '2009':\n 'https://www2.fbi.gov/ucr/cius2009/data/documents/09tbl08.xls',\n '2008':\n 'https://www2.fbi.gov/ucr/cius2008/data/documents/08tbl08.xls',\n}\n\n\ndef calculate_crimes(r):\n # Return the violent, property, arson crimes & total\n # If a field is empty, it is treated as 0\n\n # Category 1: Violent Crimes\n violent = cu.int_from_field(r['Violent'])\n\n murder = cu.int_from_field(r['ViolentMurderAndNonNegligentManslaughter'])\n rape = cu.int_from_field(r['ViolentRape'])\n rape2 = cu.int_from_field(r['Rape2'])\n robbery = cu.int_from_field(r['ViolentRobbery'])\n assault = cu.int_from_field(r['ViolentAggravatedAssault'])\n # Fix rape value\n rape += rape2\n\n # Add the values back as ints\n r['ViolentMurderAndNonNegligentManslaughter'] = murder\n r['ViolentRape'] = rape\n r['Rape2'] = rape2\n r['ViolentRobbery'] = robbery\n r['ViolentAggravatedAssault'] = assault\n\n violent_computed = murder + rape + robbery + assault\n if violent_computed != violent:\n print('{} {} {} violent mismatch {} {}'.format(r['Year'], r['City'],\n r['State'], violent,\n violent_computed))\n\n # Category 2: Property Crime\n property = cu.int_from_field(r['Property'])\n\n burglary = cu.int_from_field(r['PropertyBurglary'])\n theft = cu.int_from_field(r['PropertyLarcenyTheft'])\n motor = cu.int_from_field(r['PropertyMotorVehicleTheft'])\n\n # Add the property crime values as ints.\n r['PropertyBurglary'] = burglary\n r['PropertyLarcenyTheft'] = theft\n r['PropertyMotorVehicleTheft'] = motor\n\n # Compute totals\n property_computed = burglary + theft + motor\n\n if property_computed != property:\n print('{} {} {} property mismatch {} {}'.format(r['Year'], r['City'],\n r['State'], property,\n property_computed))\n\n # Category 3: Arson\n arson = cu.int_from_field(r['PropertyArson'])\n r['PropertyArson'] = arson\n\n total = violent_computed + property_computed + arson\n # Write back the totals\n r[TOTAL] = total\n r['Violent'] = violent_computed\n r['Property'] = property_computed\n\n\ndef _clean_crime_file(f_input, f_output):\n \"\"\"Clean a tsv file of crime statistics.\n\n The input contains crime statistics, one for every city.\n\n Remove header and footer lines, and append state column to every line.\n Skip lines that do not contain data.\n Args:\n f_input: file object with crime statistics, one per city.\n f_output: outputstream for writing the cleaned statistics.\n \"\"\"\n state = ''\n count_line = 0\n count_city = 0\n count_state = 0\n count_header_footer = 0\n count_incomplete_lines = 0\n count_comments = 0\n for line in f_input:\n count_line += 1\n if line.startswith('#'):\n count_comments += 1\n continue\n # Split by comma and exclude comma from quotes in split\n # For case like PENNSYLVANIA,\"Abington Township, Montgomery County\",55476.0,53.0,0.0,6.0,0,15.0,32.0,934.0,32.0,883.0,19.0,2.0\n field = [\n '\"{}\"'.format(x)\n for x in list(csv.reader([line], delimiter=',', quotechar='\"'))[0]\n ]\n\n # Skip incomplete lines\n if len(field) < _FIELDS_IN_CRIME_FILE:\n count_incomplete_lines += 1\n logging.info('%s %s', line, len(field))\n continue\n\n # Replace commas and quotes in fields e.g. 
\"1,234\" -> 1234\n # Remove any other leading or trailing whitespace\n for i in range(_FIELDS_IN_CRIME_FILE):\n field[i] = cu.remove_extra_chars(field[i])\n\n # Skip if the line does not contain data or if population is empty.\n if (not field[_POPULATION_INDEX] or\n not cu.is_digit(field[_POPULATION_INDEX]) or\n field[_POPULATION_INDEX] == '0'):\n count_header_footer += 1\n continue\n\n # If field[_STATE_INDEX] is present, use it as the State.\n if field[_STATE_INDEX]:\n # Remove numeric values from state names (comes from footnotes)\n state = cu.remove_digits(field[_STATE_INDEX])\n count_state += 1\n field[_STATE_INDEX] = state\n # Remove any numeric characters from city names.\n field[_CITY_INDEX] = cu.remove_digits(field[_CITY_INDEX])\n count_city += 1\n\n output_line = '{}\\n'.format(','.join(field[:_FIELDS_IN_CRIME_FILE]))\n f_output.write(output_line)\n\n logging.info('lines: %d, comments: %d, incomplete: %d, header_footer:%d',\n count_line, count_comments, count_incomplete_lines,\n count_header_footer)\n logging.info('%d cities', count_city)\n logging.info('%d states', count_state)\n\n\ndef _update_and_calculate_crime_csv(geo_codes, crime_csv, writer):\n with open(crime_csv, \"r\") as crime_f:\n crimes = csv.DictReader(crime_f, fieldnames=_CRIME_FIELDS)\n\n found_set = set()\n cities_not_found_set = set()\n for crime in crimes:\n if geocode_cities.update_crime_geocode(crime, geo_codes, found_set,\n cities_not_found_set):\n calculate_crimes(crime)\n\n processed_dict = {\n 'Year':\n crime['Year'],\n 'GeoId':\n \"dcid:geoId/{}\".format(crime[GEO_CODE]),\n 'Count_CriminalActivities_ViolentCrime':\n crime['Violent'],\n 'Count_CriminalActivities_MurderAndNonNegligentManslaughter':\n crime['ViolentMurderAndNonNegligentManslaughter'],\n 'Count_CriminalActivities_ForcibleRape':\n crime['ViolentRape'],\n 'Count_CriminalActivities_Robbery':\n crime['ViolentRobbery'],\n 'Count_CriminalActivities_AggravatedAssault':\n crime['ViolentAggravatedAssault'],\n 'Count_CriminalActivities_PropertyCrime':\n crime['Property'],\n 'Count_CriminalActivities_Burglary':\n crime['PropertyBurglary'],\n 'Count_CriminalActivities_LarcenyTheft':\n crime['PropertyLarcenyTheft'],\n 'Count_CriminalActivities_MotorVehicleTheft':\n crime['PropertyMotorVehicleTheft'],\n 'Count_CriminalActivities_Arson':\n crime['PropertyArson'],\n 'Count_CriminalActivities_CombinedCrime':\n crime[TOTAL],\n }\n writer.writerow(processed_dict)\n\n # Output the cities not_found\n with open('city_not_found.txt', 'w') as cities_not_found_f:\n for s in cities_not_found_set:\n cities_not_found_f.write('{}\\n'.format(s))\n\n print('US src_cities = {}, cities_not_found = {}'.format(\n len(found_set), len(cities_not_found_set)))\n\n\ndef create_tmcf_file(tmcf_file_path):\n stat_vars = OUTPUT_COLUMNS[2:]\n with open(tmcf_file_path, 'w', newline='') as f_out:\n for i in range(len(stat_vars)):\n f_out.write(\n cu.TEMPLATE_MCF_TEMPLATE.format_map({\n 'index': i,\n 'stat_var': stat_vars[i]\n }))\n\n\ndef create_formatted_csv_file(csv_files, city_output):\n geo_codes = geocode_cities.read_geocodes()\n\n with open(city_output, 'w') as csv_output_f:\n writer = csv.DictWriter(csv_output_f, fieldnames=OUTPUT_COLUMNS)\n writer.writeheader()\n\n for csv_file in csv_files:\n with open(csv_file, \"r\") as f_input:\n cleaned_csv_file = 'cleaned_file.csv'\n with open(cleaned_csv_file, \"w\") as f_output:\n logging.info('clean crime file for csv file %s', csv_file)\n _clean_crime_file(f_input, f_output)\n\n _update_and_calculate_crime_csv(geo_codes, 
cleaned_csv_file,\n writer)\n\n # Remove intermediate files.\n os.remove(cleaned_csv_file)\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n\n # Script XLS and convert to CSV.\n # Add year as the first column and second rape column is not there.\n csv_files = []\n for year, url in YEAR_TO_URL.items():\n response = requests.get(url)\n xls_file = year + '.xls'\n csv_file = year + '.csv'\n with open(xls_file, 'wb') as file:\n file.write(response.content)\n read_file = pd.read_excel(xls_file, skiprows=[0, 1, 2])\n read_file.insert(_YEAR_INDEX, 'Year', year)\n if year not in YEARS_WITH_TWO_RAPE_COLUMNS:\n read_file.insert(_DUMMY_RAPE_INDEX, 'Dummy', 0)\n read_file.to_csv(csv_file, index=None, header=True)\n csv_files.append(csv_file)\n # os.remove(xls_file)\n\n create_formatted_csv_file(csv_files, 'city_crime.csv')\n\n create_tmcf_file(\"FBI_crime.tmcf\")\n\n # Remove intermediate files.\n for csv_file in csv_files:\n os.remove(csv_file)\n" ]
[ [ "pandas.read_excel" ] ]
preetham7897/Estimation-of-Rainfall-Quantity-using-Hybrid-Ensemble-Regression
[ "659fa450e28eeea262295e86c5ebe04a50a8a88b" ]
[ "codes/simple average.py" ]
[ "import pandas as pd\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom sklearn.svm import SVR\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\nfrom sklearn.metrics import mean_squared_error as mse\r\nfrom sklearn.metrics import mean_absolute_error as mae\r\nfrom sklearn.metrics import median_absolute_error as mdae\r\nfrom sklearn.metrics import explained_variance_score as evs\r\nfrom sklearn.metrics import r2_score as r2\r\nfrom sklearn.model_selection import RepeatedKFold\r\nimport numpy as np\r\nfrom itertools import combinations\r\n\r\ndef rmse(y_t, y_p):\r\n return (mse(y_t, y_p))**0.5\r\n\r\nrkf = RepeatedKFold(n_splits=10, n_repeats=10)\r\ndata = pd.read_csv('C:\\\\Users\\\\Preetham G\\\\Documents\\\\Research Projects\\\\Forecast of Rainfall Quantity and its variation using Envrionmental Features\\\\Data\\\\Normalized & Combined Data\\\\All Districts.csv')\r\nmodels = [LinearRegression(), DecisionTreeRegressor(max_depth=6), LinearRegression(), SVR(kernel='linear')]\r\nnames = ['MLR', 'DTR(6)', 'PR(4)', 'SVR(L)']\r\ncomb_models = []\r\ncomb_names = []\r\nfor i in range(1, len(models)+1):\r\n l = combinations(models, i)\r\n m = combinations(names, i)\r\n for j in l:\r\n comb_models.append(list(j))\r\n for j in m:\r\n comb_names.append(list(j))\r\ndata = data.drop(columns=['Index', 'District'])\r\nmse_f = []\r\nrmse_f = []\r\nmae_f = []\r\nmdae_f = []\r\nevs_f = []\r\nr2_f = []\r\npoly = PolynomialFeatures(degree=4)\r\nfor i, j in zip(comb_models, comb_names):\r\n c = 0\r\n mse_t = []\r\n rmse_t = []\r\n mae_t = []\r\n mdae_t = []\r\n evs_t = []\r\n r2_t = []\r\n for tr_i, ts_i in rkf.split(data):\r\n train, test = data.iloc[tr_i], data.iloc[ts_i]\r\n train_x = train.drop(columns=['Rainfall'])\r\n train_y = train['Rainfall']\r\n test_x = test.drop(columns=['Rainfall'])\r\n test_y = test['Rainfall']\r\n d = {}\r\n for k, l in zip(i, j):\r\n print(j, l, c)\r\n model = k\r\n if l == 'PR(4)':\r\n train_x = poly.fit_transform(train_x)\r\n test_x = poly.fit_transform(test_x)\r\n model.fit(train_x, train_y)\r\n ts_p = model.predict(test_x)\r\n d[l] = list(ts_p)\r\n c += 1\r\n df = pd.DataFrame(d, columns=names)\r\n ts_p_m = df.mean(axis=1)\r\n mse_t.append(mse(test_y, ts_p_m))\r\n rmse_t.append(rmse(test_y, ts_p_m))\r\n mae_t.append(mae(test_y, ts_p_m))\r\n mdae_t.append(mdae(test_y, ts_p_m))\r\n evs_t.append(evs(test_y, ts_p_m))\r\n r2_t.append(r2(test_y, ts_p_m))\r\n mse_f.append(np.mean(mse_t))\r\n rmse_f.append(np.mean(rmse_t))\r\n mae_f.append(np.mean(mae_t))\r\n mdae_f.append(np.mean(mdae_t))\r\n evs_f.append(np.mean(evs_t))\r\n r2_f.append(np.mean(r2_t))\r\nd = {}\r\nd['Combinations'] = comb_names\r\nd['MSE'] = mse_f\r\nd['RMSE'] = rmse_f\r\nd['MAE'] = mae_f\r\nd['MDAE'] = mdae_f\r\nd['EVS'] = evs_f\r\nd['R2'] = r2_f\r\ndf = pd.DataFrame(d, columns=['Combinations', 'MSE', 'RMSE', 'MAE', 'MDAE', 'EVS', 'R2'])\r\ndf.to_csv('C:\\\\Users\\\\Preetham G\\\\Documents\\\\Research Projects\\\\Ensemble Rainfall\\\\Results\\\\Simple Average.csv', index=False)" ]
[ [ "sklearn.metrics.mean_squared_error", "pandas.read_csv", "sklearn.model_selection.RepeatedKFold", "sklearn.svm.SVR", "sklearn.linear_model.LinearRegression", "pandas.DataFrame", "sklearn.tree.DecisionTreeRegressor", "sklearn.metrics.mean_absolute_error", "sklearn.metrics.median_absolute_error", "sklearn.metrics.explained_variance_score", "sklearn.metrics.r2_score", "sklearn.preprocessing.PolynomialFeatures", "numpy.mean" ] ]
Sidbenake/ga-learner-dsmp-repo
[ "273af724ddea5c4eb957065def51e17ff9ad1279" ]
[ "Decision-Tree/code.py" ]
[ "# --------------\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\ndata = pd.read_csv(path)\nX = data.drop(['customer.id','paid.back.loan'],axis=1)\ny = data['paid.back.loan']\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=0)\n\n\n# --------------\n#Importing header files\nimport matplotlib.pyplot as plt\n\n# Code starts here\nfully_paid = y_train.value_counts()\nplt.bar(fully_paid.index,fully_paid.values)\n\n# Code ends here\n\n\n# --------------\n#Importing header files\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\n\n\n# Code starts here\nX_train['int.rate'] = X_train['int.rate'].apply(lambda x: float(x[:-1]))\nX_train['int.rate'] = X_train['int.rate']/100\n\nX_test['int.rate'] = X_test['int.rate'].apply(lambda x: float(x[:-1]))\nX_test['int.rate'] = X_test['int.rate']/100\n\nnum_df = X_train.select_dtypes('number')\ncat_df = X_train.select_dtypes('object')\n# Code ends here\n\n\n\n# --------------\n#Importing header files\nimport seaborn as sns\n\n\n# Code starts here\ncols = num_df.columns\nfig, axes = plt.subplots(9,1)\nfor i in range(9):\n sns.boxplot(x=y_train, y=num_df[cols[i]],ax=axes[i])\n# Code ends here\n\n\n# --------------\n# Code starts here\ncols = cat_df.columns\nfig, axes = plt.subplots(2,2)\nfor i in range(2):\n for j in range(2):\n sns.countplot(x=X_train[cols[i*2+j]], hue=y_train,ax = axes[i,j])\n\n\n# Code ends here\n\n\n# --------------\n#Importing header files\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.preprocessing import LabelEncoder\nimport numpy as np\n# Code starts here\nX_train.fillna(np.nan)\nX_test.fillna(np.nan)\n\nle = LabelEncoder()\nfor i in cat_df.columns:\n X_train[i] = le.fit_transform(X_train[i])\n X_test[i] = le.fit_transform(X_test[i])\n\ny_train[y_train=='Yes']=1\ny_train[y_train=='No']=0\n\ny_test[y_test=='Yes']=1\ny_test[y_test=='No']=0\n\ny_train = y_train.astype('int')\ny_test = y_test.astype('int')\n\nmodel = DecisionTreeClassifier(random_state=0)\n\nmodel.fit(X_train,y_train)\n\nacc = model.score(X_test,y_test)\n\nprint(acc)\n# Code ends here\n\n\n# --------------\n#Importing header files\nfrom sklearn.model_selection import GridSearchCV\n\n#Parameter grid\nparameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)}\n\n# Code starts here\nmodel_2 = DecisionTreeClassifier(random_state=0)\np_tree = GridSearchCV(estimator=model_2,param_grid=parameter_grid,cv=5)\n\np_tree.fit(X_train,y_train)\n\nacc_2 = p_tree.score(X_test,y_test)\n\nprint(acc_2)\n# Code ends here\n\n\n# --------------\n#Importing header files\n\nfrom io import StringIO\nfrom sklearn.tree import export_graphviz\nfrom sklearn import tree\nfrom sklearn import metrics\nfrom IPython.display import Image\nimport pydotplus\n\n# Code starts here\ndot_data = tree.export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None,\n feature_names=X.columns, filled = True, \n class_names=['loan_paid_back_yes','loan_paid_back_no'])\n\ngraph_big = pydotplus.graph_from_dot_data(dot_data)\n\n\n# show graph - do not delete/modify the code below this line\nimg_path = user_data_dir+'/file.png'\ngraph_big.write_png(img_path)\n\nplt.figure(figsize=(20,15))\nplt.imshow(plt.imread(img_path))\nplt.axis('off')\nplt.show() \n\n# Code ends here\n\n\n" ]
[ [ "matplotlib.pyplot.imread", "pandas.read_csv", "sklearn.tree.DecisionTreeClassifier", "matplotlib.pyplot.figure", "matplotlib.pyplot.axis", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.show", "sklearn.model_selection.GridSearchCV", "sklearn.preprocessing.LabelEncoder", "sklearn.tree.export_graphviz", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.bar" ] ]
jbohnslav/kornia
[ "74cc0cfd6406179570b06ca4ef8423142e7eaa0b" ]
[ "kornia/utils/image.py" ]
[ "from typing import Optional\n\nimport numpy as np\nimport torch\n\n\ndef image_to_tensor(image: np.ndarray, keepdim: bool = True) -> torch.Tensor:\n \"\"\"Converts a numpy image to a PyTorch 4d tensor image.\n\n Args:\n image (numpy.ndarray): image of the form :math:`(H, W, C)`, :math:`(H, W)` or\n :math:`(B, H, W, C)`.\n keepdim (bool): If ``False`` unsqueeze the input image to match the shape\n :math:`(B, H, W, C)`. Default: ``True``\n\n Returns:\n torch.Tensor: tensor of the form :math:`(B, C, H, W)` if keepdim is ``False``,\n :math:`(C, H, W)` otherwise.\n \"\"\"\n if not isinstance(image, (np.ndarray,)):\n raise TypeError(\"Input type must be a numpy.ndarray. Got {}\".format(\n type(image)))\n\n if len(image.shape) > 4 or len(image.shape) < 2:\n raise ValueError(\n \"Input size must be a two, three or four dimensional array\")\n\n input_shape = image.shape\n tensor: torch.Tensor = torch.from_numpy(image)\n\n if len(input_shape) == 2:\n # (H, W) -> (1, H, W)\n tensor = tensor.unsqueeze(0)\n elif len(input_shape) == 3:\n # (H, W, C) -> (C, H, W)\n tensor = tensor.permute(2, 0, 1)\n elif len(input_shape) == 4:\n # (B, H, W, C) -> (B, C, H, W)\n tensor = tensor.permute(0, 3, 1, 2)\n keepdim = True # no need to unsqueeze\n else:\n raise ValueError(\n \"Cannot process image with shape {}\".format(input_shape))\n\n return tensor.unsqueeze(0) if not keepdim else tensor\n\n\ndef _to_bchw(tensor: torch.Tensor, color_channel_num: Optional[int] = None) -> torch.Tensor:\n \"\"\"Converts a PyTorch tensor image to BCHW format.\n\n Args:\n tensor (torch.Tensor): image of the form :math:`(H, W)`, :math:`(C, H, W)`, :math:`(H, W, C)` or\n :math:`(B, C, H, W)`.\n color_channel_num (Optional[int]): Color channel of the input tensor.\n If None, it will not alter the input channel.\n\n Returns:\n torch.Tensor: input tensor of the form :math:`(B, H, W, C)`.\n \"\"\"\n if not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(tensor)}\")\n\n if len(tensor.shape) > 4 or len(tensor.shape) < 2:\n raise ValueError(f\"Input size must be a two, three or four dimensional tensor. Got {tensor.shape}\")\n\n if len(tensor.shape) == 2:\n tensor = tensor.unsqueeze(0)\n\n if len(tensor.shape) == 3:\n tensor = tensor.unsqueeze(0)\n\n # TODO(jian): this function is never used. Besides is not feasible for torchscript.\n # In addition, the docs must be updated. I don't understand what is doing.\n # if color_channel_num is not None and color_channel_num != 1:\n # channel_list = [0, 1, 2, 3]\n # channel_list.insert(1, channel_list.pop(color_channel_num))\n # tensor = tensor.permute(*channel_list)\n return tensor\n\n\ndef _to_bcdhw(tensor: torch.Tensor, color_channel_num: Optional[int] = None) -> torch.Tensor:\n \"\"\"Converts a PyTorch tensor image to BCHW format.\n Args:\n tensor (torch.Tensor): image of the form :math:`(D, H, W)`, :math:`(C, D, H, W)`, :math:`(D, H, W, C)` or\n :math:`(B, C, D, H, W)`.\n color_channel_num (Optional[int]): Color channel of the input tensor.\n If None, it will not alter the input channel.\n\n Returns:\n torch.Tensor: input tensor of the form :math:`(B, C, D, H, W)`.\n \"\"\"\n if not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(tensor)}\")\n\n if len(tensor.shape) > 5 or len(tensor.shape) < 3:\n raise ValueError(f\"Input size must be a three, four or five dimensional tensor. 
Got {tensor.shape}\")\n\n if len(tensor.shape) == 3:\n tensor = tensor.unsqueeze(0)\n\n if len(tensor.shape) == 4:\n tensor = tensor.unsqueeze(0)\n\n # TODO(jian): this function is never used. Besides is not feasible for torchscript.\n # In addition, the docs must be updated. I don't understand what is doing.\n # if color_channel_num is not None and color_channel_num != 1:\n # channel_list = [0, 1, 2, 3, 4]\n # channel_list.insert(1, channel_list.pop(color_channel_num))\n # tensor = tensor.permute(*channel_list)\n return tensor\n\n\ndef tensor_to_image(tensor: torch.Tensor) -> np.array:\n \"\"\"Converts a PyTorch tensor image to a numpy image.\n\n In case the tensor is in the GPU, it will be copied back to CPU.\n\n Args:\n tensor (torch.Tensor): image of the form :math:`(H, W)`, :math:`(C, H, W)` or\n :math:`(B, C, H, W)`.\n\n Returns:\n numpy.ndarray: image of the form :math:`(H, W)`, :math:`(H, W, C)` or :math:`(B, H, W, C)`.\n\n \"\"\"\n if not isinstance(tensor, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(\n type(tensor)))\n\n if len(tensor.shape) > 4 or len(tensor.shape) < 2:\n raise ValueError(\n \"Input size must be a two, three or four dimensional tensor\")\n\n input_shape = tensor.shape\n image: np.array = tensor.cpu().detach().numpy()\n\n if len(input_shape) == 2:\n # (H, W) -> (H, W)\n image = image\n elif len(input_shape) == 3:\n # (C, H, W) -> (H, W, C)\n if input_shape[0] == 1:\n # Grayscale for proper plt.imshow needs to be (H,W)\n image = image.squeeze()\n else:\n image = image.transpose(1, 2, 0)\n elif len(input_shape) == 4:\n # (B, C, H, W) -> (B, H, W, C)\n image = image.transpose(0, 2, 3, 1)\n if input_shape[0] == 1:\n image = image.squeeze(0)\n if input_shape[1] == 1:\n image = image.squeeze(-1)\n else:\n raise ValueError(\n \"Cannot process tensor with shape {}\".format(input_shape))\n\n return image\n" ]
[ [ "torch.from_numpy" ] ]
ai4er-cdt/gtc-exposure
[ "f0504d8c40c3553ba1466faef3d802ced09bd984" ]
[ "settlement_segmentation/data/sentinel_time_series/gen_train_data.py" ]
[ "import numpy as np\nimport descarteslabs as dl\nfrom shapely.geometry import Polygon, MultiPolygon\nfrom PIL import Image\n\ndef generate_sentinel_training_images(geometry,\n area, #eg. 'Jamaca'\n tile_size= 512,\n start_datetime=\"2014-01-01\",\n end_datetime=\"2020-01-01\",\n cloud_fraction=0.01\n ):\n \n #use descartes API to get Sentinel image\n scenes, geoctx = dl.scenes.search(geometry,\n products=[\"sentinel-2:L1C\"],\n start_datetime= start_datetime,\n end_datetime = end_datetime,\n cloud_fraction = cloud_fraction)\n \n #creates image stack using RGB Bands\n ndarray_stack = scenes.stack(\"red green blue\", geoctx.assign())\n \n #for each of the images\n image_stack = []\n for img in ndarray_stack:\n tiles= []\n #slice the image into tiles \n for y in range(tile_size, img.shape[2], tile_size):\n for x in range(tile_size, img.shape[1], tile_size):\n tile = (img[:,x:x+tile_size, y:y+tile_size])\n #this filters edge images that are not the correct shape\n if tile.shape == (3, tile_size, tile_size):\n tiles.append(tile) \n image_stack.append(tiles)\n \n #convert nested list to array\n no_scenes = len(image_stack)\n no_tiles_per_scene = len(image_stack[0])\n image_array = np.zeros([no_scenes, no_tiles_per_scene, 3, tile_size, tile_size])\n for i in range(no_scenes):\n for j in range(no_tiles_per_scene):\n image_array[i, j] = image_stack[i][j]\n \n #take compoite image as average of scenes\n composite_images = np.zeros(image_array[0].shape)\n for i in range(image_array.shape[1]):\n composite_image = np.ma.median(image_array[:,i], axis=0)\n composite_images[i] = composite_image\n \n # reshape from (3, x, y) to (x, y, 3)\n reshape_image = np.zeros((tile_size,tile_size,3))\n reshape_image[:,:,0] = composite_image[0]\n reshape_image[:,:,1] = composite_image[1]\n reshape_image[:,:,2] = composite_image[2]\n #scale values to [0, 255]\n #avoid divide by 0 error:\n if np.max(reshape_image)!=0:\n reshape_image = (reshape_image/np.max(reshape_image)*255).astype(np.uint8)\n #save images as jpeg\n Image.fromarray(reshape_image).save(\"train_{}_{}.jpeg\".format(area, i))\n \n return composite_images" ]
[ [ "numpy.ma.median", "numpy.max", "numpy.zeros" ] ]
ozcell/pytorch-auto-drive
[ "f1c2fd223cf7d307a3968fe671d0271b03ced39c" ]
[ "transforms/transforms.py" ]
[ "# Mostly copied and modified from torch/vision/references/segmentation to support unlabeled data\n# Copied functions from fmassa/vision-1 to support multi-dimensional masks loaded from numpy ndarray\n# Update: The current torchvision github repo now supports tensor operation for all common transformations,\n# you are encouraged to check it out\n# Processing in (w, h), while providing public functions in (h, w)\n#######################\n# For transforms with multiple targets (masks, keypoints), target is formed as `dict{'padding_mask', 'keypoints', etc.}`\nimport numpy as np\nfrom PIL import Image\nfrom collections.abc import Sequence\nimport numbers\nimport random\nimport torch\nimport math\nfrom . import functional as F\n\n\ndef _check_sequence_input(x, name, req_sizes):\n msg = req_sizes[0] if len(req_sizes) < 2 else \" or \".join([str(s) for s in req_sizes])\n if not isinstance(x, Sequence):\n raise TypeError(\"{} should be a sequence of length {}.\".format(name, msg))\n if len(x) not in req_sizes:\n raise ValueError(\"{} should be sequence of length {}.\".format(name, msg))\n\n\ndef _setup_angle(x, name, req_sizes=(2, )):\n if isinstance(x, numbers.Number):\n if x < 0:\n raise ValueError(\"If {} is a single number, it must be positive.\".format(name))\n x = [-x, x]\n else:\n _check_sequence_input(x, name, req_sizes)\n\n return [float(d) for d in x]\n\n\nclass Compose(object):\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, image, target):\n for t in self.transforms:\n image, target = t(image, target)\n\n return image, target\n\n\nclass Resize(object):\n def __init__(self, size_image, size_label):\n self.size_image = size_image\n self.size_label = size_label\n\n @staticmethod\n def transform_points(points, in_size, out_size, ignore_x=-2):\n # Resize a np.array (L x N x 2) of points (x, y), original axis start from top-left corner\n # x <-> w, y <-> h\n ignore_filter = (points[:, :, 0] == ignore_x)\n in_h, in_w = in_size\n out_h, out_w = out_size\n scale = np.array([out_w / in_w, out_h / in_h], dtype=np.float32)\n points = points * scale\n points[:, :, 0] = points[:, :, 0] * ~ignore_filter + (-2) * ignore_filter\n\n return points\n\n def __call__(self, image, target):\n w_ori, h_ori = F._get_image_size(image)\n image = F.resize(image, self.size_image, interpolation=Image.LINEAR)\n if isinstance(target, str):\n return image, target\n elif isinstance(target, dict): # To keep BC\n if 'keypoints' in target:\n target['keypoints'] = self.transform_points(target['keypoints'], (h_ori, w_ori), self.size_label)\n if 'padding_mask' in target:\n target['padding_mask'] = F.resize(target['padding_mask'], self.size_label, interpolation=Image.NEAREST)\n else:\n target = F.resize(target, self.size_label, interpolation=Image.NEAREST)\n\n return image, target\n\n\n# Crop from up-left corner\nclass Crop(object):\n def __init__(self, size):\n self.h, self.w = size\n\n def __call__(self, image, target):\n image = F.crop(image, 0, 0, self.h, self.w)\n target = F.crop(target, 0, 0, self.h, self.w)\n\n return image, target\n\n\n# Pad image with zeros, yet pad target with 255 (ignore label) on bottom & right if\n# given a bigger desired size (or else nothing is done at all)\nclass ZeroPad(object):\n def __init__(self, size):\n self.h, self.w = size\n\n @staticmethod\n def zero_pad(image, target, h, w):\n ow, oh = F._get_image_size(target)\n pad_h = h - oh if oh < h else 0\n pad_w = w - ow if ow < w else 0\n image = F.pad(image, [0, 0, pad_w, pad_h], fill=0)\n target = 
F.pad(target, [0, 0, pad_w, pad_h], fill=255)\n\n return image, target\n\n def __call__(self, image, target):\n return self.zero_pad(image, target, self.h, self.w)\n\n\n# Random translation in pixels\n# Random translation = Zero pad + Random crop\nclass RandomTranslation(object):\n def __init__(self, trans_h, trans_w):\n self.trans_h = trans_h\n self.trans_w = trans_w\n\n def __call__(self, image, target):\n tw, th = F._get_image_size(image)\n image = F.pad(image, [self.trans_w, self.trans_h, self.trans_w, self.trans_h], fill=0)\n target = F.pad(target, [self.trans_w, self.trans_h, self.trans_w, self.trans_h], fill=255)\n i, j, h, w = RandomCrop.get_params(image, (th, tw))\n image = F.crop(image, i, j, h, w)\n target = F.crop(target, i, j, h, w)\n\n return image, target\n\n\nclass RandomZeroPad(object):\n def __init__(self, pad_h, pad_w):\n self.pad_h = pad_h\n self.pad_w = pad_w\n\n def __call__(self, image, target):\n r = random.randint(-self.pad_w, self.pad_w)\n b = random.randint(-self.pad_h, self.pad_h)\n l = 0\n t = 0\n if r < 0:\n l = -r\n r = 0\n if b < 0:\n t = -b\n b = 0\n\n image = F.pad(image, [l, t, r, b], fill=0)\n target = F.pad(target, [l, t, r, b], fill=255)\n\n return image, target\n\n\nclass RandomResize(object):\n def __init__(self, min_size, max_size=None):\n self.min_size = min_size\n if max_size is None:\n max_size = min_size\n self.max_size = max_size\n\n def __call__(self, image, target):\n min_h, min_w = self.min_size\n max_h, max_w = self.max_size\n h = random.randint(min_h, max_h)\n w = random.randint(min_w, max_w)\n image = F.resize(image, [h, w], interpolation=Image.LINEAR)\n if isinstance(target, str):\n return image, target\n elif isinstance(target, dict): # To keep BC\n if 'keypoints' in target:\n w_ori, h_ori = F._get_image_size(image)\n target['keypoints'] = Resize.transform_points(target['keypoints'], (h_ori, w_ori), (h, w))\n if 'padding_mask' in target:\n target['padding_mask'] = F.resize(target['padding_mask'], [h, w], interpolation=Image.NEAREST)\n else:\n target = F.resize(target, [h, w], interpolation=Image.NEAREST)\n\n return image, target\n\n\nclass RandomScale(object):\n def __init__(self, min_scale, max_scale=None):\n self.min_scale = min_scale\n if max_scale is None:\n max_scale = min_scale\n self.max_scale = max_scale\n\n def __call__(self, image, target):\n scale = random.uniform(self.min_scale, self.max_scale)\n w, h = F._get_image_size(image)\n h = int(scale * h)\n w = int(scale * w)\n image = F.resize(image, [h, w], interpolation=Image.LINEAR)\n target = F.resize(target, [h, w], interpolation=Image.NEAREST)\n\n return image, target\n\n\nclass RandomCrop(object):\n def __init__(self, size):\n self.size = size\n\n @staticmethod\n def get_params(img, output_size):\n w, h = F._get_image_size(img)\n th, tw = output_size\n if w <= tw and h <= th:\n return 0, 0, h, w\n\n i = random.randint(0, h - th)\n j = random.randint(0, w - tw)\n return i, j, th, tw\n\n def __call__(self, image, target):\n # Pad if needed\n iw, ih = F._get_image_size(image)\n if ih < self.size[0] or iw < self.size[1]:\n # print(image.size())\n # print(self.size)\n image, target = ZeroPad.zero_pad(image, target,\n max(self.size[0], ih),\n max(self.size[1], iw))\n i, j, h, w = self.get_params(image, self.size)\n image = F.crop(image, i, j, h, w)\n target = F.crop(target, i, j, h, w)\n\n return image, target\n\n\nclass RandomHorizontalFlip(object):\n def __init__(self, flip_prob):\n self.flip_prob = flip_prob\n\n def __call__(self, image, target):\n t = random.random()\n if t < 
self.flip_prob:\n image = F.hflip(image)\n target = target if (isinstance(target, str) or t >= self.flip_prob) else F.hflip(target)\n\n return image, target\n\n\nclass ToTensor(object):\n def __init__(self, keep_scale=False, reverse_channels=False):\n # keep_scale = True => Images or whatever are not divided by 255\n # reverse_channels = True => RGB images are changed to BGR (the default behavior of openCV & Caffe,\n # let's wish them all go to heaven,\n # for they wasted me days!)\n self.keep_scale = keep_scale\n self.reverse_channels = reverse_channels\n\n def __call__(self, image, target):\n image = self._pil_to_tensor(image)\n target = self.label_to_tensor(target)\n\n return image, target\n\n @staticmethod\n def label_to_tensor(pic): # segmentation masks or keypoint arrays\n if isinstance(pic, str):\n return pic\n elif isinstance(pic, dict):\n if 'keypoints' in pic:\n pic['keypoints'] = torch.as_tensor(pic['keypoints'], dtype=torch.float32)\n if 'padding_mask' in pic:\n pic['padding_mask'] = torch.as_tensor(np.asarray(pic['padding_mask']).copy(), dtype=torch.float32)\n return pic\n else:\n return torch.as_tensor(np.asarray(pic).copy(), dtype=torch.int64)\n\n def _pil_to_tensor(self, pic):\n # Convert a PIL Image to tensor (a direct copy)\n if pic.mode == 'I':\n img = torch.from_numpy(np.array(pic, np.int32, copy=False))\n elif pic.mode == 'I;16':\n img = torch.from_numpy(np.array(pic, np.int16, copy=False))\n elif pic.mode == 'F':\n img = torch.from_numpy(np.array(pic, np.float32, copy=False))\n elif pic.mode == '1':\n img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))\n else:\n img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))\n # PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK\n if pic.mode == 'YCbCr':\n nchannel = 3\n elif pic.mode == 'I;16':\n nchannel = 1\n else:\n nchannel = len(pic.mode)\n img = img.view(pic.size[1], pic.size[0], nchannel)\n if self.reverse_channels: # Beware this only works with 3 channels(can't use -1 with tensors)\n img = img[:, :, [2, 1, 0]]\n # put it from HWC to CHW format\n # yikes, this transpose takes 80% of the loading time/CPU\n img = img.transpose(0, 1).transpose(0, 2).contiguous()\n if isinstance(img, torch.ByteTensor):\n if self.keep_scale:\n return img.float()\n else:\n return img.float().div(255)\n else:\n return img\n\n\nclass Normalize(object):\n def __init__(self, mean, std, normalize_target=False):\n self.mean = mean\n self.std = std\n self.normalize_target = normalize_target\n\n @staticmethod\n def transform_points(points, h, w, ignore_x=-2):\n # Divide keypoints by h & w to 0~1\n # A special case of resize\n points = points / torch.tensor([w, h], device=points.device, dtype=points.dtype)\n points[points[:, :, 0] < 0][:, 0] = ignore_x\n\n return points\n\n def __call__(self, image, target):\n image = F.normalize(image, mean=self.mean, std=self.std)\n if self.normalize_target and not isinstance(target, str):\n w, h = F._get_image_size(image)\n if isinstance(target, dict):\n target['keypoints'] = self.transform_points(target['keypoints'], h, w, ignore_x=-2)\n else:\n target = self.transform_points(target, h, w, ignore_x=-2)\n\n return image, target\n\n\n# Init with a python list as the map (mainly for cityscapes's id -> train_id)\nclass LabelMap(object):\n def __init__(self, label_id_map, outlier=False):\n self.label_id_map = torch.tensor(label_id_map)\n self.outlier = outlier\n\n def __call__(self, image, target):\n if self.outlier:\n target[target >= self.label_id_map.shape[0]] = 0 # Label 0 is 
usually ignored\n target = self.label_id_map[target]\n\n return image, target\n\n\n# Match label and image size\nclass MatchSize(object):\n def __init__(self, l2i=True):\n self.l2i = l2i # Match (l)abel to (i)mage\n\n def __call__(self, image, target):\n wi, hi = F._get_image_size(image)\n wl, hl = F._get_image_size(target)\n if hi == hl and wi == wl:\n return image, target\n\n if self.l2i:\n target = F.resize(target, [hi, wi], interpolation=Image.NEAREST)\n else:\n image = F.resize(image, [hl, wl], interpolation=Image.LINEAR)\n\n return image, target\n\n\n# TODO: Support fill color 255 for tensor inputs (supported in torchvision >= 0.9.0)\n# Now fill color is fixed to 0 (background for lane detection label)\nclass RandomRotation(object):\n def __init__(self, degrees, expand=False, center=None, fill=None):\n self.degrees = _setup_angle(degrees, name=\"degrees\", req_sizes=(2, ))\n\n if center is not None:\n _check_sequence_input(center, \"center\", req_sizes=(2, ))\n\n self.center = center\n self.expand = expand\n self.fill = fill\n\n @staticmethod\n def get_params(degrees):\n\n return random.uniform(degrees[0], degrees[1])\n\n @staticmethod\n def transform_points(points, angle, h, w, ignore_x=-2):\n # Rotate a np.array (L x N x 2) of points (x, y) anti-clockwise, original axis start from top-left corner\n ignore_filter = (points[:, :, 0] == ignore_x)\n offset = np.array([w / 2, h / 2], dtype=np.float32)\n matrix = np.array([[math.cos(angle / 180.0 * math.pi), math.sin(-angle / 180.0 * math.pi)],\n [math.sin(angle / 180.0 * math.pi), math.cos(angle / 180.0 * math.pi)]], dtype=np.float32)\n points = np.matmul((points - offset), matrix) + offset\n # exceed border\n ignore_filter += ((points[:, :, 0] > w) + (points[:, :, 1] > h) + ((points > 0).sum(axis=-1) < 2))\n points[:, :, 0] = points[:, :, 0] * ~ignore_filter + (-2) * ignore_filter\n\n return points\n\n def __call__(self, image, target):\n angle = self.get_params(self.degrees)\n image = F.rotate(image, angle, resample=Image.LINEAR, expand=self.expand, center=self.center, fill=0)\n if isinstance(target, dict): # To keep BC\n if 'keypoints' in target:\n w, h = F._get_image_size(image)\n target['keypoints'] = self.transform_points(target['keypoints'], angle, h, w)\n if 'padding_mask' in target:\n target['padding_mask'] = F.rotate(target['padding_mask'], angle, resample=Image.NEAREST,\n expand=self.expand, center=self.center, fill=1)\n else:\n target = F.rotate(target, angle, resample=Image.NEAREST, expand=self.expand, center=self.center, fill=255)\n\n return image, target\n" ]
[ [ "numpy.matmul", "torch.as_tensor", "torch.tensor", "numpy.asarray", "numpy.array" ] ]
jacobkimmel/GSEA.py
[ "8084e96dcd4f57ea99a8c18fa7f96db25d6f1f0d" ]
[ "tests/test_gsea.py" ]
[ "# Dummy test\nimport numpy as np\nfrom gsea import *\nfrom numpy.testing import assert_almost_equal\n\ndef test_rank_genes():\n D = np.array([[-1,1],[1,-1]])\n C = [0,1]\n L,r = rank_genes(D,C)\n assert_almost_equal(L, [0,1])\n assert_almost_equal(r, [1,-1])\n\ndef test_enrichment_score():\n L = [1,0]\n r = [-1,1]\n S = [0,1]\n ES = enrichment_score(L,r,S)\n assert_almost_equal(ES,1)\n\n L = [0,1,2]\n r = [-1,0,1]\n assert_almost_equal(enrichment_score(L,r,[0]),1)\n assert_almost_equal(enrichment_score(L,r,[1]),-1)\n" ]
[ [ "numpy.array", "numpy.testing.assert_almost_equal" ] ]
jackieleng/pandas
[ "ccec504e31ce74f8016952ac75add1cc4bec7080" ]
[ "pandas/tseries/index.py" ]
[ "# pylint: disable=E1101\nfrom __future__ import division\nimport operator\nimport warnings\nfrom datetime import time, datetime\nfrom datetime import timedelta\nimport numpy as np\nfrom pandas.core.base import _shared_docs\n\nfrom pandas.types.common import (_NS_DTYPE, _INT64_DTYPE,\n is_object_dtype, is_datetime64_dtype,\n is_datetimetz, is_dtype_equal,\n is_integer, is_float,\n is_integer_dtype,\n is_datetime64_ns_dtype,\n is_period_dtype,\n is_bool_dtype,\n is_string_dtype,\n is_list_like,\n is_scalar,\n pandas_dtype,\n _ensure_int64)\nfrom pandas.types.generic import ABCSeries\nfrom pandas.types.dtypes import DatetimeTZDtype\nfrom pandas.types.missing import isnull\n\nimport pandas.types.concat as _concat\nfrom pandas.core.common import (_values_from_object, _maybe_box,\n PerformanceWarning)\n\nfrom pandas.core.index import Index, Int64Index, Float64Index\nfrom pandas.indexes.base import _index_shared_docs\nimport pandas.compat as compat\nfrom pandas.tseries.frequencies import (\n to_offset, get_period_alias,\n Resolution)\nfrom pandas.tseries.base import DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin\nfrom pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay\nfrom pandas.tseries.tools import parse_time_string, normalize_date, to_time\nfrom pandas.tseries.timedeltas import to_timedelta\nfrom pandas.util.decorators import (Appender, cache_readonly,\n deprecate_kwarg, Substitution)\nimport pandas.core.common as com\nimport pandas.tseries.offsets as offsets\nimport pandas.tseries.tools as tools\n\nfrom pandas.lib import Timestamp\nimport pandas.lib as lib\nimport pandas.tslib as tslib\nimport pandas._period as period\nimport pandas._join as _join\nimport pandas.algos as _algos\nimport pandas.index as _index\n\n\ndef _utc():\n import pytz\n return pytz.utc\n\n# -------- some conversion wrapper functions\n\n\ndef _field_accessor(name, field, docstring=None):\n def f(self):\n values = self.asi8\n if self.tz is not None:\n utc = _utc()\n if self.tz is not utc:\n values = self._local_timestamps()\n\n if field in ['is_month_start', 'is_month_end',\n 'is_quarter_start', 'is_quarter_end',\n 'is_year_start', 'is_year_end']:\n month_kw = (self.freq.kwds.get('startingMonth',\n self.freq.kwds.get('month', 12))\n if self.freq else 12)\n\n result = tslib.get_start_end_field(values, field, self.freqstr,\n month_kw)\n elif field in ['weekday_name']:\n result = tslib.get_date_name_field(values, field)\n return self._maybe_mask_results(result)\n elif field in ['is_leap_year']:\n # no need to mask NaT\n return tslib.get_date_field(values, field)\n else:\n result = tslib.get_date_field(values, field)\n\n return self._maybe_mask_results(result, convert='float64')\n\n f.__name__ = name\n f.__doc__ = docstring\n return property(f)\n\n\ndef _dt_index_cmp(opname, nat_result=False):\n \"\"\"\n Wrap comparison operations to convert datetime-like to datetime64\n \"\"\"\n\n def wrapper(self, other):\n func = getattr(super(DatetimeIndex, self), opname)\n if (isinstance(other, datetime) or\n isinstance(other, compat.string_types)):\n other = _to_m8(other, tz=self.tz)\n result = func(other)\n if isnull(other):\n result.fill(nat_result)\n else:\n if isinstance(other, list):\n other = DatetimeIndex(other)\n elif not isinstance(other, (np.ndarray, Index, ABCSeries)):\n other = _ensure_datetime64(other)\n result = func(np.asarray(other))\n result = _values_from_object(result)\n\n if isinstance(other, Index):\n o_mask = other.values.view('i8') == tslib.iNaT\n else:\n o_mask = other.view('i8') == 
tslib.iNaT\n\n if o_mask.any():\n result[o_mask] = nat_result\n\n if self.hasnans:\n result[self._isnan] = nat_result\n\n # support of bool dtype indexers\n if is_bool_dtype(result):\n return result\n return Index(result)\n\n return wrapper\n\n\ndef _ensure_datetime64(other):\n if isinstance(other, np.datetime64):\n return other\n raise TypeError('%s type object %s' % (type(other), str(other)))\n\n_midnight = time(0, 0)\n\n\ndef _new_DatetimeIndex(cls, d):\n \"\"\" This is called upon unpickling, rather than the default which doesn't\n have arguments and breaks __new__ \"\"\"\n\n # data are already in UTC\n # so need to localize\n tz = d.pop('tz', None)\n\n result = cls.__new__(cls, verify_integrity=False, **d)\n if tz is not None:\n result = result.tz_localize('UTC').tz_convert(tz)\n return result\n\n\nclass DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin,\n Int64Index):\n \"\"\"\n Immutable ndarray of datetime64 data, represented internally as int64, and\n which can be boxed to Timestamp objects that are subclasses of datetime and\n carry metadata such as frequency information.\n\n Parameters\n ----------\n data : array-like (1-dimensional), optional\n Optional datetime-like data to construct index with\n copy : bool\n Make a copy of input ndarray\n freq : string or pandas offset object, optional\n One of pandas date offset strings or corresponding objects\n start : starting value, datetime-like, optional\n If data is None, start is used as the start point in generating regular\n timestamp data.\n periods : int, optional, > 0\n Number of periods to generate, if generating index. Takes precedence\n over end argument\n end : end time, datetime-like, optional\n If periods is none, generated index will extend to first conforming\n time on or just past end argument\n closed : string or None, default None\n Make the interval closed with respect to the given frequency to\n the 'left', 'right', or both sides (None)\n tz : pytz.timezone or dateutil.tz.tzfile\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False signifies a\n non-DST time (note that this flag is only applicable for ambiguous\n times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous times\n infer_dst : boolean, default False (DEPRECATED)\n Attempt to infer fall dst-transition hours based on order\n name : object\n Name to be stored in the index\n\n To learn more about the frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n \"\"\"\n\n _typ = 'datetimeindex'\n _join_precedence = 10\n\n def _join_i8_wrapper(joinf, **kwargs):\n return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype='M8[ns]',\n **kwargs)\n\n _inner_indexer = _join_i8_wrapper(_join.inner_join_indexer_int64)\n _outer_indexer = _join_i8_wrapper(_join.outer_join_indexer_int64)\n _left_indexer = _join_i8_wrapper(_join.left_join_indexer_int64)\n _left_indexer_unique = _join_i8_wrapper(\n _join.left_join_indexer_unique_int64, with_indexers=False)\n _arrmap = None\n\n __eq__ = _dt_index_cmp('__eq__')\n __ne__ = _dt_index_cmp('__ne__', nat_result=True)\n __lt__ = _dt_index_cmp('__lt__')\n __gt__ = _dt_index_cmp('__gt__')\n __le__ = _dt_index_cmp('__le__')\n __ge__ = _dt_index_cmp('__ge__')\n\n _engine_type = _index.DatetimeEngine\n\n tz = None\n offset = None\n _comparables = 
['name', 'freqstr', 'tz']\n _attributes = ['name', 'freq', 'tz']\n _datetimelike_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',\n 'weekofyear', 'week', 'dayofweek', 'weekday',\n 'dayofyear', 'quarter', 'days_in_month',\n 'daysinmonth', 'date', 'time', 'microsecond',\n 'nanosecond', 'is_month_start', 'is_month_end',\n 'is_quarter_start', 'is_quarter_end', 'is_year_start',\n 'is_year_end', 'tz', 'freq', 'weekday_name',\n 'is_leap_year']\n _is_numeric_dtype = False\n _infer_as_myclass = True\n\n @deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',\n mapping={True: 'infer', False: 'raise'})\n def __new__(cls, data=None,\n freq=None, start=None, end=None, periods=None,\n copy=False, name=None, tz=None,\n verify_integrity=True, normalize=False,\n closed=None, ambiguous='raise', dtype=None, **kwargs):\n\n # This allows to later ensure that the 'copy' parameter is honored:\n if isinstance(data, Index):\n ref_to_data = data._data\n else:\n ref_to_data = data\n\n if name is None and hasattr(data, 'name'):\n name = data.name\n\n dayfirst = kwargs.pop('dayfirst', None)\n yearfirst = kwargs.pop('yearfirst', None)\n\n freq_infer = False\n if not isinstance(freq, DateOffset):\n\n # if a passed freq is None, don't infer automatically\n if freq != 'infer':\n freq = to_offset(freq)\n else:\n freq_infer = True\n freq = None\n\n if periods is not None:\n if is_float(periods):\n periods = int(periods)\n elif not is_integer(periods):\n raise ValueError('Periods must be a number, got %s' %\n str(periods))\n\n if data is None and freq is None:\n raise ValueError(\"Must provide freq argument if no data is \"\n \"supplied\")\n\n # if dtype has an embeded tz, capture it\n if dtype is not None:\n try:\n dtype = DatetimeTZDtype.construct_from_string(dtype)\n dtz = getattr(dtype, 'tz', None)\n if dtz is not None:\n if tz is not None and str(tz) != str(dtz):\n raise ValueError(\"cannot supply both a tz and a dtype\"\n \" with a tz\")\n tz = dtz\n except TypeError:\n pass\n\n if data is None:\n return cls._generate(start, end, periods, name, freq,\n tz=tz, normalize=normalize, closed=closed,\n ambiguous=ambiguous)\n\n if not isinstance(data, (np.ndarray, Index, ABCSeries)):\n if is_scalar(data):\n raise ValueError('DatetimeIndex() must be called with a '\n 'collection of some kind, %s was passed'\n % repr(data))\n # other iterable of some kind\n if not isinstance(data, (list, tuple)):\n data = list(data)\n data = np.asarray(data, dtype='O')\n elif isinstance(data, ABCSeries):\n data = data._values\n\n # data must be Index or np.ndarray here\n if not (is_datetime64_dtype(data) or is_datetimetz(data) or\n is_integer_dtype(data)):\n data = tools.to_datetime(data, dayfirst=dayfirst,\n yearfirst=yearfirst)\n\n if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data):\n\n if isinstance(data, DatetimeIndex):\n if tz is None:\n tz = data.tz\n elif data.tz is None:\n data = data.tz_localize(tz, ambiguous=ambiguous)\n else:\n # the tz's must match\n if str(tz) != str(data.tz):\n msg = ('data is already tz-aware {0}, unable to '\n 'set specified tz: {1}')\n raise TypeError(msg.format(data.tz, tz))\n\n subarr = data.values\n\n if freq is None:\n freq = data.offset\n verify_integrity = False\n else:\n if data.dtype != _NS_DTYPE:\n subarr = tslib.cast_to_nanoseconds(data)\n else:\n subarr = data\n else:\n # must be integer dtype otherwise\n if isinstance(data, Int64Index):\n raise TypeError('cannot convert Int64Index->DatetimeIndex')\n if data.dtype != _INT64_DTYPE:\n data = 
data.astype(np.int64)\n subarr = data.view(_NS_DTYPE)\n\n if isinstance(subarr, DatetimeIndex):\n if tz is None:\n tz = subarr.tz\n else:\n if tz is not None:\n tz = tslib.maybe_get_tz(tz)\n\n if (not isinstance(data, DatetimeIndex) or\n getattr(data, 'tz', None) is None):\n # Convert tz-naive to UTC\n ints = subarr.view('i8')\n subarr = tslib.tz_localize_to_utc(ints, tz,\n ambiguous=ambiguous)\n subarr = subarr.view(_NS_DTYPE)\n\n subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz)\n if dtype is not None:\n if not is_dtype_equal(subarr.dtype, dtype):\n # dtype must be coerced to DatetimeTZDtype above\n if subarr.tz is not None:\n raise ValueError(\"cannot localize from non-UTC data\")\n\n if verify_integrity and len(subarr) > 0:\n if freq is not None and not freq_infer:\n inferred = subarr.inferred_freq\n if inferred != freq.freqstr:\n on_freq = cls._generate(subarr[0], None, len(subarr), None,\n freq, tz=tz, ambiguous=ambiguous)\n if not np.array_equal(subarr.asi8, on_freq.asi8):\n raise ValueError('Inferred frequency {0} from passed '\n 'dates does not conform to passed '\n 'frequency {1}'\n .format(inferred, freq.freqstr))\n\n if freq_infer:\n inferred = subarr.inferred_freq\n if inferred:\n subarr.offset = to_offset(inferred)\n\n return subarr._deepcopy_if_needed(ref_to_data, copy)\n\n @classmethod\n def _generate(cls, start, end, periods, name, offset,\n tz=None, normalize=False, ambiguous='raise', closed=None):\n if com._count_not_none(start, end, periods) != 2:\n raise ValueError('Must specify two of start, end, or periods')\n\n _normalized = True\n\n if start is not None:\n start = Timestamp(start)\n\n if end is not None:\n end = Timestamp(end)\n\n left_closed = False\n right_closed = False\n\n if start is None and end is None:\n if closed is not None:\n raise ValueError(\"Closed has to be None if not both of start\"\n \"and end are defined\")\n\n if closed is None:\n left_closed = True\n right_closed = True\n elif closed == \"left\":\n left_closed = True\n elif closed == \"right\":\n right_closed = True\n else:\n raise ValueError(\"Closed has to be either 'left', 'right' or None\")\n\n try:\n inferred_tz = tools._infer_tzinfo(start, end)\n except:\n raise TypeError('Start and end cannot both be tz-aware with '\n 'different timezones')\n\n inferred_tz = tslib.maybe_get_tz(inferred_tz)\n\n # these may need to be localized\n tz = tslib.maybe_get_tz(tz)\n if tz is not None:\n date = start or end\n if date.tzinfo is not None and hasattr(tz, 'localize'):\n tz = tz.localize(date.replace(tzinfo=None)).tzinfo\n\n if tz is not None and inferred_tz is not None:\n if not inferred_tz == tz:\n raise AssertionError(\"Inferred time zone not equal to passed \"\n \"time zone\")\n\n elif inferred_tz is not None:\n tz = inferred_tz\n\n if start is not None:\n if normalize:\n start = normalize_date(start)\n _normalized = True\n else:\n _normalized = _normalized and start.time() == _midnight\n\n if end is not None:\n if normalize:\n end = normalize_date(end)\n _normalized = True\n else:\n _normalized = _normalized and end.time() == _midnight\n\n if hasattr(offset, 'delta') and offset != offsets.Day():\n if inferred_tz is None and tz is not None:\n # naive dates\n if start is not None and start.tz is None:\n start = start.tz_localize(tz, ambiguous=False)\n\n if end is not None and end.tz is None:\n end = end.tz_localize(tz, ambiguous=False)\n\n if start and end:\n if start.tz is None and end.tz is not None:\n start = start.tz_localize(end.tz, ambiguous=False)\n\n if end.tz is None and 
start.tz is not None:\n end = end.tz_localize(start.tz, ambiguous=False)\n\n if _use_cached_range(offset, _normalized, start, end):\n index = cls._cached_range(start, end, periods=periods,\n offset=offset, name=name)\n else:\n index = _generate_regular_range(start, end, periods, offset)\n\n else:\n\n if tz is not None:\n # naive dates\n if start is not None and start.tz is not None:\n start = start.replace(tzinfo=None)\n\n if end is not None and end.tz is not None:\n end = end.replace(tzinfo=None)\n\n if start and end:\n if start.tz is None and end.tz is not None:\n end = end.replace(tzinfo=None)\n\n if end.tz is None and start.tz is not None:\n start = start.replace(tzinfo=None)\n\n if _use_cached_range(offset, _normalized, start, end):\n index = cls._cached_range(start, end, periods=periods,\n offset=offset, name=name)\n else:\n index = _generate_regular_range(start, end, periods, offset)\n\n if tz is not None and getattr(index, 'tz', None) is None:\n index = tslib.tz_localize_to_utc(_ensure_int64(index), tz,\n ambiguous=ambiguous)\n index = index.view(_NS_DTYPE)\n\n # index is localized datetime64 array -> have to convert\n # start/end as well to compare\n if start is not None:\n start = start.tz_localize(tz).asm8\n if end is not None:\n end = end.tz_localize(tz).asm8\n\n if not left_closed and len(index) and index[0] == start:\n index = index[1:]\n if not right_closed and len(index) and index[-1] == end:\n index = index[:-1]\n index = cls._simple_new(index, name=name, freq=offset, tz=tz)\n return index\n\n @property\n def _box_func(self):\n return lambda x: Timestamp(x, freq=self.offset, tz=self.tz)\n\n def _convert_for_op(self, value):\n \"\"\" Convert value to be insertable to ndarray \"\"\"\n if self._has_same_tz(value):\n return _to_m8(value)\n raise ValueError('Passed item and index have different timezone')\n\n def _local_timestamps(self):\n utc = _utc()\n\n if self.is_monotonic:\n return tslib.tz_convert(self.asi8, utc, self.tz)\n else:\n values = self.asi8\n indexer = values.argsort()\n result = tslib.tz_convert(values.take(indexer), utc, self.tz)\n\n n = len(indexer)\n reverse = np.empty(n, dtype=np.int_)\n reverse.put(indexer, np.arange(n))\n return result.take(reverse)\n\n @classmethod\n def _simple_new(cls, values, name=None, freq=None, tz=None,\n dtype=None, **kwargs):\n \"\"\"\n we require the we have a dtype compat for the values\n if we are passed a non-dtype compat, then coerce using the constructor\n \"\"\"\n\n if not getattr(values, 'dtype', None):\n # empty, but with dtype compat\n if values is None:\n values = np.empty(0, dtype=_NS_DTYPE)\n return cls(values, name=name, freq=freq, tz=tz,\n dtype=dtype, **kwargs)\n values = np.array(values, copy=False)\n\n if is_object_dtype(values):\n return cls(values, name=name, freq=freq, tz=tz,\n dtype=dtype, **kwargs).values\n elif not is_datetime64_dtype(values):\n values = _ensure_int64(values).view(_NS_DTYPE)\n\n result = object.__new__(cls)\n result._data = values\n result.name = name\n result.offset = freq\n result.tz = tslib.maybe_get_tz(tz)\n result._reset_identity()\n return result\n\n @property\n def tzinfo(self):\n \"\"\"\n Alias for tz attribute\n \"\"\"\n return self.tz\n\n @cache_readonly\n def _timezone(self):\n \"\"\" Comparable timezone both for pytz / dateutil\"\"\"\n return tslib.get_timezone(self.tzinfo)\n\n def _has_same_tz(self, other):\n zzone = self._timezone\n\n # vzone sholdn't be None if value is non-datetime like\n if isinstance(other, np.datetime64):\n # convert to Timestamp as np.datetime64 doesn't 
have tz attr\n other = Timestamp(other)\n vzone = tslib.get_timezone(getattr(other, 'tzinfo', '__no_tz__'))\n return zzone == vzone\n\n @classmethod\n def _cached_range(cls, start=None, end=None, periods=None, offset=None,\n name=None):\n if start is None and end is None:\n # I somewhat believe this should never be raised externally and\n # therefore should be a `PandasError` but whatever...\n raise TypeError('Must specify either start or end.')\n if start is not None:\n start = Timestamp(start)\n if end is not None:\n end = Timestamp(end)\n if (start is None or end is None) and periods is None:\n raise TypeError(\n 'Must either specify period or provide both start and end.')\n\n if offset is None:\n # This can't happen with external-facing code, therefore\n # PandasError\n raise TypeError('Must provide offset.')\n\n drc = _daterange_cache\n if offset not in _daterange_cache:\n xdr = generate_range(offset=offset, start=_CACHE_START,\n end=_CACHE_END)\n\n arr = tools.to_datetime(list(xdr), box=False)\n\n cachedRange = DatetimeIndex._simple_new(arr)\n cachedRange.offset = offset\n cachedRange.tz = None\n cachedRange.name = None\n drc[offset] = cachedRange\n else:\n cachedRange = drc[offset]\n\n if start is None:\n if not isinstance(end, Timestamp):\n raise AssertionError('end must be an instance of Timestamp')\n\n end = offset.rollback(end)\n\n endLoc = cachedRange.get_loc(end) + 1\n startLoc = endLoc - periods\n elif end is None:\n if not isinstance(start, Timestamp):\n raise AssertionError('start must be an instance of Timestamp')\n\n start = offset.rollforward(start)\n\n startLoc = cachedRange.get_loc(start)\n endLoc = startLoc + periods\n else:\n if not offset.onOffset(start):\n start = offset.rollforward(start)\n\n if not offset.onOffset(end):\n end = offset.rollback(end)\n\n startLoc = cachedRange.get_loc(start)\n endLoc = cachedRange.get_loc(end) + 1\n\n indexSlice = cachedRange[startLoc:endLoc]\n indexSlice.name = name\n indexSlice.offset = offset\n\n return indexSlice\n\n def _mpl_repr(self):\n # how to represent ourselves to matplotlib\n return tslib.ints_to_pydatetime(self.asi8, self.tz)\n\n @cache_readonly\n def _is_dates_only(self):\n from pandas.formats.format import _is_dates_only\n return _is_dates_only(self.values)\n\n @property\n def _formatter_func(self):\n from pandas.formats.format import _get_format_datetime64\n formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)\n return lambda x: \"'%s'\" % formatter(x, tz=self.tz)\n\n def __reduce__(self):\n\n # we use a special reudce here because we need\n # to simply set the .tz (and not reinterpret it)\n\n d = dict(data=self._data)\n d.update(self._get_attributes_dict())\n return _new_DatetimeIndex, (self.__class__, d), None\n\n def __setstate__(self, state):\n \"\"\"Necessary for making this object picklable\"\"\"\n if isinstance(state, dict):\n super(DatetimeIndex, self).__setstate__(state)\n\n elif isinstance(state, tuple):\n\n # < 0.15 compat\n if len(state) == 2:\n nd_state, own_state = state\n data = np.empty(nd_state[1], dtype=nd_state[2])\n np.ndarray.__setstate__(data, nd_state)\n\n self.name = own_state[0]\n self.offset = own_state[1]\n self.tz = own_state[2]\n\n # provide numpy < 1.7 compat\n if nd_state[2] == 'M8[us]':\n new_state = np.ndarray.__reduce__(data.astype('M8[ns]'))\n np.ndarray.__setstate__(data, new_state[2])\n\n else: # pragma: no cover\n data = np.empty(state)\n np.ndarray.__setstate__(data, state)\n\n self._data = data\n self._reset_identity()\n\n else:\n raise Exception(\"invalid 
pickle state\")\n _unpickle_compat = __setstate__\n\n def _add_datelike(self, other):\n # adding a timedeltaindex to a datetimelike\n if other is tslib.NaT:\n return self._nat_new(box=True)\n raise TypeError(\"cannot add a datelike to a DatetimeIndex\")\n\n def _sub_datelike(self, other):\n # subtract a datetime from myself, yielding a TimedeltaIndex\n from pandas import TimedeltaIndex\n other = Timestamp(other)\n if other is tslib.NaT:\n result = self._nat_new(box=False)\n # require tz compat\n elif not self._has_same_tz(other):\n raise TypeError(\"Timestamp subtraction must have the same \"\n \"timezones or no timezones\")\n else:\n i8 = self.asi8\n result = i8 - other.value\n result = self._maybe_mask_results(result, fill_value=tslib.iNaT)\n return TimedeltaIndex(result, name=self.name, copy=False)\n\n def _maybe_update_attributes(self, attrs):\n \"\"\" Update Index attributes (e.g. freq) depending on op \"\"\"\n freq = attrs.get('freq', None)\n if freq is not None:\n # no need to infer if freq is None\n attrs['freq'] = 'infer'\n return attrs\n\n def _add_delta(self, delta):\n from pandas import TimedeltaIndex\n name = self.name\n\n if isinstance(delta, (Tick, timedelta, np.timedelta64)):\n new_values = self._add_delta_td(delta)\n elif isinstance(delta, TimedeltaIndex):\n new_values = self._add_delta_tdi(delta)\n # update name when delta is Index\n name = com._maybe_match_name(self, delta)\n elif isinstance(delta, DateOffset):\n new_values = self._add_offset(delta).asi8\n else:\n new_values = self.astype('O') + delta\n\n tz = 'UTC' if self.tz is not None else None\n result = DatetimeIndex(new_values, tz=tz, name=name, freq='infer')\n utc = _utc()\n if self.tz is not None and self.tz is not utc:\n result = result.tz_convert(self.tz)\n return result\n\n def _add_offset(self, offset):\n try:\n if self.tz is not None:\n values = self.tz_localize(None)\n else:\n values = self\n result = offset.apply_index(values)\n if self.tz is not None:\n result = result.tz_localize(self.tz)\n return result\n\n except NotImplementedError:\n warnings.warn(\"Non-vectorized DateOffset being applied to Series \"\n \"or DatetimeIndex\", PerformanceWarning)\n return self.astype('O') + offset\n\n def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs):\n from pandas.formats.format import _get_format_datetime64_from_values\n format = _get_format_datetime64_from_values(self, date_format)\n\n return tslib.format_array_from_datetime(self.asi8,\n tz=self.tz,\n format=format,\n na_rep=na_rep)\n\n def to_datetime(self, dayfirst=False):\n return self.copy()\n\n @Appender(_index_shared_docs['astype'])\n def astype(self, dtype, copy=True):\n dtype = pandas_dtype(dtype)\n if is_object_dtype(dtype):\n return self.asobject\n elif is_integer_dtype(dtype):\n return Index(self.values.astype('i8', copy=copy), name=self.name,\n dtype='i8')\n elif is_datetime64_ns_dtype(dtype):\n if self.tz is not None:\n return self.tz_convert('UTC').tz_localize(None)\n elif copy is True:\n return self.copy()\n return self\n elif is_string_dtype(dtype):\n return Index(self.format(), name=self.name, dtype=object)\n elif is_period_dtype(dtype):\n return self.to_period(freq=dtype.freq)\n raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype)\n\n def _get_time_micros(self):\n utc = _utc()\n values = self.asi8\n if self.tz is not None and self.tz is not utc:\n values = self._local_timestamps()\n return tslib.get_time_micros(values)\n\n def to_series(self, keep_tz=False):\n \"\"\"\n Create a Series with both index and values 
equal to the index keys;\n        useful with map for returning an indexer based on an index\n\n        Parameters\n        ----------\n        keep_tz : optional, defaults False.\n            return the data keeping the timezone.\n\n            If keep_tz is True:\n\n              If the timezone is not set, the resulting\n              Series will have a datetime64[ns] dtype.\n\n              Otherwise the Series will have a datetime64[ns, tz] dtype; the\n              tz will be preserved.\n\n            If keep_tz is False:\n\n              Series will have a datetime64[ns] dtype. TZ aware\n              objects will have the tz removed.\n\n        Returns\n        -------\n        Series\n        \"\"\"\n        from pandas import Series\n        return Series(self._to_embed(keep_tz), index=self, name=self.name)\n\n    def _to_embed(self, keep_tz=False):\n        \"\"\"\n        return an array repr of this object, potentially casting to object\n\n        This is for internal compat\n        \"\"\"\n        if keep_tz and self.tz is not None:\n\n            # preserve the tz & copy\n            return self.copy(deep=True)\n\n        return self.values.copy()\n\n    def to_pydatetime(self):\n        \"\"\"\n        Return DatetimeIndex as object ndarray of datetime.datetime objects\n\n        Returns\n        -------\n        datetimes : ndarray\n        \"\"\"\n        return tslib.ints_to_pydatetime(self.asi8, tz=self.tz)\n\n    def to_period(self, freq=None):\n        \"\"\"\n        Cast to PeriodIndex at a particular frequency\n        \"\"\"\n        from pandas.tseries.period import PeriodIndex\n\n        if freq is None:\n            freq = self.freqstr or self.inferred_freq\n\n            if freq is None:\n                msg = (\"You must pass a freq argument as \"\n                       \"current index has none.\")\n                raise ValueError(msg)\n\n            freq = get_period_alias(freq)\n\n        return PeriodIndex(self.values, name=self.name, freq=freq, tz=self.tz)\n\n    def snap(self, freq='S'):\n        \"\"\"\n        Snap time stamps to nearest occurring frequency\n\n        \"\"\"\n        # Superdumb, punting on any optimizing\n        freq = to_offset(freq)\n\n        snapped = np.empty(len(self), dtype=_NS_DTYPE)\n\n        for i, v in enumerate(self):\n            s = v\n            if not freq.onOffset(s):\n                t0 = freq.rollback(s)\n                t1 = freq.rollforward(s)\n                if abs(s - t0) < abs(t1 - s):\n                    s = t0\n                else:\n                    s = t1\n            snapped[i] = s\n\n        # we know it conforms; skip check\n        return DatetimeIndex(snapped, freq=freq, verify_integrity=False)\n\n    def union(self, other):\n        \"\"\"\n        Specialized union for DatetimeIndex objects. If combining\n        overlapping ranges with the same DateOffset, this will be much\n        faster than Index.union\n\n        Parameters\n        ----------\n        other : DatetimeIndex or array-like\n\n        Returns\n        -------\n        y : Index or DatetimeIndex\n        \"\"\"\n        self._assert_can_do_setop(other)\n        if not isinstance(other, DatetimeIndex):\n            try:\n                other = DatetimeIndex(other)\n            except TypeError:\n                pass\n\n        this, other = self._maybe_utc_convert(other)\n\n        if this._can_fast_union(other):\n            return this._fast_union(other)\n        else:\n            result = Index.union(this, other)\n            if isinstance(result, DatetimeIndex):\n                result.tz = this.tz\n                if (result.freq is None and\n                        (this.freq is not None or other.freq is not None)):\n                    result.offset = to_offset(result.inferred_freq)\n            return result\n\n    def to_perioddelta(self, freq):\n        \"\"\"\n        Calculates TimedeltaIndex of difference between index\n        values and index converted to PeriodIndex at specified\n        freq. Used for vectorized offsets\n\n        .. 
versionadded:: 0.17.0\n\n Parameters\n ----------\n freq : Period frequency\n\n Returns\n -------\n y : TimedeltaIndex\n \"\"\"\n return to_timedelta(self.asi8 - self.to_period(freq)\n .to_timestamp().asi8)\n\n def union_many(self, others):\n \"\"\"\n A bit of a hack to accelerate unioning a collection of indexes\n \"\"\"\n this = self\n\n for other in others:\n if not isinstance(this, DatetimeIndex):\n this = Index.union(this, other)\n continue\n\n if not isinstance(other, DatetimeIndex):\n try:\n other = DatetimeIndex(other)\n except TypeError:\n pass\n\n this, other = this._maybe_utc_convert(other)\n\n if this._can_fast_union(other):\n this = this._fast_union(other)\n else:\n tz = this.tz\n this = Index.union(this, other)\n if isinstance(this, DatetimeIndex):\n this.tz = tz\n\n if this.freq is None:\n this.offset = to_offset(this.inferred_freq)\n return this\n\n def append(self, other):\n \"\"\"\n Append a collection of Index options together\n\n Parameters\n ----------\n other : Index or list/tuple of indices\n\n Returns\n -------\n appended : Index\n \"\"\"\n name = self.name\n to_concat = [self]\n\n if isinstance(other, (list, tuple)):\n to_concat = to_concat + list(other)\n else:\n to_concat.append(other)\n\n for obj in to_concat:\n if isinstance(obj, Index) and obj.name != name:\n name = None\n break\n\n to_concat = self._ensure_compat_concat(to_concat)\n to_concat, factory = _process_concat_data(to_concat, name)\n\n return factory(to_concat)\n\n def join(self, other, how='left', level=None, return_indexers=False):\n \"\"\"\n See Index.join\n \"\"\"\n if (not isinstance(other, DatetimeIndex) and len(other) > 0 and\n other.inferred_type not in ('floating', 'mixed-integer',\n 'mixed-integer-float', 'mixed')):\n try:\n other = DatetimeIndex(other)\n except (TypeError, ValueError):\n pass\n\n this, other = self._maybe_utc_convert(other)\n return Index.join(this, other, how=how, level=level,\n return_indexers=return_indexers)\n\n def _maybe_utc_convert(self, other):\n this = self\n if isinstance(other, DatetimeIndex):\n if self.tz is not None:\n if other.tz is None:\n raise TypeError('Cannot join tz-naive with tz-aware '\n 'DatetimeIndex')\n elif other.tz is not None:\n raise TypeError('Cannot join tz-naive with tz-aware '\n 'DatetimeIndex')\n\n if self.tz != other.tz:\n this = self.tz_convert('UTC')\n other = other.tz_convert('UTC')\n return this, other\n\n def _wrap_joined_index(self, joined, other):\n name = self.name if self.name == other.name else None\n if (isinstance(other, DatetimeIndex) and\n self.offset == other.offset and\n self._can_fast_union(other)):\n joined = self._shallow_copy(joined)\n joined.name = name\n return joined\n else:\n tz = getattr(other, 'tz', None)\n return self._simple_new(joined, name, tz=tz)\n\n def _can_fast_union(self, other):\n if not isinstance(other, DatetimeIndex):\n return False\n\n offset = self.offset\n\n if offset is None or offset != other.offset:\n return False\n\n if not self.is_monotonic or not other.is_monotonic:\n return False\n\n if len(self) == 0 or len(other) == 0:\n return True\n\n # to make our life easier, \"sort\" the two ranges\n if self[0] <= other[0]:\n left, right = self, other\n else:\n left, right = other, self\n\n right_start = right[0]\n left_end = left[-1]\n\n # Only need to \"adjoin\", not overlap\n try:\n return (right_start == left_end + offset) or right_start in left\n except (ValueError):\n\n # if we are comparing an offset that does not propagate timezones\n # this will raise\n return False\n\n def _fast_union(self, 
other):\n if len(other) == 0:\n return self.view(type(self))\n\n if len(self) == 0:\n return other.view(type(self))\n\n # to make our life easier, \"sort\" the two ranges\n if self[0] <= other[0]:\n left, right = self, other\n else:\n left, right = other, self\n\n left_start, left_end = left[0], left[-1]\n right_end = right[-1]\n\n if not self.offset._should_cache():\n # concatenate dates\n if left_end < right_end:\n loc = right.searchsorted(left_end, side='right')\n right_chunk = right.values[loc:]\n dates = _concat._concat_compat((left.values, right_chunk))\n return self._shallow_copy(dates)\n else:\n return left\n else:\n return type(self)(start=left_start,\n end=max(left_end, right_end),\n freq=left.offset)\n\n def __iter__(self):\n \"\"\"\n Return an iterator over the boxed values\n\n Returns\n -------\n Timestamps : ndarray\n \"\"\"\n\n # convert in chunks of 10k for efficiency\n data = self.asi8\n l = len(self)\n chunksize = 10000\n chunks = int(l / chunksize) + 1\n for i in range(chunks):\n start_i = i * chunksize\n end_i = min((i + 1) * chunksize, l)\n converted = tslib.ints_to_pydatetime(data[start_i:end_i],\n tz=self.tz, freq=self.freq,\n box=True)\n for v in converted:\n yield v\n\n def _wrap_union_result(self, other, result):\n name = self.name if self.name == other.name else None\n if self.tz != other.tz:\n raise ValueError('Passed item and index have different timezone')\n return self._simple_new(result, name=name, freq=None, tz=self.tz)\n\n def intersection(self, other):\n \"\"\"\n Specialized intersection for DatetimeIndex objects. May be much faster\n than Index.intersection\n\n Parameters\n ----------\n other : DatetimeIndex or array-like\n\n Returns\n -------\n y : Index or DatetimeIndex\n \"\"\"\n self._assert_can_do_setop(other)\n if not isinstance(other, DatetimeIndex):\n try:\n other = DatetimeIndex(other)\n except (TypeError, ValueError):\n pass\n result = Index.intersection(self, other)\n if isinstance(result, DatetimeIndex):\n if result.freq is None:\n result.offset = to_offset(result.inferred_freq)\n return result\n\n elif (other.offset is None or self.offset is None or\n other.offset != self.offset or\n not other.offset.isAnchored() or\n (not self.is_monotonic or not other.is_monotonic)):\n result = Index.intersection(self, other)\n if isinstance(result, DatetimeIndex):\n if result.freq is None:\n result.offset = to_offset(result.inferred_freq)\n return result\n\n if len(self) == 0:\n return self\n if len(other) == 0:\n return other\n # to make our life easier, \"sort\" the two ranges\n if self[0] <= other[0]:\n left, right = self, other\n else:\n left, right = other, self\n\n end = min(left[-1], right[-1])\n start = right[0]\n\n if end < start:\n return type(self)(data=[])\n else:\n lslice = slice(*left.slice_locs(start, end))\n left_chunk = left.values[lslice]\n return self._shallow_copy(left_chunk)\n\n def _parsed_string_to_bounds(self, reso, parsed):\n \"\"\"\n Calculate datetime bounds for parsed time string and its resolution.\n\n Parameters\n ----------\n reso : Resolution\n Resolution provided by parsed string.\n parsed : datetime\n Datetime from parsed string.\n\n Returns\n -------\n lower, upper: pd.Timestamp\n\n \"\"\"\n if reso == 'year':\n return (Timestamp(datetime(parsed.year, 1, 1), tz=self.tz),\n Timestamp(datetime(parsed.year, 12, 31, 23,\n 59, 59, 999999), tz=self.tz))\n elif reso == 'month':\n d = tslib.monthrange(parsed.year, parsed.month)[1]\n return (Timestamp(datetime(parsed.year, parsed.month, 1),\n tz=self.tz),\n 
Timestamp(datetime(parsed.year, parsed.month, d, 23,\n 59, 59, 999999), tz=self.tz))\n elif reso == 'quarter':\n qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead\n d = tslib.monthrange(parsed.year, qe)[1] # at end of month\n return (Timestamp(datetime(parsed.year, parsed.month, 1),\n tz=self.tz),\n Timestamp(datetime(parsed.year, qe, d, 23, 59,\n 59, 999999), tz=self.tz))\n elif reso == 'day':\n st = datetime(parsed.year, parsed.month, parsed.day)\n return (Timestamp(st, tz=self.tz),\n Timestamp(Timestamp(st + offsets.Day(),\n tz=self.tz).value - 1))\n elif reso == 'hour':\n st = datetime(parsed.year, parsed.month, parsed.day,\n hour=parsed.hour)\n return (Timestamp(st, tz=self.tz),\n Timestamp(Timestamp(st + offsets.Hour(),\n tz=self.tz).value - 1))\n elif reso == 'minute':\n st = datetime(parsed.year, parsed.month, parsed.day,\n hour=parsed.hour, minute=parsed.minute)\n return (Timestamp(st, tz=self.tz),\n Timestamp(Timestamp(st + offsets.Minute(),\n tz=self.tz).value - 1))\n elif reso == 'second':\n st = datetime(parsed.year, parsed.month, parsed.day,\n hour=parsed.hour, minute=parsed.minute,\n second=parsed.second)\n return (Timestamp(st, tz=self.tz),\n Timestamp(Timestamp(st + offsets.Second(),\n tz=self.tz).value - 1))\n elif reso == 'microsecond':\n st = datetime(parsed.year, parsed.month, parsed.day,\n parsed.hour, parsed.minute, parsed.second,\n parsed.microsecond)\n return (Timestamp(st, tz=self.tz), Timestamp(st, tz=self.tz))\n else:\n raise KeyError\n\n def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):\n is_monotonic = self.is_monotonic\n if ((reso in ['day', 'hour', 'minute'] and\n not (self._resolution < Resolution.get_reso(reso) or\n not is_monotonic)) or\n (reso == 'second' and\n not (self._resolution <= Resolution.RESO_SEC or\n not is_monotonic))):\n # These resolution/monotonicity validations came from GH3931,\n # GH3452 and GH2369.\n raise KeyError\n\n if reso == 'microsecond':\n # _partial_date_slice doesn't allow microsecond resolution, but\n # _parsed_string_to_bounds allows it.\n raise KeyError\n\n t1, t2 = self._parsed_string_to_bounds(reso, parsed)\n stamps = self.asi8\n\n if is_monotonic:\n\n # we are out of range\n if (len(stamps) and ((use_lhs and t1.value < stamps[0] and\n t2.value < stamps[0]) or\n ((use_rhs and t1.value > stamps[-1] and\n t2.value > stamps[-1])))):\n raise KeyError\n\n # a monotonic (sorted) series can be sliced\n left = stamps.searchsorted(\n t1.value, side='left') if use_lhs else None\n right = stamps.searchsorted(\n t2.value, side='right') if use_rhs else None\n\n return slice(left, right)\n\n lhs_mask = (stamps >= t1.value) if use_lhs else True\n rhs_mask = (stamps <= t2.value) if use_rhs else True\n\n # try to find a the dates\n return (lhs_mask & rhs_mask).nonzero()[0]\n\n def _possibly_promote(self, other):\n if other.inferred_type == 'date':\n other = DatetimeIndex(other)\n return self, other\n\n def get_value(self, series, key):\n \"\"\"\n Fast lookup of value from 1-dimensional ndarray. 
Only use this if you\n know what you're doing\n \"\"\"\n\n if isinstance(key, datetime):\n\n # needed to localize naive datetimes\n if self.tz is not None:\n key = Timestamp(key, tz=self.tz)\n\n return self.get_value_maybe_box(series, key)\n\n if isinstance(key, time):\n locs = self.indexer_at_time(key)\n return series.take(locs)\n\n try:\n return _maybe_box(self, Index.get_value(self, series, key),\n series, key)\n except KeyError:\n try:\n loc = self._get_string_slice(key)\n return series[loc]\n except (TypeError, ValueError, KeyError):\n pass\n\n try:\n return self.get_value_maybe_box(series, key)\n except (TypeError, ValueError, KeyError):\n raise KeyError(key)\n\n def get_value_maybe_box(self, series, key):\n # needed to localize naive datetimes\n if self.tz is not None:\n key = Timestamp(key, tz=self.tz)\n elif not isinstance(key, Timestamp):\n key = Timestamp(key)\n values = self._engine.get_value(_values_from_object(series),\n key, tz=self.tz)\n return _maybe_box(self, values, series, key)\n\n def get_loc(self, key, method=None, tolerance=None):\n \"\"\"\n Get integer location for requested label\n\n Returns\n -------\n loc : int\n \"\"\"\n if tolerance is not None:\n # try converting tolerance now, so errors don't get swallowed by\n # the try/except clauses below\n tolerance = self._convert_tolerance(tolerance)\n\n if isinstance(key, datetime):\n # needed to localize naive datetimes\n key = Timestamp(key, tz=self.tz)\n return Index.get_loc(self, key, method, tolerance)\n\n if isinstance(key, time):\n if method is not None:\n raise NotImplementedError('cannot yet lookup inexact labels '\n 'when key is a time object')\n return self.indexer_at_time(key)\n\n try:\n return Index.get_loc(self, key, method, tolerance)\n except (KeyError, ValueError, TypeError):\n try:\n return self._get_string_slice(key)\n except (TypeError, KeyError, ValueError):\n pass\n\n try:\n stamp = Timestamp(key, tz=self.tz)\n return Index.get_loc(self, stamp, method, tolerance)\n except (KeyError, ValueError):\n raise KeyError(key)\n\n def _maybe_cast_slice_bound(self, label, side, kind):\n \"\"\"\n If label is a string, cast it to datetime according to resolution.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'ix', 'loc', 'getitem'}\n\n Returns\n -------\n label : object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n\n \"\"\"\n assert kind in ['ix', 'loc', 'getitem', None]\n\n if is_float(label) or isinstance(label, time) or is_integer(label):\n self._invalid_indexer('slice', label)\n\n if isinstance(label, compat.string_types):\n freq = getattr(self, 'freqstr',\n getattr(self, 'inferred_freq', None))\n _, parsed, reso = parse_time_string(label, freq)\n bounds = self._parsed_string_to_bounds(reso, parsed)\n return bounds[0 if side == 'left' else 1]\n else:\n return label\n\n def _get_string_slice(self, key, use_lhs=True, use_rhs=True):\n freq = getattr(self, 'freqstr',\n getattr(self, 'inferred_freq', None))\n _, parsed, reso = parse_time_string(key, freq)\n loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs,\n use_rhs=use_rhs)\n return loc\n\n def slice_indexer(self, start=None, end=None, step=None, kind=None):\n \"\"\"\n Return indexer for specified label slice.\n Index.slice_indexer, customized to handle time slicing.\n\n In addition to functionality provided by Index.slice_indexer, does the\n following:\n\n - if both `start` and `end` are instances of `datetime.time`, it\n invokes `indexer_between_time`\n - if `start` and `end` 
are both either string or None, perform\n          value-based selection in non-monotonic cases.\n\n        \"\"\"\n        # For historical reasons DatetimeIndex supports slices between two\n        # instances of datetime.time as if it were applying a slice mask to\n        # an array of (self.hour, self.minute, self.seconds, self.microsecond).\n        if isinstance(start, time) and isinstance(end, time):\n            if step is not None and step != 1:\n                raise ValueError('Must have step size of 1 with time slices')\n            return self.indexer_between_time(start, end)\n\n        if isinstance(start, time) or isinstance(end, time):\n            raise KeyError('Cannot mix time and non-time slice keys')\n\n        try:\n            return Index.slice_indexer(self, start, end, step, kind=kind)\n        except KeyError:\n            # For historical reasons DatetimeIndex by default supports\n            # value-based partial (aka string) slices on non-monotonic arrays,\n            # let's try that.\n            if ((start is None or isinstance(start, compat.string_types)) and\n                    (end is None or isinstance(end, compat.string_types))):\n                mask = True\n                if start is not None:\n                    start_casted = self._maybe_cast_slice_bound(\n                        start, 'left', kind)\n                    mask = start_casted <= self\n\n                if end is not None:\n                    end_casted = self._maybe_cast_slice_bound(\n                        end, 'right', kind)\n                    mask = (self <= end_casted) & mask\n\n                indexer = mask.nonzero()[0][::step]\n                if len(indexer) == len(self):\n                    return slice(None)\n                else:\n                    return indexer\n            else:\n                raise\n\n    # alias to offset\n    def _get_freq(self):\n        return self.offset\n\n    def _set_freq(self, value):\n        self.offset = value\n    freq = property(fget=_get_freq, fset=_set_freq,\n                    doc=\"get/set the frequency of the Index\")\n\n    year = _field_accessor('year', 'Y', \"The year of the datetime\")\n    month = _field_accessor('month', 'M',\n                            \"The month as January=1, December=12\")\n    day = _field_accessor('day', 'D', \"The days of the datetime\")\n    hour = _field_accessor('hour', 'h', \"The hours of the datetime\")\n    minute = _field_accessor('minute', 'm', \"The minutes of the datetime\")\n    second = _field_accessor('second', 's', \"The seconds of the datetime\")\n    microsecond = _field_accessor('microsecond', 'us',\n                                  \"The microseconds of the datetime\")\n    nanosecond = _field_accessor('nanosecond', 'ns',\n                                 \"The nanoseconds of the datetime\")\n    weekofyear = _field_accessor('weekofyear', 'woy',\n                                 \"The week ordinal of the year\")\n    week = weekofyear\n    dayofweek = _field_accessor('dayofweek', 'dow',\n                                \"The day of the week with Monday=0, Sunday=6\")\n    weekday = dayofweek\n\n    weekday_name = _field_accessor(\n        'weekday_name',\n        'weekday_name',\n        \"The name of day in a week (ex: Friday)\\n\\n.. versionadded:: 0.18.1\")\n\n    dayofyear = _field_accessor('dayofyear', 'doy',\n                                \"The ordinal day of the year\")\n    quarter = _field_accessor('quarter', 'q', \"The quarter of the date\")\n    days_in_month = _field_accessor(\n        'days_in_month',\n        'dim',\n        \"The number of days in the month\\n\\n.. 
versionadded:: 0.16.0\")\n daysinmonth = days_in_month\n is_month_start = _field_accessor(\n 'is_month_start',\n 'is_month_start',\n \"Logical indicating if first day of month (defined by frequency)\")\n is_month_end = _field_accessor(\n 'is_month_end',\n 'is_month_end',\n \"Logical indicating if last day of month (defined by frequency)\")\n is_quarter_start = _field_accessor(\n 'is_quarter_start',\n 'is_quarter_start',\n \"Logical indicating if first day of quarter (defined by frequency)\")\n is_quarter_end = _field_accessor(\n 'is_quarter_end',\n 'is_quarter_end',\n \"Logical indicating if last day of quarter (defined by frequency)\")\n is_year_start = _field_accessor(\n 'is_year_start',\n 'is_year_start',\n \"Logical indicating if first day of year (defined by frequency)\")\n is_year_end = _field_accessor(\n 'is_year_end',\n 'is_year_end',\n \"Logical indicating if last day of year (defined by frequency)\")\n is_leap_year = _field_accessor(\n 'is_leap_year',\n 'is_leap_year',\n \"Logical indicating if the date belongs to a leap year\")\n\n @property\n def time(self):\n \"\"\"\n Returns numpy array of datetime.time. The time part of the Timestamps.\n \"\"\"\n return self._maybe_mask_results(_algos.arrmap_object(\n self.asobject.values,\n lambda x: np.nan if x is tslib.NaT else x.time()))\n\n @property\n def date(self):\n \"\"\"\n Returns numpy array of python datetime.date objects (namely, the date\n part of Timestamps without timezone information).\n \"\"\"\n return self._maybe_mask_results(_algos.arrmap_object(\n self.asobject.values, lambda x: x.date()))\n\n def normalize(self):\n \"\"\"\n Return DatetimeIndex with times to midnight. Length is unaltered\n\n Returns\n -------\n normalized : DatetimeIndex\n \"\"\"\n new_values = tslib.date_normalize(self.asi8, self.tz)\n return DatetimeIndex(new_values, freq='infer', name=self.name,\n tz=self.tz)\n\n @Substitution(klass='DatetimeIndex', value='key')\n @Appender(_shared_docs['searchsorted'])\n def searchsorted(self, key, side='left', sorter=None):\n if isinstance(key, (np.ndarray, Index)):\n key = np.array(key, dtype=_NS_DTYPE, copy=False)\n else:\n key = _to_m8(key, tz=self.tz)\n\n return self.values.searchsorted(key, side=side)\n\n def is_type_compatible(self, typ):\n return typ == self.inferred_type or typ == 'datetime'\n\n @property\n def inferred_type(self):\n # b/c datetime is represented as microseconds since the epoch, make\n # sure we can't have ambiguous indexing\n return 'datetime64'\n\n @cache_readonly\n def dtype(self):\n if self.tz is None:\n return _NS_DTYPE\n return DatetimeTZDtype('ns', self.tz)\n\n @property\n def is_all_dates(self):\n return True\n\n @cache_readonly\n def is_normalized(self):\n \"\"\"\n Returns True if all of the dates are at midnight (\"no time\")\n \"\"\"\n return tslib.dates_normalized(self.asi8, self.tz)\n\n @cache_readonly\n def _resolution(self):\n return period.resolution(self.asi8, self.tz)\n\n def equals(self, other):\n \"\"\"\n Determines if two Index objects contain the same elements.\n \"\"\"\n if self.is_(other):\n return True\n\n if (not hasattr(other, 'inferred_type') or\n other.inferred_type != 'datetime64'):\n if self.offset is not None:\n return False\n try:\n other = DatetimeIndex(other)\n except:\n return False\n\n if self._has_same_tz(other):\n return np.array_equal(self.asi8, other.asi8)\n return False\n\n def insert(self, loc, item):\n \"\"\"\n Make new Index inserting new item at location\n\n Parameters\n ----------\n loc : int\n item : object\n if not either a Python datetime 
or a numpy integer-like, returned\n Index dtype will be object rather than datetime.\n\n Returns\n -------\n new_index : Index\n \"\"\"\n\n freq = None\n\n if isinstance(item, (datetime, np.datetime64)):\n self._assert_can_do_op(item)\n if not self._has_same_tz(item):\n raise ValueError(\n 'Passed item and index have different timezone')\n # check freq can be preserved on edge cases\n if self.size and self.freq is not None:\n if ((loc == 0 or loc == -len(self)) and\n item + self.freq == self[0]):\n freq = self.freq\n elif (loc == len(self)) and item - self.freq == self[-1]:\n freq = self.freq\n item = _to_m8(item, tz=self.tz)\n try:\n new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)],\n self[loc:].asi8))\n if self.tz is not None:\n new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz)\n return DatetimeIndex(new_dates, name=self.name, freq=freq,\n tz=self.tz)\n\n except (AttributeError, TypeError):\n\n # fall back to object index\n if isinstance(item, compat.string_types):\n return self.asobject.insert(loc, item)\n raise TypeError(\n \"cannot insert DatetimeIndex with incompatible label\")\n\n def delete(self, loc):\n \"\"\"\n Make a new DatetimeIndex with passed location(s) deleted.\n\n Parameters\n ----------\n loc: int, slice or array of ints\n Indicate which sub-arrays to remove.\n\n Returns\n -------\n new_index : DatetimeIndex\n \"\"\"\n new_dates = np.delete(self.asi8, loc)\n\n freq = None\n if is_integer(loc):\n if loc in (0, -len(self), -1, len(self) - 1):\n freq = self.freq\n else:\n if is_list_like(loc):\n loc = lib.maybe_indices_to_slice(\n _ensure_int64(np.array(loc)), len(self))\n if isinstance(loc, slice) and loc.step in (1, None):\n if (loc.start in (0, None) or loc.stop in (len(self), None)):\n freq = self.freq\n\n if self.tz is not None:\n new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz)\n return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)\n\n def tz_convert(self, tz):\n \"\"\"\n Convert tz-aware DatetimeIndex from one time zone to another (using\n pytz/dateutil)\n\n Parameters\n ----------\n tz : string, pytz.timezone, dateutil.tz.tzfile or None\n Time zone for time. Corresponding timestamps would be converted to\n time zone of the TimeSeries.\n None will remove timezone holding UTC time.\n\n Returns\n -------\n normalized : DatetimeIndex\n\n Raises\n ------\n TypeError\n If DatetimeIndex is tz-naive.\n \"\"\"\n tz = tslib.maybe_get_tz(tz)\n\n if self.tz is None:\n # tz naive, use tz_localize\n raise TypeError('Cannot convert tz-naive timestamps, use '\n 'tz_localize to localize')\n\n # No conversion since timestamps are all UTC to begin with\n return self._shallow_copy(tz=tz)\n\n @deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',\n mapping={True: 'infer', False: 'raise'})\n def tz_localize(self, tz, ambiguous='raise', errors='raise'):\n \"\"\"\n Localize tz-naive DatetimeIndex to given time zone (using\n pytz/dateutil), or remove timezone from tz-aware DatetimeIndex\n\n Parameters\n ----------\n tz : string, pytz.timezone, dateutil.tz.tzfile or None\n Time zone for time. 
Corresponding timestamps would be converted to\n time zone of the TimeSeries.\n None will remove timezone holding local time.\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False signifies a\n non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times\n errors : 'raise', 'coerce', default 'raise'\n - 'raise' will raise a NonExistentTimeError if a timestamp is not\n valid in the specified timezone (e.g. due to a transition from\n or to DST time)\n - 'coerce' will return NaT if the timestamp can not be converted\n into the specified timezone\n\n .. versionadded:: 0.19.0\n\n infer_dst : boolean, default False (DEPRECATED)\n Attempt to infer fall dst-transition hours based on order\n\n Returns\n -------\n localized : DatetimeIndex\n\n Raises\n ------\n TypeError\n If the DatetimeIndex is tz-aware and tz is not None.\n \"\"\"\n if self.tz is not None:\n if tz is None:\n new_dates = tslib.tz_convert(self.asi8, 'UTC', self.tz)\n else:\n raise TypeError(\"Already tz-aware, use tz_convert to convert.\")\n else:\n tz = tslib.maybe_get_tz(tz)\n # Convert to UTC\n\n new_dates = tslib.tz_localize_to_utc(self.asi8, tz,\n ambiguous=ambiguous,\n errors=errors)\n new_dates = new_dates.view(_NS_DTYPE)\n return self._shallow_copy(new_dates, tz=tz)\n\n def indexer_at_time(self, time, asof=False):\n \"\"\"\n Select values at particular time of day (e.g. 9:30AM)\n\n Parameters\n ----------\n time : datetime.time or string\n\n Returns\n -------\n values_at_time : TimeSeries\n \"\"\"\n from dateutil.parser import parse\n\n if asof:\n raise NotImplementedError(\"'asof' argument is not supported\")\n\n if isinstance(time, compat.string_types):\n time = parse(time).time()\n\n if time.tzinfo:\n # TODO\n raise NotImplementedError(\"argument 'time' with timezone info is \"\n \"not supported\")\n\n time_micros = self._get_time_micros()\n micros = _time_to_micros(time)\n return (micros == time_micros).nonzero()[0]\n\n def indexer_between_time(self, start_time, end_time, include_start=True,\n include_end=True):\n \"\"\"\n Select values between particular times of day (e.g., 9:00-9:30AM).\n\n Return values of the index between two times. 
If start_time or\n end_time are strings then tseries.tools.to_time is used to convert to\n a time object.\n\n Parameters\n ----------\n start_time, end_time : datetime.time, str\n datetime.time or string in appropriate format (\"%H:%M\", \"%H%M\",\n \"%I:%M%p\", \"%I%M%p\", \"%H:%M:%S\", \"%H%M%S\", \"%I:%M:%S%p\",\n \"%I%M%S%p\")\n include_start : boolean, default True\n include_end : boolean, default True\n\n Returns\n -------\n values_between_time : TimeSeries\n \"\"\"\n start_time = to_time(start_time)\n end_time = to_time(end_time)\n time_micros = self._get_time_micros()\n start_micros = _time_to_micros(start_time)\n end_micros = _time_to_micros(end_time)\n\n if include_start and include_end:\n lop = rop = operator.le\n elif include_start:\n lop = operator.le\n rop = operator.lt\n elif include_end:\n lop = operator.lt\n rop = operator.le\n else:\n lop = rop = operator.lt\n\n if start_time <= end_time:\n join_op = operator.and_\n else:\n join_op = operator.or_\n\n mask = join_op(lop(start_micros, time_micros),\n rop(time_micros, end_micros))\n\n return mask.nonzero()[0]\n\n def to_julian_date(self):\n \"\"\"\n Convert DatetimeIndex to Float64Index of Julian Dates.\n 0 Julian date is noon January 1, 4713 BC.\n http://en.wikipedia.org/wiki/Julian_day\n \"\"\"\n\n # http://mysite.verizon.net/aesir_research/date/jdalg2.htm\n year = self.year\n month = self.month\n day = self.day\n testarr = month < 3\n year[testarr] -= 1\n month[testarr] += 12\n return Float64Index(day +\n np.fix((153 * month - 457) / 5) +\n 365 * year +\n np.floor(year / 4) -\n np.floor(year / 100) +\n np.floor(year / 400) +\n 1721118.5 +\n (self.hour +\n self.minute / 60.0 +\n self.second / 3600.0 +\n self.microsecond / 3600.0 / 1e+6 +\n self.nanosecond / 3600.0 / 1e+9\n ) / 24.0)\n\n\nDatetimeIndex._add_numeric_methods_disabled()\nDatetimeIndex._add_logical_methods_disabled()\nDatetimeIndex._add_datetimelike_methods()\n\n\ndef _generate_regular_range(start, end, periods, offset):\n if isinstance(offset, Tick):\n stride = offset.nanos\n if periods is None:\n b = Timestamp(start).value\n # cannot just use e = Timestamp(end) + 1 because arange breaks when\n # stride is too large, see GH10887\n e = (b + (Timestamp(end).value - b) // stride * stride +\n stride // 2 + 1)\n # end.tz == start.tz by this point due to _generate implementation\n tz = start.tz\n elif start is not None:\n b = Timestamp(start).value\n e = b + np.int64(periods) * stride\n tz = start.tz\n elif end is not None:\n e = Timestamp(end).value + stride\n b = e - np.int64(periods) * stride\n tz = end.tz\n else:\n raise ValueError(\"at least 'start' or 'end' should be specified \"\n \"if a 'period' is given.\")\n\n data = np.arange(b, e, stride, dtype=np.int64)\n data = DatetimeIndex._simple_new(data, None, tz=tz)\n else:\n if isinstance(start, Timestamp):\n start = start.to_pydatetime()\n\n if isinstance(end, Timestamp):\n end = end.to_pydatetime()\n\n xdr = generate_range(start=start, end=end,\n periods=periods, offset=offset)\n\n dates = list(xdr)\n # utc = len(dates) > 0 and dates[0].tzinfo is not None\n data = tools.to_datetime(dates)\n\n return data\n\n\ndef date_range(start=None, end=None, periods=None, freq='D', tz=None,\n normalize=False, name=None, closed=None, **kwargs):\n \"\"\"\n Return a fixed frequency datetime index, with day (calendar) as the default\n frequency\n\n Parameters\n ----------\n start : string or datetime-like, default None\n Left bound for generating dates\n end : string or datetime-like, default None\n Right bound for generating dates\n periods : integer or None, default None\n If None, must specify start and end\n freq : string or DateOffset, default 'D' (calendar daily)\n Frequency strings can have multiples, e.g. '5H'\n tz : string or None\n Time zone name for returning localized DatetimeIndex, for example\n Asia/Hong_Kong\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range\n name : str, default None\n Name of the resulting index\n closed : string or None, default None\n Make the interval closed with respect to the given frequency to\n the 'left', 'right', or both sides (None)\n\n Notes\n -----\n 2 of start, end, or periods must be specified\n\n To learn more about the frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n Returns\n -------\n rng : DatetimeIndex\n \"\"\"\n return DatetimeIndex(start=start, end=end, periods=periods,\n freq=freq, tz=tz, normalize=normalize, name=name,\n closed=closed, **kwargs)\n\n\ndef bdate_range(start=None, end=None, periods=None, freq='B', tz=None,\n normalize=True, name=None, closed=None, **kwargs):\n \"\"\"\n Return a fixed frequency datetime index, with business day as the default\n frequency\n\n Parameters\n ----------\n start : string or datetime-like, default None\n Left bound for generating dates\n end : string or datetime-like, default None\n Right bound for generating dates\n periods : integer or None, default None\n If None, must specify start and end\n freq : string or DateOffset, default 'B' (business daily)\n Frequency strings can have multiples, e.g. '5H'\n tz : string or None\n Time zone name for returning localized DatetimeIndex, for example\n Asia/Shanghai\n normalize : bool, default True\n Normalize start/end dates to midnight before generating date range\n name : str, default None\n Name for the resulting index\n closed : string or None, default None\n Make the interval closed with respect to the given frequency to\n the 'left', 'right', or both sides (None)\n\n Notes\n -----\n 2 of start, end, or periods must be specified\n\n To learn more about the frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n Returns\n -------\n rng : DatetimeIndex\n \"\"\"\n\n return DatetimeIndex(start=start, end=end, periods=periods,\n freq=freq, tz=tz, normalize=normalize, name=name,\n closed=closed, **kwargs)\n\n\ndef cdate_range(start=None, end=None, periods=None, freq='C', tz=None,\n normalize=True, name=None, closed=None, **kwargs):\n \"\"\"\n **EXPERIMENTAL** Return a fixed frequency datetime index, with\n CustomBusinessDay as the default frequency\n\n .. warning:: EXPERIMENTAL\n\n The CustomBusinessDay class is not officially supported and the API is\n likely to change in future versions. Use this at your own risk.\n\n Parameters\n ----------\n start : string or datetime-like, default None\n Left bound for generating dates\n end : string or datetime-like, default None\n Right bound for generating dates\n periods : integer or None, default None\n If None, must specify start and end\n freq : string or DateOffset, default 'C' (CustomBusinessDay)\n Frequency strings can have multiples, e.g. '5H'\n tz : string or None\n Time zone name for returning localized DatetimeIndex, for example\n Asia/Shanghai\n normalize : bool, default True\n Normalize start/end dates to midnight before generating date range\n name : str, default None\n Name for the resulting index\n weekmask : str, default 'Mon Tue Wed Thu Fri'\n weekmask of valid business days, passed to ``numpy.busdaycalendar``\n holidays : list\n list/array of dates to exclude from the set of valid business days,\n passed to ``numpy.busdaycalendar``\n closed : string or None, default None\n Make the interval closed with respect to the given frequency to\n the 'left', 'right', or both sides (None)\n\n Notes\n -----\n 2 of start, end, or periods must be specified\n\n To learn more about the frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n Returns\n -------\n rng : DatetimeIndex\n \"\"\"\n\n if freq == 'C':\n holidays = kwargs.pop('holidays', [])\n weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')\n freq = CDay(holidays=holidays, weekmask=weekmask)\n return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,\n tz=tz, normalize=normalize, name=name,\n closed=closed, **kwargs)\n\n\ndef _to_m8(key, tz=None):\n \"\"\"\n Timestamp-like => dt64\n \"\"\"\n if not isinstance(key, Timestamp):\n # this also converts strings\n key = Timestamp(key, tz=tz)\n\n return np.int64(tslib.pydt_to_i8(key)).view(_NS_DTYPE)\n\n\n_CACHE_START = Timestamp(datetime(1950, 1, 1))\n_CACHE_END = Timestamp(datetime(2030, 1, 1))\n\n_daterange_cache = {}\n\n\ndef _naive_in_cache_range(start, end):\n if start is None or end is None:\n return False\n else:\n if start.tzinfo is not None or end.tzinfo is not None:\n return False\n return _in_range(start, end, _CACHE_START, _CACHE_END)\n\n\ndef _in_range(start, end, rng_start, rng_end):\n return start > rng_start and end < rng_end\n\n\ndef _use_cached_range(offset, _normalized, start, end):\n return (offset._should_cache() and\n not (offset._normalize_cache and not _normalized) and\n _naive_in_cache_range(start, end))\n\n\ndef _time_to_micros(time):\n seconds = time.hour * 60 * 60 + 60 * time.minute + time.second\n return 1000000 * seconds + time.microsecond\n\n\ndef _process_concat_data(to_concat, name):\n klass = Index\n kwargs = {}\n concat = np.concatenate\n\n all_dti = True\n need_utc_convert = False\n has_naive = False\n tz = None\n\n for x in to_concat:\n if not isinstance(x, DatetimeIndex):\n all_dti = False\n else:\n if tz is None:\n tz = x.tz\n\n if x.tz is None:\n has_naive = True\n\n if x.tz != tz:\n need_utc_convert = True\n tz = 'UTC'\n\n if all_dti:\n need_obj_convert = False\n if has_naive and tz is not None:\n need_obj_convert = True\n\n if need_obj_convert:\n to_concat = [x.asobject.values for x in to_concat]\n\n else:\n if need_utc_convert:\n to_concat = [x.tz_convert('UTC').values for x in to_concat]\n else:\n to_concat = [x.values for x in to_concat]\n\n # well, technically not a \"class\" anymore...oh well\n klass = DatetimeIndex._simple_new\n kwargs = {'tz': tz}\n concat = _concat._concat_compat\n else:\n for i, x in enumerate(to_concat):\n if isinstance(x, DatetimeIndex):\n to_concat[i] = x.asobject.values\n elif isinstance(x, Index):\n to_concat[i] = x.values\n\n factory_func = lambda x: klass(concat(x), name=name, **kwargs)\n return to_concat, factory_func\n" ]
[ [ "numpy.asarray", "pandas.core.index.Index.intersection", "pandas.tslib.get_timezone", "pandas.types.common.is_datetime64_ns_dtype", "pandas.util.decorators.deprecate_kwarg", "pandas.tslib.dates_normalized", "pandas.tseries.offsets.generate_range", "pandas.formats.format._get_format_datetime64_from_values", "pandas.formats.format._is_dates_only", "pandas.types.common.is_bool_dtype", "pandas.tseries.base.DatetimeIndexOpsMixin._join_i8_wrapper", "pandas.tseries.frequencies.Resolution.get_reso", "pandas.tseries.offsets.CDay", "numpy.floor", "pandas.types.dtypes.DatetimeTZDtype.construct_from_string", "pandas.types.common.is_object_dtype", "pandas.tslib.get_date_name_field", "pandas.tslib.tz_convert", "numpy.array", "pandas.tseries.tools.to_time", "pandas.types.missing.isnull", "pandas.types.common.is_integer", "pandas.tslib.format_array_from_datetime", "pandas.tseries.offsets.Hour", "pandas.core.common._count_not_none", "numpy.int64", "pandas.types.common.is_datetimetz", "numpy.ndarray.__setstate__", "pandas.tseries.tools.parse_time_string", "pandas.tslib.maybe_get_tz", "pandas.core.index.Index.join", "pandas.tseries.offsets.Minute", "pandas.util.decorators.Appender", "pandas.tslib.cast_to_nanoseconds", "pandas.core.index.Index.get_loc", "numpy.delete", "pandas.types.common.is_scalar", "pandas.tseries.offsets.Second", "pandas.types.dtypes.DatetimeTZDtype", "pandas.formats.format._get_format_datetime64", "pandas.types.common.is_datetime64_dtype", "pandas.types.common.pandas_dtype", "pandas.tslib.get_start_end_field", "pandas.tseries.tools.to_datetime", "pandas.core.common._maybe_match_name", "pandas.tslib.get_time_micros", "pandas.core.common._maybe_box", "pandas.types.common.is_list_like", "pandas.core.index.Index.get_value", "pandas.tslib.date_normalize", "pandas.lib.Timestamp", "pandas.types.common.is_dtype_equal", "pandas.tslib.tz_localize_to_utc", "pandas.types.concat._concat_compat", "pandas.core.index.Index.union", "pandas.types.common.is_integer_dtype", "pandas.types.common.is_float", "numpy.arange", "pandas.types.common.is_string_dtype", "numpy.fix", "pandas._period.resolution", "pandas.tseries.tools.normalize_date", "pandas.tslib.monthrange", "pandas.tslib.get_date_field", "numpy.array_equal", "pandas.tslib.pydt_to_i8", "pandas.tseries.tools._infer_tzinfo", "pandas.types.common._ensure_int64", "pandas.types.common.is_period_dtype", "pandas.tslib.ints_to_pydatetime", "pandas.tseries.frequencies.get_period_alias", "pandas.core.common._values_from_object", "pandas.tseries.frequencies.to_offset", "pandas.core.index.Index.slice_indexer", "pandas.TimedeltaIndex", "pandas.tseries.offsets.Day", "pandas.tseries.period.PeriodIndex", "numpy.empty", "pandas.util.decorators.Substitution", "pandas.core.index.Index" ] ]
aryankhatana01/Currency-Denomination-Prediction
[ "1cc80817af7a126a9f752c634db121408bcb56ab" ]
[ "VideoCap.py" ]
[ "# import the necessary packages\nfrom keras.preprocessing.image import img_to_array\nfrom keras.models import load_model\nimport tensorflow as tf\nimport numpy as np\nimport imutils\nimport time\nimport cv2\nimport os\nimport pyttsx3\n\nframeWidth= 640 # CAMERA RESOLUTION\nframeHeight = 480\nbrightness = 180\nthreshold = 0.90 # PROBABLITY THRESHOLD\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\n# FOR SPEAKING OUT LOUD\nengine = pyttsx3.init('sapi5')\nvoices= engine.getProperty('voices') #getting details of current voice\nengine.setProperty('voice', voices[0].id)\n\ncap = cv2.VideoCapture(0)\ncap.set(3, frameWidth)\ncap.set(4, frameHeight)\ncap.set(10, brightness)\n\n\nmodel = tf.keras.models.load_model('keras_model11.h5')\n\ndef grayscale(img):\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n return img\n\ndef preprocessing(img):\n img = grayscale(img)\n img = (img.astype(np.float32) / 127.0) - 1 #NORMALIZATION\n return img\n\ndef speak(message):\n engine.say(message)\n engine.runAndWait()\n\nclasses = [\"10\", \"20\", \"50\", \"100\", \"200\", \"500\", \"2000\"]\n\nwhile True:\n\n # READ IMAGE\n _, imgOrignal = cap.read()\n\n # PROCESS IMAGE\n img = np.asarray(imgOrignal)\n img = cv2.resize(img, (224, 224))\n img = preprocessing(img)\n cv2.imshow(\"Processed Image\", img)\n img = img.reshape(1, 224, 224, 3)\n cv2.putText(imgOrignal, \"CLASS: \" , (20, 35), font, 0.75, (0, 0, 255), 2, cv2.LINE_AA)\n cv2.putText(imgOrignal, \"PROBABILITY: \", (20, 75), font, 0.75, (0, 0, 255), 2, cv2.LINE_AA)\n # PREDICT IMAGE\n predictions = model.predict(img)\n result = np.argmax(predictions)\n classIndex = classes[result]\n probabilityValue = np.amax(predictions)\n if probabilityValue > threshold:\n #print(getCalssName(classIndex))\n cv2.putText(imgOrignal,classIndex, (120, 35), font, 0.75, (0, 0, 255), 2, cv2.LINE_AA)\n cv2.putText(imgOrignal, str(round(probabilityValue*100,2) )+\"%\", (180, 75), font, 0.75, (0, 0, 255), 2, cv2.LINE_AA)\n speak(classIndex + 'rupees')\n cv2.imshow(\"Result\", imgOrignal)\n\n if cv2.waitKey(1) and 0xFF == ord('q'):\n break\n" ]
[ [ "numpy.amax", "numpy.argmax", "numpy.asarray", "tensorflow.keras.models.load_model" ] ]
kangyifei/CloudSimPy
[ "45912e7ea35086b67941624102e400cb22e549ab" ]
[ "playground/Non_DAG_with_Energy_rllib/algorithm/DeepJS/DRL.py" ]
[ "import tensorflow as tf\nimport numpy as np\n\ntf.enable_eager_execution()\n\nclass Node(object):\n def __init__(self, observation, action, reward, clock):\n self.observation = observation\n self.action = action\n self.reward = reward\n self.clock = clock\n\n\nclass RLAlgorithm(object):\n def __init__(self, agent, reward_giver, features_normalize_func, features_extract_func):\n self.agent = agent\n self.reward_giver = reward_giver\n self.features_normalize_func = features_normalize_func\n self.features_extract_func = features_extract_func\n self.current_trajectory = []\n\n def extract_features(self, valid_pairs):\n features = []\n for machine, task in valid_pairs:\n features.append([machine.cpu, machine.memory] + self.features_extract_func(task))\n features = self.features_normalize_func(features)\n return features\n\n def __call__(self, cluster, clock):\n machines = cluster.machines\n tasks = cluster.tasks_which_has_waiting_instance\n all_candidates = []\n\n for machine in machines:\n for task in tasks:\n if machine.accommodate(task):\n all_candidates.append((machine, task))\n if len(all_candidates) == 0:\n self.current_trajectory.append(Node(None, None, self.reward_giver.get_reward(), clock))\n return None, None\n else:\n features = self.extract_features(all_candidates)\n features = tf.convert_to_tensor(features, dtype=np.float32)\n logits = self.agent.brain(features)\n pair_index = tf.squeeze(tf.multinomial(logits, num_samples=1), axis=1).numpy()[0]\n\n node = Node(features, pair_index, 0, clock)\n self.current_trajectory.append(node)\n\n return all_candidates[pair_index]" ]
[ [ "tensorflow.enable_eager_execution", "tensorflow.multinomial", "tensorflow.convert_to_tensor" ] ]
xiaonanzzz/easy-deep-learning-pytorch
[ "a3b6566ce0e73f2cbea1007e8883d2ffa2282829" ]
[ "easydl/datasets/cub.py" ]
[ "import torchvision\nimport os\nimport pandas as pd\nfrom easydl.datasets import ImageLoader\nimport numpy as np\nfrom torchvision.transforms import ToTensor, Resize, Normalize\n\n\n_default_image_transformer = torchvision.transforms.Compose([\n Resize((224, 224)),\n ToTensor(),\n Normalize(0.45, 0.22), # simple version from https://pytorch.org/vision/stable/models.html\n])\n\nclass CUBirdsHelper(object):\n def __init__(self, root, *args, **kwargs):\n super(CUBirdsHelper, self).__init__(*args, **kwargs)\n self.root = root\n self.image_folder = os.path.join(self.root, 'CUB_200_2011', 'images')\n image_list = os.path.join(self.root, 'CUB_200_2011', 'images.txt')\n meta_df = pd.read_csv(image_list, sep=' ', names=['image_id', 'image_path'], header=None)\n\n image_class_labels = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'image_class_labels.txt'),\n sep=' ', names=['image_id', 'label'])\n meta_df = meta_df.merge(image_class_labels, on='image_id')\n train_test_split = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'train_test_split.txt'),\n sep=' ', names=['image_id', 'is_training_img'])\n meta_df = meta_df.merge(train_test_split, on='image_id')\n self.meta_df = meta_df\n\n\nclass Cub2011MetricLearningDS(CUBirdsHelper, ImageLoader):\n\n def __init__(self, root, *args, split='train', image_transform=_default_image_transformer, **kwargs):\n super(Cub2011MetricLearningDS, self).__init__(root, *args, image_transform=image_transform, **kwargs)\n self.split = split\n\n if self.split == 'train':\n self.data = self.meta_df[self.meta_df['label'].isin(np.arange(1, 100 + 1))]\n elif self.split == 'test':\n self.data = self.meta_df[self.meta_df['label'].isin(np.arange(100+1, 200 + 1))]\n else:\n raise ValueError('wrong split mode, only accept train/test')\n self.data.reset_index()\n print('cub 2011 metric learning dataset data size', self.data.shape)\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n sample = self.data.iloc[idx]\n path = os.path.join(self.image_folder, sample['image_path'])\n target = sample['label'] - 1\n\n return self.load_image(path), target\n\nif __name__ == '__main__':\n ds = Cub2011MetricLearningDS('/Users/xiaonzha/data/CUB_200_2011', split='test')\n print(ds[0])" ]
[ [ "pandas.read_csv", "numpy.arange" ] ]
PhilJd/addons
[ "758d8838090c24914ee74b88bd24ee02f7e68850" ]
[ "tensorflow_addons/activations/sparsemax_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport pytest\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_addons.activations import sparsemax\nfrom tensorflow_addons.utils import test_utils\n\ntest_obs = 17\n\n\ndef _np_sparsemax(z):\n z = z - np.mean(z, axis=1)[:, np.newaxis]\n\n # sort z\n z_sorted = np.sort(z, axis=1)[:, ::-1]\n\n # calculate k(z)\n z_cumsum = np.cumsum(z_sorted, axis=1)\n k = np.arange(1, z.shape[1] + 1)\n z_check = 1 + k * z_sorted > z_cumsum\n # use argmax to get the index by row as .nonzero() doesn't\n # take an axis argument. np.argmax return the first index, but the last\n # index is required here, use np.flip to get the last index and\n # `z.shape[axis]` to compensate for np.flip afterwards.\n k_z = z.shape[1] - np.argmax(z_check[:, ::-1], axis=1)\n\n # calculate tau(z)\n tau_sum = z_cumsum[np.arange(0, z.shape[0]), k_z - 1]\n tau_z = ((tau_sum - 1) / k_z).reshape(-1, 1)\n\n # calculate p\n return np.maximum(0, z - tau_z)\n\n\[email protected](\"dtype\", [\"float32\", \"float64\"])\ndef test_sparsemax_against_numpy_axis(dtype):\n \"\"\"check sparsemax kernel against numpy.\"\"\"\n random = np.random.RandomState(1)\n\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n\n tf_sparsemax_out = sparsemax(z.astype(dtype), axis=0).numpy()\n np_sparsemax = np.transpose(_np_sparsemax(np.transpose(z))).astype(dtype)\n\n test_utils.assert_allclose_according_to_type(\n np_sparsemax, tf_sparsemax_out, half_atol=5e-3\n )\n assert np_sparsemax.shape == tf_sparsemax_out.shape\n\n\[email protected](\"dtype\", [\"float32\", \"float64\"])\ndef test_sparsemax_against_numpy_low_rank(dtype):\n \"\"\"check sparsemax kernel against numpy.\"\"\"\n random = np.random.RandomState(1)\n\n z = random.uniform(low=-3, high=3, size=(10))\n\n tf_sparsemax_out = sparsemax(z.astype(dtype)).numpy()\n np_sparsemax = np.reshape(_np_sparsemax(np.reshape(z, [1, 10])), [10]).astype(dtype)\n\n test_utils.assert_allclose_according_to_type(\n np_sparsemax, tf_sparsemax_out, half_atol=5e-3\n )\n assert np_sparsemax.shape == tf_sparsemax_out.shape\n\n\n@test_utils.run_all_with_types([\"float32\", \"float64\"])\n@test_utils.run_all_in_graph_and_eager_modes\nclass SparsemaxTest(tf.test.TestCase):\n def _tf_sparsemax(self, z, dtype, **kwargs):\n tf_sparsemax_op = sparsemax(z.astype(dtype), **kwargs)\n tf_sparsemax_out = self.evaluate(tf_sparsemax_op)\n\n return tf_sparsemax_op, tf_sparsemax_out\n\n def test_sparsemax_against_numpy(self, dtype=None):\n \"\"\"check sparsemax kernel against numpy.\"\"\"\n random = np.random.RandomState(1)\n\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n\n tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype)\n np_sparsemax = _np_sparsemax(z).astype(dtype)\n\n self.assertAllCloseAccordingToType(np_sparsemax, tf_sparsemax_out)\n self.assertShapeEqual(np_sparsemax, tf_sparsemax_op)\n\n 
def test_sparsemax_against_numpy_high_rank(self, dtype=None):\n \"\"\"check sparsemax kernel against numpy.\"\"\"\n random = np.random.RandomState(1)\n\n z = random.uniform(low=-3, high=3, size=(test_obs, test_obs, 10))\n\n tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype)\n np_sparsemax = np.reshape(\n _np_sparsemax(np.reshape(z, [test_obs * test_obs, 10])),\n [test_obs, test_obs, 10],\n ).astype(dtype)\n\n self.assertAllCloseAccordingToType(np_sparsemax, tf_sparsemax_out)\n self.assertShapeEqual(np_sparsemax, tf_sparsemax_op)\n\n def test_sparsemax_of_nan(self, dtype=None):\n \"\"\"check sparsemax transfers nan.\"\"\"\n z_nan = np.asarray(\n [[0, np.nan, 0], [0, np.nan, np.nan], [np.nan, np.nan, np.nan],]\n ).astype(dtype)\n\n _, tf_sparsemax_nan = self._tf_sparsemax(z_nan, dtype)\n self.assertAllEqual(\n [\n [np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan],\n ],\n tf_sparsemax_nan,\n )\n\n def test_sparsemax_of_inf(self, dtype=None):\n \"\"\"check sparsemax is infinity safe.\"\"\"\n z_neg = np.asarray(\n [[0, -np.inf, 0], [0, -np.inf, -np.inf], [-np.inf, -np.inf, -np.inf],]\n ).astype(dtype)\n z_pos = np.asarray(\n [[0, np.inf, 0], [0, np.inf, np.inf], [np.inf, np.inf, np.inf]]\n ).astype(dtype)\n z_mix = np.asarray(\n [[0, np.inf, 0], [0, np.inf, -np.inf], [-np.inf, np.inf, -np.inf]]\n ).astype(dtype)\n\n _, tf_sparsemax_neg = self._tf_sparsemax(z_neg, dtype)\n self.assertAllEqual(\n [[0.5, 0, 0.5], [1, 0, 0], [np.nan, np.nan, np.nan]], tf_sparsemax_neg\n )\n\n _, tf_sparsemax_pos = self._tf_sparsemax(z_pos, dtype)\n self.assertAllEqual(\n [\n [np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan],\n ],\n tf_sparsemax_pos,\n )\n\n _, tf_sparsemax_mix = self._tf_sparsemax(z_mix, dtype)\n self.assertAllEqual(\n [\n [np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan],\n ],\n tf_sparsemax_mix,\n )\n\n def test_sparsemax_of_zero(self, dtype=None):\n \"\"\"check sparsemax proposition 1, part 1.\"\"\"\n z = np.zeros((1, 10))\n\n tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype)\n np_sparsemax = np.ones_like(z, dtype=dtype) / z.size\n\n self.assertAllCloseAccordingToType(np_sparsemax, tf_sparsemax_out)\n self.assertShapeEqual(np_sparsemax, tf_sparsemax_op)\n\n def test_sparsemax_of_to_inf(self, dtype=None):\n \"\"\"check sparsemax proposition 1, part 2.\"\"\"\n random = np.random.RandomState(4)\n\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n\n # assume |A(z)| = 1, as z is continues random\n z_sort_arg = np.argsort(z, axis=1)[:, ::-1]\n z_sort = np.sort(z, axis=-1)[:, ::-1]\n gamma_z = z_sort[:, 0] - z_sort[:, 1]\n epsilon = (0.99 * gamma_z * 1).reshape(-1, 1)\n\n # construct the expected 1_A(z) array\n p_expected = np.zeros((test_obs, 10), dtype=dtype)\n p_expected[np.arange(0, test_obs), z_sort_arg[:, 0]] = 1\n\n tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax((1 / epsilon) * z, dtype)\n\n self.assertAllCloseAccordingToType(p_expected, tf_sparsemax_out)\n self.assertShapeEqual(p_expected, tf_sparsemax_op)\n\n def test_constant_add(self, dtype=None):\n \"\"\"check sparsemax proposition 2.\"\"\"\n random = np.random.RandomState(5)\n\n z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)\n c = random.uniform(low=-3, high=3, size=(test_obs, 1)).astype(dtype)\n\n _, tf_sparsemax_zpc = self._tf_sparsemax(z + c, dtype)\n\n _, tf_sparsemax_z = self._tf_sparsemax(z, dtype)\n\n self.assertAllCloseAccordingToType(\n tf_sparsemax_zpc, tf_sparsemax_z, 
half_atol=5e-3\n )\n\n\[email protected](\"dtype\", [\"float32\", \"float64\"])\ndef test_two_dimentional(dtype):\n \"\"\"check two dimentation sparsemax case.\"\"\"\n t = np.linspace(-2, 2, test_obs, dtype=dtype)\n z = np.vstack([t, np.zeros(test_obs, dtype=dtype)]).T\n\n tf_sparsemax_out = sparsemax(z.astype(dtype)).numpy()\n\n p0_expected = np.select([t < -1, t <= 1, t > 1], [0, (t + 1) / 2, 1])\n\n test_utils.assert_allclose_according_to_type(p0_expected, tf_sparsemax_out[:, 0])\n test_utils.assert_allclose_according_to_type(\n 1 - p0_expected, tf_sparsemax_out[:, 1]\n )\n assert z.shape == tf_sparsemax_out.shape\n\n\[email protected](\"dtype\", [np.float32, np.float64])\ndef test_diffrence(dtype):\n \"\"\"check sparsemax proposition 4.\"\"\"\n random = np.random.RandomState(7)\n\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n p = sparsemax(z.astype(dtype)).numpy()\n\n etol = {np.float32: 1e-6, np.float64: 1e-9}[dtype]\n\n for val in range(0, test_obs):\n for i in range(0, 10):\n for j in range(0, 10):\n # check condition, the obesite pair will be checked anyway\n if z[val, i] > z[val, j]:\n continue\n\n assert 0 <= p[val, j] - p[val, i] <= z[val, j] - z[val, i] + etol\n\n\[email protected](\"dtype\", [\"float32\", \"float64\"])\ndef test_permutation(dtype):\n \"\"\"check sparsemax proposition 3.\"\"\"\n random = np.random.RandomState(6)\n\n z = random.uniform(low=-3, high=3, size=(test_obs, 10))\n p = sparsemax(z.astype(dtype)).numpy()\n\n for i in range(test_obs):\n per = random.permutation(10)\n\n tf_sparsemax_out = sparsemax(z[i, per].reshape(1, -1).astype(dtype))\n p_expected = p[i, per].reshape(1, -1)\n\n test_utils.assert_allclose_according_to_type(\n p_expected, tf_sparsemax_out, half_atol=5e-3\n )\n assert p_expected.shape == tf_sparsemax_out.shape\n\n\[email protected](\"dtype\", [\"float32\", \"float64\"])\ndef test_gradient_against_estimate(dtype):\n \"\"\"check sparsemax Rop, against estimated Rop.\"\"\"\n random = np.random.RandomState(9)\n\n # sparsemax is not a smooth function so gradient estimation is only\n # possible for float64.\n if dtype != \"float64\":\n return\n\n z = random.uniform(low=-1, high=1, size=(test_obs, 10)).astype(dtype)\n\n (jacob_sym,), (jacob_num,) = tf.test.compute_gradient(\n lambda logits: sparsemax(logits), [z], delta=1e-6\n )\n np.testing.assert_allclose(jacob_sym, jacob_num)\n" ]
[ [ "numpy.sort", "numpy.cumsum", "numpy.transpose", "numpy.zeros", "numpy.reshape", "numpy.ones_like", "numpy.argsort", "numpy.argmax", "numpy.asarray", "numpy.arange", "numpy.random.RandomState", "numpy.select", "numpy.maximum", "numpy.testing.assert_allclose", "numpy.linspace", "numpy.mean" ] ]
gopi231091/mmdetection3d
[ "1b2e64cd75c8d1c238c61a3bc1e3c62a7d403b53" ]
[ "mmdet3d/models/dense_heads/train_mixins.py" ]
[ "import numpy as np\nimport torch\n\nfrom mmdet3d.core import limit_period\nfrom mmdet.core import images_to_levels, multi_apply\n\n\nclass AnchorTrainMixin(object):\n \"\"\"Mixin class for target assigning of dense heads.\"\"\"\n\n def anchor_target_3d(self,\n anchor_list,\n gt_bboxes_list,\n input_metas,\n gt_bboxes_ignore_list=None,\n gt_labels_list=None,\n label_channels=1,\n num_classes=1,\n sampling=True):\n \"\"\"Compute regression and classification targets for anchors.\n\n Args:\n anchor_list (list[list]): Multi level anchors of each image.\n gt_bboxes_list (list[:obj:`BaseInstance3DBoxes`]): Ground truth\n bboxes of each image.\n input_metas (list[dict]): Meta info of each image.\n gt_bboxes_ignore_list (None | list): Ignore list of gt bboxes.\n gt_labels_list (list[torch.Tensor]): Gt labels of batches.\n label_channels (int): The channel of labels.\n num_classes (int): The number of classes.\n sampling (bool): Whether to sample anchors.\n\n Returns:\n tuple (list, list, list, list, list, list, int, int):\n Anchor targets, including labels, label weights,\n bbox targets, bbox weights, direction targets,\n direction weights, number of postive anchors and\n number of negative anchors.\n \"\"\"\n num_imgs = len(input_metas)\n assert len(anchor_list) == num_imgs\n\n if isinstance(anchor_list[0][0], list):\n # sizes of anchors are different\n # anchor number of a single level\n num_level_anchors = [\n sum([anchor.size(0) for anchor in anchors])\n for anchors in anchor_list[0]\n ]\n for i in range(num_imgs):\n anchor_list[i] = anchor_list[i][0]\n else:\n # anchor number of multi levels\n num_level_anchors = [\n anchors.view(-1, self.box_code_size).size(0)\n for anchors in anchor_list[0]\n ]\n # concat all level anchors and flags to a single tensor\n for i in range(num_imgs):\n anchor_list[i] = torch.cat(anchor_list[i])\n\n # compute targets for each image\n if gt_bboxes_ignore_list is None:\n gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n if gt_labels_list is None:\n gt_labels_list = [None for _ in range(num_imgs)]\n\n (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,\n all_dir_targets, all_dir_weights, pos_inds_list,\n neg_inds_list) = multi_apply(\n self.anchor_target_3d_single,\n anchor_list,\n gt_bboxes_list,\n gt_bboxes_ignore_list,\n gt_labels_list,\n input_metas,\n label_channels=label_channels,\n num_classes=num_classes,\n sampling=sampling)\n\n # no valid anchors\n if any([labels is None for labels in all_labels]):\n return None\n # sampled anchors of all images\n num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\n num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])\n # split targets to a list w.r.t. 
multiple levels\n labels_list = images_to_levels(all_labels, num_level_anchors)\n label_weights_list = images_to_levels(all_label_weights,\n num_level_anchors)\n bbox_targets_list = images_to_levels(all_bbox_targets,\n num_level_anchors)\n bbox_weights_list = images_to_levels(all_bbox_weights,\n num_level_anchors)\n dir_targets_list = images_to_levels(all_dir_targets, num_level_anchors)\n dir_weights_list = images_to_levels(all_dir_weights, num_level_anchors)\n return (labels_list, label_weights_list, bbox_targets_list,\n bbox_weights_list, dir_targets_list, dir_weights_list,\n num_total_pos, num_total_neg)\n\n def anchor_target_3d_single(self,\n anchors,\n gt_bboxes,\n gt_bboxes_ignore,\n gt_labels,\n input_meta,\n label_channels=1,\n num_classes=1,\n sampling=True):\n \"\"\"Compute targets of anchors in single batch.\n\n Args:\n anchors (torch.Tensor): Concatenated multi-level anchor.\n gt_bboxes (:obj:`BaseInstance3DBoxes`): Gt bboxes.\n gt_bboxes_ignore (torch.Tensor): Ignored gt bboxes.\n gt_labels (torch.Tensor): Gt class labels.\n input_meta (dict): Meta info of each image.\n label_channels (int): The channel of labels.\n num_classes (int): The number of classes.\n sampling (bool): Whether to sample anchors.\n\n Returns:\n tuple[torch.Tensor]: Anchor targets.\n \"\"\"\n if isinstance(self.bbox_assigner,\n list) and (not isinstance(anchors, list)):\n feat_size = anchors.size(0) * anchors.size(1) * anchors.size(2)\n rot_angles = anchors.size(-2)\n assert len(self.bbox_assigner) == anchors.size(-3)\n (total_labels, total_label_weights, total_bbox_targets,\n total_bbox_weights, total_dir_targets, total_dir_weights,\n total_pos_inds, total_neg_inds) = [], [], [], [], [], [], [], []\n current_anchor_num = 0\n for i, assigner in enumerate(self.bbox_assigner):\n current_anchors = anchors[..., i, :, :].reshape(\n -1, self.box_code_size)\n current_anchor_num += current_anchors.size(0)\n if self.assign_per_class:\n gt_per_cls = (gt_labels == i)\n anchor_targets = self.anchor_target_single_assigner(\n assigner, current_anchors, gt_bboxes[gt_per_cls, :],\n gt_bboxes_ignore, gt_labels[gt_per_cls], input_meta,\n num_classes, sampling)\n else:\n anchor_targets = self.anchor_target_single_assigner(\n assigner, current_anchors, gt_bboxes, gt_bboxes_ignore,\n gt_labels, input_meta, num_classes, sampling)\n\n (labels, label_weights, bbox_targets, bbox_weights,\n dir_targets, dir_weights, pos_inds, neg_inds) = anchor_targets\n total_labels.append(labels.reshape(feat_size, 1, rot_angles))\n total_label_weights.append(\n label_weights.reshape(feat_size, 1, rot_angles))\n total_bbox_targets.append(\n bbox_targets.reshape(feat_size, 1, rot_angles,\n anchors.size(-1)))\n total_bbox_weights.append(\n bbox_weights.reshape(feat_size, 1, rot_angles,\n anchors.size(-1)))\n total_dir_targets.append(\n dir_targets.reshape(feat_size, 1, rot_angles))\n total_dir_weights.append(\n dir_weights.reshape(feat_size, 1, rot_angles))\n total_pos_inds.append(pos_inds)\n total_neg_inds.append(neg_inds)\n\n total_labels = torch.cat(total_labels, dim=-2).reshape(-1)\n total_label_weights = torch.cat(\n total_label_weights, dim=-2).reshape(-1)\n total_bbox_targets = torch.cat(\n total_bbox_targets, dim=-3).reshape(-1, anchors.size(-1))\n total_bbox_weights = torch.cat(\n total_bbox_weights, dim=-3).reshape(-1, anchors.size(-1))\n total_dir_targets = torch.cat(\n total_dir_targets, dim=-2).reshape(-1)\n total_dir_weights = torch.cat(\n total_dir_weights, dim=-2).reshape(-1)\n total_pos_inds = torch.cat(total_pos_inds, 
dim=0).reshape(-1)\n total_neg_inds = torch.cat(total_neg_inds, dim=0).reshape(-1)\n return (total_labels, total_label_weights, total_bbox_targets,\n total_bbox_weights, total_dir_targets, total_dir_weights,\n total_pos_inds, total_neg_inds)\n elif isinstance(self.bbox_assigner, list) and isinstance(\n anchors, list):\n # class-aware anchors with different feature map sizes\n assert len(self.bbox_assigner) == len(anchors), \\\n 'The number of bbox assigners and anchors should be the same.'\n (total_labels, total_label_weights, total_bbox_targets,\n total_bbox_weights, total_dir_targets, total_dir_weights,\n total_pos_inds, total_neg_inds) = [], [], [], [], [], [], [], []\n current_anchor_num = 0\n for i, assigner in enumerate(self.bbox_assigner):\n current_anchors = anchors[i]\n current_anchor_num += current_anchors.size(0)\n if self.assign_per_class:\n gt_per_cls = (gt_labels == i)\n anchor_targets = self.anchor_target_single_assigner(\n assigner, current_anchors, gt_bboxes[gt_per_cls, :],\n gt_bboxes_ignore, gt_labels[gt_per_cls], input_meta,\n num_classes, sampling)\n else:\n anchor_targets = self.anchor_target_single_assigner(\n assigner, current_anchors, gt_bboxes, gt_bboxes_ignore,\n gt_labels, input_meta, num_classes, sampling)\n\n (labels, label_weights, bbox_targets, bbox_weights,\n dir_targets, dir_weights, pos_inds, neg_inds) = anchor_targets\n total_labels.append(labels)\n total_label_weights.append(label_weights)\n total_bbox_targets.append(\n bbox_targets.reshape(-1, anchors[i].size(-1)))\n total_bbox_weights.append(\n bbox_weights.reshape(-1, anchors[i].size(-1)))\n total_dir_targets.append(dir_targets)\n total_dir_weights.append(dir_weights)\n total_pos_inds.append(pos_inds)\n total_neg_inds.append(neg_inds)\n\n total_labels = torch.cat(total_labels, dim=0)\n total_label_weights = torch.cat(total_label_weights, dim=0)\n total_bbox_targets = torch.cat(total_bbox_targets, dim=0)\n total_bbox_weights = torch.cat(total_bbox_weights, dim=0)\n total_dir_targets = torch.cat(total_dir_targets, dim=0)\n total_dir_weights = torch.cat(total_dir_weights, dim=0)\n total_pos_inds = torch.cat(total_pos_inds, dim=0)\n total_neg_inds = torch.cat(total_neg_inds, dim=0)\n return (total_labels, total_label_weights, total_bbox_targets,\n total_bbox_weights, total_dir_targets, total_dir_weights,\n total_pos_inds, total_neg_inds)\n else:\n return self.anchor_target_single_assigner(self.bbox_assigner,\n anchors, gt_bboxes,\n gt_bboxes_ignore,\n gt_labels, input_meta,\n num_classes, sampling)\n\n def anchor_target_single_assigner(self,\n bbox_assigner,\n anchors,\n gt_bboxes,\n gt_bboxes_ignore,\n gt_labels,\n input_meta,\n num_classes=1,\n sampling=True):\n \"\"\"Assign anchors and encode positive anchors.\n\n Args:\n bbox_assigner (BaseAssigner): assign positive and negative boxes.\n anchors (torch.Tensor): Concatenated multi-level anchor.\n gt_bboxes (:obj:`BaseInstance3DBoxes`): Gt bboxes.\n gt_bboxes_ignore (torch.Tensor): Ignored gt bboxes.\n gt_labels (torch.Tensor): Gt class labels.\n input_meta (dict): Meta info of each image.\n num_classes (int): The number of classes.\n sampling (bool): Whether to sample anchors.\n\n Returns:\n tuple[torch.Tensor]: Anchor targets.\n \"\"\"\n anchors = anchors.reshape(-1, anchors.size(-1))\n num_valid_anchors = anchors.shape[0]\n bbox_targets = torch.zeros_like(anchors)\n bbox_weights = torch.zeros_like(anchors)\n dir_targets = anchors.new_zeros((anchors.shape[0]), dtype=torch.long)\n dir_weights = anchors.new_zeros((anchors.shape[0]), 
dtype=torch.float)\n labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long)\n label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n if len(gt_bboxes) > 0:\n if not isinstance(gt_bboxes, torch.Tensor):\n gt_bboxes = gt_bboxes.tensor.to(anchors.device)\n assign_result = bbox_assigner.assign(anchors, gt_bboxes,\n gt_bboxes_ignore, gt_labels)\n sampling_result = self.bbox_sampler.sample(assign_result, anchors,\n gt_bboxes)\n pos_inds = sampling_result.pos_inds\n neg_inds = sampling_result.neg_inds\n else:\n pos_inds = torch.nonzero(\n anchors.new_zeros((anchors.shape[0], ), dtype=torch.bool) > 0,\n as_tuple=False).squeeze(-1).unique()\n neg_inds = torch.nonzero(\n anchors.new_zeros((anchors.shape[0], ), dtype=torch.bool) == 0,\n as_tuple=False).squeeze(-1).unique()\n\n if gt_labels is not None:\n labels += num_classes\n if len(pos_inds) > 0:\n pos_bbox_targets = self.bbox_coder.encode(\n sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n pos_dir_targets = get_direction_target(\n sampling_result.pos_bboxes,\n pos_bbox_targets,\n self.dir_offset,\n one_hot=False)\n bbox_targets[pos_inds, :] = pos_bbox_targets\n bbox_weights[pos_inds, :] = 1.0\n dir_targets[pos_inds] = pos_dir_targets\n dir_weights[pos_inds] = 1.0\n\n if gt_labels is None:\n labels[pos_inds] = 1\n else:\n labels[pos_inds] = gt_labels[\n sampling_result.pos_assigned_gt_inds]\n if self.train_cfg.pos_weight <= 0:\n label_weights[pos_inds] = 1.0\n else:\n label_weights[pos_inds] = self.train_cfg.pos_weight\n\n if len(neg_inds) > 0:\n label_weights[neg_inds] = 1.0\n return (labels, label_weights, bbox_targets, bbox_weights, dir_targets,\n dir_weights, pos_inds, neg_inds)\n\n\ndef get_direction_target(anchors,\n reg_targets,\n dir_offset=0,\n num_bins=2,\n one_hot=True):\n \"\"\"Encode direction to 0 ~ num_bins-1.\n\n Args:\n anchors (torch.Tensor): Concatenated multi-level anchor.\n reg_targets (torch.Tensor): Bbox regression targets.\n dir_offset (int): Direction offset.\n num_bins (int): Number of bins to divide 2*PI.\n one_hot (bool): Whether to encode as one hot.\n\n Returns:\n torch.Tensor: Encoded direction targets.\n \"\"\"\n rot_gt = reg_targets[..., 6] + anchors[..., 6]\n offset_rot = limit_period(rot_gt - dir_offset, 0, 2 * np.pi)\n dir_cls_targets = torch.floor(offset_rot / (2 * np.pi / num_bins)).long()\n dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1)\n if one_hot:\n dir_targets = torch.zeros(\n *list(dir_cls_targets.shape),\n num_bins,\n dtype=anchors.dtype,\n device=dir_cls_targets.device)\n dir_targets.scatter_(dir_cls_targets.unsqueeze(dim=-1).long(), 1.0)\n dir_cls_targets = dir_targets\n return dir_cls_targets\n" ]
[ [ "torch.zeros_like", "torch.cat", "torch.clamp", "torch.floor" ] ]
jaimesouza/devito
[ "aa85166f8ea4924498d3bb143b6d40ff5e97e97a" ]
[ "devito/types/basic.py" ]
[ "import abc\nfrom collections import namedtuple\nfrom ctypes import POINTER, Structure, byref\nfrom functools import reduce\nfrom operator import mul\n\nimport numpy as np\nimport sympy\nfrom sympy.core.assumptions import _assume_rules\nfrom cached_property import cached_property\nfrom cgen import Struct, Value\n\nfrom devito.data import default_allocator\nfrom devito.symbolics import aligned_indices\nfrom devito.tools import (Pickable, ctypes_to_cstr, dtype_to_cstr, dtype_to_ctype,\n frozendict, memoized_meth)\nfrom devito.types.args import ArgProvider\nfrom devito.types.caching import Cached\nfrom devito.types.lazy import Evaluable\nfrom devito.types.utils import DimensionTuple\n\n__all__ = ['Symbol', 'Scalar', 'Indexed', 'Object', 'LocalObject', 'CompositeObject']\n\n\nSize = namedtuple('Size', 'left right')\nOffset = namedtuple('Offset', 'left right')\n\n\nclass Basic(object):\n\n \"\"\"\n Three relevant types inherit from this class:\n\n * AbstractSymbol: represents a scalar; may carry data; may be used\n to build equations.\n * AbstractFunction: represents a discrete R^n -> R function; may\n carry data; may be used to build equations.\n * AbstractTensor: represents a discrete 2nd order tensor or vector:\n R^n -> R^(nd x nd) tensor (nd dimensions),\n R^n -> R^nd vector (nd dimensions),\n may carry data; may be used to build equations.\n * AbstractObject: represents a generic object, for example a (pointer\n to) data structure.\n\n Basic\n |\n --------------------------------------------------------------\n | | | |\n AbstractSymbol AbstractFunction AbstractTensor AbstractObject\n\n All these subtypes must implement a number of methods/properties to enable\n code generation via the Devito compiler. These methods/properties are\n easily recognizable as their name starts with _C_.\n\n Notes\n -----\n The AbstractFunction sub-hierarchy is implemented in :mod:`dense.py`.\n The AbstractTensor sub-hierarchy is implemented in :mod:`tensor.py`.\n \"\"\"\n\n # Top hierarchy\n is_AbstractFunction = False\n is_AbstractSymbol = False\n is_AbstractObject = False\n\n # Symbolic objects created internally by Devito\n is_Symbol = False\n is_ArrayBasic = False\n is_Array = False\n is_PointerArray = False\n is_Object = False\n is_LocalObject = False\n\n # Created by the user\n is_Input = False\n\n # Scalar symbolic objects created by the user\n is_Dimension = False\n is_Constant = False\n\n # Tensor symbolic objects created by the user\n is_DiscreteFunction = False\n is_Function = False\n is_TimeFunction = False\n is_SparseTimeFunction = False\n is_SparseFunction = False\n is_PrecomputedSparseFunction = False\n is_PrecomputedSparseTimeFunction = False\n\n # Time dependence\n is_TimeDependent = False\n\n # Tensor and Vector valued objects\n is_VectorValued = False\n is_TensorValued = False\n\n # Basic symbolic object properties\n is_Scalar = False\n is_Tensor = False\n\n # Some other properties\n is_PerfKnob = False # Does it impact the Operator performance?\n\n @abc.abstractmethod\n def __init__(self, *args, **kwargs):\n return\n\n @abc.abstractproperty\n def _C_name(self):\n \"\"\"\n The C-level name of the object.\n\n Returns\n -------\n str\n \"\"\"\n return\n\n @abc.abstractproperty\n def _C_typename(self):\n \"\"\"\n The C-level type of the object.\n\n Returns\n -------\n str\n \"\"\"\n return\n\n @abc.abstractproperty\n def _C_typedata(self):\n \"\"\"\n The C-level type of the data values.\n\n Returns\n -------\n str\n \"\"\"\n return\n\n @abc.abstractproperty\n def _C_ctype(self):\n \"\"\"\n The 
C-level type of the object, as a ctypes object, suitable for type\n checking when calling functions via ctypes.\n\n Returns\n -------\n ctypes type\n \"\"\"\n return\n\n @property\n def _C_typedecl(self):\n \"\"\"\n The C-level struct declaration representing the object.\n\n Returns\n -------\n cgen.Struct or None\n None if the object C type can be expressed with a basic C type,\n such as float or int.\n \"\"\"\n return\n\n\nclass AbstractSymbol(sympy.Symbol, Basic, Pickable, Evaluable):\n\n \"\"\"\n Base class for scalar symbols.\n\n The hierarchy is structured as follows\n\n AbstractSymbol\n |\n -------------------------------------\n | |\n DataSymbol Symbol\n | |\n ---------------- -------------------\n | | | |\n Constant DefaultDimension Scalar Dimension\n <:mod:`dimension.py`>\n\n All symbols can be used to build equations. However, while DataSymbol\n carries data, Symbol is a pure symbolic object.\n\n Constant, DefaultDimension, and Dimension (and most of its subclasses) are\n part of the user API; Scalar, instead, is only used internally by Devito.\n\n DefaultDimension and Dimension define a problem dimension (in other words,\n an \"iteration space\"). They can be used to index into Functions. For more\n information, refer to :mod:`dimension.py`.\n \"\"\"\n\n is_AbstractSymbol = True\n is_Symbol = True\n\n # SymPy default assumptions\n is_real = True\n is_imaginary = False\n is_commutative = True\n\n @classmethod\n def _filter_assumptions(cls, **kwargs):\n \"\"\"Extract sympy.Symbol-specific kwargs.\"\"\"\n assumptions = {}\n for i in list(kwargs):\n if i in _assume_rules.defined_facts:\n assumptions[i] = kwargs.pop(i)\n return assumptions, kwargs\n\n def __new__(cls, *args, **kwargs):\n name = kwargs.get('name') or args[0]\n assumptions, kwargs = cls._filter_assumptions(**kwargs)\n\n # Create the new Symbol\n # Note: use __xnew__ to bypass sympy caching\n newobj = sympy.Symbol.__xnew__(cls, name, **assumptions)\n\n # Initialization\n newobj._dtype = cls.__dtype_setup__(**kwargs)\n newobj.__init_finalize__(*args, **kwargs)\n\n return newobj\n\n @classmethod\n def __dtype_setup__(cls, **kwargs):\n \"\"\"Extract the object data type from ``kwargs``.\"\"\"\n return kwargs.get('dtype', np.int32)\n\n def __init__(self, *args, **kwargs):\n # no-op, the true init is performed by __init_finalize__\n pass\n\n def __init_finalize__(self, *args, **kwargs):\n self._is_const = kwargs.get('is_const', False)\n\n @property\n def dtype(self):\n \"\"\"The data type of the object.\"\"\"\n return self._dtype\n\n @property\n def indices(self):\n return ()\n\n @property\n def dimensions(self):\n return self.indices\n\n @property\n def shape(self):\n return ()\n\n @property\n def ndim(self):\n return 0\n\n @property\n def symbolic_shape(self):\n return ()\n\n @property\n def base(self):\n return self\n\n @property\n def function(self):\n return self\n\n @property\n def evaluate(self):\n return self\n\n def indexify(self):\n return self\n\n @property\n def is_const(self):\n \"\"\"\n True if the symbol value cannot be modified within an Operator (and thus\n its value is provided by the user directly from Python-land), False otherwise.\n \"\"\"\n return self._is_const\n\n @property\n def _C_name(self):\n return self.name\n\n @property\n def _C_typename(self):\n return '%s%s' % ('const ' if self.is_const else '',\n dtype_to_cstr(self.dtype))\n\n @property\n def _C_typedata(self):\n return dtype_to_cstr(self.dtype)\n\n @property\n def _C_ctype(self):\n return dtype_to_ctype(self.dtype)\n\n def _subs(self, old, 
new, **hints):\n \"\"\"\n This stub allows sympy.Basic.subs to operate on an expression\n involving devito Scalars. Ordinarily the comparisons between\n devito subclasses of sympy types are quite strict.\n \"\"\"\n try:\n if old.name == self.name:\n return new\n except AttributeError:\n pass\n\n return self\n\n # Pickling support\n _pickle_args = []\n _pickle_kwargs = ['name', 'dtype', 'is_const']\n __reduce_ex__ = Pickable.__reduce_ex__\n\n\nclass Symbol(AbstractSymbol, Cached):\n\n \"\"\"\n A scalar symbol, cached by both Devito and SymPy, which does not carry\n any data.\n\n Notes\n -----\n A Symbol may not be in the SymPy cache, but still be present in the\n Devito cache. This is because SymPy caches operations, rather than\n actual objects.\n \"\"\"\n\n @classmethod\n def _cache_key(cls, *args, **kwargs):\n args = list(args)\n key = {}\n\n # The base type is necessary, otherwise two objects such as\n # `Scalar(name='s')` and `Dimension(name='s')` would have the same key\n key['cls'] = cls\n\n # The name is always present, and added as if it were an arg\n key['name'] = kwargs.pop('name', None) or args.pop(0)\n\n # From the args\n key['args'] = tuple(args)\n\n # From the kwargs\n key.update(kwargs)\n\n return frozendict(key)\n\n def __new__(cls, *args, **kwargs):\n key = cls._cache_key(*args, **kwargs)\n obj = cls._cache_get(key)\n\n if obj is not None:\n return obj\n\n # Not in cache. Create a new Symbol via sympy.Symbol\n name = kwargs.get('name') or args[0]\n assumptions, kwargs = cls._filter_assumptions(**kwargs)\n\n # Note: use __xnew__ to bypass sympy caching\n newobj = sympy.Symbol.__xnew__(cls, name, **assumptions)\n\n # Initialization\n newobj._dtype = cls.__dtype_setup__(**kwargs)\n newobj.__init_finalize__(*args, **kwargs)\n\n # Store new instance in symbol cache\n Cached.__init__(newobj, key)\n\n return newobj\n\n __hash__ = Cached.__hash__\n\n\nclass DataSymbol(AbstractSymbol, Cached):\n\n \"\"\"\n A scalar symbol, cached by both Devito and SymPy, which carries data.\n \"\"\"\n\n @classmethod\n def _cache_key(cls, *args, **kwargs):\n return cls\n\n def __new__(cls, *args, **kwargs):\n key = cls._cache_key(*args, **kwargs)\n obj = cls._cache_get(key)\n\n if obj is not None:\n return obj\n\n # Not in cache. Create a new Symbol via sympy.Symbol\n name = kwargs.get('name') or args[0]\n assumptions, kwargs = cls._filter_assumptions(**kwargs)\n\n # Create new, unique type instance from cls and the symbol name\n newcls = type(name, (cls,), dict(cls.__dict__))\n\n # Create the new Symbol and invoke __init__\n newobj = sympy.Symbol.__new__(newcls, name, **assumptions)\n\n # Initialization\n newobj._dtype = cls.__dtype_setup__(**kwargs)\n newobj.__init_finalize__(*args, **kwargs)\n\n # Store new instance in symbol cache\n Cached.__init__(newobj, newcls)\n\n return newobj\n\n __hash__ = Cached.__hash__\n\n # Pickling support\n\n @property\n def _pickle_reconstruct(self):\n return self.__class__.__base__\n\n\nclass Scalar(Symbol, ArgProvider):\n\n \"\"\"\n Like a Symbol, but in addition it can pass runtime values to an Operator.\n\n Parameters\n ----------\n name : str\n Name of the symbol.\n dtype : data-type, optional\n Any object that can be interpreted as a numpy data type. Defaults\n to ``np.float32``.\n is_const : bool, optional\n True if the symbol value cannot be modified within an Operator,\n False otherwise. Defaults to False.\n **assumptions\n Any SymPy assumptions, such as ``nonnegative=True``. 
Refer to the\n SymPy documentation for more information.\n \"\"\"\n\n is_Scalar = True\n\n @classmethod\n def __dtype_setup__(cls, **kwargs):\n return kwargs.get('dtype', np.float32)\n\n\nclass AbstractTensor(sympy.ImmutableDenseMatrix, Basic, Pickable, Evaluable):\n \"\"\"\n Base class for vector and tensor valued functions. It inherits from and\n mimics the behavior of a sympy.ImmutableDenseMatrix.\n\n\n The sub-hierarchy is as follows\n\n AbstractTensor\n |\n TensorFunction\n |\n ---------------------------------\n | |\n VectorFunction TensorTimeFunction\n \\-------\\ |\n \\------- VectorTimeFunction\n\n There are four relevant AbstractTensor sub-types: ::\n\n * TensorFunction: A space-varying tensor valued function.\n * VectorFunction: A space-varying vector valued function.\n * TensorTimeFunction: A time-space-varying tensor valued function.\n * VectorTimeFunction: A time-space-varying vector valued function.\n \"\"\"\n\n # Sympy attributes\n is_MatrixLike = True\n is_Matrix = True\n\n # Devito attributes\n is_AbstractTensor = True\n is_TensorValued = True\n is_VectorValued = False\n\n @classmethod\n def _new(cls, *args, **kwargs):\n if args:\n try:\n # Constructor if input is (rows, cols, lambda)\n newobj = super(AbstractTensor, cls)._new(*args)\n except ValueError:\n # Constructor if input is list of list as (row, cols, list_of_list)\n # doesn't work as it expects a flattened list.\n newobj = super(AbstractTensor, cls)._new(args[2])\n\n # Filter grid and dimensions\n grids = {getattr(c, 'grid', None) for c in newobj._mat} - {None}\n dimensions = {d for c in newobj._mat\n for d in getattr(c, 'dimensions', ())} - {None}\n # If none of the components are devito objects, returns a sympy Matrix\n if len(grids) == 0 and len(dimensions) == 0:\n return sympy.ImmutableDenseMatrix(*args)\n elif len(grids) > 0:\n dimensions = None\n assert len(grids) == 1\n grid = grids.pop()\n else:\n grid = None\n dimensions = tuple(dimensions)\n\n # Initialize with the constructed object\n newobj.__init_finalize__(newobj.rows, newobj.cols, newobj._mat,\n grid=grid, dimensions=dimensions)\n else:\n # Initialize components and create new Matrix from standard\n # Devito inputs\n comps = cls.__subfunc_setup__(*args, **kwargs)\n newobj = super(AbstractTensor, cls)._new(comps)\n newobj.__init_finalize__(*args, **kwargs)\n\n return newobj\n\n def __init_finalize__(self, *args, **kwargs):\n pass\n\n __hash__ = sympy.ImmutableDenseMatrix.__hash__\n\n def doit(self, **hint):\n return self\n\n def _eval_matrix_mul(self, other):\n \"\"\"\n Copy paste from sympy to avoid explicit call to sympy.Add\n TODO: fix inside sympy\n \"\"\"\n other_len = other.rows*other.cols\n new_len = self.rows*other.cols\n new_mat = [self.zero]*new_len\n\n # If we multiply an n x 0 with a 0 x m, the\n # expected behavior is to produce an n x m matrix of zeros\n if self.cols != 0 and other.rows != 0:\n self_cols = self.cols\n mat = self._mat\n other_mat = other._mat\n for i in range(new_len):\n row, col = i // other.cols, i % other.cols\n row_indices = range(self_cols*row, self_cols*(row+1))\n col_indices = range(col, other_len, other.cols)\n vec = [mat[a]*other_mat[b] for a, b in zip(row_indices, col_indices)]\n new_mat[i] = sum(vec)\n\n # Get new class and return product\n newcls = self.classof_prod(other, new_mat)\n return newcls._new(self.rows, other.cols, new_mat, copy=False)\n\n @classmethod\n def __subfunc_setup__(cls, *args, **kwargs):\n \"\"\"Setup each component of the tensor as a Devito type.\"\"\"\n return []\n\n\nclass AbstractFunction(sympy.Function, Basic, Cached, Pickable, Evaluable):\n \"\"\"\n Base class for tensor symbols, cached by both SymPy and Devito. It inherits\n from and mimics the behaviour of a sympy.Function.\n\n The hierarchy is structured as follows\n\n AbstractFunction\n |\n ---------------------------------\n | |\n DiscreteFunction Array\n |\n ----------------------------------------\n | |\n | AbstractSparseFunction\n | |\n | -----------------------------------------------------\n | | | |\n Function SparseFunction AbstractSparseTimeFunction PrecomputedSparseFunction\n | | | |\n | | ------------------------------------ --------\n | | | | |\n TimeFunction SparseTimeFunction PrecomputedSparseTimeFunction\n\n There are seven relevant AbstractFunction sub-types: ::\n\n * Array: A function that does not carry data.\n * Function: A space-varying discrete function, which carries user data.\n * TimeFunction: A time- and space-varying discrete function, which carries\n user data.\n * SparseFunction: A space-varying discrete function representing \"sparse\"\n points, i.e. points that are not aligned with the\n computational grid.\n * SparseTimeFunction: A time- and space-varying function representing \"sparse\"\n points, i.e. points that are not aligned with the\n computational grid.\n * PrecomputedSparseFunction: A SparseFunction that uses a custom interpolation\n scheme, instead of linear interpolators.\n * PrecomputedSparseTimeFunction: A SparseTimeFunction that uses a custom\n interpolation scheme, instead of linear\n interpolators.\n\n \"\"\"\n # Sympy attributes, explicitly say these are not Matrices\n is_MatrixLike = False\n is_Matrix = False\n\n is_AbstractFunction = True\n\n # SymPy default assumptions\n is_real = True\n is_imaginary = False\n is_commutative = True\n\n @classmethod\n def _cache_key(cls, *args, **kwargs):\n return cls, args\n\n def __new__(cls, *args, **kwargs):\n options = kwargs.get('options', {'evaluate': False})\n\n # Is the object already in cache (e.g., f(x), f(x+1)) ?\n key = cls._cache_key(*args, **kwargs)\n obj = cls._cache_get(key)\n if obj is not None:\n return obj\n\n # Does the base object exist at least (e.g. f(x))?\n obj = cls._cache_get(cls)\n if obj is not None:\n newobj = sympy.Function.__new__(cls, *args, **options)\n newobj.__init_cached__(cls)\n Cached.__init__(newobj, key)\n return newobj\n\n # Preprocess arguments\n args, kwargs = cls.__args_setup__(*args, **kwargs)\n\n # Not in cache. Create a new Function via sympy.Function\n name = kwargs.get('name')\n dimensions, indices = cls.__indices_setup__(**kwargs)\n\n # Create new, unique type instance from cls and the symbol name\n newcls = type(name, (cls,), dict(cls.__dict__))\n\n # Create the new Function object and invoke __init__\n newobj = sympy.Function.__new__(newcls, *indices, **options)\n\n # Initialization. The following attributes must be available\n # when executing __init_finalize__\n newobj._name = name\n newobj._dimensions = dimensions\n newobj._shape = cls.__shape_setup__(**kwargs)\n newobj._dtype = cls.__dtype_setup__(**kwargs)\n newobj.__init_finalize__(*args, **kwargs)\n\n # All objects cached on the AbstractFunction `newobj` keep a reference\n # to `newobj` through the `function` field. Thus, all indexified\n # objects will point to `newobj`, the \"actual Function\".\n newobj.function = newobj\n\n # Store new instance in symbol cache\n key = (newcls, indices)\n Cached.__init__(newobj, key, newcls)\n\n return newobj\n\n def __init__(self, *args, **kwargs):\n # no-op, the true init is performed by __init_finalize__\n pass\n\n def __init_finalize__(self, *args, **kwargs):\n # Setup halo and padding regions\n self._is_halo_dirty = False\n self._halo = self.__halo_setup__(**kwargs)\n self._padding = self.__padding_setup__(**kwargs)\n\n __hash__ = Cached.__hash__\n\n @classmethod\n def __args_setup__(cls, *args, **kwargs):\n \"\"\"\n Preprocess *args and **kwargs before object initialization.\n\n Notes\n -----\n This stub is invoked only if a lookup in the cache fails.\n \"\"\"\n return args, kwargs\n\n @classmethod\n def __indices_setup__(cls, **kwargs):\n \"\"\"Extract the object indices from ``kwargs``.\"\"\"\n return (), ()\n\n @classmethod\n def __shape_setup__(cls, **kwargs):\n \"\"\"Extract the object shape from ``kwargs``.\"\"\"\n return ()\n\n @classmethod\n def __dtype_setup__(cls, **kwargs):\n \"\"\"Extract the object data type from ``kwargs``.\"\"\"\n return None\n\n def __halo_setup__(self, **kwargs):\n return tuple(kwargs.get('halo', [(0, 0) for i in range(self.ndim)]))\n\n def __padding_setup__(self, **kwargs):\n return tuple(kwargs.get('padding', [(0, 0) for i in range(self.ndim)]))\n\n @cached_property\n def _honors_autopadding(self):\n \"\"\"\n True if the actual padding is greater than or equal to whatever autopadding\n would produce, False otherwise.\n \"\"\"\n autopadding = self.__padding_setup__(autopadding=True)\n return all(l0 >= l1 and r0 >= r1\n for (l0, r0), (l1, r1) in zip(self.padding, autopadding))\n\n @property\n def name(self):\n \"\"\"The name of the object.\"\"\"\n return self._name\n\n @property\n def indices(self):\n \"\"\"The indices (aka dimensions) of the object.\"\"\"\n return self.args\n\n @property\n def indices_ref(self):\n \"\"\"The reference indices of the object (indices at first creation).\"\"\"\n return DimensionTuple(*self.function.indices, getters=self.dimensions)\n\n @property\n def origin(self):\n \"\"\"\n Origin of the AbstractFunction in terms of Dimension\n f(x) : origin = 0\n f(x + hx/2) : origin = hx/2\n \"\"\"\n return tuple(r - d for d, r in zip(self.dimensions, self.indices_ref))\n\n @property\n def dimensions(self):\n \"\"\"Tuple of Dimensions representing the object indices.\"\"\"\n return self._dimensions\n\n @property\n def _eval_deriv(self):\n return self\n\n @property\n def _is_on_grid(self):\n \"\"\"\n Check whether the object is on the grid and requires averaging.\n For example, if the original non-staggered function is f(x)\n then f(x) is on the grid and f(x + h_x/2) is off the grid.\n \"\"\"\n return self._check_indices(inds=self.indices)\n\n @memoized_meth\n def _check_indices(self, inds=None):\n \"\"\"\n Check if the function indices are aligned with the dimensions.\n \"\"\"\n inds = inds or self.indices\n return all([aligned_indices(i, j, d.spacing) for i, j, d in\n zip(inds, self.indices_ref, self.dimensions)])\n\n @property\n def evaluate(self):\n # Average values if at a location not on the Function's grid\n if self._is_on_grid:\n return self\n weight = 1.0\n avg_list = [self]\n is_averaged = False\n for i, ir, d in zip(self.indices, self.indices_ref, self.dimensions):\n off = (i - ir)/d.spacing\n if not isinstance(off, sympy.Number) or int(off) == off:\n pass\n else:\n weight *= 1/2\n is_averaged = True\n
avg_list = [(a.xreplace({i: i - d.spacing/2}) +\n a.xreplace({i: i + d.spacing/2})) for a in avg_list]\n\n if not is_averaged:\n return self\n return weight * sum(avg_list)\n\n @property\n def shape(self):\n \"\"\"The shape of the object.\"\"\"\n return self._shape\n\n @property\n def dtype(self):\n \"\"\"The data type of the object.\"\"\"\n return self._dtype\n\n @property\n def ndim(self):\n \"\"\"The rank of the object.\"\"\"\n return len(self.indices)\n\n @property\n def symbolic_shape(self):\n \"\"\"\n The symbolic shape of the object. This includes the domain, halo, and\n padding regions. While halo and padding are known quantities (integers),\n the domain size is given as a symbol.\n \"\"\"\n halo = [sympy.Add(*i, evaluate=False) for i in self._size_halo]\n padding = [sympy.Add(*i, evaluate=False) for i in self._size_padding]\n domain = [i.symbolic_size for i in self.dimensions]\n ret = tuple(sympy.Add(i, j, k, evaluate=False)\n for i, j, k in zip(domain, halo, padding))\n return DimensionTuple(*ret, getters=self.dimensions)\n\n @cached_property\n def indexed(self):\n \"\"\"The wrapped IndexedData object.\"\"\"\n return IndexedData(self.name, shape=self.shape, function=self.function)\n\n @property\n def _mem_external(self):\n \"\"\"\n True if the associated data was/is/will be allocated directly\n from Python (e.g., via NumPy arrays), False otherwise.\n \"\"\"\n return False\n\n @property\n def _mem_stack(self):\n \"\"\"\n True if the associated data should be allocated on the stack, False otherwise.\n \"\"\"\n return False\n\n @property\n def _mem_heap(self):\n \"\"\"\n True if the associated data was/is/will be allocated on the heap,\n False otherwise.\n \"\"\"\n return False\n\n @property\n def size(self):\n \"\"\"\n The number of elements this object is expected to store in memory.\n Note that this would need to be combined with self.dtype to give the actual\n size in bytes.\n \"\"\"\n return reduce(mul, self.shape)\n\n @property\n def halo(self):\n return self._halo\n\n @property\n def padding(self):\n return self._padding\n\n @property\n def is_const(self):\n return False\n\n @property\n def _C_name(self):\n return \"%s_vec\" % self.name\n\n @property\n def _C_typedata(self):\n return dtype_to_cstr(self.dtype)\n\n @cached_property\n def _size_domain(self):\n \"\"\"Number of points in the domain region.\"\"\"\n return DimensionTuple(*self.shape, getters=self.dimensions)\n\n @cached_property\n def _size_halo(self):\n \"\"\"Number of points in the halo region.\"\"\"\n left = tuple(zip(*self._halo))[0]\n right = tuple(zip(*self._halo))[1]\n\n sizes = tuple(Size(i, j) for i, j in self._halo)\n\n return DimensionTuple(*sizes, getters=self.dimensions, left=left, right=right)\n\n @cached_property\n def _size_owned(self):\n \"\"\"Number of points in the owned region.\"\"\"\n left = tuple(self._size_halo.right)\n right = tuple(self._size_halo.left)\n\n sizes = tuple(Size(i.right, i.left) for i in self._size_halo)\n\n return DimensionTuple(*sizes, getters=self.dimensions, left=left, right=right)\n\n @cached_property\n def _size_padding(self):\n \"\"\"Number of points in the padding region.\"\"\"\n left = tuple(zip(*self._padding))[0]\n right = tuple(zip(*self._padding))[1]\n\n sizes = tuple(Size(i, j) for i, j in self._padding)\n\n return DimensionTuple(*sizes, getters=self.dimensions, left=left, right=right)\n\n @cached_property\n def _size_nopad(self):\n \"\"\"Number of points in the domain+halo region.\"\"\"\n sizes = tuple(i+sum(j) for i, j in zip(self._size_domain, 
self._size_halo))\n return DimensionTuple(*sizes, getters=self.dimensions)\n\n @cached_property\n def _size_nodomain(self):\n \"\"\"Number of points in the padding+halo region.\"\"\"\n left = tuple(i for i, _ in np.add(self._halo, self._padding))\n right = tuple(i for _, i in np.add(self._halo, self._padding))\n\n sizes = tuple(Size(i, j) for i, j in np.add(self._halo, self._padding))\n\n return DimensionTuple(*sizes, getters=self.dimensions, left=left, right=right)\n\n @cached_property\n def _offset_domain(self):\n \"\"\"Number of points before the first domain element.\"\"\"\n offsets = tuple(np.add(self._size_padding.left, self._size_halo.left))\n return DimensionTuple(*offsets, getters=self.dimensions)\n\n @cached_property\n def _offset_halo(self):\n \"\"\"Number of points before the first and last halo elements.\"\"\"\n left = tuple(self._size_padding.left)\n right = tuple(np.add(np.add(left, self._size_halo.left), self._size_domain))\n\n offsets = tuple(Offset(i, j) for i, j in zip(left, right))\n\n return DimensionTuple(*offsets, getters=self.dimensions, left=left, right=right)\n\n @cached_property\n def _offset_owned(self):\n \"\"\"Number of points before the first and last owned elements.\"\"\"\n left = tuple(self._offset_domain)\n right = tuple(np.add(self._offset_halo.left, self._size_domain))\n\n offsets = tuple(Offset(i, j) for i, j in zip(left, right))\n\n return DimensionTuple(*offsets, getters=self.dimensions, left=left, right=right)\n\n @property\n def _data_alignment(self):\n \"\"\"\n The base virtual address of the data carried by the object is a multiple\n of the alignment.\n \"\"\"\n return default_allocator().guaranteed_alignment\n\n def indexify(self, indices=None, lshift=False, subs=None):\n \"\"\"Create a types.Indexed from the current object.\"\"\"\n if indices is not None:\n return Indexed(self.indexed, *indices)\n\n # Substitution for each index (spacing only used in own dimension)\n subs = subs or {}\n subs = [{**{d.spacing: 1, -d.spacing: -1}, **subs} for d in self.dimensions]\n\n # Add halo shift\n shift = self._size_nodomain.left if lshift else tuple([0]*len(self.dimensions))\n # Indices after substitutions\n indices = [sympy.sympify((a - o + f).xreplace(s)) for a, o, f, s in\n zip(self.args, self.origin, shift, subs)]\n indices = [i.xreplace({k: sympy.Integer(k) for k in i.atoms(sympy.Float)})\n for i in indices]\n return self.indexed[indices]\n\n def __getitem__(self, index):\n \"\"\"Shortcut for ``self.indexed[index]``.\"\"\"\n return self.indexed[index]\n\n # Pickling support\n _pickle_kwargs = ['name', 'dtype', 'halo', 'padding']\n __reduce_ex__ = Pickable.__reduce_ex__\n\n @property\n def _pickle_reconstruct(self):\n return self.__class__.__base__\n\n\n# Objects belonging to the Devito API not involving data, such as data structures\n# that need to be passed to external libraries\n\n\nclass AbstractObject(Basic, sympy.Basic, Pickable):\n\n \"\"\"\n Symbol representing a generic pointer object.\n \"\"\"\n\n is_AbstractObject = True\n\n def __new__(cls, *args, **kwargs):\n obj = sympy.Basic.__new__(cls)\n obj.__init__(*args, **kwargs)\n return obj\n\n def __init__(self, name, dtype):\n self.name = name\n self.dtype = dtype\n\n def __repr__(self):\n return self.name\n\n __str__ = __repr__\n\n def _hashable_content(self):\n return (self.name, self.dtype)\n\n @property\n def free_symbols(self):\n return {self}\n\n @property\n def _C_name(self):\n return self.name\n\n @property\n def _C_typename(self):\n return ctypes_to_cstr(self.dtype)\n\n @property\n def 
_C_ctype(self):\n return self.dtype\n\n @property\n def function(self):\n return self\n\n # Pickling support\n _pickle_args = ['name', 'dtype']\n __reduce_ex__ = Pickable.__reduce_ex__\n\n\nclass Object(AbstractObject, ArgProvider):\n\n \"\"\"\n Symbol representing a generic pointer object, provided by an outer scope.\n \"\"\"\n\n is_Object = True\n\n def __init__(self, name, dtype, value=None):\n super(Object, self).__init__(name, dtype)\n self.value = value\n\n @property\n def _arg_names(self):\n return (self.name,)\n\n def _arg_defaults(self):\n if callable(self.value):\n return {self.name: self.value()}\n else:\n return {self.name: self.value}\n\n def _arg_values(self, args=None, **kwargs):\n \"\"\"\n Produce runtime values for this Object after evaluating user input.\n\n Parameters\n ----------\n args : dict, optional\n Known argument values.\n **kwargs\n Dictionary of user-provided argument overrides.\n \"\"\"\n if self.name in kwargs:\n return {self.name: kwargs.pop(self.name)}\n else:\n return self._arg_defaults()\n\n\nclass CompositeObject(Object):\n\n \"\"\"\n Symbol representing a pointer to a composite type (e.g., a C struct),\n provided by an outer scope.\n \"\"\"\n\n _dtype_cache = {}\n\n @classmethod\n def _generate_unique_dtype(cls, pname, pfields):\n dtype = POINTER(type(pname, (Structure,), {'_fields_': pfields}))\n key = (pname, tuple(pfields))\n return cls._dtype_cache.setdefault(key, dtype)\n\n def __init__(self, name, pname, pfields, value=None):\n dtype = CompositeObject._generate_unique_dtype(pname, pfields)\n value = self.__value_setup__(dtype, value)\n super(CompositeObject, self).__init__(name, dtype, value)\n\n def __value_setup__(self, dtype, value):\n return value or byref(dtype._type_())\n\n @property\n def pfields(self):\n return tuple(self.dtype._type_._fields_)\n\n @property\n def pname(self):\n return self.dtype._type_.__name__\n\n @property\n def fields(self):\n return [i for i, _ in self.pfields]\n\n def _hashable_content(self):\n return (self.name, self.pfields)\n\n @cached_property\n def _C_typedecl(self):\n return Struct(self.pname, [Value(ctypes_to_cstr(j), i) for i, j in self.pfields])\n\n # Pickling support\n _pickle_args = ['name', 'pname', 'pfields']\n _pickle_kwargs = []\n\n\nclass LocalObject(AbstractObject):\n\n \"\"\"\n Symbol representing a generic pointer object, defined in the local scope.\n \"\"\"\n\n is_LocalObject = True\n\n\n# Extended SymPy hierarchy follows, for essentially two reasons:\n# - To keep track of `function`\n# - To override SymPy caching behaviour\n\n\nclass IndexedData(sympy.IndexedBase, Pickable):\n\n \"\"\"\n Wrapper class that inserts a pointer to the symbolic data object.\n \"\"\"\n\n def __new__(cls, label, shape=None, function=None):\n # Make sure `label` is a devito.Symbol, not a sympy.Symbol\n if isinstance(label, str):\n label = Symbol(name=label, dtype=function.dtype)\n obj = sympy.IndexedBase.__new__(cls, label, shape)\n obj.function = function\n return obj\n\n def func(self, *args):\n obj = super(IndexedData, self).func(*args)\n obj.function = self.function\n return obj\n\n def __getitem__(self, indices, **kwargs):\n \"\"\"Produce a types.Indexed, rather than a sympy.Indexed.\"\"\"\n indexed = super(IndexedData, self).__getitem__(indices, **kwargs)\n return Indexed(*indexed.args)\n\n # Pickling support\n _pickle_kwargs = ['label', 'shape', 'function']\n __reduce_ex__ = Pickable.__reduce_ex__\n\n\nclass Indexed(sympy.Indexed):\n\n # The two type flags have changed in upstream sympy as of version 1.1,\n # but 
the below interpretation is used throughout the compiler to\n # identify Indexed objects. With the sympy-1.1 changes a new flag\n # obj.is_Indexed was introduced which should be preferred, but the\n # required changes are cumbersome and many...\n is_Symbol = False\n is_Atom = False\n\n is_Dimension = False\n\n @memoized_meth\n def __str__(self):\n return super().__str__()\n\n def _hashable_content(self):\n return super(Indexed, self)._hashable_content() + (self.base.function,)\n\n @property\n def function(self):\n return self.base.function\n\n @property\n def dtype(self):\n return self.function.dtype\n\n @property\n def name(self):\n return self.function.name\n\n @property\n def origin(self):\n return self.function.origin\n\n @cached_property\n def free_symbols(self):\n # Make it cached, since it's relatively expensive and called often\n ret = super(Indexed, self).free_symbols\n # Get rid of the IndexedBase label this Indexed stems from\n # as in Devito we can't have it floating around in Eq's\n ret.discard(self.base.label)\n return ret\n\n def compare(self, other):\n \"\"\"\n Override `sympy.Basic.compare` to honor Devito's canonical ordering\n of arguments.\n In SymPy:\n\n f[x+1] < f[x+2] < ... < f[x+9] < f[x]\n\n While in Devito we pretend\n\n f[x] < f[x+1] < f[x+2] < ... < f[x+9]\n\n That is the arguments need to be ordered monothonically based on the indices\n so that the symbolic trees of two derivative expressions can be compared\n argument-wise.\n \"\"\"\n if (self.__class__ != other.__class__) or (self.function is not other.function):\n return super().compare(other)\n for l, r in zip(self.indices, other.indices):\n try:\n c = int(sympy.sign(l - r))\n except TypeError:\n # E.g., `l=x+1` and `r=y` or `r=sqrt(x)`\n c = l.compare(r)\n if c:\n return c\n return 0\n" ]
[ [ "numpy.add" ] ]
yu-frank/PerspectiveCropLayers
[ "ae0580cb7b3b41c21965cd32e280d2af0e8cf2c3" ]
[ "src/dataset_3dhp.py" ]
[ "import re\nfrom glob import iglob\nfrom os import path\nfrom torchvision import transforms\n\nimport h5py\nimport numpy as np\nimport torch\nfrom PIL import Image, ImageOps\nfrom pose3d_utils.coords import homogeneous_to_cartesian, ensure_homogeneous\nfrom torchvision.transforms import RandomCrop, RandomHorizontalFlip\n\nfrom margipose.data import PoseDataset, collate\nfrom margipose.data.mpi_inf_3dhp.common import Annotations, parse_camera_calibration, Constants, \\\n MpiInf3dhpSkeletonDesc\nfrom margipose.data.skeleton import CanonicalSkeletonDesc, VNect_Common_Skeleton\nfrom margipose.data_specs import DataSpecs, ImageSpecs, JointsSpecs\nfrom margipose.eval import prepare_for_3d_evaluation, gather_3d_metrics\n\nimport utils\nimport pcl\nimport pcl_util\nimport constants\n\n# Load in Constants (Mean and Stds)\nmpi_3d_Mean = constants.mpi_3d_Mean\nmpi_3d_Std = constants.mpi_3d_Std\nmpi_2d_pcl_slant_mean = constants.mpi_2d_pcl_slant_mean\nmpi_2d_pcl_slant_std = constants.mpi_2d_pcl_slant_std\nmpi_2d_pcl_3dscale_mean = constants.mpi_2d_pcl_3dscale_mean\nmpi_2d_pcl_3dscale_std = constants.mpi_2d_pcl_3dscale_std\nmpi_2d_stn_slant_mean = constants.mpi_2d_stn_slant_mean\nmpi_2d_stn_slant_std = constants.mpi_2d_stn_slant_std\nmpi_2d_stn_3dscale_mean = constants.mpi_2d_stn_3dscale_mean\nmpi_2d_stn_3dscale_std = constants.mpi_2d_stn_3dscale_std\n\n\ndef pcl_preprocess(batch_size, num_joints, canon_label_2d_with_hip, orig_img_shape, Ks_px_orig, location, scale, \\\n normalize=True, use_slant_compensation=False):\n\n canon_virt_2d, R_virt2orig, P_virt2orig = pcl.pcl_transforms_2d(canon_label_2d_with_hip, location, scale, Ks_px_orig,\\\n focal_at_image_plane=True, slant_compensation=use_slant_compensation)\n model_input = canon_virt_2d.clone()\n\n if normalize:\n if use_slant_compensation:\n model_input = utils.batch_normalize_canon_pcl_human_joints(model_input, mpi_2d_pcl_slant_mean, mpi_2d_pcl_slant_std)\n else:\n model_input = utils.batch_normalize_canon_pcl_human_joints(model_input, mpi_2d_pcl_3dscale_mean, mpi_2d_pcl_3dscale_std)\n\n model_input = model_input.view(batch_size, -1)\n\n return {'model_input':model_input, 'canon_virt_2d':canon_virt_2d, 'R_virt2orig':R_virt2orig, 'P_virt2orig':P_virt2orig}\n\nclass FrameRef:\n def __init__(self, subject_id, sequence_id, camera_id, frame_index, activity_id=None):\n self.subject_id = subject_id\n self.sequence_id = sequence_id\n self.camera_id = camera_id\n self.frame_index = frame_index\n self.activity_id = activity_id\n\n @property\n def image_file(self):\n return 'S{}/Seq{}/imageSequence/video_{}/img_{:06d}.jpg'.format(\n self.subject_id, self.sequence_id, self.camera_id, self.frame_index + 1\n )\n\n @property\n def bg_mask_file(self):\n return 'S{}/Seq{}/foreground_mask/video_{}/img_{:06d}.png'.format(\n self.subject_id, self.sequence_id, self.camera_id, self.frame_index + 1\n )\n\n @property\n def ub_mask_file(self):\n return 'S{}/Seq{}/up_body_mask/video_{}/img_{:06d}.png'.format(\n self.subject_id, self.sequence_id, self.camera_id, self.frame_index + 1\n )\n\n @property\n def lb_mask_file(self):\n return 'S{}/Seq{}/low_body_mask/video_{}/img_{:06d}.png'.format(\n self.subject_id, self.sequence_id, self.camera_id, self.frame_index + 1\n )\n\n @property\n def annot_file(self):\n return 'S{}/Seq{}/annot.mat'.format(self.subject_id, self.sequence_id)\n\n @property\n def camera_file(self):\n return 'S{}/Seq{}/camera.calibration'.format(self.subject_id, self.sequence_id)\n\n @property\n def metadata_file(self):\n return 
'S{}/Seq{}/metadata.h5'.format(self.subject_id, self.sequence_id)\n\n @property\n def bg_augmentable(self):\n seq_path = 'S{}/Seq{}'.format(self.subject_id, self.sequence_id)\n return Constants['seq_info'][seq_path]['bg_augmentable'] == 1\n\n @property\n def ub_augmentable(self):\n seq_path = 'S{}/Seq{}'.format(self.subject_id, self.sequence_id)\n return Constants['seq_info'][seq_path]['ub_augmentable'] == 1\n\n @property\n def lb_augmentable(self):\n seq_path = 'S{}/Seq{}'.format(self.subject_id, self.sequence_id)\n return Constants['seq_info'][seq_path]['lb_augmentable'] == 1\n\n def to_dict(self):\n return {\n 'subject_id': self.subject_id,\n 'sequence_id': self.sequence_id,\n 'camera_id': self.camera_id,\n 'frame_index': self.frame_index,\n 'activity_id': self.activity_id,\n }\n\n\ndef random_texture():\n files = list(iglob('resources/textures/*.png'))\n file = files[np.random.randint(0, len(files))]\n texture = Image.open(file).convert('L')\n texture = ImageOps.colorize(\n texture,\n 'black',\n (np.random.randint(50, 256), np.random.randint(50, 256), np.random.randint(50, 256))\n )\n return texture\n\n\ndef augment_clothing(img, mask, texture):\n a = np.array(img)\n grey = a.mean(axis=-1)\n blackness = (255 - grey).clip(min=0) / 255\n\n texture = np.array(texture, dtype=np.float)\n texture -= blackness[..., np.newaxis] * texture\n texture = Image.fromarray(texture.round().astype(np.uint8))\n\n return Image.composite(texture, img, mask)\n\n\ndef random_background():\n files = list(iglob('resources/backgrounds/*.jpg'))\n file = files[np.random.randint(0, len(files))]\n bg = Image.open(file)\n bg = RandomHorizontalFlip()(RandomCrop(768)(bg))\n return bg\n\n\ndef augment_background(img, mask, bg):\n return Image.composite(img, bg, mask)\n\n\nclass MpiInf3dDataset(PoseDataset):\n preserve_root_joint_at_univ_scale = False\n\n def __init__(self, data_dir, data_specs=None, use_aug=False, disable_mask_aug=False, \\\n without_image=True, human_height=2000, focal_diff=0, use_pcl=True, calculate_scale_from_2d=True, use_slant_compensation=True):\n if data_specs is None:\n data_specs = DataSpecs(\n ImageSpecs(128, mean=ImageSpecs.IMAGENET_MEAN, stddev=ImageSpecs.IMAGENET_STDDEV),\n JointsSpecs(MpiInf3dhpSkeletonDesc, n_dims=3),\n )\n\n super().__init__(data_specs)\n\n \"\"\"NEW\"\"\"\n self.human_height = human_height\n self.focal_diff = focal_diff\n self.use_pcl = use_pcl\n self.calculate_scale_from_2d = calculate_scale_from_2d\n self.use_slant_compensation = use_slant_compensation\n\n if not path.isdir(data_dir):\n raise NotADirectoryError(data_dir)\n\n metadata_files = sorted(iglob(path.join(data_dir, 'S*', 'Seq*', 'metadata.h5')))\n frame_refs = []\n univ_scale_factors = {}\n\n for metadata_file in metadata_files:\n # match = re.match(r'.*S(\\d+)/Seq(\\d+)/metadata.h5', metadata_file)\n match = re.match(r'.*S(\\d+)\\\\Seq(\\d+)\\\\metadata.h5', metadata_file)\n subject_id = int(match.group(1))\n sequence_id = int(match.group(2))\n\n activity_ids = None\n mat_annot_file = path.join(path.dirname(metadata_file), 'annot_data.mat')\n if path.isfile(mat_annot_file):\n with h5py.File(mat_annot_file, 'r') as f:\n activity_ids = f['activity_annotation'][:].flatten().astype(int)\n\n with h5py.File(metadata_file, 'r') as f:\n keys = f['interesting_frames'].keys()\n for key in keys:\n camera_id = int(re.match(r'camera(\\d)', key).group(1))\n for frame_index in f['interesting_frames'][key]:\n activity_id = None\n if activity_ids is not None:\n activity_id = activity_ids[frame_index]\n 
frame_refs.append(FrameRef(subject_id, sequence_id, camera_id, frame_index, activity_id))\n univ_scale_factors[(subject_id, sequence_id)] = f['scale'][0]\n\n self.data_dir = data_dir\n self.use_aug = use_aug\n self.disable_mask_aug = disable_mask_aug\n self.frame_refs = frame_refs\n self.univ_scale_factors = univ_scale_factors\n self.without_image = without_image\n self.multicrop = False\n\n @staticmethod\n def _mpi_inf_3dhp_to_canonical_skeleton(skel):\n assert skel.size(-2) == MpiInf3dhpSkeletonDesc.n_joints\n\n canonical_joints = [\n MpiInf3dhpSkeletonDesc.joint_names.index(s)\n for s in CanonicalSkeletonDesc.joint_names\n ]\n size = list(skel.size())\n size[-2] = len(canonical_joints)\n canonical_joints_tensor = torch.LongTensor(canonical_joints).unsqueeze(-1).expand(size)\n return skel.gather(-2, canonical_joints_tensor)\n\n def to_canonical_skeleton(self, skel):\n if self.skeleton_desc.canonical:\n return skel\n\n return self._mpi_inf_3dhp_to_canonical_skeleton(skel)\n\n def _get_skeleton_3d(self, index):\n frame_ref = self.frame_refs[index]\n metadata_file = path.join(self.data_dir, frame_ref.metadata_file)\n with h5py.File(metadata_file, 'r') as f:\n # Load the pose joint locations\n original_skel = torch.from_numpy(\n f['joints3d'][frame_ref.camera_id, frame_ref.frame_index]\n )\n\n if original_skel.shape[-2] == MpiInf3dhpSkeletonDesc.n_joints:\n # The training/validation skeletons have 28 joints.\n skel_desc = MpiInf3dhpSkeletonDesc\n elif original_skel.shape[-2] == CanonicalSkeletonDesc.n_joints:\n # The test set skeletons have the 17 canonical joints only.\n skel_desc = CanonicalSkeletonDesc\n else:\n raise Exception('unexpected number of joints: ' + original_skel.shape[-2])\n\n if self.skeleton_desc.canonical:\n if skel_desc == MpiInf3dhpSkeletonDesc:\n original_skel = self._mpi_inf_3dhp_to_canonical_skeleton(original_skel)\n elif skel_desc == CanonicalSkeletonDesc:\n # No conversion necessary.\n pass\n else:\n raise Exception()\n skel_desc = CanonicalSkeletonDesc\n\n return original_skel, skel_desc\n\n def _to_univ_scale(self, skel_3d, skel_desc, univ_scale_factor):\n univ_skel_3d = skel_3d.clone()\n\n # Scale the skeleton to match the universal skeleton size\n if self.preserve_root_joint_at_univ_scale:\n # Scale the skeleton about the root joint position. This should give the same\n # joint position coordinates as the \"univ_annot3\" annotations.\n root = skel_3d[..., skel_desc.root_joint_id:skel_desc.root_joint_id+1, :]\n univ_skel_3d -= root\n univ_skel_3d /= univ_scale_factor\n univ_skel_3d += root\n else:\n # Scale the skeleton about the camera position. 
Useful for breaking depth/scale\n # ambiguity.\n univ_skel_3d /= univ_scale_factor\n\n return univ_skel_3d\n\n def _evaluate_3d(self, index, original_skel, norm_pred, camera_intrinsics, transform_opts):\n assert self.skeleton_desc.canonical, 'can only evaluate canonical skeletons'\n expected, actual = prepare_for_3d_evaluation(original_skel, norm_pred, self,\n camera_intrinsics, transform_opts,\n known_depth=False)\n included_joints = [\n CanonicalSkeletonDesc.joint_names.index(joint_name)\n for joint_name in VNect_Common_Skeleton\n ]\n return gather_3d_metrics(expected, actual, included_joints)\n\n def __len__(self):\n return len(self.frame_refs)\n\n def _build_sample(self, index, orig_camera, orig_image, orig_skel, transform_opts, transform_opts_big):\n frame_ref = self.frame_refs[index]\n # out_width = self.data_specs.input_specs.width\n # out_height = self.data_specs.input_specs.height\n if orig_skel.shape[0] != 17:\n canonical_original_skel = self._mpi_inf_3dhp_to_canonical_skeleton(ensure_homogeneous(orig_skel, d=3)).float()\n else:\n canonical_original_skel = ensure_homogeneous(orig_skel, d=3).float()\n\n ctx = self.create_transformer_context(transform_opts)\n _, img, _ = ctx.transform(image=orig_image)\n\n big_ctx = self.create_transformer_context(transform_opts_big)\n _, img_big, _ = big_ctx.transform(image=orig_image)\n\n\n sample = {\n 'index': index, # Index in the dataset\n\n 'original_skel': canonical_original_skel, \n\n 'camera_original': orig_camera.matrix[:,:-1].float(),\n 'original_img_shape': torch.FloatTensor(orig_image.size),\n }\n\n img_transform = transforms.Compose([transforms.ToTensor()])\n\n if img:\n sample['input'] = self.input_to_tensor(img)\n\n if img_big:\n sample['input_big'] = self.input_to_tensor(img_big)\n sample['input_big_img'] = img_transform(img_big)\n \n # Generate the GT location and Scale of Crop\n \"\"\"14 is the location of the hip in canonical skeleton!\"\"\"\n pelvis_joint = sample['original_skel'][14,:-1].unsqueeze(0) #because of legacy code in utils that take a list of centers\n all_joints = sample['original_skel'][:,:-1]\n sample['world_coord_skel_mm'] = all_joints\n relative_joints = all_joints - pelvis_joint\n\n sample['non_normalized_3d'] = relative_joints\n\n #Normalize the Joints!\n normalized_joints = utils.batch_normalize_canon_human_joints(relative_joints.unsqueeze(0), mpi_3d_Mean, mpi_3d_Std).squeeze(0)\n\n sample['normalized_skel_mm'] = normalized_joints\n sample['pelvis_location_mm'] = pelvis_joint\n\n Ks_px = sample['camera_original']\n \n K = Ks_px.clone()\n K[0,2] = 0.\n K[1,2] = 0.\n P_px = Ks_px.clone()\n\n pose_2d = utils.world_2_camera_coordinates(P_px, all_joints.float())\n sample['pose2d_original'] = pose_2d\n sample['perspective_matrix'] = P_px\n\n if self.focal_diff != 0:\n Ks_px[0,0] *= self.focal_diff\n Ks_px[1,1] *= self.focal_diff\n sample['camera_original'] = Ks_px\n\n \"\"\"generate_gt_scales_from2d\"\"\"\n if self.calculate_scale_from_2d:\n scale = utils.generate_gt_scales_from2d(pose_2d)\n square_scale = torch.tensor([torch.max(scale), torch.max(scale)])\n else:\n scale = utils.generate_gt_scales(K, self.human_height, pelvis_joint, sample['original_img_shape'][0], sample['original_img_shape'][1]) # 2000 is the height in mm\n square_scale = scale.clone()\n\n square_scale_py = square_scale / sample['original_img_shape']\n sample['stn_square_scale_py'] = square_scale_py\n\n location_2d3d = utils.generate_gt_location(P_px, pelvis_joint, sample['original_img_shape'][0], sample['original_img_shape'][1])\n 
sample['crop_location_2d3d'] = location_2d3d\n\n # Location that is centered in the middle of the 2D pose (NOTE: not the same as the location calculation in 2D->3D)\n location = torch.FloatTensor([(torch.max(pose_2d[:,0]) + torch.min(pose_2d[:,0]))/2, (torch.max(pose_2d[:,1]) + torch.min(pose_2d[:,1]))/2])\n\n sample['crop_scale'] = torch.FloatTensor(scale)\n sample['crop_location'] = torch.FloatTensor(location)\n\n return sample\n \n\n def _build_sample_without_image(self, index, orig_skel, orig_camera, img_wh):\n frame_ref = self.frame_refs[index]\n if orig_skel.shape[0] != 17:\n canonical_original_skel = self._mpi_inf_3dhp_to_canonical_skeleton(ensure_homogeneous(orig_skel, d=3)).float()\n else:\n canonical_original_skel = ensure_homogeneous(orig_skel, d=3).float()\n Ks_px_video_cam = orig_camera.matrix[:,:-1].float().unsqueeze(0) #originally was 2048 x 2048, need to resize to 768 x 768\n img_w_h_orig = torch.FloatTensor([2048, 2048]).unsqueeze(0)\n img_w_h_small = torch.FloatTensor([img_wh[0], img_wh[1]])\n Ks_px_image_cam = pcl_util.K_new_resolution_px(Ks_px_video_cam, img_w_h_orig, img_w_h_small).squeeze(0)\n sample = {\n 'index': index, # Index in the dataset\n\n 'original_skel': canonical_original_skel, \n\n # Transformed data\n 'camera_original': Ks_px_image_cam,\n 'original_img_shape': torch.FloatTensor(img_wh)\n\n }\n\n # Generate the GT location and Scale of Crop\n \"\"\"HIP IS Position 14 in \"\"\"\n pelvis_joint = sample['original_skel'][14,:-1].unsqueeze(0) #because of legacy code in utils that take a list of centers\n all_joints = sample['original_skel'][:,:-1]\n sample['world_coord_skel_mm'] = all_joints\n relative_joints = all_joints - pelvis_joint\n\n sample['non_normalized_3d'] = relative_joints\n\n #Normalize the Joints!\n normalized_joints = utils.batch_normalize_canon_human_joints(relative_joints.unsqueeze(0), mpi_3d_Mean, mpi_3d_Std).squeeze(0)\n\n sample['normalized_skel_mm'] = normalized_joints\n sample['pelvis_location_mm'] = pelvis_joint\n\n Ks_px = sample['camera_original']\n \n K = Ks_px.clone()\n K[0,2] = 0.\n K[1,2] = 0.\n P_px = Ks_px.clone()\n\n pose_2d = utils.world_2_camera_coordinates(P_px, all_joints.float())\n sample['pose2d_original'] = pose_2d\n sample['perspective_matrix'] = P_px\n\n if self.focal_diff != 0:\n Ks_px[0,0] *= self.focal_diff\n Ks_px[1,1] *= self.focal_diff\n sample['camera_original'] = Ks_px\n\n \"\"\"generate_gt_scales_from2d\"\"\"\n if self.calculate_scale_from_2d:\n scale = utils.generate_gt_scales_from2d(pose_2d)\n square_scale = torch.tensor([torch.max(scale), torch.max(scale)])\n else:\n scale = utils.generate_gt_scales(K, self.human_height, pelvis_joint, sample['original_img_shape'][0], sample['original_img_shape'][1]) # 2000 is the height in mm\n square_scale = scale.clone()\n\n square_scale_py = square_scale / sample['original_img_shape']\n sample['stn_square_scale_py'] = square_scale_py\n\n location = utils.generate_gt_location(P_px, pelvis_joint, sample['original_img_shape'][0], sample['original_img_shape'][1])\n\n sample['crop_scale'] = torch.FloatTensor(scale)\n sample['crop_location'] = torch.FloatTensor(location)\n\n if self.use_pcl:\n canon_label_2d_with_hip = pose_2d.unsqueeze(0)\n preprocess = pcl_preprocess(1, canon_label_2d_with_hip.shape[1], canon_label_2d_with_hip, sample['original_img_shape'].unsqueeze(0), \\\n sample['camera_original'].unsqueeze(0), location.unsqueeze(0), scale.unsqueeze(0),\\\n normalize=True, use_slant_compensation=self.use_slant_compensation)\n \n sample['preprocess-model_input'] = 
preprocess['model_input'].squeeze(0)\n sample['preprocess-canon_virt_2d'] = preprocess['canon_virt_2d'].squeeze(0)\n sample['preprocess-R_virt2orig'] = preprocess['R_virt2orig'].squeeze(0)\n\n return sample\n\n def __getitem__(self, index):\n if not self.without_image:\n frame_ref = self.frame_refs[index]\n\n skel_3d, skel_desc = self._get_skeleton_3d(index)\n univ_scale_factor = self.univ_scale_factors[(frame_ref.subject_id, frame_ref.sequence_id)]\n orig_skel = self._to_univ_scale(skel_3d, skel_desc, univ_scale_factor)\n\n if self.without_image:\n orig_image = None\n img_w = img_h = 768\n else:\n orig_image = Image.open(path.join(self.data_dir, frame_ref.image_file))\n img_w, img_h = orig_image.size\n\n with open(path.join(self.data_dir, frame_ref.camera_file), 'r') as f:\n cam_cal = parse_camera_calibration(f)[frame_ref.camera_id]\n\n # Correct the camera to account for the fact that video frames were\n # stored at a lower resolution.\n orig_camera = cam_cal['intrinsics'].clone()\n old_w = cam_cal['image_width']\n old_h = cam_cal['image_height']\n orig_camera.scale_image(img_w / old_w, img_h / old_h)\n\n extrinsics = cam_cal['extrinsics']\n\n # Bounding box details\n skel_2d = orig_camera.project_cartesian(skel_3d)\n min_x = skel_2d[:, 0].min().item()\n max_x = skel_2d[:, 0].max().item()\n min_y = skel_2d[:, 1].min().item()\n max_y = skel_2d[:, 1].max().item()\n bb_cx = (min_x + max_x) / 2\n bb_cy = (min_y + max_y) / 2\n bb_size = 1.5 * max(max_x - min_x, max_y - min_y)\n\n img_short_side = min(img_h, img_w)\n out_width = self.data_specs.input_specs.width\n out_height = self.data_specs.input_specs.height\n\n if self.multicrop:\n samples = []\n for aug_hflip in [False, True]:\n for offset in [(0, 0), (-1, 0), (0, -1), (1, 0), (0, 1)]:\n aug_x = offset[0] * 8\n aug_y = offset[1] * 8\n\n transform_opts = {\n 'in_camera': orig_camera,\n 'in_width': img_w,\n 'in_height': img_h,\n 'centre_x': bb_cx + aug_x,\n 'centre_y': bb_cy + aug_y,\n 'rotation': 0,\n 'scale': bb_size / img_short_side,\n 'hflip_indices': self.skeleton_desc.hflip_indices,\n 'hflip': aug_hflip,\n 'out_width': out_width,\n 'out_height': out_height,\n 'brightness': 1,\n 'contrast': 1,\n 'saturation': 1,\n 'hue': 0,\n }\n\n samples.append(self._build_sample(index, orig_camera, orig_image, orig_skel,\n transform_opts, extrinsics))\n\n return collate(samples)\n else:\n aug_bg = aug_ub = aug_lb = False\n aug_hflip = False\n aug_brightness = aug_contrast = aug_saturation = 1.0\n aug_hue = 0.0\n aug_x = aug_y = 0.0\n aug_scale = 1.0\n aug_rot = 0\n\n if self.use_aug:\n if not self.disable_mask_aug:\n aug_bg = frame_ref.bg_augmentable and np.random.uniform() < 0.6\n aug_ub = frame_ref.ub_augmentable and np.random.uniform() < 0.2\n aug_lb = frame_ref.lb_augmentable and np.random.uniform() < 0.5\n aug_hflip = np.random.uniform() < 0.5\n if np.random.uniform() < 0.3:\n aug_brightness = np.random.uniform(0.8, 1.2)\n if np.random.uniform() < 0.3:\n aug_contrast = np.random.uniform(0.8, 1.2)\n if np.random.uniform() < 0.3:\n aug_saturation = np.random.uniform(0.8, 1.2)\n if np.random.uniform() < 0.3:\n aug_hue = np.random.uniform(-0.1, 0.1)\n aug_x = np.random.uniform(-16, 16)\n aug_y = np.random.uniform(-16, 16)\n aug_scale = np.random.uniform(0.9, 1.1)\n if np.random.uniform() < 0.4:\n aug_rot = np.clip(np.random.normal(0, 30), -30, 30)\n\n if orig_image:\n if aug_bg:\n orig_image = augment_background(\n orig_image,\n Image.open(path.join(self.data_dir, frame_ref.bg_mask_file)),\n random_background()\n )\n if aug_ub:\n orig_image = 
augment_clothing(\n orig_image,\n Image.open(path.join(self.data_dir, frame_ref.ub_mask_file)),\n random_texture()\n )\n if aug_lb:\n orig_image = augment_clothing(\n orig_image,\n Image.open(path.join(self.data_dir, frame_ref.lb_mask_file)),\n random_texture()\n )\n\n transform_opts = {\n 'in_camera': orig_camera,\n 'in_width': img_w,\n 'in_height': img_h,\n 'centre_x': bb_cx + aug_x,\n 'centre_y': bb_cy + aug_y,\n 'rotation': aug_rot,\n 'scale': bb_size * aug_scale / img_short_side,\n 'hflip_indices': self.skeleton_desc.hflip_indices,\n 'hflip': aug_hflip,\n 'out_width': out_width,\n 'out_height': out_height,\n 'brightness': aug_brightness,\n 'contrast': aug_contrast,\n 'saturation': aug_saturation,\n 'hue': aug_hue,\n }\n\n transform_opts_big = {\n 'in_camera': orig_camera,\n 'in_width': img_w,\n 'in_height': img_h,\n 'centre_x': bb_cx + aug_x,\n 'centre_y': bb_cy + aug_y,\n 'rotation': aug_rot,\n 'scale': bb_size * aug_scale / img_short_side,\n 'hflip_indices': self.skeleton_desc.hflip_indices,\n 'hflip': aug_hflip,\n 'out_width': 256,\n 'out_height': 256,\n 'brightness': aug_brightness,\n 'contrast': aug_contrast,\n 'saturation': aug_saturation,\n 'hue': aug_hue,\n }\n\n return self._build_sample(index, orig_camera, orig_image, orig_skel, transform_opts, transform_opts_big)\n\n else:\n # dataloader when no image is required\n frame_ref = self.frame_refs[index]\n skel_3d, skel_desc = self._get_skeleton_3d(index)\n univ_scale_factor = self.univ_scale_factors[(frame_ref.subject_id, frame_ref.sequence_id)]\n img_w = img_h = 768\n\n with open(path.join(self.data_dir, frame_ref.camera_file), 'r') as f:\n cam_cal = parse_camera_calibration(f)[frame_ref.camera_id]\n\n # Correct the camera to account for the fact that video frames were\n # stored at a lower resolution.\n orig_camera = cam_cal['intrinsics'].clone()\n orig_skel = self._to_univ_scale(skel_3d, skel_desc, univ_scale_factor)\n return self._build_sample_without_image(index, orig_skel, orig_camera, [img_w, img_h])" ]
[ [ "numpy.random.uniform", "torch.FloatTensor", "torch.min", "torch.LongTensor", "numpy.random.normal", "torch.from_numpy", "torch.max", "numpy.array", "numpy.random.randint" ] ]
vnshanmukh/pvoutput
[ "9773c60445b38b7b67f8d5e10316379f47090549" ]
[ "pvoutput/pvoutput.py" ]
[ "import logging\nimport os\nimport time\nimport warnings\nfrom datetime import date, datetime, timedelta\nfrom io import StringIO\nfrom typing import Dict, Iterable, List, Optional, Union\nfrom urllib.parse import urljoin\n\nimport numpy as np\nimport pandas as pd\nimport requests\nimport tables\n\nfrom pvoutput.consts import (\n BASE_URL,\n CONFIG_FILENAME,\n ONE_DAY,\n PV_OUTPUT_DATE_FORMAT,\n RATE_LIMIT_PARAMS_TO_API_HEADERS,\n)\nfrom pvoutput.daterange import DateRange, merge_date_ranges_to_years\nfrom pvoutput.exceptions import NoStatusFound, RateLimitExceeded\nfrom pvoutput.utils import (\n _get_param_from_config_file,\n _get_response,\n _print_and_log,\n get_date_ranges_to_download,\n sort_and_de_dupe_pv_system,\n system_id_to_hdf_key,\n)\n\n_LOG = logging.getLogger(\"pvoutput\")\n\n\nclass PVOutput:\n \"\"\"\n Attributes:\n api_key\n system_id\n rate_limit_remaining\n rate_limit_total\n rate_limit_reset_time\n data_service_url\n \"\"\"\n\n def __init__(\n self,\n api_key: str = None,\n system_id: str = None,\n config_filename: Optional[str] = CONFIG_FILENAME,\n data_service_url: Optional[str] = None,\n ):\n \"\"\"\n Args:\n api_key: Your API key from PVOutput.org.\n system_id: Your system ID from PVOutput.org. If you don't have a\n PV system then you can register with PVOutput.org and select\n the 'energy consumption only' box.\n config_filename: Optional, the filename of the .yml config file.\n data_service_url: Optional. If you have subscribed to\n PVOutput.org's data service then add the data service URL here.\n This string must end in '.org'.\n \"\"\"\n\n self.api_key = api_key\n self.system_id = system_id\n self.rate_limit_remaining = None\n self.rate_limit_total = None\n self.rate_limit_reset_time = None\n self.data_service_url = data_service_url\n\n # Set from config file if None\n for param_name in [\"api_key\", \"system_id\"]:\n if getattr(self, param_name) is None:\n try:\n param_value_from_config = _get_param_from_config_file(\n param_name, config_filename\n )\n except Exception as e:\n msg = (\n \"Error loading configuration parameter {param_name}\"\n \" from config file {filename}. Either pass\"\n \" {param_name} into PVOutput constructor, or create\"\n \" config file {filename}. {exception}\".format(\n param_name=param_name, filename=CONFIG_FILENAME, exception=e\n )\n )\n print(msg)\n _LOG.exception(msg)\n raise\n setattr(self, param_name, param_value_from_config)\n # Convert to strings\n setattr(self, param_name, str(getattr(self, param_name)))\n\n # Check for data_service_url\n if self.data_service_url is None:\n try:\n self.data_service_url = _get_param_from_config_file(\n \"data_service_url\", config_filename\n )\n except KeyError:\n pass\n except FileNotFoundError:\n pass\n\n if self.data_service_url is not None:\n if not self.data_service_url.strip(\"/\").endswith(\".org\"):\n raise ValueError(\"data_service_url must end in '.org'\")\n\n def search(\n self,\n query: str,\n lat: Optional[float] = None,\n lon: Optional[float] = None,\n include_country: bool = True,\n **kwargs\n ) -> pd.DataFrame:\n \"\"\"Search for PV systems.\n\n Some quirks of the PVOutput.org API:\n - The maximum number of results returned by PVOutput.org is 30.\n If the number of returned results is 30, then there is no\n indication of whether there are exactly 30 search results,\n or if there are more than 30. 
Also, there is no way to\n request additional 'pages' of search results.\n - The maximum search radius is 25km\n\n Args:\n query: string, see https://pvoutput.org/help.html#search\n e.g. '5km'.\n lat: float, e.g. 52.0668589\n lon: float, e.g. -1.3484038\n include_country: bool, whether or not to include the country name\n with the returned postcode.\n\n Returns:\n pd.DataFrame, one row per search results. Index is PV system ID.\n Columns:\n name,\n system_DC_capacity_W,\n address, # If `include_country` is True then address is\n # 'country> <postcode>',\n # else address is '<postcode>'.\n orientation,\n num_outputs,\n last_output,\n panel,\n inverter,\n distance_km,\n latitude,\n longitude\n \"\"\"\n api_params = {\"q\": query, \"country\": int(include_country)}\n\n if lat is not None and lon is not None:\n api_params[\"ll\"] = \"{:f},{:f}\".format(lat, lon)\n\n pv_systems_text = self._api_query(service=\"search\", api_params=api_params, **kwargs)\n\n pv_systems = pd.read_csv(\n StringIO(pv_systems_text),\n names=[\n \"name\",\n \"system_DC_capacity_W\",\n \"address\",\n \"orientation\",\n \"num_outputs\",\n \"last_output\",\n \"system_id\",\n \"panel\",\n \"inverter\",\n \"distance_km\",\n \"latitude\",\n \"longitude\",\n ],\n index_col=\"system_id\",\n )\n\n return pv_systems\n\n def get_status(\n self, pv_system_id: int, date: Union[str, datetime], historic: bool = True, **kwargs\n ) -> pd.DataFrame:\n \"\"\"Get PV system status (e.g. power generation) for one day.\n\n The returned DataFrame will be empty if the PVOutput API\n returns 'status 400: No status found'.\n\n Args:\n pv_system_id: int\n date: str in format YYYYMMDD; or datetime\n (localtime of the PV system)\n\n Returns:\n pd.DataFrame:\n index: datetime (DatetimeIndex, localtime of the PV system)\n columns: (all np.float64):\n cumulative_energy_gen_Wh,\n energy_efficiency_kWh_per_kW,\n instantaneous_power_gen_W,\n average_power_gen_W,\n power_gen_normalised,\n energy_consumption_Wh,\n power_demand_W,\n temperature_C,\n voltage\n \"\"\"\n _LOG.info(\"system_id %d: Requesting system status for %s\", pv_system_id, date)\n date = date_to_pvoutput_str(date)\n _check_date(date)\n\n api_params = {\n \"d\": date, # date, YYYYMMDD, localtime of the PV system\n \"h\": int(historic == True), # We want historical data.\n \"limit\": 288, # API limit is 288 (num of 5-min periods per day).\n \"ext\": 0, # Extended data; we don't want extended data.\n \"sid1\": pv_system_id, # SystemID.\n }\n\n try:\n pv_system_status_text = self._api_query(\n service=\"getstatus\", api_params=api_params, **kwargs\n )\n except NoStatusFound:\n _LOG.info(\"system_id %d: No status found for date %s\", pv_system_id, date)\n pv_system_status_text = \"\"\n\n # See https://pvoutput.org/help.html#api-getstatus but make sure\n # you read the 'History Query' subsection, as a historical query\n # has slightly different return columns compared to a non-historical\n # query!\n columns = (\n [\n \"cumulative_energy_gen_Wh\",\n \"energy_efficiency_kWh_per_kW\",\n \"instantaneous_power_gen_W\",\n \"average_power_gen_W\",\n \"power_gen_normalised\",\n \"energy_consumption_Wh\",\n \"power_demand_W\",\n \"temperature_C\",\n \"voltage\",\n ]\n if historic\n else [\n \"cumulative_energy_gen_Wh\",\n \"instantaneous_power_gen_W\",\n \"energy_consumption_Wh\",\n \"power_demand_W\",\n \"power_gen_normalised\",\n \"temperature_C\",\n \"voltage\",\n ]\n )\n\n pv_system_status = pd.read_csv(\n StringIO(pv_system_status_text),\n lineterminator=\";\",\n names=[\"date\", \"time\"] + 
columns,\n parse_dates={\"datetime\": [\"date\", \"time\"]},\n index_col=[\"datetime\"],\n dtype={col: np.float64 for col in columns},\n ).sort_index()\n\n return pv_system_status\n\n def get_batch_status(\n self,\n pv_system_id: int,\n date_to: Optional[Union[str, datetime]] = None,\n max_retries: Optional[int] = 1000,\n **kwargs\n ) -> Union[None, pd.DataFrame]:\n \"\"\"Get batch PV system status (e.g. power generation).\n\n The returned DataFrame will be empty if the PVOutput API\n returns 'status 400: No status found'.\n\n Data returned is limited to the last 366 days per request.\n To retrieve older data, use the date_to parameter.\n\n The PVOutput getbatchstatus API is asynchronous. When it's first\n called, it replies to say 'accepted'. This function will then\n wait a minute and call the API again to see if the data is ready.\n Set `max_retries` to 1 if you want to return immediately, even\n if data isn't ready yet (and hence this function will return None)\n\n https://pvoutput.org/help.html#dataservice-getbatchstatus\n\n Args:\n pv_system_id: int\n date_to: str in format YYYYMMDD; or datetime\n (localtime of the PV system). The returned timeseries will\n include 366 days of data: from YYYY-1MMDD to YYYYMMDD inclusive\n max_retries: int, number of times to retry after receiving\n a '202 Accepted' request. Set `max_retries` to 1 if you want\n to return immediately, even if data isn't ready yet (and hence\n this function will return None).\n\n Returns:\n None (if data isn't ready after retrying max_retries times) or\n pd.DataFrame:\n index: datetime (DatetimeIndex, localtime of the PV system)\n columns: (all np.float64):\n cumulative_energy_gen_Wh,\n instantaneous_power_gen_W,\n temperature_C,\n voltage\n \"\"\"\n api_params = {\"sid1\": pv_system_id}\n\n _set_date_param(date_to, api_params, \"dt\")\n\n for retry in range(max_retries):\n try:\n pv_system_status_text = self._api_query(\n service=\"getbatchstatus\", api_params=api_params, use_data_service=True, **kwargs\n )\n except NoStatusFound:\n _LOG.info(\"system_id %d: No status found for date_to %s\", pv_system_id, date_to)\n pv_system_status_text = \"\"\n break\n\n if \"Accepted 202\" in pv_system_status_text:\n if retry == 0:\n _print_and_log(\"Request accepted.\")\n if retry < max_retries - 1:\n _print_and_log(\"Sleeping for 1 minute.\")\n time.sleep(60)\n else:\n _print_and_log(\n \"Call get_batch_status again in a minute to see if\" \" results are ready.\"\n )\n else:\n break\n else:\n return\n\n return _process_batch_status(pv_system_status_text)\n\n def get_metadata(self, pv_system_id: int, **kwargs) -> pd.Series:\n \"\"\"Get metadata for a single PV system.\n\n Args:\n pv_system_id: int\n\n Returns:\n pd.Series. 
Index is:\n name,\n system_DC_capacity_W,\n address,\n num_panels,\n panel_capacity_W_each,\n panel_brand,\n num_inverters,\n inverter_capacity_W,\n inverter_brand,\n orientation,\n array_tilt_degrees,\n shade,\n install_date,\n latitude,\n longitude,\n status_interval_minutes,\n secondary_num_panels,\n secondary_panel_capacity_W_each,\n secondary_orientation,\n secondary_array_tilt_degrees\n \"\"\"\n pv_metadata_text = self._api_query(\n service=\"getsystem\",\n api_params={\n \"array2\": 1, # Provide data about secondary array, if present.\n \"tariffs\": 0,\n \"teams\": 0,\n \"est\": 0,\n \"donations\": 0,\n \"sid1\": pv_system_id, # SystemID\n \"ext\": 0, # Include extended data?\n },\n **kwargs\n )\n\n pv_metadata = pd.read_csv(\n StringIO(pv_metadata_text),\n lineterminator=\";\",\n names=[\n \"name\",\n \"system_DC_capacity_W\",\n \"address\",\n \"num_panels\",\n \"panel_capacity_W_each\",\n \"panel_brand\",\n \"num_inverters\",\n \"inverter_capacity_W\",\n \"inverter_brand\",\n \"orientation\",\n \"array_tilt_degrees\",\n \"shade\",\n \"install_date\",\n \"latitude\",\n \"longitude\",\n \"status_interval_minutes\",\n \"secondary_num_panels\",\n \"secondary_panel_capacity_W_each\",\n \"secondary_orientation\",\n \"secondary_array_tilt_degrees\",\n ],\n parse_dates=[\"install_date\"],\n nrows=1,\n ).squeeze()\n pv_metadata[\"system_id\"] = pv_system_id\n pv_metadata.name = pv_system_id\n return pv_metadata\n\n def get_statistic(\n self,\n pv_system_id: int,\n date_from: Optional[Union[str, date]] = None,\n date_to: Optional[Union[str, date]] = None,\n **kwargs\n ) -> pd.DataFrame:\n \"\"\"Get summary stats for a single PV system.\n\n Args:\n pv_system_id: int\n date_from\n date_to\n\n Returns:\n pd.DataFrame:\n total_energy_gen_Wh,\n energy_exported_Wh,\n average_daily_energy_gen_Wh,\n minimum_daily_energy_gen_Wh,\n maximum_daily_energy_gen_Wh,\n average_efficiency_kWh_per_kW,\n num_outputs, # The number of days for which there's >= 1 val.\n actual_date_from,\n actual_date_to,\n record_efficiency_kWh_per_kW,\n record_efficiency_date,\n query_date_from,\n query_date_to\n \"\"\"\n if date_from and not date_to:\n date_to = pd.Timestamp.now().date()\n if date_to and not date_from:\n date_from = pd.Timestamp(\"1900-01-01\").date()\n\n api_params = {\n \"c\": 0, # consumption and import\n \"crdr\": 0, # credits / debits\n \"sid1\": pv_system_id, # SystemID\n }\n\n _set_date_param(date_from, api_params, \"df\")\n _set_date_param(date_to, api_params, \"dt\")\n\n try:\n pv_metadata_text = self._api_query(\n service=\"getstatistic\", api_params=api_params, **kwargs\n )\n except NoStatusFound:\n pv_metadata_text = \"\"\n\n columns = [\n \"total_energy_gen_Wh\",\n \"energy_exported_Wh\",\n \"average_daily_energy_gen_Wh\",\n \"minimum_daily_energy_gen_Wh\",\n \"maximum_daily_energy_gen_Wh\",\n \"average_efficiency_kWh_per_kW\",\n \"num_outputs\",\n \"actual_date_from\",\n \"actual_date_to\",\n \"record_efficiency_kWh_per_kW\",\n \"record_efficiency_date\",\n ]\n date_cols = [\"actual_date_from\", \"actual_date_to\", \"record_efficiency_date\"]\n numeric_cols = set(columns) - set(date_cols)\n pv_metadata = pd.read_csv(\n StringIO(pv_metadata_text),\n names=columns,\n dtype={col: np.float32 for col in numeric_cols},\n parse_dates=date_cols,\n )\n if pv_metadata.empty:\n data = {col: np.float32(np.NaN) for col in numeric_cols}\n data.update({col: pd.NaT for col in date_cols})\n pv_metadata = pd.DataFrame(data, index=[pv_system_id])\n else:\n pv_metadata.index = [pv_system_id]\n\n 
pv_metadata[\"query_date_from\"] = pd.Timestamp(date_from) if date_from else pd.NaT\n pv_metadata[\"query_date_to\"] = pd.Timestamp(date_to) if date_to else pd.Timestamp.now()\n return pv_metadata\n\n def _get_statistic_with_cache(\n self,\n store_filename: str,\n pv_system_id: int,\n date_from: Optional[Union[str, date]] = None,\n date_to: Optional[Union[str, date]] = None,\n **kwargs\n ) -> pd.Series:\n \"\"\"Will try to get stats from store_filename['statistics']. If this\n fails, or if date_to > query_date_to, or if\n date_from < query_date_from, then will call the API. Note that the aim\n of this function is just to find the relevant actual_date_from and\n actual_date_to, so this function does not respect the other params.\n \"\"\"\n\n if date_from:\n date_from = pd.Timestamp(date_from).date()\n if date_to:\n date_to = pd.Timestamp(date_to).date()\n\n def _get_fresh_statistic():\n _LOG.info(\"pv_system %d: Getting fresh statistic.\", pv_system_id)\n stats = self.get_statistic(pv_system_id, **kwargs)\n with pd.HDFStore(store_filename, mode=\"a\") as store:\n try:\n store.remove(key=\"statistics\", where=\"index=pv_system_id\")\n except KeyError:\n pass\n store.append(key=\"statistics\", value=stats)\n return stats\n\n try:\n stats = pd.read_hdf(store_filename, key=\"statistics\", where=\"index=pv_system_id\")\n except (FileNotFoundError, KeyError):\n return _get_fresh_statistic()\n\n if stats.empty:\n return _get_fresh_statistic()\n\n query_date_from = stats.iloc[0][\"query_date_from\"]\n query_date_to = stats.iloc[0][\"query_date_to\"]\n\n if (\n not pd.isnull(date_from)\n and not pd.isnull(query_date_from)\n and date_from < query_date_from.date()\n ):\n return _get_fresh_statistic()\n\n if not pd.isnull(date_to) and date_to > query_date_to.date():\n return _get_fresh_statistic()\n\n return stats\n\n def download_multiple_systems_to_disk(\n self,\n system_ids: Iterable[int],\n start_date: datetime,\n end_date: datetime,\n output_filename: str,\n timezone: Optional[str] = None,\n min_data_availability: Optional[float] = 0.5,\n use_get_batch_status_if_available: Optional[bool] = True,\n ):\n \"\"\"Download multiple PV system IDs to disk.\n\n Data is saved to `output_filename` in HDF5 format. The exact data\n format is documented in\n https://github.com/openclimatefix/pvoutput/blob/master/docs/dataset.md\n\n This function is designed to be run for days (!) downloading\n gigabytes of PV data :) As such, this function can be safely\n interrupted and re-started. All the state required to re-start\n is stored in the HDF5 file.\n\n Add appropriate handlers the Python logger `pvoutput` to see progress.\n\n Args:\n system_ids: List of PV system IDs to download.\n start_date: Start of date range to download.\n end_date: End of date range to download.\n output_filename: HDF5 filename to write data to.\n timezone: String representation of timezone of timeseries data.\n e.g. 'Europe/London'.\n min_data_availability: A float in the range [0, 1]. 1 means only\n accept PV systems which have no days of missing data. 0 means\n accept all PV systems, no matter if they have missing data.\n Note that the data availability is measured against the date\n range for which the PV system has data available, not from\n the date range passed into this function.\n use_get_batch_status_if_available: Bool. 
If true then will use\n PVOutput's getbatchstatus API (which must be paid for, and\n `data_service_url` must be set in `~/.pvoutput.yml` or when\n initialising the PVOutput object).\n \"\"\"\n n = len(system_ids)\n for i, pv_system_id in enumerate(system_ids):\n _LOG.info(\"**********************\")\n msg = \"system_id {:d}: {:d} of {:d} ({:%})\".format(pv_system_id, i + 1, n, (i + 1) / n)\n _LOG.info(msg)\n print(\"\\r\", msg, end=\"\", flush=True)\n\n # Sorted list of DateRange objects. For each DateRange,\n # we need to download from start_date to end_date inclusive.\n date_ranges_to_download = get_date_ranges_to_download(\n output_filename, pv_system_id, start_date, end_date\n )\n\n # How much data is actually available?\n date_ranges_to_download = self._filter_date_range(\n output_filename, pv_system_id, date_ranges_to_download, min_data_availability\n )\n\n if not date_ranges_to_download:\n _LOG.info(\"system_id %d: No data left to download :)\", pv_system_id)\n continue\n\n _LOG.info(\n \"system_id %d: Will download these date ranges: %s\",\n pv_system_id,\n date_ranges_to_download,\n )\n\n if use_get_batch_status_if_available:\n if self.data_service_url:\n self._download_multiple_using_get_batch_status(\n output_filename, pv_system_id, date_ranges_to_download, timezone\n )\n else:\n raise ValueError(\"data_service_url is not set!\")\n else:\n self._download_multiple_using_get_status(\n output_filename, pv_system_id, date_ranges_to_download, timezone\n )\n\n def get_insolation_forecast(\n self,\n date: Union[str, datetime],\n pv_system_id: Optional[int] = None,\n timezone: Optional[str] = None,\n lat: Optional[float] = None,\n lon: Optional[float] = None,\n **kwargs\n ):\n \"\"\"Get Insolation data for a given site, or a given location defined by\n longitude and latitude. This is the estimated output for the site\n based on ideal weather conditions. Also factors in site age, reducing\n ouput by 1% each year, shade and orientation. Need donation mode enabled.\n See https://pvoutput.org/help.html#api-getinsolation\n\n Args:\n date: str in format YYYYMMDD; or datetime\n (localtime of the PV system)\n pv_system_id: int\n timezone: str\n lat: float e.g. -27.4676\n lon: float e.g. 
153.0279\n **kwargs:\n\n\n Returns:\n\n \"\"\"\n date = date_to_pvoutput_str(date)\n _check_date(date, prediction=True)\n api_params = {\n \"d\": date, # date, YYYYMMDD, localtime of the PV system\n \"sid1\": pv_system_id, # SystemID.\n \"tz\": timezone, # defaults to configured timezone of system otherwise GMT\n }\n if lat is not None and lon is not None:\n api_params[\"ll\"] = \"{:f},{:f}\".format(lat, lon)\n\n try:\n pv_insolation_text = self._api_query(\n service=\"getinsolation\", api_params=api_params, **kwargs\n )\n except NoStatusFound:\n _LOG.info(\"system_id %d: No status found for date %s\", pv_system_id, date)\n pv_insolation_text = \"\"\n\n columns = [\"predicted_power_gen_W\", \"predicted_cumulative_energy_gen_Wh\"]\n pv_insolation = pd.read_csv(\n StringIO(pv_insolation_text),\n lineterminator=\";\",\n names=[\"time\"] + columns,\n dtype={col: np.float64 for col in columns},\n ).sort_index()\n pv_insolation.index = pd.to_datetime(\n date + \" \" + pv_insolation.time, format=\"%Y-%m-%d %H:%M\"\n )\n pv_insolation.drop(\"time\", axis=1, inplace=True)\n return pv_insolation\n\n def _filter_date_range(\n self,\n store_filename: str,\n system_id: int,\n date_ranges: Iterable[DateRange],\n min_data_availability: Optional[float] = 0.5,\n ) -> List[DateRange]:\n \"\"\"Check getstatistic to see if system_id has data for all date ranges.\n\n Args:\n system_id: PV system ID.\n store_filename: HDF5 filename to cache statistics to / from.\n date_ranges: List of DateRange objects.\n min_data_availability: A float in the range [0, 1]. 1 means only\n accept PV systems which have no days of missing data. 0 means\n accept all PV systems, no matter if they have missing data.\n \"\"\"\n if not date_ranges:\n return date_ranges\n\n stats = self._get_statistic_with_cache(\n store_filename,\n system_id,\n date_to=date_ranges[-1].end_date,\n wait_if_rate_limit_exceeded=True,\n ).squeeze()\n\n if pd.isnull(stats[\"actual_date_from\"]) or pd.isnull(stats[\"actual_date_to\"]):\n _LOG.info(\"system_id %d: Stats say there is no data!\", system_id)\n return []\n\n timeseries_date_range = DateRange(stats[\"actual_date_from\"], stats[\"actual_date_to\"])\n\n data_availability = stats[\"num_outputs\"] / (timeseries_date_range.total_days() + 1)\n\n if data_availability < min_data_availability:\n _LOG.info(\n \"system_id %d: Data availability too low! 
Only %.0f %%.\",\n system_id,\n data_availability * 100,\n )\n return []\n\n new_date_ranges = []\n for date_range in date_ranges:\n new_date_range = date_range.intersection(timeseries_date_range)\n if new_date_range:\n new_date_ranges.append(new_date_range)\n return new_date_ranges\n\n def _download_multiple_using_get_batch_status(\n self, output_filename, pv_system_id, date_ranges_to_download, timezone: Optional[str] = None\n ):\n years = merge_date_ranges_to_years(date_ranges_to_download)\n dates_to = [year.end_date for year in years]\n total_rows = self._download_multiple_worker(\n output_filename, pv_system_id, dates_to, timezone, use_get_status=False\n )\n\n # Re-load data, sort, remove duplicate indicies, append back\n if total_rows:\n with pd.HDFStore(output_filename, mode=\"a\", complevel=9) as store:\n sort_and_de_dupe_pv_system(store, pv_system_id)\n\n def _download_multiple_using_get_status(\n self, output_filename, pv_system_id, date_ranges_to_download, timezone: Optional[str] = None\n ):\n for date_range in date_ranges_to_download:\n dates = date_range.date_range()\n self._download_multiple_worker(\n output_filename, pv_system_id, dates, timezone, use_get_status=True\n )\n\n def _download_multiple_worker(\n self, output_filename, pv_system_id, dates, timezone, use_get_status\n ) -> int:\n \"\"\"\n Returns:\n total number of rows downloaded\n \"\"\"\n total_rows = 0\n for date_to_load in dates:\n _LOG.info(\"system_id %d: Requesting date: %s\", pv_system_id, date_to_load)\n datetime_of_api_request = pd.Timestamp.utcnow()\n if use_get_status:\n timeseries = self.get_status(\n pv_system_id, date_to_load, wait_if_rate_limit_exceeded=True\n )\n else:\n timeseries = self.get_batch_status(pv_system_id, date_to=date_to_load)\n if timeseries.empty:\n _LOG.info(\n \"system_id %d: Got empty timeseries back for %s\", pv_system_id, date_to_load\n )\n if use_get_status:\n _append_missing_date_range(\n output_filename,\n pv_system_id,\n date_to_load,\n date_to_load,\n datetime_of_api_request,\n )\n else:\n _append_missing_date_range(\n output_filename,\n pv_system_id,\n date_to_load - timedelta(days=365),\n date_to_load,\n datetime_of_api_request,\n )\n else:\n total_rows += len(timeseries)\n timeseries = timeseries.tz_localize(timezone)\n _LOG.info(\n \"system_id: %d: %d rows retrieved: %s to %s\",\n pv_system_id,\n len(timeseries),\n timeseries.index[0],\n timeseries.index[-1],\n )\n if use_get_status:\n check_pv_system_status(timeseries, date_to_load)\n else:\n _record_gaps(\n output_filename,\n pv_system_id,\n date_to_load,\n timeseries,\n datetime_of_api_request,\n )\n timeseries[\"datetime_of_API_request\"] = datetime_of_api_request\n timeseries[\"query_date\"] = pd.Timestamp(date_to_load)\n key = system_id_to_hdf_key(pv_system_id)\n with pd.HDFStore(output_filename, mode=\"a\", complevel=9) as store:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", tables.NaturalNameWarning)\n store.append(key=key, value=timeseries, data_columns=True)\n\n _LOG.info(\"system_id %d: %d total rows downloaded\", pv_system_id, total_rows)\n return total_rows\n\n def _api_query(\n self,\n service: str,\n api_params: Dict,\n wait_if_rate_limit_exceeded: bool = False,\n use_data_service: bool = False,\n ) -> str:\n \"\"\"Send API request to PVOutput.org and return content text.\n\n Args:\n service: string, e.g. 
'search' or 'getstatus'\n api_params: dict\n wait_if_rate_limit_exceeded: bool\n use_data_service: bool\n\n Raises:\n NoStatusFound\n RateLimitExceeded\n \"\"\"\n get_response_func = (\n self._get_data_service_response if use_data_service else self._get_api_response\n )\n\n try:\n response = get_response_func(service, api_params)\n except Exception as e:\n _LOG.exception(e)\n raise\n\n try:\n return self._process_api_response(response)\n except RateLimitExceeded:\n msg = \"PVOutput.org API rate limit exceeded!\" \" Rate limit will be reset at {}\".format(\n self.rate_limit_reset_time\n )\n _print_and_log(msg)\n if wait_if_rate_limit_exceeded:\n self.wait_for_rate_limit_reset()\n return self._api_query(service, api_params, wait_if_rate_limit_exceeded=False)\n\n raise RateLimitExceeded(response, msg)\n\n def _get_api_response(self, service: str, api_params: Dict) -> requests.Response:\n \"\"\"\n Args:\n service: string, e.g. 'search', 'getstatus'\n api_params: dict\n \"\"\"\n self._check_api_params()\n # Create request headers\n headers = {\n \"X-Rate-Limit\": \"1\",\n \"X-Pvoutput-Apikey\": self.api_key,\n \"X-Pvoutput-SystemId\": self.system_id,\n }\n\n api_url = urljoin(BASE_URL, \"service/r2/{}.jsp\".format(service))\n\n return _get_response(api_url, api_params, headers)\n\n def _get_data_service_response(self, service: str, api_params: Dict) -> requests.Response:\n \"\"\"\n Args:\n service: string, e.g. 'getbatchstatus'\n api_params: dict\n \"\"\"\n self._check_api_params()\n if self.data_service_url is None:\n raise ValueError(\"data_service_url must be set to use the data service!\")\n\n headers = {\"X-Rate-Limit\": \"1\"}\n api_params = api_params.copy()\n api_params[\"key\"] = self.api_key\n api_params[\"sid\"] = self.system_id\n\n api_url = urljoin(self.data_service_url, \"service/r2/{}.jsp\".format(service))\n\n return _get_response(api_url, api_params, headers)\n\n def _check_api_params(self):\n # Check we have relevant login details:\n for param_name in [\"api_key\", \"system_id\"]:\n if getattr(self, param_name) is None:\n raise ValueError(\"Please set the {} parameter.\".format(param_name))\n\n def _set_rate_limit_params(self, headers):\n for param_name, header_key in RATE_LIMIT_PARAMS_TO_API_HEADERS.items():\n header_value = int(headers[header_key])\n setattr(self, param_name, header_value)\n\n self.rate_limit_reset_time = pd.Timestamp.utcfromtimestamp(self.rate_limit_reset_time)\n self.rate_limit_reset_time = self.rate_limit_reset_time.tz_localize(\"utc\")\n\n _LOG.debug(\"%s\", self.rate_limit_info())\n\n def rate_limit_info(self) -> Dict:\n info = {}\n for param_name in RATE_LIMIT_PARAMS_TO_API_HEADERS:\n info[param_name] = getattr(self, param_name)\n return info\n\n def _process_api_response(self, response: requests.Response) -> str:\n \"\"\"Turns an API response into text.\n\n Args:\n response: from _get_api_response()\n\n Returns:\n content of the response.\n\n Raises:\n UnicodeDecodeError\n NoStatusFound\n RateLimitExceeded\n \"\"\"\n if response.status_code == 400:\n raise NoStatusFound(response=response)\n\n if response.status_code != 403:\n try:\n response.raise_for_status()\n except Exception as e:\n msg = \"Bad status code! Response content = {}. 
Exception = {}\".format(\n response.content, e\n )\n _LOG.exception(msg)\n raise e.__class__(msg)\n\n self._set_rate_limit_params(response.headers)\n\n # Did we overshoot our quota?\n if response.status_code == 403 and self.rate_limit_remaining <= 0:\n raise RateLimitExceeded(response=response)\n\n try:\n content = response.content.decode(\"latin1\").strip()\n except Exception as e:\n msg = \"Error decoding this string: {}\\n{}\".format(response.content, e)\n _LOG.exception(msg)\n raise\n\n # If we get to here then the content is valid :)\n return content\n\n def wait_for_rate_limit_reset(self):\n utc_now = pd.Timestamp.utcnow()\n timedelta_to_wait = self.rate_limit_reset_time - utc_now\n timedelta_to_wait += timedelta(minutes=3) # Just for safety\n secs_to_wait = timedelta_to_wait.total_seconds()\n retry_time_utc = utc_now + timedelta_to_wait\n _print_and_log(\n \"Waiting {:.0f} seconds. Will retry at {}\".format(secs_to_wait, retry_time_utc)\n )\n time.sleep(secs_to_wait)\n\n\ndef date_to_pvoutput_str(date: Union[str, datetime]) -> str:\n \"\"\"Convert datetime to date string for PVOutput.org in YYYYMMDD format.\"\"\"\n if isinstance(date, str):\n try:\n datetime.strptime(date, PV_OUTPUT_DATE_FORMAT)\n except ValueError:\n return pd.Timestamp(date).strftime(PV_OUTPUT_DATE_FORMAT)\n else:\n return date\n return date.strftime(PV_OUTPUT_DATE_FORMAT)\n\n\ndef _check_date(date: str, prediction=False):\n \"\"\"Check that date string conforms to YYYYMMDD format,\n and that the date isn't in the future.\n\n Raises:\n ValueError if the date is 'bad'.\n \"\"\"\n dt = datetime.strptime(date, PV_OUTPUT_DATE_FORMAT)\n if dt > datetime.now() and not prediction:\n raise ValueError(\n \"\"\n \"date should not be in the future. Got {}. Current date is {}.\".format(\n date, datetime.now()\n )\n )\n\n\ndef _set_date_param(dt, api_params, key):\n if dt is not None:\n dt = date_to_pvoutput_str(dt)\n _check_date(dt)\n api_params[key] = dt\n\n\ndef check_pv_system_status(pv_system_status: pd.DataFrame, requested_date: date):\n \"\"\"Checks the DataFrame returned by get_pv_system_status.\n\n Args:\n pv_system_status: DataFrame returned by get_pv_system_status\n requested_date: date.\n\n Raises:\n ValueError if the DataFrame is incorrect.\n \"\"\"\n if not isinstance(pv_system_status, pd.DataFrame):\n raise ValueError(\"pv_system_status must be a dataframe\")\n if not pv_system_status.empty:\n index = pv_system_status.index\n for d in [index[0], index[-1]]:\n if not requested_date <= d.date() <= requested_date + ONE_DAY:\n raise ValueError(\n \"A date in the index is outside the expected range.\"\n \" Date from index={}, requested_date={}\".format(d, requested_date)\n )\n\n\ndef _process_batch_status(pv_system_status_text):\n # See https://pvoutput.org/help.html#dataservice-getbatchstatus\n\n # PVOutput uses a non-standard format for the data. 
The text\n # needs some processing before it can be read as a CSV.\n processed_lines = []\n for line in pv_system_status_text.split(\"\\n\"):\n line_sections = line.split(\";\")\n date = line_sections[0]\n time_and_data = line_sections[1:]\n processed_line = [\n \"{date},{payload}\".format(date=date, payload=payload) for payload in time_and_data\n ]\n processed_lines.extend(processed_line)\n\n if processed_lines:\n first_line = processed_lines[0]\n num_cols = len(first_line.split(\",\"))\n if num_cols >= 8:\n raise NotImplementedError(\"Handling of consumption data is not implemented!\")\n\n processed_text = \"\\n\".join(processed_lines)\n del processed_lines\n\n columns = [\"cumulative_energy_gen_Wh\", \"instantaneous_power_gen_W\", \"temperature_C\", \"voltage\"]\n\n pv_system_status = pd.read_csv(\n StringIO(processed_text),\n names=[\"date\", \"time\"] + columns,\n parse_dates={\"datetime\": [\"date\", \"time\"]},\n index_col=[\"datetime\"],\n dtype={col: np.float64 for col in columns},\n ).sort_index()\n\n return pv_system_status\n\n\ndef _append_missing_date_range(\n output_filename, pv_system_id, missing_start_date, missing_end_date, datetime_of_api_request\n):\n\n data = {\n \"missing_start_date_PV_localtime\": pd.Timestamp(missing_start_date),\n \"missing_end_date_PV_localtime\": pd.Timestamp(missing_end_date),\n \"datetime_of_API_request\": datetime_of_api_request,\n }\n new_missing_date_range = pd.DataFrame(data, index=[pv_system_id])\n new_missing_date_range.index.name = \"pv_system_id\"\n _LOG.info(\n \"system_id %d: Recording missing date range from %s to %s\",\n pv_system_id,\n missing_start_date,\n missing_end_date,\n )\n with pd.HDFStore(output_filename, mode=\"a\", complevel=9) as store:\n store.append(key=\"missing_dates\", value=new_missing_date_range, data_columns=True)\n\n\ndef _record_gaps(output_filename, pv_system_id, date_to, timeseries, datetime_of_api_request):\n dates_of_data = (\n timeseries[\"instantaneous_power_gen_W\"].dropna().resample(\"D\").mean().dropna().index.date\n )\n dates_requested = pd.date_range(date_to - timedelta(days=365), date_to, freq=\"D\").date\n missing_dates = set(dates_requested) - set(dates_of_data)\n missing_date_ranges = _convert_consecutive_dates_to_date_ranges(list(missing_dates))\n _LOG.info(\n \"system_id %d: %d missing date ranges found: \\n%s\",\n pv_system_id,\n len(missing_date_ranges),\n missing_date_ranges,\n )\n if len(missing_date_ranges) == 0:\n return\n # Convert to from date objects to pd.Timestamp objects, because HDF5\n # doesn't like to store date objects.\n missing_date_ranges = missing_date_ranges.astype(\"datetime64\")\n missing_date_ranges[\"pv_system_id\"] = pv_system_id\n missing_date_ranges[\"datetime_of_API_request\"] = datetime_of_api_request\n missing_date_ranges.set_index(\"pv_system_id\", inplace=True)\n with pd.HDFStore(output_filename, mode=\"a\", complevel=9) as store:\n store.append(key=\"missing_dates\", value=missing_date_ranges, data_columns=True)\n\n\ndef _convert_consecutive_dates_to_date_ranges(missing_dates):\n new_missing = []\n missing_dates = np.sort(np.unique(missing_dates))\n if len(missing_dates) == 0:\n return pd.DataFrame(new_missing)\n\n gaps = np.diff(missing_dates).astype(\"timedelta64[D]\").astype(int) > 1\n gaps = np.where(gaps)[0]\n\n start_date = missing_dates[0]\n for gap_i in gaps:\n end_date = missing_dates[gap_i]\n new_missing.append(\n {\n \"missing_start_date_PV_localtime\": start_date,\n \"missing_end_date_PV_localtime\": end_date,\n }\n )\n start_date = 
missing_dates[gap_i + 1]\n\n end_date = missing_dates[-1]\n new_missing.append(\n {\"missing_start_date_PV_localtime\": start_date, \"missing_end_date_PV_localtime\": end_date}\n )\n\n return pd.DataFrame(new_missing)\n" ]
[ [ "pandas.Timestamp.utcnow", "numpy.diff", "pandas.Timestamp", "pandas.DataFrame", "pandas.Timestamp.now", "numpy.float32", "pandas.HDFStore", "pandas.to_datetime", "pandas.read_hdf", "pandas.isnull", "numpy.where", "pandas.Timestamp.utcfromtimestamp", "numpy.unique" ] ]
CalciferZh/KinectRecorder
[ "cab45c35264feb4bfcde32172e2492711788b3bd" ]
[ "visualizer.py" ]
[ "import numpy as np\nimport pygame\nimport cv2\n\nfrom utils import pickle_load\nfrom utils import pickle_save\n\n\nclass RawVisualizer:\n def __init__(self, load_prefix):\n \"\"\"\n Display raw stream recorded by `KinectRecorder`.\n\n Parameter\n ---------\n load_prefix: Path to load data. Will load color stream from\n `load_prefix`_color.avi, depth stream from `load_prefix`_depth.pkl, and body\n stream from `load_prefix`_body.pkl.\n\n \"\"\"\n self.color_path = load_prefix + '_color.avi'\n self.color_src = cv2.VideoCapture(self.color_path)\n self.color_height = int(self.color_src.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.color_width = int(self.color_src.get(cv2.CAP_PROP_FRAME_WIDTH))\n\n self.depth_path = load_prefix + '_depth.pkl'\n self.depth_frames = pickle_load(self.depth_path)\n self.depth_height = self.depth_frames[0].shape[0]\n self.depth_width = self.depth_frames[0].shape[1]\n\n self.body_path = load_prefix + '_body.pkl'\n self.body_frames = pickle_load(self.body_path)\n\n self.fps = 30\n self.playing = True\n self.frame_idx = 0\n\n pygame.init()\n self.surface = pygame.Surface(\n (self.color_width + self.depth_width, self.color_height), 0, 24\n )\n self.hw_ratio = self.surface.get_height() / self.surface.get_width()\n\n # screen layout: # is color stream, * is depth, & is body index\n # ----------------------\n # |################# *****|\n # |################# *****|\n # |################# *****|\n # |################# &&&&&|\n # |################# &&&&&|\n # |################# &&&&&|\n # ----------------------\n scale = 0.6\n self.screen = pygame.display.set_mode(\n (\n int(self.surface.get_width() * scale),\n int(self.surface.get_height() * scale)\n ),\n pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE,\n 24\n )\n self.done = False\n self.clock = pygame.time.Clock()\n pygame.display.set_caption('Playing')\n\n self.frame = np.ones([\n self.surface.get_height(),\n self.surface.get_width(),\n 3\n ])\n\n def run(self):\n \"\"\"\n Main loop.\n\n \"\"\"\n while not self.done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.done = True\n elif event.type == pygame.VIDEORESIZE:\n self.screen = pygame.display.set_mode(\n event.dict['size'],\n pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE,\n 24\n )\n elif event.type == pygame.KEYDOWN:\n if self.playing:\n self.playing = False\n pygame.display.set_caption('Paused')\n else:\n self.playing = True\n pygame.display.set_caption('Playing')\n\n if self.playing:\n ret, color = self.color_src.read()\n depth = self.depth_frames[self.frame_idx]\n body = self.body_frames[self.frame_idx]\n self.frame_idx += 1\n\n if self.frame_idx == len(self.depth_frames):\n self.frame_idx = 0\n self.color_src.set(cv2.CAP_PROP_POS_FRAMES, 1)\n\n self.frame[:, :self.color_width] = np.flip(\n color, axis=-1\n ).astype(np.uint8)\n self.frame[:self.depth_height, -self.depth_width:] = np.repeat(\n depth[:, :, np.newaxis] / 4500 * 255, 3, axis=2\n ).astype(np.uint8)\n self.frame[-self.depth_height:, -self.depth_width:] = np.repeat(\n 255 - body[:, :, np.newaxis], 3, axis=2\n ).astype(np.uint8)\n pygame.surfarray.blit_array(\n self.surface, np.transpose(self.frame, axes=[1, 0, 2])\n )\n target_height = int(self.hw_ratio * self.screen.get_width())\n surface_to_draw = pygame.transform.scale(\n self.surface, (self.screen.get_width(), target_height)\n )\n self.screen.blit(surface_to_draw, (0, 0))\n surface_to_draw = None\n pygame.display.update()\n pygame.display.flip()\n\n print(self.clock.get_fps())\n self.clock.tick(self.fps)\n\n 
pygame.quit()\n\n\nclass AlignedVisualizer:\n def __init__(self, load_path):\n \"\"\"\n Visualize stream after alignment.\n\n Parameter\n ---------\n load_path: Path to load data.\n\n \"\"\"\n data = pickle_load(load_path)\n self.color_frames = data['colors']\n self.depth_frames = data['depths']\n self.body_frames = data['bodies']\n\n self.height = self.color_frames[0].shape[0]\n self.width = self.color_frames[0].shape[1]\n\n self.fps = 30\n self.playing = True\n self.frame_idx = 0\n\n pygame.init()\n self.surface = pygame.Surface(\n (self.width * 3, self.height), 0, 24\n )\n self.hw_ratio = self.surface.get_height() / self.surface.get_width()\n\n scale = 0.6\n self.screen = pygame.display.set_mode(\n (\n int(self.surface.get_width() * scale),\n int(self.surface.get_height() * scale)\n ),\n pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE,\n 24\n )\n self.done = False\n self.clock = pygame.time.Clock()\n pygame.display.set_caption('Playing')\n\n self.frame = np.ones([\n self.surface.get_height(),\n self.surface.get_width(),\n 3\n ])\n\n def run(self):\n \"\"\"\n Main loop.\n\n \"\"\"\n while not self.done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.done = True\n elif event.type == pygame.VIDEORESIZE:\n self.screen = pygame.display.set_mode(\n event.dict['size'],\n pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE,\n 24\n )\n elif event.type == pygame.KEYDOWN:\n if self.playing:\n self.playing = False\n pygame.display.set_caption('Paused')\n else:\n self.playing = True\n pygame.display.set_caption('Playing')\n\n if self.playing:\n color = self.color_frames[self.frame_idx]\n depth = self.depth_frames[self.frame_idx]\n body = self.body_frames[self.frame_idx]\n self.frame_idx += 1\n\n if self.frame_idx == len(self.depth_frames):\n self.frame_idx = 0\n\n self.frame[:, :self.width] = np.flip(\n color, axis=-1\n ).astype(np.uint8)\n self.frame[:, self.width:-self.width] = np.repeat(\n depth[:, :, np.newaxis] / 4500 * 255, 3, axis=2\n ).astype(np.uint8)\n self.frame[:, -self.width:] = np.repeat(\n 255 - body[:, :, np.newaxis], 3, axis=2\n ).astype(np.uint8)\n pygame.surfarray.blit_array(\n self.surface, np.transpose(self.frame, axes=[1, 0, 2])\n )\n target_height = int(self.hw_ratio * self.screen.get_width())\n surface_to_draw = pygame.transform.scale(\n self.surface, (self.screen.get_width(), target_height)\n )\n self.screen.blit(surface_to_draw, (0, 0))\n surface_to_draw = None\n pygame.display.update()\n pygame.display.flip()\n\n self.clock.tick(self.fps)\n\n pygame.quit()\n\n\nif __name__ == '__main__':\n # v = RawVisualizer('test')\n v = AlignedVisualizer('./data/yellow_top.pkl')\n v.run()\n" ]
[ [ "numpy.transpose", "numpy.repeat", "numpy.flip" ] ]
panwalas/SDC-P5
[ "818a2de532c37f16761e2913ca3ff18d2de9f828" ]
[ "vehicleLab/chogtrainingRGB2.py" ]
[ "import matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport numpy as np\nimport cv2\nimport glob\nimport time\nfrom tqdm import tqdm\nfrom sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import StandardScaler\nfrom skimage.feature import hog\nfrom sklearn.externals import joblib\n# NOTE: the next import is only valid for scikit-learn version >= 0.18\n# for scikit-learn <= 0.17 use:\n# from sklearn.cross_validation import train_test_split\nfrom sklearn.model_selection import train_test_split\n\n# Define a function to compute binned color features \ndef bin_spatial(img, size=(32, 32)):\n # Use cv2.resize().ravel() to create the feature vector\n features = cv2.resize(img, size).ravel() \n # Return the feature vector\n return features\n\n# Define a function to compute color histogram features \ndef color_hist(img, nbins=32, bins_range=(0, 256)):\n # Compute the histogram of the color channels separately\n channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)\n channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)\n channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)\n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n # Return the individual histograms, bin_centers and feature vector\n return hist_features\n\n# Define a function to return HOG features and visualization\ndef get_hog_features(img, orient, pix_per_cell, cell_per_block, \n vis=False, feature_vec=True):\n # Call with two outputs if vis==True\n if vis == True:\n features, hog_image = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True, \n visualise=vis, feature_vector=feature_vec)\n return features, hog_image\n # Otherwise call with one output\n else: \n features = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=True, \n visualise=vis, feature_vector=feature_vec)\n return features\n\n# Define a function to extract features from a list of images\n# Have this function call bin_spatial() and color_hist()\ndef extract_features(imgs, cspace='RGB', spatial_size=(32, 32),\n hist_bins=32, hist_range=(0, 256), orient=9, \n pix_per_cell=8, cell_per_block=2, hog_channel=0, datatype='', visualize=False):\n # Create a list to append feature vectors to\n features = []\n # Iterate through the list of images\n images_pbar = tqdm(range(len(imgs)), desc='Loading '+datatype+' Dataset', unit=' features')\n for i in images_pbar:\n file = imgs[i]\n # Read in each one by one\n image = cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2RGB)\n # apply color conversion if other than 'RGB'\n if cspace != 'RGB':\n if cspace == 'HSV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n elif cspace == 'LUV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)\n elif cspace == 'HLS':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n elif cspace == 'YUV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)\n else: feature_image = np.copy(image) \n # Apply bin_spatial() to get spatial color features\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n # Apply color_hist() also with a color space option now\n hist_features = color_hist(feature_image, nbins=hist_bins, bins_range=hist_range)\n # Call get_hog_features() with 
vis=False, feature_vec=True\n if visualize:\n hog_features, hog_image = get_hog_features(feature_image[:,:,hog_channel], orient, \n pix_per_cell, cell_per_block, vis=visualize, feature_vec=True)\n # print(\"hog_image: \", hog_image.shape, type(hog_image[0][0]), np.min(hog_image), np.max(hog_image))\n # print(\"image: \", image.shape, type(image[0][0][0]), np.min(image), np.max(image))\n minhog = np.min(hog_image)\n hog_image = hog_image - minhog\n maxhog = np.max(hog_image)\n hog_image = ((hog_image/maxhog)*255).astype(np.uint8)\n return image, hog_image\n else:\n hog_features = get_hog_features(feature_image[:,:,hog_channel], orient, \n pix_per_cell, cell_per_block, vis=False, feature_vec=True)\n # Append the new feature vector to the features list\n features.append(np.concatenate((spatial_features, hist_features, hog_features)))\n # Return list of feature vectors\n return features\n\n# Define a way for us to write out a sample of the HOG\ndef drawPlots(imagefile, sampleTitle, orient, pix_per_cell, cell_per_block, trainScore, testScore, carimage, carhog, notcarimage, notcarhog, deltaTime):\n print(\"saving sample image and hogs to \", imagefile)\n # Setup plot\n fig = plt.figure(figsize=(10, 3))\n w_ratios = [1 for n in range(5)]\n h_ratios = [1 for n in range(1)]\n grid = gridspec.GridSpec(1, 5, wspace=0.0, hspace=0.0, width_ratios=w_ratios, height_ratios=h_ratios)\n i = 0\n\n # draw the images\n # next image\n sampleTitleWScores = '%s\\n Orientation: %d\\n Pix_per_cell: %d\\n Cell_per_block: %d\\n Train Accuracy:\\n %10.9f\\n Test Accuracy:\\n %10.9f\\n Decision Time:\\n %10.9f'%(sampleTitle, orient, pix_per_cell, cell_per_block, trainScore, testScore, deltaTime)\n ax = plt.Subplot(fig, grid[i])\n ax.text(0.1,0.4, sampleTitleWScores, fontsize=8)\n ax.set_xticks([])\n ax.set_yticks([])\n for sp in ax.spines.values():\n sp.set_visible(False)\n fig.add_subplot(ax)\n i += 1\n\n ax = plt.Subplot(fig, grid[i])\n ax.imshow(carimage)\n if i==1:\n ax.set_title('Sample Car Image', size=8)\n ax.set_xticks([])\n ax.set_yticks([])\n fig.add_subplot(ax)\n i += 1\n\n ax = plt.Subplot(fig, grid[i])\n ax.imshow(carhog, cmap='gray')\n if i==2:\n ax.set_title('Sample Car HOG', size=8)\n ax.set_xticks([])\n ax.set_yticks([])\n fig.add_subplot(ax)\n i += 1\n\n ax = plt.Subplot(fig, grid[i])\n ax.imshow(notcarimage)\n if i==3:\n ax.set_title('Sample Noncar Image', size=8)\n ax.set_xticks([])\n ax.set_yticks([])\n fig.add_subplot(ax)\n i += 1\n\n ax = plt.Subplot(fig, grid[i])\n ax.imshow(notcarhog, cmap='gray')\n if i==4:\n ax.set_title('Sample Noncar HOG', size=8)\n\n ax.set_xticks([])\n ax.set_yticks([])\n fig.add_subplot(ax)\n i += 1\n\n plt.savefig(imagefile)\n\n# Divide up into cars and notcars\n# NOTE: Using our own collected data from 'birds-eye' view\ncars = glob.glob('../vehicles/*/*/*.jpg')\nnotcars = glob.glob('../non-vehicles/*/*/*.jpg')\n\nprint(\"number of original car samples: \", len(cars))\nprint(\"number of original non-car samples: \", len(notcars))\norient = 8\npix_per_cell = 4\ncell_per_block = 2\n\nt=time.time()\ncar_features = extract_features(cars, cspace='RGB', spatial_size=(32, 32),\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,\n hist_bins=32, hist_range=(0, 256), datatype='Car')\nnotcar_features = extract_features(notcars, cspace='RGB', spatial_size=(32, 32),\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,\n hist_bins=32, hist_range=(0, 256), datatype='Noncar')\nt2 = time.time()\nprint(t2-t, 'Seconds to load 
dataset...')\n\nt=time.time()\nprint(\"Data loaded, now scaling and splitting dataset...\")\n\n# Create an array stack of feature vectors\nX = np.vstack((car_features, notcar_features)).astype(np.float64) \n# Fit a per-column scaler\nX_scaler = StandardScaler().fit(X)\n# Apply the scaler to X\nscaled_X = X_scaler.transform(X)\n\n# Define the labels vector\ny = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))\n\n# Split up data into randomized training and test sets\nrand_state = np.random.randint(0, 100)\nX_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.2, random_state=rand_state)\n\nt2 = time.time()\nprint(t2-t, 'Seconds to scale and split dataset...')\n\nprint(\"training set size:\", len(X_train))\nprint(\"testing set size:\", len(X_test))\n\n# Use a linear SVC \nsvc = LinearSVC()\n# Check the training time for the SVC\nt=time.time()\nsvc.fit(X_train, y_train)\nt2 = time.time()\nprint(t2-t, 'Seconds to train SVC...')\n# Check the score of the SVC\ntrainingScore = svc.score(X_train, y_train)\ntestingScore = svc.score(X_test, y_test)\nprint('Train Accuracy of SVC = ', trainingScore)\nprint('Test Accuracy of SVC = ', testingScore)\n# Check the prediction time for a single sample\nt=time.time()\nconfidence = svc.decision_function(X_test[0].reshape(1, -1))\nt2 = time.time()\ndeltatime = t2-t\nprint(deltatime, 'Seconds to run decision_function with SVC')\n\n# versionName for this version\nversionName = 'CHOGRGB2'\n\n# saving trained SVC model:\ntrained_model = './trained/'+versionName+'.pkl'\ntrained_scalar = './trained/scaler'+versionName+'.pkl'\nvisualfile = './visualized/'+versionName+'.jpg'\n\nprint('saving trained model to', trained_model) \njoblib.dump(svc, trained_model)\nprint('saving trained scalar to', trained_scalar)\njoblib.dump(X_scaler, trained_scalar)\n\ncarimage, carhog = extract_features([cars[0]], cspace='RGB', spatial_size=(32, 32),\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,\n hist_bins=32, hist_range=(0, 256), datatype='Car', visualize=True)\nnotcarimage, notcarhog = extract_features([notcars[0]], cspace='RGB', spatial_size=(32, 32),\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,\n hist_bins=32, hist_range=(0, 256), datatype='Noncar', visualize=True)\ndrawPlots(visualfile, versionName, orient, pix_per_cell, cell_per_block, trainingScore, testingScore, carimage, carhog, notcarimage, notcarhog, deltatime)\n\n" ]
[ [ "numpy.vstack", "numpy.histogram", "matplotlib.pyplot.figure", "sklearn.svm.LinearSVC", "matplotlib.pyplot.savefig", "numpy.copy", "numpy.max", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.Subplot", "numpy.min", "sklearn.preprocessing.StandardScaler", "sklearn.externals.joblib.dump", "numpy.concatenate", "numpy.random.randint", "sklearn.model_selection.train_test_split" ] ]
prefrontalvortex/SigFlux
[ "aa1cb1d4fe66b37ac6f659068678853756117060" ]
[ "sigflux/logscale.py" ]
[ "import numpy as np\nfrom scipy import fftpack, interpolate, signal\n\nfrom sigflux import clip\n\n\ndef freq_logscale(data, ndim=1024, fs=400, down=30, smoothing_cutoff=1, hard_cutoff=200, log_low_cut=-2.32,\n prenormalize=True, useEnvelope=True):\n \"\"\"\n This function returns a distorted version (x-axis log-transformed) of the fourier transform of the signal, related\n to the Mel scale (equal distance is equal temperment (pitch) not frequency)\n :param data: input data, [n,t] array-like\n :param ndim: dimension of output vector\n :param fs: input data sampling frequency\n :param smoothing_cutoff: 'Frequency' of smoothing the spectrum\n :param hard_cutoff: Chop off the frequency spectrum above this frequency\n :param log_low_cut: Sets how much to include very low frequency components\n :return:\n \"\"\"\n if prenormalize:\n data = clip.norm_softclip(data)\n\n if useEnvelope:\n data = clip.envelope(data)\n\n # FFT and magnitude\n ftsig = fftpack.fft(data, axis=0)\n ftsig_a = np.abs(ftsig[:len(ftsig)*hard_cutoff//fs])\n # Smooth it with low pass and downsample. Low pass may not be necessary since resample does appropriate\n # pre-filtering\n ftsig_r = signal.resample_poly(ftsig_a, 1, down, axis=0)\n\n # Ok, now the weird bit. Take the existing x-domain and create an interpolation image of it\n t_rs = np.linspace(0.0001, hard_cutoff, len(ftsig_r))\n fn_ftsig_rs = interpolate.Akima1DInterpolator(t_rs, ftsig_r)\n # And now map an exponential domain, thereby creating a higher density of points around the main freq\n x_basis = np.linspace(log_low_cut, np.log2(hard_cutoff), ndim)\n log_ftsig = fn_ftsig_rs(np.power(2, x_basis))\n return log_ftsig" ]
[ [ "numpy.log2", "scipy.fftpack.fft", "numpy.power", "scipy.signal.resample_poly", "scipy.interpolate.Akima1DInterpolator" ] ]
niab/dip
[ "b83d6d10762adb28c29b116565d17538b6129a2a" ]
[ "common.py" ]
[ "import csv\nimport pymongo\nimport numpy as np\nimport math\nfrom collections import OrderedDict\nfrom decimal import Decimal\nfrom scipy.stats import fisher_exact\n\n#################\n### CONSTANTS ###\n#################\n\nDB_HOST = 'localhost'\nDB_PORT = 27017\nDB_NAME_GR = 'gr' \nDB_NAME_EXAC = 'exac'\n\t\t\t\n\t\t\t\nclass MongoDB():\n\t\"\"\"Database Client.\"\"\"\n\tdef __init__(self):\n\t\tclient = pymongo.MongoClient(host=DB_HOST, port=DB_PORT, document_class=OrderedDict)\n\t\tself.main = client[DB_NAME_GR]\n\t\tself.exac = client[DB_NAME_EXAC]\n\n\n\ndef file_len(fname):\n\t\"\"\"Calculate length of a file.\"\"\"\n\twith open(fname) as f:\n\t\tfor i, l in enumerate(f):\n\t\t\tpass\n\treturn i + 1\n\n\ndef is_float(x):\n\t\"\"\"Check if value (e.g. string) can be converted to float.\"\"\"\n\ttry:\n\t\ta = float(x)\n\texcept ValueError:\n\t\treturn False\n\telse:\n\t\treturn True\n\n\ndef is_int(x):\n\t\"\"\"Check if value (e.g. string) can be converted to integer.\"\"\"\n\ttry:\n\t\ta = float(x)\n\t\tb = int(a)\n\texcept ValueError:\n\t\treturn False\n\telse:\n\t\treturn a == b\n\n\ndef calculate_percentiles(ranked_list, reverse=False):\n\t\"\"\"Return list of percetiles based on number of elements in input list.\"\"\"\n\tpercentiles = OrderedDict()\n\tmax_num = len(ranked_list)\n\n\tpercentile = 0.0\n\tfor x in range(0, max_num):\n\t\tif reverse:\n\t\t\tpercentile = (1 - float(x + 1) / max_num) * 100\n\t\telse:\n\t\t\tpercentile = float(x + 1) / max_num * 100\n\t\tpercentiles[ranked_list[x]] = percentile\n\n\treturn percentiles\n\n\ndef write_table_to_csv(table, output_csv, delimiter=','):\n\t\"\"\"Write table (list of lists) to csv.\"\"\"\n\toutput_file = open(output_csv,'w+')\n\twriter = csv.writer(output_file, delimiter=delimiter)\n\n\tfor row in table:\n\t\twriter.writerow(row)\n\n\toutput_file.close()\n\n\ndef sort_dict_by_values(dictionary, reverse=False):\n\t\"\"\"Return dictionary sorted by values.\"\"\"\n\tsorted_tuples = sorted(dictionary.items(), key=lambda x: x[1], reverse=reverse)\n\tresult = OrderedDict()\n\tfor x in range(0, len(sorted_tuples)):\n\t\tresult[sorted_tuples[x][0]] = sorted_tuples[x][1]\n\treturn result\n\n\ndef float_to_sci_str(num):\n\treturn \"{:.2E}\".format(Decimal(num))\n\n\ndef proportion_to_percents_str(proportion):\n\treturn \"{0:.1f}\".format(proportion*100)\n\n\ndef get_sorted_gene_list(db, collection_name, score_field, reverse=False, ignored_genes=set(), filters={}):\n\tgene_scores = {}\n\tgenes = db.drt[collection_name].find(filters)\n\tfor gene in genes:\n\t\tgene_id = gene['hgnc_id']\n\t\tif gene_id not in ignored_genes:\n\t\t\tgene_scores[gene['hgnc_id']] = gene[score_field]\n\n\tgene_scores = sort_dict_by_values(gene_scores, reverse=reverse)\n\treturn gene_scores\n\n\ndef remove_non_valid_keys_from_dict(dictionary, valid_keys):\n\tupdated_dict = OrderedDict()\n\tfor key, value in dictionary.iteritems():\n\t\tif key in valid_keys:\n\t\t\tupdated_dict[key] = value\n\treturn updated_dict\n\n\ndef get_str_ratio(n, total, only_ratio=False):\n\tratio = float(n * 100 / total)\n\tif only_ratio:\n\t\tstr_ratio = \"{:.2f}\".format(ratio)\n\telse:\n\t\tstr_ratio = \"{} ({:.2f}%)\".format(n, ratio)\n\treturn str_ratio\n\n\ndef report_gene_group_enrichment_in_the_subset(group_name, gene_subset_ids, all_gene_ids, gene_group_ids):\n\tgene_subset_ids = set(gene_subset_ids)\n\tall_gene_ids = set(all_gene_ids)\n\tgene_group_ids = set(gene_group_ids)\n\n\tsubset = len(gene_subset_ids)\n\ttotal = len(all_gene_ids)\n\tgroup_and_all = len(gene_group_ids 
& all_gene_ids)\n\tgroup_and_subset = len(gene_group_ids & gene_subset_ids)\n\n\tfe, p = fisher_exact([[group_and_subset, subset],\n\t\t\t\t\t\t [group_and_all, total]])\n\tprint([[group_and_subset, subset],\n\t\t\t\t\t\t [group_and_all, total]])\n\tprint('### {} ###'.format(group_name, group_and_all))\n\tprint('Examined subset {}/{}, {:.2f}%'.format(subset, total, subset*100/total))\n\tprint('{} in the subset {}/{}, {:.2f}%'.format(group_name, group_and_subset,\n\t\t\t\t\t\t\t\t\t\t group_and_all, group_and_subset*100/group_and_all))\n\tprint('FE: {:.3f}, P-value: {}'.format(fe, p))\n\n\ndef get_metric_ranked_gene_scores(db, collection_name, score_field, reverse=False, valid_gene_ids=set()):\n\tmetric_scores = OrderedDict()\n\tmetric_genes = db.main[collection_name].find({})\n\tfor metric_gene in metric_genes:\n\t\tgene_id = metric_gene['hgnc_gene_id']\n\t\tif valid_gene_ids and gene_id not in valid_gene_ids:\n\t\t\tcontinue\t\t\n\t\tmetric_scores[gene_id] = metric_gene[score_field]\n\tmetric_scores = sort_dict_by_values(metric_scores, reverse=reverse)\n\treturn metric_scores\n\t\n\n# Modified J.Vo answer from here:\n# https://stackoverflow.com/questions/30098263/inserting-a-document-with-pymongo-invaliddocument-cannot-encode-object\ndef correct_encoding(obj):\n\t\"\"\"Correct the encoding of python dictionaries so they can be encoded to mongodb\n\tinputs\n\t-------\n\tdictionary : dictionary instance to add as document\n\toutput\n\t-------\n\tnew : new dictionary with (hopefully) corrected encodings\"\"\"\n\n\tif isinstance(obj, dict):\n\t\tnew_dict = {}\n\t\tfor key, val in obj.items():\n\t\t\tval = correct_encoding(val)\n\t\t\tnew_dict[key] = val\n\t\treturn new_dict\n\telif isinstance(obj, list):\n\t\tnew_list = []\n\t\tfor val in obj:\n\t\t\tval = correct_encoding(val)\n\t\t\tnew_list.append(val)\n\t\treturn new_list\n\telse:\n\t\tif isinstance(obj, np.bool_):\n\t\t\tobj = bool(obj)\n\n\t\tif isinstance(obj, np.int64):\n\t\t\tobj = int(obj)\n\n\t\tif isinstance(obj, np.float64):\n\t\t\tobj = float(obj)\n\t\treturn obj\n\n\ndef get_keys_from_dict_based_on_value_threshold(dictionary, threshold, comparison_mode):\n\tkeys = []\n\tfor key, value in dictionary.items():\n\t\tif comparison_mode == '>=' and value >= threshold:\n\t\t\tkeys.append(key)\n\t\telif comparison_mode == '<=' and value <= threshold:\n\t\t\tkeys.append(key)\n\t\telif comparison_mode == '>' and value > threshold:\n\t\t\tkeys.append(key)\n\t\telif comparison_mode == '<' and value < threshold:\n\t\t\tkeys.append(key)\n\t\telif comparison_mode == '==' and value == threshold:\n\t\t\tkeys.append(key)\n\treturn keys\n\n\ndef calculate_clf_performance(tp, fp, p_all):\n\tprec = tp / (tp + fp)\n\trec = tp / p_all\n\tf1 = 2 * (prec * rec) / (prec + rec)\n\n\tmetrics = OrderedDict()\n\tmetrics['Precision'] = '{:.2f}%'.format(prec * 100)\n\tmetrics['Recall'] = '{:.2f}%'.format(rec * 100)\n\tmetrics['F1'] = '{:.2f}%'.format(f1 * 100)\n\treturn metrics" ]
[ [ "scipy.stats.fisher_exact" ] ]
gy29289957/deep-anpr
[ "e4e1bc8f1f560f0f01b4c39d6302d8d8edde89fd" ]
[ "common.py" ]
[ "# Copyright (c) 2016 Matthew Earl\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN\n# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE\n# USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"\nDefinitions that don't fit elsewhere.\n\n\"\"\"\n\n__all__ = (\n 'DIGITS',\n 'LETTERS',\n 'CHARS',\n 'sigmoid',\n 'softmax',\n)\n\nimport numpy\n\n\nDIGITS = \"0123456789\"\nLETTERS = \"ABCDEFGHJKLMNPQRSTUVWXYZ\"\nDASH = \"-\"\nCHARS = LETTERS + DASH + DIGITS\n\ndef softmax(a):\n exps = numpy.exp(a.astype(numpy.float64))\n return exps / numpy.sum(exps, axis=-1)[:, numpy.newaxis]\n\ndef sigmoid(a):\n return 1. / (1. + numpy.exp(-a))\n\n" ]
[ [ "numpy.sum", "numpy.exp" ] ]
Tjorriemorrie/trading
[ "aafa15a6c564bfa86948ab30e33d554172b38a3e" ]
[ "19_rf_kelly/main.py" ]
[ "import logging as log\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import scale\nfrom sklearn.cross_validation import train_test_split\nfrom indicators import ewma, rsi\n\n\nDATA = [\n {'currency': 'AUDUSDe', 'timeframe': 1440},\n {'currency': 'EURGBPe', 'timeframe': 1440},\n {'currency': 'EURJPYe', 'timeframe': 1440},\n {'currency': 'EURUSDe', 'timeframe': 1440},\n {'currency': 'GBPJPYe', 'timeframe': 1440},\n {'currency': 'GBPUSDe', 'timeframe': 1440},\n {'currency': 'NZDUSDe', 'timeframe': 1440},\n {'currency': 'USDCADe', 'timeframe': 1440},\n {'currency': 'USDCHFe', 'timeframe': 1440},\n {'currency': 'USDJPYe', 'timeframe': 1440},\n]\n\n\ndef loadData(currency, timeframe):\n log.info('Data: loading...')\n df = pd.read_csv(\n r'../data/{0}{1}.csv'.format(currency, timeframe),\n names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'],\n parse_dates=[['date', 'time']],\n index_col=0,\n )\n # print df\n log.info('Data: {0} loaded'.format(len(df)))\n return df\n\n\ndef getLabels(df):\n log.info('Getting labels...')\n tmp = df.copy()\n tmp['label'] = tmp['close'].shift(-1)\n tmp['label'] = tmp.apply(lambda x: 'long' if x['label'] - x['close'] >= 0 else 'short', axis=1)\n log.info('Labels set')\n return tmp['label']\n\n\ndef splitAndScale(df, labels):\n log.info('Scaling features')\n features = df.copy()\n\n # drop\n features.drop(['open', 'high', 'low', 'close', 'volume'], axis=1, inplace=True)\n\n # split\n X_train, X_test, y_train, y_test = train_test_split(features, labels)\n\n # scale\n X_train = scale(X_train, axis=0, copy=False)\n X_test = scale(X_test, axis=0, copy=False)\n\n log.info('Scaled features')\n return X_train, X_test, y_train, y_test\n\n\ndef addEwma(df, fibos):\n log.info('Adding EWMA {0}'.format(fibos))\n ewmas = {}\n for n in fibos:\n ewmas[n] = ewma(df, 'close', n)\n for i, n in enumerate(fibos):\n for m in fibos[i+1:]:\n df['ewma_{0}_{1}'.format(n, m)] = ewmas[n] / ewmas[m]\n log.info('Added EWMA {0}'.format(fibos))\n\n\ndef addRsi(df, fibos):\n log.info('Adding RSI {0}'.format(fibos))\n rsis = {}\n for n in fibos:\n rsis[n] = rsi(df, n)\n for i, n in enumerate(fibos):\n for m in fibos[i+1:]:\n df['rsi_{0}_{1}'.format(n, m)] = rsis[n] / rsis[m]\n\n df.replace(to_replace=[np.inf, -np.inf], value=0, method='ffil', inplace=True)\n df.fillna(0, inplace=True)\n\n log.info('Added RSI {0}'.format(fibos))\n\n\n" ]
[ [ "sklearn.preprocessing.scale", "sklearn.cross_validation.train_test_split" ] ]
RyanRizzo96/RL_baselines
[ "4f7c217095c6b02093386ed4e527c44c79b42007" ]
[ "custom/aggregator.py" ]
[ "# MIT License\n# Copyright (c) 2019 Sebastian Penhouet\n# GitHub project: https://github.com/Spenhouet/tensorboard-aggregator\n# ==============================================================================\n\"\"\"Aggregates multiple tensorbaord runs\"\"\"\n\nimport ast\nimport argparse\nimport os\nimport re\nfrom pathlib import Path\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom tensorboard.backend.event_processing.event_accumulator import EventAccumulator\nfrom tensorflow.core.util.event_pb2 import Event\n\nFOLDER_NAME = 'aggregates'\n\n\ndef extract(dpath, subpath):\n scalar_accumulators = [EventAccumulator(str(dpath / dname / subpath)).Reload(\n ).scalars for dname in os.listdir(dpath) if dname != FOLDER_NAME]\n\n # Filter non event files\n scalar_accumulators = [scalar_accumulator for scalar_accumulator in scalar_accumulators if scalar_accumulator.Keys()]\n\n # Get and validate all scalar keys\n all_keys = [tuple(scalar_accumulator.Keys()) for scalar_accumulator in scalar_accumulators]\n assert len(set(all_keys)) == 1, \"All runs need to have the same scalar keys. There are mismatches in {}\".format(all_keys)\n keys = all_keys[0]\n\n all_scalar_events_per_key = [[scalar_accumulator.Items(key) for scalar_accumulator in scalar_accumulators] for key in keys]\n\n # Get and validate all steps per key\n all_steps_per_key = [[tuple(scalar_event.step for scalar_event in scalar_events) for scalar_events in all_scalar_events]\n for all_scalar_events in all_scalar_events_per_key]\n\n for i, all_steps in enumerate(all_steps_per_key):\n assert len(set(all_steps)) == 1, \"For scalar {} the step numbering or count doesn't match. Step count for all runs: {}\".format(\n keys[i], [len(steps) for steps in all_steps])\n\n steps_per_key = [all_steps[0] for all_steps in all_steps_per_key]\n\n # Get and average wall times per step per key\n wall_times_per_key = [np.mean([tuple(scalar_event.wall_time for scalar_event in scalar_events) for scalar_events in all_scalar_events], axis=0)\n for all_scalar_events in all_scalar_events_per_key]\n\n # Get values per step per key\n values_per_key = [[[scalar_event.value for scalar_event in scalar_events] for scalar_events in all_scalar_events]\n for all_scalar_events in all_scalar_events_per_key]\n\n all_per_key = dict(zip(keys, zip(steps_per_key, wall_times_per_key, values_per_key)))\n\n return all_per_key\n\n\ndef aggregate_to_summary(dpath, aggregation_ops, extracts_per_subpath):\n for op in aggregation_ops:\n for subpath, all_per_key in extracts_per_subpath.items():\n path = dpath / FOLDER_NAME / op.__name__ / dpath.name / subpath\n aggregations_per_key = {key: (steps, wall_times, op(values, axis=0)) for key, (steps, wall_times, values) in all_per_key.items()}\n write_summary(path, aggregations_per_key)\n\n\ndef write_summary(dpath, aggregations_per_key):\n writer = tf.summary.FileWriter(dpath)\n\n for key, (steps, wall_times, aggregations) in aggregations_per_key.items():\n for step, wall_time, aggregation in zip(steps, wall_times, aggregations):\n summary = tf.Summary(value=[tf.Summary.Value(tag=key, simple_value=aggregation)])\n scalar_event = Event(wall_time=wall_time, step=step, summary=summary)\n writer.add_event(scalar_event)\n\n writer.flush()\n\n\ndef aggregate_to_csv(dpath, aggregation_ops, extracts_per_subpath):\n for subpath, all_per_key in extracts_per_subpath.items():\n for key, (steps, wall_times, values) in all_per_key.items():\n aggregations = [op(values, axis=0) for op in aggregation_ops]\n write_csv(dpath, subpath, 
key, dpath.name, aggregations, steps, aggregation_ops)\n\n\ndef get_valid_filename(s):\n s = str(s).strip().replace(' ', '_')\n return re.sub(r'(?u)[^-\\w.]', '', s)\n\n\ndef write_csv(dpath, subpath, key, fname, aggregations, steps, aggregation_ops):\n path = dpath / FOLDER_NAME\n\n if not path.exists():\n os.makedirs(path)\n\n file_name = get_valid_filename(key) + '-' + get_valid_filename(subpath) + '-' + fname + '.csv'\n aggregation_ops_names = [aggregation_op.__name__ for aggregation_op in aggregation_ops]\n df = pd.DataFrame(np.transpose(aggregations), index=steps, columns=aggregation_ops_names)\n df.to_csv(path / file_name, sep=',')\n\n\ndef aggregate(dpath, output, subpaths):\n name = dpath.name\n\n aggregation_ops = [np.mean, np.min, np.max, np.median, np.std, np.var]\n\n ops = {\n 'summary': aggregate_to_summary,\n 'csv': aggregate_to_csv\n }\n\n print(\"Started aggregation {}\".format(name))\n\n extracts_per_subpath = {subpath: extract(dpath, subpath) for subpath in subpaths}\n\n ops.get(output)(dpath, aggregation_ops, extracts_per_subpath)\n\n print(\"Ended aggregation {}\".format(name))\n\n\nif __name__ == '__main__':\n def param_list(param):\n p_list = ast.literal_eval(param)\n if type(p_list) is not list:\n raise argparse.ArgumentTypeError(\"Parameter {} is not a list\".format(param))\n return p_list\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--path\", type=str, help=\"main path for tensorboard files\", default=os.getcwd())\n parser.add_argument(\"--subpaths\", type=param_list, help=\"subpath structures\", default=['tb'])\n parser.add_argument(\"--output\", type=str, help=\"aggregation can be saved as tensorboard file (summary) or as table (csv)\", default='summary')\n\n args = parser.parse_args()\n print(args.path)\n\n path = Path(args.path)\n print(path)\n\n if not path.exists():\n raise argparse.ArgumentTypeError(\"Parameter {} is not a valid path\".format(path))\n\n subpaths = [path / dname / subpath for subpath in args.subpaths for dname in os.listdir(path) if dname != FOLDER_NAME]\n\n for subpath in subpaths:\n if not os.path.exists(subpath):\n raise argparse.ArgumentTypeError(\"Parameter {} is not a valid path\".format(subpath))\n\n if args.output not in ['summary', 'csv']:\n raise argparse.ArgumentTypeError(\"Parameter {} is not summary or csv\".format(args.output))\n\n aggregate(path, args.output, args.subpaths)\n" ]
[ [ "tensorflow.summary.FileWriter", "numpy.transpose", "tensorflow.Summary.Value", "tensorflow.core.util.event_pb2.Event" ] ]