repo_name (string, lengths 8–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence)
---|---|---|---|---|
cnk113/TREX | [
"add83d8108f3602c5bbe7b37f60ff19f89b2236d"
] | [
"src/trex/writers.py"
] | [
"from pathlib import Path\nfrom typing import List\nfrom .cell import Cell\nimport operator\nimport warnings\n\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"Conversion of the second argument of issubdtype\")\n import loompy\nimport numpy as np\n\n\ndef write_count_matrix(path: Path, cells: List[Cell]):\n \"\"\"Create a Read-count matrix with cells as columns and cloneIDs as rows\"\"\"\n clone_ids = set()\n for cell in cells:\n clone_ids.update(clone_id for clone_id in cell.counts)\n clone_ids = sorted(clone_ids)\n all_counts = [cell.counts for cell in cells]\n with open(path, \"w\") as f:\n f.write(\",\")\n f.write(\",\".join(cell.cell_id for cell in cells))\n f.write(\"\\n\")\n for clone_id in clone_ids:\n f.write(clone_id)\n f.write(\",\")\n values = [lic.get(clone_id, 0) for lic in all_counts]\n f.write(\",\".join(str(v) for v in values))\n f.write(\"\\n\")\n\n\ndef write_cells(path: Path, cells: List[Cell]) -> None:\n \"\"\"Write cells to a tab-separated file\"\"\"\n with open(path, \"w\") as f:\n print(\n \"#cell_id\",\n \":\",\n \"clone_id1\",\n \"count1\",\n \"clone_id2\",\n \"count2\",\n \"...\",\n sep=\"\\t\",\n file=f,\n )\n for cell in cells:\n row = [cell.cell_id, \":\"]\n sorted_clone_ids = sorted(\n cell.counts, key=lambda x: cell.counts[x], reverse=True\n )\n if not sorted_clone_ids:\n continue\n for clone_id in sorted_clone_ids:\n row.extend([clone_id, cell.counts[clone_id]])\n print(*row, sep=\"\\t\", file=f)\n\n\ndef write_reads_or_molecules(path, mols_or_reads, require_umis=True, sort=True):\n with open(path, \"w\") as f:\n if require_umis:\n if sort:\n mols_or_reads = sorted(\n mols_or_reads,\n key=lambda mol_or_read: (\n mol_or_read.umi,\n mol_or_read.cell_id,\n mol_or_read.clone_id,\n ),\n )\n print(\"#cell_id\", \"umi\", \"clone_id\", sep=\"\\t\", file=f)\n for mol_or_read in mols_or_reads:\n print(\n mol_or_read.cell_id,\n mol_or_read.umi,\n mol_or_read.clone_id,\n sep=\"\\t\",\n file=f,\n )\n else:\n if sort:\n mols_or_reads = sorted(\n mols_or_reads,\n key=lambda mol_or_read: (mol_or_read.clone_id, mol_or_read.cell_id),\n )\n print(\"#cell_id\", \"clone_id\", sep=\"\\t\", file=f)\n for mol_or_read in mols_or_reads:\n print(mol_or_read.cell_id, mol_or_read.clone_id, sep=\"\\t\", file=f)\n\n\ndef write_loom(cells: List[Cell], cellranger, output_dir, clone_id_length, top_n=6):\n \"\"\"\n Create a loom file from a Cell Ranger result directory and augment it with information about\n the most abundant cloneIDs and their counts.\n \"\"\"\n # For each cell, collect the most abundant cloneIDs and their counts\n # Maps cell_id to a list of (clone_id, count) pairs that represent the most abundant cloneIDs.\n most_abundant = dict()\n for cell in cells:\n if not cell.counts:\n continue\n counts = sorted(cell.counts.items(), key=operator.itemgetter(1))\n counts.reverse()\n counts = counts[:top_n]\n most_abundant[cell.cell_id] = counts\n\n loompy.create_from_cellranger(cellranger.sample_dir, outdir=output_dir)\n # create_from_cellranger() does not tell us the name of the created file,\n # so we need to re-derive it from the sample name.\n sample_name = cellranger.sample_dir.name\n loom_path = output_dir / (sample_name + \".loom\")\n\n with loompy.connect(loom_path) as ds:\n # Cell ids in the loom file are prefixed by the sample name and a ':'. 
Remove that prefix.\n loom_cell_ids = [cell_id[len(sample_name) + 1 :] for cell_id in ds.ca.CellID]\n\n # Transform cloneIDs and count data\n # brings cloneID data into correct format for loom file.\n # Array must have same shape as all_cellIDs\n clone_id_lists = [[] for _ in range(top_n)]\n count_lists = [[] for _ in range(top_n)]\n for cell_id in loom_cell_ids:\n clone_id_counts = most_abundant.get(cell_id, [])\n # Fill up to a constant length\n while len(clone_id_counts) < top_n:\n clone_id_counts.append((\"-\", 0))\n\n for i, (clone_id, count) in enumerate(clone_id_counts):\n clone_id_lists[i].append(clone_id)\n count_lists[i].append(count)\n\n # Add cloneID and count information to loom file\n for i in range(top_n):\n ds.ca[f\"cloneid_{i+1}\"] = np.array(\n clone_id_lists[i], dtype=\"S%r\" % clone_id_length\n )\n ds.ca[f\"cloneid_count_{i+1}\"] = np.array(count_lists[i], dtype=int)\n"
] | [
[
"numpy.array"
]
] |
18621579069/PaddleHub-yu | [
"47741382cf15eda852fefdada6ce83ef86350af6",
"15e8bcef2addf239081e235bdcfd039de12330e0"
] | [
"paddlehub/contrib/ppdet/data/source/simple_source.py",
"hub_module/modules/text/semantic_model/slda_news/util.py"
] | [
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# function:\n# interface to load data from txt file.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport copy\nfrom ..dataset import Dataset\n\n\nclass SimpleSource(Dataset):\n \"\"\"\n Load image files for testing purpose\n\n Args:\n images (list): list of path of images\n samples (int): number of samples to load, -1 means all\n load_img (bool): should images be loaded\n \"\"\"\n\n def __init__(self, images=[], samples=-1, load_img=True, **kwargs):\n super(SimpleSource, self).__init__()\n self._epoch = -1\n for image in images:\n assert image != '' and os.path.isfile(image), \\\n \"Image {} not found\".format(image)\n self._images = images\n self._fname = None\n self._simple = None\n self._pos = -1\n self._drained = False\n self._samples = samples\n self._load_img = load_img\n self._imid2path = {}\n\n def next(self):\n if self._epoch < 0:\n self.reset()\n\n if self._pos >= self.size():\n self._drained = True\n raise StopIteration(\"no more data in \" + str(self))\n else:\n sample = copy.deepcopy(self._simple[self._pos])\n if self._load_img:\n sample['image'] = self._load_image(sample['im_file'])\n\n self._pos += 1\n return sample\n\n def _load(self):\n ct = 0\n records = []\n for image in self._images:\n if self._samples > 0 and ct >= self._samples:\n break\n rec = {'im_id': np.array([ct]), 'im_file': image}\n self._imid2path[ct] = image\n ct += 1\n records.append(rec)\n assert len(records) > 0, \"no image file found\"\n return records\n\n def _load_image(self, where):\n with open(where, 'rb') as f:\n return f.read()\n\n def reset(self):\n if self._simple is None:\n self._simple = self._load()\n\n if self._epoch < 0:\n self._epoch = 0\n else:\n self._epoch += 1\n\n self._pos = 0\n self._drained = False\n\n def size(self):\n return len(self._simple)\n\n def drained(self):\n assert self._epoch >= 0, \"the first epoch has not started yet\"\n return self._pos >= self.size()\n\n def epoch_id(self):\n return self._epoch\n\n def get_imid2path(self):\n \"\"\"return image id to image path map\"\"\"\n return self._imid2path\n",
"import time\nimport yaml\n\nimport numpy as np\nfrom paddlehub.common.logger import logger\n\nfrom slda_news.config import ModelType\n\n\ndef load_prototxt(config_file, config):\n \"\"\"\n Args:\n config_file: model configuration file.\n config: ModelConfig class\n \"\"\"\n logger.info(\"Loading SLDA config.\")\n with open(config_file, 'r') as f:\n yaml_dict = yaml.load(f, Loader=yaml.FullLoader)\n\n # Assignment.\n if yaml_dict[\"type\"] == \"LDA\":\n config.type = ModelType.LDA\n else:\n config.type = ModelType.SLDA\n config.num_topics = yaml_dict[\"num_topics\"]\n config.alpha = yaml_dict[\"alpha\"]\n config.beta = yaml_dict[\"beta\"]\n config.word_topic_file = yaml_dict[\"word_topic_file\"]\n config.vocab_file = yaml_dict[\"vocab_file\"]\n\n\ndef fix_random_seed(seed=2147483647):\n np.random.seed(seed)\n\n\ndef rand(min_=0, max_=1):\n return np.random.uniform(low=min_, high=max_)\n\n\ndef rand_k(k):\n \"\"\"Returns an integer float number between [0, k - 1].\n \"\"\"\n return int(rand() * k)\n\n\ndef timeit(f):\n \"\"\"Return time cost of function f.\n \"\"\"\n\n def timed(*args, **kwargs):\n start_time = time.time()\n result = f(*args, **kwargs)\n end_time = time.time()\n print(\" [-] %s : %2.5f sec\" % (f.__name__, end_time - start_time))\n return result\n\n return timed\n"
] | [
[
"numpy.array"
],
[
"numpy.random.uniform",
"numpy.random.seed"
]
] |
mcoughlin/PypeIt | [
"9aa1d10633faf3d73135e1a1c94b1cd18c7058e0"
] | [
"pypeit/core/gui/identify.py"
] | [
"import os\nimport copy\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nfrom matplotlib.colors import LinearSegmentedColormap, Normalize\nfrom matplotlib.cm import ScalarMappable\nimport matplotlib.transforms as mtransforms\nfrom matplotlib.widgets import Button, Slider\n\nfrom IPython import embed\n\nfrom pypeit.par import pypeitpar\nfrom pypeit.core.wavecal import wv_fitting, waveio, wvutils\nfrom pypeit import utils, msgs\nfrom astropy.io import ascii as ascii_io\nfrom astropy.table import Table\n\noperations = dict({'cursor': \"Select lines (LMB click)\\n\" +\n \" Select regions (LMB drag = add, RMB drag = remove)\\n\" +\n \" Navigate (LMB drag = pan, RMB drag = zoom)\",\n 'left' : \"Advance the line list slider to the left by one\",\n 'right' : \"Advance the line list slider to the right by one\",\n 'p' : \"Toggle pan/zoom with the cursor\",\n 'q' : \"Close Identify window and continue PypeIt reduction\",\n 'a' : \"Automatically identify lines using current solution\",\n 'c' : \"Clear automatically identified lines\",\n 'd' : \"Delete all line identifications (start from scratch)\",\n 'f' : \"Fit the wavelength solution\",\n 'g' : \"Toggle ghost solution (show predicted line positions when wavelength is on the x-axis)\",\n 'h' : \"Reset ghost parameters\",\n 'l' : \"Load saved line IDs from file (waveids.ascii in local directory)\",\n 'm' : \"Select a line\",\n 'r' : \"Refit a line\",\n 's' : \"Save current line IDs to a file\",\n 'w' : \"Toggle wavelength/pixels on the x-axis of the main panel\",\n 'z' : \"Delete a single line identification\",\n '+/-' : \"Raise/Lower the order of the fitting polynomial\"\n })\n\n\nclass Identify(object):\n \"\"\"\n GUI to interactively identify arc lines. The GUI can be run within\n PypeIt during data reduction, or as a standalone script outside of\n PypeIt. 
To initialise the GUI, call the initialise() function in this\n file.\n \"\"\"\n\n def __init__(self, canvas, axes, spec, specres, detns, line_lists, par, lflag_color,\n slit=0, spatid='0', wv_calib=None, pxtoler=None):\n \"\"\"Controls for the Identify task in PypeIt.\n\n The main goal of this routine is to interactively identify arc lines\n to be used for wavelength calibration.\n\n Parameters\n ----------\n canvas : Matploltib figure canvas\n The canvas on which all axes are contained\n axes : dict\n Dictionary of four Matplotlib axes instances (Main spectrum panel, two for residuals, one for information)\n spec : Matplotlib.Line2D\n Matplotlib Line2D instance which contains plotting information of the plotted arc spectrum\n specres : dict\n Three element list of Matplotlib Line2D/path instances, used for residuals plotting\n detns : ndarray\n Detections from the arc spectrum\n line_lists : astropy.Table\n Contains information about the line list to be used for wavelength calibration\n par : class\n WavelengthSolutionPar Calibration parameters\n lflag_color : list\n List of colors used for plotting\n slit : int\n The slit to be used for wavelength calibration\n spatid : str\n Spatial ID corresponding to slit\n wv_calib : :obj:`dict`, None, optional\n If a best-fitting solution exists, and you wish to load it, provide the wv_calib dictionary.\n pxtoler : float, optional\n Tolerance in pixels for adding lines with the auto option\n \"\"\"\n # Store the axes\n self.axes = axes\n # Initialise the spectrum properties\n self.spec = spec\n self.specres = specres # Residual information\n self.specdata = spec.get_ydata()\n self.specx = np.arange(self.specdata.size)\n self.plotx = self.specx.copy()\n # Detections, linelist, line IDs, and fitting params\n self._slit = slit\n self._spatid = spatid\n self._detns = detns\n self._detnsy = self.get_ann_ypos() # Get the y locations of the annotations\n self._line_lists = line_lists\n self._lines = np.sort(line_lists['wave'].data) # Remove mask (if any) and then sort\n self._lineids = np.zeros(self._detns.size, dtype=np.float)\n self._lineflg = np.zeros(self._detns.size, dtype=np.int) # Flags: 0=no ID, 1=user ID, 2=auto ID, 3=flag reject\n self._lflag_color = lflag_color\n self.par = par\n # Auto ID\n self.pxtoler = 0.1 if pxtoler is None else pxtoler\n # Fitting properties\n self._fitdict = dict(polyorder=1,\n scale=self.specdata.size-1,\n coeff=None,\n fitc=None,\n full_fit=None,\n res_stats=[]\n )\n # Initialise the residuals colormap\n residcmap = LinearSegmentedColormap.from_list(\"my_list\", ['grey', 'blue', 'orange', 'red'], N=4)\n self.residmap = ScalarMappable(norm=Normalize(vmin=0, vmax=3), cmap=residcmap)\n # Initialise the annotations\n self.annlines = []\n self.anntexts = []\n\n # Unset some of the matplotlib keymaps\n matplotlib.pyplot.rcParams['keymap.fullscreen'] = '' # toggling fullscreen (Default: f, ctrl+f)\n matplotlib.pyplot.rcParams['keymap.home'] = '' # home or reset mnemonic (Default: h, r, home)\n matplotlib.pyplot.rcParams['keymap.back'] = '' # forward / backward keys to enable (Default: left, c, backspace)\n matplotlib.pyplot.rcParams['keymap.forward'] = '' # left handed quick navigation (Default: right, v)\n #matplotlib.pyplot.rcParams['keymap.pan'] = '' # pan mnemonic (Default: p)\n matplotlib.pyplot.rcParams['keymap.zoom'] = '' # zoom mnemonic (Default: o)\n matplotlib.pyplot.rcParams['keymap.save'] = '' # saving current figure (Default: s)\n matplotlib.pyplot.rcParams['keymap.quit'] = '' # close the current figure (Default: 
ctrl+w, cmd+w)\n matplotlib.pyplot.rcParams['keymap.grid'] = '' # switching on/off a grid in current axes (Default: g)\n matplotlib.pyplot.rcParams['keymap.yscale'] = '' # toggle scaling of y-axes ('log'/'linear') (Default: l)\n matplotlib.pyplot.rcParams['keymap.xscale'] = '' # toggle scaling of x-axes ('log'/'linear') (Default: L, k)\n matplotlib.pyplot.rcParams['keymap.all_axes'] = '' # enable all axes (Default: a)\n\n # Initialise the main canvas tools\n canvas.mpl_connect('draw_event', self.draw_callback)\n canvas.mpl_connect('button_press_event', self.button_press_callback)\n canvas.mpl_connect('key_press_event', self.key_press_callback)\n canvas.mpl_connect('button_release_event', self.button_release_callback)\n canvas.mpl_connect('motion_notify_event', self.motion_notify_event)\n self.canvas = canvas\n self.background = self.canvas.copy_from_bbox(self.axes['main'].bbox)\n\n # Interaction variables\n self._detns_idx = -1\n self._fitr = None # Matplotlib shaded fit region (for refitting lines)\n self._fitregions = np.zeros(self.specdata.size, dtype=np.int) # Mask of the pixels to be included in a fit\n self._addsub = 0 # Adding a region (1) or removing (0)\n self._msedown = False # Is the mouse button being held down (i.e. dragged)\n self._respreq = [False, None] # Does the user need to provide a response before any other operation will be permitted? Once the user responds, the second element of this array provides the action to be performed.\n self._qconf = False # Confirm quit message\n self._changes = False\n self._wavepix = 1 # Show wavelength (0) or pixels (1) on the x-axis of the main panel\n # Setup ghost properties\n # The ghost params correspond to the central wavelength and dispersion, as measured at the middle pixel of the display\n self._ghosttrans = mtransforms.blended_transform_factory(self.axes['main'].transData, self.axes['main'].transAxes)\n self._ghostmode = False # Display a ghost wavelength solution\n self._ghostdown = False\n self._ghostparam = [0.0, 1.0] # Ghost params [shift, scale] = [wavecen, disp]\n self.gstlines = []\n self.gsttexts = []\n\n # Setup slider for the linelist\n self._slideval = 0 # Default starting point for the linelist slider\n self.linelist_init()\n\n\n # If an initial solution is available, load it\n if wv_calib is not None:\n self.load_IDs(wv_calib=wv_calib)\n self.fitsol_fit()\n\n # Draw the spectrum\n self.replot()\n\n @classmethod\n def initialise(cls, arccen, slits, slit=0, par=None, wv_calib_all=None,\n wavelim=None, nonlinear_counts=None, test=False,\n pxtoler=0.1, fwhm=4.):\n \"\"\"Initialise the 'Identify' window for real-time wavelength calibration\n\n .. 
todo::\n\n * Implement multislit functionality\n\n Parameters\n ----------\n arccen : ndarray\n Arc spectrum\n slits : :class:`SlitTraceSet`\n Data container with slit trace information\n slit : int, optional\n The slit to be used for wavelength calibration\n par : :obj:`int`, optional\n The slit to be used for wavelength calibration\n wv_calib_all : :obj:`dict`, None, optional\n If a best-fitting solution exists, and you wish to load it, provide the wv_calib dictionary.\n wavelim : :obj:`list`, None, optional\n A two element list containing the desired minimum and maximum wavelength of the linelist\n test : bool, optional\n If True, this is a unit test\n nonlinear_counts : float, optional\n Counts where the arc is presumed to go non-linear\n Passed to arc_lines_from_spec()\n fwhm : float, optional\n FWHM of arc lines in pixels\n pxtoler : float, optional\n Tolerance in pixels for adding lines with the auto option\n\n\n Returns\n -------\n object : :class:`Identify`\n Returns an instance of the :class:`Identify` class, which contains the results of the fit\n \"\"\"\n\n # Double check that a WavelengthSolutionPar was input\n par = pypeitpar.WavelengthSolutionPar() if par is None else par\n\n # If a wavelength calibration has been performed already, load it:\n msgs.info(\"Slit ID = {0:d} (SPAT ID = {1:d})\".format(slit, slits.spat_id[slit]))\n wv_calib = wv_calib_all[str(slits.spat_id[slit])] if wv_calib_all is not None else None\n\n # Extract the lines that are detected in arccen\n thisarc = arccen[:, slit]\n tdetns, _, _, icut, _ = wvutils.arc_lines_from_spec(thisarc,\n fwhm=fwhm,\n sigdetect=par['sigdetect'],\n nonlinear_counts=nonlinear_counts)\n detns = tdetns[icut]\n\n # Load line lists\n if 'ThAr' in par['lamps']:\n line_lists_all = waveio.load_line_lists(par['lamps'])\n line_lists = line_lists_all[np.where(line_lists_all['ion'] != 'UNKNWN')]\n else:\n line_lists = waveio.load_line_lists(par['lamps'])\n\n # Trim the wavelength scale if requested\n if wavelim is not None:\n ww = np.ones(len(line_lists), dtype=bool)\n if wavelim[0] is not None:\n ww &= line_lists['wave'] > wavelim[0]\n if wavelim[1] is not None:\n ww &= line_lists['wave'] < wavelim[1]\n line_lists = line_lists[ww]\n\n # Create a Line2D instance for the arc spectrum\n spec = Line2D(np.arange(thisarc.size), thisarc,\n linewidth=1, linestyle='solid', color='k',\n drawstyle='steps-mid', animated=True)\n\n # Add the main figure axis\n fig, ax = plt.subplots(figsize=(16, 9), facecolor=\"white\")\n plt.subplots_adjust(bottom=0.05, top=0.85, left=0.05, right=0.65)\n ax.add_line(spec)\n ax.set_ylim((0.0, 1.1 * spec.get_ydata().max()))\n\n # Add two residual fitting axes\n axfit = fig.add_axes([0.7, .5, .28, 0.35])\n axres = fig.add_axes([0.7, .1, .28, 0.35])\n # Residuals\n lflag_color = ['grey', 'blue', 'yellow', 'red']\n residcmap = LinearSegmentedColormap.from_list(\"my_list\", lflag_color, N=len(lflag_color))\n resres = axres.scatter(detns, np.zeros(detns.size), marker='x',\n c=np.zeros(detns.size), cmap=residcmap, norm=Normalize(vmin=0.0, vmax=3.0))\n axres.axhspan(-1*pxtoler, pxtoler, alpha=0.5, color='grey')\n axres.axhline(0.0, color='r', linestyle='-') # Zero level\n axres.set_xlim((0, thisarc.size - 1))\n axres.set_ylim((-0.3, 0.3))\n axres.set_xlabel('Pixel')\n axres.set_ylabel('Residuals (Pix)')\n\n # pixel vs wavelength\n respts = axfit.scatter(detns, np.zeros(detns.size), marker='x',\n c=np.zeros(detns.size), cmap=residcmap, norm=Normalize(vmin=0.0, vmax=3.0))\n resfit = Line2D(np.arange(thisarc.size), 
np.zeros(thisarc.size), linewidth=1, linestyle='-', color='r')\n axfit.add_line(resfit)\n axfit.set_xlim((0, thisarc.size - 1))\n axfit.set_ylim((-0.3, 0.3)) # This will get updated as lines are identified\n axfit.set_xlabel('Pixel')\n axfit.set_ylabel('Wavelength')\n\n # Add an information GUI axis\n axinfo = fig.add_axes([0.15, .92, .7, 0.07])\n axinfo.get_xaxis().set_visible(False)\n axinfo.get_yaxis().set_visible(False)\n axinfo.text(0.5, 0.5, \"Press '?' to list the available options\", transform=axinfo.transAxes,\n horizontalalignment='center', verticalalignment='center')\n axinfo.set_xlim((0, 1))\n axinfo.set_ylim((0, 1))\n specres = dict(pixels=respts, model=resfit, resid=resres)\n\n axes = dict(main=ax, fit=axfit, resid=axres, info=axinfo)\n # Initialise the identify window and display to screen\n fig.canvas.set_window_title('PypeIt - Identify')\n ident = Identify(fig.canvas, axes, spec, specres, detns, line_lists, par, lflag_color, slit=slit,\n spatid=str(slits.spat_id[slit]), wv_calib=wv_calib, pxtoler=pxtoler)\n\n if not test:\n plt.show()\n\n # Now return the results\n return ident\n\n def print_help(self):\n \"\"\"Print the keys and descriptions that can be used for Identification\n \"\"\"\n keys = operations.keys()\n print(\"===============================================================\")\n print(\" Colored lines in main panels:\")\n print(\" gray : wavelength has not been assigned to this detection\")\n print(\" red : currently selected line\")\n print(\" blue : user has assigned wavelength to this detection\")\n print(\" yellow : detection has been automatically assigned\")\n print(\" Colored symbols in residual panels:\")\n print(\" gray : wavelength has not been assigned to this detection\")\n print(\" blue : user has assigned wavelength to this detection\")\n print(\" yellow : detection has been automatically assigned\")\n print(\" red : automatically assigned wavelength was rejected\")\n print(\"---------------------------------------------------------------\")\n print(\" IDENTIFY OPERATIONS\")\n for key in keys:\n print(\"{0:6s} : {1:s}\".format(key, operations[key]))\n print(\"---------------------------------------------------------------\")\n\n def replot(self):\n \"\"\"Redraw the entire canvas\n \"\"\"\n # First set the xdata to be shown\n self.canvas.restore_region(self.background)\n self.toggle_wavepix()\n self.draw_residuals()\n self.draw_lines()\n self.draw_ghost()\n self.canvas.draw()\n\n def linelist_update(self, val):\n \"\"\"For a given detection, set the linelist value to be the best guess based on the wavelength solution\n\n When a user selects a detection, reset the current value of the linelist\n to reflect the best candidate wavelength for that detection (given the current\n wavelength solution)\n\n Args:\n val (int): The index corresponding to the closest match\n \"\"\"\n val = int(val)\n self._slidell.label.set_text(\"{0:.4f}\".format(self._lines[val]))\n self._slideval = val\n\n def linelist_select(self, event):\n \"\"\"Assign a wavelength to a detection\n\n Note, only the LMB works.\n\n Args:\n event (Event): A matplotlib event instance\n \"\"\"\n if event.button == 1:\n self.update_line_id()\n self._detns_idx = -1\n # Try to perform a fit\n self.fitsol_fit()\n # Now replot everything\n self.replot()\n\n def linelist_init(self):\n \"\"\"Initialise the linelist Slider (used to assign a line to a detection)\n \"\"\"\n axcolor = 'lightgoldenrodyellow'\n # Slider\n self.axl = plt.axes([0.15, 0.87, 0.7, 0.04], facecolor=axcolor)\n self._slidell = 
Slider(self.axl, \"{0:.4f}\".format(self._lines[self._slideval]), self._slideval,\n self._lines.size-1, valinit=0, valstep=1)\n self._slidell.valtext.set_visible(False)\n self._slidell.on_changed(self.linelist_update)\n # Select button\n selax = plt.axes([0.86, 0.87, 0.1, 0.04])\n self._select = Button(selax, 'Assign Line', color=axcolor, hovercolor='y')\n self._select.on_clicked(self.linelist_select)\n\n def toggle_wavepix(self, toggled=False):\n if toggled:\n self._wavepix = 1 - self._wavepix\n self.plotx = self.specx.copy() # Plot pixels on the x-axis\n if self._wavepix == 0:\n # Check that a wavelength solution exists\n if self._fitdict['coeff'] is None:\n self.update_infobox(message=\"Unable to show wavelength until a guess at the solution is available\",\n yesno=False)\n else:\n self.plotx = self._fitdict['wave_soln'].copy()\n # Update the x-axis data and axis range\n self.spec.set_xdata(self.plotx)\n if toggled:\n self.axes['main'].set_xlim([self.plotx.min(), self.plotx.max()])\n\n def draw_ghost(self):\n \"\"\"Draw tick marks at the location of the ghost\n \"\"\"\n for i in self.gstlines:\n try:\n i.remove()\n except TypeError:\n i[0].remove()\n for i in self.gsttexts:\n i.remove()\n self.gstlines = []\n self.gsttexts = []\n # Must have ghost mode on, plotting in wavelength, and have an estimated wavelength solution\n if not self._ghostmode or self._wavepix != 0 or self._fitdict['fitc'] is None:\n return\n\n xmn, xmx = self.axes['main'].get_xlim()\n cent = 0.5*(xmn+xmx)\n plotx = cent + (self._lines + self._ghostparam[0] - cent)*self._ghostparam[1]\n\n # Plot the lines\n w = np.where((plotx > xmn) & (plotx < xmx))[0]\n for i in range(w.size):\n self.gstlines.append(self.axes['main'].plot([plotx[w[i]], plotx[w[i]]], [0.45, 0.55],\n color='g', transform=self._ghosttrans))\n txt = \"{0:.2f}\".format(self._lines[w[i]])\n self.gsttexts.append(\n self.axes['main'].annotate(txt, (plotx[w[i]], 0.6), rotation=90.0, alpha=0.5,\n color='g', ha='center', xycoords=self._ghosttrans))\n\n def draw_lines(self):\n \"\"\"Draw the lines and annotate with their IDs\n \"\"\"\n for i in self.annlines:\n i.remove()\n for i in self.anntexts:\n i.remove()\n self.annlines = []\n self.anntexts = []\n # Decide if pixels or wavelength is being plotted\n plotx = self._detns\n if self._wavepix == 0 and self._fitdict['fitc'] is not None:\n # Plot wavelength\n pixel_fit = self._detns\n xnorm = self._fitdict['xnorm']\n\n # Calculate the estimated wavelength of the detections\n plotx = self._fitdict['full_fit'].eval(pixel_fit / xnorm)\n #plotx = utils.func_val(self._fitdict['fitc'],\n # pixel_fit / xnorm,\n # self._fitdict[\"function\"],\n # minx=self._fitdict['fmin'],\n # maxx=self._fitdict['fmax'])\n # Plot the lines\n xmn, xmx = self.axes['main'].get_xlim()\n w = np.where((plotx > xmn) & (plotx < xmx))[0]\n for i in range(w.size):\n if self._lineflg[w[i]] in [0, 3]:\n if w[i] == self._detns_idx:\n self.annlines.append(self.axes['main'].axvline(plotx[w[i]], color='r'))\n else:\n self.annlines.append(self.axes['main'].axvline(plotx[w[i]], color='grey', alpha=0.5))\n continue\n else:\n if w[i] == self._detns_idx:\n self.annlines.append(self.axes['main'].axvline(plotx[w[i]], color='r'))\n else:\n self.annlines.append(self.axes['main'].axvline(plotx[w[i]],\n color=self._lflag_color[self._lineflg[w[i]]]))\n txt = \"{0:.2f}\".format(self._lineids[w[i]])\n self.anntexts.append(\n self.axes['main'].annotate(txt, (plotx[w[i]], self._detnsy[w[i]]), rotation=90.0,\n color='b', ha='right', va='bottom'))\n\n def 
draw_residuals(self):\n \"\"\"Update the subplots that show the residuals\n \"\"\"\n if self._fitdict[\"coeff\"] is None:\n nid = np.where((self._lineflg == 1) | (self._lineflg == 2))[0].size\n msg = \"Cannot plot residuals until more lines have been identified\\n\" +\\\n \"Polynomial order = {0:d}, Number of line IDs = {1:d}\".format(self._fitdict[\"polyorder\"], nid)\n self.update_infobox(message=msg, yesno=False)\n else:\n # Remove the annotated residual statistics\n for i in self._fitdict[\"res_stats\"]:\n i.remove()\n self._fitdict[\"res_stats\"] = []\n\n # Update the line IDs\n for ii in range(self._fitdict['pixel_fit'].size):\n idx = np.argmin(np.abs(self._detns-self._fitdict['pixel_fit'][ii]))\n self._lineids[idx] = self._fitdict['wave_fit'][ii]\n\n # Extract the fitting info\n wave_soln = self._fitdict['wave_soln']\n pixel_fit = self._detns\n wave_fit = self._lineids\n xnorm = self._fitdict['xnorm']\n ymin, ymax = np.min(wave_soln[wave_soln != 0.0]) * .95, np.max(wave_soln) * 1.05\n\n # Calculate some stats\n wave_soln_fit = self._fitdict['full_fit'].eval(pixel_fit / xnorm)\n dwv_pix = np.median(np.abs(wave_soln - np.roll(wave_soln, 1)))\n resvals = (wave_fit - wave_soln_fit) / dwv_pix\n\n # Pixel vs wavelength\n self.specres['pixels'].set_offsets(np.c_[pixel_fit, wave_fit])\n self.specres['model'].set_ydata(wave_soln)\n self.axes['fit'].set_ylim((ymin, ymax))\n self.specres['pixels'].set_color(self.residmap.to_rgba(self._lineflg))\n\n # Pixel residuals\n self.specres['resid'].set_offsets(np.c_[pixel_fit, resvals])\n self.axes['resid'].set_ylim((-1.0, 1.0))\n self.specres['resid'].set_color(self.residmap.to_rgba(self._lineflg))\n\n # Write some statistics on the plot\n disptxt = r'$\\Delta\\lambda$={:.3f}$\\AA$ (per pix)'.format(dwv_pix)\n rmstxt = 'RMS={:.3f} (pixels)'.format(self._fitdict['rms'])\n self._fitdict[\"res_stats\"].append(self.axes['fit'].text(0.1 * self.specdata.size,\n ymin + 0.90 * (ymax - ymin),\n disptxt, size='small'))\n self._fitdict[\"res_stats\"].append(self.axes['fit'].text(0.1 * self.specdata.size,\n ymin + 0.80 * (ymax - ymin),\n rmstxt, size='small'))\n\n def draw_callback(self, event):\n \"\"\"Draw the lines and annotate with their IDs\n\n Args:\n event (Event): A matplotlib event instance\n \"\"\"\n # Get the background\n self.background = self.canvas.copy_from_bbox(self.axes['main'].bbox)\n # Set the axis transform\n trans = mtransforms.blended_transform_factory(self.axes['main'].transData, self.axes['main'].transAxes)\n self.draw_fitregions(trans)\n self.axes['main'].draw_artist(self.spec)\n self.draw_lines()\n self.draw_ghost()\n\n def draw_fitregions(self, trans):\n \"\"\"Refresh the fit regions\n\n Args:\n trans (AxisTransform): A matplotlib axis transform from data to axes coordinates\n \"\"\"\n if self._fitr is not None:\n self._fitr.remove()\n # Find all regions\n regwhr = np.copy(self._fitregions == 1)\n # Fudge to get the leftmost pixel shaded in too\n regwhr[np.where((self._fitregions[:-1] == 0) & (self._fitregions[1:] == 1))] = True\n self._fitr = self.axes['main'].fill_between(self.plotx, 0, 1, where=regwhr, facecolor='green',\n alpha=0.5, transform=trans)\n\n def get_ann_ypos(self, scale=1.02):\n \"\"\"Calculate the y locations of the annotated IDs\n\n Args:\n scale (float): Scale the location relative to the maximum value of the spectrum\n\n Returns:\n ypos (ndarray): y locations of the annotations\n \"\"\"\n ypos = np.zeros(self._detns.size)\n for xx in range(self._detns.size):\n wmin = np.argmin(np.abs(self.specx-self._detns[xx]))\n 
ypos[xx] = scale * np.max(self.specdata[wmin-1:wmin+2])\n return ypos\n\n def get_detns(self):\n \"\"\"Get the index of the detection closest to the cursor\n \"\"\"\n return np.argmin(np.abs(self._detns-self.specx[self._end]))\n\n def get_ind_under_point(self, event):\n \"\"\"Get the index of the line closest to the cursor\n\n Args:\n event (Event): Matplotlib event instance containing information about the event\n\n Returns:\n ind (int): Index of the spectrum where the event occurred\n \"\"\"\n ind = np.argmin(np.abs(self.plotx - event.xdata))\n return ind\n\n def get_axisID(self, event):\n \"\"\"Get the ID of the axis where an event has occurred\n\n Args:\n event (Event): Matplotlib event instance containing information about the event\n\n Returns:\n axisID (int, None): Axis where the event has occurred\n \"\"\"\n if event.inaxes == self.axes['main']:\n return 0\n elif event.inaxes == self.axes['resid']:\n return 1\n elif event.inaxes == self.axes['fit']:\n return 2\n elif event.inaxes == self.axes['info']:\n return 3\n return None\n\n def get_results(self):\n \"\"\"Perform the final wavelength calibration\n\n Using the line IDs perform the final fit according\n to the wavelength calibration parameters set by the\n user. This routine must be called after the user has\n manually identified all lines.\n\n Returns:\n wvcalib (dict): Dict of wavelength calibration solutions\n \"\"\"\n wvcalib = {}\n # Check that a result exists:\n if self._fitdict['coeff'] is None:\n wvcalib[str(self._slit)] = None\n else:\n # Perform an initial fit to the user IDs\n self.fitsol_fit()\n # Now perform a detailed fit\n gd_det = np.where((self._lineflg == 1) | (self._lineflg == 2))[0]\n bdisp = self.fitsol_deriv(self.specdata.size/2) # Angstroms/pixel at the centre of the spectrum\n try:\n #n_final = wvutils.parse_param(self.par, 'n_final', self._slit)\n final_fit = wv_fitting.iterative_fitting(self.specdata, self._detns, gd_det,\n self._lineids[gd_det], self._line_lists, bdisp,\n verbose=False, n_first=self._fitdict[\"polyorder\"],\n match_toler=self.par['match_toler'],\n func=self.par['func'],\n n_final=self._fitdict[\"polyorder\"], input_only=True,\n sigrej_first=self.par['sigrej_first'],\n sigrej_final=self.par['sigrej_final'])\n except TypeError:\n wvcalib = None\n else:\n wvcalib = copy.deepcopy(final_fit)\n return wvcalib\n\n def store_solution(self, final_fit, master_dir, binspec, rmstol=0.15,\n force_save=False, wvcalib=None):\n \"\"\"Check if the user wants to store this solution in the reid arxiv\n\n Parameters\n ----------\n\n final_fit : dict\n Dict of wavelength calibration solutions (see self.get_results())\n master_dir : str\n Master directory -- NOT USED\n binspec : int\n Spectral binning\n rmstol : float\n RMS tolerance allowed for the wavelength solution to be stored in the archive\n force_save : bool\n Force save\n wvcalib : :class:`pypeit.wavecalib.WaveCalib`\n Wavelength solution\n\n \"\"\"\n # Line IDs\n ans = ''\n if not force_save:\n while ans != 'y' and ans != 'n':\n ans = input(\"Would you like to store the line IDs? (y/n): \")\n else:\n ans = 'y'\n if ans == 'y':\n self.save_IDs()\n # Solution\n if 'rms' not in final_fit.keys():\n msgs.warn(\"No wavelength solution available\")\n return\n elif final_fit['rms'] < rmstol:\n ans = ''\n if not force_save:\n while ans != 'y' and ans != 'n':\n ans = input(\"Would you like to write this wavelength solution to disk? 
(y/n): \")\n else:\n ans = 'y'\n if ans == 'y':\n # Arxiv solution\n #outroot = templates.pypeit_identify_record(final_fit, binspec, specname, gratname, dispangl, outdir=master_dir)\n wavelengths = self._fitdict['full_fit'].eval(np.arange(self.specdata.size) /\n (self.specdata.size - 1))\n wvutils.write_template(wavelengths, self.specdata, binspec,\n './', 'wvarxiv.fits')\n msgs.info(\"\\nYour arxiv solution has been written to wvarxiv.fits\")\n #msgs.info(\"\\nYour wavelength solution has been stored here:\" + msgs.newline() +\n # os.path.join(master_dir, outroot) + msgs.newline() + msgs.newline() +\n # \"If you would like to move this to the PypeIt database, please move this file into the directory:\" +\n # msgs.newline() + templates.outpath + msgs.newline() + msgs.newline() +\n # \"Please consider sending your solution to the PypeIt team!\" + msgs.newline())\n #\n if wvcalib is not None:\n wvcalib.to_file('wvcalib.fits')\n msgs.info(\"\\nA WaveCalib container was written to wvcalib.fits\")\n msgs.info(\"\\nPlease consider sending your solution to the PypeIt team!\" + msgs.newline())\n else:\n print(\"Final fit RMS: {0:0.3f} is larger than the allowed tolerance: {1:0.3f}\".format(final_fit['rms'], rmstol))\n print(\"Set the variable --rmstol on the command line to allow a more flexible RMS tolerance\")\n ans = ''\n\n def button_press_callback(self, event):\n \"\"\"What to do when the mouse button is pressed\n\n Args:\n event (Event): Matplotlib event instance containing information about the event\n \"\"\"\n if event.inaxes is None:\n return\n if self.canvas.toolbar.mode != \"\":\n return\n if event.button == 1:\n self._addsub = 1\n elif event.button == 3:\n self._addsub = 0\n if event.inaxes == self.axes[\"main\"]:\n self._msedown = True\n axisID = self.get_axisID(event)\n self._start = self.get_ind_under_point(event)\n self._startdata = event.xdata\n self._oldghostscl = self._ghostparam[1]\n\n def motion_notify_event(self, event):\n if event.inaxes is None:\n return\n self._middata = event.xdata\n if self._ghostmode and self._msedown:\n self.update_ghosts()\n # Now plot\n trans = mtransforms.blended_transform_factory(self.axes['main'].transData, self.axes['main'].transAxes)\n self.canvas.restore_region(self.background)\n self.draw_fitregions(trans)\n # Now replot everything\n self.replot()\n\n def button_release_callback(self, event):\n \"\"\"What to do when the mouse button is released\n\n Args:\n event (Event): Matplotlib event instance containing information about the event\n\n Returns:\n None\n \"\"\"\n self._msedown = False\n if event.inaxes is None:\n return\n if event.inaxes == self.axes['info']:\n if (event.xdata > 0.8) and (event.xdata < 0.9):\n answer = \"y\"\n elif event.xdata >= 0.9:\n answer = \"n\"\n else:\n return\n self.operations(answer, -1)\n self.update_infobox(default=True)\n return\n elif self._respreq[0]:\n # The user is trying to do something before they have responded to a question\n return\n if self.canvas.toolbar.mode != \"\":\n return\n # Draw an actor\n axisID = self.get_axisID(event)\n if axisID is not None:\n if axisID <= 2:\n self._end = self.get_ind_under_point(event)\n if self._end == self._start:\n # The mouse button was pressed (not dragged)\n self.operations('m', axisID, event)\n elif self._end != self._start:\n # The mouse button was dragged\n if axisID == 0:\n if not self._ghostmode:\n if self._start > self._end:\n tmp = self._start\n self._start = self._end\n self._end = tmp\n self.update_regions()\n # Now plot\n trans = 
mtransforms.blended_transform_factory(self.axes['main'].transData, self.axes['main'].transAxes)\n self.canvas.restore_region(self.background)\n self.draw_fitregions(trans)\n # Now replot everything\n self.replot()\n\n def key_press_callback(self, event):\n \"\"\"What to do when a key is pressed\n\n Args:\n event (Event): Matplotlib event instance containing information about the event\n\n Returns:\n None\n \"\"\"\n # Check that the event is in an axis...\n if not event.inaxes:\n return\n # ... but not the information box!\n if event.inaxes == self.axes['info']:\n return\n axisID = self.get_axisID(event)\n self.operations(event.key, axisID, event)\n\n def operations(self, key, axisID, event):\n \"\"\"Canvas operations\n\n Args:\n key (str): Which key has been pressed\n axisID (int): The index of the axis where the key has been pressed (see get_axisID)\n \"\"\"\n # Check if the user really wants to quit\n if key == 'q' and self._qconf:\n if self._changes:\n self.update_infobox(message=\"WARNING: There are unsaved changes!!\\nPress q again to exit\", yesno=False)\n self._qconf = True\n else:\n msgs.bug(\"Need to change this to kill and return the results to PypeIt\")\n plt.close()\n elif self._qconf:\n self.update_infobox(default=True)\n self._qconf = False\n\n # Manage responses from questions posed to the user.\n if self._respreq[0]:\n if key != \"y\" and key != \"n\":\n return\n else:\n # Switch off the required response\n self._respreq[0] = False\n # Deal with the response\n if self._respreq[1] == \"write\":\n # First remove the old file, and save the new one\n msgs.work(\"Not implemented yet!\")\n self.write()\n else:\n return\n # Reset the info box\n self.update_infobox(default=True)\n return\n\n if key == '?':\n self.print_help()\n elif key == 'left':\n widx = self._slideval - 1\n if widx < 0:\n widx = self._lines.size-1\n self.linelist_update(widx)\n elif key == 'right':\n widx = self._slideval + 1\n if widx >= self._lines.size:\n widx = 0\n self.linelist_update(widx)\n elif key == 'a':\n if self._fitdict['coeff'] is not None:\n self.auto_id()\n else:\n msgs.info(\"You must identify a few lines first\")\n elif key == 'c':\n wclr = np.where((self._lineflg == 2) | (self._lineflg == 3))\n self._lineflg[wclr] = 0\n self.replot()\n elif key == 'd':\n self._lineflg *= 0\n self._lineids *= 0.0\n self._fitdict['coeff'] = None\n self.replot()\n elif key == 'f':\n self.fitsol_fit()\n self.replot()\n elif key == 'l':\n self.load_IDs()\n elif key == 'm':\n self._end = self.get_ind_under_point(event)\n self._detns_idx = self.get_detns()\n # Estimate the wavelength, if a solution is available\n if self._fitdict['coeff'] is not None:\n # Find closest line\n waveest = self.fitsol_value(idx=self._detns_idx)\n widx = np.argmin(np.abs(waveest - self._lines))\n self.linelist_update(widx)\n self._slidell.set_val(self._slideval)\n # Print to the information panel\n self.update_infobox(message=\"Pixel position = {0:.1f} Estimated wavelength = {1:.3f}\".format(\n self._detns[self._detns_idx], waveest), yesno=False)\n self.replot()\n elif key == 'q':\n if self._changes:\n self.update_infobox(message=\"WARNING: There are unsaved changes!!\\nPress q again to exit\", yesno=False)\n self._qconf = True\n else:\n plt.close()\n elif key == 'r':\n if self._detns_idx == -1:\n msgs.info(\"You must select a line first\")\n elif self._fitr is None:\n msgs.info(\"You must select a fitting region first\")\n else:\n msgs.work(\"Feature not yet implemented\")\n elif key == 's':\n self.save_IDs()\n elif key == 'w':\n 
self.toggle_wavepix(toggled=True)\n self.replot()\n elif key == 'z':\n self.delete_line_id()\n self.operations('f', axisID, event)\n elif key == '+':\n if self._fitdict[\"polyorder\"] < 10:\n self._fitdict[\"polyorder\"] += 1\n self.update_infobox(message=\"Polynomial order = {0:d}\".format(self._fitdict[\"polyorder\"]), yesno=False)\n self.fitsol_fit()\n self.replot()\n else:\n self.update_infobox(message=\"Polynomial order must be <= 10\", yesno=False)\n elif key == '-':\n if self._fitdict[\"polyorder\"] > 1:\n self._fitdict[\"polyorder\"] -= 1\n self.update_infobox(message=\"Polynomial order = {0:d}\".format(self._fitdict[\"polyorder\"]), yesno=False)\n self.fitsol_fit()\n self.replot()\n else:\n self.update_infobox(message=\"Polynomial order must be >= 1\", yesno=False)\n elif key == 'g':\n if self._wavepix == 0:\n self._ghostmode = not self._ghostmode\n self.replot()\n else:\n self.update_infobox(message=\"To enable ghost mode, you need to identify some lines.\\nYou also need to set wavelength as the x-axis scale\", yesno=False)\n elif key == 'h':\n self._ghostparam = [0.0, 1.0]\n self.replot()\n self.canvas.draw()\n\n def auto_id(self):\n \"\"\"Automatically assign lines based on a few lines identified by the user\n\n Using the current line IDs and approximate wavelength solution,\n automatically assign a wavelength to all line detections.\n \"\"\"\n\n # If the IDs are within an acceptable tolerance, flag them as such\n wave_est = self._fitdict['full_fit'].eval(self._detns / self._fitdict['xnorm'])\n for wav in range(wave_est.size):\n if self._lineflg[wav] == 1:\n # User has manually identified this line already\n continue\n pixdiff = np.abs(wave_est[wav]-self._lines)\n amin = np.argmin(pixdiff)\n pxtst = pixdiff[amin]/self._fitdict['cen_disp']\n self._lineids[wav] = self._lines[amin]\n if pxtst < self.pxtoler:\n # Acceptable\n self._lineflg[wav] = 2\n else:\n # Unacceptable\n self._lineflg[wav] = 3\n # Now that we've automatically identified lines, update the canvas\n self.replot()\n\n def delete_line_id(self):\n \"\"\"Remove an incorrect line ID\n \"\"\"\n rmid = self.get_detns()\n self._lineids[rmid] = 0.0\n self._lineflg[rmid] = 0\n\n def fitsol_value(self, xfit=None, idx=None):\n \"\"\"Calculate the wavelength at a pixel\n\n Parameters\n ----------\n\n xfit : ndarray, float\n Pixel values that the user wishes to evaluate the wavelength\n idx : ndarray, int\n Index of the arc line detections that the user wishes to evaluate the wavelength\n\n Returns\n -------\n\n disp : The wavelength (Angstroms) of the requested pixels\n \"\"\"\n if xfit is None:\n xfit = self._detns\n if self._fitdict['coeff'] is not None:\n if idx is None:\n return np.polyval(self._fitdict[\"coeff\"], xfit / self._fitdict[\"scale\"])\n else:\n return np.polyval(self._fitdict[\"coeff\"], xfit[idx] / self._fitdict[\"scale\"])\n else:\n msgs.bug(\"Cannot predict wavelength value - no fit has been performed\")\n return None\n\n def fitsol_deriv(self, xfit=None, idx=None):\n \"\"\"Calculate the dispersion as a function of wavelength\n\n Args:\n xfit (ndarray, float): Pixel values that the user wishes to evaluate the wavelength\n idx (int): Index of the arc line detections that the user wishes to evaluate the wavelength\n\n Returns:\n disp (ndarray, float, None): The dispersion (Angstroms/pixel) as a function of wavelength\n \"\"\"\n if xfit is None:\n xfit = self._detns\n if self._fitdict['coeff'] is not None:\n cder = np.polyder(self._fitdict[\"coeff\"])\n if idx is None:\n return np.polyval(cder, xfit / 
self._fitdict[\"scale\"]) / self._fitdict[\"scale\"]\n else:\n return np.polyval(cder, xfit[idx] / self._fitdict[\"scale\"]) / self._fitdict[\"scale\"]\n else:\n msgs.bug(\"Cannot predict wavelength value - no fit has been performed\")\n return None\n\n def fitsol_fit(self):\n \"\"\"Perform a fit to the line identifications\n \"\"\"\n # Calculate the dispersion\n # disp = (ids[-1] - ids[0]) / (tcent[idx_str[-1]] - tcent[idx_str[0]])\n # final_fit = fitting.iterative_fitting(censpec, tcent, idx_str, ids,\n # llist, disp, verbose=False,\n # n_first=2, n_final=self._fitdict[\"polyorder\"])\n ord = self._fitdict[\"polyorder\"]\n gd_det = np.where((self._lineflg == 1) | (self._lineflg == 2)) # Use the user IDs or acceptable auto IDs only!\n # Check if there are enough points to perform a fit\n if gd_det[0].size < ord+1:\n msg = \"Polynomial order must be >= number of line IDs\\n\" +\\\n \"Polynomial order = {0:d}, Number of line IDs = {1:d}\".format(ord, gd_det[0].size)\n self.update_infobox(message=msg, yesno=False)\n else:\n # Start by performing a basic fit\n xpix = self._detns[gd_det] / self._fitdict[\"scale\"]\n ylam = self._lineids[gd_det]\n self._fitdict[\"coeff\"] = np.polyfit(xpix, ylam, ord)\n bdisp = self.fitsol_deriv(self.specdata.size / (2*self._fitdict[\"scale\"])) # Angstroms/pixel at the centre of the spectrum\n # Then try a detailed fit\n try:\n final_fit = wv_fitting.iterative_fitting(\n self.specdata, self._detns, gd_det[0],\n self._lineids[gd_det[0]], self._line_lists, bdisp,\n verbose=False, n_first=min(2, self._fitdict[\"polyorder\"]),\n match_toler=self.par['match_toler'],\n func=self.par['func'], input_only=True,\n n_final=self._fitdict[\"polyorder\"],\n sigrej_first=self.par['sigrej_first'],\n sigrej_final=self.par['sigrej_final'])\n final_fit.spat_id = self._slit\n\n # Update the fitdict\n #for key in final_fit:\n # self._fitdict[key] = final_fit[key]\n self._fitdict['polyorder'] = final_fit.pypeitfit['order'][0]\n self._fitdict['fitc'] = final_fit.pypeitfit['fitc']\n self._fitdict['full_fit'] = final_fit.pypeitfit\n self._fitdict['pixel_fit'] = final_fit.pixel_fit\n self._fitdict['wave_fit'] = final_fit.wave_fit\n self._fitdict['wave_soln'] = final_fit.wave_soln\n self._fitdict['xnorm'] = final_fit.xnorm\n self._fitdict['rms'] = final_fit.rms\n self._fitdict['tcent'] = final_fit.tcent\n self._fitdict['cen_disp'] = final_fit.cen_disp\n self._fitdict['cen_wave'] = final_fit.cen_wave\n self._fitdict['WaveFit'] = final_fit\n\n except TypeError:\n # Just stick use the basic fit\n self._fitdict[\"fitc\"] = None\n\n def update_infobox(self, message=\"Press '?' to list the available options\",\n yesno=True, default=False):\n \"\"\"Send a new message to the information window at the top of the canvas\n\n Args:\n message (str): Message to be displayed\n \"\"\"\n self.axes['info'].clear()\n if default:\n self.axes['info'].text(0.5, 0.5, \"Press '?' 
to list the available options\", transform=self.axes['info'].transAxes,\n horizontalalignment='center', verticalalignment='center')\n self.canvas.draw()\n return\n # Display the message\n self.axes['info'].text(0.5, 0.5, message, transform=self.axes['info'].transAxes,\n horizontalalignment='center', verticalalignment='center')\n if yesno:\n self.axes['info'].fill_between([0.8, 0.9], 0, 1, facecolor='green', alpha=0.5, transform=self.axes['info'].transAxes)\n self.axes['info'].fill_between([0.9, 1.0], 0, 1, facecolor='red', alpha=0.5, transform=self.axes['info'].transAxes)\n self.axes['info'].text(0.85, 0.5, \"YES\", transform=self.axes['info'].transAxes,\n horizontalalignment='center', verticalalignment='center')\n self.axes['info'].text(0.95, 0.5, \"NO\", transform=self.axes['info'].transAxes,\n horizontalalignment='center', verticalalignment='center')\n self.axes['info'].set_xlim((0, 1))\n self.axes['info'].set_ylim((0, 1))\n self.canvas.draw()\n\n def update_line_id(self):\n \"\"\"Find the nearest wavelength in the linelist\n \"\"\"\n if self._detns_idx != -1:\n self._lineids[self._detns_idx] = self._lines[self._slideval]\n self._lineflg[self._detns_idx] = 1\n\n def update_regions(self):\n \"\"\"Update the regions used to fit Gaussian\n \"\"\"\n self._fitregions[self._start:self._end] = self._addsub\n\n def update_ghosts(self):\n \"\"\"Update the ghosts\n \"\"\"\n if self._addsub == 0: # RMB\n # Stretching factor\n xmn, xmx = self.axes['main'].get_xlim()\n self._ghostparam[1] = self._oldghostscl*(1.0 + (self._middata - self._startdata) / (xmx - xmn))\n else: # LMB\n if self._wavepix == 0:\n # Plotting wavelength\n self._ghostparam[0] = self._middata - self._startdata\n elif self._fitdict['fitc'] is not None:\n # Plotting pixels and have a wavelength solution\n xnorm = self._fitdict['xnorm']\n\n # Calculate the estimated wavelength of the detections\n specy = self._fitdict['full_fit'].eval(np.array([self._startdata, self._middata]) / xnorm)\n self._ghostparam[0] = specy[1] - specy[0]\n else:\n # Plotting pixels, but don't have a wavelength solution\n scale = (np.max(self._lines) - np.min(self._lines))/self.specx.size # Angstroms per pixel\n self._ghostparam[0] = (self._middata - self._startdata) * scale # Calculate the shift in Angstroms\n # grad_orig = self.specx.size / (np.max(self._lines) - np.min(self._lines))\n # plotx = self._ghostparam[1] * grad_orig * (self._lines - np.min(self._lines) + self._ghostparam[0])\n\n def load_IDs(self, wv_calib=None, fname='waveid.ascii'):\n \"\"\"Load line IDs\n \"\"\"\n if wv_calib is not None:\n for ii in range(wv_calib['pixel_fit'].size):\n idx = np.argmin(np.abs(self._detns-wv_calib['pixel_fit'][ii]))\n self._lineids[idx] = wv_calib['wave_fit'][ii]\n self._lineflg[idx] = int(wv_calib['mask'][ii])\n self._fitdict['polyorder'] = len(wv_calib['fitc'])-1\n msgs.info(\"Loaded line IDs\")\n elif os.path.exists(fname):\n data = ascii_io.read(fname, format='fixed_width')\n self._detns = data['pixel'].data\n self._lineids = data['wavelength'].data\n self._lineflg = data['flag'].data\n msgs.info(\"Loaded line IDs:\" + msgs.newline() + fname)\n self.update_infobox(message=\"Loaded line IDs: {0:s}\".format(fname), yesno=False)\n else:\n self.update_infobox(message=\"Could not find line IDs: {0:s}\".format(fname), yesno=False)\n\n def save_IDs(self, fname='waveid.ascii'):\n \"\"\"Save the current IDs\n \"\"\"\n meta = dict(comments=[\"flags:\",\n \" 0 = wavelength has not been assigned to this detection\",\n \" 1 = user has assigned wavelength to this 
detection\",\n \" 2 = detection has been automatically assigned\",\n \" 3 = automatically assigned wavelength was rejected\"])\n data = Table({'pixel' : self._detns,\n 'wavelength' : self._lineids,\n 'flag' : self._lineflg},\n names=['pixel', 'wavelength', 'flag'],\n meta=meta)\n ascii_io.write(data, fname, format='fixed_width')\n msgs.info(\"Line IDs saved as:\" + msgs.newline() + fname)\n self.update_infobox(message=\"Line IDs saved as: {0:s}\".format(fname), yesno=False)\n"
] | [
[
"matplotlib.widgets.Button",
"numpy.copy",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.subplots_adjust",
"numpy.polyfit",
"numpy.polyval",
"matplotlib.colors.Normalize",
"numpy.argmin",
"numpy.abs",
"numpy.where",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.max",
"numpy.min",
"matplotlib.pyplot.close",
"numpy.sort",
"numpy.roll",
"matplotlib.pyplot.show",
"numpy.polyder",
"numpy.array",
"matplotlib.transforms.blended_transform_factory"
]
] |
JerBouma/OpenBBTerminal | [
"0c60d70cb29b0a6e4db41d6dd0d34f79a6169b27"
] | [
"openbb_terminal/cryptocurrency/overview/coinpaprika_view.py"
] | [
"\"\"\"CoinPaprika view\"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\nimport os\n\nfrom pandas.plotting import register_matplotlib_converters\n\nimport openbb_terminal.cryptocurrency.overview.coinpaprika_model as paprika\nfrom openbb_terminal.cryptocurrency.dataframe_helpers import (\n lambda_long_number_format_with_type_check,\n)\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.helper_funcs import export_data, print_rich_table\nfrom openbb_terminal.rich_config import console\n\nlogger = logging.getLogger(__name__)\n\nregister_matplotlib_converters()\n\n# pylint: disable=inconsistent-return-statements\n# pylint: disable=C0302, too-many-lines\n\nCURRENCIES = [\n \"BTC\",\n \"ETH\",\n \"USD\",\n \"EUR\",\n \"PLN\",\n \"KRW\",\n \"GBP\",\n \"CAD\",\n \"JPY\",\n \"RUB\",\n \"TRY\",\n \"NZD\",\n \"AUD\",\n \"CHF\",\n \"UAH\",\n \"HKD\",\n \"SGD\",\n \"NGN\",\n \"PHP\",\n \"MXN\",\n \"BRL\",\n \"THB\",\n \"CLP\",\n \"CNY\",\n \"CZK\",\n \"DKK\",\n \"HUF\",\n \"IDR\",\n \"ILS\",\n \"INR\",\n \"MYR\",\n \"NOK\",\n \"PKR\",\n \"SEK\",\n \"TWD\",\n \"ZAR\",\n \"VND\",\n \"BOB\",\n \"COP\",\n \"PEN\",\n \"ARS\",\n \"ISK\",\n]\n\n# see https://github.com/OpenBB-finance/OpenBBTerminal/pull/562#issuecomment-887842888\n# EXCHANGES = paprika.get_list_of_exchanges()\n\n\n@log_start_end(log=logger)\ndef display_global_market(export: str) -> None:\n \"\"\"Return data frame with most important global crypto statistics like:\n market_cap_usd, volume_24h_usd, bitcoin_dominance_percentage, cryptocurrencies_number,\n market_cap_ath_value, market_cap_ath_date, volume_24h_ath_value, volume_24h_ath_date,\n market_cap_change_24h, volume_24h_change_24h, last_updated [Source: CoinPaprika]\n\n Parameters\n ----------\n export : str\n Export dataframe data to csv,json,xlsx file\n \"\"\"\n\n df = paprika.get_global_market()\n df_data = df.copy()\n df[\"Value\"] = df[\"Value\"].apply( # pylint:disable=unsupported-assignment-operation\n lambda x: lambda_long_number_format_with_type_check(x)\n )\n\n print_rich_table(\n df, headers=list(df.columns), show_index=False, title=\"Global Crypto Statistics\"\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"global\",\n df_data,\n )\n\n\n@log_start_end(log=logger)\ndef display_all_coins_market_info(\n currency: str, sortby: str, descend: bool, top: int, export: str\n) -> None:\n \"\"\"Displays basic market information for all coins from CoinPaprika API. 
[Source: CoinPaprika]\n\n Parameters\n ----------\n currency: str\n Quoted currency\n top: int\n Number of records to display\n sortby: str\n Key by which to sort data\n descend: bool\n Flag to sort data descending\n links: bool\n Flag to display urls\n export : str\n Export dataframe data to csv,json,xlsx file\n \"\"\"\n\n df = paprika.get_coins_market_info(quotes=currency).sort_values(\n by=sortby, ascending=descend\n )\n\n df_data = df.copy()\n\n if df.empty:\n console.print(\"No data found\", \"\\n\")\n return\n\n cols = [col for col in df.columns if col != \"rank\"]\n df[cols] = df[cols].applymap(lambda x: lambda_long_number_format_with_type_check(x))\n\n console.print(f\"\\nDisplaying data vs {currency}\")\n\n print_rich_table(\n df.head(top),\n headers=list(df.columns),\n show_index=False,\n title=\"Basic Market Information\",\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"markets\",\n df_data,\n )\n\n\n@log_start_end(log=logger)\ndef display_all_coins_info(\n currency: str, sortby: str, descend: bool, top: int, export: str\n) -> None:\n \"\"\"Displays basic coin information for all coins from CoinPaprika API. [Source: CoinPaprika]\n\n Parameters\n ----------\n currency: str\n Quoted currency\n top: int\n Number of records to display\n sortby: str\n Key by which to sort data\n descend: bool\n Flag to sort data descending\n links: bool\n Flag to display urls\n export : str\n Export dataframe data to csv,json,xlsx file\n \"\"\"\n\n df = paprika.get_coins_info(quotes=currency).sort_values(\n by=sortby, ascending=descend\n )\n\n df_data = df.copy()\n\n if df.empty:\n console.print(\"Not data found\", \"\\n\")\n return\n\n cols = [col for col in df.columns if col != \"rank\"]\n df[cols] = df[cols].applymap(lambda x: lambda_long_number_format_with_type_check(x))\n\n console.print(f\"\\nDisplaying data vs {currency}\")\n\n print_rich_table(\n df.head(top),\n headers=list(df.columns),\n show_index=False,\n title=\"Basic Coin Information\",\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"info\",\n df_data,\n )\n\n\n@log_start_end(log=logger)\ndef display_all_exchanges(\n currency: str, sortby: str, descend: bool, top: int, export: str\n) -> None:\n \"\"\"List exchanges from CoinPaprika API. 
[Source: CoinPaprika]\n\n Parameters\n ----------\n currency: str\n Quoted currency\n top: int\n Number of records to display\n sortby: str\n Key by which to sort data\n descend: bool\n Flag to sort data descending\n links: bool\n Flag to display urls\n export : str\n Export dataframe data to csv,json,xlsx file\n\n \"\"\"\n\n df = paprika.get_list_of_exchanges(quotes=currency).sort_values(\n by=sortby, ascending=descend\n )\n\n df_data = df.copy()\n\n if df.empty:\n console.print(\"No data found\", \"\\n\")\n return\n\n cols = [col for col in df.columns if col != \"rank\"]\n df[cols] = df[cols].applymap(lambda x: lambda_long_number_format_with_type_check(x))\n console.print(f\"\\nDisplaying data vs {currency}\")\n\n print_rich_table(\n df.head(top), headers=list(df.columns), show_index=False, title=\"List Exchanges\"\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"exchanges\",\n df_data,\n )\n\n\n@log_start_end(log=logger)\ndef display_exchange_markets(\n exchange: str, sortby: str, descend: bool, top: int, links: bool, export: str\n) -> None:\n \"\"\"Get all markets for given exchange [Source: CoinPaprika]\n\n Parameters\n ----------\n exchange: str\n Exchange identifier e.g Binance\n top: int\n Number of records to display\n sortby: str\n Key by which to sort data\n descend: bool\n Flag to sort data descending\n links: bool\n Flag to display urls\n export : str\n Export dataframe data to csv,json,xlsx file\n \"\"\"\n\n df = paprika.get_exchanges_market(exchange_id=exchange)\n\n df_data = df.copy()\n\n if df.empty:\n console.print(\"No data found\", \"\\n\")\n return\n\n df = df.sort_values(by=sortby, ascending=descend)\n\n if links is True:\n df = df[[\"exchange_id\", \"pair\", \"trust_score\", \"market_url\"]]\n else:\n df.drop(\"market_url\", axis=1, inplace=True)\n\n print_rich_table(\n df.head(top),\n headers=list(df.columns),\n show_index=False,\n title=\"Exchange Markets\",\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"exmarkets\",\n df_data,\n )\n\n\n@log_start_end(log=logger)\ndef display_all_platforms(export: str) -> None:\n \"\"\"List all smart contract platforms like ethereum, solana, cosmos, polkadot, kusama. [Source: CoinPaprika]\n\n Parameters\n ----------\n export : str\n Export dataframe data to csv,json,xlsx file\n \"\"\"\n\n df = paprika.get_all_contract_platforms()\n\n print_rich_table(\n df, headers=list(df.columns), show_index=False, title=\"Smart Contract Platforms\"\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"platforms\",\n df,\n )\n\n\n@log_start_end(log=logger)\ndef display_contracts(\n platform: str, sortby: str, descend: bool, top: int, export: str\n) -> None:\n \"\"\"Gets all contract addresses for given platform. [Source: CoinPaprika]\n\n Parameters\n ----------\n platform: str\n Blockchain platform like eth-ethereum\n top: int\n Number of records to display\n sortby: str\n Key by which to sort data\n descend: bool\n Flag to sort data descending\n export : str\n Export dataframe data to csv,json,xlsx file\n \"\"\"\n\n df = paprika.get_contract_platform(platform)\n\n if df.empty:\n console.print(f\"Nothing found for platform: {platform}\", \"\\n\")\n return\n\n df = df.sort_values(by=sortby, ascending=descend)\n\n print_rich_table(\n df.head(top),\n headers=list(df.columns),\n show_index=False,\n title=\"Contract Addresses\",\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"contracts\",\n df,\n )\n"
] | [
[
"pandas.plotting.register_matplotlib_converters"
]
] |
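The only API recorded for the entry above is `pandas.plotting.register_matplotlib_converters`. As an illustrative aside (not part of the dataset row, and not taken from the OpenBB repository), the sketch below shows what that call provides: it registers pandas' date converters so matplotlib can plot `Timestamp` values directly, which is why the view module invokes it at import time. All names and sample data here are hypothetical.

```python
# Illustrative sketch only: demonstrates pandas.plotting.register_matplotlib_converters,
# the single API recorded for the dataset row above.
import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters

register_matplotlib_converters()  # install pandas' Timestamp/Period converters

ts = pd.Series(range(10), index=pd.date_range("2021-01-01", periods=10))
fig, ax = plt.subplots()
ax.plot(ts.index, ts.values)  # Timestamp x-values are handled by the registered converter
plt.close(fig)
```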
haesleinhuepf/pyclesperanto_prototype | [
"65bc3035d3b2b61a2722c93b95bae310bfbd190e"
] | [
"pyclesperanto_prototype/_tier8/_affine_transform.py"
] | [
"from typing import Union\n\nfrom .._tier0 import plugin_function\nfrom .._tier0 import Image\nfrom .._tier0 import push\nfrom ._AffineTransform3D import AffineTransform3D\nfrom skimage.transform import AffineTransform\nimport numpy as np\n\n@plugin_function\ndef affine_transform(source : Image, destination : Image = None, transform : Union[np.ndarray, AffineTransform3D, AffineTransform] = None, linear_interpolation : bool = False):\n \"\"\"\n Applies an affine transform to an image.\n\n Parameters\n ----------\n source : Image\n image to be transformed\n destination : Image, optional\n image where the transformed image should be written to\n transform : 4x4 numpy array or AffineTransform3D object or skimage.transform.AffineTransform object\n transform matrix or object describing the transformation\n linear_interpolation: bool\n not implemented yet\n\n Returns\n -------\n destination\n\n \"\"\"\n import numpy as np\n from .._tier0 import empty_image_like\n from .._tier0 import execute\n from .._tier1 import copy\n from .._tier0 import create\n from .._tier1 import copy_slice\n\n # deal with 2D input images\n if len(source.shape) == 2:\n source_3d = create([1, source.shape[0], source.shape[1]])\n copy_slice(source, source_3d, 0)\n source = source_3d\n\n # deal with 2D output images\n original_destination = destination\n copy_back_after_transforming = False\n if len(destination.shape) == 2:\n destination = create([1, destination.shape[0], destination.shape[1]])\n copy_slice(original_destination, destination, 0)\n copy_back_after_transforming = True\n\n # we invert the transform because we go from the target image to the source image to read pixels\n if isinstance(transform, AffineTransform3D):\n transform_matrix = np.asarray(transform.copy().inverse())\n elif isinstance(transform, AffineTransform):\n matrix = np.asarray(transform.params)\n matrix = np.asarray([\n [matrix[0,0], matrix[0,1], 0, matrix[0,2]],\n [matrix[1,0], matrix[1,1], 0, matrix[1,2]],\n [0, 0, 1, 0],\n [matrix[2,0], matrix[2,1], 0, matrix[2,2]]\n ])\n transform_matrix = np.linalg.inv(matrix)\n else:\n transform_matrix = np.linalg.inv(transform)\n\n gpu_transform_matrix = push(transform_matrix)\n\n kernel_suffix = ''\n if linear_interpolation:\n image = empty_image_like(source)\n copy(source, image)\n source = image\n kernel_suffix = '_interpolate'\n\n\n parameters = {\n \"input\": source,\n \"output\": destination,\n \"mat\": gpu_transform_matrix\n }\n\n execute(__file__, '../clij-opencl-kernels/kernels/affine_transform_' + str(len(destination.shape)) + 'd' + kernel_suffix + '_x.cl',\n 'affine_transform_' + str(len(destination.shape)) + 'd' + kernel_suffix, destination.shape, parameters)\n\n # deal with 2D output images\n if copy_back_after_transforming:\n copy_slice(destination, original_destination, 0)\n\n return original_destination"
] | [
[
"numpy.linalg.inv",
"numpy.asarray"
]
] |
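The APIs recorded for the entry above are `numpy.linalg.inv` and `numpy.asarray`, which `affine_transform` uses to assemble a 4x4 matrix and invert it before pushing it to the GPU (the kernel reads source pixels from target coordinates, hence the inverse). A minimal sketch of that pattern, using a made-up translation matrix rather than anything from the repository:

```python
# Minimal sketch of the numpy calls recorded above: assemble a 4x4 affine
# matrix with np.asarray and invert it with np.linalg.inv.
import numpy as np

# hypothetical forward transform: translate by (10, 5, 0) in homogeneous coordinates
forward = np.asarray([
    [1.0, 0.0, 0.0, 10.0],
    [0.0, 1.0, 0.0, 5.0],
    [0.0, 0.0, 1.0, 0.0],
    [0.0, 0.0, 0.0, 1.0],
])

inverse = np.linalg.inv(forward)  # the matrix the kernel actually consumes

point = np.asarray([2.0, 3.0, 4.0, 1.0])
assert np.allclose(inverse @ (forward @ point), point)  # round trip returns the original point
```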
tourdeml/SAM | [
"08cb3cccb39157859a1c77ef1e1852120df4a790"
] | [
"sam/utils.py"
] | [
"from typing import Iterable, Callable\n\nimport torch\nfrom torch.optim import Optimizer\n\n\ndef compute_sam(group: dict, closure: Callable):\n grads = []\n params_with_grads = []\n\n rho = group['rho']\n # update internal_optim's learning rate\n\n for p in group['params']:\n if p.grad is not None:\n # without clone().detach(), p.grad will be zeroed by closure()\n grads.append(p.grad.clone().detach())\n params_with_grads.append(p)\n device = grads[0].device\n\n # compute \\hat{\\epsilon}=\\rho/\\norm{g}\\|g\\|\n grad_norm = torch.stack(\n [g.detach().norm(2).to(device) for g in grads]).norm(2)\n epsilon = grads # alias for readability\n torch._foreach_mul_(epsilon, rho / grad_norm)\n\n # virtual step toward \\epsilon\n torch._foreach_add_(params_with_grads, epsilon)\n # compute g=\\nabla_w L_B(w)|_{w+\\hat{\\epsilon}}\n closure()\n # virtual step back to the original point\n torch._foreach_sub_(params_with_grads, epsilon)\n"
] | [
[
"torch._foreach_mul_",
"torch._foreach_add_",
"torch._foreach_sub_"
]
] |
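The APIs recorded for the entry above are the private `torch._foreach_*` helpers, which apply one operation across a whole list of tensors in a single call; `compute_sam` uses them to scale the gradient list by `rho / grad_norm` and to step all parameters to the perturbed point and back. A standalone sketch with toy tensors (not the SAM code itself; note these helpers are private and may change between PyTorch releases):

```python
# Toy sketch of the fused list operations recorded above.
import torch

params = [torch.ones(3), torch.full((2, 2), 2.0)]
steps = [torch.full_like(p, 0.5) for p in params]

torch._foreach_mul_(steps, 0.1)      # scale every step tensor in place (like rho / grad_norm)
torch._foreach_add_(params, steps)   # params[i] += steps[i]  -> perturbed point
torch._foreach_sub_(params, steps)   # params[i] -= steps[i]  -> back to the original point

assert torch.allclose(params[0], torch.ones(3))
assert torch.allclose(params[1], torch.full((2, 2), 2.0))
```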
andrewor14/benchmarks | [
"cb2457bbda6138b3e0af95a6d50b7d476d52c410"
] | [
"scripts/tf_cnn_benchmarks/models/ssd_model.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\n\"\"\"SSD300 Model Configuration.\n\nReferences:\n Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,\n Cheng-Yang Fu, Alexander C. Berg\n SSD: Single Shot MultiBox Detector\n arXiv:1512.02325\n\nPorted from MLPerf reference implementation:\n https://github.com/mlperf/reference/tree/ssd/single_stage_detector/ssd\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport multiprocessing\nimport os\nimport re\nimport threading\nimport tensorflow as tf\n\nimport constants\nimport mlperf\nimport ssd_constants\nfrom cnn_util import log_fn\nfrom models import model as model_lib\nfrom models import resnet_model\n\nBACKBONE_MODEL_SCOPE_NAME = 'resnet34_backbone'\n\n\nclass SSD300Model(model_lib.CNNModel):\n \"\"\"Single Shot Multibox Detection (SSD) model for 300x300 image datasets.\"\"\"\n\n def __init__(self, label_num=ssd_constants.NUM_CLASSES, batch_size=32,\n learning_rate=1e-3, backbone='resnet34', params=None):\n super(SSD300Model, self).__init__('ssd300', 300, batch_size, learning_rate,\n params=params)\n # For COCO dataset, 80 categories + 1 background = 81 labels\n self.label_num = label_num\n\n # Currently only support ResNet-34 as backbone model\n if backbone != 'resnet34':\n raise ValueError('Invalid backbone model %s for SSD.' % backbone)\n mlperf.logger.log(key=mlperf.tags.BACKBONE, value=backbone)\n\n # Number of channels and default boxes associated with the following layers:\n # ResNet34 layer, Conv7, Conv8_2, Conv9_2, Conv10_2, Conv11_2\n self.out_chan = [256, 512, 512, 256, 256, 256]\n mlperf.logger.log(key=mlperf.tags.LOC_CONF_OUT_CHANNELS,\n value=self.out_chan)\n\n # Number of default boxes from layers of different scales\n # 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4\n self.num_dboxes = [4, 6, 6, 6, 4, 4]\n mlperf.logger.log(key=mlperf.tags.NUM_DEFAULTS_PER_CELL,\n value=self.num_dboxes)\n\n # TODO(haoyuzhang): in order to correctly restore in replicated mode, need\n # to create a saver for each tower before graph is finalized. Use variable\n # manager for better efficiency.\n self.backbone_savers = []\n\n # Collected predictions for eval stage. It maps each image id in eval\n # dataset to a dict containing the following information:\n # source_id: raw ID of image\n # raw_shape: raw shape of image\n # pred_box: encoded box coordinates of prediction\n # pred_scores: scores of classes in prediction\n self.predictions = {}\n\n # Global step when predictions are collected.\n self.eval_global_step = 0\n\n # Average precision. In asynchronous eval mode, this is the latest AP we\n # get so far and may not be the results at current eval step.\n self.eval_coco_ap = 0\n\n # Process, queues, and thread for asynchronous evaluation. 
When enabled,\n # create a separte process (async_eval_process) that continously pull\n # intermediate results from the predictions queue (a multiprocessing queue),\n # process them, and push final results into results queue (another\n # multiprocessing queue). The main thread is responsible to push message\n # into predictions queue, and start a separate thread to continuously pull\n # messages from results queue to update final results.\n # Message in predictions queue should be a tuple of two elements:\n # (evaluation step, predictions)\n # Message in results queue should be a tuple of two elements:\n # (evaluation step, final results)\n self.async_eval_process = None\n self.async_eval_predictions_queue = None\n self.async_eval_results_queue = None\n self.async_eval_results_getter_thread = None\n\n # The MLPerf reference uses a starting lr of 1e-3 at bs=32.\n self.base_lr_batch_size = 32\n\n def skip_final_affine_layer(self):\n return True\n\n def add_backbone_model(self, cnn):\n # --------------------------------------------------------------------------\n # Resnet-34 backbone model -- modified for SSD\n # --------------------------------------------------------------------------\n\n # Input 300x300, output 150x150\n cnn.conv(64, 7, 7, 2, 2, mode='SAME_RESNET', use_batch_norm=True)\n cnn.mpool(3, 3, 2, 2, mode='SAME')\n\n resnet34_layers = [3, 4, 6, 3]\n version = 'v1'\n\n # ResNet-34 block group 1\n # Input 150x150, output 75x75\n for i in range(resnet34_layers[0]):\n # Last argument forces residual_block to use projection shortcut, even\n # though the numbers of input and output channels are equal\n resnet_model.residual_block(cnn, 64, 1, version)\n\n # ResNet-34 block group 2\n # Input 75x75, output 38x38\n for i in range(resnet34_layers[1]):\n stride = 2 if i == 0 else 1\n resnet_model.residual_block(cnn, 128, stride, version, i == 0)\n\n # ResNet-34 block group 3\n # This block group is modified: first layer uses stride=1 so that the image\n # size does not change in group of layers\n # Input 38x38, output 38x38\n for i in range(resnet34_layers[2]):\n # The following line is intentionally commented out to differentiate from\n # the original ResNet-34 model\n # stride = 2 if i == 0 else 1\n resnet_model.residual_block(cnn, 256, stride, version, i == 0)\n\n # ResNet-34 block group 4: removed final block group\n # The following 3 lines are intentially commented out to differentiate from\n # the original ResNet-34 model\n # for i in range(resnet34_layers[3]):\n # stride = 2 if i == 0 else 1\n # resnet_model.residual_block(cnn, 512, stride, version, i == 0)\n\n def add_inference(self, cnn):\n cnn.use_batch_norm = True\n cnn.batch_norm_config = {'decay': ssd_constants.BATCH_NORM_DECAY,\n 'epsilon': ssd_constants.BATCH_NORM_EPSILON,\n 'scale': True}\n\n with tf.variable_scope(BACKBONE_MODEL_SCOPE_NAME):\n self.add_backbone_model(cnn)\n\n # --------------------------------------------------------------------------\n # SSD additional layers\n # --------------------------------------------------------------------------\n\n def add_ssd_layer(cnn, depth, k_size, stride, mode):\n return cnn.conv(depth, k_size, k_size, stride, stride,\n mode=mode, use_batch_norm=False,\n kernel_initializer=tf.contrib.layers.xavier_initializer())\n\n # Activations for feature maps of different layers\n self.activations = [cnn.top_layer]\n # Conv7_1, Conv7_2\n # Input 38x38, output 19x19\n add_ssd_layer(cnn, 256, 1, 1, 'valid')\n self.activations.append(add_ssd_layer(cnn, 512, 3, 2, 'same'))\n\n # Conv8_1, 
Conv8_2\n # Input 19x19, output 10x10\n add_ssd_layer(cnn, 256, 1, 1, 'valid')\n self.activations.append(add_ssd_layer(cnn, 512, 3, 2, 'same'))\n\n # Conv9_1, Conv9_2\n # Input 10x10, output 5x5\n add_ssd_layer(cnn, 128, 1, 1, 'valid')\n self.activations.append(add_ssd_layer(cnn, 256, 3, 2, 'same'))\n\n # Conv10_1, Conv10_2\n # Input 5x5, output 3x3\n add_ssd_layer(cnn, 128, 1, 1, 'valid')\n self.activations.append(add_ssd_layer(cnn, 256, 3, 1, 'valid'))\n\n # Conv11_1, Conv11_2\n # Input 3x3, output 1x1\n add_ssd_layer(cnn, 128, 1, 1, 'valid')\n self.activations.append(add_ssd_layer(cnn, 256, 3, 1, 'valid'))\n\n self.loc = []\n self.conf = []\n\n for nd, ac, oc in zip(self.num_dboxes, self.activations, self.out_chan):\n l = cnn.conv(nd * 4, 3, 3, 1, 1, input_layer=ac,\n num_channels_in=oc, activation=None, use_batch_norm=False,\n kernel_initializer=tf.contrib.layers.xavier_initializer())\n scale = l.get_shape()[-1]\n # shape = [batch_size, nd * 4, scale, scale]\n l = tf.reshape(l, [self.batch_size, nd, 4, scale, scale])\n # shape = [batch_size, nd, 4, scale, scale]\n l = tf.transpose(l, [0, 1, 3, 4, 2])\n # shape = [batch_size, nd, scale, scale, 4]\n self.loc.append(tf.reshape(l, [self.batch_size, -1, 4]))\n # shape = [batch_size, nd * scale * scale, 4]\n\n c = cnn.conv(nd * self.label_num, 3, 3, 1, 1, input_layer=ac,\n num_channels_in=oc, activation=None, use_batch_norm=False,\n kernel_initializer=tf.contrib.layers.xavier_initializer())\n # shape = [batch_size, nd * label_num, scale, scale]\n c = tf.reshape(c, [self.batch_size, nd, self.label_num, scale, scale])\n # shape = [batch_size, nd, label_num, scale, scale]\n c = tf.transpose(c, [0, 1, 3, 4, 2])\n # shape = [batch_size, nd, scale, scale, label_num]\n self.conf.append(tf.reshape(c, [self.batch_size, -1, self.label_num]))\n # shape = [batch_size, nd * scale * scale, label_num]\n\n # Shape of locs: [batch_size, NUM_SSD_BOXES, 4]\n # Shape of confs: [batch_size, NUM_SSD_BOXES, label_num]\n locs, confs = tf.concat(self.loc, 1), tf.concat(self.conf, 1)\n\n # Pack location and confidence outputs into a single output layer\n # Shape of logits: [batch_size, NUM_SSD_BOXES, 4+label_num]\n logits = tf.concat([locs, confs], 2)\n\n cnn.top_layer = logits\n cnn.top_size = 4 + self.label_num\n\n return cnn.top_layer\n\n def get_learning_rate(self, global_step, batch_size):\n rescaled_lr = self.get_scaled_base_learning_rate(batch_size)\n # Defined in MLPerf reference model\n boundaries = [160000, 200000]\n boundaries = [b * self.base_lr_batch_size // batch_size for b in boundaries]\n decays = [1, 0.1, 0.01]\n learning_rates = [rescaled_lr * d for d in decays]\n lr = tf.train.piecewise_constant(global_step, boundaries, learning_rates)\n warmup_steps = int(118287 / batch_size * 5)\n warmup_lr = (\n rescaled_lr * tf.cast(global_step, tf.float32) / tf.cast(\n warmup_steps, tf.float32))\n return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)\n\n def get_scaled_base_learning_rate(self, batch_size):\n \"\"\"Calculates base learning rate for creating lr schedule.\n\n In replicated mode, gradients are summed rather than averaged which, with\n the sgd and momentum optimizers, increases the effective learning rate by\n lr * num_gpus. 
Dividing the base lr by num_gpus negates the increase.\n\n Args:\n batch_size: Total batch-size.\n\n Returns:\n Base learning rate to use to create lr schedule.\n \"\"\"\n base_lr = self.learning_rate\n if self.params.variable_update == 'replicated':\n base_lr = self.learning_rate / self.params.num_gpus\n scaled_lr = base_lr * (batch_size / self.base_lr_batch_size)\n return scaled_lr\n\n def _collect_backbone_vars(self):\n backbone_vars = tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES, scope='.*'+ BACKBONE_MODEL_SCOPE_NAME)\n var_list = {}\n\n # Assume variables in the checkpoint are following the naming convention of\n # a model checkpoint trained with TF official model\n # TODO(haoyuzhang): the following variable name parsing is hacky and easy\n # to break if there is change in naming convention of either benchmarks or\n # official models.\n for v in backbone_vars:\n # conv2d variable example (model <-- checkpoint):\n # v/cg/conv24/conv2d/kernel:0 <-- conv2d_24/kernel\n if 'conv2d' in v.name:\n re_match = re.search(r'conv(\\d+)/conv2d/(.+):', v.name)\n if re_match:\n layer_id = int(re_match.group(1))\n param_name = re_match.group(2)\n vname_in_ckpt = self._var_name_in_official_model_ckpt(\n 'conv2d', layer_id, param_name)\n var_list[vname_in_ckpt] = v\n\n # batchnorm varariable example:\n # v/cg/conv24/batchnorm25/gamma:0 <-- batch_normalization_25/gamma\n elif 'batchnorm' in v.name:\n re_match = re.search(r'batchnorm(\\d+)/(.+):', v.name)\n if re_match:\n layer_id = int(re_match.group(1))\n param_name = re_match.group(2)\n vname_in_ckpt = self._var_name_in_official_model_ckpt(\n 'batch_normalization', layer_id, param_name)\n var_list[vname_in_ckpt] = v\n\n return var_list\n\n def _var_name_in_official_model_ckpt(self, layer_name, layer_id, param_name):\n \"\"\"Return variable names according to convention in TF official models.\"\"\"\n vname_in_ckpt = layer_name\n if layer_id > 0:\n vname_in_ckpt += '_' + str(layer_id)\n vname_in_ckpt += '/' + param_name\n return vname_in_ckpt\n\n def loss_function(self, inputs, build_network_result):\n logits = build_network_result.logits\n\n # Unpack model output back to locations and confidence scores of predictions\n # Shape of pred_loc: [batch_size, NUM_SSD_BOXES, 4]\n # Shape of pred_label: [batch_size, NUM_SSD_BOXES, label_num]\n pred_loc, pred_label = tf.split(logits, [4, self.label_num], 2)\n\n # Shape of gt_loc: [batch_size, NUM_SSD_BOXES, 4]\n # Shape of gt_label: [batch_size, NUM_SSD_BOXES, 1]\n # Shape of num_gt: [batch_size]\n _, gt_loc, gt_label, num_gt = inputs\n gt_label = tf.cast(gt_label, tf.int32)\n\n box_loss = self._localization_loss(pred_loc, gt_loc, gt_label, num_gt)\n class_loss = self._classification_loss(pred_label, gt_label, num_gt)\n\n tf.summary.scalar('box_loss', tf.reduce_mean(box_loss))\n tf.summary.scalar('class_loss', tf.reduce_mean(class_loss))\n return class_loss + box_loss\n\n def _localization_loss(self, pred_loc, gt_loc, gt_label, num_matched_boxes):\n \"\"\"Computes the localization loss.\n\n Computes the localization loss using smooth l1 loss.\n Args:\n pred_loc: a flatten tensor that includes all predicted locations. The\n shape is [batch_size, num_anchors, 4].\n gt_loc: a tensor representing box regression targets in\n [batch_size, num_anchors, 4].\n gt_label: a tensor that represents the classification groundtruth targets.\n The shape is [batch_size, num_anchors, 1].\n num_matched_boxes: the number of anchors that are matched to a groundtruth\n targets, used as the loss normalizater. 
The shape is [batch_size].\n Returns:\n box_loss: a float32 representing total box regression loss.\n \"\"\"\n mask = tf.greater(tf.squeeze(gt_label), 0)\n float_mask = tf.cast(mask, tf.float32)\n\n smooth_l1 = tf.reduce_sum(tf.losses.huber_loss(\n gt_loc, pred_loc,\n reduction=tf.losses.Reduction.NONE\n ), axis=2)\n smooth_l1 = tf.multiply(smooth_l1, float_mask)\n box_loss = tf.reduce_sum(smooth_l1, axis=1)\n\n return tf.reduce_mean(box_loss / num_matched_boxes)\n\n def _classification_loss(self, pred_label, gt_label, num_matched_boxes):\n \"\"\"Computes the classification loss.\n\n Computes the classification loss with hard negative mining.\n Args:\n pred_label: a flatten tensor that includes all predicted class. The shape\n is [batch_size, num_anchors, num_classes].\n gt_label: a tensor that represents the classification groundtruth targets.\n The shape is [batch_size, num_anchors, 1].\n num_matched_boxes: the number of anchors that are matched to a groundtruth\n targets. This is used as the loss normalizater.\n Returns:\n box_loss: a float32 representing total box regression loss.\n \"\"\"\n cross_entropy = tf.losses.sparse_softmax_cross_entropy(\n gt_label, pred_label, reduction=tf.losses.Reduction.NONE)\n\n mask = tf.greater(tf.squeeze(gt_label), 0)\n float_mask = tf.cast(mask, tf.float32)\n\n # Hard example mining\n neg_masked_cross_entropy = cross_entropy * (1 - float_mask)\n relative_position = tf.contrib.framework.argsort(\n tf.contrib.framework.argsort(\n neg_masked_cross_entropy, direction='DESCENDING'))\n num_neg_boxes = tf.minimum(\n tf.to_int32(num_matched_boxes) * ssd_constants.NEGS_PER_POSITIVE,\n ssd_constants.NUM_SSD_BOXES)\n top_k_neg_mask = tf.cast(tf.less(\n relative_position,\n tf.tile(num_neg_boxes[:, tf.newaxis], (1, ssd_constants.NUM_SSD_BOXES))\n ), tf.float32)\n\n class_loss = tf.reduce_sum(\n tf.multiply(cross_entropy, float_mask + top_k_neg_mask), axis=1)\n\n return tf.reduce_mean(class_loss / num_matched_boxes)\n\n def add_backbone_saver(self):\n # Create saver with mapping from variable names in checkpoint of backbone\n # model to variables in SSD model\n backbone_var_list = self._collect_backbone_vars()\n self.backbone_savers.append(tf.train.Saver(backbone_var_list))\n\n def load_backbone_model(self, sess, backbone_model_path):\n for saver in self.backbone_savers:\n saver.restore(sess, backbone_model_path)\n\n def get_input_data_types(self, subset):\n if subset == 'validation':\n return [self.data_type, tf.float32, tf.float32, tf.float32, tf.int32]\n return [self.data_type, tf.float32, tf.float32, tf.float32]\n\n def get_input_shapes(self, subset):\n \"\"\"Return encoded tensor shapes for train and eval data respectively.\"\"\"\n if subset == 'validation':\n # Validation data shapes:\n # 1. images\n # 2. ground truth locations of boxes\n # 3. ground truth classes of objects in boxes\n # 4. source image IDs\n # 5. raw image shapes\n return [\n [self.batch_size, self.image_size, self.image_size, self.depth],\n [self.batch_size, ssd_constants.MAX_NUM_EVAL_BOXES, 4],\n [self.batch_size, ssd_constants.MAX_NUM_EVAL_BOXES, 1],\n [self.batch_size],\n [self.batch_size, 3],\n ]\n\n # Training data shapes:\n # 1. images\n # 2. ground truth locations of boxes\n # 3. ground truth classes of objects in boxes\n # 4. 
numbers of objects in images\n return [\n [self.batch_size, self.image_size, self.image_size, self.depth],\n [self.batch_size, ssd_constants.NUM_SSD_BOXES, 4],\n [self.batch_size, ssd_constants.NUM_SSD_BOXES, 1],\n [self.batch_size]\n ]\n\n def accuracy_function(self, inputs, logits):\n \"\"\"Returns the ops to measure the mean precision of the model.\"\"\"\n try:\n import ssd_dataloader # pylint: disable=g-import-not-at-top\n from object_detection.box_coders import faster_rcnn_box_coder # pylint: disable=g-import-not-at-top\n from object_detection.core import box_coder # pylint: disable=g-import-not-at-top\n from object_detection.core import box_list # pylint: disable=g-import-not-at-top\n except ImportError:\n raise ImportError('To use the COCO dataset, you must clone the '\n 'repo https://github.com/tensorflow/models and add '\n 'tensorflow/models and tensorflow/models/research to '\n 'the PYTHONPATH, and compile the protobufs by '\n 'following https://github.com/tensorflow/models/blob/'\n 'master/research/object_detection/g3doc/installation.md'\n '#protobuf-compilation ; To evaluate using COCO'\n 'metric, download and install Python COCO API from'\n 'https://github.com/cocodataset/cocoapi')\n\n # Unpack model output back to locations and confidence scores of predictions\n # pred_locs: relative locations (coordiates) of objects in all SSD boxes\n # shape: [batch_size, NUM_SSD_BOXES, 4]\n # pred_labels: confidence scores of objects being of all categories\n # shape: [batch_size, NUM_SSD_BOXES, label_num]\n pred_locs, pred_labels = tf.split(logits, [4, self.label_num], 2)\n\n ssd_box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(\n scale_factors=ssd_constants.BOX_CODER_SCALES)\n anchors = box_list.BoxList(\n tf.convert_to_tensor(ssd_dataloader.DefaultBoxes()('ltrb')))\n pred_boxes = box_coder.batch_decode(\n encoded_boxes=pred_locs, box_coder=ssd_box_coder, anchors=anchors)\n\n pred_scores = tf.nn.softmax(pred_labels, axis=2)\n\n # TODO(haoyuzhang): maybe use `gt_boxes` and `gt_classes` for visualization.\n _, gt_boxes, gt_classes, source_id, raw_shape = inputs # pylint: disable=unused-variable\n\n return {\n (constants.UNREDUCED_ACCURACY_OP_PREFIX +\n ssd_constants.PRED_BOXES): pred_boxes,\n (constants.UNREDUCED_ACCURACY_OP_PREFIX +\n ssd_constants.PRED_SCORES): pred_scores,\n # TODO(haoyuzhang): maybe use these values for visualization.\n # constants.UNREDUCED_ACCURACY_OP_PREFIX+'gt_boxes': gt_boxes,\n # constants.UNREDUCED_ACCURACY_OP_PREFIX+'gt_classes': gt_classes,\n (constants.UNREDUCED_ACCURACY_OP_PREFIX +\n ssd_constants.SOURCE_ID): source_id,\n (constants.UNREDUCED_ACCURACY_OP_PREFIX +\n ssd_constants.RAW_SHAPE): raw_shape\n }\n\n def postprocess(self, results):\n \"\"\"Postprocess results returned from model.\"\"\"\n try:\n import coco_metric # pylint: disable=g-import-not-at-top\n except ImportError:\n raise ImportError('To use the COCO dataset, you must clone the '\n 'repo https://github.com/tensorflow/models and add '\n 'tensorflow/models and tensorflow/models/research to '\n 'the PYTHONPATH, and compile the protobufs by '\n 'following https://github.com/tensorflow/models/blob/'\n 'master/research/object_detection/g3doc/installation.md'\n '#protobuf-compilation ; To evaluate using COCO'\n 'metric, download and install Python COCO API from'\n 'https://github.com/cocodataset/cocoapi')\n\n pred_boxes = results[ssd_constants.PRED_BOXES]\n pred_scores = results[ssd_constants.PRED_SCORES]\n # TODO(haoyuzhang): maybe use these values for visualization.\n # gt_boxes = 
results['gt_boxes']\n # gt_classes = results['gt_classes']\n source_id = results[ssd_constants.SOURCE_ID]\n raw_shape = results[ssd_constants.RAW_SHAPE]\n\n # COCO evaluation requires processing COCO_NUM_VAL_IMAGES exactly once. Due\n # to rounding errors (i.e., COCO_NUM_VAL_IMAGES % batch_size != 0), setting\n # `num_eval_epochs` to 1 is not enough and will often miss some images. We\n # expect user to set `num_eval_epochs` to >1, which will leave some unused\n # images from previous steps in `predictions`. Here we check if we are doing\n # eval at a new global step.\n if results['global_step'] > self.eval_global_step:\n self.eval_global_step = results['global_step']\n self.predictions.clear()\n\n for i, sid in enumerate(source_id):\n self.predictions[int(sid)] = {\n ssd_constants.PRED_BOXES: pred_boxes[i],\n ssd_constants.PRED_SCORES: pred_scores[i],\n ssd_constants.SOURCE_ID: source_id[i],\n ssd_constants.RAW_SHAPE: raw_shape[i]\n }\n\n # COCO metric calculates mAP only after a full epoch of evaluation. Return\n # dummy results for top_N_accuracy to be compatible with benchmar_cnn.py.\n if len(self.predictions) >= ssd_constants.COCO_NUM_VAL_IMAGES:\n log_fn('Got results for all {:d} eval examples. Calculate mAP...'.format(\n ssd_constants.COCO_NUM_VAL_IMAGES))\n\n annotation_file = os.path.join(self.params.data_dir,\n ssd_constants.ANNOTATION_FILE)\n # Size of predictions before decoding about 15--30GB, while size after\n # decoding is 100--200MB. When using async eval mode, decoding takes\n # 20--30 seconds of main thread time but is necessary to avoid OOM during\n # inter-process communication.\n decoded_preds = coco_metric.decode_predictions(self.predictions.values())\n self.predictions.clear()\n\n if self.params.collect_eval_results_async:\n def _eval_results_getter():\n \"\"\"Iteratively get eval results from async eval process.\"\"\"\n while True:\n step, eval_results = self.async_eval_results_queue.get()\n self.eval_coco_ap = eval_results['COCO/AP']\n mlperf.logger.log_eval_accuracy(\n self.eval_coco_ap, step, self.batch_size * self.params.num_gpus)\n if self.reached_target():\n # Reached target, clear all pending messages in predictions queue\n # and insert poison pill to stop the async eval process.\n while not self.async_eval_predictions_queue.empty():\n self.async_eval_predictions_queue.get()\n self.async_eval_predictions_queue.put('STOP')\n break\n\n if not self.async_eval_process:\n # Limiting the number of messages in predictions queue to prevent OOM.\n # Each message (predictions data) can potentially consume a lot of\n # memory, and normally there should only be few messages in the queue.\n # If often blocked on this, consider reducing eval frequency.\n self.async_eval_predictions_queue = multiprocessing.Queue(2)\n self.async_eval_results_queue = multiprocessing.Queue()\n\n # Reason to use a Process as opposed to Thread is mainly the\n # computationally intensive eval runner. 
Python multithreading is not\n # truly running in parallel, a runner thread would get significantly\n # delayed (or alternatively delay the main thread).\n self.async_eval_process = multiprocessing.Process(\n target=coco_metric.async_eval_runner,\n args=(self.async_eval_predictions_queue,\n self.async_eval_results_queue,\n annotation_file))\n self.async_eval_process.daemon = True\n self.async_eval_process.start()\n\n self.async_eval_results_getter_thread = threading.Thread(\n target=_eval_results_getter, args=())\n self.async_eval_results_getter_thread.daemon = True\n self.async_eval_results_getter_thread.start()\n\n self.async_eval_predictions_queue.put(\n (self.eval_global_step, decoded_preds))\n return {'top_1_accuracy': 0, 'top_5_accuracy': 0.}\n\n eval_results = coco_metric.compute_map(decoded_preds, annotation_file)\n self.eval_coco_ap = eval_results['COCO/AP']\n ret = {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.}\n for metric_key, metric_value in eval_results.items():\n ret[constants.SIMPLE_VALUE_RESULT_PREFIX + metric_key] = metric_value\n mlperf.logger.log_eval_accuracy(self.eval_coco_ap, self.eval_global_step,\n self.batch_size * self.params.num_gpus)\n return ret\n log_fn('Got {:d} out of {:d} eval examples.'\n ' Waiting for the remaining to calculate mAP...'.format(\n len(self.predictions), ssd_constants.COCO_NUM_VAL_IMAGES))\n return {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.}\n\n def get_synthetic_inputs(self, input_name, nclass):\n \"\"\"Generating synthetic data matching real data shape and type.\"\"\"\n inputs = tf.random_uniform(\n self.get_input_shapes('train')[0], dtype=self.data_type)\n inputs = tf.contrib.framework.local_variable(inputs, name=input_name)\n boxes = tf.random_uniform(\n [self.batch_size, ssd_constants.NUM_SSD_BOXES, 4], dtype=tf.float32)\n classes = tf.random_uniform(\n [self.batch_size, ssd_constants.NUM_SSD_BOXES, 1], dtype=tf.float32)\n nboxes = tf.random_uniform(\n [self.batch_size], minval=1, maxval=10, dtype=tf.float32)\n return (inputs, boxes, classes, nboxes)\n\n def reached_target(self):\n return (self.params.stop_at_top_1_accuracy and\n self.eval_coco_ap >= self.params.stop_at_top_1_accuracy)\n"
] | [
[
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.concat",
"tensorflow.nn.softmax",
"tensorflow.reduce_sum",
"tensorflow.split",
"tensorflow.contrib.framework.argsort",
"tensorflow.multiply",
"tensorflow.losses.sparse_softmax_cross_entropy",
"tensorflow.cond",
"tensorflow.transpose",
"tensorflow.get_collection",
"tensorflow.losses.huber_loss",
"tensorflow.random_uniform",
"tensorflow.cast",
"tensorflow.train.Saver",
"tensorflow.tile",
"tensorflow.contrib.framework.local_variable",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.train.piecewise_constant",
"tensorflow.reduce_mean",
"tensorflow.to_int32"
]
] |
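One technique in the SSD entry above that is easy to miss is the double `argsort` in `_classification_loss`: sorting the indices of a descending sort yields each element's rank, so comparing that rank against `num_neg_boxes` keeps only the hardest negatives. A numpy rendering of that trick (the real code uses `tf.contrib.framework.argsort`; the sample losses below are made up):

```python
# Numpy rendering of the hard-negative-mining rank trick used above.
import numpy as np

neg_losses = np.array([0.2, 0.9, 0.1, 0.7, 0.4])  # cross-entropy on negative boxes
order = np.argsort(-neg_losses)  # box indices from largest to smallest loss
rank = np.argsort(order)         # rank of each box in that ordering (0 = hardest)
k = 2                            # e.g. NEGS_PER_POSITIVE * number of matched boxes
top_k_neg_mask = rank < k        # keep only the k hardest negatives: 0.9 and 0.7

assert top_k_neg_mask.tolist() == [False, True, False, True, False]
```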
Nagasaki45/deep_disfluency | [
"4c57a194433af9601ebef0e4c9a451cce4c06252"
] | [
"deep_disfluency/rnn/elman.py"
] | [
"import theano\nimport numpy as np\nimport os\n\nfrom theano import tensor as T\nfrom collections import OrderedDict\n\n# nb might be theano.config.floatX\ndtype = T.config.floatX # @UndefinedVariable\n\n\nclass Elman(object):\n\n def __init__(self, ne, de, na, nh, n_out, cs, npos,\n update_embeddings=True):\n '''\n ne :: number of word embeddings in the vocabulary\n de :: dimension of the word embeddings\n na :: number of acoustic or language model features at each word step\n (acoustic context size in frames * number of features)\n nh :: dimension of the hidden layer\n n_out :: number of classes\n cs :: word window context size\n npos :: number of pos tags\n '''\n # parameters of the model\n self.emb = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,\n (ne + 1, de)).\n astype(dtype)) # add one for PADDING\n if na == 0:\n # NB original one, now Wx becomes much bigger with acoustic data\n self.Wx = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,\n ((de * cs) +\n (npos * cs),\n nh))\n .astype(dtype))\n else:\n self.Wx = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,\n ((de * cs) +\n (npos * cs) +\n na, nh))\n .astype(dtype))\n self.Wh = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,\n (nh, nh))\n .astype(dtype))\n self.W = theano.shared(0.2 * np.random.uniform(-1.0, 1.0,\n (nh, n_out))\n .astype(dtype))\n self.bh = theano.shared(np.zeros(nh, dtype=dtype))\n self.b = theano.shared(np.zeros(n_out, dtype=dtype))\n self.h0 = theano.shared(np.zeros(nh, dtype=dtype))\n # Use the eye function (diagonal 1s) for the POS, small in memory\n self.pos = T.eye(npos, npos, 0)\n self.n_acoust = na # the number of acoustic features\n\n # Weights for L1 and L2\n self.L1_reg = 0.0\n self.L2_reg = 0.00001\n\n # without embeddings updates\n self.params = [self.Wx, self.Wh, self.W, self.bh, self.b, self.h0]\n self.names = ['Wx', 'Wh', 'W', 'bh', 'b', 'h0']\n if update_embeddings:\n self.params = [self.emb, self.Wx, self.Wh, self.W, self.bh,\n self.b, self.h0]\n self.names = ['embeddings', 'Wx', 'Wh', 'W', 'bh', 'b', 'h0']\n\n # as many columns as context window size/lines as words in the sentence\n self.idxs = T.imatrix()\n self.pos_idxs = T.imatrix()\n\n # simply a matrix: number of features * length sentence\n self.extra_features = T.matrix()\n\n # TODO Old version no pos\n # x = self.emb[self.idxs].reshape((self.idxs.shape[0], de*cs))\n\n if na == 0:\n # POS version, not just the embeddings\n # but with the POS window concatenated\n x = T.concatenate((self.emb[self.idxs].reshape((self.idxs.shape[0],\n de*cs)),\n self.pos[self.pos_idxs].reshape(\n (self.pos_idxs.shape[0],\n npos*cs))), 1)\n else:\n # TODO new version with extra features\n x = T.concatenate((self.emb[self.idxs].reshape((self.idxs.shape[0],\n de*cs)),\n self.pos[self.pos_idxs].reshape(\n (self.pos_idxs.shape[0],\n npos*cs)),\n self.extra_features), 1)\n self.y = T.iscalar('y') # label\n # TODO for sentences\n # self.y = T.ivector('y') #labels for whole sentence\n\n def recurrence(x_t, h_tm1):\n h_t = T.nnet.sigmoid(T.dot(x_t, self.Wx) + T.dot(h_tm1, self.Wh) +\n self.bh)\n s_t = T.nnet.softmax(T.dot(h_t, self.W) + self.b)\n return [h_t, s_t]\n\n [h, s], _ = theano.scan(fn=recurrence,\n sequences=x, outputs_info=[self.h0, None],\n n_steps=x.shape[0])\n\n p_y_given_x_lastword = s[-1, 0, :]\n p_y_given_x_sentence = s[:, 0, :]\n p_y_given_x_sentence_hidden = (h, s[:, 0, :])\n y_pred = T.argmax(p_y_given_x_sentence, axis=1)\n\n # TODO adding this- zero one loss for the last word\n # y_pred_word = T.argmax(p_y_given_x_lastword)\n\n # learning 
rate not hard coded as could decay\n self.lr = T.scalar('lr')\n\n # Cost: standard nll loss\n self.nll = -T.mean(T.log(p_y_given_x_lastword)[self.y])\n self.sentence_nll = -T.mean(T.log(p_y_given_x_sentence)\n [T.arange(x.shape[0]), self.y])\n\n if na == 0:\n self.classify = theano.function(inputs=[self.idxs, self.pos_idxs],\n outputs=y_pred)\n else:\n self.classify = theano.function(inputs=[self.idxs, self.pos_idxs,\n self.extra_features],\n outputs=y_pred)\n\n # regularisation terms\n # L1 norm ; one regularization option is to enforce L1 norm to\n # be small\n # if not using this set this to 0 to avoid unecessary computation\n self.L1 = 0\n # self.L1 = abs(self.Wh.sum()) + abs(self.Wx.sum()) + \\\n # abs(self.W.sum()) + abs(self.emb.sum())\\\n # + abs(self.bh.sum()) + abs(self.b.sum()) + abs(self.h0.sum())\n\n # square of L2 norm ; one regularization option is to enforce\n # square of L2 norm to be small\n self.L2_sqr = (self.Wh ** 2).sum() + (self.Wx ** 2).sum() +\\\n (self.W ** 2).sum() + (self.emb ** 2).sum() +\\\n (self.bh ** 2).sum() + (self.b ** 2).sum() +\\\n (self.h0 ** 2).sum()\n\n self.cost = self.nll \\\n + self.L1_reg * self.L1 \\\n + self.L2_reg * self.L2_sqr\n gradients = T.grad(self.cost, self.params)\n\n self.updates = OrderedDict((p, p-self.lr*g)\n for p, g in zip(self.params, gradients))\n\n # costs for multiple labels (one for each in the input)\n self.sentence_cost = self.sentence_nll \\\n + self.L1_reg * self.L1 \\\n + self.L2_reg * self.L2_sqr\n sentence_gradients = T.grad(self.sentence_cost, self.params)\n\n self.sentence_updates = OrderedDict((p, p - self.lr*g)\n for p, g in\n zip(self.params,\n sentence_gradients))\n\n if na == 0:\n self.soft_max = theano.function(inputs=[self.idxs, self.pos_idxs],\n outputs=p_y_given_x_sentence)\n self.soft_max_return_hidden_layer = theano.function(\n inputs=[self.idxs, self.pos_idxs],\n outputs=p_y_given_x_sentence_hidden)\n else:\n self.soft_max = theano.function(inputs=[self.idxs, self.pos_idxs,\n self.extra_features],\n outputs=p_y_given_x_sentence)\n self.soft_max_return_hidden_layer = theano.function(\n inputs=[self.idxs, self.pos_idxs,\n self.extra_features],\n outputs=p_y_given_x_sentence_hidden)\n\n if na == 0:\n self.train = theano.function(inputs=[self.idxs, self.pos_idxs,\n self.y,\n self.lr],\n outputs=self.nll,\n updates=self.updates)\n else:\n self.train = theano.function(inputs=[self.idxs, self.pos_idxs,\n self.extra_features,\n self.y,\n self.lr],\n outputs=self.nll,\n updates=self.updates)\n\n self.normalize = theano.function(\n inputs=[],\n updates={self.emb:\n self.emb /\n T.sqrt((self.emb**2).sum(axis=1))\n .dimshuffle(0, 'x')}\n )\n\n def classify_by_index(self, word_idx, indices, pos_idx=None,\n extra_features=None):\n \"\"\"Classification method which assumes the dialogue matrix is\n in the right format.\n\n :param word_idx: window size * dialogue length matrix\n :param labels: vector dialogue length long\n :param indices: 2 * dialogue length matrix for start, stop indices\n :param pos_idx: pos window size * dialogue length matrix\n :param extra_features: number of features * dialogue length matrix\n \"\"\"\n output = []\n for start, stop in indices:\n\n if extra_features:\n\n output.extend(self.classify(word_idx[start:stop+1, :],\n pos_idx[start:stop+1, :],\n np.asarray(\n extra_features[start:stop+1, :],\n dtype='float32')\n )\n )\n else:\n output.extend(self.classify(word_idx[start:stop+1, :],\n pos_idx[start:stop+1, :]\n )\n )\n return output\n\n def fit(self, word_idx, labels, lr, indices, 
pos_idx=None,\n extra_features=None):\n \"\"\"Fit method which assumes the dialogue matrix is in the right\n format.\n\n :param word_idx: window size * dialogue length matrix\n :param labels: vector dialogue length long\n :param indices: 2 * dialogue length matrix for start, stop indices\n :param pos_idx: pos window size * dialogue length matrix\n :param extra_features: number of features * dialogue length matrix\n \"\"\"\n loss = 0\n test = 0\n testing = False\n for start, stop in indices:\n # print start, stop\n if testing:\n test += 1\n if test > 50:\n break\n if extra_features:\n\n x = self.train(word_idx[start:stop+1, :],\n pos_idx[start:stop+1, :],\n np.asarray(extra_features[start:stop+1, :],\n dtype='float32'),\n labels[stop],\n lr)\n else:\n x = self.train(word_idx[start:stop+1, :],\n pos_idx[start:stop+1, :],\n labels[stop],\n lr)\n loss += x\n self.normalize()\n return loss\n\n def shared_dataset(self, mycorpus, borrow=True, data_type='int32'):\n \"\"\" Load the dataset into shared variables \"\"\"\n return theano.shared(np.asarray(mycorpus, dtype=data_type),\n borrow=True)\n\n def load_weights_from_folder(self, folder):\n for name, param in zip(self.names, self.params):\n param.set_value(np.load(os.path.join(folder, name + \".npy\")))\n\n def load(self, folder):\n emb = np.load(os.path.join(folder, 'embeddings.npy'))\n Wx = np.load(os.path.join(folder, 'Wx.npy'))\n Wh = np.load(os.path.join(folder, 'Wh.npy'))\n W = np.load(os.path.join(folder, 'W.npy'))\n bh = np.load(os.path.join(folder, 'bh.npy'))\n b = np.load(os.path.join(folder, 'b.npy'))\n h0 = np.load(os.path.join(folder, 'h0.npy'))\n return emb, Wx, Wh, W, bh, b, h0\n\n def load_weights(self, emb=None, Wx=None, Wh=None, W=None, bh=None, b=None,\n h0=None):\n if emb is not None:\n self.emb.set_value(emb)\n if Wx is not None:\n self.Wx.set_value(Wx)\n if Wh is not None:\n self.Wh.set_value(Wh)\n if W is not None:\n self.W.set_value(W)\n if bh is not None:\n self.bh.set_value(bh)\n if b is not None:\n self.b.set_value(b)\n if h0 is not None:\n self.h0.set_value(h0)\n\n def save(self, folder):\n for param, name in zip(self.params, self.names):\n np.save(os.path.join(folder, name + '.npy'), param.get_value())\n"
] | [
[
"numpy.random.uniform",
"numpy.asarray",
"numpy.zeros"
]
] |
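The numpy APIs recorded for the Elman entry above (`numpy.random.uniform`, `numpy.zeros`, `numpy.asarray`) cover its parameter initialisation: weights drawn uniformly from a small symmetric range and biases started at zero, all cast to the backend float type. A rough sketch of that pattern with hypothetical layer sizes (the real code wraps these arrays in `theano.shared`):

```python
# Rough sketch of the initialisation pattern recorded above; sizes are hypothetical.
import numpy as np

dtype = "float32"
de, cs, nh, n_out = 50, 3, 100, 10   # embedding dim, context window, hidden units, classes

Wx = np.asarray(0.2 * np.random.uniform(-1.0, 1.0, (de * cs, nh)), dtype=dtype)
Wh = np.asarray(0.2 * np.random.uniform(-1.0, 1.0, (nh, nh)), dtype=dtype)
bh = np.zeros(nh, dtype=dtype)
b = np.zeros(n_out, dtype=dtype)

assert Wx.shape == (de * cs, nh) and Wx.dtype == np.float32
assert np.abs(Wx).max() <= 0.2 and not bh.any()
```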
mrakitin/xrt | [
"a2d09296860386ed3a83cea45ab43e7959e58f33"
] | [
"xrt/runner.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nModule :mod:`runner` defines the entry point of xrt - :func:`run_ray_tracing`,\ncontainers for job properties and functions for running the processes or\nthreads and accumulating the resulting histograms.\n\"\"\"\n__author__ = \"Konstantin Klementiev, Roman Chernikov\"\n__date__ = \"26 Mar 2016\"\n\nimport os\nimport sys\nimport time\nimport inspect\nimport pickle\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport multiprocessing\nimport errno\nimport threading\nif sys.version_info < (3, 1):\n import Queue\nelse:\n import queue\n Queue = queue\nimport uuid # is needed on some platforms with pyopencl # analysis:ignore\n\nfrom . import multipro\nfrom .backends import raycing\n\n# _DEBUG = True\n__fdir__ = os.path.abspath(os.path.dirname(__file__))\nrunCardVals = None\nrunCardProcs = None\n_plots = []\n\n\ndef retry_on_eintr(function, *args, **kw):\n \"\"\"\n Suggested in:\n http://mail.python.org/pipermail/python-list/2011-February/1266462.html\n as a solution for `IOError: [Errno 4] Interrupted system call` in Linux.\n \"\"\"\n while True:\n try:\n return function(*args, **kw)\n except IOError as e:\n if e.errno == errno.EINTR:\n continue\n else:\n raise\n\n\nclass RunCardVals(object):\n \"\"\"\n Serves as a global container for a sub-set of run properties passed by the\n user to :func:`run_ray_tracing`. The sub-set is limited to pickleable\n objects for passing it to job processes or threads.\n \"\"\"\n def __init__(self, threads, processes, repeats, updateEvery, pickleEvery,\n backend, globalNorm, runfile):\n if threads >= processes:\n self.Event = threading.Event\n self.Queue = Queue.Queue\n else:\n self.Event = multiprocessing.Event\n self.Queue = multiprocessing.Queue\n\n self.stop_event = self.Event()\n self.finished_event = self.Event()\n self.stop_event.clear()\n self.finished_event.clear()\n\n self.threads = threads\n self.processes = processes\n self.repeats = repeats\n self.updateEvery = updateEvery\n self.pickleEvery = pickleEvery\n self.backend = backend\n self.globalNorm = globalNorm\n self.runfile = runfile\n self.passNo = 0\n self.savedResults = []\n self.iteration = 0\n self.lastRunsPickleName = os.path.join(__fdir__, 'lastRuns.pickle')\n self.lastRuns = []\n try:\n with open(self.lastRunsPickleName, 'rb') as f:\n self.lastRuns = pickle.load(f)\n except: # analysis:ignore\n pass\n if self.lastRuns:\n print(\"The last {0} run{1}\".format(len(self.lastRuns),\n 's' if len(self.lastRuns) > 1 else ''))\n for lastRun in self.lastRuns:\n if len(lastRun) > 3:\n print(\"{0}::\".format(lastRun[3]))\n st0 = time.strftime(\"%a, %d %b %Y %H:%M:%S\", lastRun[0])\n if (time.strftime(\"%a, %d %b %Y\", lastRun[0]) ==\n time.strftime(\"%a, %d %b %Y\", lastRun[1])):\n st1 = time.strftime(\"%H:%M:%S\", lastRun[1])\n else:\n st1 = time.strftime(\"%a, %d %b %Y %H:%M:%S\", lastRun[1])\n print(\"start: {0}; stop: {1}; duration: {2:.1f} s\".format(\n st0, st1, lastRun[2]))\n\n\nclass RunCardProcs(object):\n \"\"\"\n Serves as a global container for a sub-set of run properties passed by the\n user to :func:`run_ray_tracing` limited to functions. 
These cannot be\n passed to job processes or threads (because are not pickleable) and have to\n be executed by the job server (this module).\n \"\"\"\n def __init__(self, afterScript, afterScriptArgs, afterScriptKWargs):\n self.afterScript = afterScript\n self.afterScriptArgs = afterScriptArgs\n self.afterScriptKWargs = afterScriptKWargs\n self.generatorNorm = None\n self.generatorPlot = None\n\n\ndef set_repeats(repeats=0):\n if runCardVals is not None:\n runCardVals.repeats = repeats\n\n\ndef _simple_generator():\n \"\"\"\n The simplest generator for running only one ray-tracing study. Search\n examples for generators that run complex ray-tracing studies.\n \"\"\"\n yield\n\n\ndef start_jobs():\n \"\"\"\n Restores the plots if requested and if the persistent files exist and\n starts the qt timer of the 1st plot.\n \"\"\"\n for plot in _plots:\n if plot.persistentName:\n plot.restore_plots()\n try:\n plot.fig.canvas.manager.set_window_title(plot.title)\n except AttributeError:\n pass\n\n runCardVals.iteration = np.long(0)\n noTimer = len(_plots) == 0 or\\\n (plt.get_backend().lower() in (x.lower() for x in\n mpl.rcsetup.non_interactive_bk))\n if noTimer:\n print(\"The job is running... \")\n while True:\n sys.stdout.flush()\n res = dispatch_jobs()\n tFromStart = time.time() - runCardVals.tstart\n msg = '{0} of {1} in {2:.1f} s'.format(\n runCardVals.iteration, runCardVals.repeats, tFromStart)\n if os.name == 'posix':\n sys.stdout.write(\"\\r\\x1b[K \" + msg)\n else:\n sys.stdout.write(\"\\r \")\n print(msg+' ')\n if res:\n return\n else:\n plot = _plots[0]\n plot.areProcessAlreadyRunning = False\n plot.timer = plot.fig.canvas.new_timer()\n plot.timer.add_callback(plot.timer_callback)\n plot.timer.start()\n\n\ndef dispatch_jobs():\n \"\"\"Runs the jobs in separate processes or threads and collects the resulted\n histograms from the output queues. One cannot run this function in a loop\n because the redrawing will not work. 
Instead, it is started from a timer\n event handler of a qt-graph.\"\"\"\n if (runCardVals.iteration >= runCardVals.repeats) or \\\n runCardVals.stop_event.is_set():\n on_finish()\n return True\n one_iteration()\n if (runCardVals.iteration >= runCardVals.repeats) or \\\n runCardVals.stop_event.is_set():\n on_finish()\n return True\n if runCardVals.iteration % runCardVals.updateEvery == 0:\n for plot in _plots:\n plot.plot_plots()\n if runCardVals.pickleEvery:\n if runCardVals.iteration % runCardVals.pickleEvery == 0:\n for plot in _plots:\n plot.store_plots()\n if len(_plots) > 0:\n _plots[0].areProcessAlreadyRunning = False\n\n\ndef one_iteration():\n \"\"\"The body of :func:`dispatch_jobs`.\"\"\"\n plots2Pickle = [plot.card_copy() for plot in _plots]\n outPlotQueues = [runCardVals.Queue() for plot in _plots]\n alarmQueue = runCardVals.Queue()\n\n# in the 1st iteration the plots may require some of x, y, e limits to be\n# calculated and thus this case is special:\n cpus = max(runCardVals.threads, runCardVals.processes)\n\n if runCardVals.iteration == 0:\n runCardVals.uniqueFirstRun = False\n if hasattr(runCardVals, 'beamLine'):\n bl = runCardVals.beamLine\n bl.forceAlign = False\n for oe in bl.oes + bl.slits + bl.screens:\n if raycing.is_auto_align_required(oe):\n bl.forceAlign = True\n runCardVals.uniqueFirstRun = True\n break\n\n if not runCardVals.uniqueFirstRun:\n for plot in _plots:\n xLimitsDefined = (plot.xaxis.limits is not None) and\\\n (not isinstance(plot.xaxis.limits, str))\n yLimitsDefined = (plot.yaxis.limits is not None) and\\\n (not isinstance(plot.yaxis.limits, str))\n cLimitsDefined = (plot.caxis.limits is not None) and\\\n (not isinstance(plot.caxis.limits, str)) or plot.ePos == 0\n if not (xLimitsDefined and yLimitsDefined and cLimitsDefined):\n runCardVals.uniqueFirstRun = True\n break\n\n if runCardVals.uniqueFirstRun:\n cpus = 1\n\n elif runCardVals.iteration == 1:\n if runCardVals.uniqueFirstRun: # balances the 1st iteration\n cpus -= 1\n\n if cpus < 1:\n cpus = 1\n\n if runCardVals.backend.startswith('raycing'):\n runCardVals.beamLine.alarms = []\n\n if runCardVals.threads >= runCardVals.processes or cpus == 1:\n BackendOrProcess = multipro.BackendThread\n else:\n BackendOrProcess = multipro.BackendProcess\n processes = [BackendOrProcess(runCardVals, plots2Pickle, outPlotQueues,\n alarmQueue, icpu) for icpu in range(cpus)]\n# print('top process:', os.getpid())\n for pid, p in enumerate(processes):\n p.ppid = pid + runCardVals.iteration\n p.start()\n\n for p in processes:\n if runCardVals.backend.startswith('raycing'):\n runCardVals.beamLine.alarms = retry_on_eintr(alarmQueue.get)\n for alarm in runCardVals.beamLine.alarms:\n print(alarm)\n outList = [0, ]\n for plot, aqueue in zip(_plots, outPlotQueues):\n outList = retry_on_eintr(aqueue.get)\n\n if len(outList) == 0:\n continue\n if (runCardVals.iteration >= runCardVals.repeats) or \\\n runCardVals.stop_event.is_set():\n continue\n\n plot.nRaysAll += outList[13]\n if runCardVals.backend.startswith('shadow'):\n plot.nRaysNeeded += outList[14]\n elif runCardVals.backend.startswith('raycing'):\n nRaysVarious = outList[14]\n plot.nRaysAlive += nRaysVarious[0]\n plot.nRaysGood += nRaysVarious[1]\n plot.nRaysOut += nRaysVarious[2]\n plot.nRaysOver += nRaysVarious[3]\n plot.nRaysDead += nRaysVarious[4]\n plot.nRaysAccepted += nRaysVarious[5]\n plot.nRaysAcceptedE += nRaysVarious[6]\n plot.nRaysSeeded += nRaysVarious[7]\n plot.nRaysSeededI += nRaysVarious[8]\n plot.displayAsAbsorbedPower = outList[15]\n\n for iaxis, axis 
in enumerate(\n [plot.xaxis, plot.yaxis, plot.caxis]):\n if (iaxis == 2) and (not plot.ePos):\n continue\n axis.total1D += outList[0+iaxis*3]\n axis.total1D_RGB += outList[1+iaxis*3]\n if runCardVals.iteration == 0:\n axis.binEdges = outList[2+iaxis*3]\n plot.total2D += outList[9]\n plot.total2D_RGB += outList[10]\n if plot.fluxKind.lower().endswith('4d'):\n plot.total4D += outList[11]\n elif plot.fluxKind.lower().endswith('pca'):\n plot.total4D.append(outList[11])\n plot.intensity += outList[12]\n\n if runCardVals.iteration == 0: # needed for multiprocessing\n plot.set_axes_limits(*outList.pop())\n\n tFromStart = time.time() - runCardVals.tstart\n plot.textStatus.set_text(\n \"{0} of {1} in {2:.1f} s (right click to stop)\".format(\n runCardVals.iteration+1, runCardVals.repeats, tFromStart))\n# aqueue.task_done()\n\n if len(outList) > 0:\n runCardVals.iteration += 1\n for p in processes:\n p.join(60.)\n if hasattr(runCardVals, 'beamLine'):\n bl = runCardVals.beamLine\n bl.forceAlign = False\n if bl.flowSource == 'legacy':\n bl.flowSource = 'done_once'\n\n\ndef on_finish():\n \"\"\"Executed on exit from the ray-tracing iteration loop.\"\"\"\n if len(_plots) > 0:\n plot = _plots[0]\n if plt.get_backend().lower() not in (\n x.lower() for x in mpl.rcsetup.non_interactive_bk):\n plot.timer.stop()\n plot.timer.remove_callback(plot.timer_callback)\n plot.areProcessAlreadyRunning = False\n for plot in _plots:\n if plot.fluxKind.startswith('E') and \\\n plot.fluxKind.lower().endswith('pca'):\n xbin, zbin = plot.xaxis.bins, plot.yaxis.bins\n plot.total4D = np.concatenate(plot.total4D).reshape(-1, xbin, zbin)\n plot.field3D = plot.total4D\n plot.textStatus.set_text('')\n plot.fig.canvas.mpl_disconnect(plot.cidp)\n plot.plot_plots()\n plot.save()\n runCardVals.tstop = time.time()\n runCardVals.tstopLong = time.localtime()\n print('The ray tracing with {0} iteration{1} took {2:0.1f} s'.format(\n runCardVals.iteration, 's' if runCardVals.iteration > 1 else '',\n runCardVals.tstop-runCardVals.tstart))\n runCardVals.finished_event.set()\n for plot in _plots:\n if runCardVals.globalNorm or plot.persistentName:\n plot.store_plots()\n if runCardVals.stop_event.is_set():\n print('Interrupted by user after iteration {0}'.format(\n runCardVals.iteration))\n return\n try:\n if runCardProcs.generatorPlot is not None:\n if sys.version_info < (3, 1):\n runCardProcs.generatorPlot.next()\n else:\n next(runCardProcs.generatorPlot)\n except StopIteration:\n pass\n else:\n for plot in _plots:\n plot.clean_plots()\n start_jobs()\n return\n\n if runCardVals.globalNorm:\n aSavedResult = -1\n print('normalizing ...')\n for aRenormalization in runCardProcs.generatorNorm:\n for plot in _plots:\n aSavedResult += 1\n saved = runCardVals.savedResults[aSavedResult]\n plot.clean_plots()\n saved.restore(plot)\n try:\n plot.fig.canvas.manager.set_window_title(plot.title)\n except AttributeError:\n pass\n for runCardVals.passNo in [1, 2]:\n plot.plot_plots()\n plot.save('_norm' + str(runCardVals.passNo))\n\n print('finished')\n\n runCardVals.lastRuns.append([runCardVals.tstartLong, runCardVals.tstopLong,\n runCardVals.tstop-runCardVals.tstart,\n runCardVals.runfile])\n try:\n with open(runCardVals.lastRunsPickleName, 'wb') as f:\n pickle.dump(runCardVals.lastRuns[-10:], f, protocol=2)\n except OSError: # Read-only file system\n pass # no history tracking of last 10 runs\n\n# plt.close('all')\n if runCardProcs.afterScript:\n runCardProcs.afterScript(\n *runCardProcs.afterScriptArgs, **runCardProcs.afterScriptKWargs)\n\n\ndef 
normalize_sibling_plots(plots):\n print('normalization started')\n max1Dx = 0\n max1Dy = 0\n max1Dc = 0\n max1Dx_RGB = 0\n max1Dy_RGB = 0\n max1Dc_RGB = 0\n max2D_RGB = 0\n for plot in plots:\n if max1Dx < plot.xaxis.max1D:\n max1Dx = plot.xaxis.max1D\n if max1Dy < plot.yaxis.max1D:\n max1Dy = plot.yaxis.max1D\n if max1Dc < plot.caxis.max1D:\n max1Dc = plot.caxis.max1D\n if max1Dx_RGB < plot.xaxis.max1D_RGB:\n max1Dx_RGB = plot.xaxis.max1D_RGB\n if max1Dy_RGB < plot.yaxis.max1D_RGB:\n max1Dy_RGB = plot.yaxis.max1D_RGB\n if max1Dc_RGB < plot.caxis.max1D_RGB:\n max1Dc_RGB = plot.caxis.max1D_RGB\n if max2D_RGB < plot.max2D_RGB:\n max2D_RGB = plot.max2D_RGB\n\n for plot in plots:\n plot.xaxis.globalMax1D = max1Dx\n plot.yaxis.globalMax1D = max1Dy\n plot.caxis.globalMax1D = max1Dc\n plot.xaxis.globalMax1D_RGB = max1Dx_RGB\n plot.yaxis.globalMax1D_RGB = max1Dy_RGB\n plot.caxis.globalMax1D_RGB = max1Dc_RGB\n plot.globalMax2D_RGB = max2D_RGB\n\n for runCardVals.passNo in [1, 2]:\n for plot in plots:\n plot.plot_plots()\n plot.save('_norm' + str(runCardVals.passNo))\n print('normalization finished')\n\n\ndef run_ray_tracing(\n plots=[], repeats=1, updateEvery=1, pickleEvery=None, energyRange=None,\n backend='raycing', beamLine=None, threads=1, processes=1,\n generator=None, generatorArgs=[], generatorKWargs='auto', globalNorm=0,\n afterScript=None, afterScriptArgs=[], afterScriptKWargs={}):\n u\"\"\"\n This function is the entry point of xrt.\n Parameters are all optional except the 1st one. Please use them as keyword\n arguments because the list of parameters may change in future versions.\n\n *plots*: instance of :class:`~xrt.plotter.XYCPlot` or a sequence of\n instances or an empty sequence if no graphical output is wanted.\n\n *repeats*: int\n The number of ray tracing runs. It should be stressed that\n accumulated are not rays, which would be limited by the physical\n memory, but rather the histograms from each run are summed up. In\n this way the number of rays is unlimited.\n\n *updateEvery*: int\n Redrawing rate. Redrawing happens when the current iteration index\n is divisible by *updateEvery*.\n\n *pickleEvery*: int\n Saving rate. Applicable to plots with a defined *persistentName*.\n If None, the pickling will happen once at the end.\n\n *energyRange*: [*eMin*: float, *eMax*: float]\n Only in `shadow` backend: If not None, sets the energy range of\n shadow source. Alternatively, this can be done directly inside\n the *generator*.\n\n *backend*: str\n so far supported: {'shadow' | 'raycing' | 'dummy'}\n\n *beamLine*: instance of :class:`~xrt.backends.raycing.BeamLine`, used\n with `raycing` backend.\n\n *threads*, *processes*: int or str\n The number of parallel threads or processes, should not be greater\n than the number of cores in your computer, otherwise it gives no\n gain. The bigger of the two will be used as a signal for using\n either :mod:`threading` or :mod:`multiprocessing`. If they are\n equal, :mod:`threading` is used. See also\n :ref:`performance tests<tests>`. If 'all' is given then the number\n returned by multiprocessing.cpu_count() will be used.\n\n .. warning::\n You cannot use multiprocessing in combination with OpenCL\n because the resources (CPU or GPU) are already shared by\n OpenCL. You will get an error if *processes* > 1. You can still\n use *threads* > 1 but with a little gain.\n\n .. note::\n For the :mod:`shadow` backend you must create ``tmp0``,\n ``tmp1`` etc. directories (counted by *threads* or *processes*)\n in your working directory. 
Even if the execution is not\n parallelized, there must be ``tmp0`` with the shadow files\n prepared in it.\n\n *generator*: generator object\n A generator for running complex ray-tracing studies. It must modify\n the optics, specify the graph limits, define the output file names\n etc. in a loop and return to xrt by ``yield``.\n See the supplied examples.\n\n *generatorArgs*, *generatorKWargs*: list and (dictionary or 'auto')\n If *generatorKWargs* is 'auto', the following keyword dictionary\n will be used for the generator: kwargs = {} if *generator* is\n defined within the caller of :func:`run_ray_tracing` or if\n *generatorArgs* is not empty, otherwise\n kwargs = {'plots'=pots, 'beamLine'=beamLine}.\n\n .. _globalNorm:\n\n *globalNorm*: bool\n If True, the intensity of the histograms will be normalized to the\n global maximum throughout the series of graphs. There are two\n flavors of normalization:\n\n 1) only the heights of 1D histograms are globally normalized while\n the brightness is kept with the normalization to the local\n maximum (i.e. the maximum in the given graph).\n 2) both the heights of 1D histograms and the brightness of 1D and\n 2D histograms are globally normalized.\n\n The second way is physically more correct but sometimes is less\n visual: some of the normalized pictures may become too dark, e.g.\n when you compare focused and strongly unfocused images. Both\n normalizations are saved with suffixes ``_norm1`` and ``_norm2``\n for you to select the better one.\n\n Here is a normalization example where the intensity maximum was\n found throughout a series of images for filters of different\n thickness. The brightest image was for the case of no filter (not\n shown here) and the normalization shown below was done relative to\n that image:\n\n +------------------+-----------------------------------------+\n | normalized | |\n | to local maximum | |image_nonorm| |\n +------------------+-----------------------------------------+\n | global | |\n | normalization, | |\n | type 1 | |image_norm1| |\n +------------------+-----------------------------------------+\n | global | |\n | normalization, | |\n | type 2 | |image_norm2| |\n +------------------+-----------------------------------------+\n\n .. |image_nonorm| imagezoom:: _images/filterFootprint2_I400mum.png\n :scale: 50 %\n .. |image_norm1| imagezoom:: _images/filterFootprint2_I400mum_norm1.png\n :scale: 50 %\n .. |image_norm2| imagezoom:: _images/filterFootprint2_I400mum_norm2.png\n :scale: 50 %\n\n *afterScript*: function object\n This function is executed at the end of the current script. 
For\n example, it may run the next ray-tracing script.\n\n *afterScriptArgs*, *afterScriptKWargs*: list and dictionary\n args and kwargs for *afterScript*.\n\n\n \"\"\"\n global runCardVals, runCardProcs, _plots\n frm = inspect.stack()[1]\n mod = inspect.getmodule(frm[0])\n runfile = mod.__file__\n # patch for starting a script with processes>1 from Spyder console\n if not hasattr(mod, \"__spec__\"):\n mod.__spec__ = None\n\n if isinstance(plots, (list, tuple)):\n _plots = plots\n else:\n _plots = [plots, ]\n for plot in _plots:\n if backend == 'raycing':\n if plot.caxis.useCategory:\n plot.caxis.limits = [raycing.hueMin, raycing.hueMax]\n if isinstance(plot.rayFlag, int):\n plot.rayFlag = plot.rayFlag,\n if updateEvery < 1:\n updateEvery = 1\n if (repeats > 1) and (updateEvery > repeats):\n updateEvery = repeats\n cpuCount = multiprocessing.cpu_count()\n if isinstance(processes, str):\n if processes.startswith('a'): # all\n processes = cpuCount\n else:\n processes = max(cpuCount // 2, 1)\n if isinstance(threads, str):\n if threads.startswith('a'): # all\n threads = cpuCount\n else:\n threads = max(cpuCount // 2, 1)\n runCardVals = RunCardVals(threads, processes, repeats, updateEvery,\n pickleEvery, backend, globalNorm, runfile)\n runCardProcs = RunCardProcs(\n afterScript, afterScriptArgs, afterScriptKWargs)\n\n runCardVals.cwd = os.getcwd()\n if backend.startswith('shadow'):\n from .backends import shadow\n cpuCount = max(processes, threads)\n shadow.check_shadow_dirs(cpuCount, runCardVals.cwd)\n runCardVals.fWiggler, runCardVals.fPolar, runCardVals.blockNRays = \\\n shadow.init_shadow(cpuCount, runCardVals.cwd, energyRange)\n elif backend == 'raycing':\n runCardVals.beamLine = beamLine\n\n if generator is None:\n runCardProcs.generatorPlot = _simple_generator()\n else:\n if generatorKWargs == 'auto':\n if (generator.__name__ in sys._getframe(1).f_locals) or\\\n len(generatorArgs) > 0:\n # generator is defined within the caller function\n kwargs = {}\n else:\n # outside the caller\n kwargs = {'plots': plots, 'beamLine': beamLine}\n else:\n kwargs = generatorKWargs\n runCardProcs.generatorPlot = generator(*generatorArgs, **kwargs)\n if globalNorm:\n runCardProcs.generatorNorm = generator(*generatorArgs, **kwargs)\n\n if runCardProcs.generatorPlot is not None:\n if sys.version_info < (3, 1):\n runCardProcs.generatorPlot.next()\n else:\n next(runCardProcs.generatorPlot)\n\n runCardVals.tstart = time.time()\n runCardVals.tstartLong = time.localtime()\n start_jobs()\n plt.show()\n"
] | [
[
"numpy.concatenate",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_backend",
"numpy.long"
]
] |
luckykamon/Morpion | [
"a4da849a354c542fc5a79a3742a86b040df7e016"
] | [
"create_image/white.py"
] | [
"import imageio\nimport matplotlib.pyplot as plt\nimport Image\nimport numpy as np\n\nim = Image.new(\"RGB\", (65,65), \"white\")\npic = np.array(im)\nim=pic\nimageio.imsave(\"white.png\", im)\n\n"
] | [
[
"numpy.array"
]
] |
faymek/compression | [
"20c6745b741e266f7118e6b3fc88d22f6179cfdf"
] | [
"examples/varate.py"
] | [
"#%%\n\"\"\"\nbmshj2018\n\n\"\"\"\n\nimport argparse\nimport glob\nimport sys\n\nfrom absl import app\nfrom absl.flags import argparse_flags\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nimport tensorflow_compression as tfc\nfrom dynamic import *\n\n\nSCALES_MIN = 0.11\nSCALES_MAX = 256\nSCALES_LEVELS = 64\n\n\ndef read_png(filename):\n \"\"\"Loads a PNG image file.\"\"\"\n string = tf.read_file(filename)\n image = tf.image.decode_image(string, channels=3)\n image = tf.cast(image, tf.float32)\n image /= 255\n return image\n\n\ndef quantize_image(image):\n image = tf.round(image * 255)\n image = tf.saturate_cast(image, tf.uint8)\n return image\n\n\ndef write_png(filename, image):\n \"\"\"Saves an image to a PNG file.\"\"\"\n image = quantize_image(image)\n string = tf.image.encode_png(image)\n return tf.write_file(filename, string)\n\n\nclass AnalysisTransform(tf.keras.layers.Layer):\n \"\"\"The analysis transform.\"\"\"\n\n def __init__(self, num_filters, *args, **kwargs):\n self.num_filters = num_filters\n super(AnalysisTransform, self).__init__(*args, **kwargs)\n\n def build(self, input_shape):\n self._layers = [\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_0\", corr=True, strides_down=2,\n padding=\"same_zeros\", use_bias=True,\n activation=DynamicGDN(name=\"gdn_0\")),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_1\", corr=True, strides_down=2,\n padding=\"same_zeros\", use_bias=True,\n activation=DynamicGDN(name=\"gdn_1\")),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_2\", corr=True, strides_down=2,\n padding=\"same_zeros\", use_bias=True,\n activation=DynamicGDN(name=\"gdn_2\")),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_3\", corr=True, strides_down=2,\n padding=\"same_zeros\", use_bias=True,\n activation=None),\n ]\n super(AnalysisTransform, self).build(input_shape)\n\n def call(self, tensor):\n for layer in self._layers:\n tensor = layer(tensor)\n return tensor\n\n\nclass SynthesisTransform(tf.keras.layers.Layer):\n \"\"\"The synthesis transform.\"\"\"\n\n def __init__(self, num_filters, *args, **kwargs):\n self.num_filters = num_filters\n super(SynthesisTransform, self).__init__(*args, **kwargs)\n\n def build(self, input_shape):\n self._layers = [\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_0\", corr=False, strides_up=2,\n padding=\"same_zeros\", use_bias=True,\n activation=DynamicGDN(name=\"igdn_0\", inverse=True)),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_1\", corr=False, strides_up=2,\n padding=\"same_zeros\", use_bias=True,\n activation=DynamicGDN(name=\"igdn_1\", inverse=True)),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_2\", corr=False, strides_up=2,\n padding=\"same_zeros\", use_bias=True,\n activation=DynamicGDN(name=\"igdn_2\", inverse=True)),\n DynamicSignalConv2D(\n 3, (5, 5), name=\"layer_3\", corr=False, strides_up=2,\n padding=\"same_zeros\", use_bias=True,\n activation=None),\n ]\n super(SynthesisTransform, self).build(input_shape)\n\n def call(self, tensor):\n for layer in self._layers:\n tensor = layer(tensor)\n return tensor\n\n\nclass HyperAnalysisTransform(tf.keras.layers.Layer):\n \"\"\"The analysis transform for the entropy model parameters.\"\"\"\n\n def __init__(self, num_filters, *args, **kwargs):\n self.num_filters = num_filters\n super(HyperAnalysisTransform, self).__init__(*args, **kwargs)\n\n def build(self, input_shape):\n self._layers = [\n DynamicSignalConv2D(\n self.num_filters, (3, 3), 
name=\"layer_0\", corr=True, strides_down=1,\n padding=\"same_zeros\", use_bias=True,\n activation=tf.nn.relu),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_1\", corr=True, strides_down=2,\n padding=\"same_zeros\", use_bias=True,\n activation=tf.nn.relu),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_2\", corr=True, strides_down=2,\n padding=\"same_zeros\", use_bias=False,\n activation=None),\n ]\n super(HyperAnalysisTransform, self).build(input_shape)\n\n def call(self, tensor):\n for layer in self._layers:\n tensor = layer(tensor)\n return tensor\n\n\nclass HyperSynthesisTransform(tf.keras.layers.Layer):\n \"\"\"The synthesis transform for the entropy model parameters.\"\"\"\n\n def __init__(self, num_filters, *args, **kwargs):\n self.num_filters = num_filters\n super(HyperSynthesisTransform, self).__init__(*args, **kwargs)\n\n def build(self, input_shape):\n self._layers = [\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_0\", corr=False, strides_up=2,\n padding=\"same_zeros\", use_bias=True, kernel_parameterizer=None,\n activation=tf.nn.relu),\n DynamicSignalConv2D(\n self.num_filters, (5, 5), name=\"layer_1\", corr=False, strides_up=2,\n padding=\"same_zeros\", use_bias=True, kernel_parameterizer=None,\n activation=tf.nn.relu),\n DynamicSignalConv2D(\n self.num_filters, (3, 3), name=\"layer_2\", corr=False, strides_up=1,\n padding=\"same_zeros\", use_bias=True, kernel_parameterizer=None,\n activation=None),\n ]\n super(HyperSynthesisTransform, self).build(input_shape)\n\n def call(self, tensor):\n for layer in self._layers:\n tensor = layer(tensor)\n return tensor\n\n\ndef train(args):\n \"\"\"Trains the model.\"\"\"\n\n if args.verbose:\n tf.logging.set_verbosity(tf.logging.INFO)\n\n # Create input data pipeline.\n with tf.device(\"/cpu:0\"):\n train_files = glob.glob(args.train_glob)\n if not train_files:\n raise RuntimeError(\n \"No training images found with glob '{}'.\".format(args.train_glob))\n train_dataset = tf.data.Dataset.from_tensor_slices(train_files)\n train_dataset = train_dataset.shuffle(buffer_size=len(train_files)).repeat()\n train_dataset = train_dataset.map(\n read_png, num_parallel_calls=args.preprocess_threads)\n train_dataset = train_dataset.map(\n lambda x: tf.random_crop(x, (args.patchsize, args.patchsize, 3)))\n train_dataset = train_dataset.batch(args.batchsize)\n train_dataset = train_dataset.prefetch(32)\n\n num_pixels = args.batchsize * args.patchsize ** 2\n\n # Get training patch from dataset.\n x = train_dataset.make_one_shot_iterator().get_next()\n\n # Instantiate model.\n analysis_transform = AnalysisTransform(args.num_filters)\n synthesis_transform = SynthesisTransform(args.num_filters)\n hyper_analysis_transform = HyperAnalysisTransform(args.num_filters)\n hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters)\n entropy_bottleneck = DynamicEntropyBottleneck()\n\n # Build autoencoder and hyperprior.\n y = analysis_transform(x)\n z = hyper_analysis_transform(abs(y))\n z_tilde, z_likelihoods = entropy_bottleneck(z, training=True)\n sigma = hyper_synthesis_transform(z_tilde)\n scale_table = np.exp(np.linspace(\n np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))\n conditional_bottleneck = tfc.GaussianConditional(sigma, scale_table)\n y_tilde, y_likelihoods = conditional_bottleneck(y, training=True)\n x_tilde = synthesis_transform(y_tilde)\n\n # Total number of bits divided by number of pixels.\n train_bpp = (tf.reduce_sum(tf.log(y_likelihoods)) +\n 
tf.reduce_sum(tf.log(z_likelihoods))) / (-np.log(2) * num_pixels)\n\n # Mean squared error across pixels.\n train_mse = tf.reduce_mean(tf.squared_difference(x, x_tilde))\n # Multiply by 255^2 to correct for rescaling.\n train_mse *= 255 ** 2\n\n # The rate-distortion cost.\n train_loss = args.lmbda * train_mse + train_bpp\n\n # Minimize loss and auxiliary loss, and execute update op.\n step = tf.train.create_global_step()\n main_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)\n main_step = main_optimizer.minimize(train_loss, global_step=step)\n\n aux_optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)\n aux_step = aux_optimizer.minimize(entropy_bottleneck.losses[0])\n\n train_op = tf.group(main_step, aux_step, entropy_bottleneck.updates[0])\n\n tf.summary.scalar(\"loss\", train_loss)\n tf.summary.scalar(\"bpp\", train_bpp)\n tf.summary.scalar(\"mse\", train_mse)\n\n tf.summary.image(\"original\", quantize_image(x))\n tf.summary.image(\"reconstruction\", quantize_image(x_tilde))\n\n hooks = [\n tf.train.StopAtStepHook(last_step=args.last_step),\n tf.train.NanTensorHook(train_loss),\n ]\n with tf.train.MonitoredTrainingSession(\n hooks=hooks, checkpoint_dir=args.checkpoint_dir,\n save_checkpoint_secs=300, save_summaries_secs=60) as sess:\n while not sess.should_stop():\n sess.run(train_op)\n\n\ndef test_train(args):\n \"\"\"Trains the model.\"\"\"\n\n if args.verbose:\n tf.logging.set_verbosity(tf.logging.INFO)\n\n # Create input data pipeline.\n with tf.device(\"/cpu:0\"):\n train_files = glob.glob(args.train_glob)\n if not train_files:\n raise RuntimeError(\n \"No training images found with glob '{}'.\".format(args.train_glob))\n train_dataset = tf.data.Dataset.from_tensor_slices(train_files)\n train_dataset = train_dataset.shuffle(buffer_size=len(train_files)).repeat()\n train_dataset = train_dataset.map(\n read_png, num_parallel_calls=args.preprocess_threads)\n train_dataset = train_dataset.map(\n lambda x: tf.random_crop(x, (args.patchsize, args.patchsize, 3)))\n train_dataset = train_dataset.batch(args.batchsize)\n train_dataset = train_dataset.prefetch(32)\n\n num_pixels = args.batchsize * args.patchsize ** 2\n\n # Get training patch from dataset.\n x = train_dataset.make_one_shot_iterator().get_next()\n\n # Instantiate model.\n analysis_transform = AnalysisTransform(args.num_filters)\n synthesis_transform = SynthesisTransform(args.num_filters)\n hyper_analysis_transform = HyperAnalysisTransform(args.num_filters)\n hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters)\n entropy_bottleneck = DynamicEntropyBottleneck(name=\"entropy_bottleneck\")\n\n # Build autoencoder and hyperprior.\n y = analysis_transform(x)\n z = hyper_analysis_transform(abs(y))\n z_tilde, z_likelihoods = entropy_bottleneck(z, training=True)\n sigma = hyper_synthesis_transform(z_tilde)\n scale_table = np.exp(np.linspace(\n np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))\n conditional_bottleneck = tfc.GaussianConditional(sigma, scale_table)\n y_tilde, y_likelihoods = conditional_bottleneck(y, training=True)\n x_tilde = synthesis_transform(y_tilde)\n\n with tf.Session() as sess:\n latest = tf.train.latest_checkpoint(checkpoint_dir=\"./tfc256-05\")\n tf.train.Saver().restore(sess, save_path=latest)\n\n active_0 = 256\n x_tilde_0 = synthesis_transform(y_tilde[:,:,:,:active_0])\n train_bpp_0 = (tf.reduce_sum(tf.log(y_likelihoods[:,:,:,:active_0])) +\n tf.reduce_sum(tf.log(z_likelihoods))) / (-np.log(2) * num_pixels)\n train_mse_0 = tf.reduce_mean(tf.squared_difference(x, x_tilde_0)) * 
(255**2)\n\n active_1 = 248\n x_tilde_1 = synthesis_transform(y_tilde[:,:,:,:active_1])\n train_bpp_1 = (tf.reduce_sum(tf.log(y_likelihoods[:,:,:,:active_1])) +\n tf.reduce_sum(tf.log(z_likelihoods))) / (-np.log(2) * num_pixels)\n train_mse_1 = tf.reduce_mean(tf.squared_difference(x, x_tilde_1)) * (255**2)\n\n def RateOfWidth(W):\n return 0.0267 * np.exp(0.0178*W)\n\n # The rate-distortion cost.\n train_loss = train_mse_0 + train_mse_1 \\\n + 1000*tf.squared_difference(train_bpp_0, RateOfWidth(active_0)) \\\n + 1000*tf.squared_difference(train_bpp_1, RateOfWidth(active_1)) \n\n # Minimize loss and auxiliary loss, and execute update op.\n step = tf.train.create_global_step()\n main_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)\n main_step = main_optimizer.minimize(train_loss, global_step=step)\n\n aux_optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)\n aux_step = aux_optimizer.minimize(entropy_bottleneck.losses[0])\n\n train_op = tf.group(main_step, aux_step, entropy_bottleneck.updates[0])\n\n tf.summary.scalar(\"loss\", train_loss)\n tf.summary.scalar(\"bpp\", train_bpp_1)\n tf.summary.scalar(\"mse\", train_mse_1)\n\n tf.summary.image(\"original\", quantize_image(x))\n tf.summary.image(\"reconstruction\", quantize_image(x_tilde_1))\n\n hooks = [\n tf.train.StopAtStepHook(last_step=args.last_step),\n tf.train.NanTensorHook(train_loss),\n ]\n with tf.train.MonitoredTrainingSession(\n hooks=hooks, checkpoint_dir=args.checkpoint_dir,\n save_checkpoint_secs=300, save_summaries_secs=60) as sess:\n while not sess.should_stop():\n sess.run(train_op)\n\n\n\n\ndef compress(args):\n \"\"\"Compresses an image.\"\"\"\n\n # Load input image and add batch dimension.\n x = read_png(args.input_file)\n x = tf.expand_dims(x, 0)\n x.set_shape([1, None, None, 3])\n x_shape = tf.shape(x)\n\n # Instantiate model.\n analysis_transform = AnalysisTransform(args.num_filters)\n synthesis_transform = SynthesisTransform(args.num_filters)\n hyper_analysis_transform = HyperAnalysisTransform(args.num_filters)\n hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters)\n entropy_bottleneck = tfc.EntropyBottleneck()\n\n # Transform and compress the image.\n y = analysis_transform(x)\n y_shape = tf.shape(y)\n z = hyper_analysis_transform(abs(y))\n z_hat, z_likelihoods = entropy_bottleneck(z, training=False)\n sigma = hyper_synthesis_transform(z_hat)\n sigma = sigma[:, :y_shape[1], :y_shape[2], :]\n scale_table = np.exp(np.linspace(\n np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))\n conditional_bottleneck = tfc.GaussianConditional(sigma, scale_table)\n side_string = entropy_bottleneck.compress(z)\n string = conditional_bottleneck.compress(y)\n\n # Transform the quantized image back (if requested).\n y_hat, y_likelihoods = conditional_bottleneck(y, training=False)\n x_hat = synthesis_transform(y_hat)\n x_hat = x_hat[:, :x_shape[1], :x_shape[2], :]\n\n num_pixels = tf.cast(tf.reduce_prod(tf.shape(x)[:-1]), dtype=tf.float32)\n\n # Total number of bits divided by number of pixels.\n eval_bpp = (tf.reduce_sum(tf.log(y_likelihoods)) +\n tf.reduce_sum(tf.log(z_likelihoods))) / (-np.log(2) * num_pixels)\n\n # Bring both images back to 0..255 range.\n x *= 255\n x_hat = tf.clip_by_value(x_hat, 0, 1)\n x_hat = tf.round(x_hat * 255)\n\n mse = tf.reduce_mean(tf.squared_difference(x, x_hat))\n psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))\n msssim = tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))\n\n with tf.Session() as sess:\n # Load the latest model checkpoint, get the compressed string and the 
tensor\n # shapes.\n latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)\n tf.train.Saver().restore(sess, save_path=latest)\n tensors = [string, side_string,\n tf.shape(x)[1:-1], tf.shape(y)[1:-1], tf.shape(z)[1:-1]]\n arrays = sess.run(tensors)\n\n # Write a binary file with the shape information and the compressed string.\n packed = tfc.PackedTensors()\n packed.pack(tensors, arrays)\n with open(args.output_file, \"wb\") as f:\n f.write(packed.string)\n\n # If requested, transform the quantized image back and measure performance.\n if args.verbose:\n eval_bpp, mse, psnr, msssim, num_pixels = sess.run(\n [eval_bpp, mse, psnr, msssim, num_pixels])\n\n # The actual bits per pixel including overhead.\n bpp = len(packed.string) * 8 / num_pixels\n\n print(\"Mean squared error: {:0.4f}\".format(mse))\n print(\"PSNR (dB): {:0.2f}\".format(psnr))\n print(\"Multiscale SSIM: {:0.4f}\".format(msssim))\n print(\"Multiscale SSIM (dB): {:0.2f}\".format(-10 * np.log10(1 - msssim)))\n print(\"Information content in bpp: {:0.4f}\".format(eval_bpp))\n print(\"Actual bits per pixel: {:0.4f}\".format(bpp))\n\n\ndef get_uninitialized_variables(sess):\n global_vars = tf.global_variables()\n\n # print([str(i.name) for i in global_vars])\n\n is_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])\n not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]\n print([str(i.name) for i in not_initialized_vars])\n return not_initialized_vars\n\n\nclass DyTFC():\n def __init__(self, num_filters):\n self.num_filters = num_filters\n self.analysis_transform = AnalysisTransform(num_filters)\n self.synthesis_transform = SynthesisTransform(num_filters)\n self.hyper_analysis_transform = HyperAnalysisTransform(num_filters)\n self.hyper_synthesis_transform = HyperSynthesisTransform(num_filters)\n self.entropy_bottleneck = DynamicEntropyBottleneck(name=\"entropy_bottleneck\")\n \n def build(self, x):\n self.x = x\n self.x_shape = tf.shape(self.x)\n\n # Transform and compress the image.\n self.y = self.analysis_transform(self.x)\n self.y_shape = tf.shape(self.y)\n self.z = self.hyper_analysis_transform(abs(self.y))\n self.z_shape = tf.shape(self.z)\n self.z_hat, self.z_likelihoods = self.entropy_bottleneck(self.z, training=False)\n sigma = self.hyper_synthesis_transform(self.z_hat)\n self.sigma = sigma[:, :self.y_shape[1], :self.y_shape[2], :]\n self.scale_table = np.exp(np.linspace(\n np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))\n self.conditional_bottleneck = DynamicGaussianConditional(self.sigma, self.scale_table, name=\"gaussian_conditional\")\n self.side_string = self.entropy_bottleneck.compress(self.z)\n self.string = self.conditional_bottleneck.compress(self.y)\n\n # Transform the quantized image back (if requested).\n self.y_hat, self.y_likelihoods = self.conditional_bottleneck(self.y, training=False)\n self.x_hat = self.synthesis_transform(self.y_hat)\n self.x_hat = self.x_hat[:, :self.x_shape[1], :self.x_shape[2], :]\n\n self.num_pixels = tf.cast(tf.reduce_prod(tf.shape(self.x)[:-1]), dtype=tf.float32)\n\n # Total number of bits divided by number of pixels.\n self.eval_bpp = (tf.reduce_sum(tf.log(self.y_likelihoods)) +\n tf.reduce_sum(tf.log(self.z_likelihoods))) / (-np.log(2) * self.num_pixels)\n\n x = self.x * 255\n x_hat = tf.clip_by_value(self.x_hat, 0, 1)\n x_hat = tf.round(x_hat * 255)\n\n self.mse = tf.reduce_mean(tf.squared_difference(x, x_hat))\n self.psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))\n self.msssim = 
tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))\n\n self.vst = {v.name:v for v in tf.global_variables()}\n\n def _reorg(self, sess, trans, active_out_filters, sort_in, sort_out, flag=None):\n layers = trans._layers\n sorted_idx = sort_in\n for layer in layers[:-1]:\n layer.active_out_filters = active_out_filters\n sorted_idx = layer.sort_filter(sess, self.vst, sorted_idx, True)\n if flag is not \"tail\":\n layers[-1].active_out_filters = active_out_filters\n sorted_idx = layers[-1].sort_filter(sess, self.vst, sorted_idx, sort_out)\n return sorted_idx\n\n def reorg(self, sess, active):\n y_sorted_idx = self._reorg(sess, self.analysis_transform, active, False, True, \"head\")\n # print(sess.run(y_sorted_idx))\n self._reorg( sess, self.synthesis_transform, active, y_sorted_idx, False, \"tail\")\n z_sorted_idx = self._reorg(sess, self.hyper_analysis_transform, active, y_sorted_idx, True, \"body\")\n self.entropy_bottleneck.sort_weight(sess, self.vst, z_sorted_idx)\n self.entropy_bottleneck.input_spec = tf.keras.layers.InputSpec(ndim=4, axes={3: active})\n sorted_idx = self._reorg( sess, self.hyper_synthesis_transform, active, z_sorted_idx, y_sorted_idx, \"body\")\n\n def _active(self, trans, active_out_filters, flag=None):\n layers = trans._layers\n for layer in layers[:-1]:\n layer.active_out_filters = active_out_filters\n if flag is not \"tail\":\n layers[-1].active_out_filters = active_out_filters\n\n def set_active(self, active):\n self._active(self.analysis_transform, active, \"head\")\n self._active(self.synthesis_transform, active, \"tail\")\n self._active(self.hyper_analysis_transform, active, \"body\")\n self._active(self.hyper_synthesis_transform, active, \"body\")\n self.entropy_bottleneck.input_spec = tf.keras.layers.InputSpec(ndim=4, axes={3: active})\n self.conditional_bottleneck.input_spec = tf.keras.layers.InputSpec(ndim=4, axes={3: active})\n\n\ndef test_compress(args):\n \"\"\"Compresses an image.\"\"\"\n\n # Load input image and add batch dimension.\n x = read_png(args.input_file)\n x = tf.expand_dims(x, 0)\n x.set_shape([1, None, None, 3])\n x_shape = tf.shape(x)\n\n net = DyTFC(192)\n net.build(x)\n\n sess = tf.Session()\n latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)\n tf.train.Saver().restore(sess, save_path=latest)\n print(sess.run( tf.reduce_sum(tf.log(net.y_likelihoods), axis=(0,1,2)) / (-np.log(2) * net.num_pixels)) )\n return\n\n #vnames = ['gaussian_conditional/quantized_cdf:0', 'gaussian_conditional/cdf_length:0']\n #old_cb_weights = net.conditional_bottleneck.get_weights()\n\n #print(old_cb_weights)\n #net.set_active(192)\n #net.build(x)\n #sess.run(tf.variables_initializer(get_uninitialized_variables(sess)))\n #sess.run(tf.variables_initializer([net.vst[name] for name in vnames]))\n #net.conditional_bottleneck.set_weights(old_cb_weights)\n\n #\n #tf.train.Saver().save(sess,\"./sort128/model.ckpt\") \n\n tensors = [net.string, net.side_string,\n net.x_shape[1:-1], net.y_shape[1:-1], net.z_shape[1:-1]]\n \n arrays = sess.run(tensors)\n\n # Write a binary file with the shape information and the compressed string.\n packed = tfc.PackedTensors()\n packed.pack(tensors, arrays)\n with open(args.output_file, \"wb\") as f:\n f.write(packed.string)\n\n # If requested, transform the quantized image back and measure performance.\n if args.verbose:\n eval_bpp, mse, psnr, msssim, num_pixels = sess.run(\n [net.eval_bpp, net.mse, net.psnr, net.msssim, net.num_pixels])\n\n # The actual bits per pixel including overhead.\n bpp = len(packed.string) * 
8 / num_pixels\n\n print(\"Mean squared error: {:0.4f}\".format(mse))\n print(\"PSNR (dB): {:0.2f}\".format(psnr))\n print(\"Multiscale SSIM: {:0.4f}\".format(msssim))\n print(\"Multiscale SSIM (dB): {:0.2f}\".format(-10 * np.log10(1 - msssim)))\n print(\"Information content in bpp: {:0.4f}\".format(eval_bpp))\n print(\"Actual bits per pixel: {:0.4f}\".format(bpp))\n \n\n\ndef test_decompress(args):\n \"\"\"Decompresses an image.\"\"\"\n\n # Read the shape information and compressed string from the binary file.\n string = tf.placeholder(tf.string, [1])\n side_string = tf.placeholder(tf.string, [1])\n x_shape = tf.placeholder(tf.int32, [2])\n y_shape = tf.placeholder(tf.int32, [2])\n z_shape = tf.placeholder(tf.int32, [2])\n with open(args.input_file, \"rb\") as f:\n packed = tfc.PackedTensors(f.read())\n tensors = [string, side_string, x_shape, y_shape, z_shape]\n arrays = packed.unpack(tensors)\n\n # Instantiate model.\n synthesis_transform = SynthesisTransform(args.num_filters)\n hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters)\n entropy_bottleneck = tfc.EntropyBottleneck(dtype=tf.float32)\n\n # Decompress and transform the image back.\n z_shape = tf.concat([z_shape, [args.num_filters]], axis=0)\n z_hat = entropy_bottleneck.decompress(\n side_string, z_shape, channels=args.num_filters)\n sigma = hyper_synthesis_transform(z_hat)\n sigma = sigma[:, :y_shape[0], :y_shape[1], :]\n scale_table = np.exp(np.linspace(\n np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))\n conditional_bottleneck = tfc.GaussianConditional(\n sigma, scale_table, dtype=tf.float32)\n y_hat_all = conditional_bottleneck.decompress(string)\n\n x = read_png(\"kodak/kodim01.png\")\n x = tf.expand_dims(x, 0)\n x.set_shape([1, None, None, 3])\n x_shape = tf.shape(x)\n x *= 255\n\n active = 192\n y_hat = y_hat_all[:,:,:,:active]\n x_hat = synthesis_transform(y_hat)\n x_hat = tf.clip_by_value(x_hat, 0, 1)\n x_hat = tf.round(x_hat * 255)\n mse = tf.reduce_mean(tf.squared_difference(x, x_hat))\n psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))\n msssim = tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))\n\n #x_hat = x_hat[0, :x_shape[0], :x_shape[1], :]\n #op = write_png(args.output_file, x_hat)\n\n sess = tf.Session()\n latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)\n tf.train.Saver().restore(sess, save_path=latest)\n #sess.run(op, feed_dict=dict(zip(tensors, arrays)))\n\n #vmse, vpsnr, vmsssim = sess.run([mse, psnr, msssim], feed_dict=dict(zip(tensors, arrays)))\n #print(vmse, vpsnr, vmsssim)\n\n for active in range(192,0,-8):\n y_hat = y_hat_all[:,:,:,:active]\n x_hat = synthesis_transform(y_hat)\n x_hat = tf.clip_by_value(x_hat, 0, 1)\n x_hat = tf.round(x_hat * 255)\n mse = tf.reduce_mean(tf.squared_difference(x, x_hat))\n psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))\n msssim = tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))\n vmse, vpsnr, vmsssim = sess.run([mse, psnr, msssim], feed_dict=dict(zip(tensors, arrays)))\n print(active, vmse, vpsnr, vmsssim)\n\n\n\n\ndef decompress(args):\n \"\"\"Decompresses an image.\"\"\"\n\n # Read the shape information and compressed string from the binary file.\n string = tf.placeholder(tf.string, [1])\n side_string = tf.placeholder(tf.string, [1])\n x_shape = tf.placeholder(tf.int32, [2])\n y_shape = tf.placeholder(tf.int32, [2])\n z_shape = tf.placeholder(tf.int32, [2])\n with open(args.input_file, \"rb\") as f:\n packed = tfc.PackedTensors(f.read())\n tensors = [string, side_string, x_shape, y_shape, z_shape]\n arrays = 
packed.unpack(tensors)\n\n # Instantiate model.\n synthesis_transform = SynthesisTransform(args.num_filters)\n hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters)\n entropy_bottleneck = tfc.EntropyBottleneck(dtype=tf.float32)\n\n # Decompress and transform the image back.\n z_shape = tf.concat([z_shape, [args.num_filters]], axis=0)\n z_hat = entropy_bottleneck.decompress(\n side_string, z_shape, channels=args.num_filters)\n sigma = hyper_synthesis_transform(z_hat)\n sigma = sigma[:, :y_shape[0], :y_shape[1], :]\n scale_table = np.exp(np.linspace(\n np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))\n conditional_bottleneck = tfc.GaussianConditional(\n sigma, scale_table, dtype=tf.float32)\n y_hat = conditional_bottleneck.decompress(string)\n x_hat = synthesis_transform(y_hat)\n\n # Remove batch dimension, and crop away any extraneous padding on the bottom\n # or right boundaries.\n x_hat = x_hat[0, :x_shape[0], :x_shape[1], :]\n\n # Write reconstructed image out as a PNG file.\n op = write_png(args.output_file, x_hat)\n\n # Load the latest model checkpoint, and perform the above actions.\n with tf.Session() as sess:\n latest = tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir)\n tf.train.Saver().restore(sess, save_path=latest)\n sess.run(op, feed_dict=dict(zip(tensors, arrays)))\n\n\ndef parse_args(argv):\n \"\"\"Parses command line arguments.\"\"\"\n parser = argparse_flags.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n # High-level options.\n parser.add_argument(\n \"--verbose\", \"-V\", action=\"store_true\",\n help=\"Report bitrate and distortion when training or compressing.\")\n parser.add_argument(\n \"--num_filters\", type=int, default=192,\n help=\"Number of filters per layer.\")\n parser.add_argument(\n \"--checkpoint_dir\", default=\"train\",\n help=\"Directory where to save/load model checkpoints.\")\n subparsers = parser.add_subparsers(\n title=\"commands\", dest=\"command\",\n help=\"What to do: 'train' loads training data and trains (or continues \"\n \"to train) a new model. 'compress' reads an image file (lossless \"\n \"PNG format) and writes a compressed binary file. 'decompress' \"\n \"reads a binary file and reconstructs the image (in PNG format). \"\n \"input and output filenames need to be provided for the latter \"\n \"two options. Invoke '<command> -h' for more information.\")\n\n # 'train' subcommand.\n train_cmd = subparsers.add_parser(\n \"train\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Trains (or continues to train) a new model.\")\n train_cmd.add_argument(\n \"--train_glob\", default=\"images/*.png\",\n help=\"Glob pattern identifying training data. 
This pattern must expand \"\n \"to a list of RGB images in PNG format.\")\n train_cmd.add_argument(\n \"--batchsize\", type=int, default=8,\n help=\"Batch size for training.\")\n train_cmd.add_argument(\n \"--patchsize\", type=int, default=256,\n help=\"Size of image patches for training.\")\n train_cmd.add_argument(\n \"--lambda\", type=float, default=0.01, dest=\"lmbda\",\n help=\"Lambda for rate-distortion tradeoff.\")\n train_cmd.add_argument(\n \"--last_step\", type=int, default=1000000,\n help=\"Train up to this number of steps.\")\n train_cmd.add_argument(\n \"--preprocess_threads\", type=int, default=16,\n help=\"Number of CPU threads to use for parallel decoding of training \"\n \"images.\")\n\n # 'compress' subcommand.\n compress_cmd = subparsers.add_parser(\n \"compress\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Reads a PNG file, compresses it, and writes a TFCI file.\")\n\n # 'decompress' subcommand.\n decompress_cmd = subparsers.add_parser(\n \"decompress\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Reads a TFCI file, reconstructs the image, and writes back \"\n \"a PNG file.\")\n\n # Arguments for both 'compress' and 'decompress'.\n for cmd, ext in ((compress_cmd, \".tfci\"), (decompress_cmd, \".png\")):\n cmd.add_argument(\n \"input_file\",\n help=\"Input filename.\")\n cmd.add_argument(\n \"output_file\", nargs=\"?\",\n help=\"Output filename (optional). If not provided, appends '{}' to \"\n \"the input filename.\".format(ext))\n\n # Parse arguments.\n args = parser.parse_args(argv[1:])\n if args.command is None:\n parser.print_usage()\n sys.exit(2)\n return args\n\n\ndef main(args):\n # Invoke subcommand.\n if args.command == \"train\":\n test_train(args)\n elif args.command == \"compress\":\n if not args.output_file:\n args.output_file = args.input_file + \".tfci\"\n test_compress(args)\n elif args.command == \"decompress\":\n if not args.output_file:\n args.output_file = args.input_file + \".png\"\n test_decompress(args)\n\n#%%\n\n#%%\n\nif __name__ == \"__main__\":\n app.run(main, flags_parser=parse_args)\n"
] | [
[
"tensorflow.compat.v1.train.MonitoredTrainingSession",
"tensorflow.compat.v1.train.StopAtStepHook",
"tensorflow.compat.v1.log",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.expand_dims",
"numpy.log",
"tensorflow.compat.v1.image.psnr",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.clip_by_value",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.train.NanTensorHook",
"tensorflow.compat.v1.group",
"tensorflow.compat.v1.train.Saver",
"tensorflow.compat.v1.train.create_global_step",
"tensorflow.compat.v1.device",
"tensorflow.compat.v1.read_file",
"numpy.log10",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.saturate_cast",
"tensorflow.compat.v1.keras.layers.InputSpec",
"tensorflow.compat.v1.data.Dataset.from_tensor_slices",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.is_variable_initialized",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.image.encode_png",
"tensorflow.compat.v1.image.decode_image",
"tensorflow.compat.v1.write_file",
"tensorflow.compat.v1.summary.scalar",
"tensorflow.compat.v1.image.ssim_multiscale",
"tensorflow.compat.v1.round",
"tensorflow.compat.v1.train.latest_checkpoint",
"tensorflow.compat.v1.global_variables",
"numpy.exp",
"tensorflow.compat.v1.squared_difference",
"tensorflow.compat.v1.random_crop"
]
] |
szokejokepu/natural-rws | [
"bb1ad4ca3ec714e6bf071d2136593dc853492b68"
] | [
"core/argo/core/network/MultivariateNormalTriL.py"
] | [
"import tensorflow as tf\nfrom tensorflow_probability import distributions as tfd\nfrom functools import partial\nfrom .AbstractGaussianSimple import AbstractGaussianSimple\nimport types\nimport sonnet as snt\n\nclass MultivariateNormalTriL(AbstractGaussianSimple):\n\n def __init__(self,\n output_size,\n minimal_covariance=0.,\n initializers={},\n regularizers={},\n custom_getter={},\n name='normal_tril'):\n super().__init__(output_size=output_size,\n minimal_covariance=minimal_covariance,\n initializers=initializers,\n regularizers=regularizers,\n custom_getter=custom_getter,\n name=name)\n\n def _build(self, inputs):\n\n inputs = tf.layers.flatten(inputs)\n\n self.dense_loc = snt.Linear(self._output_size, **self._extra_kwargs)\n self.dense_diag_params = snt.Linear(self._output_size, **self._extra_kwargs)\n n_out_of_diag_elems = int(self._output_size * (self._output_size - 1) / 2)\n self.dense_out_of_diag_params = snt.Linear(n_out_of_diag_elems, **self._extra_kwargs)\n\n\n loc = self.dense_loc(inputs)\n diag_params = self.dense_diag_params(inputs)\n out_of_diag_params = self.dense_out_of_diag_params(inputs)\n\n lower_triangle = tf.contrib.distributions.fill_triangular(out_of_diag_params)\n lower_triangle = tf.pad(lower_triangle, [[0, 0], [1, 0], [0, 1]])\n\n diag_positive = self._minimal_covariance + tf.nn.softplus(diag_params)\n\n scale_tril = tf.linalg.set_diag(lower_triangle, diag_positive)\n\n dtype = inputs.dtype\n n_tril = n_out_of_diag_elems + self._output_size\n self._calibration_tril_params = tf.get_variable(\"calibration_tril_params\",\n shape=(n_tril,),\n dtype=dtype,\n trainable=False,\n initializer=tf.initializers.constant(value=1.))\n\n self.calibration_tril = tf.contrib.distributions.fill_triangular(self._calibration_tril_params, name=\"calibration_tril\")\n\n\n ouput_params = {\"loc\" : loc, \"scale_tril\" : tf.multiply(self.calibration_tril, scale_tril)}\n\n distr = tfd.MultivariateNormalTriL(**ouput_params)\n\n return distr\n\n"
] | [
[
"tensorflow.pad",
"tensorflow.contrib.distributions.fill_triangular",
"tensorflow.layers.flatten",
"tensorflow.multiply",
"tensorflow.initializers.constant",
"tensorflow.linalg.set_diag",
"tensorflow.nn.softplus"
]
] |
cristhiandcl/AD-DL | [
"b7abb3fe619e736b269067033ba4aad1f03cf3b8"
] | [
"clinicadl/clinicadl/tools/tsv/tsv_utils.py"
] | [
"# coding: utf8\n\nfrom copy import copy\nimport numpy as np\nimport pandas as pd\nfrom os import path\n\n\ndef neighbour_session(session, session_list, neighbour):\n if session not in session_list:\n temp_list = session_list + [session]\n temp_list.sort()\n else:\n temp_list = copy(session_list)\n temp_list.sort()\n index_session = temp_list.index(session)\n\n if index_session + neighbour < 0 or index_session + neighbour >= len(temp_list):\n return None\n else:\n if temp_list[index_session + neighbour] < 10:\n return 'ses-M0' + str(temp_list[index_session + neighbour])\n else:\n return 'ses-M' + str(temp_list[index_session + neighbour])\n\n\ndef after_end_screening(session, session_list):\n if session in session_list:\n return False\n else:\n temp_list = session_list + [session]\n temp_list.sort()\n index_session = temp_list.index(session)\n return index_session == len(temp_list) - 1\n\n\ndef last_session(session_list):\n temp_list = copy(session_list)\n temp_list.sort()\n if temp_list[-1] < 10:\n return 'ses-M0' + str(temp_list[-1])\n else:\n return 'ses-M' + str(temp_list[-1])\n\n\ndef complementary_list(total_list, sub_list):\n result_list = []\n for element in total_list:\n if element not in sub_list:\n result_list.append(element)\n return result_list\n\n\ndef first_session(subject_df):\n session_list = [int(session[5:]) for _, session in subject_df.index.values]\n session_list.sort()\n first_session = session_list[0]\n if first_session < 10:\n return 'ses-M0' + str(first_session)\n else:\n return 'ses-M' + str(first_session)\n\n\ndef next_session(subject_df, session_orig):\n session_list = [int(session[5:]) for _, session in subject_df.index.values]\n session_list.sort()\n session_id_list = []\n for session in session_list:\n if session < 10:\n session_id_list.append('ses-M0' + str(session))\n else:\n session_id_list.append('ses-M' + str(session))\n index = session_id_list.index(session_orig)\n if index < len(session_id_list) - 1:\n return session_id_list[index + 1]\n else:\n raise ValueError('The argument session is the last session')\n\n\ndef extract_baseline(diagnosis_df, diagnosis, set_index=True):\n from copy import deepcopy\n\n if set_index:\n all_df = diagnosis_df.set_index(['participant_id', 'session_id'])\n else:\n all_df = deepcopy(diagnosis_df)\n\n result_df = pd.DataFrame()\n for subject, subject_df in all_df.groupby(level=0):\n baseline = first_session(subject_df)\n subject_baseline_df = pd.DataFrame(data=[[subject, baseline] +\n subject_df.loc[(subject, baseline)].tolist()],\n columns=[\"participant_id\", \"session_id\"] + subject_df.columns.values.tolist())\n result_df = pd.concat([result_df, subject_baseline_df])\n\n result_df[\"diagnosis\"] = [diagnosis] * len(result_df)\n result_df.reset_index(inplace=True, drop=True)\n\n return result_df\n\n\ndef chi2(x_test, x_train):\n from scipy.stats import chisquare\n\n # Look for chi2 computation\n total_categories = np.concatenate([x_test, x_train])\n unique_categories = np.unique(total_categories)\n f_obs = [(x_test == category).sum() / len(x_test) for category in unique_categories]\n f_exp = [(x_train == category).sum() / len(x_train) for category in unique_categories]\n\n T, p = chisquare(f_obs, f_exp)\n\n return T, p\n\n\ndef add_demographics(df, demographics_df, diagnosis):\n out_df = pd.DataFrame()\n tmp_demo_df = copy(demographics_df)\n tmp_demo_df.reset_index(inplace=True)\n for idx in df.index.values:\n participant = df.loc[idx, \"participant_id\"]\n session = df.loc[idx, \"session_id\"]\n row_df = 
tmp_demo_df[(tmp_demo_df.participant_id == participant) & (tmp_demo_df.session_id == session)]\n out_df = pd.concat([out_df, row_df])\n out_df.reset_index(inplace=True, drop=True)\n out_df.diagnosis = [diagnosis] * len(out_df)\n return out_df\n\n\ndef remove_unicity(values_list):\n \"\"\"Count the values of each class and label all the classes with only one label under the same label.\"\"\"\n unique_classes, counts = np.unique(values_list, return_counts=True)\n one_sub_classes = unique_classes[(counts == 1)]\n for class_element in one_sub_classes:\n values_list[values_list.index(class_element)] = unique_classes.min()\n\n return values_list\n\n\ndef category_conversion(values_list):\n values_np = np.array(values_list)\n unique_classes = np.unique(values_np)\n for index, unique_class in enumerate(unique_classes):\n values_np[values_np == unique_class] = index + 1\n\n return values_np.astype(int).tolist()\n\n\ndef find_label(labels_list, target_label):\n if target_label in labels_list:\n return target_label\n else:\n min_length = np.inf\n found_label = None\n for label in labels_list:\n if target_label.lower() in label.lower() and min_length > len(label):\n min_length = len(label)\n found_label = label\n if found_label is None:\n raise ValueError(f\"No label was found in {labels_list} for target label {target_label}.\")\n\n return found_label\n\n\ndef retrieve_longitudinal(df, diagnosis_df):\n final_df = pd.DataFrame()\n for idx in df.index.values:\n subject = df.loc[idx, 'participant_id']\n row_df = diagnosis_df[diagnosis_df.participant_id == subject]\n final_df = pd.concat([final_df, row_df])\n\n return final_df\n\n\ndef remove_sub_labels(diagnosis_df, sub_labels, diagnosis_df_paths, results_path,\n logger=None):\n\n from ..deep_learning.iotools import return_logger\n\n if logger is None:\n logger = return_logger(2, \"remove sub labels\")\n\n supplementary_diagnoses = []\n\n logger.debug('Before subjects removal')\n sub_df = diagnosis_df.reset_index().groupby('participant_id')['session_id'].nunique()\n logger.debug(f'{len(sub_df)} subjects, {len(diagnosis_df)} scans')\n\n for label in sub_labels:\n if f'{label}.tsv' in diagnosis_df_paths:\n sub_diag_df = pd.read_csv(path.join(results_path, f'{label}.tsv'), sep='\\t')\n sub_diag_baseline_df = extract_baseline(sub_diag_df, label)\n for idx in sub_diag_baseline_df.index.values:\n subject = sub_diag_baseline_df.loc[idx, 'participant_id']\n diagnosis_df.drop(subject, inplace=True, level=0)\n supplementary_diagnoses.append(label)\n\n logger.debug(f'Removed {len(sub_diag_baseline_df)} subjects based on {label} label')\n sub_df = diagnosis_df.reset_index().groupby('participant_id')['session_id'].nunique()\n logger.debug(f'{len(sub_df)} subjects, {len(diagnosis_df)} scans')\n\n return diagnosis_df, supplementary_diagnoses\n"
] | [
[
"scipy.stats.chisquare",
"pandas.DataFrame",
"pandas.concat",
"numpy.array",
"numpy.concatenate",
"numpy.unique"
]
] |
jeetbanik/Corona-Real-Time-Face-Mask-and-Keypoints-Detection | [
"3232f5d7b84fffcc61c2bb84d1b5109154bcc6bb"
] | [
"Testing Model Including Facial Keypoints.py"
] | [
"import numpy as np\nfrom PIL import Image\nimport cv2\nfrom model import Net\nimport torch\nfrom torchvision import transforms\nfrom mtcnn import MTCNN\n\ndef LoadModel(fpath):\n '''\n function to load saved model\n '''\n c = torch.load(fpath, map_location='cpu')\n model = c['model']\n model.load_state_dict(c['state_dict'])\n # as we've to perform testing, we don't need backpropagation so setting 'requires_grad' as false\n for parameter in model.parameters():\n parameter.requires_grad = False\n # model.eval() -> .eval() does not change any behaviour of gradient calculations , but are used to set specific layers\n # like dropout and batchnorm to evaluation mode i.e. dropout layer won't drop activations and \n # batchnorm will use running estimates instead of batch statistics.\n return model.eval()\n\ntrain_transforms = transforms.Compose([\n transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))\n])\n\n# Initializing file paths for both the models\nfpath1 = 'Real-Time Face Mask Detection Model.pth'\nfpath2 = 'Facial Keypoints Model.pt'\n\n# Loading the models for testing\nmodel = LoadModel(fpath1)\nnet = Net()\nnet.load_state_dict(torch.load(fpath2))\nfor parameter in net.parameters():\n parameter.requires_grad = False\nnet.eval()\nmodel_lm = net\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\ndetector = MTCNN()\n\n# Accessing the webcam\ncap = cv2.VideoCapture(0)\nf = cv2.FONT_HERSHEY_DUPLEX\nt = 2\nred = (0,0,255)\ngreen = (0,255,0)\nblue = (255,255,0)\nyellow = (0,155,255)\nwhile (cap.isOpened()):\n # getting the frame in 'frm' and a bool value in 'ret' which is true if a frame is returned\n ret, frm = cap.read()\n if ret == True:\n # converting into grayscale for feature reduction and grayscale images are less computation intensive to operate on\n gray = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)\n col = cv2.cvtColor(frm, cv2.COLOR_BGR2RGB)\n # detecting the faces in the frame returned, it will return the coords of bounding box along with its height and width\n result = detector.detect_faces(col)\n for box in result:\n x, y, w, h = box['box']\n keypoints = box['keypoints']\n # drawing the bounding box based on the coordinates provided by haar_cascade\n cv2.rectangle(frm, (x,y), (x+w,y+h), 2)\n # cropping the portion of image covered by the bounding box\n crp = Image.fromarray(frm,mode = 'RGB')\n #cropped_img = frm[y:y+h, x:x+w]\n cropped_img = crp.crop((x,y,x+w,y+h))\n s = (w*h)/(50000)\n if s<0.5:\n s=0.5\n pil_image = train_transforms(cropped_img)\n image = pil_image.unsqueeze(0)\n # feeding the test cropped image into the model\n result = model(image)\n img = np.array(image)\n img = img[:,0,:,:]\n img = img.reshape(img.shape[0], 1, img.shape[1], img.shape[2])\n result_lm = model_lm(torch.from_numpy(img))\n result_lm = np.array(result_lm)\n result_lm = result_lm*(0.19*h)\n result_lm = result_lm.reshape(68,2)\n result_lm[:,0] += x+(0.28*h)\n result_lm[:,1] += y+(0.49*w)\n _, maximum = torch.max(result.data, 1)\n pred = maximum.item()\n # displaying results based on classification\n if pred == 0:\n cv2.circle(frm, (keypoints['left_eye']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['right_eye']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['nose']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['mouth_left']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['mouth_right']), 2, yellow, 2)\n (lw,lh), bl = cv2.getTextSize(\"Correctly Masked\", f, s, t)\n cv2.putText(frm, \"Correctly Masked\", 
((int(((w+x)-x-lw)/2)+x),y-10), f, s, green, t)\n cv2.rectangle(frm, (x,y), (x+w,y+h), green, 2) # green colour rectangle if mask is worn correctly\n elif pred == 1:\n cv2.circle(frm, (keypoints['left_eye']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['right_eye']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['nose']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['mouth_left']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['mouth_right']), 2, yellow, 2)\n (lw,lh), bl = cv2.getTextSize(\"Unmasked\", f, s, t)\n cv2.putText(frm, \"Unmasked\", ((int(((w+x)-x-lw)/2)+x),y-10), f, s, red, t)\n cv2.rectangle(frm, (x,y), (x+w,y+h), red, 2) # red colour rectangle if mask is not being worn\n elif pred == 2:\n cv2.circle(frm, (keypoints['left_eye']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['right_eye']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['nose']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['mouth_left']), 2, yellow, 2)\n cv2.circle(frm, (keypoints['mouth_right']), 2, yellow, 2)\n (lw,lh), bl = cv2.getTextSize(\"Incorrectly Masked\", f, s, t)\n cv2.putText(frm, \"Incorrectly Masked\", ((int(((w+x)-x-lw)/2)+x),y-10), f, s, blue, t)\n cv2.rectangle(frm, (x,y), (x+w,y+h), blue, 2) # blue colour rectangle if mask is not worn correctly\n cv2.imshow('frame',frm)\n if (cv2.waitKey(1) & 0xFF) == ord('q'): # press 'q' to exit\n break\n else:\n break\ncap.release()\ncv2.destroyAllWindows()"
] | [
[
"torch.load",
"torch.cuda.is_available",
"torch.from_numpy",
"torch.max",
"numpy.array"
]
] |
KarthikKothareddy/AirFlow | [
"faaf0b8b4467bcf5bff4a5b49086a9e02cb9c112"
] | [
"tests/core.py"
] | [
"# -*- coding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport json\nimport unittest\n\nimport bleach\nimport doctest\nimport mock\nimport multiprocessing\nimport os\nimport re\nimport signal\nimport sqlalchemy\nimport tempfile\nimport warnings\nfrom datetime import timedelta\nfrom dateutil.relativedelta import relativedelta\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\nfrom freezegun import freeze_time\nfrom numpy.testing import assert_array_almost_equal\nfrom six.moves.urllib.parse import urlencode\nfrom time import sleep\n\nfrom airflow import configuration\nfrom airflow.executors import SequentialExecutor\nfrom airflow.models import Variable\n\nconfiguration.load_test_config()\nfrom airflow import jobs, models, DAG, utils, macros, settings, exceptions\nfrom airflow.models import BaseOperator\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.check_operator import CheckOperator, ValueCheckOperator\nfrom airflow.operators.dagrun_operator import TriggerDagRunOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.dummy_operator import DummyOperator\n\nfrom airflow.hooks.base_hook import BaseHook\nfrom airflow.hooks.sqlite_hook import SqliteHook\nfrom airflow.bin import cli\nfrom airflow.www import app as application\nfrom airflow.settings import Session\nfrom airflow.utils import timezone\nfrom airflow.utils.timezone import datetime\nfrom airflow.utils.state import State\nfrom airflow.utils.dates import infer_time_unit, round_time, scale_time_units\nfrom lxml import html\nfrom airflow.exceptions import AirflowException\nfrom airflow.configuration import AirflowConfigException, run_command\nfrom jinja2.sandbox import SecurityError\nfrom jinja2 import UndefinedError\n\nimport six\n\nNUM_EXAMPLE_DAGS = 19\nDEV_NULL = '/dev/null'\nTEST_DAG_FOLDER = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), 'dags')\nDEFAULT_DATE = datetime(2015, 1, 1)\nDEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()\nDEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]\nTEST_DAG_ID = 'unit_tests'\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n # Python 3\n import pickle\n\n\ndef reset(dag_id=TEST_DAG_ID):\n session = Session()\n tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)\n tis.delete()\n session.commit()\n session.close()\n\n\nreset()\n\n\nclass OperatorSubclass(BaseOperator):\n \"\"\"\n An operator to test template substitution\n \"\"\"\n template_fields = ['some_templated_field']\n\n def __init__(self, some_templated_field, *args, **kwargs):\n super(OperatorSubclass, self).__init__(*args, **kwargs)\n self.some_templated_field = some_templated_field\n\n def execute(*args, **kwargs):\n pass\n\n\nclass CoreTest(unittest.TestCase):\n # These defaults make the test faster to run\n default_scheduler_args = {\"file_process_interval\": 0,\n \"processor_poll_interval\": 0.5,\n \"num_runs\": 1}\n\n def setUp(self):\n 
configuration.load_test_config()\n self.dagbag = models.DagBag(\n dag_folder=DEV_NULL, include_examples=True)\n self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}\n self.dag = DAG(TEST_DAG_ID, default_args=self.args)\n self.dag_bash = self.dagbag.dags['example_bash_operator']\n self.runme_0 = self.dag_bash.get_task('runme_0')\n self.run_after_loop = self.dag_bash.get_task('run_after_loop')\n self.run_this_last = self.dag_bash.get_task('run_this_last')\n\n def test_schedule_dag_no_previous_runs(self):\n \"\"\"\n Tests scheduling a dag with no previous runs\n \"\"\"\n dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')\n dag.add_task(models.BaseOperator(\n task_id=\"faketastic\",\n owner='Also fake',\n start_date=datetime(2015, 1, 2, 0, 0)))\n\n dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)\n self.assertIsNotNone(dag_run)\n self.assertEqual(dag.dag_id, dag_run.dag_id)\n self.assertIsNotNone(dag_run.run_id)\n self.assertNotEqual('', dag_run.run_id)\n self.assertEqual(\n datetime(2015, 1, 2, 0, 0),\n dag_run.execution_date,\n msg='dag_run.execution_date did not match expectation: {0}'\n .format(dag_run.execution_date)\n )\n self.assertEqual(State.RUNNING, dag_run.state)\n self.assertFalse(dag_run.external_trigger)\n dag.clear()\n\n def test_schedule_dag_fake_scheduled_previous(self):\n \"\"\"\n Test scheduling a dag where there is a prior DagRun\n which has the same run_id as the next run should have\n \"\"\"\n delta = timedelta(hours=1)\n dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',\n schedule_interval=delta,\n start_date=DEFAULT_DATE)\n dag.add_task(models.BaseOperator(\n task_id=\"faketastic\",\n owner='Also fake',\n start_date=DEFAULT_DATE))\n\n scheduler = jobs.SchedulerJob(**self.default_scheduler_args)\n dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),\n execution_date=DEFAULT_DATE,\n state=State.SUCCESS,\n external_trigger=True)\n dag_run = scheduler.create_dag_run(dag)\n self.assertIsNotNone(dag_run)\n self.assertEqual(dag.dag_id, dag_run.dag_id)\n self.assertIsNotNone(dag_run.run_id)\n self.assertNotEqual('', dag_run.run_id)\n self.assertEqual(\n DEFAULT_DATE + delta,\n dag_run.execution_date,\n msg='dag_run.execution_date did not match expectation: {0}'\n .format(dag_run.execution_date)\n )\n self.assertEqual(State.RUNNING, dag_run.state)\n self.assertFalse(dag_run.external_trigger)\n\n def test_schedule_dag_once(self):\n \"\"\"\n Tests scheduling a dag scheduled for @once - should be scheduled the first time\n it is called, and not scheduled the second.\n \"\"\"\n dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')\n dag.schedule_interval = '@once'\n dag.add_task(models.BaseOperator(\n task_id=\"faketastic\",\n owner='Also fake',\n start_date=datetime(2015, 1, 2, 0, 0)))\n dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)\n dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)\n\n self.assertIsNotNone(dag_run)\n self.assertIsNone(dag_run2)\n dag.clear()\n\n def test_fractional_seconds(self):\n \"\"\"\n Tests if fractional seconds are stored in the database\n \"\"\"\n dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')\n dag.schedule_interval = '@once'\n dag.add_task(models.BaseOperator(\n task_id=\"faketastic\",\n owner='Also fake',\n start_date=datetime(2015, 1, 2, 0, 0)))\n\n start_date = timezone.utcnow()\n\n run = dag.create_dagrun(\n run_id='test_' + start_date.isoformat(),\n execution_date=start_date,\n start_date=start_date,\n 
state=State.RUNNING,\n external_trigger=False\n )\n\n run.refresh_from_db()\n\n self.assertEqual(start_date, run.execution_date,\n \"dag run execution_date loses precision\")\n self.assertEqual(start_date, run.start_date,\n \"dag run start_date loses precision \")\n\n def test_schedule_dag_start_end_dates(self):\n \"\"\"\n Tests that an attempt to schedule a task after the Dag's end_date\n does not succeed.\n \"\"\"\n delta = timedelta(hours=1)\n runs = 3\n start_date = DEFAULT_DATE\n end_date = start_date + (runs - 1) * delta\n dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',\n start_date=start_date,\n end_date=end_date,\n schedule_interval=delta)\n dag.add_task(models.BaseOperator(task_id='faketastic',\n owner='Also fake'))\n\n # Create and schedule the dag runs\n dag_runs = []\n scheduler = jobs.SchedulerJob(**self.default_scheduler_args)\n for i in range(runs):\n dag_runs.append(scheduler.create_dag_run(dag))\n\n additional_dag_run = scheduler.create_dag_run(dag)\n\n for dag_run in dag_runs:\n self.assertIsNotNone(dag_run)\n\n self.assertIsNone(additional_dag_run)\n\n @freeze_time('2016-01-01')\n def test_schedule_dag_no_end_date_up_to_today_only(self):\n \"\"\"\n Tests that a Dag created without an end_date can only be scheduled up\n to and including the current datetime.\n\n For example, if today is 2016-01-01 and we are scheduling from a\n start_date of 2015-01-01, only jobs up to, but not including\n 2016-01-01 should be scheduled.\n \"\"\"\n session = settings.Session()\n delta = timedelta(days=1)\n start_date = DEFAULT_DATE\n runs = 365\n dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',\n start_date=start_date,\n schedule_interval=delta)\n dag.add_task(models.BaseOperator(task_id='faketastic',\n owner='Also fake'))\n\n dag_runs = []\n scheduler = jobs.SchedulerJob(**self.default_scheduler_args)\n for i in range(runs):\n dag_run = scheduler.create_dag_run(dag)\n dag_runs.append(dag_run)\n\n # Mark the DagRun as complete\n dag_run.state = State.SUCCESS\n session.merge(dag_run)\n session.commit()\n\n # Attempt to schedule an additional dag run (for 2016-01-01)\n additional_dag_run = scheduler.create_dag_run(dag)\n\n for dag_run in dag_runs:\n self.assertIsNotNone(dag_run)\n\n self.assertIsNone(additional_dag_run)\n\n def test_confirm_unittest_mod(self):\n self.assertTrue(configuration.get('core', 'unit_test_mode'))\n\n def test_pickling(self):\n dp = self.dag.pickle()\n self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)\n\n def test_rich_comparison_ops(self):\n\n class DAGsubclass(DAG):\n pass\n\n dag_eq = DAG(TEST_DAG_ID, default_args=self.args)\n\n dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)\n dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)\n\n dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)\n dag_subclass_diff_name = DAGsubclass(\n TEST_DAG_ID + '2', default_args=self.args)\n\n for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:\n d.last_loaded = self.dag.last_loaded\n\n # test identity equality\n self.assertEqual(self.dag, self.dag)\n\n # test dag (in)equality based on _comps\n self.assertEqual(dag_eq, self.dag)\n self.assertNotEqual(dag_diff_name, self.dag)\n self.assertNotEqual(dag_diff_load_time, self.dag)\n\n # test dag inequality based on type even if _comps happen to match\n self.assertNotEqual(dag_subclass, self.dag)\n\n # a dag should equal an unpickled version of itself\n d = pickle.dumps(self.dag)\n self.assertEqual(pickle.loads(d), self.dag)\n\n # dags are 
ordered based on dag_id no matter what the type is\n self.assertLess(self.dag, dag_diff_name)\n self.assertGreater(self.dag, dag_diff_load_time)\n self.assertLess(self.dag, dag_subclass_diff_name)\n\n # greater than should have been created automatically by functools\n self.assertGreater(dag_diff_name, self.dag)\n\n # hashes are non-random and match equality\n self.assertEqual(hash(self.dag), hash(self.dag))\n self.assertEqual(hash(dag_eq), hash(self.dag))\n self.assertNotEqual(hash(dag_diff_name), hash(self.dag))\n self.assertNotEqual(hash(dag_subclass), hash(self.dag))\n\n def test_check_operators(self):\n\n conn_id = \"sqlite_default\"\n\n captainHook = BaseHook.get_hook(conn_id=conn_id)\n captainHook.run(\"CREATE TABLE operator_test_table (a, b)\")\n captainHook.run(\"insert into operator_test_table values (1,2)\")\n\n t = CheckOperator(\n task_id='check',\n sql=\"select count(*) from operator_test_table\",\n conn_id=conn_id,\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n t = ValueCheckOperator(\n task_id='value_check',\n pass_value=95,\n tolerance=0.1,\n conn_id=conn_id,\n sql=\"SELECT 100\",\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n captainHook.run(\"drop table operator_test_table\")\n\n def test_clear_api(self):\n task = self.dag_bash.tasks[0]\n task.clear(\n start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,\n upstream=True, downstream=True)\n ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)\n ti.are_dependents_done()\n\n def test_illegal_args(self):\n \"\"\"\n Tests that Operators reject illegal arguments\n \"\"\"\n with warnings.catch_warnings(record=True) as w:\n t = BashOperator(\n task_id='test_illegal_args',\n bash_command='echo success',\n dag=self.dag,\n illegal_argument_1234='hello?')\n self.assertTrue(\n issubclass(w[0].category, PendingDeprecationWarning))\n self.assertIn(\n 'Invalid arguments were passed to BashOperator.',\n w[0].message.args[0])\n\n def test_bash_operator(self):\n t = BashOperator(\n task_id='test_bash_operator',\n bash_command=\"echo success\",\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_bash_operator_multi_byte_output(self):\n t = BashOperator(\n task_id='test_multi_byte_bash_operator',\n bash_command=u\"echo \\u2600\",\n dag=self.dag,\n output_encoding='utf-8')\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_bash_operator_kill(self):\n import psutil\n sleep_time = \"100%d\" % os.getpid()\n t = BashOperator(\n task_id='test_bash_operator_kill',\n execution_timeout=timedelta(seconds=1),\n bash_command=\"/bin/bash -c 'sleep %s'\" % sleep_time,\n dag=self.dag)\n self.assertRaises(\n exceptions.AirflowTaskTimeout,\n t.run,\n start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n sleep(2)\n pid = -1\n for proc in psutil.process_iter():\n if proc.cmdline() == ['sleep', sleep_time]:\n pid = proc.pid\n if pid != -1:\n os.kill(pid, signal.SIGTERM)\n self.fail(\"BashOperator's subprocess still running after stopping on timeout!\")\n\n def test_trigger_dagrun(self):\n def trigga(context, obj):\n if True:\n return obj\n\n t = TriggerDagRunOperator(\n task_id='test_trigger_dagrun',\n trigger_dag_id='example_bash_operator',\n python_callable=trigga,\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_dryrun(self):\n t = BashOperator(\n task_id='test_dryrun',\n bash_command=\"echo success\",\n 
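# dry_run() below should just render/log the templated command without executing the task\n 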
dag=self.dag)\n t.dry_run()\n\n def test_sqlite(self):\n import airflow.operators.sqlite_operator\n t = airflow.operators.sqlite_operator.SqliteOperator(\n task_id='time_sqlite',\n sql=\"CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))\",\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_timeout(self):\n t = PythonOperator(\n task_id='test_timeout',\n execution_timeout=timedelta(seconds=1),\n python_callable=lambda: sleep(5),\n dag=self.dag)\n self.assertRaises(\n exceptions.AirflowTaskTimeout,\n t.run,\n start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_python_op(self):\n def test_py_op(templates_dict, ds, **kwargs):\n if not templates_dict['ds'] == ds:\n raise Exception(\"failure\")\n\n t = PythonOperator(\n task_id='test_py_op',\n provide_context=True,\n python_callable=test_py_op,\n templates_dict={'ds': \"{{ ds }}\"},\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_complex_template(self):\n def verify_templated_field(context):\n self.assertEqual(context['ti'].task.some_templated_field['bar'][1], context['ds'])\n\n t = OperatorSubclass(\n task_id='test_complex_template',\n some_templated_field={\n 'foo': '123',\n 'bar': ['baz', '{{ ds }}']\n },\n on_success_callback=verify_templated_field,\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_template_with_variable(self):\n \"\"\"\n Test the availability of variables in templates\n \"\"\"\n val = {\n 'success': False,\n 'test_value': 'a test value'\n }\n Variable.set(\"a_variable\", val['test_value'])\n\n def verify_templated_field(context):\n self.assertEqual(context['ti'].task.some_templated_field,\n val['test_value'])\n val['success'] = True\n\n t = OperatorSubclass(\n task_id='test_complex_template',\n some_templated_field='{{ var.value.a_variable }}',\n on_success_callback=verify_templated_field,\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n self.assertTrue(val['success'])\n\n def test_template_with_json_variable(self):\n \"\"\"\n Test the availability of variables (serialized as JSON) in templates\n \"\"\"\n val = {\n 'success': False,\n 'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}\n }\n Variable.set(\"a_variable\", val['test_value'], serialize_json=True)\n\n def verify_templated_field(context):\n self.assertEqual(context['ti'].task.some_templated_field,\n val['test_value']['obj']['v2'])\n val['success'] = True\n\n t = OperatorSubclass(\n task_id='test_complex_template',\n some_templated_field='{{ var.json.a_variable.obj.v2 }}',\n on_success_callback=verify_templated_field,\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n self.assertTrue(val['success'])\n\n def test_template_with_json_variable_as_value(self):\n \"\"\"\n Test the availability of variables (serialized as JSON) in templates, but\n accessed as a value\n \"\"\"\n val = {\n 'success': False,\n 'test_value': {'foo': 'bar'}\n }\n Variable.set(\"a_variable\", val['test_value'], serialize_json=True)\n\n def verify_templated_field(context):\n self.assertEqual(context['ti'].task.some_templated_field,\n u'{\"foo\": \"bar\"}')\n val['success'] = True\n\n t = OperatorSubclass(\n task_id='test_complex_template',\n some_templated_field='{{ var.value.a_variable }}',\n on_success_callback=verify_templated_field,\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, 
end_date=DEFAULT_DATE, ignore_ti_state=True)\n self.assertTrue(val['success'])\n\n def test_template_non_bool(self):\n \"\"\"\n Test templates can handle objects with no sense of truthiness\n \"\"\"\n\n class NonBoolObject(object):\n def __len__(self):\n return NotImplemented\n\n def __bool__(self):\n return NotImplemented\n\n t = OperatorSubclass(\n task_id='test_bad_template_obj',\n some_templated_field=NonBoolObject(),\n dag=self.dag)\n t.resolve_template_files()\n\n def test_import_examples(self):\n self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)\n\n def test_local_task_job(self):\n TI = models.TaskInstance\n ti = TI(\n task=self.runme_0, execution_date=DEFAULT_DATE)\n job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)\n job.run()\n\n def test_raw_job(self):\n TI = models.TaskInstance\n ti = TI(\n task=self.runme_0, execution_date=DEFAULT_DATE)\n ti.dag = self.dag_bash\n ti.run(ignore_ti_state=True)\n\n def test_doctests(self):\n modules = [utils, macros]\n for mod in modules:\n failed, tests = doctest.testmod(mod)\n if failed:\n raise Exception(\"Failed a doctest\")\n\n def test_variable_set_get_round_trip(self):\n Variable.set(\"tested_var_set_id\", \"Monday morning breakfast\")\n self.assertEqual(\"Monday morning breakfast\", Variable.get(\"tested_var_set_id\"))\n\n def test_variable_set_get_round_trip_json(self):\n value = {\"a\": 17, \"b\": 47}\n Variable.set(\"tested_var_set_id\", value, serialize_json=True)\n self.assertEqual(value, Variable.get(\"tested_var_set_id\", deserialize_json=True))\n\n def test_get_non_existing_var_should_return_default(self):\n default_value = \"some default val\"\n self.assertEqual(default_value, Variable.get(\"thisIdDoesNotExist\",\n default_var=default_value))\n\n def test_get_non_existing_var_should_not_deserialize_json_default(self):\n default_value = \"}{ this is a non JSON default }{\"\n self.assertEqual(default_value, Variable.get(\"thisIdDoesNotExist\",\n default_var=default_value,\n deserialize_json=True))\n\n def test_variable_setdefault_round_trip(self):\n key = \"tested_var_setdefault_1_id\"\n value = \"Monday morning breakfast in Paris\"\n Variable.setdefault(key, value)\n self.assertEqual(value, Variable.get(key))\n\n def test_variable_setdefault_round_trip_json(self):\n key = \"tested_var_setdefault_2_id\"\n value = {\"city\": 'Paris', \"Hapiness\": True}\n Variable.setdefault(key, value, deserialize_json=True)\n self.assertEqual(value, Variable.get(key, deserialize_json=True))\n\n def test_variable_setdefault_existing_json(self):\n key = \"tested_var_setdefault_2_id\"\n value = {\"city\": 'Paris', \"Hapiness\": True}\n Variable.set(key, value, serialize_json=True)\n val = Variable.setdefault(key, value, deserialize_json=True)\n # Check the returned value, and the stored value are handled correctly.\n self.assertEqual(value, val)\n self.assertEqual(value, Variable.get(key, deserialize_json=True))\n\n def test_parameterized_config_gen(self):\n\n cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)\n\n # making sure some basic building blocks are present:\n self.assertIn(\"[core]\", cfg)\n self.assertIn(\"dags_folder\", cfg)\n self.assertIn(\"sql_alchemy_conn\", cfg)\n self.assertIn(\"fernet_key\", cfg)\n\n # making sure replacement actually happened\n self.assertNotIn(\"{AIRFLOW_HOME}\", cfg)\n self.assertNotIn(\"{FERNET_KEY}\", cfg)\n\n def test_config_use_original_when_original_and_fallback_are_present(self):\n self.assertTrue(configuration.has_option(\"core\", \"FERNET_KEY\"))\n 
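# FERNET_KEY exists but FERNET_KEY_CMD does not; after a _CMD fallback is added below, the original FERNET_KEY value should still win\n 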
self.assertFalse(configuration.has_option(\"core\", \"FERNET_KEY_CMD\"))\n\n FERNET_KEY = configuration.get('core', 'FERNET_KEY')\n\n configuration.set(\"core\", \"FERNET_KEY_CMD\", \"printf HELLO\")\n\n FALLBACK_FERNET_KEY = configuration.get(\n \"core\",\n \"FERNET_KEY\"\n )\n\n self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)\n\n # restore the conf back to the original state\n configuration.remove_option(\"core\", \"FERNET_KEY_CMD\")\n\n def test_config_throw_error_when_original_and_fallback_is_absent(self):\n self.assertTrue(configuration.has_option(\"core\", \"FERNET_KEY\"))\n self.assertFalse(configuration.has_option(\"core\", \"FERNET_KEY_CMD\"))\n\n FERNET_KEY = configuration.get(\"core\", \"FERNET_KEY\")\n configuration.remove_option(\"core\", \"FERNET_KEY\")\n\n with self.assertRaises(AirflowConfigException) as cm:\n configuration.get(\"core\", \"FERNET_KEY\")\n\n exception = str(cm.exception)\n message = \"section/key [core/fernet_key] not found in config\"\n self.assertEqual(message, exception)\n\n # restore the conf back to the original state\n configuration.set(\"core\", \"FERNET_KEY\", FERNET_KEY)\n self.assertTrue(configuration.has_option(\"core\", \"FERNET_KEY\"))\n\n def test_config_override_original_when_non_empty_envvar_is_provided(self):\n key = \"AIRFLOW__CORE__FERNET_KEY\"\n value = \"some value\"\n self.assertNotIn(key, os.environ)\n\n os.environ[key] = value\n FERNET_KEY = configuration.get('core', 'FERNET_KEY')\n self.assertEqual(value, FERNET_KEY)\n\n # restore the envvar back to the original state\n del os.environ[key]\n\n def test_config_override_original_when_empty_envvar_is_provided(self):\n key = \"AIRFLOW__CORE__FERNET_KEY\"\n value = \"\"\n self.assertNotIn(key, os.environ)\n\n os.environ[key] = value\n FERNET_KEY = configuration.get('core', 'FERNET_KEY')\n self.assertEqual(value, FERNET_KEY)\n\n # restore the envvar back to the original state\n del os.environ[key]\n\n def test_round_time(self):\n\n rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))\n self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)\n\n rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))\n self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)\n\n rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(\n 2015, 9, 14, 0, 0))\n self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)\n\n rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(\n 2015, 9, 14, 0, 0))\n self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)\n\n rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(\n 2015, 9, 14, 0, 0))\n self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)\n\n rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(\n 2015, 9, 14, 0, 0))\n self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)\n\n def test_infer_time_unit(self):\n\n self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))\n\n self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))\n\n self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))\n\n self.assertEqual('days', infer_time_unit([200000, 100000]))\n\n def test_scale_time_units(self):\n\n # use assert_almost_equal from numpy.testing since we are comparing\n # floating point arrays\n arr1 = scale_time_units([130, 5400, 10], 'minutes')\n assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)\n\n arr2 = scale_time_units([110, 50, 10, 100], 'seconds')\n assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)\n\n arr3 = scale_time_units([100000, 50000, 10000, 
20000], 'hours')\n assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],\n decimal=3)\n\n arr4 = scale_time_units([200000, 100000], 'days')\n assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)\n\n def test_duplicate_dependencies(self):\n\n regexp = \"Dependency (.*)runme_0(.*)run_after_loop(.*) \" \\\n \"already registered\"\n\n with self.assertRaisesRegexp(AirflowException, regexp):\n self.runme_0.set_downstream(self.run_after_loop)\n\n with self.assertRaisesRegexp(AirflowException, regexp):\n self.run_after_loop.set_upstream(self.runme_0)\n\n def test_cyclic_dependencies_1(self):\n\n regexp = \"Cycle detected in DAG. (.*)runme_0(.*)\"\n with self.assertRaisesRegexp(AirflowException, regexp):\n self.runme_0.set_upstream(self.run_after_loop)\n\n def test_cyclic_dependencies_2(self):\n regexp = \"Cycle detected in DAG. (.*)run_after_loop(.*)\"\n with self.assertRaisesRegexp(AirflowException, regexp):\n self.run_after_loop.set_downstream(self.runme_0)\n\n def test_cyclic_dependencies_3(self):\n regexp = \"Cycle detected in DAG. (.*)run_this_last(.*)\"\n with self.assertRaisesRegexp(AirflowException, regexp):\n self.run_this_last.set_downstream(self.runme_0)\n\n def test_bad_trigger_rule(self):\n with self.assertRaises(AirflowException):\n DummyOperator(\n task_id='test_bad_trigger',\n trigger_rule=\"non_existant\",\n dag=self.dag)\n\n def test_terminate_task(self):\n \"\"\"If a task instance's db state get deleted, it should fail\"\"\"\n TI = models.TaskInstance\n dag = self.dagbag.dags.get('test_utils')\n task = dag.task_dict.get('sleeps_forever')\n\n ti = TI(task=task, execution_date=DEFAULT_DATE)\n job = jobs.LocalTaskJob(\n task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())\n\n # Running task instance asynchronously\n p = multiprocessing.Process(target=job.run)\n p.start()\n sleep(5)\n settings.engine.dispose()\n session = settings.Session()\n ti.refresh_from_db(session=session)\n # making sure it's actually running\n self.assertEqual(State.RUNNING, ti.state)\n ti = session.query(TI).filter_by(\n dag_id=task.dag_id,\n task_id=task.task_id,\n execution_date=DEFAULT_DATE\n ).one()\n\n # deleting the instance should result in a failure\n session.delete(ti)\n session.commit()\n # waiting for the async task to finish\n p.join()\n\n # making sure that the task ended up as failed\n ti.refresh_from_db(session=session)\n self.assertEqual(State.FAILED, ti.state)\n session.close()\n\n def test_task_fail_duration(self):\n \"\"\"If a task fails, the duration should be recorded in TaskFail\"\"\"\n\n p = BashOperator(\n task_id='pass_sleepy',\n bash_command='sleep 3',\n dag=self.dag)\n f = BashOperator(\n task_id='fail_sleepy',\n bash_command='sleep 5',\n execution_timeout=timedelta(seconds=3),\n retry_delay=timedelta(seconds=0),\n dag=self.dag)\n session = settings.Session()\n try:\n p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n except:\n pass\n try:\n f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n except:\n pass\n p_fails = session.query(models.TaskFail).filter_by(\n task_id='pass_sleepy',\n dag_id=self.dag.dag_id,\n execution_date=DEFAULT_DATE).all()\n f_fails = session.query(models.TaskFail).filter_by(\n task_id='fail_sleepy',\n dag_id=self.dag.dag_id,\n execution_date=DEFAULT_DATE).all()\n print(f_fails)\n self.assertEqual(0, len(p_fails))\n self.assertEqual(1, len(f_fails))\n # C\n self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)\n\n def test_dag_stats(self):\n \"\"\"Correctly 
sets/dirties/cleans rows of DagStat table\"\"\"\n\n session = settings.Session()\n\n session.query(models.DagRun).delete()\n session.query(models.DagStat).delete()\n session.commit()\n\n models.DagStat.update([], session=session)\n\n run1 = self.dag_bash.create_dagrun(\n run_id=\"run1\",\n execution_date=DEFAULT_DATE,\n state=State.RUNNING)\n\n models.DagStat.update([self.dag_bash.dag_id], session=session)\n\n qry = session.query(models.DagStat).all()\n\n self.assertEqual(3, len(qry))\n self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)\n for stats in qry:\n if stats.state == State.RUNNING:\n self.assertEqual(stats.count, 1)\n else:\n self.assertEqual(stats.count, 0)\n self.assertFalse(stats.dirty)\n\n run2 = self.dag_bash.create_dagrun(\n run_id=\"run2\",\n execution_date=DEFAULT_DATE + timedelta(days=1),\n state=State.RUNNING)\n\n models.DagStat.update([self.dag_bash.dag_id], session=session)\n\n qry = session.query(models.DagStat).all()\n\n self.assertEqual(3, len(qry))\n self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)\n for stats in qry:\n if stats.state == State.RUNNING:\n self.assertEqual(stats.count, 2)\n else:\n self.assertEqual(stats.count, 0)\n self.assertFalse(stats.dirty)\n\n session.query(models.DagRun).first().state = State.SUCCESS\n session.commit()\n\n models.DagStat.update([self.dag_bash.dag_id], session=session)\n\n qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()\n self.assertEqual(1, len(qry))\n self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)\n self.assertEqual(State.SUCCESS, qry[0].state)\n self.assertEqual(1, qry[0].count)\n self.assertFalse(qry[0].dirty)\n\n qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()\n self.assertEqual(1, len(qry))\n self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)\n self.assertEqual(State.RUNNING, qry[0].state)\n self.assertEqual(1, qry[0].count)\n self.assertFalse(qry[0].dirty)\n\n session.query(models.DagRun).delete()\n session.query(models.DagStat).delete()\n session.commit()\n session.close()\n\n def test_run_command(self):\n if six.PY3:\n write = r'sys.stdout.buffer.write(\"\\u1000foo\".encode(\"utf8\"))'\n else:\n write = r'sys.stdout.write(u\"\\u1000foo\".encode(\"utf8\"))'\n\n cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)\n\n self.assertEqual(run_command(\"python -c '{0}'\".format(cmd)),\n u'\\u1000foo' if six.PY3 else 'foo')\n\n self.assertEqual(run_command('echo \"foo bar\"'), u'foo bar\\n')\n self.assertRaises(AirflowConfigException, run_command, 'bash -c \"exit 1\"')\n\n\nclass CliTests(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(CliTests, cls).setUpClass()\n cls._cleanup()\n\n def setUp(self):\n super(CliTests, self).setUp()\n configuration.load_test_config()\n app = application.create_app()\n app.config['TESTING'] = True\n self.parser = cli.CLIFactory.get_parser()\n self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)\n self.session = Session()\n\n def tearDown(self):\n self._cleanup(session=self.session)\n super(CliTests, self).tearDown()\n\n @staticmethod\n def _cleanup(session=None):\n if session is None:\n session = Session()\n\n session.query(models.Pool).delete()\n session.query(models.Variable).delete()\n session.commit()\n session.close()\n\n def test_cli_list_dags(self):\n args = self.parser.parse_args(['list_dags', '--report'])\n cli.list_dags(args)\n\n def test_cli_list_tasks(self):\n for dag_id in self.dagbag.dags.keys():\n args = self.parser.parse_args(['list_tasks', 
dag_id])\n cli.list_tasks(args)\n\n args = self.parser.parse_args([\n 'list_tasks', 'example_bash_operator', '--tree'])\n cli.list_tasks(args)\n\n @mock.patch(\"airflow.bin.cli.db_utils.initdb\")\n def test_cli_initdb(self, initdb_mock):\n cli.initdb(self.parser.parse_args(['initdb']))\n\n initdb_mock.assert_called_once_with()\n\n @mock.patch(\"airflow.bin.cli.db_utils.resetdb\")\n def test_cli_resetdb(self, resetdb_mock):\n cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))\n\n resetdb_mock.assert_called_once_with()\n\n def test_cli_connections_list(self):\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(['connections', '--list']))\n stdout = mock_stdout.getvalue()\n conns = [[x.strip(\"'\") for x in re.findall(\"'\\w+'\", line)[:2]]\n for ii, line in enumerate(stdout.split('\\n'))\n if ii % 2 == 1]\n conns = [conn for conn in conns if len(conn) > 0]\n\n # Assert that some of the connections are present in the output as\n # expected:\n self.assertIn(['aws_default', 'aws'], conns)\n self.assertIn(['beeline_default', 'beeline'], conns)\n self.assertIn(['bigquery_default', 'bigquery'], conns)\n self.assertIn(['emr_default', 'emr'], conns)\n self.assertIn(['mssql_default', 'mssql'], conns)\n self.assertIn(['mysql_default', 'mysql'], conns)\n self.assertIn(['postgres_default', 'postgres'], conns)\n self.assertIn(['wasb_default', 'wasb'], conns)\n\n # Attempt to list connections with invalid cli args\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',\n '--conn_type=fake-type', '--conn_host=fake_host',\n '--conn_login=fake_login', '--conn_password=fake_password',\n '--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))\n stdout = mock_stdout.getvalue()\n\n # Check list attempt stdout\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n (\"\\tThe following args are not compatible with the \" +\n \"--list flag: ['conn_id', 'conn_uri', 'conn_extra', 'conn_type', 'conn_host', 'conn_login', 'conn_password', 'conn_schema', 'conn_port']\"),\n ])\n\n def test_cli_connections_add_delete(self):\n # Add connections:\n uri = 'postgresql://airflow:airflow@host:5432/airflow'\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--add', '--conn_id=new1',\n '--conn_uri=%s' % uri]))\n cli.connections(self.parser.parse_args(\n ['connections', '-a', '--conn_id=new2',\n '--conn_uri=%s' % uri]))\n cli.connections(self.parser.parse_args(\n ['connections', '--add', '--conn_id=new3',\n '--conn_uri=%s' % uri, '--conn_extra', \"{'extra': 'yes'}\"]))\n cli.connections(self.parser.parse_args(\n ['connections', '-a', '--conn_id=new4',\n '--conn_uri=%s' % uri, '--conn_extra', \"{'extra': 'yes'}\"]))\n cli.connections(self.parser.parse_args(\n ['connections', '--add', '--conn_id=new5',\n '--conn_type=hive_metastore', '--conn_login=airflow',\n '--conn_password=airflow', '--conn_host=host',\n '--conn_port=9083', '--conn_schema=airflow']))\n cli.connections(self.parser.parse_args(\n ['connections', '-a', '--conn_id=new6',\n '--conn_uri', \"\", '--conn_type=google_cloud_platform', '--conn_extra', \"{'extra': 'yes'}\"]))\n stdout = mock_stdout.getvalue()\n\n # Check addition stdout\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n 
(\"\\tSuccessfully added `conn_id`=new1 : \" +\n \"postgresql://airflow:airflow@host:5432/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new2 : \" +\n \"postgresql://airflow:airflow@host:5432/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new3 : \" +\n \"postgresql://airflow:airflow@host:5432/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new4 : \" +\n \"postgresql://airflow:airflow@host:5432/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new5 : \" +\n \"hive_metastore://airflow:airflow@host:9083/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new6 : \" +\n \"google_cloud_platform://:@:\")\n ])\n\n # Attempt to add duplicate\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--add', '--conn_id=new1',\n '--conn_uri=%s' % uri]))\n stdout = mock_stdout.getvalue()\n\n # Check stdout for addition attempt\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n \"\\tA connection with `conn_id`=new1 already exists\",\n ])\n\n # Attempt to add without providing conn_id\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--add', '--conn_uri=%s' % uri]))\n stdout = mock_stdout.getvalue()\n\n # Check stdout for addition attempt\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n (\"\\tThe following args are required to add a connection:\" +\n \" ['conn_id']\"),\n ])\n\n # Attempt to add without providing conn_uri\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--add', '--conn_id=new']))\n stdout = mock_stdout.getvalue()\n\n # Check stdout for addition attempt\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n (\"\\tThe following args are required to add a connection:\" +\n \" ['conn_uri or conn_type']\"),\n ])\n\n # Prepare to add connections\n session = settings.Session()\n extra = {'new1': None,\n 'new2': None,\n 'new3': \"{'extra': 'yes'}\",\n 'new4': \"{'extra': 'yes'}\"}\n\n # Add connections\n for index in range(1, 6):\n conn_id = 'new%s' % index\n result = (session\n .query(models.Connection)\n .filter(models.Connection.conn_id == conn_id)\n .first())\n result = (result.conn_id, result.conn_type, result.host,\n result.port, result.get_extra())\n if conn_id in ['new1', 'new2', 'new3', 'new4']:\n self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,\n extra[conn_id]))\n elif conn_id == 'new5':\n self.assertEqual(result, (conn_id, 'hive_metastore', 'host',\n 9083, None))\n elif conn_id == 'new6':\n self.assertEqual(result, (conn_id, 'google_cloud_platform',\n None, None, \"{'extra': 'yes'}\"))\n\n # Delete connections\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=new1']))\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=new2']))\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=new3']))\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=new4']))\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=new5']))\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=new6']))\n stdout = mock_stdout.getvalue()\n\n # Check deletion stdout\n lines = [l for 
l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n \"\\tSuccessfully deleted `conn_id`=new1\",\n \"\\tSuccessfully deleted `conn_id`=new2\",\n \"\\tSuccessfully deleted `conn_id`=new3\",\n \"\\tSuccessfully deleted `conn_id`=new4\",\n \"\\tSuccessfully deleted `conn_id`=new5\",\n \"\\tSuccessfully deleted `conn_id`=new6\"\n ])\n\n # Check deletions\n for index in range(1, 7):\n conn_id = 'new%s' % index\n result = (session.query(models.Connection)\n .filter(models.Connection.conn_id == conn_id)\n .first())\n\n self.assertTrue(result is None)\n\n # Attempt to delete a non-existing connnection\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=fake']))\n stdout = mock_stdout.getvalue()\n\n # Check deletion attempt stdout\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n \"\\tDid not find a connection with `conn_id`=fake\",\n ])\n\n # Attempt to delete with invalid cli args\n with mock.patch('sys.stdout',\n new_callable=six.StringIO) as mock_stdout:\n cli.connections(self.parser.parse_args(\n ['connections', '--delete', '--conn_id=fake',\n '--conn_uri=%s' % uri, '--conn_type=fake-type']))\n stdout = mock_stdout.getvalue()\n\n # Check deletion attempt stdout\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n (\"\\tThe following args are not compatible with the \" +\n \"--delete flag: ['conn_uri', 'conn_type']\"),\n ])\n\n session.close()\n\n def test_cli_test(self):\n cli.test(self.parser.parse_args([\n 'test', 'example_bash_operator', 'runme_0',\n DEFAULT_DATE.isoformat()]))\n cli.test(self.parser.parse_args([\n 'test', 'example_bash_operator', 'runme_0', '--dry_run',\n DEFAULT_DATE.isoformat()]))\n\n def test_cli_test_with_params(self):\n cli.test(self.parser.parse_args([\n 'test', 'example_passing_params_via_test_command', 'run_this',\n '-tp', '{\"foo\":\"bar\"}', DEFAULT_DATE.isoformat()]))\n cli.test(self.parser.parse_args([\n 'test', 'example_passing_params_via_test_command', 'also_run_this',\n '-tp', '{\"foo\":\"bar\"}', DEFAULT_DATE.isoformat()]))\n\n def test_cli_run(self):\n cli.run(self.parser.parse_args([\n 'run', 'example_bash_operator', 'runme_0', '-l',\n DEFAULT_DATE.isoformat()]))\n\n def test_task_state(self):\n cli.task_state(self.parser.parse_args([\n 'task_state', 'example_bash_operator', 'runme_0',\n DEFAULT_DATE.isoformat()]))\n\n def test_dag_state(self):\n self.assertEqual(None, cli.dag_state(self.parser.parse_args([\n 'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))\n\n def test_pause(self):\n args = self.parser.parse_args([\n 'pause', 'example_bash_operator'])\n cli.pause(args)\n self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])\n\n args = self.parser.parse_args([\n 'unpause', 'example_bash_operator'])\n cli.unpause(args)\n self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])\n\n def test_subdag_clear(self):\n args = self.parser.parse_args([\n 'clear', 'example_subdag_operator', '--no_confirm'])\n cli.clear(args)\n args = self.parser.parse_args([\n 'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])\n cli.clear(args)\n\n def test_get_dags(self):\n dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c']))\n self.assertEqual(len(dags), 1)\n\n dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))\n 
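# '-dx' appears to match 'subdag' as a pattern rather than an exact dag_id, so the subdag-operator example and its child DAGs should all be returned\n 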
self.assertGreater(len(dags), 1)\n\n with self.assertRaises(AirflowException):\n cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))\n\n def test_backfill(self):\n cli.backfill(self.parser.parse_args([\n 'backfill', 'example_bash_operator',\n '-s', DEFAULT_DATE.isoformat()]))\n\n cli.backfill(self.parser.parse_args([\n 'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',\n '-s', DEFAULT_DATE.isoformat()]))\n\n cli.backfill(self.parser.parse_args([\n 'backfill', 'example_bash_operator', '--dry_run',\n '-s', DEFAULT_DATE.isoformat()]))\n\n cli.backfill(self.parser.parse_args([\n 'backfill', 'example_bash_operator', '-l',\n '-s', DEFAULT_DATE.isoformat()]))\n\n def test_process_subdir_path_with_placeholder(self):\n self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))\n\n def test_trigger_dag(self):\n cli.trigger_dag(self.parser.parse_args([\n 'trigger_dag', 'example_bash_operator',\n '-c', '{\"foo\": \"bar\"}']))\n self.assertRaises(\n ValueError,\n cli.trigger_dag,\n self.parser.parse_args([\n 'trigger_dag', 'example_bash_operator',\n '--run_id', 'trigger_dag_xxx',\n '-c', 'NOT JSON'])\n )\n\n def test_delete_dag(self):\n DM = models.DagModel\n key = \"my_dag_id\"\n session = settings.Session()\n session.add(DM(dag_id=key))\n session.commit()\n cli.delete_dag(self.parser.parse_args([\n 'delete_dag', key, '--yes']))\n self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)\n self.assertRaises(\n AirflowException,\n cli.delete_dag,\n self.parser.parse_args([\n 'delete_dag',\n 'does_not_exist_dag',\n '--yes'])\n )\n\n def test_pool_create(self):\n cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))\n self.assertEqual(self.session.query(models.Pool).count(), 1)\n\n def test_pool_get(self):\n cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))\n try:\n cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))\n except Exception as e:\n self.fail(\"The 'pool -g foo' command raised unexpectedly: %s\" % e)\n\n def test_pool_delete(self):\n cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))\n cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))\n self.assertEqual(self.session.query(models.Pool).count(), 0)\n\n def test_pool_no_args(self):\n try:\n cli.pool(self.parser.parse_args(['pool']))\n except Exception as e:\n self.fail(\"The 'pool' command raised unexpectedly: %s\" % e)\n\n def test_variables(self):\n # Checks if all subcommands are properly received\n cli.variables(self.parser.parse_args([\n 'variables', '-s', 'foo', '{\"foo\":\"bar\"}']))\n cli.variables(self.parser.parse_args([\n 'variables', '-g', 'foo']))\n cli.variables(self.parser.parse_args([\n 'variables', '-g', 'baz', '-d', 'bar']))\n cli.variables(self.parser.parse_args([\n 'variables']))\n cli.variables(self.parser.parse_args([\n 'variables', '-x', 'bar']))\n cli.variables(self.parser.parse_args([\n 'variables', '-i', DEV_NULL]))\n cli.variables(self.parser.parse_args([\n 'variables', '-e', DEV_NULL]))\n\n cli.variables(self.parser.parse_args([\n 'variables', '-s', 'bar', 'original']))\n # First export\n cli.variables(self.parser.parse_args([\n 'variables', '-e', 'variables1.json']))\n\n first_exp = open('variables1.json', 'r')\n\n cli.variables(self.parser.parse_args([\n 'variables', '-s', 'bar', 'updated']))\n cli.variables(self.parser.parse_args([\n 'variables', '-s', 'foo', '{\"foo\":\"oops\"}']))\n cli.variables(self.parser.parse_args([\n 'variables', '-x', 'foo']))\n # First import\n 
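# importing the first export should put 'bar' back to 'original' and re-create the deleted 'foo', which the assertions below verify\n 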
cli.variables(self.parser.parse_args([\n 'variables', '-i', 'variables1.json']))\n\n self.assertEqual('original', models.Variable.get('bar'))\n self.assertEqual('{\"foo\": \"bar\"}', models.Variable.get('foo'))\n # Second export\n cli.variables(self.parser.parse_args([\n 'variables', '-e', 'variables2.json']))\n\n second_exp = open('variables2.json', 'r')\n self.assertEqual(first_exp.read(), second_exp.read())\n second_exp.close()\n first_exp.close()\n # Second import\n cli.variables(self.parser.parse_args([\n 'variables', '-i', 'variables2.json']))\n\n self.assertEqual('original', models.Variable.get('bar'))\n self.assertEqual('{\"foo\": \"bar\"}', models.Variable.get('foo'))\n\n os.remove('variables1.json')\n os.remove('variables2.json')\n\n def _wait_pidfile(self, pidfile):\n while True:\n try:\n with open(pidfile) as f:\n return int(f.read())\n except:\n sleep(1)\n\n def test_cli_webserver_foreground(self):\n import subprocess\n\n # Confirm that webserver hasn't been launched.\n # pgrep returns exit status 1 if no process matched.\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n # Run webserver in foreground and terminate it.\n p = subprocess.Popen([\"airflow\", \"webserver\"])\n p.terminate()\n p.wait()\n\n # Assert that no process remains.\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n @unittest.skipIf(\"TRAVIS\" in os.environ and bool(os.environ[\"TRAVIS\"]),\n \"Skipping test due to lack of required file permission\")\n def test_cli_webserver_foreground_with_pid(self):\n import subprocess\n\n # Run webserver in foreground with --pid option\n pidfile = tempfile.mkstemp()[1]\n p = subprocess.Popen([\"airflow\", \"webserver\", \"--pid\", pidfile])\n\n # Check the file specified by --pid option exists\n self._wait_pidfile(pidfile)\n\n # Terminate webserver\n p.terminate()\n p.wait()\n\n @unittest.skipIf(\"TRAVIS\" in os.environ and bool(os.environ[\"TRAVIS\"]),\n \"Skipping test due to lack of required file permission\")\n def test_cli_webserver_background(self):\n import subprocess\n import psutil\n\n # Confirm that webserver hasn't been launched.\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n # Run webserver in background.\n subprocess.Popen([\"airflow\", \"webserver\", \"-D\"])\n pidfile = cli.setup_locations(\"webserver\")[0]\n self._wait_pidfile(pidfile)\n\n # Assert that gunicorn and its monitor are launched.\n self.assertEqual(0, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(0, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n # Terminate monitor process.\n pidfile = cli.setup_locations(\"webserver-monitor\")[0]\n pid = self._wait_pidfile(pidfile)\n p = psutil.Process(pid)\n p.terminate()\n p.wait()\n\n # Assert that no process remains.\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n\nclass SecurityTests(unittest.TestCase):\n def setUp(self):\n configuration.load_test_config()\n configuration.conf.set(\"webserver\", \"authenticate\", \"False\")\n configuration.conf.set(\"webserver\", \"expose_config\", \"True\")\n app = application.create_app()\n 
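# Flask's TESTING flag makes the test client propagate exceptions instead of rendering error pages\n 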
app.config['TESTING'] = True\n self.app = app.test_client()\n\n self.dagbag = models.DagBag(\n dag_folder=DEV_NULL, include_examples=True)\n self.dag_bash = self.dagbag.dags['example_bash_operator']\n self.runme_0 = self.dag_bash.get_task('runme_0')\n\n def get_csrf(self, response):\n tree = html.fromstring(response.data)\n form = tree.find('.//form')\n\n return form.find('.//input[@name=\"_csrf_token\"]').value\n\n def test_csrf_rejection(self):\n endpoints = ([\n \"/admin/queryview/\",\n \"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false\",\n ])\n for endpoint in endpoints:\n response = self.app.post(endpoint)\n self.assertIn('CSRF token is missing', response.data.decode('utf-8'))\n\n def test_csrf_acceptance(self):\n response = self.app.get(\"/admin/queryview/\")\n csrf = self.get_csrf(response)\n response = self.app.post(\"/admin/queryview/\", data=dict(csrf_token=csrf))\n self.assertEqual(200, response.status_code)\n\n def test_xss(self):\n try:\n self.app.get(\"/admin/airflow/tree?dag_id=<script>alert(123456)</script>\")\n except:\n # exception is expected here since dag doesnt exist\n pass\n response = self.app.get(\"/admin/log\", follow_redirects=True)\n self.assertIn(bleach.clean(\"<script>alert(123456)</script>\"), response.data.decode('UTF-8'))\n\n def test_chart_data_template(self):\n \"\"\"Protect chart_data from being able to do RCE.\"\"\"\n session = settings.Session()\n Chart = models.Chart\n chart1 = Chart(\n label='insecure_chart',\n conn_id='airflow_db',\n chart_type='bar',\n sql=\"SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}\"\n )\n chart2 = Chart(\n label=\"{{ ''.__class__.__mro__[1].__subclasses__() }}\",\n conn_id='airflow_db',\n chart_type='bar',\n sql=\"SELECT 1\"\n )\n chart3 = Chart(\n label=\"{{ subprocess.check_output('ls') }}\",\n conn_id='airflow_db',\n chart_type='bar',\n sql=\"SELECT 1\"\n )\n session.add(chart1)\n session.add(chart2)\n session.add(chart3)\n session.commit()\n chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()\n with self.assertRaises(SecurityError):\n self.app.get(\"/admin/airflow/chart_data?chart_id={}\".format(chart1.id))\n\n chart2 = session.query(Chart).filter(\n Chart.label == \"{{ ''.__class__.__mro__[1].__subclasses__() }}\"\n ).first()\n with self.assertRaises(SecurityError):\n self.app.get(\"/admin/airflow/chart_data?chart_id={}\".format(chart2.id))\n\n chart3 = session.query(Chart).filter(\n Chart.label == \"{{ subprocess.check_output('ls') }}\"\n ).first()\n with self.assertRaises(UndefinedError):\n self.app.get(\"/admin/airflow/chart_data?chart_id={}\".format(chart3.id))\n\n def tearDown(self):\n configuration.conf.set(\"webserver\", \"expose_config\", \"False\")\n self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())\n\n\nclass WebUiTests(unittest.TestCase):\n def setUp(self):\n configuration.load_test_config()\n configuration.conf.set(\"webserver\", \"authenticate\", \"False\")\n configuration.conf.set(\"webserver\", \"expose_config\", \"True\")\n app = application.create_app()\n app.config['TESTING'] = True\n app.config['WTF_CSRF_METHODS'] = []\n self.app = app.test_client()\n\n self.dagbag = models.DagBag(include_examples=True)\n self.dag_bash = self.dagbag.dags['example_bash_operator']\n self.dag_bash2 = self.dagbag.dags['test_example_bash_operator']\n self.sub_dag = self.dagbag.dags['example_subdag_operator']\n self.runme_0 = self.dag_bash.get_task('runme_0')\n self.example_xcom = self.dagbag.dags['example_xcom']\n\n self.dagrun_bash2 = 
self.dag_bash2.create_dagrun(\n run_id=\"test_{}\".format(models.DagRun.id_for_date(timezone.utcnow())),\n execution_date=DEFAULT_DATE,\n start_date=timezone.utcnow(),\n state=State.RUNNING\n )\n\n self.sub_dag.create_dagrun(\n run_id=\"test_{}\".format(models.DagRun.id_for_date(timezone.utcnow())),\n execution_date=DEFAULT_DATE,\n start_date=timezone.utcnow(),\n state=State.RUNNING\n )\n\n self.example_xcom.create_dagrun(\n run_id=\"test_{}\".format(models.DagRun.id_for_date(timezone.utcnow())),\n execution_date=DEFAULT_DATE,\n start_date=timezone.utcnow(),\n state=State.RUNNING\n )\n\n def test_index(self):\n response = self.app.get('/', follow_redirects=True)\n resp_html = response.data.decode('utf-8')\n self.assertIn(\"DAGs\", resp_html)\n self.assertIn(\"example_bash_operator\", resp_html)\n\n # The HTML should contain data for the last-run. A link to the specific run, and the text of\n # the date.\n url = \"/admin/airflow/graph?\" + urlencode({\n \"dag_id\": self.dag_bash2.dag_id,\n \"execution_date\": self.dagrun_bash2.execution_date,\n }).replace(\"&\", \"&amp;\")\n self.assertIn(url, resp_html)\n self.assertIn(self.dagrun_bash2.execution_date.strftime(\"%Y-%m-%d %H:%M\"), resp_html)\n\n def test_query(self):\n response = self.app.get('/admin/queryview/')\n self.assertIn(\"Ad Hoc Query\", response.data.decode('utf-8'))\n response = self.app.post(\n \"/admin/queryview/\", data=dict(\n conn_id=\"airflow_db\",\n sql=\"SELECT+COUNT%281%29+as+TEST+FROM+task_instance\"))\n self.assertIn(\"TEST\", response.data.decode('utf-8'))\n\n def test_health(self):\n response = self.app.get('/health')\n self.assertIn('The server is healthy!', response.data.decode('utf-8'))\n\n def test_noaccess(self):\n response = self.app.get('/admin/airflow/noaccess')\n self.assertIn(\"You don't seem to have access.\", response.data.decode('utf-8'))\n\n def test_pickle_info(self):\n response = self.app.get('/admin/airflow/pickle_info')\n self.assertIn('{', response.data.decode('utf-8'))\n\n def test_dag_views(self):\n response = self.app.get(\n '/admin/airflow/graph?dag_id=example_bash_operator')\n self.assertIn(\"runme_0\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')\n self.assertIn(\"runme_0\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/duration?days=30&dag_id=example_bash_operator')\n self.assertIn(\"example_bash_operator\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/tries?days=30&dag_id=example_bash_operator')\n self.assertIn(\"example_bash_operator\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/landing_times?'\n 'days=30&dag_id=test_example_bash_operator')\n self.assertIn(\"test_example_bash_operator\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/landing_times?'\n 'days=30&dag_id=example_xcom')\n self.assertIn(\"example_xcom\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/gantt?dag_id=example_bash_operator')\n self.assertIn(\"example_bash_operator\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/code?dag_id=example_bash_operator')\n self.assertIn(\"example_bash_operator\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/blocked')\n response = self.app.get(\n '/admin/configurationview/')\n self.assertIn(\"Airflow Configuration\", response.data.decode('utf-8'))\n self.assertIn(\"Running Configuration\", 
response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/rendered?'\n 'task_id=runme_1&dag_id=example_bash_operator&'\n 'execution_date={}'.format(DEFAULT_DATE_ISO))\n self.assertIn(\"example_bash_operator\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/log?task_id=run_this_last&'\n 'dag_id=example_bash_operator&execution_date={}'\n ''.format(DEFAULT_DATE_ISO))\n self.assertIn(\"run_this_last\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/task?'\n 'task_id=runme_0&dag_id=example_bash_operator&'\n 'execution_date={}'.format(DEFAULT_DATE_DS))\n self.assertIn(\"Attributes\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/dag_stats')\n self.assertIn(\"example_bash_operator\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/task_stats')\n self.assertIn(\"example_bash_operator\", response.data.decode('utf-8'))\n url = (\n \"/admin/airflow/success?task_id=run_this_last&\"\n \"dag_id=test_example_bash_operator&upstream=false&downstream=false&\"\n \"future=false&past=false&execution_date={}&\"\n \"origin=/admin\".format(DEFAULT_DATE_DS))\n response = self.app.get(url)\n self.assertIn(\"Wait a minute\", response.data.decode('utf-8'))\n response = self.app.get(url + \"&confirmed=true\")\n response = self.app.get(\n '/admin/airflow/clear?task_id=run_this_last&'\n 'dag_id=test_example_bash_operator&future=true&past=false&'\n 'upstream=true&downstream=false&'\n 'execution_date={}&'\n 'origin=/admin'.format(DEFAULT_DATE_DS))\n self.assertIn(\"Wait a minute\", response.data.decode('utf-8'))\n url = (\n \"/admin/airflow/success?task_id=section-1&\"\n \"dag_id=example_subdag_operator&upstream=true&downstream=true&\"\n \"future=false&past=false&execution_date={}&\"\n \"origin=/admin\".format(DEFAULT_DATE_DS))\n response = self.app.get(url)\n self.assertIn(\"Wait a minute\", response.data.decode('utf-8'))\n self.assertIn(\"section-1-task-1\", response.data.decode('utf-8'))\n self.assertIn(\"section-1-task-2\", response.data.decode('utf-8'))\n self.assertIn(\"section-1-task-3\", response.data.decode('utf-8'))\n self.assertIn(\"section-1-task-4\", response.data.decode('utf-8'))\n self.assertIn(\"section-1-task-5\", response.data.decode('utf-8'))\n response = self.app.get(url + \"&confirmed=true\")\n url = (\n \"/admin/airflow/clear?task_id=runme_1&\"\n \"dag_id=test_example_bash_operator&future=false&past=false&\"\n \"upstream=false&downstream=true&\"\n \"execution_date={}&\"\n \"origin=/admin\".format(DEFAULT_DATE_DS))\n response = self.app.get(url)\n self.assertIn(\"Wait a minute\", response.data.decode('utf-8'))\n response = self.app.get(url + \"&confirmed=true\")\n url = (\n \"/admin/airflow/run?task_id=runme_0&\"\n \"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&\"\n \"ignore_task_deps=true&execution_date={}&\"\n \"origin=/admin\".format(DEFAULT_DATE_DS))\n response = self.app.get(url)\n response = self.app.get(\n \"/admin/airflow/refresh?dag_id=example_bash_operator\")\n response = self.app.get(\"/admin/airflow/refresh_all\")\n response = self.app.post(\n \"/admin/airflow/paused?\"\n \"dag_id=example_python_operator&is_paused=false\")\n self.assertIn(\"OK\", response.data.decode('utf-8'))\n response = self.app.get(\"/admin/xcom\", follow_redirects=True)\n self.assertIn(\"Xcoms\", response.data.decode('utf-8'))\n\n def test_charts(self):\n session = Session()\n chart_label = \"Airflow task instance by type\"\n chart = session.query(\n 
models.Chart).filter(models.Chart.label == chart_label).first()\n chart_id = chart.id\n session.close()\n response = self.app.get(\n '/admin/airflow/chart'\n '?chart_id={}&iteration_no=1'.format(chart_id))\n self.assertIn(\"Airflow task instance by type\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/chart_data'\n '?chart_id={}&iteration_no=1'.format(chart_id))\n self.assertIn(\"example\", response.data.decode('utf-8'))\n response = self.app.get(\n '/admin/airflow/dag_details?dag_id=example_branch_operator')\n self.assertIn(\"run_this_first\", response.data.decode('utf-8'))\n\n def test_fetch_task_instance(self):\n url = (\n \"/admin/airflow/object/task_instances?\"\n \"dag_id=test_example_bash_operator&\"\n \"execution_date={}\".format(DEFAULT_DATE_DS))\n response = self.app.get(url)\n self.assertIn(\"run_this_last\", response.data.decode('utf-8'))\n\n def tearDown(self):\n configuration.conf.set(\"webserver\", \"expose_config\", \"False\")\n self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())\n session = Session()\n session.query(models.DagRun).delete()\n session.query(models.TaskInstance).delete()\n session.commit()\n session.close()\n\n\nclass SecureModeWebUiTests(unittest.TestCase):\n def setUp(self):\n configuration.load_test_config()\n configuration.conf.set(\"webserver\", \"authenticate\", \"False\")\n configuration.conf.set(\"core\", \"secure_mode\", \"True\")\n app = application.create_app()\n app.config['TESTING'] = True\n self.app = app.test_client()\n\n def test_query(self):\n response = self.app.get('/admin/queryview/')\n self.assertEqual(response.status_code, 404)\n\n def test_charts(self):\n response = self.app.get('/admin/chart/')\n self.assertEqual(response.status_code, 404)\n\n def tearDown(self):\n configuration.remove_option(\"core\", \"SECURE_MODE\")\n\n\nclass WebPasswordAuthTest(unittest.TestCase):\n def setUp(self):\n configuration.conf.set(\"webserver\", \"authenticate\", \"True\")\n configuration.conf.set(\"webserver\", \"auth_backend\", \"airflow.contrib.auth.backends.password_auth\")\n\n app = application.create_app()\n app.config['TESTING'] = True\n self.app = app.test_client()\n from airflow.contrib.auth.backends.password_auth import PasswordUser\n\n session = Session()\n user = models.User()\n password_user = PasswordUser(user)\n password_user.username = 'airflow_passwordauth'\n password_user.password = 'password'\n print(password_user._password)\n session.add(password_user)\n session.commit()\n session.close()\n\n def get_csrf(self, response):\n tree = html.fromstring(response.data)\n form = tree.find('.//form')\n\n return form.find('.//input[@name=\"_csrf_token\"]').value\n\n def login(self, username, password):\n response = self.app.get('/admin/airflow/login')\n csrf_token = self.get_csrf(response)\n\n return self.app.post('/admin/airflow/login', data=dict(\n username=username,\n password=password,\n csrf_token=csrf_token\n ), follow_redirects=True)\n\n def logout(self):\n return self.app.get('/admin/airflow/logout', follow_redirects=True)\n\n def test_login_logout_password_auth(self):\n self.assertTrue(configuration.getboolean('webserver', 'authenticate'))\n\n response = self.login('user1', 'whatever')\n self.assertIn('Incorrect login details', response.data.decode('utf-8'))\n\n response = self.login('airflow_passwordauth', 'wrongpassword')\n self.assertIn('Incorrect login details', response.data.decode('utf-8'))\n\n response = self.login('airflow_passwordauth', 'password')\n self.assertIn('Data 
Profiling', response.data.decode('utf-8'))\n\n response = self.logout()\n self.assertIn('form-signin', response.data.decode('utf-8'))\n\n def test_unauthorized_password_auth(self):\n response = self.app.get(\"/admin/airflow/landing_times\")\n self.assertEqual(response.status_code, 302)\n\n def tearDown(self):\n configuration.load_test_config()\n session = Session()\n session.query(models.User).delete()\n session.commit()\n session.close()\n configuration.conf.set(\"webserver\", \"authenticate\", \"False\")\n\n\nclass WebLdapAuthTest(unittest.TestCase):\n def setUp(self):\n configuration.conf.set(\"webserver\", \"authenticate\", \"True\")\n configuration.conf.set(\"webserver\", \"auth_backend\", \"airflow.contrib.auth.backends.ldap_auth\")\n try:\n configuration.conf.add_section(\"ldap\")\n except:\n pass\n configuration.conf.set(\"ldap\", \"uri\", \"ldap://localhost:3890\")\n configuration.conf.set(\"ldap\", \"user_filter\", \"objectClass=*\")\n configuration.conf.set(\"ldap\", \"user_name_attr\", \"uid\")\n configuration.conf.set(\"ldap\", \"bind_user\", \"cn=Manager,dc=example,dc=com\")\n configuration.conf.set(\"ldap\", \"bind_password\", \"insecure\")\n configuration.conf.set(\"ldap\", \"basedn\", \"dc=example,dc=com\")\n configuration.conf.set(\"ldap\", \"cacert\", \"\")\n\n app = application.create_app()\n app.config['TESTING'] = True\n self.app = app.test_client()\n\n def get_csrf(self, response):\n tree = html.fromstring(response.data)\n form = tree.find('.//form')\n\n return form.find('.//input[@name=\"_csrf_token\"]').value\n\n def login(self, username, password):\n response = self.app.get('/admin/airflow/login')\n csrf_token = self.get_csrf(response)\n\n return self.app.post('/admin/airflow/login', data=dict(\n username=username,\n password=password,\n csrf_token=csrf_token\n ), follow_redirects=True)\n\n def logout(self):\n return self.app.get('/admin/airflow/logout', follow_redirects=True)\n\n def test_login_logout_ldap(self):\n self.assertTrue(configuration.getboolean('webserver', 'authenticate'))\n\n response = self.login('user1', 'userx')\n self.assertIn('Incorrect login details', response.data.decode('utf-8'))\n\n response = self.login('userz', 'user1')\n self.assertIn('Incorrect login details', response.data.decode('utf-8'))\n\n response = self.login('user1', 'user1')\n self.assertIn('Data Profiling', response.data.decode('utf-8'))\n\n response = self.logout()\n self.assertIn('form-signin', response.data.decode('utf-8'))\n\n def test_unauthorized(self):\n response = self.app.get(\"/admin/airflow/landing_times\")\n self.assertEqual(response.status_code, 302)\n\n def test_no_filter(self):\n response = self.login('user1', 'user1')\n self.assertIn('Data Profiling', response.data.decode('utf-8'))\n self.assertIn('Connections', response.data.decode('utf-8'))\n\n def test_with_filters(self):\n configuration.conf.set('ldap', 'superuser_filter',\n 'description=superuser')\n configuration.conf.set('ldap', 'data_profiler_filter',\n 'description=dataprofiler')\n\n response = self.login('dataprofiler', 'dataprofiler')\n self.assertIn('Data Profiling', response.data.decode('utf-8'))\n\n response = self.login('superuser', 'superuser')\n self.assertIn('Connections', response.data.decode('utf-8'))\n\n def tearDown(self):\n configuration.load_test_config()\n session = Session()\n session.query(models.User).delete()\n session.commit()\n session.close()\n configuration.conf.set(\"webserver\", \"authenticate\", \"False\")\n\n\nclass LdapGroupTest(unittest.TestCase):\n def setUp(self):\n 
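# point the ldap auth backend at the test LDAP server (expected on localhost:3890) so that group membership can be resolved in test_group_belonging\n 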
configuration.conf.set(\"webserver\", \"authenticate\", \"True\")\n configuration.conf.set(\"webserver\", \"auth_backend\", \"airflow.contrib.auth.backends.ldap_auth\")\n try:\n configuration.conf.add_section(\"ldap\")\n except:\n pass\n configuration.conf.set(\"ldap\", \"uri\", \"ldap://localhost:3890\")\n configuration.conf.set(\"ldap\", \"user_filter\", \"objectClass=*\")\n configuration.conf.set(\"ldap\", \"user_name_attr\", \"uid\")\n configuration.conf.set(\"ldap\", \"bind_user\", \"cn=Manager,dc=example,dc=com\")\n configuration.conf.set(\"ldap\", \"bind_password\", \"insecure\")\n configuration.conf.set(\"ldap\", \"basedn\", \"dc=example,dc=com\")\n configuration.conf.set(\"ldap\", \"cacert\", \"\")\n\n def test_group_belonging(self):\n from airflow.contrib.auth.backends.ldap_auth import LdapUser\n users = {\"user1\": [\"group1\", \"group3\"],\n \"user2\": [\"group2\"]\n }\n for user in users:\n mu = models.User(username=user,\n is_superuser=False)\n auth = LdapUser(mu)\n self.assertEqual(set(users[user]), set(auth.ldap_groups))\n\n def tearDown(self):\n configuration.load_test_config()\n configuration.conf.set(\"webserver\", \"authenticate\", \"False\")\n\n\nclass FakeWebHDFSHook(object):\n def __init__(self, conn_id):\n self.conn_id = conn_id\n\n def get_conn(self):\n return self.conn_id\n\n def check_for_path(self, hdfs_path):\n return hdfs_path\n\n\nclass FakeSnakeBiteClientException(Exception):\n pass\n\n\nclass FakeSnakeBiteClient(object):\n\n def __init__(self):\n self.started = True\n\n def ls(self, path, include_toplevel=False):\n \"\"\"\n the fake snakebite client\n :param path: the array of path to test\n :param include_toplevel: to return the toplevel directory info\n :return: a list for path for the matching queries\n \"\"\"\n if path[0] == '/datadirectory/empty_directory' and not include_toplevel:\n return []\n elif path[0] == '/datadirectory/datafile':\n return [{\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 0,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/datafile'\n }]\n elif path[0] == '/datadirectory/empty_directory' and include_toplevel:\n return [{\n 'group': u'supergroup',\n 'permission': 493,\n 'file_type': 'd',\n 'access_time': 0,\n 'block_replication': 0,\n 'modification_time': 1481132141540,\n 'length': 0,\n 'blocksize': 0,\n 'owner': u'hdfs',\n 'path': '/datadirectory/empty_directory'\n }]\n elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:\n return [{\n 'group': u'supergroup',\n 'permission': 493,\n 'file_type': 'd',\n 'access_time': 0,\n 'block_replication': 0,\n 'modification_time': 1481132141540,\n 'length': 0,\n 'blocksize': 0,\n 'owner': u'hdfs',\n 'path': '/datadirectory/empty_directory'\n }, {\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 0,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/not_empty_directory/test_file'\n }]\n elif path[0] == '/datadirectory/not_empty_directory':\n return [{\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 0,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/not_empty_directory/test_file'\n }]\n elif path[0] == 
'/datadirectory/not_existing_file_or_directory':\n raise FakeSnakeBiteClientException\n elif path[0] == '/datadirectory/regex_dir':\n return [{\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862, 'length': 12582912,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/regex_dir/test1file'\n }, {\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 12582912,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/regex_dir/test2file'\n }, {\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 12582912,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/regex_dir/test3file'\n }, {\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 12582912,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'\n }, {\n 'group': u'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 12582912,\n 'blocksize': 134217728,\n 'owner': u'hdfs',\n 'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'\n }]\n else:\n raise FakeSnakeBiteClientException\n\n\nclass FakeHDFSHook(object):\n def __init__(self, conn_id=None):\n self.conn_id = conn_id\n\n def get_conn(self):\n client = FakeSnakeBiteClient()\n return client\n\n\nclass ConnectionTest(unittest.TestCase):\n def setUp(self):\n configuration.load_test_config()\n utils.db.initdb()\n os.environ['AIRFLOW_CONN_TEST_URI'] = (\n 'postgres://username:[email protected]:5432/the_database')\n os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (\n 'postgres://ec2.compute.com/the_database')\n\n def tearDown(self):\n env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']\n for ev in env_vars:\n if ev in os.environ:\n del os.environ[ev]\n\n def test_using_env_var(self):\n c = SqliteHook.get_connection(conn_id='test_uri')\n self.assertEqual('ec2.compute.com', c.host)\n self.assertEqual('the_database', c.schema)\n self.assertEqual('username', c.login)\n self.assertEqual('password', c.password)\n self.assertEqual(5432, c.port)\n\n def test_using_unix_socket_env_var(self):\n c = SqliteHook.get_connection(conn_id='test_uri_no_creds')\n self.assertEqual('ec2.compute.com', c.host)\n self.assertEqual('the_database', c.schema)\n self.assertIsNone(c.login)\n self.assertIsNone(c.password)\n self.assertIsNone(c.port)\n\n def test_param_setup(self):\n c = models.Connection(conn_id='local_mysql', conn_type='mysql',\n host='localhost', login='airflow',\n password='airflow', schema='airflow')\n self.assertEqual('localhost', c.host)\n self.assertEqual('airflow', c.schema)\n self.assertEqual('airflow', c.login)\n self.assertEqual('airflow', c.password)\n self.assertIsNone(c.port)\n\n def test_env_var_priority(self):\n c = SqliteHook.get_connection(conn_id='airflow_db')\n self.assertNotEqual('ec2.compute.com', c.host)\n\n os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \\\n 'postgres://username:[email protected]:5432/the_database'\n c = SqliteHook.get_connection(conn_id='airflow_db')\n 
self.assertEqual('ec2.compute.com', c.host)\n self.assertEqual('the_database', c.schema)\n self.assertEqual('username', c.login)\n self.assertEqual('password', c.password)\n self.assertEqual(5432, c.port)\n del os.environ['AIRFLOW_CONN_AIRFLOW_DB']\n\n def test_dbapi_get_uri(self):\n conn = BaseHook.get_connection(conn_id='test_uri')\n hook = conn.get_hook()\n self.assertEqual('postgres://username:[email protected]:5432/the_database', hook.get_uri())\n conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')\n hook2 = conn2.get_hook()\n self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())\n\n def test_dbapi_get_sqlalchemy_engine(self):\n conn = BaseHook.get_connection(conn_id='test_uri')\n hook = conn.get_hook()\n engine = hook.get_sqlalchemy_engine()\n self.assertIsInstance(engine, sqlalchemy.engine.Engine)\n self.assertEqual('postgres://username:[email protected]:5432/the_database', str(engine.url))\n\n def test_get_connections_env_var(self):\n conns = SqliteHook.get_connections(conn_id='test_uri')\n assert len(conns) == 1\n assert conns[0].host == 'ec2.compute.com'\n assert conns[0].schema == 'the_database'\n assert conns[0].login == 'username'\n assert conns[0].password == 'password'\n assert conns[0].port == 5432\n\n def test_get_connections_db(self):\n conns = BaseHook.get_connections(conn_id='airflow_db')\n assert len(conns) == 1\n assert conns[0].host == 'localhost'\n assert conns[0].schema == 'airflow'\n assert conns[0].login == 'root'\n\n\nclass WebHDFSHookTest(unittest.TestCase):\n def setUp(self):\n configuration.load_test_config()\n\n def test_simple_init(self):\n from airflow.hooks.webhdfs_hook import WebHDFSHook\n c = WebHDFSHook()\n self.assertIsNone(c.proxy_user)\n\n def test_init_proxy_user(self):\n from airflow.hooks.webhdfs_hook import WebHDFSHook\n c = WebHDFSHook(proxy_user='someone')\n self.assertEqual('someone', c.proxy_user)\n\n\ntry:\n from airflow.hooks.hdfs_hook import HDFSHook\n import snakebite\nexcept ImportError:\n HDFSHook = None\n\n\[email protected](HDFSHook is None,\n \"Skipping test because HDFSHook is not installed\")\nclass HDFSHookTest(unittest.TestCase):\n def setUp(self):\n configuration.load_test_config()\n os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = ('hdfs://localhost:8020')\n\n def test_get_client(self):\n client = HDFSHook(proxy_user='foo').get_conn()\n self.assertIsInstance(client, snakebite.client.Client)\n self.assertEqual('localhost', client.host)\n self.assertEqual(8020, client.port)\n self.assertEqual('foo', client.service.channel.effective_user)\n\n @mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')\n @mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')\n def test_get_autoconfig_client(self, mock_get_connections,\n MockAutoConfigClient):\n c = models.Connection(conn_id='hdfs', conn_type='hdfs',\n host='localhost', port=8020, login='foo',\n extra=json.dumps({'autoconfig': True}))\n mock_get_connections.return_value = [c]\n HDFSHook(hdfs_conn_id='hdfs').get_conn()\n MockAutoConfigClient.assert_called_once_with(effective_user='foo',\n use_sasl=False)\n\n @mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')\n def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):\n HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()\n MockAutoConfigClient.assert_called_once_with(effective_user=None,\n use_sasl=False)\n\n @mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')\n def test_get_ha_client(self, mock_get_connections):\n c1 = models.Connection(conn_id='hdfs_default', 
conn_type='hdfs',\n host='localhost', port=8020)\n c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',\n host='localhost2', port=8020)\n mock_get_connections.return_value = [c1, c2]\n client = HDFSHook().get_conn()\n self.assertIsInstance(client, snakebite.client.HAClient)\n\n\ntry:\n from airflow.hooks.http_hook import HttpHook\nexcept ImportError:\n HttpHook = None\n\n\[email protected](HttpHook is None,\n \"Skipping test because HttpHook is not installed\")\nclass HttpHookTest(unittest.TestCase):\n def setUp(self):\n configuration.load_test_config()\n\n @mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')\n def test_http_connection(self, mock_get_connection):\n c = models.Connection(conn_id='http_default', conn_type='http',\n host='localhost', schema='http')\n mock_get_connection.return_value = c\n hook = HttpHook()\n hook.get_conn({})\n self.assertEqual(hook.base_url, 'http://localhost')\n\n @mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')\n def test_https_connection(self, mock_get_connection):\n c = models.Connection(conn_id='http_default', conn_type='http',\n host='localhost', schema='https')\n mock_get_connection.return_value = c\n hook = HttpHook()\n hook.get_conn({})\n self.assertEqual(hook.base_url, 'https://localhost')\n\n @mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')\n def test_host_encoded_http_connection(self, mock_get_connection):\n c = models.Connection(conn_id='http_default', conn_type='http',\n host='http://localhost')\n mock_get_connection.return_value = c\n hook = HttpHook()\n hook.get_conn({})\n self.assertEqual(hook.base_url, 'http://localhost')\n\n @mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')\n def test_host_encoded_https_connection(self, mock_get_connection):\n c = models.Connection(conn_id='http_default', conn_type='http',\n host='https://localhost')\n mock_get_connection.return_value = c\n hook = HttpHook()\n hook.get_conn({})\n self.assertEqual(hook.base_url, 'https://localhost')\n\n\nsend_email_test = mock.Mock()\n\n\nclass EmailTest(unittest.TestCase):\n def setUp(self):\n configuration.remove_option('email', 'EMAIL_BACKEND')\n\n @mock.patch('airflow.utils.email.send_email')\n def test_default_backend(self, mock_send_email):\n res = utils.email.send_email('to', 'subject', 'content')\n mock_send_email.assert_called_with('to', 'subject', 'content')\n self.assertEqual(mock_send_email.return_value, res)\n\n @mock.patch('airflow.utils.email.send_email_smtp')\n def test_custom_backend(self, mock_send_email):\n configuration.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')\n utils.email.send_email('to', 'subject', 'content')\n send_email_test.assert_called_with(\n 'to', 'subject', 'content', files=None, dryrun=False,\n cc=None, bcc=None, mime_subtype='mixed'\n )\n self.assertFalse(mock_send_email.called)\n\n\nclass EmailSmtpTest(unittest.TestCase):\n def setUp(self):\n configuration.set('smtp', 'SMTP_SSL', 'False')\n\n @mock.patch('airflow.utils.email.send_MIME_email')\n def test_send_smtp(self, mock_send_mime):\n attachment = tempfile.NamedTemporaryFile()\n attachment.write(b'attachment')\n attachment.seek(0)\n utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])\n self.assertTrue(mock_send_mime.called)\n call_args = mock_send_mime.call_args[0]\n self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])\n self.assertEqual(['to'], call_args[1])\n msg = call_args[2]\n self.assertEqual('subject', msg['Subject'])\n 
self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])\n self.assertEqual(2, len(msg.get_payload()))\n self.assertEqual(u'attachment; filename=\"' + os.path.basename(attachment.name) + '\"',\n msg.get_payload()[-1].get(u'Content-Disposition'))\n mimeapp = MIMEApplication('attachment')\n self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())\n\n @mock.patch('airflow.utils.email.send_MIME_email')\n def test_send_bcc_smtp(self, mock_send_mime):\n attachment = tempfile.NamedTemporaryFile()\n attachment.write(b'attachment')\n attachment.seek(0)\n utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')\n self.assertTrue(mock_send_mime.called)\n call_args = mock_send_mime.call_args[0]\n self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])\n self.assertEqual(['to', 'cc', 'bcc'], call_args[1])\n msg = call_args[2]\n self.assertEqual('subject', msg['Subject'])\n self.assertEqual(configuration.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])\n self.assertEqual(2, len(msg.get_payload()))\n self.assertEqual(u'attachment; filename=\"' + os.path.basename(attachment.name) + '\"',\n msg.get_payload()[-1].get(u'Content-Disposition'))\n mimeapp = MIMEApplication('attachment')\n self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())\n\n @mock.patch('smtplib.SMTP_SSL')\n @mock.patch('smtplib.SMTP')\n def test_send_mime(self, mock_smtp, mock_smtp_ssl):\n mock_smtp.return_value = mock.Mock()\n mock_smtp_ssl.return_value = mock.Mock()\n msg = MIMEMultipart()\n utils.email.send_MIME_email('from', 'to', msg, dryrun=False)\n mock_smtp.assert_called_with(\n configuration.get('smtp', 'SMTP_HOST'),\n configuration.getint('smtp', 'SMTP_PORT'),\n )\n self.assertTrue(mock_smtp.return_value.starttls.called)\n mock_smtp.return_value.login.assert_called_with(\n configuration.get('smtp', 'SMTP_USER'),\n configuration.get('smtp', 'SMTP_PASSWORD'),\n )\n mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())\n self.assertTrue(mock_smtp.return_value.quit.called)\n\n @mock.patch('smtplib.SMTP_SSL')\n @mock.patch('smtplib.SMTP')\n def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):\n configuration.set('smtp', 'SMTP_SSL', 'True')\n mock_smtp.return_value = mock.Mock()\n mock_smtp_ssl.return_value = mock.Mock()\n utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)\n self.assertFalse(mock_smtp.called)\n mock_smtp_ssl.assert_called_with(\n configuration.get('smtp', 'SMTP_HOST'),\n configuration.getint('smtp', 'SMTP_PORT'),\n )\n\n @mock.patch('smtplib.SMTP_SSL')\n @mock.patch('smtplib.SMTP')\n def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):\n configuration.conf.remove_option('smtp', 'SMTP_USER')\n configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')\n mock_smtp.return_value = mock.Mock()\n mock_smtp_ssl.return_value = mock.Mock()\n utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)\n self.assertFalse(mock_smtp_ssl.called)\n mock_smtp.assert_called_with(\n configuration.get('smtp', 'SMTP_HOST'),\n configuration.getint('smtp', 'SMTP_PORT'),\n )\n self.assertFalse(mock_smtp.login.called)\n\n @mock.patch('smtplib.SMTP_SSL')\n @mock.patch('smtplib.SMTP')\n def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):\n utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)\n self.assertFalse(mock_smtp.called)\n self.assertFalse(mock_smtp_ssl.called)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.testing.assert_array_almost_equal"
]
] |
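The apis column for this entry records numpy.testing.assert_array_almost_equal, a call presumably made elsewhere in the full tests/core.py file, since it does not appear in the excerpt shown above. For reference, a minimal, self-contained sketch of how that assertion behaves (the array values are illustrative only):

import numpy as np
from numpy.testing import assert_array_almost_equal

# Passes: elements agree to the default 6 decimal places.
assert_array_almost_equal([1.0, 2.3333333], [1.0, 2.3333334])

# Fails: a difference of 0.01 exceeds the tolerance implied by decimal=3.
try:
    assert_array_almost_equal([1.0, 2.33], [1.0, 2.34], decimal=3)
except AssertionError:
    print("arrays differ beyond 3 decimal places")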
fheyen/ClaVis | [
"528ca85dd05606d39761b5a00d755500cf1cd2f6"
] | [
"backend/modules/classifiers/cifar10_cnn/__init__.py"
] | [
"import numpy\nimport keras\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.callbacks import EarlyStopping\nimport os\nfrom ...tools import check_argument as check\nfrom termcolor import cprint\n\n\"\"\"\nFrom https://keras.io/examples/cifar10_cnn/\n\"\"\"\n\nCLF_INFO = {\n 'name': 'cifar10_cnn',\n 'short': 'C10 CNN',\n 'description': 'Cifar10 Convolutional Neural Network',\n 'parameters': [\n {\n 'name': 'save_model',\n 'description': 'Save model to file',\n 'type': 'boolean',\n 'default_value': False\n },\n {\n 'name': 'data_augmentation',\n 'description': 'Data augmentation',\n 'type': 'boolean',\n 'default_value': True\n },\n {\n 'name': 'early_stopping',\n 'description': 'Early stopping',\n 'type': 'boolean',\n 'default_value': True\n },\n {\n 'name': 'early_stopping_patience',\n 'description': 'Early stopping patience',\n 'type': 'integer',\n 'range': [0, 1000000],\n 'default_value': 10\n },\n {\n 'name': 'epochs',\n 'description': 'Epochs',\n 'type': 'integer',\n 'range': [0, 10000],\n 'default_value': 20\n },\n {\n 'name': 'steps_per_epoch',\n 'description': 'Steps per epoch',\n 'type': 'integer',\n 'range': [0, 1000000],\n 'default_value': 1000\n },\n {\n 'name': 'batch_size',\n 'description': 'Batch size',\n 'type': 'integer',\n 'range': [0, 100000],\n 'default_value': 32\n },\n {\n 'name': 'random_state',\n 'description': 'Random seed',\n 'type': 'integer',\n 'range': [0, 1000000],\n 'default_value': 0\n }\n ]\n}\n\n\ndef get_clf(args, data_specs):\n return Classifier(args, data_specs)\n\n\ndef get_info():\n return CLF_INFO\n\n\nclass Classifier():\n def __init__(self, args, data_specs):\n self.title = args['title']\n\n # check all params and save them\n self.save_model = check('save_model', args, CLF_INFO)\n\n self.data_augmentation = check('data_augmentation', args, CLF_INFO)\n\n self.epochs = check('epochs', args, CLF_INFO)\n self.steps_per_epoch = check('steps_per_epoch', args, CLF_INFO)\n self.early_stopping = check('early_stopping', args, CLF_INFO)\n self.early_stopping_patience = check(\n 'early_stopping_patience', args, CLF_INFO)\n\n self.batch_size = check('batch_size', args, CLF_INFO)\n self.random_state = check('random_state', args, CLF_INFO)\n\n # save data parameters\n self.job_title = data_specs['job_title']\n self.dataset_name = data_specs['dataset_name']\n self.num_classes = data_specs['num_classes']\n self.original_shape = data_specs['original_shape']\n if len(self.original_shape) == 3:\n # Conv layer needs 4 dimensions\n self.original_shape += (1,)\n\n def get_info(self):\n return CLF_INFO\n\n def fit(self, X_train, y_train):\n numpy.random.seed(self.random_state)\n print('Number of classes: {}'.format(self.num_classes))\n\n # Reshape data back to original\n new_shape = (len(X_train),) + self.original_shape[1:]\n cprint('Scaling and reshaping to {}'.format(new_shape))\n X_train = X_train.astype('float32')\n X_train /= 255\n X_train = X_train.reshape(new_shape)\n\n # Convert class vectors to binary class matrices.\n y_train = keras.utils.to_categorical(y_train, self.num_classes)\n\n # model architecture\n model = Sequential()\n model.add(Conv2D(32, (3, 3), padding='same',\n input_shape=X_train.shape[1:]))\n model.add(Activation('relu'))\n model.add(Conv2D(32, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(64, (3, 
3), padding='same'))\n model.add(Activation('relu'))\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(self.num_classes))\n model.add(Activation('softmax'))\n\n # optimizer\n opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)\n\n # compile model\n model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n\n if not self.data_augmentation:\n cprint('Not using data augmentation (as specified in args).', 'cyan')\n\n # early stopping\n callbacks = None\n if self.early_stopping:\n cprint(\n f'Using early stopping with patience {self.early_stopping_patience}', 'green')\n early_stopping = EarlyStopping(monitor='val_loss',\n patience=self.early_stopping_patience,\n restore_best_weights=True,\n verbose=2)\n callbacks = [early_stopping]\n else:\n cprint('Not using early stopping', 'yellow')\n\n # train model\n history = model.fit(X_train, y_train,\n batch_size=self.batch_size,\n epochs=self.epochs,\n validation_split=0.2,\n shuffle=True,\n callbacks=callbacks)\n else:\n cprint('Using real-time data augmentation.', 'cyan')\n # This will do preprocessing and realtime data augmentation:\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n zca_epsilon=1e-06, # epsilon for ZCA whitening\n # randomly rotate images in the range (degrees, 0 to 180)\n rotation_range=0,\n # randomly shift images horizontally (fraction of total width)\n width_shift_range=0.1,\n # randomly shift images vertically (fraction of total height)\n height_shift_range=0.1,\n shear_range=0., # set range for random shear\n zoom_range=0., # set range for random zoom\n channel_shift_range=0., # set range for random channel shifts\n # set mode for filling points outside the input boundaries\n fill_mode='nearest',\n cval=0., # value used for fill_mode = \"constant\"\n horizontal_flip=True, # randomly flip images\n vertical_flip=False, # randomly flip images\n # set rescaling factor (applied before any other transformation)\n rescale=None,\n # set function that will be applied on each input\n preprocessing_function=None,\n # image data format, either \"channels_first\" or \"channels_last\"\n data_format=None,\n # fraction of images reserved for validation (strictly between 0 and 1)\n validation_split=0.1)\n\n # Compute quantities required for feature-wise normalization\n # (std, mean, and principal components if ZCA whitening is applied).\n datagen.fit(X_train)\n\n # Fit the model on the batches generated by datagen.flow().\n history = model.fit_generator(\n datagen.flow(X_train, y_train, batch_size=self.batch_size),\n epochs=self.epochs, steps_per_epoch=self.steps_per_epoch, workers=8\n )\n\n self.clf = model\n self.history = history\n\n # save model and weights\n if self.save_model:\n save_dir = os.path.join(\n os.getcwd(), 'saved_models', self.job_title)\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n model_path = os.path.join(save_dir, f'{self.title}.h5')\n model.save(model_path)\n cprint('Saved trained model at {}'.format(model_path), 'green')\n\n def get_history(self):\n return 
self.history.history\n\n def predict(self, X_test, y_test):\n # same format as X_train\n X_test = X_test.astype('float32')\n X_test /= 255\n\n # adapt shape for test set\n new_shape = (len(X_test),) + self.original_shape[1:]\n X_test = X_test.reshape(new_shape)\n\n return self.clf.predict_classes(X_test)\n\n def predict_proba(self, X_test, y_test):\n # same format as X_train\n X_test = X_test.astype('float32')\n X_test /= 255\n\n # adapt shape for test set\n new_shape = (len(X_test),) + self.original_shape[1:]\n X_test = X_test.reshape(new_shape)\n\n return self.clf.predict_proba(X_test)\n"
] | [
[
"numpy.random.seed"
]
] |
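The Classifier above rescales flattened pixel rows to [0, 1] and reshapes them back to the dataset's original image dimensions in both fit and predict, after seeding NumPy with the configured random_state (the numpy.random.seed call recorded in the apis column). A standalone sketch of that preprocessing step, using random stand-in data and a hypothetical original_shape of (N, 32, 32, 3):

import numpy as np

np.random.seed(0)  # mirrors numpy.random.seed(self.random_state) in Classifier.fit

original_shape = (None, 32, 32, 3)                          # hypothetical data_specs value
X_train = np.random.randint(0, 256, size=(8, 32 * 32 * 3))  # stand-in for flattened pixel rows

# Same pattern as Classifier.fit / predict: cast, scale to [0, 1], restore image dimensions.
new_shape = (len(X_train),) + original_shape[1:]
X_train = X_train.astype('float32') / 255
X_train = X_train.reshape(new_shape)

print(X_train.shape)  # (8, 32, 32, 3), values now in [0, 1]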
Tianxiaomo/ROI | [
"8422716605f846c6f4276051a9738cb6c162611d"
] | [
"roi/layers/nms.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport torch\nfrom torchvision.ops import boxes as box_ops\nfrom torchvision.ops import nms # BC-compat\n\n\ndef batched_nms(boxes, scores, idxs, iou_threshold):\n \"\"\"\n Same as torchvision.ops.boxes.batched_nms, but safer.\n \"\"\"\n assert boxes.shape[-1] == 4\n # TODO may need better strategy.\n # Investigate after having a fully-cuda NMS op.\n if len(boxes) < 40000:\n return box_ops.batched_nms(boxes, scores, idxs, iou_threshold)\n\n result_mask = scores.new_zeros(scores.size(), dtype=torch.bool)\n for id in torch.unique(idxs).cpu().tolist():\n mask = (idxs == id).nonzero().view(-1)\n keep = nms(boxes[mask], scores[mask], iou_threshold)\n result_mask[mask[keep]] = True\n keep = result_mask.nonzero().view(-1)\n keep = keep[scores[keep].argsort(descending=True)]\n return keep\n\n\n# Note: this function (nms_rotated) might be moved into\n# torchvision/ops/boxes.py in the future\ndef nms_rotated(boxes, scores, iou_threshold):\n \"\"\"\n Performs non-maximum suppression (NMS) on the rotated boxes according\n to their intersection-over-union (IoU).\n\n Rotated NMS iteratively removes lower scoring rotated boxes which have an\n IoU greater than iou_threshold with another (higher scoring) rotated box.\n\n Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as\n RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they\n can be representing completely different objects in certain tasks, e.g., OCR.\n\n As for the question of whether rotated-NMS should treat them as faraway boxes\n even though their IOU is 1, it depends on the application and/or ground truth annotation.\n\n As an extreme example, consider a single character v and the square box around it.\n\n If the angle is 0 degree, the object (text) would be read as 'v';\n\n If the angle is 90 degrees, the object (text) would become '>';\n\n If the angle is 180 degrees, the object (text) would become '^';\n\n If the angle is 270/-90 degrees, the object (text) would become '<'\n\n All of these cases have IoU of 1 to each other, and rotated NMS that only\n uses IoU as criterion would only keep one of them with the highest score -\n which, practically, still makes sense in most cases because typically\n only one of theses orientations is the correct one. Also, it does not matter\n as much if the box is only used to classify the object (instead of transcribing\n them with a sequential OCR recognition model) later.\n\n On the other hand, when we use IoU to filter proposals that are close to the\n ground truth during training, we should definitely take the angle into account if\n we know the ground truth is labeled with the strictly correct orientation (as in,\n upside-down words are annotated with -180 degrees even though they can be covered\n with a 0/90/-90 degree box, etc.)\n\n The way the original dataset is annotated also matters. For example, if the dataset\n is a 4-point polygon dataset that does not enforce ordering of vertices/orientation,\n we can estimate a minimum rotated bounding box to this polygon, but there's no way\n we can tell the correct angle with 100% confidence (as shown above, there could be 4 different\n rotated boxes, with angles differed by 90 degrees to each other, covering the exactly\n same region). 
In that case we have to just use IoU to determine the box\n proximity (as many detection benchmarks (even for text) do) unless there're other\n assumptions we can make (like width is always larger than height, or the object is not\n rotated by more than 90 degrees CCW/CW, etc.)\n\n In summary, not considering angles in rotated NMS seems to be a good option for now,\n but we should be aware of its implications.\n\n Args:\n boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in\n (x_center, y_center, width, height, angle_degrees) format.\n scores (Tensor[N]): Scores for each one of the rotated boxes\n iou_threshold (float): Discards all overlapping rotated boxes with IoU < iou_threshold\n\n Returns:\n keep (Tensor): int64 tensor with the indices of the elements that have been kept\n by Rotated NMS, sorted in decreasing order of scores\n \"\"\"\n from roi import _C\n\n return _C.nms_rotated(boxes, scores, iou_threshold)\n\n\n# Note: this function (batched_nms_rotated) might be moved into\n# torchvision/ops/boxes.py in the future\ndef batched_nms_rotated(boxes, scores, idxs, iou_threshold):\n \"\"\"\n Performs non-maximum suppression in a batched fashion.\n\n Each index value correspond to a category, and NMS\n will not be applied between elements of different categories.\n\n Args:\n boxes (Tensor[N, 5]):\n boxes where NMS will be performed. They\n are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format\n scores (Tensor[N]):\n scores for each one of the boxes\n idxs (Tensor[N]):\n indices of the categories for each one of the boxes.\n iou_threshold (float):\n discards all overlapping boxes\n with IoU < iou_threshold\n\n Returns:\n Tensor:\n int64 tensor with the indices of the elements that have been kept\n by NMS, sorted in decreasing order of scores\n \"\"\"\n assert boxes.shape[-1] == 5\n\n if boxes.numel() == 0:\n return torch.empty((0,), dtype=torch.int64, device=boxes.device)\n # Strategy: in order to perform NMS independently per class,\n # we add an offset to all the boxes. The offset is dependent\n # only on the class idx, and is large enough so that boxes\n # from different classes do not overlap\n\n # Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate,\n # which won't handle negative coordinates correctly.\n # Here by using min_coordinate we can make sure the negative coordinates are\n # correctly handled.\n max_coordinate = (\n torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2\n ).max()\n min_coordinate = (\n torch.min(boxes[:, 0], boxes[:, 1]) - torch.min(boxes[:, 2], boxes[:, 3]) / 2\n ).min()\n offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1)\n boxes_for_nms = boxes.clone() # avoid modifying the original values in boxes\n boxes_for_nms[:, :2] += offsets[:, None]\n keep = nms_rotated(boxes_for_nms, scores, iou_threshold)\n return keep\n"
] | [
[
"torch.empty",
"torch.min",
"torch.unique",
"torch.max"
]
] |
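The inline comments in batched_nms_rotated above describe the per-class offset trick: each box is shifted by an amount that depends only on its class index and exceeds the overall coordinate span, so a single NMS pass over the shifted boxes can never suppress across classes. A small sketch of just that offset computation on dummy (x_ctr, y_ctr, width, height, angle) boxes; it stops before the compiled _C.nms_rotated call, which needs the library's extension built:

import torch

boxes = torch.tensor([[10., 10., 4., 4., 0.],     # class 0
                      [11., 10., 4., 4., 0.],     # class 0, overlaps the first box
                      [11., 10., 4., 4., 30.]])   # class 1, same region, different class
idxs = torch.tensor([0, 0, 1])

# Same span computation as batched_nms_rotated; taking the minimum as well keeps
# negative coordinates from breaking the separation between classes.
max_coordinate = (torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2).max()
min_coordinate = (torch.min(boxes[:, 0], boxes[:, 1]) - torch.min(boxes[:, 2], boxes[:, 3]) / 2).min()

offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1)
boxes_for_nms = boxes.clone()                 # keep the original boxes untouched
boxes_for_nms[:, :2] += offsets[:, None]      # class 1 now lives in a disjoint coordinate range

print(boxes_for_nms)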
xmengxin/MFGR | [
"ba807d0f52c0eb00d330eaa9bcef56c1343d2588"
] | [
"models/dcgan_conv.py"
] | [
"import torch\nimport torch.nn as nn\n\n\n# custom weights initialization called on netG and netD\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n\n\nclass Generator(nn.Module):\n def __init__(self, latent_dim, img_size=32):\n super(Generator, self).__init__()\n\n self.init_size = img_size // 4\n self.l1 = nn.Sequential(nn.Linear(latent_dim, 128 * self.init_size ** 2))\n\n self.conv_blocks = nn.Sequential(\n nn.BatchNorm2d(128),\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 128, 3, stride=1, padding=1),\n nn.BatchNorm2d(128, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 64, 3, stride=1, padding=1),\n nn.BatchNorm2d(64, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(64, 3, 3, stride=1, padding=1),\n nn.Tanh(),\n )\n\n def forward(self, z):\n out = self.l1(z)\n out = out.view(out.shape[0], 128, self.init_size, self.init_size)\n img = self.conv_blocks(out)\n return img\n\n\nclass Discriminator(nn.Module):\n def __init__(self, img_size=32):\n super(Discriminator, self).__init__()\n\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n self.conv_blocks = nn.Sequential(\n *discriminator_block(3, 16, bn=False),\n *discriminator_block(16, 32),\n *discriminator_block(32, 64),\n *discriminator_block(64, 128),\n )\n\n # The height and width of downsampled image\n ds_size = img_size // 2 ** 4\n self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid())\n\n def forward(self, img):\n out = self.conv_blocks(img)\n out = torch.flatten(out, 1)\n validity = self.adv_layer(out)\n\n return validity\n\n\ndef test():\n n_class, nz = 10, 100\n netG = Generator(n_class)\n netD = Discriminator()\n noise = torch.randn([32, nz])\n label = torch.randint(0, n_class, [32])\n img = netG(noise, label)\n valid, output = netD(img)\n pass\n\n# test()\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.randint",
"torch.nn.init.constant_",
"torch.nn.Dropout2d",
"torch.randn",
"torch.flatten",
"torch.nn.init.normal_",
"torch.nn.Tanh",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.nn.LeakyReLU"
]
] |
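For completeness, a brief usage sketch of the Generator and Discriminator defined above. It assumes models/dcgan_conv.py from this repository is importable (repo root on sys.path), and it follows the forward signatures as written, since the module's commented-out test() helper passes a label to the Generator and unpacks two values from the Discriminator, which those signatures do not support:

import torch
from models.dcgan_conv import Generator, Discriminator, weights_init  # assumes the repo root is on sys.path

latent_dim, batch_size = 100, 16
netG = Generator(latent_dim, img_size=32)
netD = Discriminator(img_size=32)
netG.apply(weights_init)   # DCGAN-style normal initialization from the module
netD.apply(weights_init)

z = torch.randn(batch_size, latent_dim)
fake_imgs = netG(z)          # (16, 3, 32, 32); Tanh output in [-1, 1]
validity = netD(fake_imgs)   # (16, 1); Sigmoid output in (0, 1)
print(fake_imgs.shape, validity.shape)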
hiagopinacio/ross | [
"1bc84061f23df455d9e37cb11b244ac795c836ad"
] | [
"ross/api_report.py"
] | [
"# fmt: off\nfrom copy import copy, deepcopy\n\nimport numpy as np\nimport pandas as pd\nfrom plotly import express as px\nfrom plotly import graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom scipy.interpolate import interp1d\nfrom scipy.signal import argrelextrema\n\nfrom ross.bearing_seal_element import BearingElement, SealElement\nfrom ross.disk_element import DiskElement\nfrom ross.materials import steel\nfrom ross.rotor_assembly import Rotor\nfrom ross.shaft_element import ShaftElement\n\n# fmt: on\n\n# set Plotly palette of colors\ncolors1 = px.colors.qualitative.Dark24\ncolors2 = px.colors.sequential.PuBu\n\n__all__ = [\"Report\", \"report_example\"]\n\n\nclass Report:\n \"\"\"Report according to standard analysis.\n\n - Perform unbalance response\n - Perform Stability_level1 analysis\n - Apply Level 1 Screening Criteria\n - Perform Stability_level2 analysis\n\n Parameters\n ----------\n rotor : object\n A rotor built from rotor_assembly.\n speed_range : tuple\n Tuple with (min, max) for speed range.\n tripspeed : float\n Machine trip speed.\n bearing_stiffness_range : tuple, optional\n Tuple with (start, end) bearing stiffness range.\n Argument to calculate the Undamped Critical Speed Map.\n bearing_clearance_lists : list of lists, optional\n List with two bearing elements lists:\n The first bearing list is set for minimum clearance.\n The second bearing list it set for maximum clearance.\n machine_type : str\n Machine type analyzed. Options: compressor, turbine or axial_flow.\n If other option is given, it will be treated as a compressor\n Default is compressor\n speed_units : str\n String defining the unit for rotor speed.\n Default is \"rpm\".\n tag : str\n String to name the rotor model\n Default is the Rotor.tag attribute\n\n Attributes\n ----------\n rotor_type: str\n Defines if the rotor is between bearings or overhung\n disk_nodes: list\n List of disk between bearings or overhung (depending on the\n rotor type)\n\n Returns\n -------\n A Report object\n\n Examples\n --------\n >>> import ross as rs\n >>> rotor = rs.rotor_example()\n >>>\n >>> # coefficients for minimum clearance\n >>> stfx = [0.7e7, 0.8e7, 0.9e7, 1.0e7]\n >>> damp = [2.0e3, 1.9e3, 1.8e3, 1.7e3]\n >>> freq = [400, 800, 1200, 1600]\n >>> bearing0 = rs.BearingElement(0, kxx=stfx, cxx=damp, frequency=freq)\n >>> bearing1 = rs.BearingElement(6, kxx=stfx, cxx=damp, frequency=freq)\n >>> min_clearance_brg = [bearing0, bearing1]\n >>>\n >>> # coefficients for maximum clearance\n >>> stfx = [0.4e7, 0.5e7, 0.6e7, 0.7e7]\n >>> damp = [2.8e3, 2.7e3, 2.6e3, 2.5e3]\n >>> freq = [400, 800, 1200, 1600]\n >>> bearing0 = rs.BearingElement(0, kxx=stfx, cxx=damp, frequency=freq)\n >>> bearing1 = rs.BearingElement(6, kxx=stfx, cxx=damp, frequency=freq)\n >>> max_clearance_brg = [bearing0, bearing1]\n >>>\n >>> bearings = [min_clearance_brg, max_clearance_brg]\n >>> report = rs.Report(rotor=rotor,\n ... speed_range=(400, 1000),\n ... tripspeed=1200,\n ... bearing_stiffness_range=(5,8),\n ... bearing_clearance_lists=bearings,\n ... 
speed_units=\"rad/s\")\n >>> report.rotor_type\n 'between_bearings'\n \"\"\"\n\n def __init__(\n self,\n rotor,\n speed_range,\n tripspeed,\n bearing_stiffness_range=None,\n bearing_clearance_lists=None,\n machine_type=\"compressor\",\n speed_units=\"rpm\",\n tag=None,\n ):\n self.rotor = rotor\n self.speed_units = speed_units\n self.speed_range = speed_range\n\n if speed_units == \"rpm\":\n self.minspeed = speed_range[0] * np.pi / 30\n self.maxspeed = speed_range[1] * np.pi / 30\n self.tripspeed = tripspeed * np.pi / 30\n if speed_units == \"rad/s\":\n self.minspeed = speed_range[0]\n self.maxspeed = speed_range[1]\n self.tripspeed = tripspeed\n\n self.bearing_stiffness_range = bearing_stiffness_range\n self.bearing_clearance_lists = bearing_clearance_lists\n\n # check if rotor is between bearings, single or double overhung\n # fmt: off\n if(\n all(i > min(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"]) and\n all(i < max(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"])\n ):\n rotor_type = \"between_bearings\"\n disk_nodes = [\n i for i in rotor.df_disks[\"n\"] if(\n i > min(rotor.df_bearings[\"n\"]) and\n i < max(rotor.df_bearings[\"n\"])\n )\n ]\n elif(\n any(i < min(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"]) and\n all(i < max(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"])\n ):\n rotor_type = \"single_overhung_l\"\n disk_nodes = [\n i for i in rotor.df_disks[\"n\"] if i < min(rotor.df_bearings[\"n\"])\n ]\n elif(\n all(i > min(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"]) and\n any(i > max(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"])\n ):\n rotor_type = \"single_overhung_r\"\n disk_nodes = [\n i for i in rotor.df_disks[\"n\"] if i > max(rotor.df_bearings[\"n\"])\n ]\n elif(\n any(i < min(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"]) and\n any(i > max(rotor.df_bearings[\"n\"]) for i in rotor.df_disks[\"n\"])\n ):\n rotor_type = \"double_overhung\"\n disk_nodes = [\n i for i in rotor.df_disks[\"n\"] if(\n i < min(rotor.df_bearings[\"n\"]) or\n i > max(rotor.df_bearings[\"n\"])\n )\n ]\n # fmt: on\n\n self.rotor_type = rotor_type\n self.disk_nodes = disk_nodes\n\n machine_options = [\"compressor\", \"turbine\", \"axial_flow\"]\n if machine_type not in machine_options:\n machine_type = \"compressor\"\n self.machine_type = machine_type\n\n if tag is None:\n self.tag = rotor.tag\n else:\n self.tag = tag\n\n # Multiplicative factor of the speed range - according to API 684\n self.speed_factor = 1.25\n\n # list of attributes\n self.Q0 = None\n self.Qa = None\n self.log_dec_a = None\n self.CSR = None\n self.Qratio = None\n self.crit_speed = None\n self.MCS = None\n self.RHO_gas = None\n self.condition = None\n self.node_min = None\n self.node_max = None\n self.U_force = None\n\n @classmethod\n def from_saved_rotors(\n cls,\n path,\n speed_range,\n tripspeed,\n bearing_stiffness_range=None,\n bearing_clearance_lists=None,\n machine_type=\"compressor\",\n speed_units=\"rpm\",\n tag=None,\n ):\n \"\"\"Instantiate a rotor from a previously saved rotor model.\n\n Parameters\n ----------\n path : str\n File name\n maxspeed : float\n Maximum operation speed.\n minspeed : float\n Minimum operation speed.\n tripspeed : float\n Machine trip speed.\n stiffness_range : tuple, optional\n Tuple with (start, end) for stiffness range. Argument to calculate\n the Undamped Critical Speed Map\n machine_type : str\n Machine type analyzed. 
Options: compressor, turbine or axial_flow.\n If other option is given, it will be treated as a compressor\n Default is compressor\n speed_units : str\n String defining the unit for rotor speed.\n Default is \"rpm\".\n\n Returns\n -------\n A Report object\n \"\"\"\n rotor = Rotor.load(path)\n return cls(\n rotor,\n speed_range,\n tripspeed,\n bearing_stiffness_range,\n bearing_clearance_lists,\n machine_type,\n speed_units,\n tag,\n )\n\n def rotor_instance(self, rotor, bearing_list):\n \"\"\"Build an instance of an auxiliary rotor with different bearing clearances.\n\n Parameters\n ----------\n rotor : object\n A rotor built from rotor_assembly.\n bearing_list : list\n List with the bearing elements.\n\n Returns\n -------\n aux_rotor : Rotor.object\n Returns a rotor object copy with different bearing clearance.\n\n Example\n -------\n >>> import ross as rs\n >>> stfx = [0.4e7, 0.5e7, 0.6e7, 0.7e7]\n >>> damp = [2.8e3, 2.7e3, 2.6e3, 2.5e3]\n >>> freq = [400, 800, 1200, 1600]\n >>> bearing0 = rs.BearingElement(0, kxx=stfx, cxx=damp, frequency=freq)\n >>> bearing1 = rs.BearingElement(6, kxx=stfx, cxx=damp, frequency=freq)\n >>> bearings = [bearing0, bearing1]\n >>> rotor = rs.rotor_example()\n >>> report = rs.report_example()\n >>> aux_rotor = report.rotor_instance(rotor, bearings)\n \"\"\"\n sh_elm = rotor.shaft_elements\n dk_elm = rotor.disk_elements\n pm_elm = rotor.point_mass_elements\n min_w = rotor.min_w\n max_w = rotor.max_w\n rated_w = rotor.rated_w\n tag = rotor.tag\n\n aux_rotor = Rotor(\n sh_elm, dk_elm, bearing_list, pm_elm, min_w, max_w, rated_w, tag\n )\n\n return aux_rotor\n\n def run(self, D, H, HP, oper_speed, RHO_ratio, RHOs, RHOd, unit=\"m\"):\n \"\"\"Run API report.\n\n This method runs the API analysis and prepare the results to\n generate the PDF report.\n\n Parameters\n ----------\n D: list\n Impeller diameter, m (in.),\n Blade pitch diameter, m (in.),\n H: list\n Minimum diffuser width per impeller, m (in.),\n Effective blade height, m (in.),\n HP: list\n Rated power per stage/impeller, W (HP),\n oper_speed: float\n Operating speed, rpm,\n RHO_ratio: list\n Density ratio between the discharge gas density and the suction\n gas density per impeller (RHO_discharge / RHO_suction),\n kg/m3 (lbm/in.3),\n RHOs: float\n Suction gas density in the first stage, kg/m3 (lbm/in.3).\n RHOd: float\n Discharge gas density in the last stage, kg/m3 (lbm/in.3),\n unit: str, optional\n Adopted unit system. Options are \"m\" (meter) and \"in\" (inch)\n Default is \"m\"\n\n Returns\n -------\n fig_ucs : list\n List with undamped critical speed map figures.\n fig_mode_shape : list\n List with mode shape figures.\n fig_unbalance : list\n List with unbalance response figures.\n df_unbalance : dataframe\n Dataframe for the unbalance response informations.\n fig_a_lvl1 : list\n List with \"Applied Cross-Coupled Stiffness\" (stability level 1) figures.\n fig_b_lvl1 : list\n List with \"CSR vs. 
Mean Gas Density\" (stability level 1) figures.\n df_lvl2 : dataframe\n Dataframe for the stability level 2 informations.\n summaries : pd.Dataframe\n Dataframes with a summary of stability level 1 and 2 analyses.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> D = [0.35, 0.35]\n >>> H = [0.08, 0.08]\n >>> HP = [10000, 10000]\n >>> RHO_ratio = [1.11, 1.14]\n >>> RHOd = 30.45\n >>> RHOs = 37.65\n >>> oper_speed = 1000.0\n >>> # to run the API report analysis, use:\n >>> # report.run(D, H, HP, oper_speed, RHO_ratio, RHOs, RHOd)\n \"\"\"\n fig_ucs = []\n fig_mode_shape = []\n fig_unbalance = []\n fig_a_lvl1 = []\n fig_b_lvl1 = []\n df_unbalance = []\n summaries = []\n\n rotor0 = self.rotor\n\n for bearings in self.bearing_clearance_lists:\n self.rotor = self.rotor_instance(rotor0, bearings)\n\n # undamped critical speed map\n fig_ucs.append(self.plot_ucs(stiffness_range=self.bearing_stiffness_range))\n\n for i, mode in enumerate([0, 2]):\n # mode shape figures\n fig_mode_shape.append(self.mode_shape(mode))\n\n # unbalance response figures and dataframe\n fig, _dict = self.unbalance_response(mode)\n fig_unbalance.append(fig)\n df = pd.DataFrame(_dict).astype(object)\n df_unbalance.append(df)\n\n # stability level 1 figures\n figs = self.stability_level_1(D, H, HP, oper_speed, RHO_ratio, RHOs, RHOd)\n fig_a_lvl1.append(figs[0])\n fig_b_lvl1.append(figs[1])\n\n # stability level 2 dataframe\n df_lvl2 = self.stability_level_2()\n\n # API summary tables\n summaries.append(self.summary())\n\n df_unbalance = pd.concat(df_unbalance)\n\n self.rotor = rotor0\n\n return (\n fig_ucs,\n fig_mode_shape,\n fig_unbalance,\n df_unbalance,\n fig_a_lvl1,\n fig_b_lvl1,\n df_lvl2,\n summaries,\n )\n\n def plot_ucs(self, stiffness_range=None, num=20):\n \"\"\"Plot undamped critical speed map.\n\n This method will plot the undamped critical speed map for a given range\n of stiffness values. 
If the range is not provided, the bearing\n stiffness at rated speed will be used to create a range.\n\n Parameters\n ----------\n stiffness_range : tuple, optional\n Tuple with (start, end) for stiffness range.\n num : int\n Number of steps in the range.\n Default is 20.\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> fig = report.plot_ucs(stiffness_range=(5, 8))\n \"\"\"\n if stiffness_range is None:\n if self.rotor.rated_w is not None:\n bearing = self.rotor.bearing_elements[0]\n k = bearing.kxx.interpolated(self.rotor.rated_w)\n k = int(np.log10(k))\n stiffness_range = (k - 3, k + 3)\n else:\n stiffness_range = (6, 11)\n\n stiffness_log = np.logspace(*stiffness_range, num=num)\n rotor_wn = np.zeros((4, len(stiffness_log)))\n\n bearings_elements = [] # exclude the seals\n for bearing in self.rotor.bearing_elements:\n if not isinstance(bearing, SealElement):\n bearings_elements.append(bearing)\n\n for i, k in enumerate(stiffness_log):\n bearings = [BearingElement(b.n, kxx=k, cxx=0) for b in bearings_elements]\n rotor = self.rotor.__class__(\n self.rotor.shaft_elements, self.rotor.disk_elements, bearings\n )\n modal = rotor.run_modal(speed=0, num_modes=16)\n rotor_wn[:, i] = modal.wn[:8:2]\n\n bearing0 = bearings_elements[0]\n\n fig = go.Figure()\n\n fig.add_trace(\n go.Scatter(\n x=bearing0.kxx.interpolated(bearing0.frequency),\n y=bearing0.frequency,\n mode=\"markers\",\n marker=dict(size=10, symbol=\"circle\", color=\"#888844\"),\n name=\"Kxx\",\n hovertemplate=(\"Kxx: %{x:.2e}<br>\" + \"Frequency: %{y:.2f}\"),\n )\n )\n fig.add_trace(\n go.Scatter(\n x=bearing0.kyy.interpolated(bearing0.frequency),\n y=bearing0.frequency,\n mode=\"markers\",\n marker=dict(size=10, symbol=\"square\", color=\"#888844\"),\n name=\"Kyy\",\n hovertemplate=(\"Kyy: %{x:.2e}<br>\" + \"Frequency: %{y:.2f}\"),\n )\n )\n\n # Speeds References\n fig.add_trace(\n go.Scatter(\n x=stiffness_log,\n y=[self.maxspeed] * num,\n mode=\"lines\",\n line=dict(dash=\"dot\", width=4, color=colors2[8]),\n name=\"MCS Speed\",\n hoverinfo=\"none\",\n )\n )\n fig.add_trace(\n go.Scatter(\n x=stiffness_log,\n y=[self.minspeed] * num,\n mode=\"lines\",\n line=dict(dash=\"dash\", width=4, color=colors2[8]),\n name=\"MOS Speed\",\n hoverinfo=\"none\",\n )\n )\n fig.add_trace(\n go.Scatter(\n x=stiffness_log,\n y=[self.tripspeed] * num,\n mode=\"lines\",\n line=dict(dash=\"dashdot\", width=4, color=colors2[8]),\n name=\"Trip Speed\",\n hoverinfo=\"none\",\n )\n )\n fig.add_trace(\n go.Scatter(\n x=stiffness_log,\n y=[self.speed_factor * self.tripspeed] * num,\n mode=\"lines\",\n line=dict(dash=\"longdash\", width=4, color=colors2[8]),\n name=\"{}% Trip Speed\".format(100 * self.speed_factor),\n hoverinfo=\"none\",\n )\n )\n for j in range(rotor_wn.T.shape[1]):\n fig.add_trace(\n go.Scatter(\n x=stiffness_log,\n y=np.transpose(rotor_wn.T)[j],\n mode=\"lines\",\n line=dict(width=4, color=colors1[j]),\n hoverinfo=\"none\",\n showlegend=False,\n )\n )\n fig.update_xaxes(\n title_text=\"<b>Bearing Stiffness</b>\",\n title_font=dict(size=16),\n tickfont=dict(size=14),\n gridcolor=\"lightgray\",\n showline=True,\n linewidth=2.5,\n linecolor=\"black\",\n mirror=True,\n type=\"log\",\n exponentformat=\"power\",\n )\n fig.update_yaxes(\n title_text=\"<b>Critical Speed</b>\",\n title_font=dict(size=16),\n tickfont=dict(size=14),\n gridcolor=\"lightgray\",\n showline=True,\n linewidth=2.5,\n linecolor=\"black\",\n 
mirror=True,\n type=\"log\",\n exponentformat=\"power\",\n )\n fig.update_layout(\n width=800,\n height=600,\n plot_bgcolor=\"white\",\n legend=dict(\n font=dict(family=\"sans-serif\", size=14),\n bgcolor=\"white\",\n bordercolor=\"black\",\n borderwidth=2,\n ),\n title=dict(text=\"<b>Undamped Critical Speed Map</b>\", font=dict(size=16)),\n )\n\n return fig\n\n def static_forces(self):\n \"\"\"Calculate the bearing reaction forces.\n\n Returns\n -------\n Fb : list\n Bearing reaction forces.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> report.static_forces()\n array([44.09320349, 44.09320349])\n \"\"\"\n # get reaction forces on bearings\n self.rotor.run_static()\n Fb = list(self.rotor.bearing_forces_nodal.values())\n Fb = np.array(Fb) / 9.8065\n\n return Fb\n\n def unbalance_forces(self, mode):\n \"\"\"Calculate the unbalance forces.\n\n The unbalance forces are calculated base on the rotor type:\n between_bearings :\n The unbalance forces derives from the reaction bearing forces.\n single_overung_l :\n The unbalance forces derives from the disk's masses on the\n shaft left end.\n single_overung_r :\n The unbalance forces derives from the disk's masses on the\n shaft right end.\n double_overung :\n The unbalance forces derives from the disk's masses on the\n shaft left and right ends.\n\n Parameters\n ----------\n mode : int\n n'th mode shape.\n\n Returns\n -------\n U : list\n Unbalancing forces.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> report.unbalance_forces(mode=0)\n [58.641354289961676]\n \"\"\"\n if mode > 3:\n raise ValueError(\n \"This module calculates only the response for the first \"\n \"two backward and forward modes. \"\n )\n\n N = 60 * self.maxspeed / (2 * np.pi)\n\n # get reaction forces on bearings\n if self.rotor_type == \"between_bearings\":\n Fb = self.static_forces()\n if mode == 0 or mode == 1:\n U_force = [max(6350 * np.sum(Fb) / N, 254e-6 * np.sum(Fb))]\n\n if mode == 2 or mode == 3:\n U_force = [max(6350 * f / N, 254e-6 * f) for f in Fb]\n\n # get disk masses\n elif self.rotor_type == \"single_overhung_l\":\n Wd = [\n disk.m\n for disk in self.rotor.disk_elements\n if disk.n < min(self.rotor.df_bearings[\"n\"])\n ]\n Ws = [\n sh.m\n for sh in self.rotor.shaft_elements\n if sh.n_l < min(self.rotor.df_bearings[\"n\"])\n ]\n W3 = np.sum(Wd + Ws)\n\n U_force = [6350 * W3 / N]\n\n elif self.rotor_type == \"single_overhung_r\":\n Wd = [\n disk.m\n for disk in self.rotor.disk_elements\n if disk.n > max(self.rotor.df_bearings[\"n\"])\n ]\n Ws = [\n sh.m\n for sh in self.rotor.shaft_elements\n if sh.n_r > max(self.rotor.df_bearings[\"n\"])\n ]\n W3 = np.sum(Wd + Ws)\n\n U_force = [6350 * W3 / N]\n\n elif self.rotor_type == \"double_overhung\":\n Wd_l = [\n disk.m\n for disk in self.rotor.disk_elements\n if disk.n < min(self.rotor.df_bearings[\"n\"])\n ]\n Ws_l = [\n sh.m\n for sh in self.rotor.shaft_elements\n if sh.n_l < min(self.rotor.df_bearings[\"n\"])\n ]\n Wd_r = [\n disk.m\n for disk in self.rotor.disk_elements\n if disk.n > max(self.rotor.df_bearings[\"n\"])\n ]\n Ws_r = [\n sh.m\n for sh in self.rotor.shaft_elements\n if sh.n_r > max(self.rotor.df_bearings[\"n\"])\n ]\n W3 = np.array([np.sum(Wd_l + Ws_l), np.sum(Wd_r + Ws_r)])\n\n U_force = 6350 * W3 / N\n\n self.U_force = U_force\n\n return U_force\n\n def unbalance_response(self, mode, samples=201):\n \"\"\"Evaluate the unbalance response for the rotor.\n\n This analysis takes the critical speeds of interest, calculates 
the\n position and weight of the required unbalance and performs the analysis\n including:\n - Check if vibration at MCS is below the limit with the applied weight;\n - Check if the clearances are ok if the vibration deteriorate to the\n limit level;\n\n Parameters\n ----------\n mode : int\n n'th mode shape.\n samples : int\n Number of samples to generate de frequency range.\n\n Returns\n -------\n subplots : Plotly graph_objects.make_subplots()\n Plotly figure with Amplitude vs Frequency and Phase vs Frequency plots.\n unbalance_dict : dict\n A dictionary with information about simulation parameters to be\n displayed in the report. The dictionary contains:\n - Mode number;\n - Critical frequencies;\n - Amplification factors;\n - Separation margins (actual and required);\n - Unbalance stations;\n - Unbalance weights;\n - Unbalance phases;\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> fig, unbalance_dict = report.unbalance_response(mode=0)\n \"\"\"\n maxspeed = self.maxspeed\n minspeed = self.minspeed\n freq_range = np.linspace(0, self.speed_factor * maxspeed, 201)\n\n # returns de nodes where forces will be applied\n self.mode_shape(mode)\n node_min = self.node_min\n node_max = self.node_max\n nodes = [int(node) for sub_nodes in [node_min, node_max] for node in sub_nodes]\n\n force = self.unbalance_forces(mode)\n\n phase = []\n phase_angle = 0\n for node in nodes:\n phase.append(phase_angle)\n phase_angle += np.pi\n\n unbalance_dict = {\n \"Mode\": mode + 1,\n \"Frequency\": [],\n \"Amplification factor\": [],\n \"Separation margin - ACTUAL\": [],\n \"Separation margin - REQUIRED\": [],\n \"Unbalance station(s)\": [nodes],\n \"Unbalance weight(s)\": [force],\n \"Unbalance phase(s)\": [phase],\n }\n\n response = self.rotor.run_unbalance_response(nodes, force, phase, freq_range)\n mag = response.magnitude\n\n for node in nodes:\n dof = 4 * node + 1\n mag_plot = response.plot_magnitude([(node, np.pi / 2)])\n phs_plot = response.plot_phase([(node, np.pi / 2)])\n\n magnitude = mag[dof]\n idx_max = argrelextrema(magnitude, np.greater)[0].tolist()\n wn = freq_range[idx_max]\n\n for i, peak in enumerate(magnitude[idx_max]):\n peak_n = 0.707 * peak\n peak_aux = np.linspace(peak_n, peak_n, len(freq_range))\n\n idx = np.argwhere(np.diff(np.sign(peak_aux - magnitude))).flatten()\n idx = np.sort(np.append(idx, idx_max[i]))\n\n # if speed range is not long enough to catch the magnitudes\n try:\n idx_aux = [\n list(idx).index(idx_max[i]) - 1,\n list(idx).index(idx_max[i]) + 1,\n ]\n idx = idx[idx_aux]\n except IndexError:\n idx = [list(idx).index(idx_max[i]) - 1, len(freq_range) - 1]\n\n # Amplification Factor (AF) - API684 - SP6.8.2.1\n AF = wn[i] / (freq_range[idx[1]] - freq_range[idx[0]])\n\n # Separation Margin (SM) - API684 - SP6.8.2.10\n if AF > 2.5 and wn[i] < minspeed:\n SM = min([16, 17 * (1 - 1 / (AF - 1.5))]) / 100\n SMspeed = wn[i] * (1 + SM)\n SM_ref = (minspeed - wn[i]) / wn[i]\n\n hovertemplate = (\n f\"<b>Critical Speed: {wn[i]:.2f}<b><br>\"\n + f\"<b>Speed at 0.707 x amplitude peak: {SMspeed:.2f}<b><br>\"\n )\n mag_plot.add_trace(\n go.Scatter(\n x=[wn[i], SMspeed, SMspeed, wn[i], wn[i]],\n y=[0, 0, max(magnitude[idx_max]), max(magnitude[idx_max]), 0],\n text=hovertemplate,\n mode=\"lines\",\n opacity=0.3,\n fill=\"toself\",\n fillcolor=colors1[3],\n line=dict(width=1.5, color=colors1[3]),\n showlegend=True if i == 0 else False,\n name=\"Separation Margin\",\n legendgroup=\"Separation Margin\",\n hoveron=\"points+fills\",\n hoverinfo=\"text\",\n 
hovertemplate=hovertemplate,\n hoverlabel=dict(bgcolor=colors1[3]),\n )\n )\n\n elif AF > 2.5 and wn[i] > maxspeed:\n SM = min([26, 10 + 17 * (1 - 1 / (AF - 1.5))]) / 100\n SMspeed = wn[i] * (1 - SM)\n SM_ref = (wn[i] - maxspeed) / maxspeed\n\n hovertemplate = (\n f\"<b>Critical Speed: {wn[i]:.2f}<b><br>\"\n + f\"<b>Speed at 0.707 x amplitude peak: {SMspeed:.2f}<b><br>\"\n )\n mag_plot.add_trace(\n go.Scatter(\n x=[SMspeed, wn[i], wn[i], SMspeed, SMspeed],\n y=[0, 0, max(magnitude[idx_max]), max(magnitude[idx_max]), 0],\n text=hovertemplate,\n mode=\"lines\",\n opacity=0.3,\n fill=\"toself\",\n fillcolor=colors1[3],\n line=dict(width=1.5, color=colors1[3]),\n showlegend=True if i == 0 else False,\n name=\"Separation Margin\",\n legendgroup=\"Separation Margin\",\n hoveron=\"points+fills\",\n hoverinfo=\"text\",\n hovertemplate=hovertemplate,\n hoverlabel=dict(bgcolor=colors1[3]),\n )\n )\n\n else:\n SM = None\n SM_ref = None\n SMspeed = None\n\n unbalance_dict[\"Amplification factor\"].append(AF)\n unbalance_dict[\"Separation margin - ACTUAL\"].append(SM)\n unbalance_dict[\"Separation margin - REQUIRED\"].append(SM_ref)\n unbalance_dict[\"Frequency\"].append(wn[i])\n\n # amplitude limit in micrometers (A1) - API684 - SP6.8.2.11\n A1 = 25.4 * np.sqrt(12000 / (30 * maxspeed / np.pi))\n\n Amax = max(mag[dof])\n\n # Scale Factor (Scc) - API684 - SP6.8.2.11 / API617 - 4.8.2.11\n Scc = max(A1 / Amax, 0.5)\n Scc = min(Scc, 6.0)\n\n mag_plot.add_trace(\n go.Scatter(\n x=[minspeed, maxspeed, maxspeed, minspeed, minspeed],\n y=[0, 0, max(mag[dof]), max(mag[dof]), 0],\n text=\"Operation Speed Range\",\n mode=\"lines\",\n opacity=0.3,\n fill=\"toself\",\n fillcolor=colors1[2],\n line=dict(width=1.5, color=colors1[2]),\n name=\"Operation Speed Range\",\n legendgroup=\"Operation Speed Range\",\n hoveron=\"points+fills\",\n hoverinfo=\"text\",\n hoverlabel=dict(bgcolor=colors1[2]),\n )\n )\n mag_plot.add_trace(\n go.Scatter(\n x=[minspeed, maxspeed],\n y=[A1, A1],\n mode=\"lines\",\n line=dict(width=2.0, color=colors1[5], dash=\"dashdot\"),\n name=\"Av1 - Mechanical test vibration limit\",\n hoverinfo=\"none\",\n )\n )\n mag_plot.add_annotation(\n x=(minspeed + maxspeed) / 2,\n y=A1,\n axref=\"x\",\n ayref=\"y\",\n xshift=0,\n yshift=10,\n text=\"<b>Av1</b>\",\n font=dict(size=18),\n showarrow=False,\n )\n mag_plot[\"data\"][0][\"line\"] = dict(width=4.0, color=colors1[5])\n phs_plot[\"data\"][0][\"line\"] = dict(width=4.0, color=colors1[5])\n\n subplots = make_subplots(rows=2, cols=1)\n for data in mag_plot[\"data\"]:\n subplots.add_trace(data, row=1, col=1)\n for data in phs_plot[\"data\"]:\n subplots.add_trace(data, row=2, col=1)\n\n subplots.update_xaxes(mag_plot.layout.xaxis, row=1, col=1)\n subplots.update_yaxes(mag_plot.layout.yaxis, row=1, col=1)\n subplots.update_xaxes(phs_plot.layout.xaxis, row=2, col=1)\n subplots.update_yaxes(phs_plot.layout.yaxis, row=2, col=1)\n subplots.update_layout(\n width=1800,\n height=900,\n plot_bgcolor=\"white\",\n hoverlabel_align=\"right\",\n legend=dict(\n itemsizing=\"constant\",\n bgcolor=\"white\",\n borderwidth=2,\n font=dict(size=14),\n ),\n )\n\n return subplots, unbalance_dict\n\n def mode_shape(self, mode):\n \"\"\"Evaluate the mode shapes for the rotor.\n\n This analysis presents the vibration mode for each critical speed.\n The importance is to locate the critical node, where the displacement\n is the greatest, then apply loads for unbalance response (stability\n level 1)\n\n Parameters\n ----------\n mode : int\n the n'th vibration mode\n\n 
Attributes\n ----------\n node_min : int\n Nodes where the maximum displacements occur\n node_max : int\n Nodes where the minimum displacements occur\n\n Returns\n -------\n fig : Plotly graph_objects.Figure()\n The figure object with the plot.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> fig = report.mode_shape(mode=0)\n >>> report.node_min\n array([], dtype=float64)\n >>> report.node_max\n array([3.])\n \"\"\"\n nodes_pos = self.rotor.nodes_pos\n df_bearings = self.rotor.df_bearings\n df_disks = self.rotor.df_disks\n\n modal = self.rotor.run_modal(speed=self.maxspeed)\n xn, yn, zn, xc, yc, zc_pos, nn = modal.calc_mode_shape(mode=mode)\n\n # reduce 3D view to 2D view\n vn = np.zeros(len(zn))\n for i in range(len(zn)):\n theta = np.arctan(xn[i] / yn[i])\n vn[i] = xn[i] * np.sin(theta) + yn[i] * np.cos(theta)\n\n # remove repetitive values from zn and vn\n idx_remove = []\n for i in range(1, len(zn)):\n if zn[i] == zn[i - 1]:\n idx_remove.append(i)\n zn = np.delete(zn, idx_remove)\n vn = np.delete(vn, idx_remove)\n\n node_min = np.array([])\n node_max = np.array([])\n\n if self.rotor_type == \"between_bearings\":\n\n aux_idx_max = argrelextrema(vn, np.greater)[0].tolist()\n aux_idx_min = argrelextrema(vn, np.less)[0].tolist()\n\n # verification of rigid modes\n if len(aux_idx_max) == 0 and len(aux_idx_min) == 0:\n idx_max = np.argmax(vn)\n idx_min = np.argmin(vn)\n\n # corrects the index by the removed points\n for i in idx_remove:\n if idx_min > i:\n idx_min += 1\n if idx_max > i:\n idx_max += 1\n node_max = np.round(np.array([idx_max]) / nn)\n node_min = np.round(np.array([idx_min]) / nn)\n\n if len(aux_idx_min) != 0:\n idx_min = np.where(vn == min(vn[aux_idx_min]))[0].tolist()\n\n # corrects the index by the removed points\n for i in idx_remove:\n if idx_min[0] > i:\n idx_min[0] += 1\n node_min = np.round(np.array(idx_min) / nn)\n\n if len(aux_idx_max) != 0:\n idx_max = np.where(vn == max(vn[aux_idx_max]))[0].tolist()\n\n # corrects the index by the removed points\n for i in idx_remove:\n if idx_max[0] > i:\n idx_max[0] += 1\n node_max = np.round(np.array(idx_max) / nn)\n\n elif self.rotor_type == \"double_overhung\":\n node_max = [max(df_disks[\"n\"])]\n node_min = [min(df_disks[\"n\"])]\n\n elif self.rotor_type == \"single_overhung_l\":\n node_min = [min(df_disks[\"n\"])]\n\n elif self.rotor_type == \"single_overhung_r\":\n node_max = [max(df_disks[\"n\"])]\n\n nodes_pos = np.array(nodes_pos)\n rpm_speed = (30 / np.pi) * modal.wn[mode]\n\n self.node_min = node_min\n self.node_max = node_max\n\n fig = go.Figure()\n fig.add_trace(\n go.Scatter(\n x=zn,\n y=vn,\n mode=\"lines\",\n line=dict(width=4, color=colors1[3]),\n name=\"<b>Mode {}</b><br><b>Speed = {:.1f} RPM</b>\".format(\n mode, rpm_speed\n ),\n hovertemplate=\"Axial position: %{x:.2f}<br>Deformation: %{y:.2f}\",\n )\n )\n fig.add_trace(\n go.Scatter(\n x=nodes_pos,\n y=np.zeros(len(nodes_pos)),\n mode=\"lines\",\n line=dict(width=4, color=colors1[5], dash=\"dashdot\"),\n name=\"centerline\",\n hoverinfo=\"none\",\n showlegend=False,\n )\n )\n fig.add_trace(\n go.Scatter(\n x=nodes_pos[df_bearings[\"n\"]],\n y=np.zeros(len(df_bearings)),\n mode=\"markers\",\n marker=dict(size=12, color=colors1[5]),\n name=\"bearing_node\",\n showlegend=False,\n hovertemplate=\"Bearing Position: %{x:.2f}\",\n )\n )\n\n pos0 = nodes_pos[min(df_bearings[\"n\"])]\n pos1 = nodes_pos[max(df_bearings[\"n\"])]\n fig.add_annotation(\n x=np.mean(nodes_pos[df_bearings[\"n\"]]),\n y=0,\n axref=\"x\",\n ayref=\"y\",\n 
xshift=0,\n yshift=20,\n text=\"<b>Bearing Span = {:.2f}</b>\".format(pos1 - pos0),\n font=dict(size=18),\n showarrow=False,\n )\n\n for node in nodes_pos[df_bearings[\"n\"]]:\n fig.add_trace(\n go.Scatter(\n x=[node, node],\n y=[-2, 2],\n mode=\"lines\",\n line=dict(width=2.5, color=colors1[5], dash=\"dash\"),\n name=\"Span\",\n legendgroup=\"Span\",\n hoverinfo=\"none\",\n showlegend=False,\n )\n )\n\n fig.update_xaxes(\n title_text=\"<b>Rotor lenght</b>\",\n title_font=dict(family=\"Arial\", size=20),\n tickfont=dict(size=16),\n gridcolor=\"lightgray\",\n showline=True,\n linewidth=2.5,\n linecolor=\"black\",\n mirror=True,\n )\n fig.update_yaxes(\n title_text=\"<b>Non dimensional deformation</b>\",\n title_font=dict(family=\"Arial\", size=20),\n tickfont=dict(size=16),\n range=[-2, 2],\n gridcolor=\"lightgray\",\n showline=True,\n linewidth=2.5,\n linecolor=\"black\",\n mirror=True,\n )\n fig.update_layout(\n width=1200,\n height=900,\n plot_bgcolor=\"white\",\n hoverlabel_align=\"right\",\n title=dict(\n text=\"<b>Undamped Mode Shape</b>\".format(node), font=dict(size=20)\n ),\n )\n\n return fig\n\n def stability_level_1(self, D, H, HP, oper_speed, RHO_ratio, RHOs, RHOd, unit=\"m\"):\n \"\"\"Stability analysis level 1.\n\n This analysis consider a anticipated cross coupling QA based on\n conditions at the normal operating point and the cross-coupling\n required to produce a zero log decrement, Q0.\n\n Components such as seals and impellers are not considered in this\n analysis.\n\n Parameters\n ----------\n D: list\n Impeller diameter, m (in.),\n Blade pitch diameter, m (in.),\n H: list\n Minimum diffuser width per impeller, m (in.),\n Effective blade height, m (in.),\n HP: list\n Rated power per stage/impeller, W (HP),\n oper_speed: float\n Operating speed, rpm,\n RHO_ratio: list\n Density ratio between the discharge gas density and the suction\n gas density per impeller (RHO_discharge / RHO_suction),\n kg/m3 (lbm/in.3),\n RHOs: float\n Suction gas density in the first stage, kg/m3 (lbm/in.3).\n RHOd: float\n Discharge gas density in the last stage, kg/m3 (lbm/in.3),\n unit: str, optional\n Adopted unit system. Options are \"m\" (meter) and \"in\" (inch)\n Default is \"m\"\n\n Attributes\n ----------\n condition: bool\n False: Stability Level 1 satisfies the analysis;\n True: Stability Level 2 is required.\n\n Return\n ------\n fig1 : Plotly graph_objects.Figure()\n Applied Cross-Coupled Stiffness vs. Log Decrement plot.\n fig2 : Plotly graph_objects.Figure()\n CSR vs. Mean Gas Density plot.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> fig1, fig2 = report.stability_level_1(D=[0.35, 0.35],\n ... H=[0.08, 0.08],\n ... HP=[10000, 10000],\n ... RHO_ratio=[1.11, 1.14],\n ... RHOd=30.45,\n ... RHOs=37.65,\n ... 
oper_speed=1000.0)\n >>> report.Qa\n 23022.32142857143\n \"\"\"\n steps = 11\n if unit == \"m\":\n C = 9.55\n elif unit == \"in\":\n C = 63.0\n else:\n raise TypeError(\"choose between meters (m) or inches (in)\")\n\n if len(D) != len(H):\n raise Exception(\"length of D must be the same of H\")\n\n Qa = 0.0\n cross_coupled_array = np.array([])\n # Qa - Anticipated cross-coupling for compressors - API 684 - SP6.8.5.6\n if self.machine_type == \"compressor\":\n Bc = 3.0\n Dc, Hc = D, H\n for i, disk in enumerate(self.rotor.disk_elements):\n if disk.n in self.disk_nodes:\n qi = HP[i] * Bc * C * RHO_ratio[i] / (Dc[i] * Hc[i] * oper_speed)\n Qi = np.linspace(0, 10 * qi, steps)\n cross_coupled_array = np.append(cross_coupled_array, Qi)\n Qa += qi\n\n # Qa - Anticipated cross-coupling for turbines - API 684 - SP6.8.5.6\n if self.machine_type == \"turbine\" or self.machine_type == \"axial_flow\":\n Bt = 1.5\n Dt, Ht = D, H\n for i, disk in enumerate(self.rotor.disk_elements):\n if disk.n in self.disk_nodes:\n qi = (HP[i] * Bt * C) / (Dt[i] * Ht[i] * oper_speed)\n Qi = np.linspace(0, 10 * qi, steps)\n cross_coupled_array = np.append(cross_coupled_array, Qi)\n Qa += qi\n\n # Defining cross-coupling range to 10*Qa - API 684 - SP6.8.5.8\n Qi = np.linspace(0, 10 * Qa, steps)\n cross_coupled_array = np.append(cross_coupled_array, Qi)\n cross_coupled_array = cross_coupled_array.reshape(\n [len(self.disk_nodes) + 1, steps]\n ).T\n\n log_dec = np.zeros(len(cross_coupled_array))\n\n # remove disks and seals from the rotor model\n bearing_list = [\n copy(b)\n for b in self.rotor.bearing_elements\n if not isinstance(b, SealElement)\n ]\n\n # Applying cross-coupling on rotor mid-span\n if self.rotor_type == \"between_bearings\":\n for i, Q in enumerate(cross_coupled_array[:, -1]):\n bearings = [copy(b) for b in bearing_list]\n\n # cross-coupling introduced at the rotor mid-span\n n = np.round(np.mean(self.rotor.nodes))\n cross_coupling = BearingElement(n=int(n), kxx=0, cxx=0, kxy=Q, kyx=-Q)\n bearings.append(cross_coupling)\n\n aux_rotor = Rotor(\n shaft_elements=self.rotor.shaft_elements,\n disk_elements=[],\n bearing_elements=bearings,\n rated_w=self.rotor.rated_w,\n )\n modal = aux_rotor.run_modal(speed=oper_speed * np.pi / 30)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec[i] = modal.log_dec[non_backward][0]\n\n # Applying cross-coupling for each disk - API 684 - SP6.8.5.9\n else:\n for i, Q in enumerate(cross_coupled_array[:, :-1]):\n bearings = [copy(b) for b in bearing_list]\n # cross-coupling introduced at overhung disks\n for n, q in zip(self.disk_nodes, Q):\n cross_coupling = BearingElement(n=n, kxx=0, cxx=0, kxy=q, kyx=-q)\n bearings.append(cross_coupling)\n\n aux_rotor = Rotor(\n shaft_elements=self.rotor.shaft_elements,\n disk_elements=[],\n bearing_elements=bearings,\n rated_w=self.rotor.rated_w,\n )\n modal = aux_rotor.run_modal(speed=oper_speed * np.pi / 30)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec[i] = modal.log_dec[non_backward][0]\n\n # verifies if log dec is greater than zero to begin extrapolation\n cross_coupled_Qa = cross_coupled_array[:, -1]\n if log_dec[-1] >= 0:\n g = interp1d(\n cross_coupled_Qa, log_dec, fill_value=\"extrapolate\", kind=\"linear\"\n )\n stiff = cross_coupled_Qa[-1] * (1 + 1 / (len(cross_coupled_Qa)))\n while g(stiff) > 0:\n log_dec = np.append(log_dec, g(stiff))\n cross_coupled_Qa = np.append(cross_coupled_Qa, stiff)\n stiff += cross_coupled_Qa[-1] / (len(cross_coupled_Qa))\n Q0 = cross_coupled_Qa[-1]\n\n else:\n idx = 
min(range(len(log_dec)), key=lambda i: abs(log_dec[i]))\n Q0 = cross_coupled_Qa[idx]\n\n # Find value for log_dec corresponding to Qa\n log_dec_a = log_dec[np.where(cross_coupled_Qa == Qa)][0]\n\n # CSR - Critical Speed Ratio\n crit_speed = self.rotor.run_modal(speed=self.maxspeed).wn[0]\n CSR = self.maxspeed / crit_speed\n\n # RHO_mean - Average gas density\n RHO_mean = (RHOd + RHOs) / 2\n RHO = np.linspace(0, RHO_mean * 5, 501)\n\n # CSR_boundary - function to define the CSR boundaries\n CSR_boundary = np.piecewise(\n RHO,\n [RHO <= 16.53, RHO > 16.53, RHO == 60, RHO > 60],\n [2.5, lambda RHO: (-0.0115 * RHO + 2.69), 2.0, 0.0],\n )\n\n # Plotting area\n\n fig1 = go.Figure()\n\n fig1.add_trace(\n go.Scatter(\n x=cross_coupled_Qa,\n y=log_dec,\n mode=\"lines\",\n showlegend=False,\n hoverinfo=\"none\",\n )\n )\n fig1.add_trace(\n go.Scatter(\n x=[Qa],\n y=[log_dec_a],\n mode=\"markers\",\n name=\"<b>Qa: Anticipated cross-coupling</b>\",\n hoverinfo=\"none\",\n )\n )\n fig1.add_annotation(\n x=Qa,\n y=log_dec_a,\n axref=\"x\",\n ayref=\"y\",\n xshift=15,\n yshift=15,\n text=\"<b>Qa</b>\",\n showarrow=False,\n )\n fig1.update_xaxes(\n title_text=\"<b>Applied Cross-Coupled Stiffness, Q (N/m)</b>\",\n rangemode=\"nonnegative\",\n )\n fig1.update_yaxes(title_text=\"<b>Log Dec</b>\", rangemode=\"nonnegative\")\n fig1.update_layout(\n title=dict(\n text=(\n \"<b>Applied Cross-Coupled Stiffness vs. Log Decrement</b><br>\"\n + \"<b>(API 684 - SP 6.8.5.10)</b>\"\n )\n )\n )\n\n fig2 = go.Figure()\n fig2.add_annotation(\n x=RHO_mean,\n y=CSR,\n axref=\"x\",\n ayref=\"y\",\n xshift=40,\n yshift=0,\n text=\"<b>{}</b>\".format(self.tag),\n showarrow=False,\n )\n\n for text, x, y in zip([\"Region A\", \"Region B\"], [30, 60], [1.20, 2.75]):\n fig2.add_annotation(\n x=x,\n y=y,\n axref=\"x\",\n ayref=\"y\",\n xshift=0,\n yshift=0,\n text=f\"<b>{text}</b>\",\n opacity=0.4,\n showarrow=False,\n )\n\n fig2.add_trace(\n go.Scatter(\n x=RHO,\n y=CSR_boundary,\n mode=\"lines\",\n showlegend=False,\n hoverinfo=\"none\",\n xaxis=\"x\",\n )\n )\n fig2.add_trace(\n go.Scatter(\n x=0.062428 * RHO,\n y=CSR_boundary,\n mode=\"lines\",\n showlegend=False,\n hoverinfo=\"none\",\n xaxis=\"x2\",\n )\n )\n fig2.add_trace(\n go.Scatter(\n x=[RHO_mean],\n y=[CSR],\n mode=\"markers\",\n name=\"<b>CSR: Critical Speed Ratio</b>\",\n hoverinfo=\"none\",\n xaxis=\"x\",\n )\n )\n\n fig2.update_xaxes(mirror=True)\n fig2.update_yaxes(\n title_text=\"<b>Maximum Critical Speed Ratio</b>\",\n rangemode=\"nonnegative\",\n domain=[0.1, 1],\n )\n fig2.update_layout(\n xaxis=dict(\n title_text=\"<b>kg/m³</b>\",\n rangemode=\"nonnegative\",\n overlaying=\"x2\",\n anchor=\"y\",\n ),\n xaxis2=dict(\n title_text=\"<b>lb/ft³</b>\",\n rangemode=\"nonnegative\",\n anchor=\"free\",\n side=\"bottom\",\n position=0,\n ),\n title=dict(\n text=(\n \"<b>CSR vs. 
Mean Gas Density</b><br>\"\n + \"<b>(API 684 - SP 6.8.5.10)</b>\"\n )\n ),\n )\n\n # Level 1 screening criteria - API 684 - SP6.8.5.10\n idx = min(range(len(RHO)), key=lambda i: abs(RHO[i] - RHO_mean))\n\n if self.machine_type == \"compressor\":\n if Q0 / Qa < 2.0:\n condition = True\n\n if log_dec_a < 0.1:\n condition = True\n\n if 2.0 < Q0 / Qa < 10.0 and CSR > CSR_boundary[idx]:\n condition = True\n\n else:\n condition = False\n\n if self.machine_type == \"turbine\" or self.machine_type == \"axial flow\":\n if log_dec_a < 0.1:\n condition = True\n\n else:\n condition = False\n\n # updating attributes\n self.Q0 = Q0\n self.Qa = Qa\n self.log_dec_a = log_dec_a\n self.CSR = CSR\n self.Qratio = Q0 / Qa\n self.crit_speed = crit_speed\n self.MCS = self.maxspeed\n self.RHO_gas = RHO_mean\n self.condition = condition\n\n return fig1, fig2\n\n def stability_level_2(self):\n \"\"\"Stability analysis level 2.\n\n For the level 2 stability analysis additional sources that contribute\n to the rotor stability shall be considered such as:\n a) labyrinth seals;\n b) damper seals;\n c) impeller/blade flow aerodynamic effects;\n d) internal friction.\n\n Returns\n -------\n df_logdec: pd.DataFrame\n A dataframe relating the logarithmic decrement for each case analyzed.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> dataframe = report.stability_level_2()\n \"\"\"\n # Build a list of seals\n seal_list = [\n copy(b) for b in self.rotor.bearing_elements if isinstance(b, SealElement)\n ]\n\n bearing_list = [\n copy(b)\n for b in self.rotor.bearing_elements\n if not isinstance(b, SealElement)\n ]\n\n log_dec_seal = []\n log_dec_disk = []\n log_dec_full = []\n data_seal = {}\n data_disk = {}\n data_rotor = {}\n\n # Evaluate log dec for each component - Disks\n if len(self.rotor.disk_elements):\n for disk in self.rotor.disk_elements:\n aux_rotor = Rotor(\n shaft_elements=self.rotor.shaft_elements,\n disk_elements=[disk],\n bearing_elements=bearing_list,\n rated_w=self.maxspeed,\n )\n modal = aux_rotor.run_modal(speed=self.maxspeed)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec_disk.append(modal.log_dec[non_backward][0])\n\n # Evaluate log dec for group bearings + disks\n disk_tags = [\n \"Shaft + Bearings + \" + disk.tag for disk in self.rotor.disk_elements\n ]\n\n # Evaluate log dec for group bearings + all disks\n if len(self.rotor.disk_elements) > 1:\n all_disks_tag = \" + \".join(\n [disk.tag for disk in self.rotor.disk_elements]\n )\n disk_tags.append(\"Shaft + Bearings + \" + all_disks_tag)\n\n aux_rotor = Rotor(\n shaft_elements=self.rotor.shaft_elements,\n disk_elements=self.rotor.disk_elements,\n bearing_elements=bearing_list,\n rated_w=self.maxspeed,\n )\n modal = aux_rotor.run_modal(speed=self.maxspeed)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec_disk.append(modal.log_dec[non_backward][0])\n\n data_disk = {\"tags\": disk_tags, \"log_dec\": log_dec_disk}\n\n # Evaluate log dec for each component - Seals\n if len(seal_list):\n for seal in seal_list:\n bearings_seal = deepcopy(bearing_list)\n bearings_seal.append(seal)\n\n aux_rotor = Rotor(\n shaft_elements=self.rotor.shaft_elements,\n disk_elements=[],\n bearing_elements=bearings_seal,\n rated_w=self.maxspeed,\n )\n modal = aux_rotor.run_modal(speed=self.maxspeed)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec_seal.append(modal.log_dec[non_backward][0])\n\n seal_tags = [\"Shaft + Bearings + \" + seal.tag for seal in seal_list]\n\n if len(seal_list) > 
1:\n # Evaluate log dec for group bearings + seals\n all_seals_tag = \" + \".join([seal.tag for seal in seal_list])\n seal_tags.append(\"Shaft + Bearings + \" + all_seals_tag)\n\n aux_rotor = Rotor(\n shaft_elements=self.rotor.shaft_elements,\n disk_elements=[],\n bearing_elements=self.rotor.bearing_elements,\n rated_w=self.maxspeed,\n )\n modal = aux_rotor.run_modal(speed=self.maxspeed)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec_seal.append(modal.log_dec[non_backward][0])\n\n data_seal = {\"tags\": seal_tags, \"log_dec\": log_dec_seal}\n\n # Evaluate log dec for all components\n modal = self.rotor.run_modal(speed=self.maxspeed)\n non_backward = modal.whirl_direction() != \"Backward\"\n log_dec_full.append(modal.log_dec[non_backward][0])\n rotor_tags = [self.tag]\n\n data_rotor = {\"tags\": rotor_tags, \"log_dec\": log_dec_full}\n\n df_logdec_disk = pd.DataFrame(data_disk)\n df_logdec_seal = pd.DataFrame(data_seal)\n df_logdec_full = pd.DataFrame(data_rotor)\n df_logdec = pd.concat([df_logdec_disk, df_logdec_seal, df_logdec_full])\n df_logdec = df_logdec.reset_index(drop=True)\n\n self.df_logdec_disk = df_logdec_disk\n self.df_logdec_seal = df_logdec_seal\n self.df_logdec_full = df_logdec_full\n self.df_logdec = df_logdec\n\n return df_logdec\n\n def summary(self):\n \"\"\"Return datarfreames for Report summary.\n\n This method will create dataframes with relevant info about the report.\n\n Returns\n -------\n df_stab_lvl1 : pd.DataFrame\n Dataframe with stability level 1 results\n df_stab_lvl2 : pd.DataFrame\n Dataframe with stability level 2 results\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> stability1 = report.stability_level_1(D=[0.35, 0.35],\n ... H=[0.08, 0.08],\n ... HP=[10000, 10000],\n ... RHO_ratio=[1.11, 1.14],\n ... RHOd=30.45,\n ... RHOs=37.65,\n ... oper_speed=1000.0)\n >>> stability2 = report.stability_level_2()\n >>> df_lvl1, df_lvl2 = report.summary()\n \"\"\"\n stab_lvl1_data = dict(\n tags=[self.tag],\n machine_type=[self.machine_type],\n Q0=[self.Q0],\n Qa=[self.Qa],\n log_dec_a=[self.log_dec_a],\n Qratio=[self.Qratio],\n crti_speed=[self.crit_speed],\n MCS=[self.MCS],\n CSR=[self.CSR],\n RHO_gas=[self.RHO_gas],\n )\n stab_lvl2_data = dict(\n tags=self.df_logdec[\"tags\"], logdec=self.df_logdec[\"log_dec\"]\n )\n\n df_stab_lvl1 = pd.DataFrame(stab_lvl1_data)\n df_stab_lvl2 = pd.DataFrame(stab_lvl2_data)\n\n return df_stab_lvl1, df_stab_lvl2\n\n def plot_summary(self):\n \"\"\"Plot the report .\n\n This method will create tables to be presented in the report.\n\n Returns\n -------\n fig : Plotly graph_objects.make_subplots()\n The figure object with the tables.\n\n Example\n -------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> stability1 = report.stability_level_1(D=[0.35, 0.35],\n ... H=[0.08, 0.08],\n ... HP=[10000, 10000],\n ... RHO_ratio=[1.11, 1.14],\n ... RHOd=30.45,\n ... RHOs=37.65,\n ... 
oper_speed=1000.0)\n >>> stability2 = report.stability_level_2()\n >>> table = report.plot_summary()\n \"\"\"\n stab_lvl1_data, stab_lvl2_data = self.summary()\n for var in stab_lvl1_data.columns[2:]:\n stab_lvl1_data[str(var)] = np.round(stab_lvl1_data[str(var)], 3)\n\n stab_lvl2_data[\"logdec\"] = np.round(stab_lvl2_data[\"logdec\"], 4)\n\n stab_lvl1_titles = [\n \"<b>Rotor Tag</b>\",\n \"<b>Machine Type</b>\",\n \"<b>Q_0</b>\",\n \"<b>Q_A</b>\",\n \"<b>log dec (δ)</b>\",\n \"<b>Q_0 / Q_A</b>\",\n \"<b>1st Critical Spped</b>\",\n \"<b>MCS</b>\",\n \"<b>CSR</b>\",\n \"<b>Gas Density</b>\",\n ]\n stab_lvl2_titles = [\"<b>Components</b>\", \"<b>Log. Dec.</b>\"]\n\n fig = make_subplots(\n rows=2,\n cols=1,\n specs=[[{\"type\": \"table\"}], [{\"type\": \"table\"}]],\n subplot_titles=[\"<b>Stability Level 1</b>\", \"<b>Stability Level 2</b>\"],\n )\n\n colors = [\"#ffffff\", \"#c4d9ed\"]\n cell_colors = [colors[i % 2] for i in range(len(stab_lvl1_data[\"tags\"]))]\n fig.add_trace(\n go.Table(\n header=dict(\n values=stab_lvl1_titles,\n font=dict(family=\"Verdana\", size=14, color=\"white\"),\n line=dict(color=\"#1e4162\", width=1.5),\n fill=dict(color=\"#1e4162\"),\n align=\"center\",\n ),\n cells=dict(\n values=[stab_lvl1_data[str(var)] for var in stab_lvl1_data.columns],\n font=dict(family=\"Verdana\", size=14, color=\"#12263b\"),\n line=dict(color=\"#c4d9ed\", width=1.5),\n fill=dict(color=[cell_colors * len(stab_lvl1_data[\"tags\"])]),\n align=\"center\",\n height=25,\n ),\n ),\n row=1,\n col=1,\n )\n\n cell_colors = [colors[i % 2] for i in range(len(stab_lvl2_data[\"tags\"]))]\n fig.add_trace(\n go.Table(\n header=dict(\n values=stab_lvl2_titles,\n font=dict(family=\"Verdana\", size=14, color=\"white\"),\n line=dict(color=\"#1e4162\", width=1.5),\n fill=dict(color=\"#1e4162\"),\n align=\"center\",\n ),\n cells=dict(\n values=[stab_lvl2_data[str(var)] for var in stab_lvl2_data.columns],\n font=dict(family=\"Verdana\", size=14, color=\"#12263b\"),\n line=dict(color=\"#c4d9ed\", width=1.5),\n fill=dict(color=[cell_colors * len(stab_lvl2_data[\"tags\"])]),\n align=\"center\",\n height=25,\n ),\n ),\n row=2,\n col=1,\n )\n\n return fig\n\n\ndef report_example():\n \"\"\"Build a report example.\n\n This function returns an instance of a simple report from a rotor\n example. 
The purpose of this is to make available a simple model\n so that doctest can be written using this.\n\n Returns\n -------\n An instance of a report object.\n\n Examples\n --------\n >>> import ross as rs\n >>> report = rs.report_example()\n >>> report.rotor_type\n 'between_bearings'\n \"\"\"\n i_d = 0\n o_d = 0.05\n n = 6\n L = [0.25 for _ in range(n)]\n\n shaft_elem = [\n ShaftElement(\n l,\n i_d,\n o_d,\n material=steel,\n shear_effects=True,\n rotary_inertia=True,\n gyroscopic=True,\n )\n for l in L\n ]\n\n disk0 = DiskElement.from_geometry(\n n=2, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n disk1 = DiskElement.from_geometry(\n n=4, material=steel, width=0.07, i_d=0.05, o_d=0.28\n )\n\n stfx = [0.4e7, 0.5e7, 0.6e7, 0.7e7]\n stfy = [0.8e7, 0.9e7, 1.0e7, 1.1e7]\n freq = [400, 800, 1200, 1600]\n bearing0 = BearingElement(0, kxx=stfx, kyy=stfy, cxx=2e3, frequency=freq)\n bearing1 = BearingElement(6, kxx=stfx, kyy=stfy, cxx=2e3, frequency=freq)\n\n rotor = Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])\n\n # coefficients for minimum clearance\n stfx = [0.7e7, 0.8e7, 0.9e7, 1.0e7]\n dampx = [2.0e3, 1.9e3, 1.8e3, 1.7e3]\n freq = [400, 800, 1200, 1600]\n bearing0 = BearingElement(0, kxx=stfx, cxx=dampx, frequency=freq)\n bearing1 = BearingElement(6, kxx=stfx, cxx=dampx, frequency=freq)\n min_clearance_brg = [bearing0, bearing1]\n\n # coefficients for maximum clearance\n stfx = [0.4e7, 0.5e7, 0.6e7, 0.7e7]\n dampx = [2.8e3, 2.7e3, 2.6e3, 2.5e3]\n freq = [400, 800, 1200, 1600]\n bearing0 = BearingElement(0, kxx=stfx, cxx=dampx, frequency=freq)\n bearing1 = BearingElement(6, kxx=stfx, cxx=dampx, frequency=freq)\n max_clearance_brg = [bearing0, bearing1]\n\n bearings = [min_clearance_brg, max_clearance_brg]\n return Report(\n rotor=rotor,\n speed_range=(400, 1000),\n tripspeed=1200,\n bearing_stiffness_range=(5, 8),\n bearing_clearance_lists=bearings,\n speed_units=\"rad/s\",\n )\n"
] | [
[
"numpy.sum",
"scipy.interpolate.interp1d",
"scipy.signal.argrelextrema",
"numpy.transpose",
"numpy.append",
"numpy.argmin",
"numpy.cos",
"numpy.log10",
"numpy.delete",
"numpy.where",
"numpy.round",
"numpy.linspace",
"numpy.mean",
"numpy.argmax",
"pandas.concat",
"numpy.array",
"numpy.sign",
"numpy.arctan",
"pandas.DataFrame",
"numpy.piecewise",
"numpy.logspace",
"numpy.sqrt",
"numpy.sin"
]
] |
dimitymiller/cac-openset | [
"b07dadbb8caa5d7430c403734f6543ff17e2ae11"
] | [
"datasets/generate_trainval_splits.py"
] | [
"\"\"\"\n\tRandomly select train and validation subsets from training datasets.\n\t80/20 split ratio used for all datasets except TinyImageNet, which will use 90/10.\n\n\tDimity Miller, 2020\n\"\"\"\n\nimport json\nimport random\nimport torchvision\nimport numpy as np\n\nrandom.seed(1000)\n\ndef save_trainval_split(dataset, train_idxs, val_idxs):\n\tprint(\"Saving {} Train/Val split to {}/trainval_idxs.json\".format(dataset, dataset))\n\tfile = open('{}/trainval_idxs.json'.format(dataset), 'w')\n\tfile.write(json.dumps({'Train': train_idxs, 'Val': val_idxs}))\n\tfile.close()\n\nmnist = torchvision.datasets.MNIST('data')\nsvhn = torchvision.datasets.SVHN('data')\ncifar10 = torchvision.datasets.CIFAR10('data')\ntinyImagenet = torchvision.datasets.ImageFolder('data/tiny-imagenet-200/train')\n\ndatasets = {'MNIST': mnist, 'SVHN': svhn, 'CIFAR10': cifar10, 'TinyImageNet': tinyImagenet}\nsplit = {'MNIST': 0.8, 'SVHN': 0.8, 'CIFAR10': 0.8, 'TinyImageNet': 0.9}\n\nfor datasetName in datasets.keys():\n\tdataset = datasets[datasetName]\t\n\n\t#get class label for each image. svhn has different syntax as .labels\n\ttry:\n\t\ttargets = dataset.targets\n\t\tnum_classes = len(dataset.classes)\n\texcept:\n\t\ttargets = dataset.labels\n\t\tnum_classes = len(np.unique(targets))\n\n\t#save image idxs per class\n\tclass_idxs = [[] for i in range(num_classes)]\n\tfor i, lbl in enumerate(targets):\n\t\tclass_idxs[lbl] += [i]\n\n\t#determine size of train subset\n\tclass_size = [len(x) for x in class_idxs]\n\tclass_train_size = [int(split[datasetName]*x) for x in class_size]\n\n\t#subset per class into train and val subsets randomly\n\ttrain_idxs = {}\n\tval_idxs = {}\n\tfor class_num in range(num_classes):\n\t\ttrain_size = class_train_size[class_num]\n\t\tidxs = class_idxs[class_num]\n\t\trandom.shuffle(idxs)\n\t\ttrain_idxs[class_num] = idxs[:train_size]\n\t\tval_idxs[class_num] = idxs[train_size:]\n\n\tsave_trainval_split(datasetName, train_idxs, val_idxs)\n\n\t#cifar10 and cifar+m datasets can use the same training and val splits\n\tif 'CIFAR' in datasetName:\n\t\tsave_trainval_split('CIFAR+10', train_idxs, val_idxs)\n\t\tsave_trainval_split('CIFAR+50', train_idxs, val_idxs)"
] | [
[
"numpy.unique"
]
] |
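`generate_trainval_splits.py` in the row above builds one index list per class, shuffles each list with a fixed seed, and keeps the first 80% (90% for TinyImageNet) as the training subset. A dependency-free sketch of the same stratified split; the function name and the toy labels are illustrative only:

```python
import random
from collections import defaultdict

def stratified_split(labels, train_frac=0.8, seed=1000):
    """Shuffle indices per class and split them into train/val, as in the script above."""
    rng = random.Random(seed)
    by_class = defaultdict(list)
    for idx, lbl in enumerate(labels):
        by_class[int(lbl)].append(idx)
    train_idxs, val_idxs = {}, {}
    for lbl, idxs in by_class.items():
        rng.shuffle(idxs)
        cut = int(train_frac * len(idxs))
        train_idxs[lbl], val_idxs[lbl] = idxs[:cut], idxs[cut:]
    return train_idxs, val_idxs

# toy usage: 10 samples over 2 classes, 80/20 split per class
print(stratified_split([0, 1] * 5))
```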
Pandinosaurus/depthai | [
"a46ad95744d8175f1c87bf8cd92c7423a84b8607"
] | [
"depthai_profiler.py"
] | [
"#!/usr/bin/env python3\n\n#depthai function profiler\nimport subprocess\nimport sys\nimport numpy as np\n\n#this is a debugging tool, that's why it's not added to requirements.txt\ntry:\n import snakeviz\nexcept ImportError:\n raise ImportError('\\033[1;5;31m snakeviz module not found, run: \\033[0m python3 -m pip install snakeviz ')\n\nif __name__ == \"__main__\":\n output_profile_file = 'depthai.prof'\n cmd = [\"python3\", \"-m\", \"cProfile\", \"-o\", output_profile_file, \"-s\", \"tottime\", \"depthai_demo.py\"]\n cmd = np.concatenate((cmd, sys.argv[1:]))\n print(cmd)\n\n subprocess.run(cmd)\n subprocess.run([\"snakeviz\", output_profile_file])\n"
] | [
[
"numpy.concatenate"
]
] |
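`depthai_profiler.py` in the row above launches `depthai_demo.py` under `python3 -m cProfile -o depthai.prof` and then opens the dump in snakeviz. When snakeviz is not available, the same statistics can be produced and read in-process with the standard library alone; the `work` function below is a stand-in workload, not part of the depthai demo:

```python
import cProfile
import pstats

def work():
    # stand-in workload; the wrapper above profiles depthai_demo.py instead
    return sum(i * i for i in range(100_000))

profiler = cProfile.Profile()
profiler.enable()
work()
profiler.disable()

profiler.dump_stats("depthai.prof")          # same output name the wrapper uses
stats = pstats.Stats("depthai.prof")
stats.sort_stats("tottime").print_stats(5)   # top 5 functions by total time
```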
IJSComplexMatter/cddm | [
"f4d7521ad88271027c61743b2e8a2355a40cb117"
] | [
"examples/paper/plot_error.py"
] | [
"\"\"\"Plots fig 3. and fig 4. from the paper.\n\nYou must first create data calling the following scripts:\n \n$ python auto_correlate_random_error.py\n$ python auto_correlate_standard_error.py\n$ python auto_correlate_fast_error.py\n$ python cross_correlate_error.py\n\n\"\"\"\n\nfrom cddm.sim import random_time_count\nfrom cddm.multitau import ccorr_multi_count, acorr_multi_count,log_merge_count, multilevel, merge_multilevel\nfrom cddm.norm import sigma_prime_weighted, weight_prime_from_g, sigma_weighted, weight_from_g\n#from cddm.avg import denoise,decreasing\n\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nfrom examples.paper.conf import NFRAMES, PERIOD , NFRAMES_RANDOM, PERIOD_RANDOM\n\nfrom examples.paper.conf import DATA_PATH\nfrom examples.paper.conf import SAVE_FIGS\nimport os\n\nfrom examples.paper.form_factor import g1, bg1, bg2\n\n#: whether toplot cross-correlationor auto-correlation data\nCROSS = False\n\n#: whether to plot binned data\nBINNING_DATA = 1\n\n#: whether to to mark binned data with markers\nBINNING_ERROR = 0\n\n#: whether to plot binning error model\nBINNING_MODEL = 0\n\n#: which K value to plot\nK = 16\n\nif CROSS:\n\n data = np.load(os.path.join(DATA_PATH,\"cross_error_corr.npy\"))\n bgs = np.load(os.path.join(DATA_PATH,\"cross_error_bg.npy\"))\n vars = np.load(os.path.join(DATA_PATH,\"cross_error_var.npy\"))\n\n data_regular = np.load(os.path.join(DATA_PATH,\"auto_standard_error_corr.npy\"))[...,0:NFRAMES//PERIOD*2]\nelse:\n data = np.load(os.path.join(DATA_PATH,\"auto_random_error_corr.npy\"))\n bgs = np.load(os.path.join(DATA_PATH,\"auto_random_error_bg.npy\"))\n vars = np.load(os.path.join(DATA_PATH,\"auto_random_error_var.npy\"))\n\n data_regular = np.load(os.path.join(DATA_PATH,\"auto_fast_error_corr.npy\"))[...,0:NFRAMES]\n\n\n\nLABELS = {1: \"B'\", 2 : \"S'\", 3 : \"W'\", 5 : \"B\", 6 : \"S\", 7 : \"W\", 9 : \"B''\", 10 : \"S''\", 11 : \"W''\" }\n\n\nMARKERS = {1: \"1\", 2 : \"2\", 3 : \"3\", 5 : \"4\",6 : \"+\", 7 : \"x\", 9 : \"4\", 10 : \"+\", 11 : \"x\"}\n\n\nplt.figure()\n\n\n\nif not BINNING_MODEL:\n \n #estimated count for the random triggering experiment\n if CROSS:\n n = NFRAMES/PERIOD*2\n else:\n n = random_time_count(NFRAMES_RANDOM, PERIOD_RANDOM)[0:NFRAMES_RANDOM]\nelse: \n if CROSS:\n clin,cmulti = ccorr_multi_count(NFRAMES, period = PERIOD, level_size = 16, binning = 1)\n else:\n clin,cmulti = acorr_multi_count(NFRAMES_RANDOM, period = PERIOD_RANDOM, level_size = 16, binning = 1)\n \n #get eefective count in aveariging... 
\n x,n = log_merge_count(clin, cmulti, binning = 1)\n\n\ni,j = (K,0)\n\nx = np.arange(NFRAMES)\n\n#delta parameter for weight model\ndelta = 0.\n\nbg1 = bg1(51,0)[...,i,j]\nbg2 = bg2(51,0)[...,i,j] if CROSS else bg1\ng = g1(x,51,0, cross = CROSS)[...,i,j]\n\nwp = weight_prime_from_g(g,delta,bg1,bg2)\nw = weight_from_g(g, delta)\n\n#error estimators using a simple model of independent data (delta = 0).\n\nerr1 = sigma_prime_weighted(0., g+0j, delta,bg1,bg2)#/n**0.5\nerr2 = sigma_prime_weighted(1., g, delta,bg1,bg2)#/n**0.5\nerr3 = sigma_prime_weighted(wp, g, delta,bg1,bg2)#/n**0.5\n\nerr5 = sigma_weighted(0., g, delta)#/n**0.5\nerr6 = sigma_weighted(1., g, delta)#/n**0.5\nerr7 = sigma_weighted(w, g, delta)#/n**0.5\n\n\nax1 = plt.subplot(121)\nax1.set_xscale(\"log\")\nax1.set_xlabel(r\"$\\tau$\")\nax1.set_title(r\"$g(\\tau), w(\\tau)$ @ $q = {}$\".format(K))\n\nax2 = plt.subplot(122)\nax2.set_title(r\"$\\sigma (\\tau)$ @ $q = {}$\".format(K))\n\nfor binning in (0,1):\n x,y = merge_multilevel(multilevel(data_regular[:,2,i,j,:],binning = binning))\n if CROSS:\n x = x*PERIOD//2\n g = g1(x,51,0)[...,i,j]\n\n std = (((y - g)**2).mean(axis = 0))**0.5\n\n if binning == BINNING_DATA:\n ax1.semilogx(x[1:],y[0,1:],marker = \"o\", linestyle = '',fillstyle = \"none\",label = \"$R$\", color = \"k\") \n if binning == BINNING_ERROR:\n ax2.semilogx(x[1:],std[1:],marker = \"o\", linestyle = '', fillstyle = \"none\",label = \"$R$\", color = \"k\")\n else:\n ax2.semilogx(x[1:],std[1:],linestyle = ':', fillstyle = \"none\", color = \"k\")\n\n\n\n\nfor binning in (0,1):\n ax1.set_prop_cycle(None)\n ax2.set_prop_cycle(None)\n for norm in (1,2,3,5,6,7):\n \n x,y = merge_multilevel(multilevel(data[:,norm,i,j,:],binning = binning))\n g = g1(x,51,0)[...,i,j]\n std = (((y - g)**2).mean(axis = 0))**0.5\n \n if binning == BINNING_DATA:\n ax1.semilogx(x[1:],y[0,1:],marker = MARKERS.get(norm,\"o\"), linestyle = '',fillstyle = \"none\",label = \"${}$\".format(LABELS.get(norm)))\n if binning == BINNING_ERROR:\n ax2.semilogx(x[1:],std[1:],marker = MARKERS.get(norm,\"o\"), linestyle = '', fillstyle = \"none\",label = \"${}$\".format(LABELS.get(norm)))\n else: \n ax2.semilogx(x[1:],std[1:],linestyle = ':', fillstyle = \"none\")\n\n\nax1.plot(x[1:],g1(x[1:],51,0)[...,i,j], \"k\",label = \"$g$\")\n\n# #: take first run, norm = 3 data for g estimation\n# x,g = log_average(data[0,3,i,j,:])\n# g = denoise(g)\n# g = decreasing(g)\n# g = g.clip(0,1)\n# ax1.plot(x[1:],g[1:], \"k:\",label = \"denoised\")\n\nx = np.arange(NFRAMES)\nax1.plot(x[1:],w[1:], \"k--\",label = \"$w$\")\nax1.plot(x[1:],wp[1:], \"k:\",label = \"$w'$\")\n\n#ax2.set_ylim(ax1.get_ylim())\n\n\nx,err1 = merge_multilevel(multilevel(err1,binning = 0))\nx,err2 = merge_multilevel(multilevel(err2,binning = 0))\nx,err3 = merge_multilevel(multilevel(err3,binning = 0))\nx,err5 = merge_multilevel(multilevel(err5,binning = 0))\nx,err6 = merge_multilevel(multilevel(err6,binning = 0))\nx,err7 = merge_multilevel(multilevel(err7,binning = 0))\n\nax2.set_prop_cycle(None)\n\n\nnmax = len(x)\nif BINNING_MODEL or not CROSS:\n n = n[1:nmax]\n\nax2.loglog(x[1:],err1[1:]/np.sqrt(n),\"-\")\nax2.loglog(x[1:],err2[1:]/np.sqrt(n),\"-\")\nax2.loglog(x[1:],err3[1:]/np.sqrt(n),\"-\")\nax2.loglog(x[1:],err5[1:]/np.sqrt(n),\"-\")\nax2.loglog(x[1:],err6[1:]/np.sqrt(n),\"-\")\nax2.loglog(x[1:],err7[1:]/np.sqrt(n),\"-\")\n\nax2.set_xlabel(r\"$\\tau$\")\nax2.set_ylabel(r\"$\\sigma$\")\nax2.set_ylim(0.001,2)\nax1.set_ylabel(r\"$g,w$\")\nax1.set_ylim(-1,1.5)\n\nax1.legend(loc = 
3)\n\nplt.tight_layout()\n\nif SAVE_FIGS:\n if CROSS:\n plt.savefig(\"plots/plot_cross_error_{}.pdf\".format(K))\n else:\n plt.savefig(\"plots/plot_auto_error_{}.pdf\".format(K))\n \nplt.show()\n\n\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.sqrt"
]
] |
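`plot_error.py` in the row above measures the error of each normalization scheme empirically, as the RMS deviation of the multilevel-averaged correlation from the known g(tau) across repeated runs (`std = (((y - g)**2).mean(axis=0))**0.5`), and compares it against model estimators divided by sqrt(n). A toy version of that empirical estimate on synthetic data; the decay time, noise level, and run count are assumed values chosen only for illustration:

```python
import numpy as np

rng = np.random.default_rng(0)

# 40 simulated runs of a noisy estimate of an exponential correlation g(tau)
tau = np.arange(1, 257)
g_true = np.exp(-tau / 64.0)
runs = g_true + 0.05 * rng.standard_normal((40, tau.size))

# empirical error, same formula as the paper script: RMS deviation from g(tau)
std_emp = np.sqrt(((runs - g_true) ** 2).mean(axis=0))

print(std_emp.mean())                 # close to the injected noise level, 0.05
print(std_emp.mean() / np.sqrt(40))   # expected error of the 40-run average
```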
panpanyunshi/rlkit | [
"e1f6c9e59ab2baab93d35385cdc43ab3632b2b65"
] | [
"rlkit/torch/core.py"
] | [
"import numpy as np\nimport torch\n\nfrom rlkit.torch import pytorch_util as ptu\n\n\ndef eval_np(module, *args, **kwargs):\n \"\"\"\n Eval this module with a numpy interface, 返回numpy类型变量\n\n Same as a call to __call__ except all Variable input/outputs are\n replaced with numpy equivalents.\n\n Assumes the output is either a single object or a tuple of objects.\n \"\"\"\n torch_args = tuple(torch_ify(x) for x in args)\n torch_kwargs = {k: torch_ify(v) for k, v in kwargs.items()}\n outputs = module(*torch_args, **torch_kwargs)\n if isinstance(outputs, tuple):\n return tuple(np_ify(x) for x in outputs)\n else:\n return np_ify(outputs)\n\n\ndef torch_ify(np_array_or_other):\n '''\n 将numpy数据转化为torch数据\n :param np_array_or_other:\n :return:\n '''\n if isinstance(np_array_or_other, np.ndarray):\n return ptu.from_numpy(np_array_or_other)\n else:\n return np_array_or_other\n\n\ndef np_ify(tensor_or_other):\n '''\n 将tensor变量转化为numpy\n :param tensor_or_other:\n :return:\n '''\n if isinstance(tensor_or_other, torch.autograd.Variable):\n return ptu.get_numpy(tensor_or_other)\n else:\n return tensor_or_other\n\n\ndef _elem_or_tuple_to_variable(elem_or_tuple):\n if isinstance(elem_or_tuple, tuple):\n return tuple(\n _elem_or_tuple_to_variable(e) for e in elem_or_tuple\n )\n return ptu.from_numpy(elem_or_tuple).float()\n\n\ndef _filter_batch(np_batch):\n for k, v in np_batch.items():\n if v.dtype == np.bool:\n yield k, v.astype(int)\n else:\n yield k, v\n\n\ndef np_to_pytorch_batch(np_batch):\n return {\n k: _elem_or_tuple_to_variable(x)\n for k, x in _filter_batch(np_batch)\n if x.dtype != np.dtype('O') # ignore object (e.g. dictionaries)\n }\n\n"
] | [
[
"numpy.dtype"
]
] |
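rlkit's `core.py` in the row above (its docstrings are partly Chinese: `eval_np` returns numpy-typed outputs, `torch_ify` converts numpy data to torch, `np_ify` converts a tensor back to numpy) lets a torch module be called with numpy arrays end to end. A self-contained sketch of the same pattern without the `ptu` helpers; the `Linear` layer is only a stand-in module:

```python
import numpy as np
import torch

def eval_np(module, *args):
    """Call a torch module with numpy inputs and return numpy outputs."""
    torch_args = tuple(
        torch.from_numpy(a).float() if isinstance(a, np.ndarray) else a
        for a in args
    )
    with torch.no_grad():
        out = module(*torch_args)
    if isinstance(out, tuple):
        return tuple(o.cpu().numpy() for o in out)
    return out.cpu().numpy()

net = torch.nn.Linear(3, 2)
print(eval_np(net, np.zeros((1, 3), dtype=np.float32)).shape)   # (1, 2)
```

One caveat when running the recorded file on current NumPy: `_filter_batch` compares against `np.bool`, an alias removed in NumPy 1.24; `np.bool_` (or the builtin `bool`) is the drop-in replacement.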
asaran/decision-transformer | [
"f6f8bf283256d616d213ac5bd07cb7f3efb101b3"
] | [
"gym/decision_transformer/models/decision_transformer.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\n\nimport transformers\n\nfrom decision_transformer.models.model import TrajectoryModel\nfrom decision_transformer.models.trajectory_gpt2 import GPT2Model\n\n\nclass DecisionTransformer(TrajectoryModel):\n\n \"\"\"\n This model uses GPT to model (Return_1, state_1, action_1, Return_2, state_2, ...)\n \"\"\"\n\n def __init__(\n self,\n state_dim,\n act_dim,\n hidden_size,\n max_length=None,\n max_ep_len=4096,\n action_tanh=True,\n **kwargs\n ):\n super().__init__(state_dim, act_dim, max_length=max_length)\n\n self.hidden_size = hidden_size\n config = transformers.GPT2Config(\n vocab_size=1, # doesn't matter -- we don't use the vocab\n n_embd=hidden_size,\n **kwargs\n )\n\n # note: the only difference between this GPT2Model and the default Huggingface version\n # is that the positional embeddings are removed (since we'll add those ourselves)\n self.transformer = GPT2Model(config)\n\n self.embed_timestep = nn.Embedding(max_ep_len, hidden_size)\n self.embed_return = torch.nn.Linear(1, hidden_size)\n self.embed_state = torch.nn.Linear(self.state_dim, hidden_size)\n self.embed_action = torch.nn.Linear(self.act_dim, hidden_size)\n\n self.embed_ln = nn.LayerNorm(hidden_size)\n\n # note: we don't predict states or returns for the paper\n self.predict_state = torch.nn.Linear(hidden_size, self.state_dim)\n self.predict_action = nn.Sequential(\n *([nn.Linear(hidden_size, self.act_dim)] + ([nn.Tanh()] if action_tanh else []))\n )\n self.predict_return = torch.nn.Linear(hidden_size, 1)\n\n def forward(self, states, actions, rewards, returns_to_go, timesteps, attention_mask=None):\n\n batch_size, seq_length = states.shape[0], states.shape[1]\n\n if attention_mask is None:\n # attention mask for GPT: 1 if can be attended to, 0 if not\n attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long)\n\n # embed each modality with a different head\n state_embeddings = self.embed_state(states)\n action_embeddings = self.embed_action(actions)\n returns_embeddings = self.embed_return(returns_to_go)\n time_embeddings = self.embed_timestep(timesteps)\n\n # time embeddings are treated similar to positional embeddings\n state_embeddings = state_embeddings + time_embeddings\n action_embeddings = action_embeddings + time_embeddings\n returns_embeddings = returns_embeddings + time_embeddings\n\n # this makes the sequence look like (R_1, s_1, a_1, R_2, s_2, a_2, ...)\n # which works nice in an autoregressive sense since states predict actions\n stacked_inputs = torch.stack(\n (returns_embeddings, state_embeddings, action_embeddings), dim=1\n ).permute(0, 2, 1, 3).reshape(batch_size, 3*seq_length, self.hidden_size)\n stacked_inputs = self.embed_ln(stacked_inputs)\n\n # to make the attention mask fit the stacked inputs, have to stack it as well\n stacked_attention_mask = torch.stack(\n (attention_mask, attention_mask, attention_mask), dim=1\n ).permute(0, 2, 1).reshape(batch_size, 3*seq_length)\n\n # we feed in the input embeddings (not word indices as in NLP) to the model\n transformer_outputs = self.transformer(\n inputs_embeds=stacked_inputs,\n attention_mask=stacked_attention_mask,\n )\n x = transformer_outputs['last_hidden_state']\n\n # reshape x so that the second dimension corresponds to the original\n # returns (0), states (1), or actions (2); i.e. 
x[:,1,t] is the token for s_t\n x = x.reshape(batch_size, seq_length, 3, self.hidden_size).permute(0, 2, 1, 3)\n\n # get predictions\n return_preds = self.predict_return(x[:,2]) # predict next return given state and action\n state_preds = self.predict_state(x[:,2]) # predict next state given state and action\n action_preds = self.predict_action(x[:,1]) # predict next action given state\n\n return state_preds, action_preds, return_preds\n\n def get_action(self, states, actions, rewards, returns_to_go, timesteps, **kwargs):\n # we don't care about the past rewards in this model\n\n states = states.reshape(1, -1, self.state_dim)\n actions = actions.reshape(1, -1, self.act_dim)\n returns_to_go = returns_to_go.reshape(1, -1, 1)\n timesteps = timesteps.reshape(1, -1)\n\n if self.max_length is not None:\n states = states[:,-self.max_length:]\n actions = actions[:,-self.max_length:]\n returns_to_go = returns_to_go[:,-self.max_length:]\n timesteps = timesteps[:,-self.max_length:]\n\n # pad all tokens to sequence length\n attention_mask = torch.cat([torch.zeros(self.max_length-states.shape[1]), torch.ones(states.shape[1])])\n attention_mask = attention_mask.to(dtype=torch.long, device=states.device).reshape(1, -1)\n states = torch.cat(\n [torch.zeros((states.shape[0], self.max_length-states.shape[1], self.state_dim), device=states.device), states],\n dim=1).to(dtype=torch.float32)\n actions = torch.cat(\n [torch.zeros((actions.shape[0], self.max_length - actions.shape[1], self.act_dim),\n device=actions.device), actions],\n dim=1).to(dtype=torch.float32)\n returns_to_go = torch.cat(\n [torch.zeros((returns_to_go.shape[0], self.max_length-returns_to_go.shape[1], 1), device=returns_to_go.device), returns_to_go],\n dim=1).to(dtype=torch.float32)\n timesteps = torch.cat(\n [torch.zeros((timesteps.shape[0], self.max_length-timesteps.shape[1]), device=timesteps.device), timesteps],\n dim=1\n ).to(dtype=torch.long)\n else:\n attention_mask = None\n\n _, action_preds, return_preds = self.forward(\n states, actions, None, returns_to_go, timesteps, attention_mask=attention_mask, **kwargs)\n\n return action_preds[0,-1]\n"
] | [
[
"torch.ones",
"torch.stack",
"torch.nn.Linear",
"torch.nn.Embedding",
"torch.nn.Tanh",
"torch.nn.LayerNorm",
"torch.zeros"
]
] |
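The DecisionTransformer `forward` in the row above interleaves return, state, and action embeddings into one (R_1, s_1, a_1, R_2, s_2, a_2, ...) sequence with a stack/permute/reshape, and later splits the transformer output back the same way so that `x[:, 1]` holds the state tokens used for action prediction. A toy check of that reshape trick; the batch, length, and hidden sizes are arbitrary illustration values:

```python
import torch

B, T, H = 2, 4, 8                      # batch, sequence length, hidden size
returns_emb = torch.randn(B, T, H)
state_emb = torch.randn(B, T, H)
action_emb = torch.randn(B, T, H)

# interleave to (R_1, s_1, a_1, R_2, s_2, a_2, ...), as in forward() above
stacked = (
    torch.stack((returns_emb, state_emb, action_emb), dim=1)   # (B, 3, T, H)
    .permute(0, 2, 1, 3)                                       # (B, T, 3, H)
    .reshape(B, 3 * T, H)
)
print(stacked.shape)                                  # torch.Size([2, 12, 8])
assert torch.equal(stacked[:, 1], state_emb[:, 0])    # token 3*t+1 is the state token s_t (here t=0)
```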
elwintay/clearml_test | [
"c87985303e69490e83ec779d098570bc505f80ae"
] | [
"model_gtt/run_pl_gtt.py"
] | [
"import argparse\nimport glob\nimport pandas as pd\nimport logging\nimport os\nimport json\nfrom collections import OrderedDict\nfrom eval import eval_tf\n\nimport numpy as np\nimport torch\nfrom seqeval.metrics import f1_score, precision_score, recall_score, accuracy_score\nfrom torch.nn import CrossEntropyLoss\nfrom torch.utils.data import DataLoader, TensorDataset\n\nfrom transformer_base import BaseTransformer, add_generic_args, generic_train\nfrom utils_gtt import convert_examples_to_features, get_labels, read_examples_from_file, read_golds_from_test_file, not_sub_string, incident_token_to_type\n\nrole_list = [\"incident_type\", \"PerpInd\", \"PerpOrg\", \"Target\", \"Victim\", \"Weapon\"]\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\n# def upload_file(s3,bucket_name,s3_folder,filename, preds_log):\n \n# path = s3_folder + filename\n# s3object = s3.Object(bucket_name,path)\n# s3object.put(Body=(bytes(json.dumps(preds_log, indent=4))))\n# return\n\nclass NERTransformer(BaseTransformer):\n \"\"\"\n A training module for single-transformer-ee. See BaseTransformer for the core options.\n \"\"\"\n\n mode = \"base\"\n\n def __init__(self, hparams):\n self.pad_token_label_id = CrossEntropyLoss().ignore_index\n # super(NERTransformer, self).__init__(hparams, num_labels, self.mode)\n \n super(NERTransformer, self).__init__(hparams, None, self.mode)\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() and self.hparams.n_gpu else \"cpu\")\n # n_gpu = torch.cuda.device_count()\n # self.MASK = tokenizer.convert_tokens_to_ids(['[MASK]'])[0]\n self.SEP = self.tokenizer.convert_tokens_to_ids(['[SEP]'])[0]\n self.CLS = self.tokenizer.convert_tokens_to_ids(['[CLS]'])[0] \n self.SEP_template = self.tokenizer.convert_tokens_to_ids([\"[unused0]\"])[0]\n\n def forward(self, **inputs):\n labels = inputs.pop(\"labels\", None) # doc_length\n args = self.hparams\n\n outputs = self.model(**inputs) # sequence_output, pooled_output, (hidden_states), (attentions)\n sequence_output = outputs[0]\n src_sequence_output = sequence_output[:, :args.max_seq_length_src, :]\n src_sequence_output = torch.transpose(src_sequence_output, 1, 2) # hidden * doc_length\n tgt_sequence_output = sequence_output[:, args.max_seq_length_src:, :] # tgt_length * hidden\n logits = torch.bmm(tgt_sequence_output, src_sequence_output) # tgt_length * doc_length\n\n outputs = (logits,) + outputs[2:]\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n tgt_attention_mask_1d = inputs[\"attention_mask\"][:, -1, args.max_seq_length_src:]\n if tgt_attention_mask_1d is not None:\n active_logits = logits.view(-1, args.max_seq_length_src)\n active_labels = labels.view(-1)\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, args.max_seq_length_src), labels.view(-1))\n outputs = (loss,) + outputs\n\n # import ipdb; ipdb.set_trace()\n return outputs\n\n def training_step(self, batch, batch_num):\n \"Compute loss and log.\"\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"token_type_ids\": batch[2], \"position_ids\": batch[3], \"labels\": batch[4]}\n outputs = self(**inputs)\n loss = outputs[0]\n tensorboard_logs = {\"training_loss\": loss, \"rate\": self.lr_scheduler.get_last_lr()[-1]}\n return {\"loss\": loss, \"log\": tensorboard_logs}\n\n def prepare_data(self):\n \"Called to initialize data. 
Use the call to construct features\"\n args = self.hparams\n for mode in [\"train\", \"dev\", \"test\"]:\n cached_features_file = self._feature_file(mode)\n if not os.path.exists(cached_features_file):\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n examples = read_examples_from_file(args.data_dir, mode, self.tokenizer, debug=args.debug)\n features = convert_examples_to_features(\n examples,\n # self.labels,\n args.max_seq_length_src,\n args.max_seq_length_tgt,\n self.tokenizer,\n cls_token_at_end=bool(args.model_type in [\"xlnet\"]),\n cls_token=self.tokenizer.cls_token,\n cls_token_segment_id=2 if args.model_type in [\"xlnet\"] else 0,\n sep_token=self.tokenizer.sep_token,\n sep_token_extra=bool(args.model_type in [\"roberta\"]),\n pad_on_left=bool(args.model_type in [\"xlnet\"]),\n pad_token=self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args.model_type in [\"xlnet\"] else 0,\n pad_token_label_id=self.pad_token_label_id,\n )\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save(features, cached_features_file)\n # import ipdb; ipdb.set_trace()\n\n def load_dataset(self, mode, batch_size):\n \"Load datasets. Called after prepare data.\"\n args = self.hparams\n cached_features_file = self._feature_file(mode)\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features = torch.load(cached_features_file)\n if args.debug:\n features = features[:2]\n # features = features[:len(features)//10]\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) #check this next\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n all_position_ids = torch.tensor([f.position_ids for f in features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)\n all_docid = torch.tensor([f.docid for f in features], dtype=torch.long)\n return DataLoader(\n TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_position_ids, all_label_ids, all_docid), batch_size=batch_size\n )\n\n def validation_step(self, batch, batch_nb):\n \"Compute validation\"\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"token_type_ids\": batch[2], \"position_ids\": batch[3], \"labels\": batch[4]}\n # inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"position_ids\": batch[3], \"labels\": batch[4]}\n outputs = self(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n docid = batch[5].detach().cpu().numpy()\n return {\"val_loss\": tmp_eval_loss.detach().cpu(), \"pred\": preds, \"target\": out_label_ids, \"docid\": docid}\n\n\n def validation_epoch_end(self, outputs):\n val_loss_mean = torch.stack([x[\"val_loss\"] for x in outputs]).mean()\n preds = np.concatenate([x[\"pred\"] for x in outputs], axis=0)\n preds = np.argmax(preds, axis=2)\n out_label_ids = np.concatenate([x[\"target\"] for x in outputs], axis=0)\n\n out_label_list = [[] for _ in range(out_label_ids.shape[0])]\n preds_list = [[] for _ in range(out_label_ids.shape[0])]\n\n for i in range(out_label_ids.shape[0]):\n for j in range(out_label_ids.shape[1]):\n if out_label_ids[i, j] != self.pad_token_label_id:\n out_label_list[i].append(out_label_ids[i][j])\n preds_list[i].append(preds[i][j])\n # import ipdb; 
ipdb.set_trace()\n\n logs = {\n \"val_loss\": val_loss_mean,\n \"val_accuracy\": accuracy_score(out_label_list, preds_list)\n }\n return {\"val_loss\": logs[\"val_loss\"], \"log\": logs, \"progress_bar\": logs}\n\n\n def test_step(self, batch, batch_nb):\n \"Compute test\"\n # test_loss\n # inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"token_type_ids\": batch[2], \"position_ids\": batch[3], \"labels\": batch[4]}\n # outputs = self(**inputs)\n # tmp_eval_loss, tmp_eval_logits = outputs[:2]\n # tmp_eval_logits = tmp_eval_logits.detach().cpu().numpy()\n # out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n\n # preds (real decoding)\n global_args = self.hparams\n max_seq_length_src = self.hparams.max_seq_length_src\n max_seq_length_tgt = self.hparams.max_seq_length_tgt\n bs = batch[0].size(0)\n\n i = max_seq_length_src\n src_input_ids = batch[0][:, :max_seq_length_src]\n src_position_ids = batch[3][:, :max_seq_length_src]\n tgt_input_ids, init_tgt_input_ids = torch.tensor([[self.CLS]]).to(self.device), torch.tensor([[self.CLS]]).to(self.device)\n tgt_position_ids, init_tgt_position_ids = torch.tensor([[0]]).to(self.device), torch.tensor([[0]]).to(self.device)\n\n # get out_input_id_list (pred_seq)\n while i <= max_seq_length_src + max_seq_length_tgt - 1:\n input_ids = torch.cat((src_input_ids, tgt_input_ids), dim=1)\n attention_mask = batch[1][:, :i+1, :i+1]\n for j in range(max_seq_length_src, i+1):\n attention_mask[:, j, max_seq_length_src:j+1] = 1\n # if i == max_seq_length_src + 3: # debug\n # import ipdb; ipdb.set_trace()\n token_type_ids = batch[2][:, :i+1]\n position_ids = torch.cat((src_position_ids, tgt_position_ids), dim=1)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": attention_mask, \"token_type_ids\": token_type_ids, \"position_ids\": position_ids}\n # print(tgt_position_ids) # debug\n outputs = self(**inputs)\n logits = outputs[0][0]\n\n ## option 1: setting the decoding constraints (!!!)\n # # (constraint 1) on decoding offset (length and larger offset)\n # for j in range(tgt_position_ids.size(1)):\n # if j == 0: continue\n # cur_token_id = tgt_input_ids[0][j].detach().cpu().tolist()\n # cur_token_position = tgt_position_ids[0][j].detach().cpu().tolist() \n # if cur_token_id == self.CLS or cur_token_id == self.SEP or cur_token_id == self.SEP_template: \n # continue\n # else:\n # # remove (early stop/output [SEP]) the case like ``the post post post post ...''\n # token_id_cnt = 0\n # k = j\n # while k > 0 and tgt_input_ids[0][k].detach().cpu().tolist() == cur_token_id:\n # token_id_cnt += 1\n # k -= 1\n # if token_id_cnt >= 4:\n # for q in range(max_seq_length_src):\n # if src_input_ids[0][q].detach().cpu().tolist() == self.SEP:\n # logits[j][q] += 10000.0\n\n # before_mask = cur_token_position\n # for k in range(before_mask):\n # if src_input_ids[0][k].detach().cpu().tolist() == self.SEP: \n # continue\n # logits[j][k] -= 10000.0\n # # for k in range(cur_token_position + 30, max_seq_length_src):\n # # if src_input_ids[0][k].detach().cpu().tolist() == self.SEP: continue\n # # logits[j][k] -= 10000.0\n\n # (constraint 2) thresh for predicting [SEP]\n probs = torch.nn.Softmax(dim=-1)(logits)\n top_2_probs, top_2_indices = torch.topk(probs, 2, dim=-1)\n for j in range(top_2_indices.size(0)):\n prob_gap = (top_2_probs[j][0]/top_2_probs[j][1]).detach().cpu().tolist()\n if src_input_ids[0][top_2_indices[j][0].detach().cpu().tolist()].detach().cpu().tolist() == self.SEP and prob_gap < global_args.thresh:\n top_2_indices[j][0] = 
top_2_indices[j][1]\n\n out_position_id = top_2_indices[:, 0]\n\n # # option 2: direct greedy decoding\n # out_position_id = torch.argmax(logits, -1)\n \n # print(out_position_id) # debug\n out_input_id = torch.index_select(src_input_ids, 1, out_position_id)\n out_position_id = out_position_id.unsqueeze(dim=0) # add batch dim\n tgt_input_ids = torch.cat((init_tgt_input_ids, out_input_id), dim=1)\n tgt_position_ids = torch.cat((init_tgt_position_ids, out_position_id), dim=1)\n i += 1\n\n # import ipdb; ipdb.set_trace()\n\n # #########save prob logits\n # temp_save = pd.DataFrame(probs.cpu().numpy())\n # temp_torch = torch.range(0, src_input_ids.size(1)-1,dtype=int,device='cuda')\n # temp_out_input_id = torch.index_select(src_input_ids, 1, temp_torch)\n # temp_out_input_id = temp_out_input_id.detach().cpu().tolist()\n # column = self.tokenizer.convert_ids_to_tokens(temp_out_input_id[0])\n # temp_save.columns = column\n # docids = batch[5].detach().cpu().tolist()\n # docids_name = str(docids) + '_probs.csv'\n # temp_save.to_csv(docids_name, index=False)\n\n\n # from out_input_id_list (pred_seq) to pred_extracts\n docids = batch[5].detach().cpu().tolist()\n pred_seq = []\n pred_extract = []\n for b in range(bs): # bs == 1\n src_input_id_list = src_input_ids[b].detach().cpu().tolist()\n out_input_id_list = out_input_id[b].detach().cpu().tolist()\n out_position_id_list = out_position_id[b].detach().cpu().tolist()\n if out_input_id_list[-1] != self.CLS:\n out_input_id_list.append(self.CLS)\n\n # get raw pred_seq\n # sep_cnt = 0\n for idx, token_id in enumerate(out_input_id_list):\n if token_id == self.CLS:\n break\n pred_seq.append(self.tokenizer.convert_ids_to_tokens(out_input_id_list[:idx+1]))\n\n # get pred_extract\n temps_extract = []\n buf_template = []\n buf_template_pos = []\n for idx, token_id in enumerate(out_input_id_list[:idx]):\n if token_id == self.SEP_template:\n # decode one template's content\n # incident_token_to_type[]\n p_extract = []\n sep_cnt = 0\n position_buf = []\n for temp_idx, temp_token_id in enumerate(buf_template):\n if temp_token_id == self.SEP:\n sep_cnt += 1\n entitys = []\n s_e_pair = []\n for position in position_buf:\n s_e_pair.append(position)\n if len(s_e_pair) == 2:\n s, e = s_e_pair[0], s_e_pair[1]\n extract_ids = []\n for j in range(s, e+1): \n extract_ids.append(src_input_id_list[j])\n extract_tokens = self.tokenizer.convert_ids_to_tokens(extract_ids)\n if extract_tokens:\n if len(extract_tokens) <= 20: \n candidate_str = \" \".join(extract_tokens).replace(\" ##\", \"\").replace(\" - \", \"-\")\n if sep_cnt != 5 or \"bomb\" not in candidate_str:\n if [candidate_str] not in entitys and not_sub_string(candidate_str, entitys) and candidate_str[:2] != \"##\":\n entitys.append([candidate_str])\n s_e_pair = []\n # extra s in s_e_pair\n if s_e_pair:\n extract_tokens = self.tokenizer.convert_ids_to_tokens([src_input_id_list[s_e_pair[0]]])\n if len(extract_tokens) <= 20: \n candidate_str = \" \".join(extract_tokens).replace(\" ##\", \"\").replace(\" - \", \"-\")\n if sep_cnt != 5 or \"bomb\" not in candidate_str:\n if [candidate_str] not in entitys and not_sub_string(candidate_str, entitys) and candidate_str[:2] != \"##\":\n entitys.append([candidate_str])\n # add all entitys of this role\n p_extract.append(entitys)\n # clean buffer\n position_buf = []\n else:\n position_buf.append(buf_template_pos[temp_idx])\n if sep_cnt >= 6: break\n\n # extra token1 token2 [unused0] (no final [SEP])\n if position_buf:\n entitys = []\n s_e_pair = []\n for position in 
position_buf:\n s_e_pair.append(position)\n if len(s_e_pair) == 2:\n s, e = s_e_pair[0], s_e_pair[1]\n extract_ids = []\n for j in range(s, e+1): \n extract_ids.append(src_input_id_list[j])\n extract_tokens = self.tokenizer.convert_ids_to_tokens(extract_ids)\n if extract_tokens:\n if len(extract_tokens) <= 20: \n candidate_str = \" \".join(extract_tokens).replace(\" ##\", \"\").replace(\" - \", \"-\")\n if sep_cnt != 5 or \"bomb\" not in candidate_str:\n if [candidate_str] not in entitys and not_sub_string(candidate_str, entitys) and candidate_str[:2] != \"##\":\n entitys.append([candidate_str])\n p_extract.append(entitys)\n\n if p_extract:\n temps_extract.append(p_extract)\n buf_template = []\n buf_template_pos = []\n else:\n buf_template.append(out_input_id_list[idx])\n buf_template_pos.append(out_position_id_list[idx])\n\n\n pred_extract.append(temps_extract)\n ### old ###\n # sep_cnt = 0\n # position_buf = []\n # for idx, token_id in enumerate(out_input_id_list):\n # if token_id == self.SEP:\n # sep_cnt += 1\n # entitys = []\n # s_e_pair = []\n # for position in position_buf:\n # s_e_pair.append(position)\n # if len(s_e_pair) == 2:\n # s, e = s_e_pair[0], s_e_pair[1]\n # extract_ids = []\n # for j in range(s, e+1): \n # extract_ids.append(src_input_id_list[j])\n # extract_tokens = self.tokenizer.convert_ids_to_tokens(extract_ids)\n # if extract_tokens:\n # if len(extract_tokens) <= 20: \n # candidate_str = \" \".join(extract_tokens).replace(\" ##\", \"\")\n # if sep_cnt != 4 or \"bomb\" not in candidate_str:\n # if [candidate_str] not in entitys and not_sub_string(candidate_str, entitys) and candidate_str[:2] != \"##\":\n # entitys.append([candidate_str])\n # s_e_pair = []\n # # extra s in s_e_pair\n # if s_e_pair:\n # extract_tokens = self.tokenizer.convert_ids_to_tokens([src_input_id_list[s_e_pair[0]]])\n # if len(extract_tokens) <= 20: \n # candidate_str = \" \".join(extract_tokens).replace(\" ##\", \"\")\n # if sep_cnt != 4 or \"bomb\" not in candidate_str:\n # if [candidate_str] not in entitys and not_sub_string(candidate_str, entitys) and candidate_str[:2] != \"##\":\n # entitys.append([candidate_str])\n # # add all entitys of this role\n # p_extract.append(entitys)\n # # clean buffer\n # position_buf = []\n # else:\n # position_buf.append(out_position_id_list[idx])\n # if sep_cnt >= 5: break\n ### old ###\n\n\n # return {\"test_loss\": tmp_eval_loss.detach().cpu(), \"pred_seq\": pred_seq, \"pred_extract\": pred_extract, \"logits\": tmp_eval_logits, \"target\": out_label_ids, \"docid\": docids}\n return {\"pred_seq\": pred_seq, \"pred_extract\": pred_extract, \"docid\": docids}\n\n\n def test_epoch_end(self, outputs):\n # # updating to test_epoch_end instead of deprecated test_end\n args = self.hparams\n logs = {}\n\n ## real decoding\n # read golds\n doctexts_tokens, golds = read_golds_from_test_file(args.data_dir, self.tokenizer, debug=args.debug)\n # get preds and preds_log\n preds = OrderedDict()\n preds_log = OrderedDict()\n for x in outputs:\n docids = x[\"docid\"]\n pred_seq = x[\"pred_seq\"]\n pred_extract = x[\"pred_extract\"]\n # preds (pred_extract)]\n for docid, temps_extract in zip(docids, pred_extract):\n if docid not in preds:\n preds[docid] = []\n for temp_raw in temps_extract:\n temp = OrderedDict()\n template_name = temp_raw[0][0][0]\n with open('/data/wikievents/muc_format/role_dicts.json') as f:\n role_dict = json.load(f)\n role_list = ['incident_type'] + role_dict[template_name]\n for idx, role in enumerate(role_list):\n temp[role] = []\n if idx+1 > 
len(temp_raw):\n continue\n elif temp_raw[idx]:\n if role == \"incident_type\":\n if temp_raw[idx][0][0] in incident_token_to_type:\n temp[role] = incident_token_to_type[temp_raw[idx][0][0]]\n else:\n temp[role] = temp_raw[idx][0][0]\n else:\n temp[role] = temp_raw[idx]\n\n preds[docid].append(temp)\n\n \n # preds_log\n for docid, p_seq in zip(docids, pred_seq):\n if docid not in preds_log:\n preds_log[docid] = OrderedDict()\n preds_log[docid][\"doctext\"] = \" \".join(doctexts_tokens[docid])\n preds_log[docid][\"pred_seq\"] = \" \".join(p_seq)\n preds_log[docid][\"pred_templates\"] = preds[docid]\n preds_log[docid][\"gold_templates\"] = golds[docid]\n\n # # evaluate (rewrite this for it to work)\n # results = eval_tf(preds, golds)\n # for key in results:\n # if key == \"micro_avg\":\n # print(\"***************** {} *****************\".format(key))\n # else:\n # print(\"================= {} =================\".format(key))\n # print(\"P: {:.2f}%, R: {:.2f}%, F1: {:.2f}%\".format(results[key][\"p\"] * 100, results[key][\"r\"] * 100, results[key][\"f1\"] * 100)) # phi_strict\n\n logger.info(\"writing preds to .out file:\")\n # session = boto3.Session(aws_access_key_id=\"AKIA8C43BC01F5E3176C\",aws_secret_access_key=\"VKYHHxqQl5GW/g3RG6c/qR65EbNrpTBBdNRtYX08\")\n # s3 = session.resource('s3',endpoint_url=\"https://ecs.dsta.ai\")\n # upload_file(s3,\"blackwidow-s3\",\"models/trained_outputs/gtt/\",\"preds_gtt.out\", preds_log)\n if args.debug:\n output_path = os.path.join(args.output_dir,\"preds_gtt_debug.out\") \n with open(\"preds_gtt_debug.out\", \"w+\") as f:\n f.write(json.dumps(preds_log, indent=4)) \n else:\n output_path = os.path.join(args.output_dir,\"preds_gtt.out\") \n with open(output_path, \"w+\") as f:\n f.write(json.dumps(preds_log, indent=4))\n\n # import ipdb; ipdb.set_trace()\n\n return {\"log\": logs, \"progress_bar\": logs}\n # return {\"test_loss\": logs[\"test_loss\"], \"log\": logs, \"progress_bar\": logs}\n\n @staticmethod\n def add_model_specific_args(parser, root_dir):\n # Add NER specific options\n BaseTransformer.add_model_specific_args(parser, root_dir)\n parser.add_argument(\n \"--max_seq_length_src\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization for src. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\",\n )\n\n parser.add_argument(\n \"--max_seq_length_tgt\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization for tgt. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\",\n )\n\n parser.add_argument(\n \"--labels\",\n default=\"\",\n type=str,\n help=\"Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.\",\n )\n\n parser.add_argument(\n \"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.\",\n )\n\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n\n parser.add_argument(\"--debug\", action=\"store_true\", help=\"if in debug mode\")\n\n parser.add_argument(\"--thresh\", default=1, type=float, help=\"thresh for predicting [SEP]\",)\n return parser\n\n\n# if __name__ == \"__main__\":\n# parser = argparse.ArgumentParser()\n# add_generic_args(parser, os.getcwd())\n# parser = NERTransformer.add_model_specific_args(parser, os.getcwd())\n# args = parser.parse_args()\n# global_args = args\n# logger.info(args)\n# model = NERTransformer(args)\n# trainer = generic_train(model, args)\n\n# if args.do_predict:\n# # See https://github.com/huggingface/transformers/issues/3159\n# # pl use this format to create a checkpoint:\n# # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\\\n# # /pytorch_lightning/callbacks/model_checkpoint.py#L169\n# checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, \"checkpointepoch=*.ckpt\"), recursive=True)))\n# model = NERTransformer.load_from_checkpoint(checkpoints[-1])\n# model.hparams = args\n# if args.debug:\n# model.hparams.debug = True\n# trainer.test(model)\n"
] | [
[
"torch.stack",
"torch.load",
"torch.nn.Softmax",
"torch.save",
"torch.tensor",
"numpy.argmax",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.TensorDataset",
"torch.topk",
"torch.cuda.is_available",
"torch.index_select",
"numpy.concatenate",
"torch.bmm",
"torch.cat",
"torch.transpose"
]
] |
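Note: the decoding loop in the row above selects source positions with torch.topk and maps them back to source token ids with torch.index_select. The following is a minimal, standalone sketch of just that pointer-selection step, not the repository's actual model; the tensor shapes and id values (src_input_ids, three decode steps) are invented for illustration, and the [SEP]-threshold trick controlled by --thresh is deliberately omitted.

import torch

# Hypothetical inputs: a batch of one source sequence and per-step logits over
# its positions (decode_steps x src_len). In the real model these come from BERT.
src_input_ids = torch.tensor([[101, 7592, 2088, 102]])  # (batch=1, src_len=4)
logits = torch.randn(3, src_input_ids.size(1))          # (decode_steps=3, src_len)

# Option 1: top-k decoding -- keep the best and second-best source position per step.
top2_scores, top2_indices = torch.topk(logits, k=2, dim=-1)
out_position_id = top2_indices[:, 0]                    # greedy choice per step

# Option 2: direct greedy decoding gives the same positions as the top-1 column.
assert torch.equal(out_position_id, torch.argmax(logits, dim=-1))

# Map the selected source positions back to source token ids along the sequence dim.
out_input_id = torch.index_select(src_input_ids, 1, out_position_id)
print(out_position_id.tolist(), out_input_id.tolist())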
simonsimon006/tensorflow-wavelets | [
"21a095bf0048ae2488ca5ae4961d2cbfe94263a9"
] | [
"Development/models/DWT2.py"
] | [
"import os\nimport cv2\nimport math\nimport pywt\nimport numpy as np\nfrom utils import mse\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.datasets import mnist, cifar10\n\n# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # for tensor flow warning\n# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n\nclass DWT(layers.Layer):\n def __init__(self, name='haar', **kwargs):\n super(DWT, self).__init__(**kwargs)\n self._name = self.name + \"_\" + name\n # get filter coeffs from 3rd party lib\n wavelet = pywt.Wavelet(name)\n self.dec_len = wavelet.dec_len\n\n # decomposition filter low pass and hight pass coeffs\n db2_lpf = wavelet.dec_lo\n db2_hpf = wavelet.dec_hi\n\n # covert filters into tensors and reshape for convolution math\n db2_lpf = tf.constant(db2_lpf[::-1])\n self.db2_lpf = tf.reshape(db2_lpf, (1, wavelet.dec_len, 1, 1))\n\n db2_hpf = tf.constant(db2_hpf[::-1])\n self.db2_hpf = tf.reshape(db2_hpf, (1, wavelet.dec_len, 1, 1))\n\n self.conv_type = \"VALID\"\n self.border_padd = \"SYMMETRIC\"\n\n def build(self, input_shape):\n # filter dims should be bigger if input is not gray scale\n if input_shape[-1] != 1:\n self.db2_lpf = tf.repeat(self.db2_lpf, input_shape[-1], axis=-1)\n self.db2_hpf = tf.repeat(self.db2_hpf, input_shape[-1], axis=-1)\n\n def call(self, inputs, training=None, mask=None):\n\n # border padding symatric add coulums\n inputs_pad = tf.pad(inputs, [[0, 0], [0, 0], [self.dec_len-1, self.dec_len-1], [0, 0]], self.border_padd)\n\n # approximation conv only rows\n a = tf.nn.conv2d(\n inputs_pad, self.db2_lpf, padding=self.conv_type, strides=[1, 1, 1, 1],\n )\n # details conv only rows\n d = tf.nn.conv2d(\n inputs_pad, self.db2_hpf, padding=self.conv_type, strides=[1, 1, 1, 1],\n )\n # ds - down sample\n a_ds = a[:, :, 1:a.shape[2]:2, :]\n d_ds = d[:, :, 1:d.shape[2]:2, :]\n\n # border padding symatric add rows\n a_ds_pad = tf.pad(a_ds, [[0, 0], [self.dec_len-1, self.dec_len-1], [0, 0], [0, 0]], self.border_padd)\n d_ds_pad = tf.pad(d_ds, [[0, 0], [self.dec_len-1, self.dec_len-1], [0, 0], [0, 0]], self.border_padd)\n\n # convolution is done on the rows so we need to\n # transpose the matrix in order to convolve the colums\n a_ds_pad = tf.transpose(a_ds_pad, perm=[0, 2, 1, 3])\n d_ds_pad = tf.transpose(d_ds_pad, perm=[0, 2, 1, 3])\n\n # aa approximation approximation\n aa = tf.nn.conv2d(\n a_ds_pad, self.db2_lpf, padding=self.conv_type, strides=[1, 1, 1, 1],\n )\n # ad approximation details\n ad = tf.nn.conv2d(\n a_ds_pad, self.db2_hpf, padding=self.conv_type, strides=[1, 1, 1, 1],\n )\n # ad details aproximation\n da = tf.nn.conv2d(\n d_ds_pad, self.db2_lpf, padding=self.conv_type, strides=[1, 1, 1, 1],\n )\n # dd details details\n dd = tf.nn.conv2d(\n d_ds_pad, self.db2_hpf, padding=self.conv_type, strides=[1, 1, 1, 1],\n )\n\n # transpose back the matrix\n aa = tf.transpose(aa, perm=[0, 2, 1, 3])\n ad = tf.transpose(ad, perm=[0, 2, 1, 3])\n da = tf.transpose(da, perm=[0, 2, 1, 3])\n dd = tf.transpose(dd, perm=[0, 2, 1, 3])\n\n # down sample\n ll = aa[:, 1:aa.shape[1]:2, :, :]\n lh = ad[:, 1:ad.shape[1]:2, :, :]\n hl = da[:, 1:da.shape[1]:2, :, :]\n hh = dd[:, 1:dd.shape[1]:2, :, :]\n\n # concate all outputs ionto tensor\n x = tf.concat([ll, lh, hl, hh], axis=-1)\n\n return x\n\n\nclass IDWT(layers.Layer):\n def __init__(self, name='haar', **kwargs):\n super(IDWT, self).__init__(**kwargs)\n self._name = self.name + \"_\" + name\n self.pad_type = \"VALID\"\n self.border_pad = \"SYMMETRIC\"\n\n # get filter coeffs 
from 3rd party lib\n wavelet = pywt.Wavelet(name)\n self.rec_len = wavelet.rec_len\n\n # decomposition filter low pass and hight pass coeffs\n db2_lpf = wavelet.rec_lo\n db2_hpf = wavelet.rec_hi\n\n # covert filters into tensors and reshape for convolution math\n db2_lpf = tf.constant(db2_lpf[::-1])\n self.db2_lpf = tf.reshape(db2_lpf, (1, wavelet.rec_len, 1, 1))\n\n db2_hpf = tf.constant(db2_hpf[::-1])\n self.db2_hpf = tf.reshape(db2_hpf, (1, wavelet.rec_len, 1, 1))\n\n def upsampler2d(self, x):\n \"\"\"\n up sampling with zero insertion between rows and columns\n :param x: 4 dim tensor (?, w, h, ch)\n :return: up sampled tensor with shape (?, 2*w, 2*h, ch)\n \"\"\"\n # create zero like tensor\n zero_tensor = tf.zeros_like(x)\n # stack both tensors\n stack_rows = tf.stack([x, zero_tensor], axis=3)\n # reshape for zero insertion between the rows\n stack_rows = tf.reshape(stack_rows, shape=[-1, x.shape[1], x.shape[2]*2, x.shape[3]])\n # transpose in order to insert zeros for the columns\n stack_rows = tf.transpose(stack_rows, perm=[0, 2, 1, 3])\n # create zero like tensor but now like the padded one\n zero_tensor_1 = tf.zeros_like(stack_rows)\n # stack both tensors\n stack_rows_cols = tf.stack([stack_rows, zero_tensor_1], axis=3)\n # reshape for zero insertion between the columns\n us_padded = tf.reshape(stack_rows_cols, shape=[-1, x.shape[1]*2, x.shape[2]*2, x.shape[3]])\n # transpose back to normal\n us_padded = tf.transpose(us_padded, perm=[0, 2, 1, 3])\n return us_padded\n\n def call(self, inputs, training=None, mask=None):\n\n # border padding for convolution with low pass and high pass filters\n x = tf.pad(inputs,\n [[0, 0], [self.rec_len-1, self.rec_len-1], [self.rec_len-1, self.rec_len-1], [0, 0]],\n self.border_pad)\n\n # convert to float32\n # x = tf.cast(x, tf.float32)\n # GPU works with float 32\n # CPU can work with 64 but need to add extra flag\n # convert to float64\n # x = tf.cast(x, tf.float64)\n\n # extract approximation and details from input tensor\n # TODO: whit if tensor shape is bigger then 4?\n # and expand the dims for the up sampling\n ll = tf.expand_dims(x[:, :, :, 0], axis=-1)\n lh = tf.expand_dims(x[:, :, :, 1], axis=-1)\n hl = tf.expand_dims(x[:, :, :, 2], axis=-1)\n hh = tf.expand_dims(x[:, :, :, 3], axis=-1)\n\n ll_us_pad = self.upsampler2d(ll)\n lh_us_pad = self.upsampler2d(lh)\n hl_us_pad = self.upsampler2d(hl)\n hh_us_pad = self.upsampler2d(hh)\n\n # convolution for the rows\n # transpose for the column convolution\n # convolution for the column\n # transpose back to normal\n\n ll_conv_lpf = tf.nn.conv2d(ll_us_pad, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n ll_conv_lpf_tr = tf.transpose(ll_conv_lpf, perm=[0, 2, 1, 3])\n ll_conv_lpf_lpf = tf.nn.conv2d(ll_conv_lpf_tr, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n ll_conv_lpf_lpf_tr = tf.transpose(ll_conv_lpf_lpf, perm=[0, 2, 1, 3])\n\n lh_conv_lpf = tf.nn.conv2d(lh_us_pad, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n lh_conv_lpf_tr = tf.transpose(lh_conv_lpf, perm=[0, 2, 1, 3])\n lh_conv_lpf_hpf = tf.nn.conv2d(lh_conv_lpf_tr, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n lh_conv_lpf_hpf_tr = tf.transpose(lh_conv_lpf_hpf, perm=[0, 2, 1, 3])\n\n hl_conv_hpf = tf.nn.conv2d(hl_us_pad, self.db2_hpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n hl_conv_hpf_tr = tf.transpose(hl_conv_hpf, perm=[0, 2, 1, 3])\n hl_conv_hpf_lpf = tf.nn.conv2d(hl_conv_hpf_tr, self.db2_lpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n hl_conv_hpf_lpf_tr = 
tf.transpose(hl_conv_hpf_lpf, perm=[0, 2, 1, 3])\n\n hh_conv_hpf = tf.nn.conv2d(hh_us_pad, self.db2_hpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n hh_conv_hpf_tr = tf.transpose(hh_conv_hpf, perm=[0, 2, 1, 3])\n hh_conv_hpf_hpf = tf.nn.conv2d(hh_conv_hpf_tr, self.db2_hpf, padding=self.pad_type, strides=[1, 1, 1, 1], )\n hh_conv_hpf_hpf_tr = tf.transpose(hh_conv_hpf_hpf, perm=[0, 2, 1, 3])\n\n # add all together\n reconstructed = tf.add_n([ll_conv_lpf_lpf_tr,\n lh_conv_lpf_hpf_tr,\n hl_conv_hpf_lpf_tr,\n hh_conv_hpf_hpf_tr])\n # crop the paded part\n crop = (self.rec_len -1)*2\n return reconstructed[:, crop-1:-crop, crop-1:-crop, :]\n\n\nif __name__ == \"__main__\":\n # (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n # x_train = x_train.astype(\"float32\")\n # x_test = x_test.astype(\"float32\")\n # # x_train = cv2.imread(\"../input/LennaGrey.png\", 0)\n # frog = tf.expand_dims(\n # x_train[0, :, :, :], 0, name=None\n # )\n # print(\"frog shape\", frog.shape)\n # model = keras.Sequential()\n # model.add(keras.Input(shape=(256, 256, 4)))\n # model.add(IDWT())\n # model.summary()\n\n name = \"db2\"\n img = cv2.imread(\"../input/LennaGrey.png\",0)\n img_ex1 = np.expand_dims(img, axis=-1)\n img_ex2 = np.expand_dims(img_ex1, axis=0)\n\n model = keras.Sequential()\n model.add(layers.InputLayer(input_shape=img_ex1.shape))\n model.add(DWT(name=name))\n # model.summary()\n coeffs = model.predict(img_ex2)\n LL = coeffs[0, ..., 0]\n LH = coeffs[0, ..., 1]\n HL = coeffs[0, ..., 2]\n HH = coeffs[0, ..., 3]\n\n model = keras.Sequential()\n model.add(layers.InputLayer(input_shape=coeffs[0].shape))\n model.add(IDWT(name=name))\n model.summary()\n\n my_recon = model.predict(coeffs)\n img_my_rec = my_recon[0, :, :, 0]\n coeffs2 = pywt.wavedec2(img, name,level=1)\n\n LL2 = coeffs2[0]\n LH2 = coeffs2[1][0]\n HL2 = coeffs2[1][1]\n HH2 = coeffs2[1][2]\n\n recon_pywt = pywt.waverec2(coeffs2, name)\n img_pywt_rec = recon_pywt\n\n print(\"LL mse \", mse.mse(LL, LL2))\n print(\"LH mse \", mse.mse(LH, LH2))\n print(\"HL mse \", mse.mse(HL, HL2))\n print(\"HH mse \", mse.mse(HH, HH2))\n print(\"img mse \", mse.mse(img_pywt_rec, img_my_rec))\n\n difference = cv2.absdiff(np.int32(img_my_rec), np.int32(img_pywt_rec))\n _, mask = cv2.threshold(difference.astype(\"uint8\"), 0, 255, cv2.THRESH_BINARY)\n\n cv2.imshow(\"diff\", mask)\n cv2.waitKey(0)\n pass\n\n\n\n\n\n # a = model.predict(frog, steps=1)\n # #\n # approx = tf.image.convert_image_dtype(a[0, ..., 0], dtype=tf.float32)\n # with tf.Session() as sess:\n # img = sess.run(approx)\n # # pass\n # #\n # img = np.clip(img, 0, 255)\n # img = np.ceil(img)\n # img = img.astype(\"uint8\")\n # with open(r\"D:\\TEMP\\LL_python_layer.raw\", \"wb\") as outfile:\n # outfile.write(img) # Write it\n\n # model = models.WaveletCifar10CNN.WaveletCNN((32,32,3), 10)\n # model.summary()"
] | [
[
"tensorflow.pad",
"tensorflow.stack",
"tensorflow.add_n",
"tensorflow.reshape",
"tensorflow.keras.Sequential",
"tensorflow.expand_dims",
"tensorflow.zeros_like",
"tensorflow.nn.conv2d",
"numpy.int32",
"tensorflow.repeat",
"numpy.expand_dims",
"tensorflow.keras.layers.InputLayer",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.transpose"
]
] |
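Note: the DWT2.py file in the row above checks its Keras DWT/IDWT layers against PyWavelets (pywt.wavedec2 / pywt.waverec2). The sketch below is an independent illustration of the same single-level 2-D round trip using only pywt.dwt2 / pywt.idwt2; the random 64x64 array is a hypothetical stand-in for the LennaGrey.png image used in that file, so this is a reference check, not the layer implementation itself.

import numpy as np
import pywt

# Hypothetical grayscale image standing in for ../input/LennaGrey.png
img = np.random.rand(64, 64)

# Forward transform: approximation band LL plus detail bands (LH, HL, HH)
LL, (LH, HL, HH) = pywt.dwt2(img, "db2", mode="symmetric")

# Inverse transform reconstructs the input up to floating-point error
recon = pywt.idwt2((LL, (LH, HL, HH)), "db2", mode="symmetric")
recon = recon[: img.shape[0], : img.shape[1]]  # guard against a possible one-pixel overhang
print("max abs reconstruction error:", np.abs(recon - img).max())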
chetansurwade/great_expectations | [
"f488d861f3c00c73a6181d6bd5788fb8895079d9"
] | [
"tests/conftest.py"
] | [
"import datetime\nimport locale\nimport logging\nimport os\nimport random\nimport shutil\nimport sys\nimport warnings\nfrom typing import Dict, List, Optional\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom freezegun import freeze_time\nfrom ruamel.yaml import YAML\n\nimport great_expectations as ge\nfrom great_expectations import DataContext\nfrom great_expectations.core import ExpectationConfiguration\nfrom great_expectations.core.expectation_suite import ExpectationSuite\nfrom great_expectations.core.expectation_validation_result import (\n ExpectationValidationResult,\n)\nfrom great_expectations.core.usage_statistics.usage_statistics import (\n UsageStatisticsHandler,\n)\nfrom great_expectations.core.util import get_or_create_spark_application\nfrom great_expectations.data_context.store.profiler_store import ProfilerStore\nfrom great_expectations.data_context.types.base import (\n AnonymizedUsageStatisticsConfig,\n CheckpointConfig,\n DataContextConfig,\n GeCloudConfig,\n)\nfrom great_expectations.data_context.types.resource_identifiers import (\n ConfigurationIdentifier,\n ExpectationSuiteIdentifier,\n GeCloudIdentifier,\n)\nfrom great_expectations.data_context.util import (\n file_relative_path,\n instantiate_class_from_config,\n)\nfrom great_expectations.dataset.pandas_dataset import PandasDataset\nfrom great_expectations.datasource import SqlAlchemyDatasource\nfrom great_expectations.datasource.data_connector.util import (\n get_filesystem_one_level_directory_glob_path_list,\n)\nfrom great_expectations.datasource.new_datasource import BaseDatasource, Datasource\nfrom great_expectations.rule_based_profiler.config import RuleBasedProfilerConfig\nfrom great_expectations.rule_based_profiler.config.base import (\n ruleBasedProfilerConfigSchema,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.simple_date_format_string_parameter_builder import (\n DEFAULT_CANDIDATE_STRINGS,\n)\nfrom great_expectations.self_check.util import (\n build_test_backends_list as build_test_backends_list_v3,\n)\nfrom great_expectations.self_check.util import (\n expectationSuiteValidationResultSchema,\n get_dataset,\n)\nfrom great_expectations.util import is_library_loadable\n\nRULE_BASED_PROFILER_MIN_PYTHON_VERSION: tuple = (3, 7)\n\nyaml = YAML()\n###\n#\n# NOTE: THESE TESTS ARE WRITTEN WITH THE en_US.UTF-8 LOCALE AS DEFAULT FOR STRING FORMATTING\n#\n###\n\nlocale.setlocale(locale.LC_ALL, \"en_US.UTF-8\")\n\nlogger = logging.getLogger(__name__)\n\n\ndef skip_if_python_below_minimum_version():\n \"\"\"\n All test fixtures for Rule-Based Profiler must execute this method; for example:\n ```\n skip_if_python_below_minimum_version()\n ```\n for as long as the support for Python versions less than 3.7 is provided. 
In particular, Python-3.6 support for\n \"dataclasses.asdict()\" does not handle None values as well as the more recent versions of Python do.\n \"\"\"\n if sys.version_info < RULE_BASED_PROFILER_MIN_PYTHON_VERSION:\n pytest.skip(\n \"skipping fixture because Python version 3.7 (or greater) is required\"\n )\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\n \"markers\",\n \"smoketest: mark test as smoketest--it does not have useful assertions but may produce side effects \"\n \"that require manual inspection.\",\n )\n config.addinivalue_line(\n \"markers\",\n \"rendered_output: produces rendered output that should be manually reviewed.\",\n )\n config.addinivalue_line(\n \"markers\",\n \"aws_integration: runs aws integration test that may be very slow and requires credentials\",\n )\n\n\ndef pytest_addoption(parser):\n # note: --no-spark will be deprecated in favor of --spark\n parser.addoption(\n \"--no-spark\",\n action=\"store_true\",\n help=\"If set, suppress tests against the spark test suite\",\n )\n parser.addoption(\n \"--spark\",\n action=\"store_true\",\n help=\"If set, execute tests against the spark test suite\",\n )\n parser.addoption(\n \"--no-sqlalchemy\",\n action=\"store_true\",\n help=\"If set, suppress all tests using sqlalchemy\",\n )\n parser.addoption(\n \"--postgresql\",\n action=\"store_true\",\n help=\"If set, execute tests against postgresql\",\n )\n # note: --no-postgresql will be deprecated in favor of --postgresql\n parser.addoption(\n \"--no-postgresql\",\n action=\"store_true\",\n help=\"If set, supress tests against postgresql\",\n )\n parser.addoption(\n \"--mysql\",\n action=\"store_true\",\n help=\"If set, execute tests against mysql\",\n )\n parser.addoption(\n \"--mssql\",\n action=\"store_true\",\n help=\"If set, execute tests against mssql\",\n )\n parser.addoption(\n \"--bigquery\",\n action=\"store_true\",\n help=\"If set, execute tests against bigquery\",\n )\n parser.addoption(\n \"--aws\",\n action=\"store_true\",\n help=\"If set, execute tests against AWS resources like S3, RedShift and Athena\",\n )\n parser.addoption(\n \"--aws-integration\",\n action=\"store_true\",\n help=\"If set, run aws integration tests for usage_statistics\",\n )\n parser.addoption(\n \"--docs-tests\",\n action=\"store_true\",\n help=\"If set, run integration tests for docs\",\n )\n parser.addoption(\n \"--performance-tests\",\n action=\"store_true\",\n help=\"If set, run performance tests (which might also require additional arguments like --bigquery)\",\n )\n\n\ndef build_test_backends_list(metafunc):\n test_backend_names: List[str] = build_test_backends_list_cfe(metafunc)\n backend_name_class_name_map: Dict[str, str] = {\n \"pandas\": \"PandasDataset\",\n \"spark\": \"SparkDFDataset\",\n }\n backend_name: str\n return [\n (backend_name_class_name_map.get(backend_name) or backend_name)\n for backend_name in test_backend_names\n ]\n\n\ndef build_test_backends_list_cfe(metafunc):\n # adding deprecation warnings\n if metafunc.config.getoption(\"--no-postgresql\"):\n warnings.warn(\n \"--no-sqlalchemy is deprecated as of v0.14 in favor of the --postgresql flag. It will be removed in v0.16. Please adjust your tests accordingly\",\n DeprecationWarning,\n )\n if metafunc.config.getoption(\"--no-spark\"):\n warnings.warn(\n \"--no-spark is deprecated as of v0.14 in favor of the --spark flag. It will be removed in v0.16. 
Please adjust your tests accordingly.\",\n DeprecationWarning,\n )\n include_pandas: bool = True\n include_spark: bool = metafunc.config.getoption(\"--spark\")\n include_sqlalchemy: bool = not metafunc.config.getoption(\"--no-sqlalchemy\")\n include_postgresql: bool = metafunc.config.getoption(\"--postgresql\")\n include_mysql: bool = metafunc.config.getoption(\"--mysql\")\n include_mssql: bool = metafunc.config.getoption(\"--mssql\")\n include_bigquery: bool = metafunc.config.getoption(\"--bigquery\")\n include_aws: bool = metafunc.config.getoption(\"--aws\")\n test_backend_names: List[str] = build_test_backends_list_v3(\n include_pandas=include_pandas,\n include_spark=include_spark,\n include_sqlalchemy=include_sqlalchemy,\n include_postgresql=include_postgresql,\n include_mysql=include_mysql,\n include_mssql=include_mssql,\n include_bigquery=include_bigquery,\n include_aws=include_aws,\n )\n return test_backend_names\n\n\ndef pytest_generate_tests(metafunc):\n test_backends = build_test_backends_list(metafunc)\n if \"test_backend\" in metafunc.fixturenames:\n metafunc.parametrize(\"test_backend\", test_backends, scope=\"module\")\n if \"test_backends\" in metafunc.fixturenames:\n metafunc.parametrize(\"test_backends\", [test_backends], scope=\"module\")\n\n\ndef pytest_collection_modifyitems(config, items):\n if config.getoption(\"--aws-integration\"):\n # --aws-integration given in cli: do not skip aws-integration tests\n return\n if config.getoption(\"--docs-tests\"):\n # --docs-tests given in cli: do not skip documentation integration tests\n return\n skip_aws_integration = pytest.mark.skip(\n reason=\"need --aws-integration option to run\"\n )\n skip_docs_integration = pytest.mark.skip(reason=\"need --docs-tests option to run\")\n for item in items:\n if \"aws_integration\" in item.keywords:\n item.add_marker(skip_aws_integration)\n if \"docs\" in item.keywords:\n item.add_marker(skip_docs_integration)\n\n\[email protected](autouse=True)\ndef no_usage_stats(monkeypatch):\n # Do not generate usage stats from test runs\n monkeypatch.setenv(\"GE_USAGE_STATS\", \"False\")\n\n\[email protected](scope=\"module\")\ndef sa(test_backends):\n if not any(\n [dbms in test_backends for dbms in [\"postgresql\", \"sqlite\", \"mysql\", \"mssql\"]]\n ):\n pytest.skip(\"No recognized sqlalchemy backend selected.\")\n else:\n try:\n import sqlalchemy as sa\n\n return sa\n except ImportError:\n raise ValueError(\"SQL Database tests require sqlalchemy to be installed.\")\n\n\[email protected](index=2)\[email protected]\ndef spark_session(test_backends):\n if \"SparkDFDataset\" not in test_backends:\n pytest.skip(\"No spark backend selected.\")\n\n try:\n import pyspark\n from pyspark.sql import SparkSession\n\n return get_or_create_spark_application(\n spark_config={\n \"spark.sql.catalogImplementation\": \"hive\",\n \"spark.executor.memory\": \"450m\",\n # \"spark.driver.allowMultipleContexts\": \"true\", # This directive does not appear to have any effect.\n }\n )\n except ImportError:\n raise ValueError(\"spark tests are requested, but pyspark is not installed\")\n\n\[email protected]\ndef basic_spark_df_execution_engine(spark_session):\n from great_expectations.execution_engine import SparkDFExecutionEngine\n\n conf: List[tuple] = spark_session.sparkContext.getConf().getAll()\n spark_config: Dict[str, str] = dict(conf)\n execution_engine: SparkDFExecutionEngine = SparkDFExecutionEngine(\n spark_config=spark_config,\n )\n return execution_engine\n\n\[email protected](index=3)\[email protected]\ndef 
spark_session_v012(test_backends):\n if \"SparkDFDataset\" not in test_backends:\n pytest.skip(\"No spark backend selected.\")\n\n try:\n import pyspark\n from pyspark.sql import SparkSession\n\n return get_or_create_spark_application(\n spark_config={\n \"spark.sql.catalogImplementation\": \"hive\",\n \"spark.executor.memory\": \"450m\",\n # \"spark.driver.allowMultipleContexts\": \"true\", # This directive does not appear to have any effect.\n }\n )\n except ImportError:\n raise ValueError(\"spark tests are requested, but pyspark is not installed\")\n\n\[email protected]\ndef basic_expectation_suite(empty_data_context_stats_enabled):\n context: DataContext = empty_data_context_stats_enabled\n expectation_suite = ExpectationSuite(\n expectation_suite_name=\"default\",\n meta={},\n expectations=[\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\",\n kwargs={\"column\": \"infinities\"},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\", kwargs={\"column\": \"nulls\"}\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\", kwargs={\"column\": \"naturals\"}\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_unique\",\n kwargs={\"column\": \"naturals\"},\n ),\n ],\n data_context=context,\n )\n return expectation_suite\n\n\[email protected]\ndef numeric_high_card_dict():\n # fmt: off\n data = {\n \"norm_0_1\": [\n 0.7225866251125405, -0.5951819764073379, -0.2679313226299394, -0.22503289285616823, 0.1432092195399402, 1.1874676802669433, 1.2766412196640815, 0.15197071140718296, -0.08787273509474242, -0.14524643717509128, -1.236408169492396, -0.1595432263317598, 1.0856768114741797, 0.5082788229519655, 0.26419244684748955, -0.2532308428977167, -0.6362679196021943, -3.134120304969242, -1.8990888524318292, 0.15701781863102648,\n -0.775788419966582, -0.7400872167978756, -0.10578357492485335, 0.30287010067847436, -1.2127058770179304, -0.6750567678010801, 0.3341434318919877, 1.8336516507046157, 1.105410842250908, -0.7711783703442725, -0.20834347267477862, -0.06315849766945486, 0.003016997583954831, -1.0500016329150343, -0.9168020284223636, 0.306128397266698, 1.0980602112281863, -0.10465519493772572, 0.4557797534454941, -0.2524452955086468,\n -1.6176089110359837, 0.46251282530754667, 0.45751208998354903, 0.4222844954971609, 0.9651098606162691, -0.1364401431697167, -0.4988616288584964, -0.29549238375582904, 0.6950204582392359, 0.2975369992016046, -1.0159498719807218, 1.3704532401348395, 1.1210419577766673, 1.2051869452003332, 0.10749349867353084, -3.1876892257116562, 1.316240976262548, -1.3777452919511493, -1.0666211985935259, 1.605446695828751,\n -0.39682821266996865, -0.2828059717857655, 1.30488698803017, -2.116606225467923, -0.2026680301462151, -0.05504008273574069, -0.028520163428411835, 0.4424105678123449, -0.3427628263418371, 0.23805293411919937, -0.7515414823259695, -0.1272505897548366, 1.803348436304099, -2.0178252709022124, 0.4860300090112474, 1.2304054166426217, 0.7228668982068365, 1.7400607500575112, 0.3480274098246697, -0.3887978895385282,\n -1.6511926233909175, 0.14517929503564567, -1.1599010576123796, -0.016133552438119002, 0.47157644883706273, 0.27657785075518254, 1.4464286976282463, -1.2605489185634533, -1.2548765025615338, 0.0755319579826929, 1.0476733637516833, -0.7038690219524807, -0.9580696842862921, -0.18135657098008018, -0.18163993379314564, 0.4092798531146971, -2.049808182546896, -1.2447062617916826, -1.6681140306283337, 1.0709944517933483,\n -0.7059385234342846, 
-0.8033587669003331, -1.8152275905903312, 0.11729996097670137, 2.2994900038012376, -0.1291192451734159, -0.6731565869164164, -0.06690994571366346, -0.40330072968473235, -0.23927186025094221, 2.7756216937096676, 0.06441299443146056, -0.5095247173507204, -0.5228853558871007, 0.806629654091097, -2.110096084114651, -0.1233374136509439, -1.021178519845751, 0.058906278340351045, -0.26316852406211017,\n -1.2990807244026237, -0.1937986598084067, 0.3909222793445317, 0.578027315076297, -0.11837271520846208, -1.134297652720464, 0.496915417153268, -0.5315184110418045, 0.5284176849952198, -1.6810338988102331, 0.41220454054009154, 1.0554031136792, -1.4222775023918832, -1.1664353586956209, 0.018952180522661358, -0.04620616876577671, -0.8446292647938418, -0.6889432180332509, -0.16012081070647954, 0.5680940644754282,\n -1.9792941921407943, 0.35441842206114726, 0.12433268557499534, 0.25366905921805377, 0.6262297786892028, 1.327981424671081, 1.774834324890265, -0.9725604763128438, 0.42824027889428, 0.19725541390327114, 1.4640606982992412, 1.6484993842838995, 0.009848260786412894, -2.318740403198263, -0.4125245127403577, -0.15500831770388285, 1.010740123094443, 0.7509498708766653, -0.021415407776108144, 0.6466776546788641,\n -1.421096837521404, 0.5632248951325018, -1.230539161899903, -0.26766333435961503, -1.7208241092827994, -1.068122926814994, -1.6339248620455546, 0.07225436117508208, -1.2018233250224348, -0.07213000691963527, -1.0080992229563746, -1.151378048476321, -0.2660104149809121, 1.6307779136408695, 0.8394822016824073, -0.23362802143120032, -0.36799502320054384, 0.35359852278856263, 0.5830948999779656, -0.730683771776052,\n 1.4715728371820667, -1.0668090648998136, -1.025762014881618, 0.21056106958224155, -0.5141254207774576, -0.1592942838690149, 0.7688711617969363, -2.464535892598544, -0.33306989349452987, 0.9457207224940593, 0.36108072442574435, -0.6490066877470516, -0.8714147266896871, 0.6567118414749348, -0.18543305444915045, 0.11156511615955596, 0.7299392157186994, -0.9902398239693843, -1.3231344439063761, -1.1402773433114928,\n 0.3696183719476138, -1.0512718152423168, -0.6093518314203102, 0.0010622538704462257, -0.17676306948277776, -0.6291120128576891, 1.6390197341434742, -0.8105788162716191, -2.0105672384392204, -0.7909143328024505, -0.10510684692203587, -0.013384480496840259, 0.37683659744804815, -0.15123337965442354, 1.8427651248902048, 1.0371006855495906, 0.29198928612503655, -1.7455852392709181, 1.0854545339796853, 1.8156620972829793,\n 1.2399563224061596, 1.1196530775769857, 0.4349954478175989, 0.11093680938321168, 0.9945934589378227, -0.5779739742428905, 1.0398502505219054, -0.09401160691650227, 0.22793239636661505, -1.8664992140331715, -0.16104499274010126, -0.8497511318264537, -0.005035074822415585, -1.7956896952184151, 1.8304783101189757, 0.19094408763231646, 1.3353023874309002, 0.5889134606052353, -0.48487660139277866, 0.4817014755127622,\n 1.5981632863770983, 2.1416849775567943, -0.5524061711669017, 0.3364804821524787, -0.8609687548167294, 0.24548635047971906, -0.1281468603588133, -0.03871410517044196, -0.2678174852638268, 0.41800607312114096, -0.2503930647517959, 0.8432391494945226, -0.5684563173706987, -0.6737077809046504, 2.0559579098493606, -0.29098826888414253, -0.08572747304559661, -0.301857666880195, -0.3446199959065524, 0.7391340848217359,\n -0.3087136212446006, 0.5245553707204758, -3.063281336805349, 0.47471623010413705, 0.3733427291759615, -0.26216851429591426, -0.5433523111756248, 0.3305385199964823, -1.4866150542941634, -0.4699911958560942, 0.7312367186673805, 
-0.22346998944216903, -0.4102860865811592, -0.3003478250288424, -0.3436168605845268, 0.9456524589400904, -0.03710285453384255, 0.10330609878001526, 0.6919858329179392, 0.8673477607085118,\n 0.380742577915601, 0.5785785515837437, -0.011421905830097267, 0.587187810965595, -1.172536467775141, -0.532086162097372, -0.34440413367820183, -1.404900386188497, -0.1916375229779241, 1.6910999461291834, -0.6070351182769795, -0.8371447893868493, 0.8853944070432224, 1.4062946075925473, -0.4575973141608374, 1.1458755768004445, 0.2619874618238163, 1.7105876844856704, -1.3938976454537522, -0.11403217166441704,\n -1.0354305240085717, -0.4285770475062154, 0.10326635421187867, 0.6911853442971228, 0.6293835213179542, -0.819693698713199, -0.7378190403744175, -1.495947672573938, -1.2406693914431872, -1.0486341638186725, -1.3715759883075953, 3.585407817418151, -0.8007079372574223, -1.527336776754733, -0.4716571043072485, -0.6967311271405545, 1.0003347462169225, -0.30569565002022697, 0.3646134876772732, 0.49083033603832493,\n 0.07754580794955847, -0.13467337850920083, 0.02134473458605164, 0.5025183900540823, -0.940929087894874, 1.441600637127558, -0.0857298131221344, -0.575175243519591, 0.42622029657630595, -0.3239674701415489, 0.22648849821602596, -0.6636465305318631, 0.30415000329164754, -0.6170241274574016, 0.07578674772163065, 0.2952841441615124, 0.8120317689468056, -0.46861353019671337, 0.04718559572470416, -0.3105660017232523,\n -0.28898463203535724, 0.9575298065734561, -0.1977556031830993, 0.009658232624257272, 1.1432743259603295, -1.8989396918936858, 0.20787070770386357, 1.4256750543782999, -0.03838329973778874, -0.9051229357470373, -1.2002277085489457, 2.405569956130733, 1.895817948326675, -0.8260858325924574, 0.5759061866255807, 2.7022875569683342, 1.0591327405967745, 0.21449833798124354, 0.19970388388081273, 0.018242139911433558,\n -0.630960146999549, -2.389646042147776, 0.5424304992480339, -1.2159551561948718, -1.6851632640204128, -0.4812221268109694, 0.6217652794219579, -0.380139431677482, -0.2643524783321051, 0.5106648694993016, -0.895602157034141, -0.20559568725141816, 1.5449271875734911, 1.544075783565114, 0.17877619857826843, 1.9729717339967108, 0.8302033109816261, -0.39118561199170965, -0.4428357598297098, -0.02550407946753186,\n -1.0202977138210447, 2.6604654314300835, 1.9163029269361842, 0.34697436596877657, -0.8078124769022497, -1.3876596649099957, 0.44707250163663864, -0.6752837232272447, -0.851291770954755, 0.7599767868730256, 0.8134109401706875, -1.6766750539980289, -0.06051832829232975, -0.4652931327216134, -0.9249124398287735, 1.9022739762222731, 1.7632300613807597, 1.675335012283785, 0.47529854476887495, -0.7892463423254658,\n 0.3910120652706098, 0.5812432547936405, 0.2693084649672777, -0.08138564925779349, 0.9150619269526952, -0.8637356349272142, -0.14137853834901817, -0.20192754829896423, 0.04718228147088756, -0.9743600144318, -0.9936290943927825, 0.3544612180477054, 0.6839546770735121, 1.5089070357620178, 1.301167565172228, -1.5396145667672985, 0.42854366341485456, -1.5876582617301032, -0.0316985879141714, 0.3144220016570915,\n -0.05054766725644431, 0.2934139006870167, 0.11396170275994542, -0.6472140129693643, 1.6556030742445431, 1.0319410208453506, 0.3292217603989991, -0.058758121958605435, -0.19917171648476298, -0.5192866115874029, 0.1997510689920335, -1.3675686656161756, -1.7761517497832053, -0.11260276070167097, 0.9717892642758689, 0.0840815981843948, -0.40211265381258554, 0.27384496844034517, -1.0403875081272367, 1.2884781173493884,\n -1.8066239592554476, 1.1136979156298865, 
-0.06223155785690416, 1.3930381289015936, 0.4586305673655182, 1.3159249757827194, -0.5369892835955705, 0.17827408233621184, 0.22693934439969682, 0.8216240002114816, -1.0422409752281838, 0.3329686606709231, -1.5128804353968217, 1.0323052869815534, 1.1640486934424354, 1.6450118078345612, -0.6717687395070293, -0.08135119186406627, 1.2746921873544188, -0.8255794145095643,\n 0.7123504776564864, 0.6953336934741682, 2.191382322698439, 1.4155790749261592, 2.4681081786912866, -2.2904357033803815, -0.8375155191566624, 1.1040106662196736, 0.7084133268872015, -3.401968681942055, 0.23237090512844757, 1.1199436238058174, 0.6333916486592628, -0.6012340913121055, -0.3693951838866523, -1.7742670566875682, -0.36431378282545124, -0.4042586409194551, -0.04648644034604476, 1.5138191613743486,\n -0.2053670782251071, 1.8679122383251414, 0.8355881018692999, -0.5369705129279005, -0.7909355080370954, 2.1080036780007987, 0.019537331188020687, -1.4672982688640615, -1.486842866467901, -1.1036839537574874, 1.0800858540685894, -0.2313974176207594, 0.47763272078271807, -1.9196070490691473, -0.8193535127855751, -0.6853651905832031, -0.18272370464882973, -0.33413577684633056, 2.2261342671906106, 1.6853726343573683,\n 0.8563421109235769, 1.0468799885096596, 0.12189082561416206, -1.3596466927672854, -0.7607432068282968, 0.7061728288620306, -0.4384478018639071, 0.8620104661898899, 1.04258758121448, -1.1464159128515612, 0.9617945424413628, 0.04987102831355013, -0.8472878887606543, 0.32986774370339184, 1.278319839581162, -0.4040926804592034, -0.6691567800662129, 0.9415431940597389, 0.3974846022291844, -0.8425204662387112,\n -1.506166868030291, -0.04248497940038203, 0.26434168799067986, -1.5698380163561454, -0.6651727917714935, 1.2400220571204048, -0.1251830593977037, 0.6156254221302833, 0.43585628657139575, -1.6014619037611209, 1.9152323656075512, -0.8847911114213622, 1.359854519784993, -0.5554989575409871, 0.25064804193232354, 0.7976616257678464, 0.37834567410982123, -0.6300374359617635, -1.0613465068052854, -0.866474302027355,\n 1.2458556977164312, 0.577814049080149, 2.069400463823993, 0.9068690176961165, -0.5031387968484738, -0.3640749863516844, -1.041502465417534, 0.6732994659644133, -0.006355018868252906, -0.3650517541386253, 1.0975063446734974, -2.203726812834859, 1.060685913143899, -0.4618706570892267, 0.06475263817517128, -0.19326357638969882, -0.01812119454736379, 0.1337618009668529, 1.1838276997792907, 0.4273677345455913,\n -0.4912341608307858, 0.2349993979417651, 0.9566260826411601, -0.7948243131958422, -0.6168334352331588, 0.3369425926447926, 0.8547756445246633, 0.2666330662219728, 2.431868771129661, 1.0089732701876513, -0.1162341515974066, -1.1746306816795218, -0.08227639025627424, 0.794676385688044, 0.15005011094018297, -0.8763821573601055, -1.0811684990769739, 0.6311588092267179, 0.026124278982220386, 0.8306502001533514,\n 1.0856487813261877, -0.018702855899823106, -0.07338137135247896, -0.8435746484744243, -0.18091216366556986, 0.2295807891528797, -1.0689295774443397, -1.5621175533013612, 1.3314045672598216, 0.6211561903553582, 1.0479302317100871, -1.1509436982013124, 0.447985084931758, 0.19917261474342404, 0.3582887259341301, 0.9953552868908098, 0.8948165434511316, 0.4949033431999123, -0.23004847985703908, 0.6411581535557106,\n -1.1589671573242186, -0.13691519182560624, -0.8849560872785238, 0.6629182075027006, 2.2608150731789696, 2.2823614453180294, -1.2291376923498247, -0.9267975556981378, 0.2597417839242135, -0.7667310491821938, 0.10503294084132372, 2.960320355577672, -1.0645098483081497, -1.2888339889815872, 
-0.6564570556444346, 0.4742489396354781, 0.8879606773334898, -0.6477585196839569, -0.7309497810668936, 1.7025953934976548,\n 0.1789174966941155, -0.4839093362740933, -0.8917713440107442, 1.4521776747175792, -0.1676974219641624, -0.500672037099228, -0.2947747621553442, 0.929636971325952, -0.7614935150071248, 1.6886298813725842, -0.8136217834373227, 1.2030997228178093, 1.382267485738376, 2.594387458306705, -0.7703668776292266, -0.7642584795112598, 1.3356598324609947, -0.5745269784148925, -2.212092904499444, -1.727975556661197,\n -0.18543087256023608, -0.10167435635752538, 1.3480966068787303, 0.0142803272337873, -0.480077631815393, -0.32270216749876185, -1.7884435311074431, -0.5695640948971382, -0.22859087912027687, -0.08783386938029487, -0.18151955278624396, 0.2031493507095467, 0.06444304447669409, -0.4339138073294572, 0.236563959074551, -0.2937958719187449, 0.1611232843821199, -0.6574871644742827, 1.3141902865107886, 0.6093649138398077,\n 0.056674985715912514, -1.828714441504608, -0.46768482587669535, 0.6489735384886999, 0.5035677725398181, -0.887590772676158, -0.3222316759913631, -0.35172770495027483, -0.4329205472963193, -0.8449916868048998, 0.38282765028957993, 1.3171924061732359, 0.2956667124648384, 0.5390909497681301, -0.7591989862253667, -1.1520792974885883, -0.39344757869384944, 0.6192677330177175, -0.05578834574542242, 0.593015990282657,\n 0.9374465229256678, 0.647772562443425, 1.1071167572595217, -1.3015016617832518, 1.267300472456379, -0.5807673178649629, 0.9343468385348384, -0.28554893036513673, 0.4487573993840033, 0.6749018890520516, -1.20482985206765, 0.17291806504654686, -0.4124576407610529, -0.9203236505429044, -0.7461342369802754, -0.19694162321688435, 0.46556512963300906, 0.5198366004764268, -1.7222561645076129, -0.7078891617994071,\n -1.1653209054214695, 1.5560964971092122, 0.3335520152642012, 0.008390825910327906, 0.11336719644324977, 0.3158913817073965, 0.4704483453862008, -0.5700583482495889, -1.276634964816531, -1.7880560933777756, -0.26514994709973827, 0.6194447367446946, -0.654762456435761, 1.0621929196158544, 0.4454719444987052, -0.9323145612076791, 1.3197357985874438, -0.8792938558447049, -0.2470423905508279, 0.5128954444799875,\n -0.09202044992462606, -1.3082892596744382, -0.34428948138804927, 0.012422196356164879, 1.4626152292162142, 0.34678216997159833, 0.409462409138861, 0.32838364873801185, 1.8776849459782967, 1.6816627852133539, -0.24894138693568296, 0.7150105850753732, 0.22929306929129853, -0.21434910504054566, 1.3339497173912471, -1.2497042452057836, -0.04487255356399775, -0.6486304639082145, -0.8048044333264733, -1.8090170501469942,\n 1.481689285694336, -1.4772553200884717, -0.36792462539303805, -1.103508260812736, -0.2135236993720317, 0.40889179796540165, 1.993585196733386, 0.43879096427562897, -0.44512875171982147, -1.1780830020629518, -1.666001035275436, -0.2977294957665528, 1.7299614542270356, 0.9882265798853356, 2.2412430815464597, 0.5801434875813244, -0.739190619909163, -1.2663490594895201, 0.5735521649879137, 1.2105709455012765,\n 1.9112159951415644, -2.259218931706201, -0.563310876529377, -2.4119185903750493, 0.9662624485722368, -0.22788851242764951, 0.9198283887420099, 0.7855927065251492, -0.7459868094792474, 0.10543289218409971, 0.6401750224618271, -0.0077375118689326705, -0.11647036625911977, -0.4722391874001602, -0.2718425102733572, -0.8796746964457087, 0.6112903638894259, 0.5347851929096421, -0.4749419210717794, 1.0633720764557604,\n -0.2590556665572949, 2.590182301241823, 1.4524061372706638, -0.8503733047335056, 0.5609357391481067, 
-1.5661825434426477, 0.8019667474525984, 1.2716795425969496, 0.20011166646917924, -0.7105405282282679, -0.5593129072748189, -1.2401371010520867, -0.7002520937780202, -2.236596391787529, -1.8130090502823886, -0.23990633860801777, 1.7428780878151378, 1.4661206538178901, -0.8678567353744017, 0.2957423562639015,\n 0.13935419069962593, 1.399598845123674, 0.059729544605779575, -0.9607778026198247, 0.18474907798482051, 1.0117193651915666, -0.9173540069396245, 0.8934765521365161, -0.665655291396948, -0.32955768273493324, 0.3062873812209283, 0.177342106982554, 0.3595522704599547, -1.5964209653110262, 0.6705899137346863, -1.1034642863469553, -1.0029562484065524, 0.10622956543479244, 0.4261871936541378, 0.7777501694354336,\n -0.806235923997437, -0.8272801398172428, -1.2783440745845536, 0.5982979227669168, -0.28214494859284556, 1.101560367699546, -0.14008021262664466, -0.38717961692054237, 0.9962925044431369, -0.7391490127960976, -0.06294945881724459, 0.7283671247384875, -0.8458895297768138, 0.22808829204347086, 0.43685668023014523, 0.9204095286935638, -0.028241645704951284, 0.15951784765135396, 0.8068984900818966, -0.34387965576978663,\n 0.573828962760762, -0.13374515460012618, -0.5552788325377814, 0.5644705833909952, -0.7500532220469983, 0.33436674493862256, -0.8595435026628129, -0.38943898244735853, 0.6401502590131951, -1.2968645995363652, 0.5861622311675501, 0.2311759458689689, 0.10962292708600496, -0.26025023584932205, -0.5398478003611565, -1.0514168636922954, 1.2689172189127857, 1.7029909647408918, -0.02325431623491577, -0.3064675950620902,\n -1.5816446841009473, 0.6874254059433739, 0.7755967316475798, 1.4119333324396597, 0.14198739135512406, 0.2927714469848192, -0.7239793888399496, 0.3506448783535265, -0.7568480706640158, -1.2158508387501554, 0.22197589131086445, -0.5621415304506887, -1.2381112050191665, -1.917208333033256, -0.3321665793941188, -0.5916951886991071, -1.244826507645294, -0.29767661008214463, 0.8590635852032509, -1.8579290298421591,\n -1.0470546224962876, -2.540080936704841, 0.5458326769958273, 0.042222128206941614, 0.6080450228346708, 0.6542717901662132, -1.7292955132690793, -0.4793123354077725, 0.7341767020417185, -1.3322222208234826, -0.5076389542432337, 0.684399163420284, 0.3948487980667425, -1.7919279627150193, 1.582925890933478, 0.8341846456063038, 0.11776890377042544, 1.7471239793853526, 1.2269451783893597, 0.4235463733287474,\n 1.5908284320029056, -1.635191535538596, 0.04419903330064594, -1.264385360373252, 0.5370192519783876, 1.2368603501240771, -0.9241079150337286, -0.3428051342915208, 0.0882286441353256, -2.210824604513402, -1.9000343283757128, 0.4633735273417207, -0.32534396967175094, 0.026187836765356437, 0.18253601230609245, 0.8519745761039671, -0.028225375482784816, -0.5114197447067229, -1.2428743809444227, 0.2879711400745508,\n 1.2857130031108321, 0.5296743558975853, -0.8440551904275335, -1.3776032491368861, 1.8164028526343798, -1.1422045767986222, -1.8675179752970443, 0.6969635320800454, 0.9444010906414336, -1.28197913481747, -0.06259132322304235, -0.4518754825442558, 0.9183188639099813, -0.2916931407869574, -1.1464007469977915, -0.4475136941593681, 0.44385573868752803, 2.1606711638680762, -1.4813603018181851, -0.5647618024870872,\n -1.474746204557383, -2.9067748098220485, 0.06132111635940877, -0.09663310829361334, -1.087053744976143, -1.774855117659402, 0.8130120568830074, -0.5179279676199186, -0.32549430825787784, -1.1995838271705979, 0.8587480835176114, -0.02095126282663596, 0.6677898019388228, -1.1891003375304232, -2.1125937754631305, -0.047765192715672734, 
0.09812525010300294, -1.034992359189106, 1.0213451864081846, 1.0788796513160641,\n -1.444469239557739, 0.28341828947950637, -2.4556013891966737, 1.7126080715698266, -0.5943068899412715, 1.0897594994215383, -0.16345461884651272, 0.7027032523865234, 2.2851158088542562, 0.5038100496225458, -0.16724173993999966, -0.6747457076421414, 0.42254684460738184, 1.277203836895222, -0.34438446183574595, 0.38956738377878264, -0.26884968654334923, -0.02148772950361766, 0.02044885235644607, -1.3873669828232345,\n 0.19995968746809226, -1.5826859815811556, -0.20385119370067947, 0.5724329589281247, -1.330307658319185, 0.7756101314358208, -0.4989071461473931, 0.5388161769427321, -0.9811085284266614, 2.335331094403556, -0.5588657325211347, -1.2850853695283377, 0.40092993245913744, -1.9675685522110529, 0.9378938542456674, -0.18645815013912917, -0.6828273180353106, -1.840122530632185, -1.2581798109361761, 0.2867275394896832,\n ],\n }\n # fmt: on\n return data\n\n\[email protected]\ndef numeric_high_card_dataset(test_backend, numeric_high_card_dict):\n schemas = {\n \"pandas\": {\n \"norm_0_1\": \"float64\",\n },\n \"postgresql\": {\n # \"norm_0_1\": \"DOUBLE_PRECISION\",\n \"norm_0_1\": \"NUMERIC\",\n },\n \"sqlite\": {\n \"norm_0_1\": \"FLOAT\",\n },\n \"mysql\": {\n \"norm_0_1\": \"DOUBLE\",\n },\n \"mssql\": {\n \"norm_0_1\": \"FLOAT\",\n },\n \"spark\": {\n \"norm_0_1\": \"FloatType\",\n },\n }\n return get_dataset(test_backend, numeric_high_card_dict, schemas=schemas)\n\n\[email protected]\ndef non_numeric_high_card_dataset(test_backend):\n \"\"\"Provide dataset fixtures that have special values and/or are otherwise useful outside\n the standard json testing framework\"\"\"\n\n # fmt: off\n data = {\n \"highcardnonnum\": [\n \"CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb\", \"cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ\", \"4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7\", \"ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz\", \"AaqMhdYukVdexTk6LlWvzXYXTp5upPuf\", \"ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR\", \"F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2\", \"coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq\", \"3IzmbSJF525qtn7O4AvfKONnz7eFgnyU\", \"gLCtw7435gaR532PNFVCtvk14lNJpZXv\",\n \"hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R\", \"IqKC2auGTNehP8y24HzDQOdt9oysgFyx\", \"TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg\", \"cIfDv6ieTAobe84P84InzDKrJrccmqbq\", \"m1979gfI6lVF9ijJA245bchYFd1EaMap\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"7wcR161jyKYhFLEZkhFqSXLwXW46I5x8\", \"IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn\", \"hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg\", \"vwZyG0jGUys3HQdUiOocIbzhUdUugwKX\",\n \"rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6\", \"p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA\", \"VzgAIYNKHA0APN0oZtzMAfmbCzJenswy\", \"IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG\", \"eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp\", \"4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU\", \"ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u\", \"nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H\", \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\", \"CP22IFHDX1maoSjTEdtBfrMHWQKACGDB\",\n \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6\", \"OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT\", \"JQbXIcgwUhttfPIGB7VGGfL2KiElabrO\", \"eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57\", \"GW2JuUJmuCebia7RUiCNl2BTjukIzZWj\", \"oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC\", \"zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ\", \"eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y\", \"xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77\",\n \"1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01\", \"uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG\", \"agIk8H2nFa0K27IFr0VM2RNp6saihYI3\", \"cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N\", \"fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj\", 
\"HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8\", \"938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev\", \"PyZetp4izgE4ymPcUXyImF5mm7I6zbta\",\n \"FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs\", \"PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd\", \"eSQIxFqyYVf55UMzMEZrotPO74i3Sh03\", \"2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR\", \"3svDRnrELyAsC69Phpnl2Os89856tFBJ\", \"ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN\", \"m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1\", \"wZTwJmMX5Q58DhDdmScdigTSyUUC04sO\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs\",\n \"0S4iueoqKNjvS55O57BdY3DbfwhIDwKc\", \"ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF\", \"Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i\", \"pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU\", \"6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM\", \"puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB\", \"jOI4E43wA3lYBWbV0nMxqix885Tye1Pf\", \"YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7\", \"24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ\", \"mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"ItvI4l02oAIZEd5cPtDf4OnyBazji0PL\", \"DW4oLNP49MNNENFoFf7jDTI04xdvCiWg\", \"vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn\", \"bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6\", \"UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c\", \"He7xIY2BMNZ7vSO47KfKoYskVJeeedI7\", \"G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR\",\n \"hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF\", \"mlYdlfei13P6JrT7ZbSZdsudhE24aPYr\", \"gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4\", \"xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo\", \"kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx\", \"7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg\", \"Wkh43H7t95kRb9oOMjTSqC7163SrI4rU\", \"x586wCHsLsOaXl3F9cYeaROwdFc2pbU1\", \"oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh\", \"suns0vGgaMzasYpwDEEof2Ktovy0o4os\",\n \"of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC\", \"mmTiWVje9SotwPgmRxrGrNeI9DssAaCj\", \"pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54\", \"nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2\", \"prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG\", \"JL38Vw7yERPC4gBplBaixlbpDg8V7gC6\", \"MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI\", \"hmr0LNyYObqe5sURs408IhRb50Lnek5K\",\n \"CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb\", \"cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ\", \"4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7\", \"ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz\", \"AaqMhdYukVdexTk6LlWvzXYXTp5upPuf\", \"ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR\", \"F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2\", \"coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq\", \"3IzmbSJF525qtn7O4AvfKONnz7eFgnyU\", \"gLCtw7435gaR532PNFVCtvk14lNJpZXv\",\n \"hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R\", \"IqKC2auGTNehP8y24HzDQOdt9oysgFyx\", \"TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg\", \"cIfDv6ieTAobe84P84InzDKrJrccmqbq\", \"m1979gfI6lVF9ijJA245bchYFd1EaMap\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"7wcR161jyKYhFLEZkhFqSXLwXW46I5x8\", \"IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn\", \"hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg\", \"vwZyG0jGUys3HQdUiOocIbzhUdUugwKX\",\n \"rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6\", \"p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA\", \"VzgAIYNKHA0APN0oZtzMAfmbCzJenswy\", \"IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG\", \"eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp\", \"4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU\", \"ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u\", \"nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H\", \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\", \"CP22IFHDX1maoSjTEdtBfrMHWQKACGDB\",\n \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6\", \"OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT\", \"JQbXIcgwUhttfPIGB7VGGfL2KiElabrO\", \"eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57\", \"GW2JuUJmuCebia7RUiCNl2BTjukIzZWj\", \"oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC\", \"zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ\", \"eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y\", \"xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77\",\n 
\"1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01\", \"uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG\", \"agIk8H2nFa0K27IFr0VM2RNp6saihYI3\", \"cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N\", \"fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj\", \"HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8\", \"938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev\", \"PyZetp4izgE4ymPcUXyImF5mm7I6zbta\",\n \"FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs\", \"PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd\", \"eSQIxFqyYVf55UMzMEZrotPO74i3Sh03\", \"2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR\", \"3svDRnrELyAsC69Phpnl2Os89856tFBJ\", \"ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN\", \"m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1\", \"wZTwJmMX5Q58DhDdmScdigTSyUUC04sO\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs\",\n \"0S4iueoqKNjvS55O57BdY3DbfwhIDwKc\", \"ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF\", \"Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i\", \"pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU\", \"6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM\", \"puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB\", \"jOI4E43wA3lYBWbV0nMxqix885Tye1Pf\", \"YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7\", \"24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ\", \"mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"ItvI4l02oAIZEd5cPtDf4OnyBazji0PL\", \"DW4oLNP49MNNENFoFf7jDTI04xdvCiWg\", \"vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn\", \"bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6\", \"UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c\", \"He7xIY2BMNZ7vSO47KfKoYskVJeeedI7\", \"G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR\",\n \"hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF\", \"mlYdlfei13P6JrT7ZbSZdsudhE24aPYr\", \"gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4\", \"xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo\", \"kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx\", \"7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg\", \"Wkh43H7t95kRb9oOMjTSqC7163SrI4rU\", \"x586wCHsLsOaXl3F9cYeaROwdFc2pbU1\", \"oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh\", \"suns0vGgaMzasYpwDEEof2Ktovy0o4os\",\n \"of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC\", \"mmTiWVje9SotwPgmRxrGrNeI9DssAaCj\", \"pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54\", \"nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2\", \"prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG\", \"JL38Vw7yERPC4gBplBaixlbpDg8V7gC6\", \"MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI\", \"hmr0LNyYObqe5sURs408IhRb50Lnek5K\",\n ],\n # Built from highcardnonnum using the following:\n # vals = pd.Series(data[\"highcardnonnum\"])\n # sample_vals = vals.sample(n=10, random_state=42)\n # weights = np.random.RandomState(42).rand(10)\n # weights = weights / np.sum(weights)\n # new_vals = sample_vals.sample(n=200, weights=weights, replace=True, random_state=11)\n \"medcardnonnum\": [\n \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\",\n \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", 
\"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\",\n \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\",\n \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D\",\n \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\",\n \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", 
\"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\",\n \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\",\n \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\",\n \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\",\n \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\",\n \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\",\n \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\",\n \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk\", \"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP\", \"NhTsracusfp5V6zVeWqLZnychDl7jjO4\", \"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer\", \"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3\", \"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ\",\n ],\n }\n # fmt: on\n schemas = {\n \"pandas\": {\n \"highcardnonnum\": \"str\",\n \"medcardnonnum\": \"str\",\n },\n \"postgresql\": {\n \"highcardnonnum\": \"TEXT\",\n \"medcardnonnum\": \"TEXT\",\n },\n \"sqlite\": {\n \"highcardnonnum\": \"VARCHAR\",\n \"medcardnonnum\": \"VARCHAR\",\n },\n \"mysql\": {\n \"highcardnonnum\": \"TEXT\",\n 
\"medcardnonnum\": \"TEXT\",\n },\n \"mssql\": {\n \"highcardnonnum\": \"VARCHAR\",\n \"medcardnonnum\": \"VARCHAR\",\n },\n \"spark\": {\n \"highcardnonnum\": \"StringType\",\n \"medcardnonnum\": \"StringType\",\n },\n }\n return get_dataset(test_backend, data, schemas=schemas)\n\n\ndef dataset_sample_data(test_backend):\n # No infinities for mysql\n if test_backend == \"mysql\":\n data = {\n # \"infinities\": [-np.inf, -10, -np.pi, 0, np.pi, 10/2.2, np.inf],\n \"nulls\": [np.nan, None, 0, 1.1, 2.2, 3.3, None],\n \"naturals\": [1, 2, 3, 4, 5, 6, 7],\n }\n else:\n data = {\n \"infinities\": [-np.inf, -10, -np.pi, 0, np.pi, 10 / 2.2, np.inf],\n \"nulls\": [np.nan, None, 0, 1.1, 2.2, 3.3, None],\n \"naturals\": [1, 2, 3, 4, 5, 6, 7],\n }\n schemas = {\n \"pandas\": {\"infinities\": \"float64\", \"nulls\": \"float64\", \"naturals\": \"float64\"},\n \"postgresql\": {\n \"infinities\": \"DOUBLE_PRECISION\",\n \"nulls\": \"DOUBLE_PRECISION\",\n \"naturals\": \"NUMERIC\",\n },\n \"sqlite\": {\"infinities\": \"FLOAT\", \"nulls\": \"FLOAT\", \"naturals\": \"FLOAT\"},\n \"mysql\": {\"nulls\": \"DOUBLE\", \"naturals\": \"DOUBLE\"},\n \"mssql\": {\"infinities\": \"FLOAT\", \"nulls\": \"FLOAT\", \"naturals\": \"FLOAT\"},\n \"spark\": {\n \"infinities\": \"FloatType\",\n \"nulls\": \"FloatType\",\n \"naturals\": \"FloatType\",\n },\n }\n return data, schemas\n\n\[email protected]\ndef dataset(test_backend):\n \"\"\"Provide dataset fixtures that have special values and/or are otherwise useful outside\n the standard json testing framework\"\"\"\n data, schemas = dataset_sample_data(test_backend)\n return get_dataset(test_backend, data, schemas=schemas)\n\n\[email protected]\ndef pandas_dataset():\n test_backend = \"PandasDataset\"\n data, schemas = dataset_sample_data(test_backend)\n return get_dataset(test_backend, data, schemas=schemas)\n\n\[email protected]\ndef sqlalchemy_dataset(test_backends):\n \"\"\"Provide dataset fixtures that have special values and/or are otherwise useful outside\n the standard json testing framework\"\"\"\n if \"postgresql\" in test_backends:\n backend = \"postgresql\"\n elif \"sqlite\" in test_backends:\n backend = \"sqlite\"\n else:\n return\n\n data = {\n \"infinities\": [-np.inf, -10, -np.pi, 0, np.pi, 10 / 2.2, np.inf],\n \"nulls\": [np.nan, None, 0, 1.1, 2.2, 3.3, None],\n \"naturals\": [1, 2, 3, 4, 5, 6, 7],\n }\n schemas = {\n \"postgresql\": {\n \"infinities\": \"DOUBLE_PRECISION\",\n \"nulls\": \"DOUBLE_PRECISION\",\n \"naturals\": \"DOUBLE_PRECISION\",\n },\n \"sqlite\": {\"infinities\": \"FLOAT\", \"nulls\": \"FLOAT\", \"naturals\": \"FLOAT\"},\n }\n return get_dataset(backend, data, schemas=schemas, profiler=None)\n\n\[email protected]\ndef sqlitedb_engine(test_backend):\n if test_backend == \"sqlite\":\n try:\n import sqlalchemy as sa\n\n return sa.create_engine(\"sqlite://\")\n except ImportError:\n raise ValueError(\"sqlite tests require sqlalchemy to be installed\")\n else:\n pytest.skip(\"Skipping test designed for sqlite on non-sqlite backend.\")\n\n\[email protected]\ndef postgresql_engine(test_backend):\n if test_backend == \"postgresql\":\n try:\n import sqlalchemy as sa\n\n db_hostname = os.getenv(\"GE_TEST_LOCAL_DB_HOSTNAME\", \"localhost\")\n engine = sa.create_engine(\n f\"postgresql://postgres@{db_hostname}/test_ci\"\n ).connect()\n yield engine\n engine.close()\n except ImportError:\n raise ValueError(\"SQL Database tests require sqlalchemy to be installed.\")\n else:\n pytest.skip(\"Skipping test designed for postgresql on non-postgresql 
backend.\")\n\n\[email protected](scope=\"function\")\ndef empty_data_context(\n tmp_path,\n) -> DataContext:\n project_path = tmp_path / \"empty_data_context\"\n project_path.mkdir()\n project_path = str(project_path)\n context = ge.data_context.DataContext.create(project_path)\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n os.makedirs(asset_config_path, exist_ok=True)\n assert context.list_datasources() == []\n return context\n\n\[email protected]\ndef titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled(\n tmp_path_factory,\n monkeypatch,\n):\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n\n project_path: str = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path: str = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n data_path: str = os.path.join(context_path, \"..\", \"data\", \"titanic\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"test_fixtures\",\n \"great_expectations_v013_no_datasource_stats_enabled.yml\",\n ),\n ),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n file_relative_path(__file__, os.path.join(\"test_sets\", \"Titanic.csv\")),\n str(\n os.path.join(\n context_path, \"..\", \"data\", \"titanic\", \"Titanic_19120414_1313.csv\"\n )\n ),\n )\n shutil.copy(\n file_relative_path(__file__, os.path.join(\"test_sets\", \"Titanic.csv\")),\n str(\n os.path.join(context_path, \"..\", \"data\", \"titanic\", \"Titanic_19120414_1313\")\n ),\n )\n shutil.copy(\n file_relative_path(__file__, os.path.join(\"test_sets\", \"Titanic.csv\")),\n str(os.path.join(context_path, \"..\", \"data\", \"titanic\", \"Titanic_1911.csv\")),\n )\n shutil.copy(\n file_relative_path(__file__, os.path.join(\"test_sets\", \"Titanic.csv\")),\n str(os.path.join(context_path, \"..\", \"data\", \"titanic\", \"Titanic_1912.csv\")),\n )\n\n context: DataContext = DataContext(context_root_dir=context_path)\n assert context.root_directory == context_path\n\n datasource_config: str = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n my_basic_data_connector:\n class_name: InferredAssetFilesystemDataConnector\n base_directory: {data_path}\n default_regex:\n pattern: (.*)\\\\.csv\n group_names:\n - data_asset_name\n\n my_special_data_connector:\n class_name: ConfiguredAssetFilesystemDataConnector\n base_directory: {data_path}\n glob_directive: \"*.csv\"\n\n default_regex:\n pattern: (.+)\\\\.csv\n group_names:\n - name\n assets:\n users:\n base_directory: {data_path}\n pattern: (.+)_(\\\\d+)_(\\\\d+)\\\\.csv\n group_names:\n - name\n - timestamp\n - size\n\n my_other_data_connector:\n class_name: ConfiguredAssetFilesystemDataConnector\n base_directory: {data_path}\n glob_directive: \"*.csv\"\n\n default_regex:\n pattern: (.+)\\\\.csv\n group_names:\n - name\n assets:\n users: {{}}\n\n my_runtime_data_connector:\n module_name: great_expectations.datasource.data_connector\n class_name: RuntimeDataConnector\n batch_identifiers:\n - pipeline_stage_name\n - airflow_run_id\n \"\"\"\n\n # noinspection PyUnusedLocal\n datasource: Datasource = context.test_yaml_config(\n name=\"my_datasource\", yaml_config=datasource_config, pretty_print=False\n )\n # noinspection PyProtectedMember\n 
context._save_project_config()\n\n return context\n\n\[email protected]\ndef titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n tmp_path_factory,\n monkeypatch,\n):\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n\n project_dir: str = context.root_directory\n data_path: str = os.path.join(project_dir, \"..\", \"data\", \"titanic\")\n\n datasource_config: str = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n my_additional_data_connector:\n class_name: InferredAssetFilesystemDataConnector\n base_directory: {data_path}\n default_regex:\n pattern: (.*)\\\\.csv\n group_names:\n - data_asset_name\n \"\"\"\n\n # noinspection PyUnusedLocal\n datasource: BaseDatasource = context.add_datasource(\n \"my_additional_datasource\", **yaml.load(datasource_config)\n )\n\n return context\n\n\[email protected]\ndef titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled(\n sa,\n spark_session,\n titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,\n tmp_path_factory,\n test_backends,\n monkeypatch,\n):\n context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled\n\n project_dir: str = context.root_directory\n data_path: str = os.path.join(project_dir, \"..\", \"data\", \"titanic\")\n\n if (\n any(\n [\n dbms in test_backends\n for dbms in [\"postgresql\", \"sqlite\", \"mysql\", \"mssql\"]\n ]\n )\n and (sa is not None)\n and is_library_loadable(library_name=\"sqlalchemy\")\n ):\n db_fixture_file_path: str = file_relative_path(\n __file__,\n os.path.join(\"test_sets\", \"titanic_sql_test_cases.db\"),\n )\n db_file_path: str = os.path.join(\n data_path,\n \"titanic_sql_test_cases.db\",\n )\n shutil.copy(\n db_fixture_file_path,\n db_file_path,\n )\n\n datasource_config: str = f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: SqlAlchemyExecutionEngine\n connection_string: sqlite:///{db_file_path}\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n default_inferred_data_connector_name:\n class_name: InferredAssetSqlDataConnector\n name: whole_table\n \"\"\"\n\n # noinspection PyUnusedLocal\n datasource: BaseDatasource = context.add_datasource(\n \"my_sqlite_db_datasource\", **yaml.load(datasource_config)\n )\n\n return context\n\n\[email protected]\ndef deterministic_asset_dataconnector_context(\n tmp_path_factory,\n monkeypatch,\n):\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n data_path = os.path.join(context_path, \"..\", \"data\", \"titanic\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n shutil.copy(\n file_relative_path(\n __file__,\n \"./test_fixtures/great_expectations_v013_no_datasource_stats_enabled.yml\",\n ),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(\n os.path.join(\n 
context_path, \"..\", \"data\", \"titanic\", \"Titanic_19120414_1313.csv\"\n )\n ),\n )\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(context_path, \"..\", \"data\", \"titanic\", \"Titanic_1911.csv\")),\n )\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(context_path, \"..\", \"data\", \"titanic\", \"Titanic_1912.csv\")),\n )\n context = ge.data_context.DataContext(context_path)\n assert context.root_directory == context_path\n\n datasource_config = f\"\"\"\n class_name: Datasource\n\n execution_engine:\n class_name: PandasExecutionEngine\n\n data_connectors:\n my_other_data_connector:\n class_name: ConfiguredAssetFilesystemDataConnector\n base_directory: {data_path}\n glob_directive: \"*.csv\"\n\n default_regex:\n pattern: (.+)\\\\.csv\n group_names:\n - name\n assets:\n users: {{}}\n \"\"\"\n\n context.test_yaml_config(\n name=\"my_datasource\", yaml_config=datasource_config, pretty_print=False\n )\n # noinspection PyProtectedMember\n context._save_project_config()\n return context\n\n\[email protected]\ndef titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates(\n titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,\n):\n context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled\n\n # add simple template config\n simple_checkpoint_template_config: CheckpointConfig = CheckpointConfig(\n name=\"my_simple_template_checkpoint\",\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template-$VAR\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n evaluation_parameters={\n \"environment\": \"$GE_ENVIRONMENT\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"$MY_PARAM\",\n \"aux_param_1\": \"1 + $MY_PARAM\",\n },\n runtime_configuration={\n \"result_format\": {\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 20,\n }\n },\n )\n simple_checkpoint_template_config_key: ConfigurationIdentifier = (\n ConfigurationIdentifier(\n configuration_key=simple_checkpoint_template_config.name\n )\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_template_config_key,\n value=simple_checkpoint_template_config,\n )\n\n # add nested template configs\n nested_checkpoint_template_config_1: CheckpointConfig = CheckpointConfig(\n name=\"my_nested_checkpoint_template_1\",\n config_version=1,\n run_name_template=\"%Y-%M-foo-bar-template-$VAR\",\n expectation_suite_name=\"suite_from_template_1\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"StoreEvaluationParametersAction\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n ],\n evaluation_parameters={\n \"environment\": \"FOO\",\n \"tolerance\": \"FOOBOO\",\n \"aux_param_0\": \"FOOBARBOO\",\n \"aux_param_1\": \"FOOBARBOO\",\n \"template_1_key\": 456,\n },\n runtime_configuration={\n \"result_format\": \"FOOBARBOO\",\n 
\"partial_unexpected_count\": \"FOOBARBOO\",\n \"template_1_key\": 123,\n },\n validations=[\n {\n \"batch_request\": {\n \"datasource_name\": \"my_datasource_template_1\",\n \"data_connector_name\": \"my_special_data_connector_template_1\",\n \"data_asset_name\": \"users_from_template_1\",\n \"data_connector_query\": {\"partition_index\": -999},\n }\n }\n ],\n )\n nested_checkpoint_template_config_1_key: ConfigurationIdentifier = (\n ConfigurationIdentifier(\n configuration_key=nested_checkpoint_template_config_1.name\n )\n )\n context.checkpoint_store.set(\n key=nested_checkpoint_template_config_1_key,\n value=nested_checkpoint_template_config_1,\n )\n\n nested_checkpoint_template_config_2: CheckpointConfig = CheckpointConfig(\n name=\"my_nested_checkpoint_template_2\",\n config_version=1,\n template_name=\"my_nested_checkpoint_template_1\",\n run_name_template=\"%Y-%M-foo-bar-template-$VAR-template-2\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"MyCustomStoreEvaluationParametersActionTemplate2\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n {\n \"name\": \"new_action_from_template_2\",\n \"action\": {\"class_name\": \"Template2SpecialAction\"},\n },\n ],\n evaluation_parameters={\n \"environment\": \"$GE_ENVIRONMENT\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"$MY_PARAM\",\n \"aux_param_1\": \"1 + $MY_PARAM\",\n },\n runtime_configuration={\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 20,\n },\n )\n nested_checkpoint_template_config_2_key: ConfigurationIdentifier = (\n ConfigurationIdentifier(\n configuration_key=nested_checkpoint_template_config_2.name\n )\n )\n context.checkpoint_store.set(\n key=nested_checkpoint_template_config_2_key,\n value=nested_checkpoint_template_config_2,\n )\n\n nested_checkpoint_template_config_3: CheckpointConfig = CheckpointConfig(\n name=\"my_nested_checkpoint_template_3\",\n config_version=1,\n template_name=\"my_nested_checkpoint_template_2\",\n run_name_template=\"%Y-%M-foo-bar-template-$VAR-template-3\",\n action_list=[\n {\n \"name\": \"store_validation_result\",\n \"action\": {\n \"class_name\": \"StoreValidationResultAction\",\n },\n },\n {\n \"name\": \"store_evaluation_params\",\n \"action\": {\n \"class_name\": \"MyCustomStoreEvaluationParametersActionTemplate3\",\n },\n },\n {\n \"name\": \"update_data_docs\",\n \"action\": {\n \"class_name\": \"UpdateDataDocsAction\",\n },\n },\n {\n \"name\": \"new_action_from_template_3\",\n \"action\": {\"class_name\": \"Template3SpecialAction\"},\n },\n ],\n evaluation_parameters={\n \"environment\": \"$GE_ENVIRONMENT\",\n \"tolerance\": 1.0e-2,\n \"aux_param_0\": \"$MY_PARAM\",\n \"aux_param_1\": \"1 + $MY_PARAM\",\n \"template_3_key\": 123,\n },\n runtime_configuration={\n \"result_format\": \"BASIC\",\n \"partial_unexpected_count\": 20,\n \"template_3_key\": \"bloopy!\",\n },\n )\n nested_checkpoint_template_config_3_key: ConfigurationIdentifier = (\n ConfigurationIdentifier(\n configuration_key=nested_checkpoint_template_config_3.name\n )\n )\n context.checkpoint_store.set(\n key=nested_checkpoint_template_config_3_key,\n value=nested_checkpoint_template_config_3,\n )\n\n # add minimal SimpleCheckpoint\n simple_checkpoint_config: CheckpointConfig = CheckpointConfig(\n name=\"my_minimal_simple_checkpoint\",\n 
class_name=\"SimpleCheckpoint\",\n config_version=1,\n )\n simple_checkpoint_config_key: ConfigurationIdentifier = ConfigurationIdentifier(\n configuration_key=simple_checkpoint_config.name\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_config_key,\n value=simple_checkpoint_config,\n )\n\n # add SimpleCheckpoint with slack webhook\n simple_checkpoint_with_slack_webhook_config: CheckpointConfig = CheckpointConfig(\n name=\"my_simple_checkpoint_with_slack\",\n class_name=\"SimpleCheckpoint\",\n config_version=1,\n slack_webhook=\"https://hooks.slack.com/foo/bar\",\n )\n simple_checkpoint_with_slack_webhook_config_key: ConfigurationIdentifier = (\n ConfigurationIdentifier(\n configuration_key=simple_checkpoint_with_slack_webhook_config.name\n )\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_with_slack_webhook_config_key,\n value=simple_checkpoint_with_slack_webhook_config,\n )\n\n # add SimpleCheckpoint with slack webhook and notify_with\n simple_checkpoint_with_slack_webhook_and_notify_with_all_config: CheckpointConfig = CheckpointConfig(\n name=\"my_simple_checkpoint_with_slack_and_notify_with_all\",\n class_name=\"SimpleCheckpoint\",\n config_version=1,\n slack_webhook=\"https://hooks.slack.com/foo/bar\",\n notify_with=\"all\",\n )\n simple_checkpoint_with_slack_webhook_and_notify_with_all_config_key: ConfigurationIdentifier = ConfigurationIdentifier(\n configuration_key=simple_checkpoint_with_slack_webhook_and_notify_with_all_config.name\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_with_slack_webhook_and_notify_with_all_config_key,\n value=simple_checkpoint_with_slack_webhook_and_notify_with_all_config,\n )\n\n # add SimpleCheckpoint with site_names\n simple_checkpoint_with_site_names_config: CheckpointConfig = CheckpointConfig(\n name=\"my_simple_checkpoint_with_site_names\",\n class_name=\"SimpleCheckpoint\",\n config_version=1,\n site_names=[\"local_site\"],\n )\n simple_checkpoint_with_site_names_config_key: ConfigurationIdentifier = (\n ConfigurationIdentifier(\n configuration_key=simple_checkpoint_with_site_names_config.name\n )\n )\n context.checkpoint_store.set(\n key=simple_checkpoint_with_site_names_config_key,\n value=simple_checkpoint_with_site_names_config,\n )\n\n # noinspection PyProtectedMember\n context._save_project_config()\n return context\n\n\[email protected]\ndef empty_context_with_checkpoint(empty_data_context):\n context = empty_data_context\n root_dir = empty_data_context.root_directory\n fixture_name = \"my_checkpoint.yml\"\n fixture_path = file_relative_path(\n __file__, f\"./data_context/fixtures/contexts/{fixture_name}\"\n )\n checkpoints_file = os.path.join(root_dir, \"checkpoints\", fixture_name)\n shutil.copy(fixture_path, checkpoints_file)\n assert os.path.isfile(checkpoints_file)\n return context\n\n\[email protected]\ndef empty_data_context_stats_enabled(tmp_path_factory, monkeypatch):\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\", raising=False)\n project_path = str(tmp_path_factory.mktemp(\"empty_data_context\"))\n context = ge.data_context.DataContext.create(project_path)\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n os.makedirs(asset_config_path, exist_ok=True)\n return context\n\n\[email protected]\ndef titanic_data_context(\n tmp_path_factory,\n) -> DataContext:\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, 
\"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_v013_titanic.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"..\", \"data\", \"Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_data_context_no_data_docs_no_checkpoint_store(tmp_path_factory):\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_titanic_pre_v013_no_data_docs.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"..\", \"data\", \"Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_data_context_no_data_docs(tmp_path_factory):\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_titanic_no_data_docs.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"..\", \"data\", \"Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_data_context_stats_enabled(tmp_path_factory, monkeypatch):\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_v013_titanic.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, 
str(os.path.join(context_path, \"..\", \"data\", \"Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_data_context_stats_enabled_config_version_2(tmp_path_factory, monkeypatch):\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_titanic.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"..\", \"data\", \"Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_data_context_stats_enabled_config_version_3(tmp_path_factory, monkeypatch):\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n project_path = str(tmp_path_factory.mktemp(\"titanic_data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n os.makedirs(os.path.join(context_path, \"checkpoints\"), exist_ok=True)\n data_path = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n titanic_yml_path = file_relative_path(\n __file__, \"./test_fixtures/great_expectations_v013_upgraded_titanic.yml\"\n )\n shutil.copy(\n titanic_yml_path, str(os.path.join(context_path, \"great_expectations.yml\"))\n )\n titanic_csv_path = file_relative_path(__file__, \"./test_sets/Titanic.csv\")\n shutil.copy(\n titanic_csv_path, str(os.path.join(context_path, \"..\", \"data\", \"Titanic.csv\"))\n )\n return ge.data_context.DataContext(context_path)\n\n\[email protected]\ndef titanic_sqlite_db(sa):\n try:\n import sqlalchemy as sa\n from sqlalchemy import create_engine\n\n titanic_db_path = file_relative_path(__file__, \"./test_sets/titanic.db\")\n engine = create_engine(f\"sqlite:///{titanic_db_path}\")\n assert engine.execute(\"select count(*) from titanic\").fetchall()[0] == (1313,)\n return engine\n except ImportError:\n raise ValueError(\"sqlite tests require sqlalchemy to be installed\")\n\n\[email protected]\ndef titanic_sqlite_db_connection_string(sa):\n try:\n import sqlalchemy as sa\n from sqlalchemy import create_engine\n\n titanic_db_path = file_relative_path(__file__, \"./test_sets/titanic.db\")\n engine = create_engine(f\"sqlite:////{titanic_db_path}\")\n assert engine.execute(\"select count(*) from titanic\").fetchall()[0] == (1313,)\n return f\"sqlite:///{titanic_db_path}\"\n except ImportError:\n raise ValueError(\"sqlite tests require sqlalchemy to be installed\")\n\n\[email protected]\ndef titanic_expectation_suite(empty_data_context_stats_enabled):\n data_context: DataContext = empty_data_context_stats_enabled\n return ExpectationSuite(\n expectation_suite_name=\"Titanic.warning\",\n meta={},\n data_asset_type=\"Dataset\",\n expectations=[\n ExpectationConfiguration(\n expectation_type=\"expect_column_to_exist\", kwargs={\"column\": \"PClass\"}\n ),\n 
ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_not_be_null\",\n kwargs={\"column\": \"Name\"},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_table_row_count_to_equal\",\n kwargs={\"value\": 1313},\n ),\n ],\n data_context=data_context,\n )\n\n\[email protected]\ndef empty_sqlite_db(sa):\n \"\"\"An empty in-memory sqlite db that always gets run.\"\"\"\n try:\n import sqlalchemy as sa\n from sqlalchemy import create_engine\n\n engine = create_engine(\"sqlite://\")\n assert engine.execute(\"select 1\").fetchall()[0] == (1,)\n return engine\n except ImportError:\n raise ValueError(\"sqlite tests require sqlalchemy to be installed\")\n\n\[email protected]\n@freeze_time(\"09/26/2019 13:42:41\")\ndef site_builder_data_context_with_html_store_titanic_random(\n tmp_path_factory, filesystem_csv_3\n):\n base_dir = str(tmp_path_factory.mktemp(\"project_dir\"))\n project_dir = os.path.join(base_dir, \"project_path\")\n os.mkdir(project_dir)\n\n os.makedirs(os.path.join(project_dir, \"data\"))\n os.makedirs(os.path.join(project_dir, \"data/titanic\"))\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(project_dir, \"data\", \"titanic\", \"Titanic.csv\")),\n )\n\n os.makedirs(os.path.join(project_dir, \"data\", \"random\"))\n shutil.copy(\n os.path.join(filesystem_csv_3, \"f1.csv\"),\n str(os.path.join(project_dir, \"data\", \"random\", \"f1.csv\")),\n )\n shutil.copy(\n os.path.join(filesystem_csv_3, \"f2.csv\"),\n str(os.path.join(project_dir, \"data\", \"random\", \"f2.csv\")),\n )\n ge.data_context.DataContext.create(project_dir)\n shutil.copy(\n file_relative_path(\n __file__, \"./test_fixtures/great_expectations_site_builder.yml\"\n ),\n str(os.path.join(project_dir, \"great_expectations\", \"great_expectations.yml\")),\n )\n context = ge.data_context.DataContext(\n context_root_dir=os.path.join(project_dir, \"great_expectations\")\n )\n\n context.add_datasource(\n \"titanic\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": os.path.join(project_dir, \"data\", \"titanic\"),\n }\n },\n )\n context.add_datasource(\n \"random\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": os.path.join(project_dir, \"data\", \"random\"),\n }\n },\n )\n\n context.profile_datasource(\"titanic\")\n context.profile_datasource(\"random\")\n context.profile_datasource(context.list_datasources()[0][\"name\"])\n\n context._project_config.anonymous_usage_statistics = {\n \"enabled\": True,\n \"data_context_id\": \"f43d4897-385f-4366-82b0-1a8eda2bf79c\",\n }\n\n return context\n\n\[email protected](scope=\"function\")\n@freeze_time(\"09/26/2019 13:42:41\")\ndef site_builder_data_context_v013_with_html_store_titanic_random(\n tmp_path, filesystem_csv_3\n):\n base_dir = tmp_path / \"project_dir\"\n base_dir.mkdir()\n base_dir = str(base_dir)\n project_dir = os.path.join(base_dir, \"project_path\")\n os.mkdir(project_dir)\n\n os.makedirs(os.path.join(project_dir, \"data\"))\n os.makedirs(os.path.join(project_dir, \"data\", \"titanic\"))\n shutil.copy(\n file_relative_path(__file__, \"./test_sets/Titanic.csv\"),\n str(os.path.join(project_dir, \"data\", \"titanic\", \"Titanic.csv\")),\n )\n\n os.makedirs(os.path.join(project_dir, \"data\", \"random\"))\n shutil.copy(\n os.path.join(filesystem_csv_3, \"f1.csv\"),\n 
str(os.path.join(project_dir, \"data\", \"random\", \"f1.csv\")),\n )\n shutil.copy(\n os.path.join(filesystem_csv_3, \"f2.csv\"),\n str(os.path.join(project_dir, \"data\", \"random\", \"f2.csv\")),\n )\n ge.data_context.DataContext.create(project_dir)\n shutil.copy(\n file_relative_path(\n __file__, \"./test_fixtures/great_expectations_v013_site_builder.yml\"\n ),\n str(os.path.join(project_dir, \"great_expectations\", \"great_expectations.yml\")),\n )\n context = ge.data_context.DataContext(\n context_root_dir=os.path.join(project_dir, \"great_expectations\")\n )\n\n context.add_datasource(\n \"titanic\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": os.path.join(project_dir, \"data\", \"titanic\"),\n }\n },\n )\n context.add_datasource(\n \"random\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": os.path.join(project_dir, \"data\", \"random\"),\n }\n },\n )\n\n context.profile_datasource(\"titanic\")\n context.profile_datasource(\"random\")\n context.profile_datasource(context.list_datasources()[0][\"name\"])\n\n context._project_config.anonymous_usage_statistics = {\n \"enabled\": True,\n \"data_context_id\": \"f43d4897-385f-4366-82b0-1a8eda2bf79c\",\n }\n\n return context\n\n\[email protected]\ndef v20_project_directory(tmp_path_factory):\n \"\"\"\n GE config_version: 2 project for testing upgrade helper\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"v20_project\"))\n context_root_dir = os.path.join(project_path, \"great_expectations\")\n shutil.copytree(\n file_relative_path(\n __file__, \"./test_fixtures/upgrade_helper/great_expectations_v20_project/\"\n ),\n context_root_dir,\n )\n shutil.copy(\n file_relative_path(\n __file__, \"./test_fixtures/upgrade_helper/great_expectations_v2.yml\"\n ),\n os.path.join(context_root_dir, \"great_expectations.yml\"),\n )\n return context_root_dir\n\n\[email protected]\ndef data_context_parameterized_expectation_suite_no_checkpoint_store(tmp_path_factory):\n \"\"\"\n This data_context is *manually* created to have the config we want, vs\n created with DataContext.create()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(fixture_dir, \"great_expectations_basic.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n os.path.join(\n fixture_dir,\n \"expectation_suites/parameterized_expectation_suite_fixture.json\",\n ),\n os.path.join(asset_config_path, \"my_dag_node\", \"default.json\"),\n )\n os.makedirs(os.path.join(context_path, \"plugins\"), exist_ok=True)\n shutil.copy(\n os.path.join(fixture_dir, \"custom_pandas_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_pandas_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sqlalchemy_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sqlalchemy_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sparkdf_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sparkdf_dataset.py\")),\n )\n return 
ge.data_context.DataContext(context_path)\n\n\n@pytest.fixture\ndef data_context_parameterized_expectation_suite(tmp_path_factory):\n \"\"\"\n This data_context is *manually* created to have the config we want, vs\n created with DataContext.create()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(fixture_dir, \"great_expectations_v013_basic.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n os.path.join(\n fixture_dir,\n \"expectation_suites/parameterized_expectation_suite_fixture.json\",\n ),\n os.path.join(asset_config_path, \"my_dag_node\", \"default.json\"),\n )\n os.makedirs(os.path.join(context_path, \"plugins\"), exist_ok=True)\n shutil.copy(\n os.path.join(fixture_dir, \"custom_pandas_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_pandas_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sqlalchemy_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sqlalchemy_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sparkdf_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sparkdf_dataset.py\")),\n )\n return ge.data_context.DataContext(context_path)\n\n\n@pytest.fixture\ndef data_context_simple_expectation_suite(tmp_path_factory):\n \"\"\"\n This data_context is *manually* created to have the config we want, vs\n created with DataContext.create()\n \"\"\"\n project_path = str(tmp_path_factory.mktemp(\"data_context\"))\n context_path = os.path.join(project_path, \"great_expectations\")\n asset_config_path = os.path.join(context_path, \"expectations\")\n fixture_dir = file_relative_path(__file__, \"./test_fixtures\")\n os.makedirs(\n os.path.join(asset_config_path, \"my_dag_node\"),\n exist_ok=True,\n )\n shutil.copy(\n os.path.join(fixture_dir, \"great_expectations_basic.yml\"),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n os.path.join(\n fixture_dir,\n \"rendering_fixtures/expectations_suite_1.json\",\n ),\n os.path.join(asset_config_path, \"default.json\"),\n )\n os.makedirs(os.path.join(context_path, \"plugins\"), exist_ok=True)\n shutil.copy(\n os.path.join(fixture_dir, \"custom_pandas_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_pandas_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sqlalchemy_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sqlalchemy_dataset.py\")),\n )\n shutil.copy(\n os.path.join(fixture_dir, \"custom_sparkdf_dataset.py\"),\n str(os.path.join(context_path, \"plugins\", \"custom_sparkdf_dataset.py\")),\n )\n return ge.data_context.DataContext(context_path)\n\n\n@pytest.fixture()\ndef filesystem_csv_data_context_with_validation_operators(\n titanic_data_context_stats_enabled, filesystem_csv_2\n):\n titanic_data_context_stats_enabled.add_datasource(\n \"rad_datasource\",\n module_name=\"great_expectations.datasource\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": str(filesystem_csv_2),\n }\n },\n )\n return titanic_data_context_stats_enabled\n\n\n@pytest.fixture()
\ndef filesystem_csv_data_context(\n empty_data_context,\n filesystem_csv_2,\n) -> DataContext:\n empty_data_context.add_datasource(\n \"rad_datasource\",\n module_name=\"great_expectations.datasource\",\n class_name=\"PandasDatasource\",\n batch_kwargs_generators={\n \"subdir_reader\": {\n \"class_name\": \"SubdirReaderBatchKwargsGenerator\",\n \"base_directory\": str(filesystem_csv_2),\n }\n },\n )\n return empty_data_context\n\n\n@pytest.fixture\ndef filesystem_csv(tmp_path_factory):\n base_dir = tmp_path_factory.mktemp(\"filesystem_csv\")\n base_dir = str(base_dir)\n # Put a few files in the directory\n with open(os.path.join(base_dir, \"f1.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n with open(os.path.join(base_dir, \"f2.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n\n os.makedirs(os.path.join(base_dir, \"f3\"), exist_ok=True)\n with open(os.path.join(base_dir, \"f3\", \"f3_20190101.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n with open(os.path.join(base_dir, \"f3\", \"f3_20190102.csv\"), \"w\") as outfile:\n outfile.writelines([\"a,b,c\\n\"])\n\n return base_dir\n\n\n@pytest.fixture(scope=\"function\")\ndef filesystem_csv_2(tmp_path):\n base_dir = tmp_path / \"filesystem_csv_2\"\n base_dir.mkdir()\n base_dir = str(base_dir)\n\n # Put a file in the directory\n toy_dataset = PandasDataset({\"x\": [1, 2, 3]})\n toy_dataset.to_csv(os.path.join(base_dir, \"f1.csv\"), index=False)\n assert os.path.isabs(base_dir)\n assert os.path.isfile(os.path.join(base_dir, \"f1.csv\"))\n\n return base_dir\n\n\n@pytest.fixture(scope=\"function\")\ndef filesystem_csv_3(tmp_path):\n base_dir = tmp_path / \"filesystem_csv_3\"\n base_dir.mkdir()\n base_dir = str(base_dir)\n\n # Put a file in the directory\n toy_dataset = PandasDataset({\"x\": [1, 2, 3]})\n toy_dataset.to_csv(os.path.join(base_dir, \"f1.csv\"), index=False)\n\n toy_dataset_2 = PandasDataset({\"y\": [1, 2, 3]})\n toy_dataset_2.to_csv(os.path.join(base_dir, \"f2.csv\"), index=False)\n\n return base_dir\n\n\n@pytest.fixture(scope=\"function\")\ndef filesystem_csv_4(tmp_path):\n base_dir = tmp_path / \"filesystem_csv_4\"\n base_dir.mkdir()\n base_dir = str(base_dir)\n\n # Put a file in the directory\n toy_dataset = PandasDataset(\n {\n \"x\": [1, 2, 3],\n \"y\": [1, 2, 3],\n }\n )\n toy_dataset.to_csv(os.path.join(base_dir, \"f1.csv\"), index=None)\n\n return base_dir\n\n\n@pytest.fixture\ndef titanic_profiled_evrs_1():\n with open(\n file_relative_path(\n __file__, \"./render/fixtures/BasicDatasetProfiler_evrs.json\"\n ),\n ) as infile:\n return expectationSuiteValidationResultSchema.loads(infile.read())\n\n\n# various types of evr\n@pytest.fixture\ndef evr_failed():\n return ExpectationValidationResult(\n success=False,\n result={\n \"element_count\": 1313,\n \"missing_count\": 0,\n \"missing_percent\": 0.0,\n \"unexpected_count\": 3,\n \"unexpected_percent\": 0.2284843869002285,\n \"unexpected_percent_nonmissing\": 0.2284843869002285,\n \"partial_unexpected_list\": [\n \"Daly, Mr Peter Denis \",\n \"Barber, Ms \",\n \"Geiger, Miss Emily \",\n ],\n \"partial_unexpected_index_list\": [77, 289, 303],\n \"partial_unexpected_counts\": [\n {\"value\": \"Barber, Ms \", \"count\": 1},\n {\"value\": \"Daly, Mr Peter Denis \", \"count\": 1},\n {\"value\": \"Geiger, Miss Emily \", \"count\": 1},\n ],\n },\n exception_info={\n \"raised_exception\": False,\n \"exception_message\": None,\n \"exception_traceback\": None,\n },\n expectation_config=ExpectationConfiguration(\n 
expectation_type=\"expect_column_values_to_not_match_regex\",\n kwargs={\n \"column\": \"Name\",\n \"regex\": \"^\\\\s+|\\\\s+$\",\n \"result_format\": \"SUMMARY\",\n },\n ),\n )\n\n\[email protected]\ndef evr_success():\n return ExpectationValidationResult(\n success=True,\n result={\"observed_value\": 1313},\n exception_info={\n \"raised_exception\": False,\n \"exception_message\": None,\n \"exception_traceback\": None,\n },\n expectation_config=ExpectationConfiguration(\n expectation_type=\"expect_table_row_count_to_be_between\",\n kwargs={\"min_value\": 0, \"max_value\": None, \"result_format\": \"SUMMARY\"},\n ),\n )\n\n\[email protected]\ndef sqlite_view_engine(test_backends):\n # Create a small in-memory engine with two views, one of which is temporary\n if \"sqlite\" in test_backends:\n try:\n import sqlalchemy as sa\n\n sqlite_engine = sa.create_engine(\"sqlite://\")\n df = pd.DataFrame({\"a\": [1, 2, 3, 4, 5]})\n df.to_sql(name=\"test_table\", con=sqlite_engine, index=True)\n sqlite_engine.execute(\n \"CREATE TEMP VIEW test_temp_view AS SELECT * FROM test_table where a < 4;\"\n )\n sqlite_engine.execute(\n \"CREATE VIEW test_view AS SELECT * FROM test_table where a > 4;\"\n )\n return sqlite_engine\n except ImportError:\n sa = None\n else:\n pytest.skip(\"SqlAlchemy tests disabled; not testing views\")\n\n\[email protected]\ndef expectation_suite_identifier():\n return ExpectationSuiteIdentifier(\"my.expectation.suite.name\")\n\n\[email protected]\ndef basic_sqlalchemy_datasource(sqlitedb_engine):\n return SqlAlchemyDatasource(\"basic_sqlalchemy_datasource\", engine=sqlitedb_engine)\n\n\[email protected]\ndef test_folder_connection_path_csv(tmp_path_factory):\n df1 = pd.DataFrame({\"col_1\": [1, 2, 3, 4, 5], \"col_2\": [\"a\", \"b\", \"c\", \"d\", \"e\"]})\n path = str(tmp_path_factory.mktemp(\"test_folder_connection_path_csv\"))\n df1.to_csv(path_or_buf=os.path.join(path, \"test.csv\"), index=False)\n return str(path)\n\n\[email protected]\ndef test_db_connection_string(tmp_path_factory, test_backends):\n if \"sqlite\" not in test_backends:\n pytest.skip(\"skipping fixture because sqlite not selected\")\n df1 = pd.DataFrame({\"col_1\": [1, 2, 3, 4, 5], \"col_2\": [\"a\", \"b\", \"c\", \"d\", \"e\"]})\n df2 = pd.DataFrame({\"col_1\": [0, 1, 2, 3, 4], \"col_2\": [\"b\", \"c\", \"d\", \"e\", \"f\"]})\n\n try:\n import sqlalchemy as sa\n\n basepath = str(tmp_path_factory.mktemp(\"db_context\"))\n path = os.path.join(basepath, \"test.db\")\n engine = sa.create_engine(\"sqlite:///\" + str(path))\n df1.to_sql(name=\"table_1\", con=engine, index=True)\n df2.to_sql(name=\"table_2\", con=engine, index=True, schema=\"main\")\n\n # Return a connection string to this newly-created db\n return \"sqlite:///\" + str(path)\n except ImportError:\n raise ValueError(\"SQL Database tests require sqlalchemy to be installed.\")\n\n\[email protected]\ndef test_df(tmp_path_factory):\n def generate_ascending_list_of_datetimes(\n k, start_date=datetime.date(2020, 1, 1), end_date=datetime.date(2020, 12, 31)\n ):\n start_time = datetime.datetime(\n start_date.year, start_date.month, start_date.day\n )\n days_between_dates = (end_date - start_date).total_seconds()\n\n datetime_list = [\n start_time\n + datetime.timedelta(seconds=random.randrange(days_between_dates))\n for i in range(k)\n ]\n datetime_list.sort()\n return datetime_list\n\n k = 120\n random.seed(1)\n\n timestamp_list = generate_ascending_list_of_datetimes(\n k, end_date=datetime.date(2020, 1, 31)\n )\n date_list = [datetime.date(ts.year, 
ts.month, ts.day) for ts in timestamp_list]\n\n batch_ids = [random.randint(0, 10) for i in range(k)]\n batch_ids.sort()\n\n session_ids = [random.randint(2, 60) for i in range(k)]\n session_ids.sort()\n session_ids = [i - random.randint(0, 2) for i in session_ids]\n\n events_df = pd.DataFrame(\n {\n \"id\": range(k),\n \"batch_id\": batch_ids,\n \"date\": date_list,\n \"y\": [d.year for d in date_list],\n \"m\": [d.month for d in date_list],\n \"d\": [d.day for d in date_list],\n \"timestamp\": timestamp_list,\n \"session_ids\": session_ids,\n \"event_type\": [\n random.choice([\"start\", \"stop\", \"continue\"]) for i in range(k)\n ],\n \"favorite_color\": [\n \"#\"\n + \"\".join([random.choice(list(\"0123456789ABCDEF\")) for j in range(6)])\n for i in range(k)\n ],\n }\n )\n return events_df\n\n\[email protected]\ndef data_context_with_simple_sql_datasource_for_testing_get_batch(\n sa, empty_data_context\n):\n context: DataContext = empty_data_context\n\n db_file_path: str = file_relative_path(\n __file__,\n os.path.join(\"test_sets\", \"test_cases_for_sql_data_connector.db\"),\n )\n\n datasource_config: str = f\"\"\"\nclass_name: SimpleSqlalchemyDatasource\nconnection_string: sqlite:///{db_file_path}\nintrospection:\n whole_table: {{}}\n\n daily:\n splitter_method: _split_on_converted_datetime\n splitter_kwargs:\n column_name: date\n date_format_string: \"%Y-%m-%d\"\n\n weekly:\n splitter_method: _split_on_converted_datetime\n splitter_kwargs:\n column_name: date\n date_format_string: \"%Y-%W\"\n\n by_id_dozens:\n splitter_method: _split_on_divided_integer\n splitter_kwargs:\n column_name: id\n divisor: 12\n\"\"\"\n\n try:\n context.add_datasource(\"my_sqlite_db\", **yaml.load(datasource_config))\n except AttributeError:\n pytest.skip(\"SQL Database tests require sqlalchemy to be installed.\")\n\n return context\n\n\[email protected]\ndef basic_datasource(tmp_path_factory):\n base_directory: str = str(\n tmp_path_factory.mktemp(\"basic_datasource_runtime_data_connector\")\n )\n\n basic_datasource: Datasource = instantiate_class_from_config(\n config=yaml.load(\n f\"\"\"\nclass_name: Datasource\n\ndata_connectors:\n test_runtime_data_connector:\n module_name: great_expectations.datasource.data_connector\n class_name: RuntimeDataConnector\n batch_identifiers:\n - pipeline_stage_name\n - airflow_run_id\n - custom_key_0\n\nexecution_engine:\n class_name: PandasExecutionEngine\n\n \"\"\",\n ),\n runtime_environment={\n \"name\": \"my_datasource\",\n },\n config_defaults={\n \"module_name\": \"great_expectations.datasource\",\n },\n )\n\n return basic_datasource\n\n\[email protected]\ndef db_file():\n return file_relative_path(\n __file__,\n os.path.join(\"test_sets\", \"test_cases_for_sql_data_connector.db\"),\n )\n\n\[email protected]\ndef data_context_with_datasource_pandas_engine(empty_data_context):\n context = empty_data_context\n config = yaml.load(\n f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: PandasExecutionEngine\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\",\n )\n context.add_datasource(\n \"my_datasource\",\n **config,\n )\n return context\n\n\[email protected]\ndef data_context_with_datasource_spark_engine(empty_data_context, spark_session):\n context = empty_data_context\n config = yaml.load(\n f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: SparkDFExecutionEngine\n data_connectors:\n default_runtime_data_connector_name:\n 
class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\",\n )\n context.add_datasource(\n \"my_datasource\",\n **config,\n )\n return context\n\n\[email protected]\ndef data_context_with_datasource_sqlalchemy_engine(empty_data_context, db_file):\n context = empty_data_context\n config = yaml.load(\n f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: SqlAlchemyExecutionEngine\n connection_string: sqlite:///{db_file}\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\",\n )\n context.add_datasource(\n \"my_datasource\",\n **config,\n )\n return context\n\n\[email protected]\ndef data_context_with_query_store(\n empty_data_context, titanic_sqlite_db_connection_string\n):\n context = empty_data_context\n config = yaml.load(\n f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: SqlAlchemyExecutionEngine\n connection_string: {titanic_sqlite_db_connection_string}\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\"\n )\n context.add_datasource(\n \"my_datasource\",\n **config,\n )\n store_config = yaml.load(\n f\"\"\"\n class_name: SqlAlchemyQueryStore\n credentials:\n connection_string: {titanic_sqlite_db_connection_string}\n queries:\n col_count:\n query: \"SELECT COUNT(*) FROM titanic;\"\n return_type: \"scalar\"\n dist_col_count:\n query: \"SELECT COUNT(DISTINCT PClass) FROM titanic;\"\n return_type: \"scalar\"\n \"\"\"\n )\n context.add_store(\"my_query_store\", store_config)\n return context\n\n\[email protected]\ndef ge_cloud_base_url():\n return \"https://app.test.greatexpectations.io\"\n\n\[email protected]\ndef ge_cloud_organization_id():\n return \"bd20fead-2c31-4392-bcd1-f1e87ad5a79c\"\n\n\[email protected]\ndef ge_cloud_access_token():\n return \"6bb5b6f5c7794892a4ca168c65c2603e\"\n\n\[email protected]\ndef ge_cloud_config(ge_cloud_base_url, ge_cloud_organization_id, ge_cloud_access_token):\n return GeCloudConfig(\n base_url=ge_cloud_base_url,\n organization_id=ge_cloud_organization_id,\n access_token=ge_cloud_access_token,\n )\n\n\[email protected](scope=\"function\")\ndef empty_ge_cloud_data_context_config(\n ge_cloud_base_url, ge_cloud_organization_id, ge_cloud_access_token\n):\n config_yaml_str = f\"\"\"\nstores:\n default_evaluation_parameter_store:\n class_name: EvaluationParameterStore\n\n default_expectations_store:\n class_name: ExpectationsStore\n store_backend:\n class_name: GeCloudStoreBackend\n ge_cloud_base_url: {ge_cloud_base_url}\n ge_cloud_resource_type: expectation_suite\n ge_cloud_credentials:\n access_token: {ge_cloud_access_token}\n organization_id: {ge_cloud_organization_id}\n suppress_store_backend_id: True\n\n default_validations_store:\n class_name: ValidationsStore\n store_backend:\n class_name: GeCloudStoreBackend\n ge_cloud_base_url: {ge_cloud_base_url}\n ge_cloud_resource_type: suite_validation_result\n ge_cloud_credentials:\n access_token: {ge_cloud_access_token}\n organization_id: {ge_cloud_organization_id}\n suppress_store_backend_id: True\n\n default_checkpoint_store:\n class_name: CheckpointStore\n store_backend:\n class_name: GeCloudStoreBackend\n ge_cloud_base_url: {ge_cloud_base_url}\n ge_cloud_resource_type: contract\n ge_cloud_credentials:\n access_token: {ge_cloud_access_token}\n organization_id: {ge_cloud_organization_id}\n suppress_store_backend_id: 
True\n\nevaluation_parameter_store_name: default_evaluation_parameter_store\nexpectations_store_name: default_expectations_store\nvalidations_store_name: default_validations_store\ncheckpoint_store_name: default_checkpoint_store\n\"\"\"\n data_context_config_dict = yaml.load(config_yaml_str)\n return DataContextConfig(**data_context_config_dict)\n\n\[email protected](scope=\"function\")\ndef empty_cloud_data_context(\n tmp_path, empty_ge_cloud_data_context_config, ge_cloud_config\n) -> DataContext:\n project_path = tmp_path / \"empty_data_context\"\n project_path.mkdir()\n project_path = str(project_path)\n\n context = ge.data_context.BaseDataContext(\n project_config=empty_ge_cloud_data_context_config,\n context_root_dir=project_path,\n ge_cloud_mode=True,\n ge_cloud_config=ge_cloud_config,\n )\n assert context.list_datasources() == []\n return context\n\n\[email protected]\ndef cloud_data_context_with_datasource_pandas_engine(empty_cloud_data_context):\n context = empty_cloud_data_context\n config = yaml.load(\n f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: PandasExecutionEngine\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\",\n )\n context.add_datasource(\n \"my_datasource\",\n **config,\n )\n return context\n\n\[email protected]\ndef cloud_data_context_with_datasource_sqlalchemy_engine(\n empty_cloud_data_context, db_file\n):\n context = empty_cloud_data_context\n config = yaml.load(\n f\"\"\"\n class_name: Datasource\n execution_engine:\n class_name: SqlAlchemyExecutionEngine\n connection_string: sqlite:///{db_file}\n data_connectors:\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n \"\"\",\n )\n context.add_datasource(\n \"my_datasource\",\n **config,\n )\n return context\n\n\[email protected](scope=\"function\")\ndef profiler_name() -> str:\n skip_if_python_below_minimum_version()\n\n return \"my_first_profiler\"\n\n\[email protected](scope=\"function\")\ndef profiler_store_name() -> str:\n skip_if_python_below_minimum_version()\n\n return \"profiler_store\"\n\n\[email protected](scope=\"function\")\ndef profiler_config_with_placeholder_args(\n profiler_name: str,\n) -> RuleBasedProfilerConfig:\n \"\"\"\n This fixture does not correspond to a practical profiler with rules, whose constituent components perform meaningful\n computations; rather, it uses \"placeholder\" style attribute values, which is adequate for configuration level tests.\n \"\"\"\n skip_if_python_below_minimum_version()\n\n return RuleBasedProfilerConfig(\n name=profiler_name,\n class_name=\"RuleBasedProfiler\",\n config_version=1.0,\n variables={\n \"false_positive_threshold\": 1.0e-2,\n },\n rules={\n \"rule_1\": {\n \"domain_builder\": {\n \"class_name\": \"TableDomainBuilder\",\n },\n \"parameter_builders\": [\n {\n \"class_name\": \"MetricMultiBatchParameterBuilder\",\n \"name\": \"my_parameter\",\n \"metric_name\": \"my_metric\",\n },\n ],\n \"expectation_configuration_builders\": [\n {\n \"class_name\": \"DefaultExpectationConfigurationBuilder\",\n \"expectation_type\": \"expect_column_pair_values_A_to_be_greater_than_B\",\n \"column_A\": \"$domain.domain_kwargs.column_A\",\n \"column_B\": \"$domain.domain_kwargs.column_B\",\n \"my_arg\": \"$parameter.my_parameter.value[0]\",\n \"my_other_arg\": \"$parameter.my_parameter.value[1]\",\n \"meta\": {\n \"details\": {\n \"my_parameter_estimator\": 
\"$parameter.my_parameter.details\",\n \"note\": \"Important remarks about estimation algorithm.\",\n },\n },\n },\n ],\n },\n },\n )\n\n\[email protected]\ndef empty_profiler_store(profiler_store_name: str) -> ProfilerStore:\n skip_if_python_below_minimum_version()\n\n return ProfilerStore(profiler_store_name)\n\n\[email protected]\ndef profiler_key(profiler_name: str) -> ConfigurationIdentifier:\n skip_if_python_below_minimum_version()\n\n return ConfigurationIdentifier(configuration_key=profiler_name)\n\n\[email protected]\ndef ge_cloud_profiler_id() -> str:\n skip_if_python_below_minimum_version()\n\n return \"my_ge_cloud_profiler_id\"\n\n\[email protected]\ndef ge_cloud_profiler_key(ge_cloud_profiler_id: str) -> GeCloudIdentifier:\n skip_if_python_below_minimum_version()\n\n return GeCloudIdentifier(resource_type=\"contract\", ge_cloud_id=ge_cloud_profiler_id)\n\n\[email protected]\ndef populated_profiler_store(\n empty_profiler_store: ProfilerStore,\n profiler_config_with_placeholder_args: RuleBasedProfilerConfig,\n profiler_key: ConfigurationIdentifier,\n) -> ProfilerStore:\n skip_if_python_below_minimum_version()\n\n # Roundtrip through schema validation to remove any illegal fields add/or restore any missing fields.\n serialized_config: dict = ruleBasedProfilerConfigSchema.dump(\n profiler_config_with_placeholder_args\n )\n deserialized_config: dict = ruleBasedProfilerConfigSchema.load(serialized_config)\n\n profiler_config: RuleBasedProfilerConfig = RuleBasedProfilerConfig(\n **deserialized_config\n )\n\n profiler_store = empty_profiler_store\n profiler_store.set(key=profiler_key, value=profiler_config)\n return profiler_store\n\n\[email protected]\n@freeze_time(\"09/26/2019 13:42:41\")\ndef alice_columnar_table_single_batch(empty_data_context):\n \"\"\"\n About the \"Alice\" User Workflow Fixture\n\n Alice has a single table of columnar data called user_events (DataAsset) that she wants to check periodically as new\n data is added.\n\n - She knows what some of the columns mean, but not all - and there are MANY of them (only a subset currently shown\n in examples and fixtures).\n\n - She has organized other tables similarly so that for example column name suffixes indicate which are for user\n ids (_id) and which timestamps are for versioning (_ts).\n\n She wants to use a configurable profiler to generate a description (ExpectationSuite) about table so that she can:\n\n 1. use it to validate the user_events table periodically and set up alerts for when things change\n\n 2. have a place to add her domain knowledge of the data (that can also be validated against new data)\n\n 3. 
if all goes well, generalize some of the Profiler to use on her other tables\n\n Alice configures her Profiler using the YAML configurations and data file locations captured in this fixture.\n \"\"\"\n skip_if_python_below_minimum_version()\n\n verbose_profiler_config_file_path: str = file_relative_path(\n __file__,\n os.path.join(\n \"test_fixtures\",\n \"rule_based_profiler\",\n \"alice_user_workflow_verbose_profiler_config.yml\",\n ),\n )\n\n verbose_profiler_config: str\n with open(verbose_profiler_config_file_path) as f:\n verbose_profiler_config = f.read()\n\n my_rule_for_user_ids_expectation_configurations: List[ExpectationConfiguration] = [\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_of_type\",\n kwargs={\n \"column\": \"user_id\",\n \"type_\": \"INTEGER\",\n },\n meta={},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_between\",\n kwargs={\n \"min_value\": 1000,\n \"max_value\": 999999999999,\n \"column\": \"user_id\",\n },\n meta={},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_not_be_null\",\n kwargs={\n \"column\": \"user_id\",\n },\n meta={},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_less_than\",\n meta={},\n kwargs={\"value\": 9488404, \"column\": \"user_id\"},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_greater_than\",\n meta={},\n kwargs={\"value\": 397433, \"column\": \"user_id\"},\n ),\n ]\n\n event_ts_column_data: Dict[str, str] = {\n \"column_name\": \"event_ts\",\n \"observed_max_time_str\": \"2004-10-19 11:05:20\",\n \"observed_strftime_format\": \"%Y-%m-%d %H:%M:%S\",\n }\n\n my_rule_for_timestamps_column_data: List[Dict[str, str]] = [\n event_ts_column_data,\n {\n \"column_name\": \"server_ts\",\n \"observed_max_time_str\": \"2004-10-19 11:05:20\",\n },\n {\n \"column_name\": \"device_ts\",\n \"observed_max_time_str\": \"2004-10-19 11:05:22\",\n },\n ]\n my_rule_for_timestamps_expectation_configurations: List[\n ExpectationConfiguration\n ] = []\n column_data: Dict[str, str]\n for column_data in my_rule_for_timestamps_column_data:\n my_rule_for_timestamps_expectation_configurations.extend(\n [\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_of_type\",\n kwargs={\n \"column\": column_data[\"column_name\"],\n \"type_\": \"TIMESTAMP\",\n },\n meta={},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_increasing\",\n kwargs={\n \"column\": column_data[\"column_name\"],\n },\n meta={},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_dateutil_parseable\",\n kwargs={\n \"column\": column_data[\"column_name\"],\n },\n meta={},\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_min_to_be_between\",\n kwargs={\n \"column\": column_data[\"column_name\"],\n \"min_value\": \"2004-10-19T10:23:54\", # From variables\n \"max_value\": \"2004-10-19T10:23:54\", # From variables\n },\n meta={\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms no events occur before tracking started **2004-10-19 10:23:54**\"\n ],\n }\n },\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_max_to_be_between\",\n kwargs={\n \"column\": column_data[\"column_name\"],\n \"min_value\": \"2004-10-19T10:23:54\", # From variables\n \"max_value\": event_ts_column_data[\n \"observed_max_time_str\"\n ], # Pin to event_ts column\n },\n meta={\n \"notes\": {\n \"format\": 
\"markdown\",\n \"content\": [\n \"### This expectation confirms that the event_ts contains the latest timestamp of all domains\"\n ],\n }\n },\n ),\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_match_strftime_format\",\n kwargs={\n \"column\": column_data[\"column_name\"],\n \"strftime_format\": {\n \"value\": event_ts_column_data[\n \"observed_strftime_format\"\n ], # Pin to event_ts column\n \"details\": {\n \"success_ratio\": 1.0,\n \"candidate_strings\": sorted(DEFAULT_CANDIDATE_STRINGS),\n },\n },\n },\n meta={\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms that fields ending in _ts are of the format detected by parameter builder SimpleDateFormatStringParameterBuilder\"\n ],\n }\n },\n ),\n ]\n )\n\n my_rule_for_one_cardinality_expectation_configurations: List[\n ExpectationConfiguration\n ] = [\n ExpectationConfiguration(\n expectation_type=\"expect_column_values_to_be_in_set\",\n kwargs={\n \"column\": \"user_agent\",\n \"value_set\": [\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36\"\n ],\n },\n meta={},\n ),\n ]\n\n expectation_configurations: List[ExpectationConfiguration] = []\n\n expectation_configurations.extend(my_rule_for_user_ids_expectation_configurations)\n expectation_configurations.extend(my_rule_for_timestamps_expectation_configurations)\n expectation_configurations.extend(\n my_rule_for_one_cardinality_expectation_configurations\n )\n\n expectation_suite_name: str = \"alice_columnar_table_single_batch\"\n expected_expectation_suite: ExpectationSuite = ExpectationSuite(\n expectation_suite_name=expectation_suite_name, data_context=empty_data_context\n )\n expectation_configuration: ExpectationConfiguration\n for expectation_configuration in expectation_configurations:\n # NOTE Will 20211208 add_expectation() method, although being called by an ExpectationSuite instance, is being\n # called within a fixture, and we will prevent it from sending a usage_event by calling the private method\n # _add_expectation().\n expected_expectation_suite._add_expectation(\n expectation_configuration=expectation_configuration, send_usage_event=False\n )\n\n # NOTE that this expectation suite should fail when validated on the data in \"sample_data_relative_path\"\n # because the device_ts is ahead of the event_ts for the latest event\n sample_data_relative_path: str = \"alice_columnar_table_single_batch_data.csv\"\n\n profiler_config: dict = yaml.load(verbose_profiler_config)\n\n # Roundtrip through schema validation to remove any illegal fields add/or restore any missing fields.\n deserialized_config: dict = ruleBasedProfilerConfigSchema.load(profiler_config)\n serialized_config: dict = ruleBasedProfilerConfigSchema.dump(deserialized_config)\n\n # `class_name`/`module_name` are generally consumed through `instantiate_class_from_config`\n # so we need to manually remove those values if we wish to use the **kwargs instantiation pattern\n serialized_config.pop(\"class_name\")\n serialized_config.pop(\"module_name\")\n expected_expectation_suite.add_citation(\n comment=\"Suite created by Rule-Based Profiler with the configuration included.\",\n profiler_config=serialized_config,\n )\n\n return {\n \"profiler_config\": verbose_profiler_config,\n \"expected_expectation_suite_name\": expectation_suite_name,\n \"expected_expectation_suite\": expected_expectation_suite,\n \"sample_data_relative_path\": sample_data_relative_path,\n }\n\n\[email 
protected]\ndef alice_columnar_table_single_batch_context(\n monkeypatch,\n empty_data_context_stats_enabled,\n alice_columnar_table_single_batch,\n):\n skip_if_python_below_minimum_version()\n\n context: DataContext = empty_data_context_stats_enabled\n # We need our salt to be consistent between runs to ensure idempotent anonymized values\n context._usage_statistics_handler = UsageStatisticsHandler(\n context, \"00000000-0000-0000-0000-00000000a004\", \"N/A\"\n )\n monkeypatch.chdir(context.root_directory)\n data_relative_path: str = \"../data\"\n data_path: str = os.path.join(context.root_directory, data_relative_path)\n os.makedirs(data_path, exist_ok=True)\n\n # Copy data\n filename: str = alice_columnar_table_single_batch[\"sample_data_relative_path\"]\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"test_sets\",\n f\"{filename}\",\n ),\n ),\n str(os.path.join(data_path, filename)),\n )\n\n data_connector_base_directory: str = \"./\"\n monkeypatch.setenv(\"base_directory\", data_connector_base_directory)\n monkeypatch.setenv(\"data_fixtures_root\", data_relative_path)\n\n datasource_name: str = \"alice_columnar_table_single_batch_datasource\"\n data_connector_name: str = \"alice_columnar_table_single_batch_data_connector\"\n data_asset_name: str = \"alice_columnar_table_single_batch_data_asset\"\n datasource_config: str = rf\"\"\"\nclass_name: Datasource\nmodule_name: great_expectations.datasource\nexecution_engine:\n module_name: great_expectations.execution_engine\n class_name: PandasExecutionEngine\ndata_connectors:\n {data_connector_name}:\n class_name: ConfiguredAssetFilesystemDataConnector\n assets:\n {data_asset_name}:\n module_name: great_expectations.datasource.data_connector.asset\n group_names:\n - filename\n pattern: (.*)\\.csv\n reader_options:\n delimiter: \",\"\n class_name: Asset\n base_directory: ${{data_fixtures_root}}\n glob_directive: \"*.csv\"\n base_directory: ${{base_directory}}\n module_name: great_expectations.datasource.data_connector\n \"\"\"\n\n context.add_datasource(name=datasource_name, **yaml.load(datasource_config))\n\n assert context.list_datasources() == [\n {\n \"class_name\": \"Datasource\",\n \"data_connectors\": {\n data_connector_name: {\n \"assets\": {\n data_asset_name: {\n \"base_directory\": data_relative_path,\n \"class_name\": \"Asset\",\n \"glob_directive\": \"*.csv\",\n \"group_names\": [\"filename\"],\n \"module_name\": \"great_expectations.datasource.data_connector.asset\",\n \"pattern\": \"(.*)\\\\.csv\",\n }\n },\n \"base_directory\": data_connector_base_directory,\n \"class_name\": \"ConfiguredAssetFilesystemDataConnector\",\n \"module_name\": \"great_expectations.datasource.data_connector\",\n },\n },\n \"execution_engine\": {\n \"class_name\": \"PandasExecutionEngine\",\n \"module_name\": \"great_expectations.execution_engine\",\n },\n \"module_name\": \"great_expectations.datasource\",\n \"name\": datasource_name,\n }\n ]\n return context\n\n\[email protected]\n@freeze_time(\"09/26/2019 13:42:41\")\ndef bobby_columnar_table_multi_batch(empty_data_context):\n \"\"\"\n About the \"Bobby\" User Workflow Fixture\n Bobby has multiple tables of columnar data called user_events (DataAsset) that he wants to check periodically as new\n data is added.\n - He knows what some of the columns are of the accounting/financial/account type.\n He wants to use a configurable profiler to generate a description (ExpectationSuite) about tables so that he can:\n 1. monitor the average number of rows in the tables\n 2. 
use it to validate min/max boundaries of all columns are of the accounting/financial/account type and set up\n alerts for when things change\n 3. have a place to add his domain knowledge of the data (that can also be validated against new data)\n 4. if all goes well, generalize some of the Profiler to use on his other tables\n Bobby uses a crude, highly inaccurate deterministic parametric estimator -- for illustrative purposes.\n Bobby configures his Profiler using the YAML configurations and data file locations captured in this fixture.\n \"\"\"\n skip_if_python_below_minimum_version()\n\n verbose_profiler_config_file_path: str = file_relative_path(\n __file__,\n os.path.join(\n \"test_fixtures\",\n \"rule_based_profiler\",\n \"bobby_user_workflow_verbose_profiler_config.yml\",\n ),\n )\n\n verbose_profiler_config: str\n with open(verbose_profiler_config_file_path) as f:\n verbose_profiler_config = f.read()\n\n my_row_count_range_rule_expectation_configurations_oneshot_estimator: List[\n ExpectationConfiguration\n ] = [\n ExpectationConfiguration(\n **{\n \"kwargs\": {\"min_value\": 7505, \"max_value\": 8495},\n \"expectation_type\": \"expect_table_row_count_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"table.row_count\",\n \"domain_kwargs\": {},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n },\n },\n },\n ),\n ]\n\n my_column_ranges_rule_expectation_configurations_oneshot_estimator: List[\n ExpectationConfiguration\n ] = [\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"VendorID\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"VendorID\",\n \"min_value\": 1,\n \"max_value\": 1,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"VendorID\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"VendorID\",\n \"min_value\": 4,\n \"max_value\": 4,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"passenger_count\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"passenger_count\",\n \"min_value\": 0,\n \"max_value\": 1,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"passenger_count\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"passenger_count\",\n \"min_value\": 6,\n \"max_value\": 6,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": 
\"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"trip_distance\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"trip_distance\",\n \"min_value\": 0.0,\n \"max_value\": 0.0,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"trip_distance\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"trip_distance\",\n \"min_value\": 37.62,\n \"max_value\": 57.85,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"RatecodeID\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"RatecodeID\",\n \"min_value\": 1,\n \"max_value\": 1,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"RatecodeID\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"RatecodeID\",\n \"min_value\": 5,\n \"max_value\": 6,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"PULocationID\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"PULocationID\",\n \"min_value\": 1,\n \"max_value\": 1,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"PULocationID\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"PULocationID\",\n \"min_value\": 265,\n \"max_value\": 265,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"DOLocationID\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"DOLocationID\",\n \"min_value\": 1,\n \"max_value\": 1,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"DOLocationID\"},\n 
\"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"DOLocationID\",\n \"min_value\": 265,\n \"max_value\": 265,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"payment_type\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"payment_type\",\n \"min_value\": 1,\n \"max_value\": 1,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"payment_type\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"payment_type\",\n \"min_value\": 4,\n \"max_value\": 4,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"fare_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"fare_amount\",\n \"min_value\": -51.84,\n \"max_value\": -21.16,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"fare_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"fare_amount\",\n \"min_value\": 228.94,\n \"max_value\": 2990.05,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"extra\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"extra\",\n \"min_value\": -36.53,\n \"max_value\": -1.18,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"extra\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"extra\",\n \"min_value\": 4.51,\n \"max_value\": 6.99,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"mta_tax\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"mta_tax\",\n \"min_value\": -0.5,\n \"max_value\": -0.5,\n \"mostly\": 1.0,\n 
},\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"mta_tax\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"mta_tax\",\n \"min_value\": 0.69,\n \"max_value\": 37.32,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"tip_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"tip_amount\",\n \"min_value\": 0.0,\n \"max_value\": 0.0,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"tip_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"tip_amount\",\n \"min_value\": 46.84,\n \"max_value\": 74.86,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"tolls_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"tolls_amount\",\n \"min_value\": 0.0,\n \"max_value\": 0.0,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"tolls_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"tolls_amount\",\n \"min_value\": 26.4,\n \"max_value\": 497.67,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"improvement_surcharge\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"improvement_surcharge\",\n \"min_value\": -0.3,\n \"max_value\": -0.3,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"improvement_surcharge\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"improvement_surcharge\",\n \"min_value\": 0.3,\n \"max_value\": 0.3,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n 
\"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"total_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"total_amount\",\n \"min_value\": -52.66,\n \"max_value\": -24.44,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"total_amount\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"total_amount\",\n \"min_value\": 550.18,\n \"max_value\": 2992.47,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_min_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.min\",\n \"domain_kwargs\": {\"column\": \"congestion_surcharge\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"congestion_surcharge\",\n \"min_value\": -2.49,\n \"max_value\": -0.01,\n \"mostly\": 1.0,\n },\n },\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_max_to_be_between\",\n \"meta\": {\n \"profiler_details\": {\n \"metric_configuration\": {\n \"metric_name\": \"column.max\",\n \"domain_kwargs\": {\"column\": \"congestion_surcharge\"},\n \"metric_value_kwargs\": None,\n \"metric_dependencies\": None,\n },\n \"num_batches\": 2,\n }\n },\n \"kwargs\": {\n \"column\": \"congestion_surcharge\",\n \"min_value\": 0.01,\n \"max_value\": 2.49,\n \"mostly\": 1.0,\n },\n },\n ),\n ]\n\n my_column_timestamps_rule_expectation_configurations_oneshot_estimator: List[\n ExpectationConfiguration\n ] = [\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_match_strftime_format\",\n \"kwargs\": {\n \"column\": \"pickup_datetime\",\n \"strftime_format\": \"%Y-%m-%d %H:%M:%S\",\n },\n \"meta\": {\n \"details\": {\n \"success_ratio\": 1.0,\n \"candidate_strings\": [\n \"%Y-%m-%d %H:%M:%S\",\n \"%y-%m-%d\",\n ],\n },\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms that fields ending in _datetime are of the format detected by parameter builder SimpleDateFormatStringParameterBuilder\"\n ],\n },\n },\n }\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_match_strftime_format\",\n \"kwargs\": {\n \"column\": \"dropoff_datetime\",\n \"strftime_format\": \"%Y-%m-%d %H:%M:%S\",\n },\n \"meta\": {\n \"details\": {\n \"success_ratio\": 1.0,\n \"candidate_strings\": [\n \"%Y-%m-%d %H:%M:%S\",\n \"%y-%m-%d\",\n ],\n },\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms that fields ending in _datetime are of the format detected by parameter builder SimpleDateFormatStringParameterBuilder\"\n ],\n },\n },\n }\n ),\n ]\n my_column_regex_rule_expectation_configurations_oneshot_estimator: List[\n ExpectationConfiguration\n ] = [\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_match_regex\",\n \"kwargs\": {\n \"column\": \"VendorID\",\n \"regex\": {\n \"value\": [r\"^\\d{1}$\"],\n \"details\": {\n \"evaluated_regexes\": {r\"^\\d{1}$\": 1.0, r\"^\\d{2}$\": 0.0},\n \"threshold\": 0.9,\n },\n },\n },\n 
\"meta\": {\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms that fields ending in ID are of the format detected by parameter builder RegexPatternStringParameterBuilder\"\n ],\n },\n },\n }\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_match_regex\",\n \"meta\": {\"notes\": {\"format\": \"markdown\", \"content\": None}},\n \"kwargs\": {\n \"column\": \"RatecodeID\",\n \"regex\": {\n \"value\": [r\"^\\d{1}$\"],\n \"details\": {\n \"evaluated_regexes\": {r\"^\\d{1}$\": 1.0, r\"^\\d{2}$\": 0.0},\n \"threshold\": 0.9,\n },\n },\n },\n \"meta\": {\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms that fields ending in ID are of the format detected by parameter builder RegexPatternStringParameterBuilder\"\n ],\n }\n },\n }\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_match_regex\",\n \"meta\": {\"notes\": {\"format\": \"markdown\", \"content\": None}},\n \"kwargs\": {\n \"column\": \"PULocationID\",\n \"regex\": {\n \"value\": [r\"^\\d{1}$\"],\n \"details\": {\n \"evaluated_regexes\": {r\"^\\d{1}$\": 1.0, r\"^\\d{2}$\": 0.0},\n \"threshold\": 0.9,\n },\n },\n },\n \"meta\": {\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms that fields ending in ID are of the format detected by parameter builder RegexPatternStringParameterBuilder\"\n ],\n }\n },\n }\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_match_regex\",\n \"meta\": {\"notes\": {\"format\": \"markdown\", \"content\": None}},\n \"kwargs\": {\n \"column\": \"DOLocationID\",\n \"regex\": {\n \"value\": [r\"^\\d{1}$\"],\n \"details\": {\n \"evaluated_regexes\": {r\"^\\d{1}$\": 1.0, r\"^\\d{2}$\": 0.0},\n \"threshold\": 0.9,\n },\n },\n },\n \"meta\": {\n \"notes\": {\n \"format\": \"markdown\",\n \"content\": [\n \"### This expectation confirms that fields ending in ID are of the format detected by parameter builder RegexPatternStringParameterBuilder\"\n ],\n }\n },\n }\n ),\n ]\n\n my_rule_for_very_few_cardinality_expectation_configurations: List[\n ExpectationConfiguration\n ] = [\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_be_in_set\",\n \"kwargs\": {\n \"column\": \"VendorID\",\n \"value_set\": [1, 2, 4],\n },\n \"meta\": {},\n }\n ),\n ExpectationConfiguration(\n **{\n \"expectation_type\": \"expect_column_values_to_be_in_set\",\n \"kwargs\": {\n \"column\": \"passenger_count\",\n \"value_set\": [0, 1, 2, 3, 4, 5, 6],\n },\n \"meta\": {},\n }\n ),\n ]\n expectation_configurations: List[ExpectationConfiguration] = []\n\n expectation_configurations.extend(\n my_row_count_range_rule_expectation_configurations_oneshot_estimator\n )\n expectation_configurations.extend(\n my_column_ranges_rule_expectation_configurations_oneshot_estimator\n )\n expectation_configurations.extend(\n my_column_timestamps_rule_expectation_configurations_oneshot_estimator\n )\n\n expectation_configurations.extend(\n my_column_regex_rule_expectation_configurations_oneshot_estimator\n )\n expectation_configurations.extend(\n my_rule_for_very_few_cardinality_expectation_configurations\n )\n expectation_suite_name_oneshot_estimator: str = (\n \"bobby_columnar_table_multi_batch_oneshot_estimator\"\n )\n expected_expectation_suite_oneshot_estimator: ExpectationSuite = ExpectationSuite(\n expectation_suite_name=expectation_suite_name_oneshot_estimator,\n 
data_context=empty_data_context,\n )\n expectation_configuration: ExpectationConfiguration\n for expectation_configuration in expectation_configurations:\n # NOTE Will 20211208 add_expectation() method, although being called by an ExpectationSuite instance, is being\n # called within a fixture, and we will prevent it from sending a usage_event by calling the private method.\n expected_expectation_suite_oneshot_estimator._add_expectation(\n expectation_configuration=expectation_configuration, send_usage_event=False\n )\n\n profiler_config: dict = yaml.load(verbose_profiler_config)\n\n # Roundtrip through schema validation to remove any illegal fields add/or restore any missing fields.\n deserialized_config: dict = ruleBasedProfilerConfigSchema.load(profiler_config)\n serialized_config: dict = ruleBasedProfilerConfigSchema.dump(deserialized_config)\n\n # `class_name`/`module_name` are generally consumed through `instantiate_class_from_config`\n # so we need to manually remove those values if we wish to use the **kwargs instantiation pattern\n serialized_config.pop(\"class_name\")\n serialized_config.pop(\"module_name\")\n\n expected_expectation_suite_oneshot_estimator.add_citation(\n comment=\"Suite created by Rule-Based Profiler with the configuration included.\",\n profiler_config=serialized_config,\n )\n\n return {\n \"profiler_config\": verbose_profiler_config,\n \"test_configuration_oneshot_estimator\": {\n \"expectation_suite_name\": expectation_suite_name_oneshot_estimator,\n \"expected_expectation_suite\": expected_expectation_suite_oneshot_estimator,\n },\n }\n\n\[email protected]\ndef bobby_columnar_table_multi_batch_deterministic_data_context(\n tmp_path_factory,\n monkeypatch,\n) -> DataContext:\n skip_if_python_below_minimum_version()\n\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n monkeypatch.setattr(AnonymizedUsageStatisticsConfig, \"enabled\", True)\n\n project_path: str = str(tmp_path_factory.mktemp(\"taxi_data_context\"))\n context_path: str = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n data_path: str = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"integration\",\n \"fixtures\",\n \"yellow_tripdata_pandas_fixture\",\n \"great_expectations\",\n \"great_expectations.yml\",\n ),\n ),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"test_sets\",\n \"taxi_yellow_tripdata_samples\",\n \"random_subsamples\",\n \"yellow_tripdata_7500_lines_sample_2019-01.csv\",\n ),\n ),\n str(\n os.path.join(\n context_path, \"..\", \"data\", \"yellow_tripdata_sample_2019-01.csv\"\n )\n ),\n )\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"test_sets\",\n \"taxi_yellow_tripdata_samples\",\n \"random_subsamples\",\n \"yellow_tripdata_8500_lines_sample_2019-02.csv\",\n ),\n ),\n str(\n os.path.join(\n context_path, \"..\", \"data\", \"yellow_tripdata_sample_2019-02.csv\"\n )\n ),\n )\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"test_sets\",\n \"taxi_yellow_tripdata_samples\",\n \"random_subsamples\",\n \"yellow_tripdata_9000_lines_sample_2019-03.csv\",\n ),\n ),\n str(\n os.path.join(\n context_path, \"..\", \"data\", \"yellow_tripdata_sample_2019-03.csv\"\n )\n ),\n )\n\n context: DataContext = DataContext(context_root_dir=context_path)\n 
assert context.root_directory == context_path\n\n return context\n\n\[email protected]\ndef bobster_columnar_table_multi_batch_normal_mean_5000_stdev_1000():\n \"\"\"\n About the \"Bobster\" User Workflow Fixture\n\n Bobster has multiple tables of columnar data called user_events (DataAsset) that he wants to check periodically as\n new data is added.\n\n - He knows what some of the columns are of the acconting/financial/account type, but he is currently interested in\n the average table size (in terms of the number of rows in a table).\n\n He wants to use a configurable profiler to generate a description (ExpectationSuite) about tables so that he can:\n\n 1. monitor the average number of rows in the tables\n\n 2. have a place to add his domain knowledge of the data (that can also be validated against new data)\n\n 3. if all goes well, generalize some of the Profiler to use on his other tables\n\n Bobster uses a custom implementation of the \"bootstrap\" non-parametric (i.e, data-driven) statistical estimator.\n\n Bobster configures his Profiler using the YAML configurations and data file locations captured in this fixture.\n \"\"\"\n skip_if_python_below_minimum_version()\n\n verbose_profiler_config_file_path: str = file_relative_path(\n __file__,\n os.path.join(\n \"test_fixtures\",\n \"rule_based_profiler\",\n \"bobster_user_workflow_verbose_profiler_config.yml\",\n ),\n )\n\n verbose_profiler_config: str\n with open(verbose_profiler_config_file_path) as f:\n verbose_profiler_config = f.read()\n\n expectation_suite_name_bootstrap_estimator: str = (\n \"bobby_columnar_table_multi_batch_bootstrap_estimator\"\n )\n\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value: int = (\n 5000\n )\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_std_value: float = (\n 1.0e3\n )\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_num_stds: float = (\n 3.00\n )\n\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_min_value_mean_value: int = round(\n float(\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value\n )\n - (\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_num_stds\n * my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_std_value\n )\n )\n\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_max_value_mean_value: int = round(\n float(\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value\n )\n + (\n my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_num_stds\n * my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_std_value\n )\n )\n\n return {\n \"profiler_config\": verbose_profiler_config,\n \"test_configuration_bootstrap_estimator\": {\n \"expectation_suite_name\": expectation_suite_name_bootstrap_estimator,\n \"expect_table_row_count_to_be_between_mean_value\": my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value,\n \"expect_table_row_count_to_be_between_min_value_mean_value\": my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_min_value_mean_value,\n \"expect_table_row_count_to_be_between_max_value_mean_value\": my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_max_value_mean_value,\n },\n }\n\n\[email protected]\ndef bobster_columnar_table_multi_batch_normal_mean_5000_stdev_1000_data_context(\n tmp_path_factory,\n monkeypatch,\n) -> 
DataContext:\n \"\"\"\n This fixture generates three years' worth (36 months; i.e., 36 batches) of taxi trip data with the number of rows\n of a batch sampled from a normal distribution with the mean of 5,000 rows and the standard deviation of 1,000 rows.\n \"\"\"\n skip_if_python_below_minimum_version()\n\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n monkeypatch.setattr(AnonymizedUsageStatisticsConfig, \"enabled\", True)\n\n project_path: str = str(tmp_path_factory.mktemp(\"taxi_data_context\"))\n context_path: str = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n data_path: str = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"integration\",\n \"fixtures\",\n \"yellow_tripdata_pandas_fixture\",\n \"great_expectations\",\n \"great_expectations.yml\",\n ),\n ),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n base_directory: str = file_relative_path(\n __file__,\n os.path.join(\n \"test_sets\",\n \"taxi_yellow_tripdata_samples\",\n ),\n )\n file_name_list: List[str] = get_filesystem_one_level_directory_glob_path_list(\n base_directory_path=base_directory, glob_directive=\"*.csv\"\n )\n file_name_list = sorted(file_name_list)\n num_files: int = len(file_name_list)\n\n rnd_num_sample: np.float64\n output_file_lenths: List[int] = [\n round(rnd_num_sample)\n for rnd_num_sample in np.random.normal(loc=5.0e3, scale=1.0e3, size=num_files)\n ]\n\n idx: int\n file_name: str\n\n output_file_name_length_map: Dict[str, int] = {\n file_name_list[idx]: output_file_lenths[idx]\n for idx, file_name in enumerate(file_name_list)\n }\n\n csv_source_path: str\n df: pd.DataFrame\n for file_name in file_name_list:\n csv_source_path = os.path.join(base_directory, file_name)\n df = pd.read_csv(filepath_or_buffer=csv_source_path)\n df = df.sample(\n n=output_file_name_length_map[file_name], replace=False, random_state=1\n )\n # noinspection PyTypeChecker\n df.to_csv(\n path_or_buf=os.path.join(context_path, \"..\", \"data\", file_name), index=False\n )\n\n context: DataContext = DataContext(context_root_dir=context_path)\n assert context.root_directory == context_path\n\n return context\n\n\[email protected]\ndef quentin_columnar_table_multi_batch():\n \"\"\"\n About the \"Quentin\" User Workflow Fixture\n Quentin has multiple tables of columnar data called user_events (DataAsset) that he wants to check periodically as\n new data is added.\n - He knows what some of the columns are of the accounting/financial/account type, but he is currently interested\n in the range of quantiles of columns capturing financial quantities (column names ending on \"_amount\" suffix).\n He wants to use a configurable profiler to generate a description (ExpectationSuite) about tables so that he can:\n 1. monitor the range of quantiles of columns capturing financial quantities in the tables\n 2. have a place to add his domain knowledge of the data (that can also be validated against new data)\n 3. 
if all goes well, generalize some of the Profiler to use on his other tables\n Quentin uses a custom implementation of the \"bootstrap\" non-parametric (i.e, data-driven) statistical estimator.\n Quentin configures his Profiler using the YAML configurations and data file locations captured in this fixture.\n \"\"\"\n skip_if_python_below_minimum_version()\n\n verbose_profiler_config_file_path: str = file_relative_path(\n __file__,\n os.path.join(\n \"test_fixtures\",\n \"rule_based_profiler\",\n \"quentin_user_workflow_verbose_profiler_config.yml\",\n ),\n )\n\n verbose_profiler_config: str\n with open(verbose_profiler_config_file_path) as f:\n verbose_profiler_config = f.read()\n\n expectation_suite_name_bootstrap_estimator: str = (\n \"quentin_columnar_table_multi_batch\"\n )\n\n return {\n \"profiler_config\": verbose_profiler_config,\n \"test_configuration\": {\n \"expectation_suite_name\": expectation_suite_name_bootstrap_estimator,\n \"expect_column_quantile_values_to_be_between_quantile_ranges_by_column\": {\n \"tolls_amount\": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],\n \"fare_amount\": [\n [5.842754275, 6.5],\n [8.675167517, 9.5750000000],\n [13.344354435, 15.650000000],\n ],\n \"tip_amount\": [\n [0.0, 0.0],\n [0.81269502, 1.97259736],\n [2.346049055, 2.993680968],\n ],\n \"total_amount\": [\n [8.2740033, 11.422183043],\n [11.2955000, 14.875000000],\n [16.746263451, 21.327684643],\n ],\n },\n },\n }\n\n\[email protected]\ndef quentin_columnar_table_multi_batch_data_context(\n tmp_path_factory,\n monkeypatch,\n) -> DataContext:\n \"\"\"\n This fixture generates three years' worth (36 months; i.e., 36 batches) of taxi trip data with the number of rows\n of each batch being equal to the original number per log file (10,000 rows).\n \"\"\"\n skip_if_python_below_minimum_version()\n\n # Re-enable GE_USAGE_STATS\n monkeypatch.delenv(\"GE_USAGE_STATS\")\n monkeypatch.setattr(AnonymizedUsageStatisticsConfig, \"enabled\", True)\n\n project_path: str = str(tmp_path_factory.mktemp(\"taxi_data_context\"))\n context_path: str = os.path.join(project_path, \"great_expectations\")\n os.makedirs(os.path.join(context_path, \"expectations\"), exist_ok=True)\n data_path: str = os.path.join(context_path, \"..\", \"data\")\n os.makedirs(os.path.join(data_path), exist_ok=True)\n shutil.copy(\n file_relative_path(\n __file__,\n os.path.join(\n \"integration\",\n \"fixtures\",\n \"yellow_tripdata_pandas_fixture\",\n \"great_expectations\",\n \"great_expectations.yml\",\n ),\n ),\n str(os.path.join(context_path, \"great_expectations.yml\")),\n )\n base_directory: str = file_relative_path(\n __file__,\n os.path.join(\n \"test_sets\",\n \"taxi_yellow_tripdata_samples\",\n ),\n )\n file_name_list: List[str] = get_filesystem_one_level_directory_glob_path_list(\n base_directory_path=base_directory, glob_directive=\"*.csv\"\n )\n file_name_list = sorted(file_name_list)\n\n file_name: str\n csv_source_path: str\n for file_name in file_name_list:\n csv_source_path = os.path.join(base_directory, file_name)\n shutil.copy(\n csv_source_path,\n os.path.join(context_path, \"..\", \"data\", file_name),\n )\n\n context: DataContext = DataContext(context_root_dir=context_path)\n assert context.root_directory == context_path\n\n return context\n\n\n# TODO: AJB 20210525 This fixture is not yet used but may be helpful to generate batches for unit tests of multibatch\n# workflows. 
It should probably be extended to add different column types / data.\[email protected]\ndef multibatch_generic_csv_generator():\n \"\"\"\n Construct a series of csv files with many data types for use in multibatch testing\n \"\"\"\n skip_if_python_below_minimum_version()\n\n def _multibatch_generic_csv_generator(\n data_path: str,\n start_date: Optional[datetime.datetime] = None,\n num_event_batches: Optional[int] = 20,\n num_events_per_batch: Optional[int] = 5,\n ) -> List[str]:\n\n if start_date is None:\n start_date = datetime.datetime(2000, 1, 1)\n\n file_list = []\n category_strings = {\n 0: \"category0\",\n 1: \"category1\",\n 2: \"category2\",\n 3: \"category3\",\n 4: \"category4\",\n 5: \"category5\",\n 6: \"category6\",\n }\n for batch_num in range(num_event_batches):\n # generate a dataframe with multiple column types\n batch_start_date = start_date + datetime.timedelta(\n days=(batch_num * num_events_per_batch)\n )\n # TODO: AJB 20210416 Add more column types\n df = pd.DataFrame(\n {\n \"event_date\": [\n (batch_start_date + datetime.timedelta(days=i)).strftime(\n \"%Y-%m-%d\"\n )\n for i in range(num_events_per_batch)\n ],\n \"batch_num\": [batch_num + 1 for _ in range(num_events_per_batch)],\n \"string_cardinality_3\": [\n category_strings[i % 3] for i in range(num_events_per_batch)\n ],\n }\n )\n filename = f\"csv_batch_{batch_num + 1:03}_of_{num_event_batches:03}.csv\"\n file_list.append(filename)\n # noinspection PyTypeChecker\n df.to_csv(\n os.path.join(data_path, filename),\n index_label=\"intra_batch_index\",\n )\n\n return file_list\n\n return _multibatch_generic_csv_generator\n\n\[email protected]\ndef multibatch_generic_csv_generator_context(monkeypatch, empty_data_context):\n skip_if_python_below_minimum_version()\n\n context: DataContext = empty_data_context\n monkeypatch.chdir(context.root_directory)\n data_relative_path = \"../data\"\n data_path = os.path.join(context.root_directory, data_relative_path)\n os.makedirs(data_path, exist_ok=True)\n\n data_connector_base_directory = \"./\"\n monkeypatch.setenv(\"base_directory\", data_connector_base_directory)\n monkeypatch.setenv(\"data_fixtures_root\", data_relative_path)\n\n datasource_name = \"generic_csv_generator\"\n data_connector_name = \"daily_data_connector\"\n asset_name = \"daily_data_asset\"\n datasource_config = rf\"\"\"\nclass_name: Datasource\nmodule_name: great_expectations.datasource\nexecution_engine:\n module_name: great_expectations.execution_engine\n class_name: PandasExecutionEngine\ndata_connectors:\n {data_connector_name}:\n class_name: ConfiguredAssetFilesystemDataConnector\n assets:\n {asset_name}:\n module_name: great_expectations.datasource.data_connector.asset\n group_names:\n - batch_num\n - total_batches\n pattern: csv_batch_(\\d.+)_of_(\\d.+)\\.csv\n reader_options:\n delimiter: \",\"\n class_name: Asset\n base_directory: $data_fixtures_root\n glob_directive: \"*.csv\"\n base_directory: $base_directory\n module_name: great_expectations.datasource.data_connector\n \"\"\"\n\n context.add_datasource(name=datasource_name, **yaml.load(datasource_config))\n\n assert context.list_datasources() == [\n {\n \"class_name\": \"Datasource\",\n \"data_connectors\": {\n data_connector_name: {\n \"assets\": {\n asset_name: {\n \"base_directory\": data_relative_path,\n \"class_name\": \"Asset\",\n \"glob_directive\": \"*.csv\",\n \"group_names\": [\"batch_num\", \"total_batches\"],\n \"module_name\": \"great_expectations.datasource.data_connector.asset\",\n \"pattern\": 
\"csv_batch_(\\\\d.+)_of_(\\\\d.+)\\\\.csv\",\n }\n },\n \"base_directory\": data_connector_base_directory,\n \"class_name\": \"ConfiguredAssetFilesystemDataConnector\",\n \"module_name\": \"great_expectations.datasource.data_connector\",\n }\n },\n \"execution_engine\": {\n \"class_name\": \"PandasExecutionEngine\",\n \"module_name\": \"great_expectations.execution_engine\",\n },\n \"module_name\": \"great_expectations.datasource\",\n \"name\": \"generic_csv_generator\",\n }\n ]\n return context\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"numpy.random.normal"
]
] |
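The record above ends with a fixture that draws one target row count per taxi CSV from a normal distribution (mean 5,000 rows, standard deviation 1,000 rows) and writes a subsampled copy of each file, which is exactly why its API column lists `pandas.read_csv`, `pandas.DataFrame`, and `numpy.random.normal`. A minimal standalone sketch of that batch-generation pattern follows; the function name `subsample_csv_batches` and the `source_dir`/`output_dir` arguments are hypothetical placeholders, not part of the recorded fixture.

```python
import os
from typing import Dict, List

import numpy as np
import pandas as pd


def subsample_csv_batches(
    source_dir: str,           # hypothetical directory holding the full-size CSV batches
    output_dir: str,           # hypothetical directory for the subsampled copies
    mean_rows: float = 5.0e3,
    stdev_rows: float = 1.0e3,
    random_state: int = 1,
) -> Dict[str, int]:
    """Write a subsampled copy of each CSV whose row count is drawn from N(mean_rows, stdev_rows)."""
    file_names: List[str] = sorted(
        name for name in os.listdir(source_dir) if name.endswith(".csv")
    )
    # One sampled target size per input file.
    target_sizes = [
        int(round(n))
        for n in np.random.normal(loc=mean_rows, scale=stdev_rows, size=len(file_names))
    ]
    written_sizes: Dict[str, int] = {}
    for file_name, n_rows in zip(file_names, target_sizes):
        df: pd.DataFrame = pd.read_csv(os.path.join(source_dir, file_name))
        # Sample without replacement with a fixed seed so the generated batches are reproducible.
        df = df.sample(n=min(n_rows, len(df)), replace=False, random_state=random_state)
        df.to_csv(os.path.join(output_dir, file_name), index=False)
        written_sizes[file_name] = len(df)
    return written_sizes
```

Clamping the sampled count to `len(df)` guards against a draw that exceeds the input file's size; the original fixture can omit that guard because its source files (10,000 rows each) are comfortably larger than the sampled targets.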
drudd/pandas | [
"99922b9175c4ca6acb0f42dd17c01c507cbd94d6"
] | [
"pandas/core/algorithms.py"
] | [
"\"\"\"\nGeneric data algorithms. This module is experimental at the moment and not\nintended for public consumption\n\"\"\"\nfrom __future__ import division\nfrom warnings import warn\nimport numpy as np\n\nimport pandas.core.common as com\nimport pandas.algos as algos\nimport pandas.hashtable as htable\nimport pandas.compat as compat\n\ndef match(to_match, values, na_sentinel=-1):\n \"\"\"\n Compute locations of to_match into values\n\n Parameters\n ----------\n to_match : array-like\n values to find positions of\n values : array-like\n Unique set of values\n na_sentinel : int, default -1\n Value to mark \"not found\"\n\n Examples\n --------\n\n Returns\n -------\n match : ndarray of integers\n \"\"\"\n values = com._asarray_tuplesafe(values)\n if issubclass(values.dtype.type, compat.string_types):\n values = np.array(values, dtype='O')\n\n f = lambda htype, caster: _match_generic(to_match, values, htype, caster)\n result = _hashtable_algo(f, values.dtype)\n\n if na_sentinel != -1:\n\n # replace but return a numpy array\n # use a Series because it handles dtype conversions properly\n from pandas.core.series import Series\n result = Series(result.ravel()).replace(-1,na_sentinel).values.reshape(result.shape)\n\n return result\n\n\ndef unique(values):\n \"\"\"\n Compute unique values (not necessarily sorted) efficiently from input array\n of values\n\n Parameters\n ----------\n values : array-like\n\n Returns\n -------\n uniques\n \"\"\"\n values = com._asarray_tuplesafe(values)\n f = lambda htype, caster: _unique_generic(values, htype, caster)\n return _hashtable_algo(f, values.dtype)\n\n\n# def count(values, uniques=None):\n# f = lambda htype, caster: _count_generic(values, htype, caster)\n\n# if uniques is not None:\n# raise NotImplementedError\n# else:\n# return _hashtable_algo(f, values.dtype)\n\n\ndef _hashtable_algo(f, dtype):\n \"\"\"\n f(HashTable, type_caster) -> result\n \"\"\"\n if com.is_float_dtype(dtype):\n return f(htable.Float64HashTable, com._ensure_float64)\n elif com.is_integer_dtype(dtype):\n return f(htable.Int64HashTable, com._ensure_int64)\n else:\n return f(htable.PyObjectHashTable, com._ensure_object)\n\n\ndef _count_generic(values, table_type, type_caster):\n from pandas.core.series import Series\n\n values = type_caster(values)\n table = table_type(min(len(values), 1000000))\n uniques, labels = table.factorize(values)\n\n return Series(counts, index=uniques)\n\n\ndef _match_generic(values, index, table_type, type_caster):\n values = type_caster(values)\n index = type_caster(index)\n table = table_type(min(len(index), 1000000))\n table.map_locations(index)\n return table.lookup(values)\n\n\ndef _unique_generic(values, table_type, type_caster):\n values = type_caster(values)\n table = table_type(min(len(values), 1000000))\n uniques = table.unique(values)\n return type_caster(uniques)\n\n\ndef factorize(values, sort=False, order=None, na_sentinel=-1):\n \"\"\"\n Encode input values as an enumerated type or categorical variable\n\n Parameters\n ----------\n values : ndarray (1-d)\n Sequence\n sort : boolean, default False\n Sort by values\n order :\n na_sentinel: int, default -1\n Value to mark \"not found\"\n\n Returns\n -------\n \"\"\"\n from pandas.tseries.period import PeriodIndex\n vals = np.asarray(values)\n is_datetime = com.is_datetime64_dtype(vals)\n (hash_klass, vec_klass), vals = _get_data_algo(vals, _hashtables)\n\n table = hash_klass(len(vals))\n uniques = vec_klass()\n labels = table.get_labels(vals, uniques, 0, na_sentinel)\n\n labels = 
com._ensure_platform_int(labels)\n\n uniques = uniques.to_array()\n\n if sort and len(uniques) > 0:\n sorter = uniques.argsort()\n reverse_indexer = np.empty(len(sorter), dtype=np.int_)\n reverse_indexer.put(sorter, np.arange(len(sorter)))\n\n mask = labels < 0\n labels = reverse_indexer.take(labels)\n np.putmask(labels, mask, -1)\n\n uniques = uniques.take(sorter)\n\n if is_datetime:\n uniques = uniques.view('M8[ns]')\n if isinstance(values, PeriodIndex):\n uniques = PeriodIndex(ordinal=uniques, freq=values.freq)\n\n return labels, uniques\n\n\ndef value_counts(values, sort=True, ascending=False, normalize=False,\n bins=None):\n \"\"\"\n Compute a histogram of the counts of non-null values\n\n Parameters\n ----------\n values : ndarray (1-d)\n sort : boolean, default True\n Sort by values\n ascending : boolean, default False\n Sort in ascending order\n normalize: boolean, default False\n If True then compute a relative histogram\n bins : integer, optional\n Rather than count values, group them into half-open bins,\n convenience for pd.cut, only works with numeric data\n\n Returns\n -------\n value_counts : Series\n\n \"\"\"\n from pandas.core.series import Series\n from pandas.tools.tile import cut\n\n values = Series(values).values\n\n if bins is not None:\n try:\n cat, bins = cut(values, bins, retbins=True)\n except TypeError:\n raise TypeError(\"bins argument only works with numeric data.\")\n values = cat.labels\n\n if com.is_integer_dtype(values.dtype):\n values = com._ensure_int64(values)\n keys, counts = htable.value_count_int64(values)\n\n elif issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):\n dtype = values.dtype\n values = values.view(np.int64)\n keys, counts = htable.value_count_int64(values)\n\n # convert the keys back to the dtype we came in\n keys = Series(keys, dtype=dtype)\n\n else:\n mask = com.isnull(values)\n values = com._ensure_object(values)\n keys, counts = htable.value_count_object(values, mask)\n\n result = Series(counts, index=com._values_from_object(keys))\n\n if bins is not None:\n # TODO: This next line should be more efficient\n result = result.reindex(np.arange(len(cat.levels)), fill_value=0)\n result.index = bins[:-1]\n\n if sort:\n result.sort()\n if not ascending:\n result = result[::-1]\n\n if normalize:\n result = result / float(values.size)\n\n return result\n\n\ndef mode(values):\n \"\"\"Returns the mode or mode(s) of the passed Series or ndarray (sorted)\"\"\"\n # must sort because hash order isn't necessarily defined.\n from pandas.core.series import Series\n\n if isinstance(values, Series):\n constructor = values._constructor\n values = values.values\n else:\n values = np.asanyarray(values)\n constructor = Series\n\n dtype = values.dtype\n if com.is_integer_dtype(values.dtype):\n values = com._ensure_int64(values)\n result = constructor(sorted(htable.mode_int64(values)), dtype=dtype)\n\n elif issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):\n dtype = values.dtype\n values = values.view(np.int64)\n result = constructor(sorted(htable.mode_int64(values)), dtype=dtype)\n\n else:\n mask = com.isnull(values)\n values = com._ensure_object(values)\n res = htable.mode_object(values, mask)\n try:\n res = sorted(res)\n except TypeError as e:\n warn(\"Unable to sort modes: %s\" % e)\n result = constructor(res, dtype=dtype)\n\n return result\n\n\ndef rank(values, axis=0, method='average', na_option='keep',\n ascending=True):\n \"\"\"\n\n \"\"\"\n if values.ndim == 1:\n f, values = _get_data_algo(values, _rank1d_functions)\n ranks 
= f(values, ties_method=method, ascending=ascending,\n na_option=na_option)\n elif values.ndim == 2:\n f, values = _get_data_algo(values, _rank2d_functions)\n ranks = f(values, axis=axis, ties_method=method,\n ascending=ascending, na_option=na_option)\n return ranks\n\n\ndef quantile(x, q, interpolation_method='fraction'):\n \"\"\"\n Compute sample quantile or quantiles of the input array. For example, q=0.5\n computes the median.\n\n The `interpolation_method` parameter supports three values, namely\n `fraction` (default), `lower` and `higher`. Interpolation is done only,\n if the desired quantile lies between two data points `i` and `j`. For\n `fraction`, the result is an interpolated value between `i` and `j`;\n for `lower`, the result is `i`, for `higher` the result is `j`.\n\n Parameters\n ----------\n x : ndarray\n Values from which to extract score.\n q : scalar or array\n Percentile at which to extract score.\n interpolation_method : {'fraction', 'lower', 'higher'}, optional\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n - fraction: `i + (j - i)*fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n -lower: `i`.\n - higher: `j`.\n\n Returns\n -------\n score : float\n Score at percentile.\n\n Examples\n --------\n >>> from scipy import stats\n >>> a = np.arange(100)\n >>> stats.scoreatpercentile(a, 50)\n 49.5\n\n \"\"\"\n x = np.asarray(x)\n mask = com.isnull(x)\n\n x = x[-mask]\n\n values = np.sort(x)\n\n def _get_score(at):\n if len(values) == 0:\n return np.nan\n\n idx = at * (len(values) - 1)\n if idx % 1 == 0:\n score = values[idx]\n else:\n if interpolation_method == 'fraction':\n score = _interpolate(values[int(idx)], values[int(idx) + 1],\n idx % 1)\n elif interpolation_method == 'lower':\n score = values[np.floor(idx)]\n elif interpolation_method == 'higher':\n score = values[np.ceil(idx)]\n else:\n raise ValueError(\"interpolation_method can only be 'fraction' \"\n \", 'lower' or 'higher'\")\n\n return score\n\n if np.isscalar(q):\n return _get_score(q)\n else:\n q = np.asarray(q, np.float64)\n return algos.arrmap_float64(q, _get_score)\n\n\ndef _interpolate(a, b, fraction):\n \"\"\"Returns the point at the given fraction between a and b, where\n 'fraction' must be between 0 and 1.\n \"\"\"\n return a + (b - a) * fraction\n\n\ndef _get_data_algo(values, func_map):\n if com.is_float_dtype(values):\n f = func_map['float64']\n values = com._ensure_float64(values)\n elif com.is_datetime64_dtype(values):\n f = func_map['int64']\n values = values.view('i8')\n elif com.is_integer_dtype(values):\n f = func_map['int64']\n values = com._ensure_int64(values)\n else:\n f = func_map['generic']\n values = com._ensure_object(values)\n return f, values\n\n\ndef group_position(*args):\n \"\"\"\n Get group position\n \"\"\"\n from collections import defaultdict\n table = defaultdict(int)\n\n result = []\n for tup in zip(*args):\n result.append(table[tup])\n table[tup] += 1\n\n return result\n\n\n_rank1d_functions = {\n 'float64': algos.rank_1d_float64,\n 'int64': algos.rank_1d_int64,\n 'generic': algos.rank_1d_generic\n}\n\n_rank2d_functions = {\n 'float64': algos.rank_2d_float64,\n 'int64': algos.rank_2d_int64,\n 'generic': algos.rank_2d_generic\n}\n\n_hashtables = {\n 'float64': (htable.Float64HashTable, htable.Float64Vector),\n 'int64': (htable.Int64HashTable, htable.Int64Vector),\n 'generic': (htable.PyObjectHashTable, htable.ObjectVector)\n}\n"
] | [
[
"pandas.hashtable.value_count_int64",
"pandas.core.common.isnull",
"pandas.hashtable.mode_int64",
"numpy.asarray",
"pandas.hashtable.mode_object",
"pandas.core.common._asarray_tuplesafe",
"numpy.isscalar",
"pandas.core.common.is_float_dtype",
"pandas.algos.arrmap_float64",
"pandas.core.common._values_from_object",
"numpy.ceil",
"numpy.asanyarray",
"pandas.core.common.is_integer_dtype",
"numpy.putmask",
"pandas.core.common._ensure_float64",
"numpy.sort",
"pandas.core.common.is_datetime64_dtype",
"pandas.hashtable.value_count_object",
"pandas.tseries.period.PeriodIndex",
"pandas.tools.tile.cut",
"pandas.core.series.Series",
"numpy.floor",
"pandas.core.common._ensure_int64",
"pandas.core.common._ensure_object",
"numpy.array",
"pandas.core.common._ensure_platform_int"
]
] |
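As a quick orientation for the `pandas.core.algorithms` snapshot above: the `quantile` helper's 'fraction' interpolation is plain linear interpolation between the two order statistics that bracket `q * (n - 1)`, exactly as its docstring describes (`i + (j - i) * fraction`). Below is a minimal standalone sketch of that rule in NumPy; the function name `quantile_fraction` is illustrative only and is not part of the pandas file recorded above.

import numpy as np

def quantile_fraction(x, q):
    """Linear ('fraction') interpolation as described in the quantile
    docstring above: score = i + (j - i) * fraction, where i and j are
    the order statistics bracketing the index q * (n - 1)."""
    values = np.sort(np.asarray(x, dtype=float))
    idx = q * (len(values) - 1)
    lo = int(np.floor(idx))
    hi = int(np.ceil(idx))
    fraction = idx - lo
    return values[lo] + (values[hi] - values[lo]) * fraction

# Mirrors the docstring example: the median of arange(100) is 49.5.
assert quantile_fraction(np.arange(100), 0.5) == 49.5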
flaght/zipline | [
"15b8832421e2b1ba98ec9938ceb794f64ad581b5",
"0848a8a4862fd8bbe7ba64654e6bc731b4b622b7"
] | [
"tests/test_perf_tracking.py",
"tests/test_api_shim.py"
] | [
"#\n# Copyright 2016 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division\n\nimport copy\nfrom datetime import (\n datetime,\n timedelta,\n)\nimport logging\n\nimport nose.tools as nt\nimport pytz\n\nimport pandas as pd\nimport numpy as np\nfrom six.moves import range, zip\n\nfrom zipline.assets import Asset\nfrom zipline.assets.synthetic import make_simple_equity_info\nfrom zipline.data.us_equity_pricing import (\n SQLiteAdjustmentWriter,\n SQLiteAdjustmentReader,\n)\nimport zipline.utils.factory as factory\nimport zipline.finance.performance as perf\nfrom zipline.finance.transaction import create_transaction\nimport zipline.utils.math_utils as zp_math\n\nfrom zipline.finance.blotter import Order\nfrom zipline.finance.performance.position import Position\nfrom zipline.utils.factory import create_simulation_parameters\nfrom zipline.utils.serialization_utils import (\n loads_with_persistent_ids, dumps_with_persistent_ids\n)\nfrom zipline.testing import (\n MockDailyBarReader,\n create_data_portal_from_trade_history,\n create_empty_splits_mergers_frame,\n tmp_trading_env,\n)\nfrom zipline.testing.fixtures import (\n WithInstanceTmpDir,\n WithSimParams,\n WithTmpDir,\n WithTradingEnvironment,\n WithTradingCalendars,\n ZiplineTestCase,\n)\nfrom zipline.utils.calendars import get_calendar\n\nlogger = logging.getLogger('Test Perf Tracking')\n\noneday = timedelta(days=1)\ntradingday = timedelta(hours=6, minutes=30)\n\n# nose.tools changed name in python 3\nif not hasattr(nt, 'assert_count_equal'):\n nt.assert_count_equal = nt.assert_items_equal\n\n\ndef check_perf_period(pp,\n gross_leverage,\n net_leverage,\n long_exposure,\n longs_count,\n short_exposure,\n shorts_count):\n\n perf_data = pp.to_dict()\n np.testing.assert_allclose(\n gross_leverage, perf_data['gross_leverage'], rtol=1e-3)\n np.testing.assert_allclose(\n net_leverage, perf_data['net_leverage'], rtol=1e-3)\n np.testing.assert_allclose(\n long_exposure, perf_data['long_exposure'], rtol=1e-3)\n np.testing.assert_allclose(\n longs_count, perf_data['longs_count'], rtol=1e-3)\n np.testing.assert_allclose(\n short_exposure, perf_data['short_exposure'], rtol=1e-3)\n np.testing.assert_allclose(\n shorts_count, perf_data['shorts_count'], rtol=1e-3)\n\n\ndef check_account(account,\n settled_cash,\n equity_with_loan,\n total_positions_value,\n total_positions_exposure,\n regt_equity,\n available_funds,\n excess_liquidity,\n cushion,\n leverage,\n net_leverage,\n net_liquidation):\n # this is a long only portfolio that is only partially invested\n # so net and gross leverage are equal.\n\n np.testing.assert_allclose(settled_cash,\n account.settled_cash, rtol=1e-3)\n np.testing.assert_allclose(equity_with_loan,\n account.equity_with_loan, rtol=1e-3)\n np.testing.assert_allclose(total_positions_value,\n account.total_positions_value, rtol=1e-3)\n np.testing.assert_allclose(total_positions_exposure,\n account.total_positions_exposure, rtol=1e-3)\n np.testing.assert_allclose(regt_equity,\n 
account.regt_equity, rtol=1e-3)\n np.testing.assert_allclose(available_funds,\n account.available_funds, rtol=1e-3)\n np.testing.assert_allclose(excess_liquidity,\n account.excess_liquidity, rtol=1e-3)\n np.testing.assert_allclose(cushion,\n account.cushion, rtol=1e-3)\n np.testing.assert_allclose(leverage, account.leverage, rtol=1e-3)\n np.testing.assert_allclose(net_leverage,\n account.net_leverage, rtol=1e-3)\n np.testing.assert_allclose(net_liquidation,\n account.net_liquidation, rtol=1e-3)\n\n\ndef create_txn(asset, dt, price, amount):\n \"\"\"\n Create a fake transaction to be filled and processed prior to the execution\n of a given trade event.\n \"\"\"\n if not isinstance(asset, Asset):\n raise ValueError(\"pass an asset to create_txn\")\n\n mock_order = Order(dt, asset, amount, id=None)\n return create_transaction(mock_order, dt, price, amount)\n\n\ndef calculate_results(sim_params,\n env,\n data_portal,\n splits=None,\n txns=None,\n commissions=None):\n \"\"\"\n Run the given events through a stripped down version of the loop in\n AlgorithmSimulator.transform.\n\n IMPORTANT NOTE FOR TEST WRITERS/READERS:\n\n This loop has some wonky logic for the order of event processing for\n datasource types. This exists mostly to accommodate legacy tests that were\n making assumptions about how events would be sorted.\n\n In particular:\n\n - Dividends passed for a given date are processed PRIOR to any events\n for that date.\n - Splits passed for a given date are process AFTER any events for that\n date.\n\n Tests that use this helper should not be considered useful guarantees of\n the behavior of AlgorithmSimulator on a stream containing the same events\n unless the subgroups have been explicitly re-sorted in this way.\n \"\"\"\n\n txns = txns or []\n splits = splits or {}\n commissions = commissions or {}\n\n perf_tracker = perf.PerformanceTracker(\n sim_params, get_calendar(\"NYSE\"), env\n )\n\n results = []\n\n for date in sim_params.sessions:\n for txn in filter(lambda txn: txn.dt == date, txns):\n # Process txns for this date.\n perf_tracker.process_transaction(txn)\n\n try:\n commissions_for_date = commissions[date]\n for comm in commissions_for_date:\n perf_tracker.process_commission(comm)\n except KeyError:\n pass\n\n try:\n splits_for_date = splits[date]\n perf_tracker.handle_splits(splits_for_date)\n except KeyError:\n pass\n\n msg = perf_tracker.handle_market_close(date, data_portal)\n perf_tracker.position_tracker.sync_last_sale_prices(\n date, False, data_portal,\n )\n msg['account'] = perf_tracker.get_account(True)\n results.append(copy.deepcopy(msg))\n return results\n\n\ndef check_perf_tracker_serialization(perf_tracker):\n scalar_keys = [\n 'emission_rate',\n 'txn_count',\n 'market_open',\n 'last_close',\n 'start_session',\n 'day_count',\n 'capital_base',\n 'market_close',\n 'saved_dt',\n 'period_end',\n 'total_days',\n ]\n p_string = dumps_with_persistent_ids(perf_tracker)\n\n test = loads_with_persistent_ids(p_string, env=perf_tracker.env)\n\n for k in scalar_keys:\n nt.assert_equal(getattr(test, k), getattr(perf_tracker, k), k)\n\n perf_periods = (\n test.cumulative_performance,\n test.todays_performance\n )\n for period in perf_periods:\n nt.assert_true(hasattr(period, '_position_tracker'))\n\n\ndef setup_env_data(env, sim_params, sids, futures_sids=[]):\n data = {}\n for sid in sids:\n data[sid] = {\n \"start_date\": sim_params.sessions[0],\n \"end_date\": get_calendar(\"NYSE\").next_session_label(\n sim_params.sessions[-1]\n )\n }\n\n 
env.write_data(equities_data=data)\n\n futures_data = {}\n for future_sid in futures_sids:\n futures_data[future_sid] = {\n \"start_date\": sim_params.sessions[0],\n # (obviously) FIXME once we have a future calendar\n \"end_date\": get_calendar(\"NYSE\").next_session_label(\n sim_params.sessions[-1]\n ),\n \"multiplier\": 100\n }\n\n env.write_data(futures_data=futures_data)\n\n\nclass TestSplitPerformance(WithSimParams, WithTmpDir, ZiplineTestCase):\n START_DATE = pd.Timestamp('2006-01-03', tz='utc')\n END_DATE = pd.Timestamp('2006-01-04', tz='utc')\n SIM_PARAMS_CAPITAL_BASE = 10e3\n\n ASSET_FINDER_EQUITY_SIDS = 1, 2\n\n @classmethod\n def init_class_fixtures(cls):\n super(TestSplitPerformance, cls).init_class_fixtures()\n cls.asset1 = cls.env.asset_finder.retrieve_asset(1)\n\n def test_multiple_splits(self):\n # if multiple positions all have splits at the same time, verify that\n # the total leftover cash is correct\n perf_tracker = perf.PerformanceTracker(self.sim_params,\n self.trading_calendar,\n self.env)\n\n asset1 = self.asset_finder.retrieve_asset(1)\n asset2 = self.asset_finder.retrieve_asset(2)\n\n perf_tracker.position_tracker.positions[1] = \\\n Position(asset1, amount=10, cost_basis=10, last_sale_price=11)\n\n perf_tracker.position_tracker.positions[2] = \\\n Position(asset2, amount=10, cost_basis=10, last_sale_price=11)\n\n leftover_cash = perf_tracker.position_tracker.handle_splits(\n [(1, 0.333), (2, 0.333)]\n )\n\n # we used to have 10 shares that each cost us $10, total $100\n # now we have 33 shares that each cost us $3.33, total $99.9\n # each position returns $0.10 as leftover cash\n self.assertEqual(0.2, leftover_cash)\n\n def test_split_long_position(self):\n events = factory.create_trade_history(\n self.asset1,\n # TODO: Should we provide adjusted prices in the tests, or provide\n # raw prices and adjust via DataPortal?\n [20, 60],\n [100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n # set up a long position in sid 1\n # 100 shares at $20 apiece = $2000 position\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.tmpdir,\n self.sim_params,\n {1: events},\n )\n\n txns = [create_txn(self.asset1, events[0].dt, 20, 100)]\n\n # set up a split with ratio 3 occurring at the start of the second\n # day.\n splits = {\n events[1].dt: [(1, 3)]\n }\n\n results = calculate_results(self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n splits=splits)\n\n # should have 33 shares (at $60 apiece) and $20 in cash\n self.assertEqual(2, len(results))\n\n latest_positions = results[1]['daily_perf']['positions']\n self.assertEqual(1, len(latest_positions))\n\n # check the last position to make sure it's been updated\n position = latest_positions[0]\n\n self.assertEqual(1, position['sid'])\n self.assertEqual(33, position['amount'])\n self.assertEqual(60, position['cost_basis'])\n self.assertEqual(60, position['last_sale_price'])\n\n # since we started with $10000, and we spent $2000 on the\n # position, but then got $20 back, we should have $8020\n # (or close to it) in cash.\n\n # we won't get exactly 8020 because sometimes a split is\n # denoted as a ratio like 0.3333, and we lose some digits\n # of precision. 
thus, make sure we're pretty close.\n daily_perf = results[1]['daily_perf']\n\n self.assertTrue(\n zp_math.tolerant_equals(8020,\n daily_perf['ending_cash'], 1),\n \"ending_cash was {0}\".format(daily_perf['ending_cash']))\n\n # Validate that the account attributes were updated.\n account = results[1]['account']\n self.assertEqual(float('inf'), account.day_trades_remaining)\n # this is a long only portfolio that is only partially invested\n # so net and gross leverage are equal.\n np.testing.assert_allclose(0.198, account.leverage, rtol=1e-3)\n np.testing.assert_allclose(0.198, account.net_leverage, rtol=1e-3)\n np.testing.assert_allclose(8020, account.regt_equity, rtol=1e-3)\n self.assertEqual(float('inf'), account.regt_margin)\n np.testing.assert_allclose(8020, account.available_funds, rtol=1e-3)\n self.assertEqual(0, account.maintenance_margin_requirement)\n np.testing.assert_allclose(10000,\n account.equity_with_loan, rtol=1e-3)\n self.assertEqual(float('inf'), account.buying_power)\n self.assertEqual(0, account.initial_margin_requirement)\n np.testing.assert_allclose(8020, account.excess_liquidity,\n rtol=1e-3)\n np.testing.assert_allclose(8020, account.settled_cash, rtol=1e-3)\n np.testing.assert_allclose(10000, account.net_liquidation,\n rtol=1e-3)\n np.testing.assert_allclose(0.802, account.cushion, rtol=1e-3)\n np.testing.assert_allclose(1980, account.total_positions_value,\n rtol=1e-3)\n self.assertEqual(0, account.accrued_interest)\n\n for i, result in enumerate(results):\n for perf_kind in ('daily_perf', 'cumulative_perf'):\n perf_result = result[perf_kind]\n # prices aren't changing, so pnl and returns should be 0.0\n self.assertEqual(0.0, perf_result['pnl'],\n \"day %s %s pnl %s instead of 0.0\" %\n (i, perf_kind, perf_result['pnl']))\n self.assertEqual(0.0, perf_result['returns'],\n \"day %s %s returns %s instead of 0.0\" %\n (i, perf_kind, perf_result['returns']))\n\n\nclass TestDividendPerformance(WithSimParams,\n WithInstanceTmpDir,\n ZiplineTestCase):\n START_DATE = pd.Timestamp('2006-01-03', tz='utc')\n END_DATE = pd.Timestamp('2006-01-10', tz='utc')\n ASSET_FINDER_EQUITY_SIDS = 1, 2\n SIM_PARAMS_CAPITAL_BASE = 10e3\n\n @classmethod\n def init_class_fixtures(cls):\n super(TestDividendPerformance, cls).init_class_fixtures()\n cls.asset1 = cls.asset_finder.retrieve_asset(1)\n cls.asset2 = cls.asset_finder.retrieve_asset(2)\n\n def test_market_hours_calculations(self):\n # DST in US/Eastern began on Sunday March 14, 2010\n before = datetime(2010, 3, 12, 14, 31, tzinfo=pytz.utc)\n after = factory.get_next_trading_dt(\n before,\n timedelta(days=1),\n self.trading_calendar,\n )\n self.assertEqual(after.hour, 13)\n\n def test_long_position_receives_dividend(self):\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions,\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.00], dtype=np.float64),\n 'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[1].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[1].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([events[2].dt], 
dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n\n # Simulate a transaction being filled prior to the ex_date.\n txns = [create_txn(self.asset1, events[0].dt, 10.0, 100)]\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0.0, 0.0, 0.1, 0.1, 0.1, 0.1])\n daily_returns = [event['daily_perf']['returns']\n for event in results]\n self.assertEqual(daily_returns, [0.0, 0.0, 0.10, 0.0, 0.0, 0.0])\n cash_flows = [event['daily_perf']['capital_used']\n for event in results]\n self.assertEqual(cash_flows, [-1000, 0, 1000, 0, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows, [-1000, -1000, 0, 0, 0, 0])\n cash_pos = \\\n [event['cumulative_perf']['ending_cash'] for event in results]\n self.assertEqual(cash_pos, [9000, 9000, 10000, 10000, 10000, 10000])\n\n def test_long_position_receives_stock_dividend(self):\n # post some trades in the market\n events = {}\n for asset in [self.asset1, self.asset2]:\n events[asset.sid] = factory.create_trade_history(\n asset,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([], dtype=np.uint32),\n 'amount': np.array([], dtype=np.float64),\n 'declared_date': np.array([], dtype='datetime64[ns]'),\n 'ex_date': np.array([], dtype='datetime64[ns]'),\n 'pay_date': np.array([], dtype='datetime64[ns]'),\n 'record_date': np.array([], dtype='datetime64[ns]'),\n })\n sid_1 = events[1]\n stock_dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'payment_sid': np.array([2], dtype=np.uint32),\n 'ratio': np.array([2], dtype=np.float64),\n 'declared_date': np.array([sid_1[0].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([sid_1[1].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([sid_1[1].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([sid_1[2].dt], dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends, stock_dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n events,\n )\n\n data_portal._adjustment_reader = adjustment_reader\n txns = [create_txn(self.asset1, events[1][0].dt, 10.0, 100)]\n\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0.0, 0.0, 0.2, 0.2, 0.2, 0.2])\n daily_returns = [event['daily_perf']['returns']\n for event in results]\n self.assertEqual(daily_returns, [0.0, 0.0, 0.2, 0.0, 0.0, 0.0])\n 
cash_flows = [event['daily_perf']['capital_used']\n for event in results]\n self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows, [-1000] * 6)\n cash_pos = \\\n [event['cumulative_perf']['ending_cash'] for event in results]\n self.assertEqual(cash_pos, [9000] * 6)\n\n def test_long_position_purchased_on_ex_date_receives_no_dividend(self):\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar\n )\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.00], dtype=np.float64),\n 'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[1].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[1].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([events[2].dt], dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n\n # Simulate a transaction being filled on the ex_date.\n txns = [create_txn(self.asset1, events[1].dt, 10.0, 100)]\n\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0, 0])\n daily_returns = [event['daily_perf']['returns'] for event in results]\n self.assertEqual(daily_returns, [0, 0, 0, 0, 0, 0])\n cash_flows = [event['daily_perf']['capital_used'] for event in results]\n self.assertEqual(cash_flows, [0, -1000, 0, 0, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows,\n [0, -1000, -1000, -1000, -1000, -1000])\n\n def test_selling_before_dividend_payment_still_gets_paid(self):\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions,\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.00], dtype=np.float64),\n 'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[1].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[1].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([events[3].dt], dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n data_portal = create_data_portal_from_trade_history(\n 
self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n\n buy_txn = create_txn(self.asset1, events[0].dt, 10.0, 100)\n sell_txn = create_txn(self.asset1, events[2].dt, 10.0, -100)\n txns = [buy_txn, sell_txn]\n\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0, 0, 0, 0.1, 0.1, 0.1])\n daily_returns = [event['daily_perf']['returns'] for event in results]\n self.assertEqual(daily_returns, [0, 0, 0, 0.1, 0, 0])\n cash_flows = [event['daily_perf']['capital_used'] for event in results]\n self.assertEqual(cash_flows, [-1000, 0, 1000, 1000, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows,\n [-1000, -1000, 0, 1000, 1000, 1000])\n\n def test_buy_and_sell_before_ex(self):\n # need a six-day simparam\n\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions,\n )\n splits = mergers = create_empty_splits_mergers_frame()\n\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.0], dtype=np.float64),\n 'declared_date': np.array([events[3].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[4].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([events[5].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[4].dt], dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n buy_txn = create_txn(self.asset1, events[1].dt, 10.0, 100)\n sell_txn = create_txn(self.asset1, events[2].dt, 10.0, -100)\n txns = [buy_txn, sell_txn]\n\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0, 0])\n daily_returns = [event['daily_perf']['returns'] for event in results]\n self.assertEqual(daily_returns, [0, 0, 0, 0, 0, 0])\n cash_flows = [event['daily_perf']['capital_used'] for event in results]\n self.assertEqual(cash_flows, [0, -1000, 1000, 0, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows, [0, -1000, 0, 0, 0, 0])\n\n def test_ending_before_pay_date(self):\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n pay_date = self.sim_params.first_open\n # find pay date that is much later.\n for i in range(30):\n pay_date = 
factory.get_next_trading_dt(pay_date, oneday,\n self.trading_calendar)\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions,\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.00], dtype=np.float64),\n 'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([pay_date], dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n txns = [create_txn(self.asset1, events[1].dt, 10.0, 100)]\n\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0, 0, 0, 0.0, 0.0, 0.0])\n daily_returns = [event['daily_perf']['returns'] for event in results]\n self.assertEqual(daily_returns, [0, 0, 0, 0, 0, 0])\n cash_flows = [event['daily_perf']['capital_used'] for event in results]\n self.assertEqual(cash_flows, [0, -1000, 0, 0, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(\n cumulative_cash_flows,\n [0, -1000, -1000, -1000, -1000, -1000]\n )\n\n def test_short_position_pays_dividend(self):\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions,\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.00], dtype=np.float64),\n 'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[2].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[2].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([events[3].dt], dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n txns = [create_txn(self.asset1, events[1].dt, 10.0, -100)]\n\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, -0.1, -0.1, -0.1])\n daily_returns = [event['daily_perf']['returns'] for event in results]\n self.assertEqual(daily_returns, [0.0, 0.0, 0.0, -0.1, 0.0, 0.0])\n cash_flows = [event['daily_perf']['capital_used'] 
for event in results]\n self.assertEqual(cash_flows, [0, 1000, 0, -1000, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows, [0, 1000, 1000, 0, 0, 0])\n\n def test_no_position_receives_no_dividend(self):\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions,\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.00], dtype=np.float64),\n 'declared_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[1].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array([events[2].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[2].dt], dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n\n results = calculate_results(\n self.sim_params,\n self.env,\n data_portal,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n daily_returns = [event['daily_perf']['returns'] for event in results]\n self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n cash_flows = [event['daily_perf']['capital_used'] for event in results]\n self.assertEqual(cash_flows, [0, 0, 0, 0, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows, [0, 0, 0, 0, 0, 0])\n\n def test_no_dividend_at_simulation_end(self):\n # post some trades in the market\n events = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 10, 10, 10],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n dbpath = self.instance_tmpdir.getpath('adjustments.sqlite')\n\n writer = SQLiteAdjustmentWriter(\n dbpath,\n MockDailyBarReader(),\n self.trading_calendar.all_sessions,\n )\n splits = mergers = create_empty_splits_mergers_frame()\n dividends = pd.DataFrame({\n 'sid': np.array([1], dtype=np.uint32),\n 'amount': np.array([10.00], dtype=np.float64),\n 'declared_date': np.array([events[-3].dt], dtype='datetime64[ns]'),\n 'ex_date': np.array([events[-2].dt], dtype='datetime64[ns]'),\n 'record_date': np.array([events[0].dt], dtype='datetime64[ns]'),\n 'pay_date': np.array(\n [self.trading_calendar.next_session_label(\n self.trading_calendar.minute_to_session_label(\n events[-1].dt\n )\n )],\n dtype='datetime64[ns]'),\n })\n writer.write(splits, mergers, dividends)\n adjustment_reader = SQLiteAdjustmentReader(dbpath)\n\n # Set the last day to be the last event\n sim_params = create_simulation_parameters(\n num_days=6,\n capital_base=10e3,\n start=self.sim_params.start_session,\n end=self.sim_params.end_session\n )\n\n sim_params = sim_params.create_new(\n sim_params.start_session,\n events[-1].dt\n 
)\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n sim_params,\n {1: events},\n )\n data_portal._adjustment_reader = adjustment_reader\n # Simulate a transaction being filled prior to the ex_date.\n txns = [create_txn(self.asset1, events[0].dt, 10.0, 100)]\n results = calculate_results(\n sim_params,\n self.env,\n data_portal,\n txns=txns,\n )\n\n self.assertEqual(len(results), 6)\n cumulative_returns = \\\n [event['cumulative_perf']['returns'] for event in results]\n self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n daily_returns = [event['daily_perf']['returns'] for event in results]\n self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n cash_flows = [event['daily_perf']['capital_used'] for event in results]\n self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0, 0])\n cumulative_cash_flows = \\\n [event['cumulative_perf']['capital_used'] for event in results]\n self.assertEqual(cumulative_cash_flows,\n [-1000, -1000, -1000, -1000, -1000, -1000])\n\n\nclass TestDividendPerformanceHolidayStyle(TestDividendPerformance):\n\n # The holiday tests begins the simulation on the day\n # before Thanksgiving, so that the next trading day is\n # two days ahead. Any tests that hard code events\n # to be start + oneday will fail, since those events will\n # be skipped by the simulation.\n START_DATE = pd.Timestamp('2003-11-30', tz='utc')\n END_DATE = pd.Timestamp('2003-12-08', tz='utc')\n\n\nclass TestPositionPerformance(WithInstanceTmpDir, WithTradingCalendars,\n ZiplineTestCase):\n\n def create_environment_stuff(self,\n num_days=4,\n sids=[1, 2],\n futures_sids=[3]):\n start = pd.Timestamp('2006-01-01', tz='utc')\n end = start + timedelta(days=num_days * 2)\n equities = make_simple_equity_info(sids, start, end)\n futures = pd.DataFrame.from_dict(\n {\n sid: {\n 'start_date': start,\n 'end_date': end,\n 'multiplier': 100,\n 'exchange': \"TEST\",\n }\n for sid in futures_sids\n },\n orient='index',\n )\n self.env = self.enter_instance_context(tmp_trading_env(\n equities=equities,\n futures=futures,\n ))\n self.sim_params = create_simulation_parameters(\n start=start,\n num_days=num_days,\n )\n\n self.finder = self.env.asset_finder\n self.asset1 = self.env.asset_finder.retrieve_asset(1)\n self.asset2 = self.env.asset_finder.retrieve_asset(2)\n self.asset3 = self.env.asset_finder.retrieve_asset(3)\n\n def test_long_short_positions(self):\n \"\"\"\n start with $1000\n buy 100 stock1 shares at $10\n sell short 100 stock2 shares at $10\n stock1 then goes down to $9\n stock2 goes to $11\n \"\"\"\n self.create_environment_stuff()\n\n trades_1 = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 9],\n [100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n trades_2 = factory.create_trade_history(\n self.asset2,\n [10, 10, 10, 11],\n [100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades_1, 2: trades_2}\n )\n\n txn1 = create_txn(self.asset1, trades_1[0].dt, 10.0, 100)\n txn2 = create_txn(self.asset2, trades_1[0].dt, 10.0, -100)\n\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency)\n pp.position_tracker = pt\n 
pt.execute_transaction(txn1)\n pp.handle_execution(txn1)\n pt.execute_transaction(txn2)\n pp.handle_execution(txn2)\n\n dt = trades_1[-2].dt\n pt.sync_last_sale_prices(dt, False, data_portal)\n\n pp.calculate_performance()\n\n check_perf_period(\n pp,\n gross_leverage=2.0,\n net_leverage=0.0,\n long_exposure=1000.0,\n longs_count=1,\n short_exposure=-1000.0,\n shorts_count=1)\n # Validate that the account attributes were updated.\n account = pp.as_account()\n check_account(account,\n settled_cash=1000.0,\n equity_with_loan=1000.0,\n total_positions_value=0.0,\n total_positions_exposure=0.0,\n regt_equity=1000.0,\n available_funds=1000.0,\n excess_liquidity=1000.0,\n cushion=1.0,\n leverage=2.0,\n net_leverage=0.0,\n net_liquidation=1000.0)\n\n dt = trades_1[-1].dt\n pt.sync_last_sale_prices(dt, False, data_portal)\n\n pp.calculate_performance()\n\n # Validate that the account attributes were updated.\n account = pp.as_account()\n\n check_perf_period(\n pp,\n gross_leverage=2.5,\n net_leverage=-0.25,\n long_exposure=900.0,\n longs_count=1,\n short_exposure=-1100.0,\n shorts_count=1)\n\n check_account(account,\n settled_cash=1000.0,\n equity_with_loan=800.0,\n total_positions_value=-200.0,\n total_positions_exposure=-200.0,\n regt_equity=1000.0,\n available_funds=1000.0,\n excess_liquidity=1000.0,\n cushion=1.25,\n leverage=2.5,\n net_leverage=-0.25,\n net_liquidation=800.0)\n\n def test_levered_long_position(self):\n \"\"\"\n start with $1,000, then buy 1000 shares at $10.\n price goes to $11\n \"\"\"\n # post some trades in the market\n\n self.create_environment_stuff()\n\n trades = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 11],\n [100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades})\n txn = create_txn(self.asset1, trades[1].dt, 10.0, 1000)\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency)\n pp.position_tracker = pt\n\n pt.execute_transaction(txn)\n pp.handle_execution(txn)\n\n pp.calculate_performance()\n\n check_perf_period(\n pp,\n gross_leverage=10.0,\n net_leverage=10.0,\n long_exposure=10000.0,\n longs_count=1,\n short_exposure=0.0,\n shorts_count=0)\n\n # Validate that the account attributes were updated.\n pt.sync_last_sale_prices(trades[-2].dt, False, data_portal)\n\n # Validate that the account attributes were updated.\n account = pp.as_account()\n check_account(account,\n settled_cash=-9000.0,\n equity_with_loan=1000.0,\n total_positions_value=10000.0,\n total_positions_exposure=10000.0,\n regt_equity=-9000.0,\n available_funds=-9000.0,\n excess_liquidity=-9000.0,\n cushion=-9.0,\n leverage=10.0,\n net_leverage=10.0,\n net_liquidation=1000.0)\n\n # now simulate a price jump to $11\n pt.sync_last_sale_prices(trades[-1].dt, False, data_portal)\n\n pp.calculate_performance()\n\n check_perf_period(\n pp,\n gross_leverage=5.5,\n net_leverage=5.5,\n long_exposure=11000.0,\n longs_count=1,\n short_exposure=0.0,\n shorts_count=0)\n\n # Validate that the account attributes were updated.\n account = pp.as_account()\n\n check_account(account,\n settled_cash=-9000.0,\n equity_with_loan=2000.0,\n total_positions_value=11000.0,\n total_positions_exposure=11000.0,\n regt_equity=-9000.0,\n available_funds=-9000.0,\n excess_liquidity=-9000.0,\n 
cushion=-4.5,\n leverage=5.5,\n net_leverage=5.5,\n net_liquidation=2000.0)\n\n def test_long_position(self):\n \"\"\"\n verify that the performance period calculates properly for a\n single buy transaction\n \"\"\"\n self.create_environment_stuff()\n\n # post some trades in the market\n trades = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 11],\n [100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades})\n txn = create_txn(self.asset1, trades[1].dt, 10.0, 100)\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency,\n period_open=self.sim_params.start_session,\n period_close=self.sim_params.end_session)\n pp.position_tracker = pt\n\n pt.execute_transaction(txn)\n pp.handle_execution(txn)\n\n # This verifies that the last sale price is being correctly\n # set in the positions. If this is not the case then returns can\n # incorrectly show as sharply dipping if a transaction arrives\n # before a trade. This is caused by returns being based on holding\n # stocks with a last sale price of 0.\n self.assertEqual(pp.positions[1].last_sale_price, 10.0)\n\n pt.sync_last_sale_prices(trades[-1].dt, False, data_portal)\n\n pp.calculate_performance()\n\n self.assertEqual(\n pp.cash_flow,\n -1 * txn.price * txn.amount,\n \"capital used should be equal to the opposite of the transaction \\\n cost of sole txn in test\"\n )\n\n self.assertEqual(\n len(pp.positions),\n 1,\n \"should be just one position\")\n\n self.assertEqual(\n pp.positions[1].sid,\n txn.sid,\n \"position should be in security with id 1\")\n\n self.assertEqual(\n pp.positions[1].amount,\n txn.amount,\n \"should have a position of {sharecount} shares\".format(\n sharecount=txn.amount\n )\n )\n\n self.assertEqual(\n pp.positions[1].cost_basis,\n txn.price,\n \"should have a cost basis of 10\"\n )\n\n self.assertEqual(\n pp.positions[1].last_sale_price,\n trades[-1].price,\n \"last sale should be same as last trade. 
\\\n expected {exp} actual {act}\".format(\n exp=trades[-1].price,\n act=pp.positions[1].last_sale_price)\n )\n\n self.assertEqual(\n pp.ending_value,\n 1100,\n \"ending value should be price of last trade times number of \\\n shares in position\"\n )\n\n self.assertEqual(pp.pnl, 100, \"gain of 1 on 100 shares should be 100\")\n\n check_perf_period(\n pp,\n gross_leverage=1.0,\n net_leverage=1.0,\n long_exposure=1100.0,\n longs_count=1,\n short_exposure=0.0,\n shorts_count=0)\n\n # Validate that the account attributes were updated.\n account = pp.as_account()\n check_account(account,\n settled_cash=0.0,\n equity_with_loan=1100.0,\n total_positions_value=1100.0,\n total_positions_exposure=1100.0,\n regt_equity=0.0,\n available_funds=0.0,\n excess_liquidity=0.0,\n cushion=0.0,\n leverage=1.0,\n net_leverage=1.0,\n net_liquidation=1100.0)\n\n def test_short_position(self):\n \"\"\"verify that the performance period calculates properly for a \\\nsingle short-sale transaction\"\"\"\n self.create_environment_stuff(num_days=6)\n\n trades = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 11, 10, 9],\n [100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n trades_1 = trades[:-2]\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades})\n\n txn = create_txn(self.asset1, trades[1].dt, 10.0, -100)\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(\n 1000.0, self.env.asset_finder,\n self.sim_params.data_frequency)\n pp.position_tracker = pt\n\n pt.execute_transaction(txn)\n pp.handle_execution(txn)\n\n pt.sync_last_sale_prices(trades_1[-1].dt, False, data_portal)\n\n pp.calculate_performance()\n\n self.assertEqual(\n pp.cash_flow,\n -1 * txn.price * txn.amount,\n \"capital used should be equal to the opposite of the transaction\\\n cost of sole txn in test\"\n )\n\n self.assertEqual(\n len(pp.positions),\n 1,\n \"should be just one position\")\n\n self.assertEqual(\n pp.positions[1].sid,\n txn.sid,\n \"position should be in security from the transaction\"\n )\n\n self.assertEqual(\n pp.positions[1].amount,\n -100,\n \"should have a position of -100 shares\"\n )\n\n self.assertEqual(\n pp.positions[1].cost_basis,\n txn.price,\n \"should have a cost basis of 10\"\n )\n\n self.assertEqual(\n pp.positions[1].last_sale_price,\n trades_1[-1].price,\n \"last sale should be price of last trade\"\n )\n\n self.assertEqual(\n pp.ending_value,\n -1100,\n \"ending value should be price of last trade times number of \\\n shares in position\"\n )\n\n self.assertEqual(pp.pnl, -100, \"gain of 1 on 100 shares should be 100\")\n\n # simulate additional trades, and ensure that the position value\n # reflects the new price\n trades_2 = trades[-2:]\n\n # simulate a rollover to a new period\n pp.rollover()\n\n pt.sync_last_sale_prices(trades[-1].dt, False, data_portal)\n\n pp.calculate_performance()\n\n self.assertEqual(\n pp.cash_flow,\n 0,\n \"capital used should be zero, there were no transactions in \\\n performance period\"\n )\n\n self.assertEqual(\n len(pp.positions),\n 1,\n \"should be just one position\"\n )\n\n self.assertEqual(\n pp.positions[1].sid,\n txn.sid,\n \"position should be in security from the transaction\"\n )\n\n self.assertEqual(\n pp.positions[1].amount,\n -100,\n \"should have a position of -100 shares\"\n )\n\n self.assertEqual(\n pp.positions[1].cost_basis,\n 
txn.price,\n \"should have a cost basis of 10\"\n )\n\n self.assertEqual(\n pp.positions[1].last_sale_price,\n trades_2[-1].price,\n \"last sale should be price of last trade\"\n )\n\n self.assertEqual(\n pp.ending_value,\n -900,\n \"ending value should be price of last trade times number of \\\n shares in position\")\n\n self.assertEqual(\n pp.pnl,\n 200,\n \"drop of 2 on -100 shares should be 200\"\n )\n\n # now run a performance period encompassing the entire trade sample.\n ptTotal = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n ppTotal = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency)\n ppTotal.position_tracker = pt\n\n ptTotal.execute_transaction(txn)\n ppTotal.handle_execution(txn)\n\n ptTotal.sync_last_sale_prices(trades[-1].dt, False, data_portal)\n\n ppTotal.calculate_performance()\n\n self.assertEqual(\n ppTotal.cash_flow,\n -1 * txn.price * txn.amount,\n \"capital used should be equal to the opposite of the transaction \\\ncost of sole txn in test\"\n )\n\n self.assertEqual(\n len(ppTotal.positions),\n 1,\n \"should be just one position\"\n )\n self.assertEqual(\n ppTotal.positions[1].sid,\n txn.sid,\n \"position should be in security from the transaction\"\n )\n\n self.assertEqual(\n ppTotal.positions[1].amount,\n -100,\n \"should have a position of -100 shares\"\n )\n\n self.assertEqual(\n ppTotal.positions[1].cost_basis,\n txn.price,\n \"should have a cost basis of 10\"\n )\n\n self.assertEqual(\n ppTotal.positions[1].last_sale_price,\n trades_2[-1].price,\n \"last sale should be price of last trade\"\n )\n\n self.assertEqual(\n ppTotal.ending_value,\n -900,\n \"ending value should be price of last trade times number of \\\n shares in position\")\n\n self.assertEqual(\n ppTotal.pnl,\n 100,\n \"drop of 1 on -100 shares should be 100\"\n )\n\n check_perf_period(\n pp,\n gross_leverage=0.8181,\n net_leverage=-0.8181,\n long_exposure=0.0,\n longs_count=0,\n short_exposure=-900.0,\n shorts_count=1)\n\n # Validate that the account attributes.\n account = ppTotal.as_account()\n check_account(account,\n settled_cash=2000.0,\n equity_with_loan=1100.0,\n total_positions_value=-900.0,\n total_positions_exposure=-900.0,\n regt_equity=2000.0,\n available_funds=2000.0,\n excess_liquidity=2000.0,\n cushion=1.8181,\n leverage=0.8181,\n net_leverage=-0.8181,\n net_liquidation=1100.0)\n\n def test_covering_short(self):\n \"\"\"verify performance where short is bought and covered, and shares \\\ntrade after cover\"\"\"\n self.create_environment_stuff(num_days=10)\n\n trades = factory.create_trade_history(\n self.asset1,\n [10, 10, 10, 11, 9, 8, 7, 8, 9, 10],\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades})\n\n short_txn = create_txn(self.asset1, trades[1].dt, 10.0, -100)\n cover_txn = create_txn(self.asset1, trades[6].dt, 7.0, 100)\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency)\n pp.position_tracker = pt\n\n pt.execute_transaction(short_txn)\n pp.handle_execution(short_txn)\n pt.execute_transaction(cover_txn)\n pp.handle_execution(cover_txn)\n\n pt.sync_last_sale_prices(trades[-1].dt, False, data_portal)\n\n pp.calculate_performance()\n\n 
short_txn_cost = short_txn.price * short_txn.amount\n cover_txn_cost = cover_txn.price * cover_txn.amount\n\n self.assertEqual(\n pp.cash_flow,\n -1 * short_txn_cost - cover_txn_cost,\n \"capital used should be equal to the net transaction costs\"\n )\n\n self.assertEqual(\n len(pp.positions),\n 0,\n \"should be zero positions\"\n )\n\n self.assertEqual(\n pp.ending_value,\n 0,\n \"ending value should be price of last trade times number of \\\nshares in position\"\n )\n\n self.assertEqual(\n pp.pnl,\n 300,\n \"gain of 1 on 100 shares should be 300\"\n )\n\n check_perf_period(\n pp,\n gross_leverage=0.0,\n net_leverage=0.0,\n long_exposure=0.0,\n longs_count=0,\n short_exposure=0.0,\n shorts_count=0)\n\n account = pp.as_account()\n check_account(account,\n settled_cash=1300.0,\n equity_with_loan=1300.0,\n total_positions_value=0.0,\n total_positions_exposure=0.0,\n regt_equity=1300.0,\n available_funds=1300.0,\n excess_liquidity=1300.0,\n cushion=1.0,\n leverage=0.0,\n net_leverage=0.0,\n net_liquidation=1300.0)\n\n def test_cost_basis_calc(self):\n self.create_environment_stuff(num_days=5)\n\n history_args = (\n self.asset1,\n [10, 11, 11, 12, 10],\n [100, 100, 100, 100, 100],\n oneday,\n self.sim_params,\n self.trading_calendar,\n )\n trades = factory.create_trade_history(*history_args)\n transactions = factory.create_txn_history(*history_args)[:4]\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades})\n\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(\n 1000.0,\n self.env.asset_finder,\n self.sim_params.data_frequency,\n period_open=self.sim_params.start_session,\n period_close=self.sim_params.sessions[-1]\n )\n pp.position_tracker = pt\n\n average_cost = 0\n for i, txn in enumerate(transactions):\n pt.execute_transaction(txn)\n pp.handle_execution(txn)\n average_cost = (average_cost * i + txn.price) / (i + 1)\n self.assertEqual(pt.positions[1].cost_basis, average_cost)\n\n dt = trades[-2].dt\n self.assertEqual(\n pt.positions[1].last_sale_price,\n trades[-2].price,\n \"should have a last sale of 12, got {val}\".format(\n val=pt.positions[1].last_sale_price)\n )\n\n self.assertEqual(\n pt.positions[1].cost_basis,\n 11,\n \"should have a cost basis of 11\"\n )\n\n pt.sync_last_sale_prices(dt, False, data_portal)\n\n pp.calculate_performance()\n\n self.assertEqual(\n pp.pnl,\n 400\n )\n\n down_tick = trades[-1]\n sale_txn = create_txn(self.asset1, down_tick.dt, 10.0, -100)\n pp.rollover()\n\n pt.execute_transaction(sale_txn)\n pp.handle_execution(sale_txn)\n\n dt = down_tick.dt\n pt.sync_last_sale_prices(dt, False, data_portal)\n\n pp.calculate_performance()\n self.assertEqual(\n pp.positions[1].last_sale_price,\n 10,\n \"should have a last sale of 10, was {val}\".format(\n val=pp.positions[1].last_sale_price)\n )\n\n self.assertEqual(\n pp.positions[1].cost_basis,\n 11,\n \"should have a cost basis of 11\"\n )\n\n self.assertEqual(pp.pnl, -800, \"this period goes from +400 to -400\")\n\n pt3 = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp3 = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency)\n pp3.position_tracker = pt3\n\n average_cost = 0\n for i, txn in enumerate(transactions):\n pt3.execute_transaction(txn)\n pp3.handle_execution(txn)\n average_cost = (average_cost * i + txn.price) / (i + 1)\n self.assertEqual(pp3.positions[1].cost_basis, 
average_cost)\n\n pt3.execute_transaction(sale_txn)\n pp3.handle_execution(sale_txn)\n\n trades.append(down_tick)\n pt3.sync_last_sale_prices(trades[-1].dt, False, data_portal)\n\n pp3.calculate_performance()\n self.assertEqual(\n pp3.positions[1].last_sale_price,\n 10,\n \"should have a last sale of 10\"\n )\n\n self.assertEqual(\n pp3.positions[1].cost_basis,\n 11,\n \"should have a cost basis of 11\"\n )\n\n self.assertEqual(\n pp3.pnl,\n -400,\n \"should be -400 for all trades and transactions in period\"\n )\n\n def test_cost_basis_calc_close_pos(self):\n self.create_environment_stuff(num_days=8)\n\n history_args = (\n 1,\n [10, 9, 11, 8, 9, 12, 13, 14],\n [200, -100, -100, 100, -300, 100, 500, 400],\n oneday,\n self.sim_params,\n self.trading_calendar,\n )\n cost_bases = [10, 10, 0, 8, 9, 9, 13, 13.5]\n\n transactions = factory.create_txn_history(*history_args)\n\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency)\n pp.position_tracker = pt\n\n for idx, (txn, cb) in enumerate(zip(transactions, cost_bases)):\n pt.execute_transaction(txn)\n pp.handle_execution(txn)\n\n if idx == 2:\n # buy 200, sell 100, sell 100 = 0 shares = no position\n self.assertNotIn(1, pp.positions)\n else:\n self.assertEqual(pp.positions[1].cost_basis, cb)\n\n pp.calculate_performance()\n\n self.assertEqual(pp.positions[1].cost_basis, cost_bases[-1])\n\n def test_capital_change_intra_period(self):\n self.create_environment_stuff()\n\n # post some trades in the market\n trades = factory.create_trade_history(\n self.asset1,\n [10.0, 11.0, 12.0, 13.0],\n [100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades})\n txn = create_txn(self.asset1, trades[0].dt, 10.0, 100)\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency,\n period_open=self.sim_params.start_session,\n period_close=self.sim_params.end_session)\n pp.position_tracker = pt\n\n pt.execute_transaction(txn)\n pp.handle_execution(txn)\n\n # sync prices before we introduce a capital change\n pt.sync_last_sale_prices(trades[2].dt, False, data_portal)\n\n pp.initialize_subperiod_divider()\n pp.set_current_subperiod_starting_values(1000.0)\n\n pt.sync_last_sale_prices(trades[-1].dt, False, data_portal)\n pp.calculate_performance()\n\n self.assertAlmostEqual(pp.returns, 1200/1000 * 2300/2200 - 1)\n self.assertAlmostEqual(pp.pnl, 300)\n self.assertAlmostEqual(pp.cash_flow, -1000)\n\n def test_capital_change_inter_period(self):\n self.create_environment_stuff()\n\n # post some trades in the market\n trades = factory.create_trade_history(\n self.asset1,\n [10.0, 11.0, 12.0, 13.0],\n [100, 100, 100, 100],\n oneday,\n self.sim_params,\n trading_calendar=self.trading_calendar,\n )\n\n data_portal = create_data_portal_from_trade_history(\n self.env.asset_finder,\n self.trading_calendar,\n self.instance_tmpdir,\n self.sim_params,\n {1: trades})\n txn = create_txn(self.asset1, trades[0].dt, 10.0, 100)\n pt = perf.PositionTracker(self.env.asset_finder,\n self.sim_params.data_frequency)\n pp = perf.PerformancePeriod(1000.0, self.env.asset_finder,\n self.sim_params.data_frequency,\n 
period_open=self.sim_params.start_session,\n period_close=self.sim_params.end_session)\n pp.position_tracker = pt\n\n pt.execute_transaction(txn)\n pp.handle_execution(txn)\n pt.sync_last_sale_prices(trades[0].dt, False, data_portal)\n pp.calculate_performance()\n self.assertAlmostEqual(pp.returns, 0)\n self.assertAlmostEqual(pp.pnl, 0)\n self.assertAlmostEqual(pp.cash_flow, -1000)\n pp.rollover()\n\n pt.sync_last_sale_prices(trades[1].dt, False, data_portal)\n pp.calculate_performance()\n self.assertAlmostEqual(pp.returns, 1100.0/1000.0 - 1)\n self.assertAlmostEqual(pp.pnl, 100)\n self.assertAlmostEqual(pp.cash_flow, 0)\n pp.rollover()\n\n pp.adjust_period_starting_capital(1000)\n pt.sync_last_sale_prices(trades[2].dt, False, data_portal)\n pp.calculate_performance()\n self.assertAlmostEqual(pp.returns, 2200.0/2100.0 - 1)\n self.assertAlmostEqual(pp.pnl, 100)\n self.assertAlmostEqual(pp.cash_flow, 0)\n pp.rollover()\n\n pt.sync_last_sale_prices(trades[3].dt, False, data_portal)\n pp.calculate_performance()\n self.assertAlmostEqual(pp.returns, 2300.0/2200.0 - 1)\n self.assertAlmostEqual(pp.pnl, 100)\n self.assertAlmostEqual(pp.cash_flow, 0)\n\n\nclass TestPositionTracker(WithTradingEnvironment,\n WithInstanceTmpDir,\n ZiplineTestCase):\n ASSET_FINDER_EQUITY_SIDS = 1, 2\n\n @classmethod\n def make_futures_info(cls):\n return pd.DataFrame.from_dict(\n {\n 3: {'multiplier': 1000, 'exchange': 'TEST'},\n 4: {'multiplier': 1000, 'exchange': 'TEST'},\n 1032201401: {'multiplier': 50, 'exchange': 'TEST'},\n },\n orient='index',\n )\n\n def test_empty_positions(self):\n \"\"\"\n make sure all the empty position stats return a numeric 0\n\n Originally this bug was due to np.dot([], []) returning\n np.bool_(False)\n \"\"\"\n sim_params = factory.create_simulation_parameters(num_days=4)\n\n pt = perf.PositionTracker(self.env.asset_finder,\n sim_params.data_frequency)\n pos_stats = pt.stats()\n\n stats = [\n 'net_value',\n 'net_exposure',\n 'gross_value',\n 'gross_exposure',\n 'short_value',\n 'short_exposure',\n 'shorts_count',\n 'long_value',\n 'long_exposure',\n 'longs_count',\n ]\n for name in stats:\n val = getattr(pos_stats, name)\n self.assertEquals(val, 0)\n self.assertNotIsInstance(val, (bool, np.bool_))\n\n def test_position_values_and_exposures(self):\n pt = perf.PositionTracker(self.env.asset_finder, None)\n dt = pd.Timestamp(\"1984/03/06 3:00PM\")\n pos1 = perf.Position(1, amount=np.float64(10.0),\n last_sale_date=dt, last_sale_price=10)\n pos2 = perf.Position(2, amount=np.float64(-20.0),\n last_sale_date=dt, last_sale_price=10)\n pos3 = perf.Position(3, amount=np.float64(30.0),\n last_sale_date=dt, last_sale_price=10)\n pos4 = perf.Position(4, amount=np.float64(-40.0),\n last_sale_date=dt, last_sale_price=10)\n pt.update_positions({1: pos1, 2: pos2, 3: pos3, 4: pos4})\n\n # Test long-only methods\n pos_stats = pt.stats()\n self.assertEqual(100, pos_stats.long_value)\n self.assertEqual(100 + 300000, pos_stats.long_exposure)\n self.assertEqual(2, pos_stats.longs_count)\n\n # Test short-only methods\n self.assertEqual(-200, pos_stats.short_value)\n self.assertEqual(-200 - 400000, pos_stats.short_exposure)\n self.assertEqual(2, pos_stats.shorts_count)\n\n # Test gross and net values\n self.assertEqual(100 + 200, pos_stats.gross_value)\n self.assertEqual(100 - 200, pos_stats.net_value)\n\n # Test gross and net exposures\n self.assertEqual(100 + 200 + 300000 + 400000, pos_stats.gross_exposure)\n self.assertEqual(100 - 200 + 300000 - 400000, pos_stats.net_exposure)\n\n def 
test_update_positions(self):\n pt = perf.PositionTracker(self.env.asset_finder, None)\n dt = pd.Timestamp(\"2014/01/01 3:00PM\")\n pos1 = perf.Position(1, amount=np.float64(10.0),\n last_sale_date=dt, last_sale_price=10)\n pos2 = perf.Position(2, amount=np.float64(-20.0),\n last_sale_date=dt, last_sale_price=10)\n pos3 = perf.Position(1032201401, amount=np.float64(30.0),\n last_sale_date=dt, last_sale_price=100)\n\n # Call update_positions twice. When the second call is made,\n # self.positions will already contain data. The order of this data\n # needs to be preserved so that it is consistent with the order of the\n # data stored in the multipliers OrderedDict()'s. If self.positions\n # were to be stored as a dict, then its order could change in arbitrary\n # ways when the second update_positions call is made. Hence we also\n # store it as an OrderedDict.\n pt.update_positions({1: pos1, 1032201401: pos3})\n pt.update_positions({2: pos2})\n\n pos_stats = pt.stats()\n # Test long-only methods\n self.assertEqual(100, pos_stats.long_value)\n # 150,000 = 30 * 100 * 50 (amount * last_sale_price * multiplier)\n self.assertEqual(100 + 150000, pos_stats.long_exposure)\n self.assertEqual(2, pos_stats.longs_count)\n\n # Test short-only methods\n self.assertEqual(-200, pos_stats.short_value)\n self.assertEqual(-200, pos_stats.short_exposure)\n self.assertEqual(1, pos_stats.shorts_count)\n\n # Test gross and net values\n self.assertEqual(100 + 200, pos_stats.gross_value)\n self.assertEqual(100 - 200, pos_stats.net_value)\n\n # Test gross and net exposures\n self.assertEqual(100 + 150000 + 200, pos_stats.gross_exposure)\n self.assertEqual(100 + 150000 - 200, pos_stats.net_exposure)\n\n def test_close_position(self):\n future_sid = 1032201401\n equity_sid = 1\n pt = perf.PositionTracker(self.env.asset_finder, None)\n dt = pd.Timestamp('2017/01/04 3:00PM')\n\n pos1 = perf.Position(\n sid=future_sid,\n amount=np.float64(30.0),\n last_sale_date=dt,\n last_sale_price=100,\n )\n pos2 = perf.Position(\n sid=equity_sid,\n amount=np.float64(10.0),\n last_sale_date=dt,\n last_sale_price=10,\n )\n\n # Update the positions dictionary with `future_sid` first. The order\n # matters because it affects the multipliers dictionaries, which are\n # OrderedDicts. If `future_sid` is not removed from the multipliers\n # dictionaries, equities will hit the incorrect multiplier when\n # computing `pt.stats()`.\n pt.update_positions({future_sid: pos1, equity_sid: pos2})\n\n asset_to_close = self.env.asset_finder.retrieve_asset(future_sid)\n txn = create_txn(asset_to_close, dt, 100, -30)\n pt.execute_transaction(txn)\n\n pos_stats = pt.stats()\n\n # Test long-only methods.\n self.assertEqual(100, pos_stats.long_value)\n self.assertEqual(100, pos_stats.long_exposure)\n self.assertEqual(1, pos_stats.longs_count)\n\n # Test short-only methods.\n self.assertEqual(0, pos_stats.short_value)\n self.assertEqual(0, pos_stats.short_exposure)\n self.assertEqual(0, pos_stats.shorts_count)\n\n # Test gross and net values.\n self.assertEqual(100, pos_stats.gross_value)\n self.assertEqual(100, pos_stats.net_value)\n\n # Test gross and net exposures.\n self.assertEqual(100, pos_stats.gross_exposure)\n self.assertEqual(100, pos_stats.net_exposure)\n",
"import warnings\n\nfrom mock import patch\nimport numpy as np\nimport pandas as pd\nfrom pandas.core.common import PerformanceWarning\n\nfrom zipline import TradingAlgorithm\nfrom zipline.finance.trading import SimulationParameters\nfrom zipline.testing import (\n MockDailyBarReader,\n create_daily_df_for_asset,\n create_minute_df_for_asset,\n str_to_seconds,\n)\nfrom zipline.testing.fixtures import (\n WithCreateBarData,\n WithDataPortal,\n WithSimParams,\n ZiplineTestCase,\n)\nfrom zipline.zipline_warnings import ZiplineDeprecationWarning\n\nsimple_algo = \"\"\"\nfrom zipline.api import sid, order\ndef initialize(context):\n pass\n\ndef handle_data(context, data):\n assert sid(1) in data\n assert sid(2) in data\n assert len(data) == 3\n for asset in data:\n pass\n\"\"\"\n\nhistory_algo = \"\"\"\nfrom zipline.api import sid, history\n\ndef initialize(context):\n context.sid1 = sid(1)\n\ndef handle_data(context, data):\n context.history_window = history(5, \"1m\", \"volume\")\n\"\"\"\n\nhistory_bts_algo = \"\"\"\nfrom zipline.api import sid, history, record\n\ndef initialize(context):\n context.sid3 = sid(3)\n context.num_bts = 0\n\ndef before_trading_start(context, data):\n context.num_bts += 1\n\n # Get history at the second BTS (beginning of second day)\n if context.num_bts == 2:\n record(history=history(5, \"1m\", \"volume\"))\n\ndef handle_data(context, data):\n pass\n\"\"\"\n\nsimple_transforms_algo = \"\"\"\nfrom zipline.api import sid\ndef initialize(context):\n context.count = 0\n\ndef handle_data(context, data):\n if context.count == 2:\n context.mavg = data[sid(1)].mavg(5)\n context.vwap = data[sid(1)].vwap(5)\n context.stddev = data[sid(1)].stddev(5)\n context.returns = data[sid(1)].returns()\n\n context.count += 1\n\"\"\"\n\nmanipulation_algo = \"\"\"\ndef initialize(context):\n context.asset1 = sid(1)\n context.asset2 = sid(2)\n\ndef handle_data(context, data):\n assert len(data) == 2\n assert len(data.keys()) == 2\n assert context.asset1 in data.keys()\n assert context.asset2 in data.keys()\n\"\"\"\n\nsid_accessor_algo = \"\"\"\nfrom zipline.api import sid\n\ndef initialize(context):\n context.asset1 = sid(1)\n\ndef handle_data(context,data):\n assert data[sid(1)].sid == context.asset1\n assert data[sid(1)][\"sid\"] == context.asset1\n\"\"\"\n\ndata_items_algo = \"\"\"\nfrom zipline.api import sid\n\ndef initialize(context):\n context.asset1 = sid(1)\n context.asset2 = sid(2)\n\ndef handle_data(context, data):\n iter_list = list(data.iteritems())\n items_list = data.items()\n assert iter_list == items_list\n\"\"\"\n\n\nclass TestAPIShim(WithCreateBarData,\n WithDataPortal,\n WithSimParams,\n ZiplineTestCase,\n ):\n START_DATE = pd.Timestamp(\"2016-01-05\", tz='UTC')\n END_DATE = pd.Timestamp(\"2016-01-28\", tz='UTC')\n SIM_PARAMS_DATA_FREQUENCY = 'minute'\n\n sids = ASSET_FINDER_EQUITY_SIDS = 1, 2, 3\n\n @classmethod\n def make_equity_minute_bar_data(cls):\n for sid in cls.sids:\n yield sid, create_minute_df_for_asset(\n cls.trading_calendar,\n cls.SIM_PARAMS_START,\n cls.SIM_PARAMS_END,\n )\n\n @classmethod\n def make_equity_daily_bar_data(cls):\n for sid in cls.sids:\n yield sid, create_daily_df_for_asset(\n cls.trading_calendar,\n cls.SIM_PARAMS_START,\n cls.SIM_PARAMS_END,\n )\n\n @classmethod\n def make_splits_data(cls):\n return pd.DataFrame([\n {\n 'effective_date': str_to_seconds('2016-01-06'),\n 'ratio': 0.5,\n 'sid': 3,\n }\n ])\n\n @classmethod\n def make_adjustment_writer_equity_daily_bar_reader(cls):\n return MockDailyBarReader()\n\n @classmethod\n def 
init_class_fixtures(cls):\n super(TestAPIShim, cls).init_class_fixtures()\n\n cls.asset1 = cls.env.asset_finder.retrieve_asset(1)\n cls.asset2 = cls.env.asset_finder.retrieve_asset(2)\n cls.asset3 = cls.env.asset_finder.retrieve_asset(3)\n\n def create_algo(self, code, filename=None, sim_params=None):\n if sim_params is None:\n sim_params = self.sim_params\n\n return TradingAlgorithm(\n script=code,\n sim_params=sim_params,\n env=self.env,\n algo_filename=filename\n )\n\n def test_old_new_data_api_paths(self):\n \"\"\"\n Test that the new and old data APIs hit the same code paths.\n\n We want to ensure that the old data API(data[sid(N)].field and\n similar) and the new data API(data.current(sid(N), field) and\n similar) hit the same code paths on the DataPortal.\n \"\"\"\n test_start_minute = self.trading_calendar.minutes_for_session(\n self.sim_params.sessions[0]\n )[1]\n test_end_minute = self.trading_calendar.minutes_for_session(\n self.sim_params.sessions[0]\n )[-1]\n bar_data = self.create_bardata(\n lambda: test_end_minute,\n )\n ohlcvp_fields = [\n \"open\",\n \"high\",\n \"low\"\n \"close\",\n \"volume\",\n \"price\",\n ]\n spot_value_meth = 'zipline.data.data_portal.DataPortal.get_spot_value'\n\n def assert_get_spot_value_called(fun, field):\n \"\"\"\n Assert that get_spot_value was called during the execution of fun.\n\n Takes in a function fun and a string field.\n \"\"\"\n with patch(spot_value_meth) as gsv:\n fun()\n gsv.assert_called_with(\n self.asset1,\n field,\n test_end_minute,\n 'minute'\n )\n # Ensure that data.current(sid(n), field) has the same behaviour as\n # data[sid(n)].field.\n for field in ohlcvp_fields:\n assert_get_spot_value_called(\n lambda: getattr(bar_data[self.asset1], field),\n field,\n )\n assert_get_spot_value_called(\n lambda: bar_data.current(self.asset1, field),\n field,\n )\n\n history_meth = 'zipline.data.data_portal.DataPortal.get_history_window'\n\n def assert_get_history_window_called(fun, is_legacy):\n \"\"\"\n Assert that get_history_window was called during fun().\n\n Takes in a function fun and a boolean is_legacy.\n \"\"\"\n with patch(history_meth) as ghw:\n fun()\n # Slightly hacky, but done to get around the fact that\n # history( explicitly passes an ffill param as the last arg,\n # while data.history doesn't.\n if is_legacy:\n ghw.assert_called_with(\n [self.asset1, self.asset2, self.asset3],\n test_end_minute,\n 5,\n \"1m\",\n \"volume\",\n True\n )\n else:\n ghw.assert_called_with(\n [self.asset1, self.asset2, self.asset3],\n test_end_minute,\n 5,\n \"1m\",\n \"volume\",\n )\n\n test_sim_params = SimulationParameters(\n start_session=test_start_minute,\n end_session=test_end_minute,\n data_frequency=\"minute\",\n trading_calendar=self.trading_calendar,\n )\n\n history_algorithm = self.create_algo(\n history_algo,\n sim_params=test_sim_params\n )\n assert_get_history_window_called(\n lambda: history_algorithm.run(self.data_portal),\n is_legacy=True\n )\n assert_get_history_window_called(\n lambda: bar_data.history(\n [self.asset1, self.asset2, self.asset3],\n \"volume\",\n 5,\n \"1m\"\n ),\n is_legacy=False\n )\n\n def test_sid_accessor(self):\n \"\"\"\n Test that we maintain backwards compat for sid access on a data object.\n\n We want to support both data[sid(24)].sid, as well as\n data[sid(24)][\"sid\"]. 
Since these are deprecated and will eventually\n cease to be supported, we also want to assert that we're seeing a\n deprecation warning.\n \"\"\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n warnings.simplefilter(\"default\", ZiplineDeprecationWarning)\n algo = self.create_algo(sid_accessor_algo)\n algo.run(self.data_portal)\n\n # Since we're already raising a warning on doing data[sid(x)],\n # we don't want to raise an extra warning on data[sid(x)].sid.\n self.assertEqual(2, len(w))\n\n # Check that both the warnings raised were in fact\n # ZiplineDeprecationWarnings\n for warning in w:\n self.assertEqual(\n ZiplineDeprecationWarning,\n warning.category\n )\n self.assertEqual(\n \"`data[sid(N)]` is deprecated. Use `data.current`.\",\n str(warning.message)\n )\n\n def test_data_items(self):\n \"\"\"\n Test that we maintain backwards compat for data.[items | iteritems].\n\n We also want to assert that we warn that iterating over the assets\n in `data` is deprecated.\n \"\"\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n warnings.simplefilter(\"default\", ZiplineDeprecationWarning)\n algo = self.create_algo(data_items_algo)\n algo.run(self.data_portal)\n\n self.assertEqual(4, len(w))\n\n for idx, warning in enumerate(w):\n self.assertEqual(\n ZiplineDeprecationWarning,\n warning.category\n )\n if idx % 2 == 0:\n self.assertEqual(\n \"Iterating over the assets in `data` is deprecated.\",\n str(warning.message)\n )\n else:\n self.assertEqual(\n \"`data[sid(N)]` is deprecated. Use `data.current`.\",\n str(warning.message)\n )\n\n def test_iterate_data(self):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n warnings.simplefilter(\"default\", ZiplineDeprecationWarning)\n\n algo = self.create_algo(simple_algo)\n algo.run(self.data_portal)\n\n self.assertEqual(4, len(w))\n\n line_nos = [warning.lineno for warning in w]\n self.assertEqual(4, len(set(line_nos)))\n\n for idx, warning in enumerate(w):\n self.assertEqual(ZiplineDeprecationWarning,\n warning.category)\n\n self.assertEqual(\"<string>\", warning.filename)\n self.assertEqual(line_nos[idx], warning.lineno)\n\n if idx < 2:\n self.assertEqual(\n \"Checking whether an asset is in data is deprecated.\",\n str(warning.message)\n )\n else:\n self.assertEqual(\n \"Iterating over the assets in `data` is deprecated.\",\n str(warning.message)\n )\n\n def test_history(self):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n warnings.simplefilter(\"default\", ZiplineDeprecationWarning)\n\n sim_params = self.sim_params.create_new(\n self.sim_params.sessions[1],\n self.sim_params.end_session\n )\n\n algo = self.create_algo(history_algo,\n sim_params=sim_params)\n algo.run(self.data_portal)\n\n self.assertEqual(1, len(w))\n self.assertEqual(ZiplineDeprecationWarning, w[0].category)\n self.assertEqual(\"<string>\", w[0].filename)\n self.assertEqual(8, w[0].lineno)\n self.assertEqual(\"The `history` method is deprecated. 
Use \"\n \"`data.history` instead.\", str(w[0].message))\n\n def test_old_new_history_bts_paths(self):\n \"\"\"\n Tests that calling history in before_trading_start gets us the correct\n values, which involves 1) calling data_portal.get_history_window as of\n the previous market minute, 2) getting adjustments between the previous\n market minute and the current time, and 3) applying those adjustments\n \"\"\"\n algo = self.create_algo(history_bts_algo)\n algo.run(self.data_portal)\n\n expected_vol_without_split = np.arange(386, 391) * 100\n expected_vol_with_split = np.arange(386, 391) * 200\n\n window = algo.recorded_vars['history']\n np.testing.assert_array_equal(window[self.asset1].values,\n expected_vol_without_split)\n np.testing.assert_array_equal(window[self.asset2].values,\n expected_vol_without_split)\n np.testing.assert_array_equal(window[self.asset3].values,\n expected_vol_with_split)\n\n def test_simple_transforms(self):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n warnings.simplefilter(\"default\", ZiplineDeprecationWarning)\n\n sim_params = SimulationParameters(\n start_session=self.sim_params.sessions[8],\n end_session=self.sim_params.sessions[-1],\n data_frequency=\"minute\",\n trading_calendar=self.trading_calendar,\n )\n\n algo = self.create_algo(simple_transforms_algo,\n sim_params=sim_params)\n algo.run(self.data_portal)\n\n self.assertEqual(8, len(w))\n transforms = [\"mavg\", \"vwap\", \"stddev\", \"returns\"]\n\n for idx, line_no in enumerate(range(8, 12)):\n warning1 = w[idx * 2]\n warning2 = w[(idx * 2) + 1]\n\n self.assertEqual(\"<string>\", warning1.filename)\n self.assertEqual(\"<string>\", warning2.filename)\n\n self.assertEqual(line_no, warning1.lineno)\n self.assertEqual(line_no, warning2.lineno)\n\n self.assertEqual(\"`data[sid(N)]` is deprecated. 
Use \"\n \"`data.current`.\",\n str(warning1.message))\n self.assertEqual(\"The `{0}` method is \"\n \"deprecated.\".format(transforms[idx]),\n str(warning2.message))\n\n # now verify the transform values\n # minute price\n # 2016-01-11 14:31:00+00:00 1561\n # ...\n # 2016-01-14 20:59:00+00:00 3119\n # 2016-01-14 21:00:00+00:00 3120\n # 2016-01-15 14:31:00+00:00 3121\n # 2016-01-15 14:32:00+00:00 3122\n # 2016-01-15 14:33:00+00:00 3123\n\n # volume\n # 2016-01-11 14:31:00+00:00 156100\n # ...\n # 2016-01-14 20:59:00+00:00 311900\n # 2016-01-14 21:00:00+00:00 312000\n # 2016-01-15 14:31:00+00:00 312100\n # 2016-01-15 14:32:00+00:00 312200\n # 2016-01-15 14:33:00+00:00 312300\n\n # daily price (last day built with minute data)\n # 2016-01-14 00:00:00+00:00 9\n # 2016-01-15 00:00:00+00:00 3123\n\n # mavg = average of all the prices = (1561 + 3123) / 2 = 2342\n # vwap = sum(price * volume) / sum(volumes)\n # = 889119531400.0 / 366054600.0\n # = 2428.9259891830343\n # stddev = stddev(price, ddof=1) = 451.3435498597493\n # returns = (todayprice - yesterdayprice) / yesterdayprice\n # = (3123 - 9) / 9 = 346\n self.assertEqual(2342, algo.mavg)\n self.assertAlmostEqual(2428.92599, algo.vwap, places=5)\n self.assertAlmostEqual(451.34355, algo.stddev, places=5)\n self.assertAlmostEqual(346, algo.returns)\n\n def test_manipulation(self):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n warnings.simplefilter(\"default\", ZiplineDeprecationWarning)\n\n algo = self.create_algo(simple_algo)\n algo.run(self.data_portal)\n\n self.assertEqual(4, len(w))\n\n for idx, warning in enumerate(w):\n self.assertEqual(\"<string>\", warning.filename)\n self.assertEqual(7 + idx, warning.lineno)\n\n if idx < 2:\n self.assertEqual(\"Checking whether an asset is in data is \"\n \"deprecated.\",\n str(warning.message))\n else:\n self.assertEqual(\"Iterating over the assets in `data` is \"\n \"deprecated.\",\n str(warning.message))\n"
] | [
[
"numpy.testing.assert_allclose",
"numpy.array",
"pandas.Timestamp",
"numpy.float64",
"pandas.DataFrame.from_dict"
],
[
"numpy.arange",
"pandas.Timestamp",
"numpy.testing.assert_array_equal"
]
] |
steven-murray/powerbox | [
"09809f3fe9e2b25dfb2f956ca4c2d4d40a0ac693"
] | [
"tests/test_power.py"
] | [
"import numpy as np\nimport os\nimport inspect\nimport sys\n\nLOCATION = \"/\".join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))).split(\"/\")[:-1])\nsys.path.insert(0, LOCATION)\n\nfrom powerbox import PowerBox, get_power\n\n\ndef test_power1d():\n p = [0] * 40\n for i in range(40):\n pb = PowerBox(8001, dim=1, pk=lambda k: 1.0 * k ** -2., boxlength=1.0, a=0, b=1)\n\n p[i], k = get_power(pb.delta_x(), pb.boxlength, a=0, b=1)\n\n assert np.allclose(np.mean(np.array(p), axis=0)[2000:], 1.0 * k[2000:] ** -2., rtol=2)\n\n\ndef test_power1d_n3():\n p = [0] * 40\n for i in range(40):\n pb = PowerBox(8001, dim=1, pk=lambda k: 1.0 * k ** -3., boxlength=1.0, b=1)\n p[i], k = get_power(pb.delta_x(), pb.boxlength, b=1)\n\n assert np.allclose(np.mean(np.array(p), axis=0)[2000:], 1.0 * k[2000:] ** -3., rtol=2)\n\n\ndef test_power1d_bigL():\n p = [0] * 40\n for i in range(40):\n pb = PowerBox(8001, dim=1, pk=lambda k: 1.0 * k ** -3., boxlength=10.0, b=1)\n p[i], k = get_power(pb.delta_x(), pb.boxlength, b=1)\n\n assert np.allclose(np.mean(np.array(p), axis=0)[2000:], 1.0 * k[2000:] ** -3., rtol=2)\n\n\ndef test_power1d_ordinary_freq():\n p = [0] * 40\n for i in range(40):\n pb = PowerBox(8001, dim=1, pk=lambda k: 1.0 * k ** -3., boxlength=1.0)\n p[i], k = get_power(pb.delta_x(), pb.boxlength)\n\n assert np.allclose(np.mean(np.array(p), axis=0)[2000:], 1.0 * k[2000:] ** -3., rtol=2)\n\n\ndef test_power1d_halfN():\n p = [0] * 40\n for i in range(40):\n pb = PowerBox(4001, dim=1, pk=lambda k: 1.0 * k ** -3., boxlength=1.0, b=1)\n p[i], k = get_power(pb.delta_x(), pb.boxlength, b=1)\n\n assert np.allclose(np.mean(np.array(p), axis=0)[1000:], 1.0 * k[1000:] ** -3., rtol=2)\n\n\ndef test_power2d():\n p = [0] * 5\n for i in range(5):\n pb = PowerBox(200, dim=2, pk=lambda k: 1.0 * k ** -2., boxlength=1.0, b=1)\n p[i], k = get_power(pb.delta_x(), pb.boxlength, b=1)\n\n assert np.allclose(np.mean(np.array(p), axis=0)[100:], 1.0 * k[100:] ** -2., rtol=2)\n\n\ndef test_power3d():\n pb = PowerBox(50, dim=3, pk=lambda k: 1.0 * k ** -2., boxlength=1.0, b=1)\n p, k = get_power(pb.delta_x(), pb.boxlength, b=1)\n\n print(p / (1.0 * k ** -2.))\n assert np.allclose(p, 1.0 * k ** -2., rtol=2)\n\n\ndef test_k_zero_ignore():\n pb = PowerBox(50, dim=2, pk=lambda k: 1.0 * k ** -2., boxlength=1.0, b=1)\n\n dx = pb.delta_x()\n p1, k1 = get_power(dx, pb.boxlength, bin_ave=False)\n p0, k0 = get_power(dx, pb.boxlength, ignore_zero_mode=True, bin_ave=False)\n\n assert np.all(k1 == k0)\n\n assert np.all(p1[1:] == p0[1:])\n\n assert p1[0] != p0[0]\n\n\ndef test_k_weights():\n pb = PowerBox(50, dim=2, pk=lambda k: 1.0 * k ** -2., boxlength=1.0, b=1)\n\n dx = pb.delta_x()\n\n k_weights = np.ones_like(dx)\n k_weights[:, 25] = 0\n\n p1, k1 = get_power(dx, pb.boxlength, bin_ave=False)\n p0, k0 = get_power(dx, pb.boxlength, bin_ave=False, k_weights= k_weights)\n\n assert np.all(k1 == k0)\n assert not np.allclose(p1, p0)"
] | [
[
"numpy.array",
"numpy.allclose",
"numpy.all",
"numpy.ones_like"
]
] |
jesserobertson/cogj | [
"25f1d85023764ef0cc459a8a715b1b678f971858"
] | [
"setup_extensions.py"
] | [
"\"\"\" file: setup_extensions.py (cogj)\n author: Jess Robertson, @jesserobertson\n date: Saturday, 16 March 2019\n\n description: Set up Cython extensions for CO-GJ\n\"\"\"\n\nfrom pathlib import Path\nfrom logging import getLogger\nfrom multiprocessing import cpu_count\n\nimport numpy\nfrom setuptools import Extension\nfrom setuptools.command.sdist import sdist\n\n# Here we try to import Cython - if it's here then we can generate new c sources\n# directly from the pyx files using their build_ext class.\n# If not then we just use the default setuptools version\ntry:\n from Cython.Distutils import build_ext\n HAVE_CYTHON = True\nexcept ImportError:\n from setuptools.command.build_ext import build_ext\n HAVE_CYTHON = False\n\nLOGGER = getLogger()\n\n# Where are our extensions located?\nEXTENSIONS_MODULE = Path('cogj/extensions')\n\ndef update_thread_count():\n \"\"\" Update the thread count for OpenMP extensions\n\n Uses one thread per core, with the estimate of the number of cores from\n multiprocessing.cpu_count.\n \"\"\"\n LOGGER.info('Updating thread count for cython code to %s', cpu_count())\n num_threads = cpu_count() # We're just going for 1 thread/CPU here\n fname = EXTENSIONS_MODULE / 'common.pxd'\n\n # Don't clobber other definitions\n try:\n with open(fname, 'r') as src:\n content = src.readlines() # this is short, just slurp it\n\n # Write out a new definition\n with open(fname, 'w') as sink:\n for line in content:\n if line.startswith('cdef int NUM_THREADS'):\n sink.write('cdef int NUM_THREADS = {0}'.format(num_threads))\n else:\n sink.write(line)\n\n except FileNotFoundError:\n # doesn't exist so just leave it\n with open(fname, 'w') as sink:\n sink.write('cdef int NUM_THREADS = {0}'.format(num_threads))\n\n\n\ndef get_extensions():\n \"\"\" Find our extensions to build.\n\n Also updates the thread count for OpenMP extensions to the number of CPUs\n availble on the current machine.\n\n Returns:\n a list of Extension objects to pass to setup\n \"\"\"\n update_thread_count()\n\n # Get the extensions\n if HAVE_CYTHON:\n files = [f for f in EXTENSIONS_MODULE.iterdir() if f.suffix == '.pyx']\n else:\n files = [f for f in EXTENSIONS_MODULE.iterdir() if f.suffix == '.c']\n\n # Construct keyword arguments for all extensions\n kwargs = dict(\n # extra_compile_args=['-fopenmp'],\n # extra_link_args=['-fopenmp'],\n include_dirs=[numpy.get_include(), EXTENSIONS_MODULE]\n )\n\n # Construct all the extension objects and return them\n extensions = []\n for fname in files:\n module_name = fname.stem\n extension_name = '.'.join(list(EXTENSIONS_MODULE.parts) + [module_name])\n source = str(fname)\n extensions.append(Extension(extension_name, sources=[source], **kwargs))\n return extensions\n\n# Update source distribution - we always require Cython for this...\nclass cython_sdist(sdist): # pylint: disable=C0103\n\n \"Custom sdist command to build cython on-the-fly\"\n\n def run(self):\n # Make sure the compiled Cython files in the distribution are up-to-date\n from Cython.Build import cythonize\n update_thread_count()\n cythonize([str(f)\n for f in EXTENSIONS_MODULE.iterdir()\n if f.suffix == '.pyx'])\n super().run()\n\ndef get_cmdclass():\n \"\"\" Return a command class which builds cython extensions automatically\n \"\"\"\n cmdclass = {\n 'build_ext': build_ext,\n 'sdist': cython_sdist\n }\n return cmdclass\n"
] | [
[
"numpy.get_include"
]
] |
CAVED123/Tensorforce | [
"823177f77f9047b1e71eccfffc08315ed1636878"
] | [
"tensorforce/core/optimizers/solvers/line_search.py"
] | [
"# Copyright 2018 Tensorforce Team. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport tensorflow as tf\n\nfrom tensorforce import TensorforceError, util\nfrom tensorforce.core import parameter_modules\nfrom tensorforce.core.optimizers.solvers import Iterative\n\n\nclass LineSearch(Iterative):\n \"\"\"\n Line search algorithm which iteratively optimizes the value $f(x)$ for $x$ on the line between \n $x'$ and $x_0$ by optimistically taking the first acceptable $x$ starting from $x_0$ and \n moving towards $x'$.\n \"\"\"\n\n def __init__(\n self, name, max_iterations, accept_ratio, mode, parameter, unroll_loop=False\n ):\n \"\"\"\n Creates a new line search solver instance.\n\n Args:\n max_iterations: Maximum number of iterations before termination.\n accept_ratio: Lower limit of what improvement ratio over $x = x'$ is acceptable \n (based either on a given estimated improvement or with respect to the value at \n $x = x'$).\n mode: Mode of movement between $x_0$ and $x'$, either 'linear' or 'exponential'.\n parameter: Movement mode parameter, additive or multiplicative, respectively.\n unroll_loop: Unrolls the TensorFlow while loop if true.\n \"\"\"\n super().__init__(name=name, max_iterations=max_iterations, unroll_loop=unroll_loop)\n\n assert accept_ratio >= 0.0\n self.accept_ratio = self.add_module(\n name='accept-ratio', module=accept_ratio, modules=parameter_modules, dtype='float'\n )\n\n # TODO: Implement such sequences more generally, also useful for learning rate decay or so.\n if mode not in ('linear', 'exponential'):\n raise TensorforceError(\n \"Invalid line search mode: {}, please choose one of 'linear' or 'exponential'\".format(mode)\n )\n self.mode = mode\n\n self.parameter = self.add_module(\n name='parameter', module=parameter, modules=parameter_modules, dtype='float'\n )\n\n def tf_solve(self, fn_x, x_init, base_value, target_value, estimated_improvement=None):\n \"\"\"\n Iteratively optimizes $f(x)$ for $x$ on the line between $x'$ and $x_0$.\n\n Args:\n fn_x: A callable returning the value $f(x)$ at $x$.\n x_init: Initial solution guess $x_0$.\n base_value: Value $f(x')$ at $x = x'$.\n target_value: Value $f(x_0)$ at $x = x_0$.\n estimated_improvement: Estimated improvement for $x = x_0$, $f(x')$ if None.\n\n Returns:\n A solution $x$ to the problem as given by the solver.\n \"\"\"\n return super().tf_solve(fn_x, x_init, base_value, target_value, estimated_improvement)\n\n def tf_start(self, x_init, base_value, target_value, estimated_improvement):\n \"\"\"\n Initialization step preparing the arguments for the first iteration of the loop body.\n\n Args:\n x_init: Initial solution guess $x_0$.\n base_value: Value $f(x')$ at $x = x'$.\n target_value: Value $f(x_0)$ at $x = x_0$.\n estimated_improvement: Estimated value at $x = x_0$, $f(x')$ if None.\n\n Returns:\n Initial arguments for tf_step.\n \"\"\"\n self.base_value = base_value\n\n if estimated_improvement is 
None: # TODO: Is this a good alternative?\n estimated_improvement = tf.abs(x=base_value)\n\n difference = target_value - self.base_value\n epsilon = tf.constant(value=util.epsilon, dtype=util.tf_dtype(dtype='float'))\n improvement = difference / tf.maximum(x=estimated_improvement, y=epsilon)\n\n last_improvement = improvement - 1.0\n parameter = self.parameter.value()\n\n if self.mode == 'linear':\n deltas = [-t * parameter for t in x_init]\n self.estimated_incr = -estimated_improvement * parameter\n\n elif self.mode == 'exponential':\n deltas = [-t * parameter for t in x_init]\n\n return x_init, deltas, improvement, last_improvement, estimated_improvement\n\n def tf_step(self, x, deltas, improvement, last_improvement, estimated_improvement):\n \"\"\"\n Iteration loop body of the line search algorithm.\n\n Args:\n x: Current solution estimate $x_t$.\n deltas: Current difference $x_t - x'$.\n improvement: Current improvement $(f(x_t) - f(x')) / v'$.\n last_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$.\n estimated_improvement: Current estimated value $v'$.\n\n Returns:\n Updated arguments for next iteration.\n \"\"\"\n next_x = [t + delta for t, delta in zip(x, deltas)]\n parameter = self.parameter.value()\n\n if self.mode == 'linear':\n next_deltas = deltas\n next_estimated_improvement = estimated_improvement + self.estimated_incr\n\n elif self.mode == 'exponential':\n next_deltas = [delta * parameter for delta in deltas]\n next_estimated_improvement = estimated_improvement * parameter\n\n target_value = self.fn_x(next_deltas)\n\n difference = target_value - self.base_value\n epsilon = tf.constant(value=util.epsilon, dtype=util.tf_dtype(dtype='float'))\n next_improvement = difference / tf.maximum(x=next_estimated_improvement, y=epsilon)\n\n return next_x, next_deltas, next_improvement, improvement, next_estimated_improvement\n\n def tf_next_step(self, x, deltas, improvement, last_improvement, estimated_improvement):\n \"\"\"\n Termination condition: max number of iterations, or no improvement for last step, or \n improvement less than acceptable ratio, or estimated value not positive.\n\n Args:\n x: Current solution estimate $x_t$.\n deltas: Current difference $x_t - x'$.\n improvement: Current improvement $(f(x_t) - f(x')) / v'$.\n last_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$.\n estimated_improvement: Current estimated value $v'$.\n\n Returns:\n True if another iteration should be performed.\n \"\"\"\n improved = improvement > last_improvement\n accept_ratio = self.accept_ratio.value()\n next_step = tf.math.logical_and(x=improved, y=(improvement < accept_ratio))\n epsilon = tf.constant(value=util.epsilon, dtype=util.tf_dtype(dtype='float'))\n return tf.math.logical_and(x=next_step, y=(estimated_improvement > epsilon))\n\n def tf_end(self, x_final, deltas, improvement, last_improvement, estimated_improvement):\n \"\"\"\n Termination step preparing the return value.\n\n Args:\n x_init: Final solution estimate $x_n$.\n deltas: Current difference $x_n - x'$.\n improvement: Current improvement $(f(x_n) - f(x')) / v'$.\n last_improvement: Last improvement $(f(x_{n-1}) - f(x')) / v'$.\n estimated_improvement: Current estimated value $v'$.\n\n Returns:\n Final solution.\n \"\"\"\n def accept_deltas():\n return [t + delta for t, delta in zip(x_final, deltas)]\n\n def undo_deltas():\n value = self.fn_x([-delta for delta in deltas])\n with tf.control_dependencies(control_inputs=(value,)):\n return util.fmap(function=util.identity_operation, xs=x_final)\n\n 
skip_undo_deltas = improvement > last_improvement\n x_final = self.cond(pred=skip_undo_deltas, true_fn=accept_deltas, false_fn=undo_deltas)\n return x_final\n"
] | [
[
"tensorflow.abs",
"tensorflow.maximum",
"tensorflow.control_dependencies",
"tensorflow.math.logical_and"
]
] |
spirit-code/aiida-spirit | [
"7a0c0ca7406f958599b691a410201137f9fb94e9"
] | [
"tests/test_calculations.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\" Tests for calculations\n\n\"\"\"\nimport os\nimport numpy as np\nfrom aiida.plugins import CalculationFactory\nfrom aiida.orm import Dict\nfrom aiida.engine import run, run_get_node\nfrom aiida_spirit.tools.helpers import prepare_test_inputs\n\nfrom . import TEST_DIR\n\n\ndef test_input_para_validator():\n \"\"\"Test running a calculation\n note this does only a dry run to check if the calculation plugin works\"\"\"\n\n # put an invalid type in params and check if the validator captures it\n for key, val in {\n 'llg_n_iterations': 17.2,\n 'mc_n_iterations': [1, 2, 3],\n 'bravais_lattice': 'test'\n }.items():\n parameters = Dict(dict={key: val})\n builder = CalculationFactory('spirit').get_builder()\n raised_error = False\n try:\n builder.parameters = parameters\n except (TypeError, ValueError):\n raised_error = True\n # check if an error was raised\n assert raised_error\n\n\ndef test_spirit_calc_dry_run(spirit_code):\n \"\"\"Test running a calculation\n note this does only a dry run to check if the calculation plugin works\"\"\"\n\n # Prepare input parameters\n inputs = prepare_test_inputs(os.path.join(TEST_DIR, 'input_files'))\n inputs['code'] = spirit_code\n inputs['metadata']['options'] = {\n # 5 mins max runtime\n 'max_wallclock_seconds': 300\n }\n inputs['metadata']['dry_run'] = True\n\n result = run(CalculationFactory('spirit'), **inputs)\n print(result)\n\n assert result is not None\n\n\ndef test_spirit_calc(spirit_code):\n \"\"\"Test running a calculation\n this actually runs spirit and therefore needs\n to have spirit installed in the python environment.\"\"\"\n\n # Prepare input parameters\n inputs = prepare_test_inputs(os.path.join(TEST_DIR, 'input_files'))\n inputs['code'] = spirit_code\n inputs['metadata']['options'] = {\n # 5 mins max runtime\n 'max_wallclock_seconds': 300\n }\n\n result, node = run_get_node(CalculationFactory('spirit'), **inputs)\n print(result, node)\n assert node.is_finished_ok\n\n # check consistency of the output files\n check_outcome(result)\n\n\ndef test_spirit_calc_with_param(spirit_code):\n \"\"\"Test running a calculation\n this actually runs spirit and therefore needs\n to have spirit installed in the python environment.\n\n This test runs a spirit calculation with an external field and a small temperature\n \"\"\"\n\n # Prepare input parameters\n inputs = prepare_test_inputs(os.path.join(TEST_DIR, 'input_files'))\n inputs['code'] = spirit_code\n inputs['metadata']['options'] = {\n # 5 mins max runtime\n 'max_wallclock_seconds': 300\n }\n # prepare parameters\n parameters = Dict(\n dict={\n 'llg_temperature': 10.0, # 10 K temperature noise\n 'external_field_magnitude': 2.0, # external field of 2 T\n 'external_field_normal':\n [0.0, 0.0, 1.0], # external field points in z direction\n 'mu_s': [2.2], # change spin moment to have the right size for Fe\n 'llg_n_iterations': 20000 # limit the number of iterations\n })\n inputs['parameters'] = parameters\n\n # run options input dict\n inputs['run_options'] = Dict(dict={\n 'simulation_method': 'LLG',\n 'solver': 'Depondt',\n })\n\n # first a dry run\n inputs['metadata']['dry_run'] = True\n result = run(CalculationFactory('spirit'), **inputs)\n\n # then run the calculation\n inputs['metadata']['dry_run'] = False\n result, node = run_get_node(CalculationFactory('spirit'), **inputs)\n print(result)\n assert node.is_finished_ok\n\n # check consistency of the output files\n spins_final = check_outcome(result, threshold=0.10)\n mag_mean = np.mean(spins_final, axis=0)\n 
print(mag_mean)\n assert mag_mean[0] < 0.25\n assert mag_mean[1] < 0.25\n assert mag_mean[2] > 0.85\n\n\ndef check_outcome(result, threshold=1e-5):\n \"\"\"check the result of a spirit calculation\n Checks if retrieved is there and if the output inside of the retreived makes sense\"\"\"\n\n # check output\n assert 'retrieved' in result\n ret = result['retrieved']\n out_file_list = ret.list_object_names()\n\n # check if spirit std out exists\n print(f'contents of retrieved: {out_file_list}')\n assert 'spirit.stdout' in out_file_list\n with ret.open('spirit.stdout') as _file:\n txt = _file.readlines()\n #from pprint import pprint\n #pprint(txt)\n assert len(txt) > 100\n\n # check some lines in the spirit std output\n for line in txt:\n if 'Number of Errors:' in line:\n errors = line.split()[-1]\n if 'Number of Warnings:' in line:\n warnings = line.split()[-1]\n assert int(errors) == 0\n assert int(warnings) == 0\n\n # check if initial and final spin image make sense\n spins_initial = result['magnetization'].get_array('initial')\n var_initial = np.std(spins_initial, axis=0).max()\n print('std initial', var_initial)\n assert var_initial > 0.4\n\n spins_final = result['magnetization'].get_array('final')\n var_final = np.std(spins_final, axis=0).max()\n print('std final', var_final)\n assert var_final < threshold\n\n return spins_final\n"
] | [
[
"numpy.std",
"numpy.mean"
]
] |
heather999/lenstronomy | [
"8102fe026c1f3ba6e81d8a1f59cceb90e68430b4"
] | [
"lenstronomy/ImSim/differential_extinction.py"
] | [
"from lenstronomy.LightModel.light_model import LightModel\nimport numpy as np\n\n__all__ = ['DifferentialExtinction']\n\n\nclass DifferentialExtinction(object):\n \"\"\"\n class to compute an extinction (for a specific band/wavelength). This class uses the functionality available in\n the LightModel module to describe an optical depth tau_ext to compute the extinction on the sky/image.\n \"\"\"\n\n def __init__(self, optical_depth_model=None, tau0_index=0):\n \"\"\"\n\n :param optical_depth_model: list of strings naming the profiles (same convention as LightModel module)\n describing the optical depth of the extinction\n \"\"\"\n if optical_depth_model is None:\n optical_depth_model = []\n self._profile = LightModel(light_model_list=optical_depth_model)\n if len(optical_depth_model) == 0:\n self._compute_bool = False\n else:\n self._compute_bool = True\n self._tau0_index = tau0_index\n\n @property\n def compute_bool(self):\n \"\"\"\n :return: True when a differential extinction is set, False otherwise \n \"\"\"\n return self._compute_bool\n\n def extinction(self, x, y, kwargs_extinction=None, kwargs_special=None):\n \"\"\"\n\n :param x: coordinate in image plane of flux intensity\n :param y: coordinate in image plane of flux intensity\n :param kwargs_extinction: keyword argument list matching the extinction profile\n :param kwargs_special: keyword arguments hosting special parameters, here required 'tau0_list'\n :return: extinction corrected flux\n \"\"\"\n if self._compute_bool is False or kwargs_extinction is None:\n return 1\n tau = self._profile.surface_brightness(x, y, kwargs_list=kwargs_extinction)\n tau0_list = kwargs_special.get('tau0_list', None)\n if tau0_list is not None:\n tau0 = tau0_list[self._tau0_index]\n else:\n tau0 = 1\n return np.exp(-tau0 * tau)\n"
] | [
[
"numpy.exp"
]
] |
forestriveral/floris | [
"02c31e121283ad6ccae987cfa3aa1bf1e4b43014"
] | [
"examples/visualization/subtract_inflow.py"
] | [
"# Copyright 2021 NREL\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\n# See https://floris.readthedocs.io for documentation\n\n\nimport matplotlib.pyplot as plt\n\nimport floris.tools as wfct\nimport floris.tools.cut_plane as cp\n\n\n# Initialize the FLORIS interface fi\nfi = wfct.floris_interface.FlorisInterface(\"../example_input.json\")\n\n# Single turbine at 0,0\nfi.reinitialize_flow_field(layout_array=([0], [0]))\n\n# Calculate wake\nfi.calculate_wake()\n\n# Grab some cross planes\nD = 126\ncut_plane_base_5 = fi.get_cross_plane(5 * D)\ncut_plane_base_in = fi.get_cross_plane(-5 * D)\n\n\n# Get the difference planes\ncut_plane_diff = cp.subtract(cut_plane_base_5, cut_plane_base_in)\n\n\n# Plot and show\nfig, axarr = plt.subplots(3, 1, figsize=(7, 10))\n\nax = axarr[0]\nwfct.visualization.visualize_cut_plane(cut_plane_base_5, ax=ax, minSpeed=4, maxSpeed=8)\nax.set_title(\"Baseline, 5D\")\n\nax = axarr[1]\nwfct.visualization.visualize_cut_plane(cut_plane_base_in, ax=ax, minSpeed=4, maxSpeed=8)\nax.set_title(\"Baseline, Inflow\")\n\nax = axarr[2]\nwfct.visualization.visualize_cut_plane(cut_plane_diff, ax=ax, minSpeed=-2, maxSpeed=2)\nax.set_title(\"5D - INFLOW\")\n\n# Reverse axis making the view upstream looking down\nfor ax in axarr.flatten():\n wfct.visualization.reverse_cut_plane_x_axis_in_plot(ax)\n\nplt.savefig(\"sub_inflow.png\", format='png', bbox_inches='tight', dpi=150)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] |
ConsenSys/handel | [
"bc3f6f8194db140a1067ab157fc6bb1fb53a0144"
] | [
"simul/plots/failing_time.py"
] | [
"#!/usr/bin/env python\n\n## This script generate the graphs that compares handel signature \n## generation with different number of failing nodes for a fixed \n## number of total nodes, and a fixed threshold 51%\n##\nimport sys\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nplt.figure(figsize=(4,2))\nfrom lib import *\n\n\nsigColumn = \"sigen_wall_avg\"\nnodeColumn = \"totalNbOfNodes\"\nfailingColumn = \"failing\"\n\nyColumns = {\n \"sigen_wall_avg\": \"Average\",\n \"sigen_wall_max\": \"Maximum\"}\n\n# \"sigen_wall_min\": \"Minimum\",\n \n\n## threshold of signatures required\nthreshold = \"51\"\nexpectedNodes = 4000\nnodes = None\n\nfiles = {\"csv/handel_4000_failing.csv\": \"handel\"}\ndatas = read_datafiles(files)\n\nfor f,v in datas.items():\n nodes = v[nodeColumn].max() # should be 2000\n if int(v[nodeColumn].mean()) != expectedNodes:\n print(\"error : nodes should be \" + str(expectedNodes))\n sys.exit(1)\n\n x = v[failingColumn].map(lambda x: int((x/nodes) * 100))\n for c,name in yColumns.items():\n # y = v[c]\n y = v[c].map(lambda x: x * 1000)\n print(\"file %s -> %d data points on %s\" % (f,len(y),sigColumn))\n # label = files[f]\n label = name\n if label == \"\":\n label = input(\"Label for file %s: \" % f)\n\n plot(x,y,\"-\",label,allColors.popleft())\n\nlabel= 35\nplt.legend(fontsize=label)\nplt.ylabel(\"Signature generation (ms)\",fontsize=label)\nplt.xlabel(\"Failing nodes in %\",fontsize=label)\n# plt.yscale('log')\n# plt.title(\"Time for 51% signature threshold over 4000 nodes\")\n# plt.axis('log')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
]
] |
sakibguy/models | [
"7214e17eb425963ec3d0295be215d5d26deaeb32"
] | [
"official/nlp/tools/export_tfhub_lib_test.py"
] | [
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests export_tfhub_lib.\"\"\"\n\nimport os\nimport tempfile\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport tensorflow_text as text\n\nfrom sentencepiece import SentencePieceTrainer\nfrom official.legacy.bert import configs\nfrom official.modeling import tf_utils\nfrom official.nlp.configs import encoders\nfrom official.nlp.modeling import layers\nfrom official.nlp.modeling import models\nfrom official.nlp.tools import export_tfhub_lib\n\n\ndef _get_bert_config_or_encoder_config(use_bert_config, hidden_size,\n num_hidden_layers, vocab_size=100):\n \"\"\"Returns config args for export_tfhub_lib._create_model().\"\"\"\n if use_bert_config:\n bert_config = configs.BertConfig(\n vocab_size=vocab_size,\n hidden_size=hidden_size,\n intermediate_size=32,\n max_position_embeddings=128,\n num_attention_heads=2,\n num_hidden_layers=num_hidden_layers)\n encoder_config = None\n else:\n bert_config = None\n encoder_config = encoders.EncoderConfig(\n type=\"albert\",\n albert=encoders.AlbertEncoderConfig(\n vocab_size=vocab_size,\n embedding_width=16,\n hidden_size=hidden_size,\n intermediate_size=32,\n max_position_embeddings=128,\n num_attention_heads=2,\n num_layers=num_hidden_layers,\n dropout_rate=0.1))\n\n return bert_config, encoder_config\n\n\ndef _get_vocab_or_sp_model_dummy(temp_dir, use_sp_model):\n \"\"\"Returns tokenizer asset args for export_tfhub_lib.export_model().\"\"\"\n dummy_file = os.path.join(temp_dir, \"dummy_file.txt\")\n with tf.io.gfile.GFile(dummy_file, \"w\") as f:\n f.write(\"dummy content\")\n if use_sp_model:\n vocab_file, sp_model_file = None, dummy_file\n else:\n vocab_file, sp_model_file = dummy_file, None\n return vocab_file, sp_model_file\n\n\ndef _read_asset(asset: tf.saved_model.Asset):\n return tf.io.gfile.GFile(asset.asset_path.numpy()).read()\n\n\ndef _find_lambda_layers(layer):\n \"\"\"Returns list of all Lambda layers in a Keras model.\"\"\"\n if isinstance(layer, tf.keras.layers.Lambda):\n return [layer]\n elif hasattr(layer, \"layers\"): # It's nested, like a Model.\n result = []\n for l in layer.layers:\n result += _find_lambda_layers(l)\n return result\n else:\n return []\n\n\nclass ExportModelTest(tf.test.TestCase, parameterized.TestCase):\n \"\"\"Tests exporting a Transformer Encoder model as a SavedModel.\n\n This covers export from an Encoder checkpoint to a SavedModel without\n the .mlm subobject. This is no longer preferred, but still useful\n for models like Electra that are trained without the MLM task.\n\n The export code is generic. 
This test focuses on two main cases\n (the most important ones in practice when this was written in 2020):\n - BERT built from a legacy BertConfig, for use with BertTokenizer.\n - ALBERT built from an EncoderConfig (as a representative of all other\n choices beyond BERT, for use with SentencepieceTokenizer (the one\n alternative to BertTokenizer).\n \"\"\"\n\n @parameterized.named_parameters((\"Bert\", True), (\"Albert\", False))\n def test_export_model(self, use_bert):\n # Create the encoder and export it.\n hidden_size = 16\n num_hidden_layers = 1\n bert_config, encoder_config = _get_bert_config_or_encoder_config(\n use_bert, hidden_size, num_hidden_layers)\n bert_model, encoder = export_tfhub_lib._create_model(\n bert_config=bert_config, encoder_config=encoder_config, with_mlm=False)\n self.assertEmpty(\n _find_lambda_layers(bert_model),\n \"Lambda layers are non-portable since they serialize Python bytecode.\")\n model_checkpoint_dir = os.path.join(self.get_temp_dir(), \"checkpoint\")\n checkpoint = tf.train.Checkpoint(encoder=encoder)\n checkpoint.save(os.path.join(model_checkpoint_dir, \"test\"))\n model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir)\n\n vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy(\n self.get_temp_dir(), use_sp_model=not use_bert)\n export_path = os.path.join(self.get_temp_dir(), \"hub\")\n export_tfhub_lib.export_model(\n export_path=export_path,\n bert_config=bert_config,\n encoder_config=encoder_config,\n model_checkpoint_path=model_checkpoint_path,\n with_mlm=False,\n vocab_file=vocab_file,\n sp_model_file=sp_model_file,\n do_lower_case=True)\n\n # Restore the exported model.\n hub_layer = hub.KerasLayer(export_path, trainable=True)\n\n # Check legacy tokenization data.\n if use_bert:\n self.assertTrue(hub_layer.resolved_object.do_lower_case.numpy())\n self.assertEqual(\"dummy content\",\n _read_asset(hub_layer.resolved_object.vocab_file))\n self.assertFalse(hasattr(hub_layer.resolved_object, \"sp_model_file\"))\n else:\n self.assertFalse(hasattr(hub_layer.resolved_object, \"do_lower_case\"))\n self.assertFalse(hasattr(hub_layer.resolved_object, \"vocab_file\"))\n self.assertEqual(\"dummy content\",\n _read_asset(hub_layer.resolved_object.sp_model_file))\n\n # Check restored weights.\n self.assertEqual(len(bert_model.trainable_weights),\n len(hub_layer.trainable_weights))\n for source_weight, hub_weight in zip(bert_model.trainable_weights,\n hub_layer.trainable_weights):\n self.assertAllClose(source_weight.numpy(), hub_weight.numpy())\n\n # Check computation.\n seq_length = 10\n dummy_ids = np.zeros((2, seq_length), dtype=np.int32)\n input_dict = dict(\n input_word_ids=dummy_ids,\n input_mask=dummy_ids,\n input_type_ids=dummy_ids)\n hub_output = hub_layer(input_dict)\n source_output = bert_model(input_dict)\n encoder_output = encoder(input_dict)\n self.assertEqual(hub_output[\"pooled_output\"].shape, (2, hidden_size))\n self.assertEqual(hub_output[\"sequence_output\"].shape,\n (2, seq_length, hidden_size))\n self.assertLen(hub_output[\"encoder_outputs\"], num_hidden_layers)\n\n for key in (\"pooled_output\", \"sequence_output\", \"encoder_outputs\"):\n self.assertAllClose(source_output[key], hub_output[key])\n self.assertAllClose(source_output[key], encoder_output[key])\n\n # The \"default\" output of BERT as a text representation is pooled_output.\n self.assertAllClose(hub_output[\"pooled_output\"], hub_output[\"default\"])\n\n # Test that training=True makes a difference (activates dropout).\n def _dropout_mean_stddev(training, 
num_runs=20):\n input_ids = np.array([[14, 12, 42, 95, 99]], np.int32)\n input_dict = dict(\n input_word_ids=input_ids,\n input_mask=np.ones_like(input_ids),\n input_type_ids=np.zeros_like(input_ids))\n outputs = np.concatenate([\n hub_layer(input_dict, training=training)[\"pooled_output\"]\n for _ in range(num_runs)\n ])\n return np.mean(np.std(outputs, axis=0))\n\n self.assertLess(_dropout_mean_stddev(training=False), 1e-6)\n self.assertGreater(_dropout_mean_stddev(training=True), 1e-3)\n\n # Test propagation of seq_length in shape inference.\n input_word_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)\n input_mask = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)\n input_type_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)\n input_dict = dict(\n input_word_ids=input_word_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids)\n output_dict = hub_layer(input_dict)\n pooled_output = output_dict[\"pooled_output\"]\n sequence_output = output_dict[\"sequence_output\"]\n encoder_outputs = output_dict[\"encoder_outputs\"]\n\n self.assertEqual(pooled_output.shape.as_list(), [None, hidden_size])\n self.assertEqual(sequence_output.shape.as_list(),\n [None, seq_length, hidden_size])\n self.assertLen(encoder_outputs, num_hidden_layers)\n\n\nclass ExportModelWithMLMTest(tf.test.TestCase, parameterized.TestCase):\n \"\"\"Tests exporting a Transformer Encoder model as a SavedModel.\n\n This covers export from a Pretrainer checkpoint to a SavedModel including\n the .mlm subobject, which is the preferred way since 2020.\n\n The export code is generic. This test focuses on two main cases\n (the most important ones in practice when this was written in 2020):\n - BERT built from a legacy BertConfig, for use with BertTokenizer.\n - ALBERT built from an EncoderConfig (as a representative of all other\n choices beyond BERT, for use with SentencepieceTokenizer (the one\n alternative to BertTokenizer).\n \"\"\"\n\n def test_copy_pooler_dense_to_encoder(self):\n encoder_config = encoders.EncoderConfig(\n type=\"bert\",\n bert=encoders.BertEncoderConfig(\n hidden_size=24, intermediate_size=48, num_layers=2))\n cls_heads = [\n layers.ClassificationHead(\n inner_dim=24, num_classes=2, name=\"next_sentence\")\n ]\n encoder = encoders.build_encoder(encoder_config)\n pretrainer = models.BertPretrainerV2(\n encoder_network=encoder,\n classification_heads=cls_heads,\n mlm_activation=tf_utils.get_activation(\n encoder_config.get().hidden_activation))\n # Makes sure the pretrainer variables are created.\n _ = pretrainer(pretrainer.inputs)\n checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items)\n model_checkpoint_dir = os.path.join(self.get_temp_dir(), \"checkpoint\")\n checkpoint.save(os.path.join(model_checkpoint_dir, \"test\"))\n\n vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy(\n self.get_temp_dir(), use_sp_model=True)\n export_path = os.path.join(self.get_temp_dir(), \"hub\")\n export_tfhub_lib.export_model(\n export_path=export_path,\n encoder_config=encoder_config,\n model_checkpoint_path=tf.train.latest_checkpoint(model_checkpoint_dir),\n with_mlm=True,\n copy_pooler_dense_to_encoder=True,\n vocab_file=vocab_file,\n sp_model_file=sp_model_file,\n do_lower_case=True)\n # Restores a hub KerasLayer.\n hub_layer = hub.KerasLayer(export_path, trainable=True)\n dummy_ids = np.zeros((2, 10), dtype=np.int32)\n input_dict = dict(\n input_word_ids=dummy_ids,\n input_mask=dummy_ids,\n input_type_ids=dummy_ids)\n hub_pooled_output = 
hub_layer(input_dict)[\"pooled_output\"]\n encoder_outputs = encoder(input_dict)\n # Verify that hub_layer's pooled_output is the same as the output of next\n # sentence prediction's dense layer.\n pretrained_pooled_output = cls_heads[0].dense(\n (encoder_outputs[\"sequence_output\"][:, 0, :]))\n self.assertAllClose(hub_pooled_output, pretrained_pooled_output)\n # But the pooled_output between encoder and hub_layer are not the same.\n encoder_pooled_output = encoder_outputs[\"pooled_output\"]\n self.assertNotAllClose(hub_pooled_output, encoder_pooled_output)\n\n @parameterized.named_parameters(\n (\"Bert\", True),\n (\"Albert\", False),\n )\n def test_export_model_with_mlm(self, use_bert):\n # Create the encoder and export it.\n hidden_size = 16\n num_hidden_layers = 2\n bert_config, encoder_config = _get_bert_config_or_encoder_config(\n use_bert, hidden_size, num_hidden_layers)\n bert_model, pretrainer = export_tfhub_lib._create_model(\n bert_config=bert_config, encoder_config=encoder_config, with_mlm=True)\n self.assertEmpty(\n _find_lambda_layers(bert_model),\n \"Lambda layers are non-portable since they serialize Python bytecode.\")\n bert_model_with_mlm = bert_model.mlm\n model_checkpoint_dir = os.path.join(self.get_temp_dir(), \"checkpoint\")\n\n checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items)\n\n checkpoint.save(os.path.join(model_checkpoint_dir, \"test\"))\n model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir)\n\n vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy(\n self.get_temp_dir(), use_sp_model=not use_bert)\n export_path = os.path.join(self.get_temp_dir(), \"hub\")\n export_tfhub_lib.export_model(\n export_path=export_path,\n bert_config=bert_config,\n encoder_config=encoder_config,\n model_checkpoint_path=model_checkpoint_path,\n with_mlm=True,\n vocab_file=vocab_file,\n sp_model_file=sp_model_file,\n do_lower_case=True)\n\n # Restore the exported model.\n hub_layer = hub.KerasLayer(export_path, trainable=True)\n\n # Check legacy tokenization data.\n if use_bert:\n self.assertTrue(hub_layer.resolved_object.do_lower_case.numpy())\n self.assertEqual(\"dummy content\",\n _read_asset(hub_layer.resolved_object.vocab_file))\n self.assertFalse(hasattr(hub_layer.resolved_object, \"sp_model_file\"))\n else:\n self.assertFalse(hasattr(hub_layer.resolved_object, \"do_lower_case\"))\n self.assertFalse(hasattr(hub_layer.resolved_object, \"vocab_file\"))\n self.assertEqual(\"dummy content\",\n _read_asset(hub_layer.resolved_object.sp_model_file))\n\n # Check restored weights.\n # Note that we set `_auto_track_sub_layers` to False when exporting the\n # SavedModel, so hub_layer has the same number of weights as bert_model;\n # otherwise, hub_layer will have extra weights from its `mlm` subobject.\n self.assertEqual(len(bert_model.trainable_weights),\n len(hub_layer.trainable_weights))\n for source_weight, hub_weight in zip(bert_model.trainable_weights,\n hub_layer.trainable_weights):\n self.assertAllClose(source_weight, hub_weight)\n\n # Check computation.\n seq_length = 10\n dummy_ids = np.zeros((2, seq_length), dtype=np.int32)\n input_dict = dict(\n input_word_ids=dummy_ids,\n input_mask=dummy_ids,\n input_type_ids=dummy_ids)\n hub_outputs_dict = hub_layer(input_dict)\n source_outputs_dict = bert_model(input_dict)\n encoder_outputs_dict = pretrainer.encoder_network(\n [dummy_ids, dummy_ids, dummy_ids])\n self.assertEqual(hub_outputs_dict[\"pooled_output\"].shape, (2, hidden_size))\n self.assertEqual(hub_outputs_dict[\"sequence_output\"].shape,\n 
(2, seq_length, hidden_size))\n for output_key in (\"pooled_output\", \"sequence_output\", \"encoder_outputs\"):\n self.assertAllClose(source_outputs_dict[output_key],\n hub_outputs_dict[output_key])\n self.assertAllClose(source_outputs_dict[output_key],\n encoder_outputs_dict[output_key])\n\n # The \"default\" output of BERT as a text representation is pooled_output.\n self.assertAllClose(hub_outputs_dict[\"pooled_output\"],\n hub_outputs_dict[\"default\"])\n\n # Test that training=True makes a difference (activates dropout).\n def _dropout_mean_stddev(training, num_runs=20):\n input_ids = np.array([[14, 12, 42, 95, 99]], np.int32)\n input_dict = dict(\n input_word_ids=input_ids,\n input_mask=np.ones_like(input_ids),\n input_type_ids=np.zeros_like(input_ids))\n outputs = np.concatenate([\n hub_layer(input_dict, training=training)[\"pooled_output\"]\n for _ in range(num_runs)\n ])\n return np.mean(np.std(outputs, axis=0))\n\n self.assertLess(_dropout_mean_stddev(training=False), 1e-6)\n self.assertGreater(_dropout_mean_stddev(training=True), 1e-3)\n\n # Checks sub-object `mlm`.\n self.assertTrue(hasattr(hub_layer.resolved_object, \"mlm\"))\n\n self.assertLen(hub_layer.resolved_object.mlm.trainable_variables,\n len(bert_model_with_mlm.trainable_weights))\n self.assertLen(hub_layer.resolved_object.mlm.trainable_variables,\n len(pretrainer.trainable_weights))\n for source_weight, hub_weight, pretrainer_weight in zip(\n bert_model_with_mlm.trainable_weights,\n hub_layer.resolved_object.mlm.trainable_variables,\n pretrainer.trainable_weights):\n self.assertAllClose(source_weight, hub_weight)\n self.assertAllClose(source_weight, pretrainer_weight)\n\n max_predictions_per_seq = 4\n mlm_positions = np.zeros((2, max_predictions_per_seq), dtype=np.int32)\n input_dict = dict(\n input_word_ids=dummy_ids,\n input_mask=dummy_ids,\n input_type_ids=dummy_ids,\n masked_lm_positions=mlm_positions)\n hub_mlm_outputs_dict = hub_layer.resolved_object.mlm(input_dict)\n source_mlm_outputs_dict = bert_model_with_mlm(input_dict)\n for output_key in (\"pooled_output\", \"sequence_output\", \"mlm_logits\",\n \"encoder_outputs\"):\n self.assertAllClose(hub_mlm_outputs_dict[output_key],\n source_mlm_outputs_dict[output_key])\n\n pretrainer_mlm_logits_output = pretrainer(input_dict)[\"mlm_logits\"]\n self.assertAllClose(hub_mlm_outputs_dict[\"mlm_logits\"],\n pretrainer_mlm_logits_output)\n\n # Test that training=True makes a difference (activates dropout).\n def _dropout_mean_stddev_mlm(training, num_runs=20):\n input_ids = np.array([[14, 12, 42, 95, 99]], np.int32)\n mlm_position_ids = np.array([[1, 2, 3, 4]], np.int32)\n input_dict = dict(\n input_word_ids=input_ids,\n input_mask=np.ones_like(input_ids),\n input_type_ids=np.zeros_like(input_ids),\n masked_lm_positions=mlm_position_ids)\n outputs = np.concatenate([\n hub_layer.resolved_object.mlm(input_dict,\n training=training)[\"pooled_output\"]\n for _ in range(num_runs)\n ])\n return np.mean(np.std(outputs, axis=0))\n\n self.assertLess(_dropout_mean_stddev_mlm(training=False), 1e-6)\n self.assertGreater(_dropout_mean_stddev_mlm(training=True), 1e-3)\n\n # Test propagation of seq_length in shape inference.\n input_word_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)\n input_mask = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)\n input_type_ids = tf.keras.layers.Input(shape=(seq_length,), dtype=tf.int32)\n input_dict = dict(\n input_word_ids=input_word_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids)\n hub_outputs_dict = 
hub_layer(input_dict)\n self.assertEqual(hub_outputs_dict[\"pooled_output\"].shape.as_list(),\n [None, hidden_size])\n self.assertEqual(hub_outputs_dict[\"sequence_output\"].shape.as_list(),\n [None, seq_length, hidden_size])\n\n\n_STRING_NOT_TO_LEAK = \"private_path_component_\"\n\n\nclass ExportPreprocessingTest(tf.test.TestCase, parameterized.TestCase):\n\n def _make_vocab_file(self, vocab, filename=\"vocab.txt\", add_mask_token=False):\n \"\"\"Creates wordpiece vocab file with given words plus special tokens.\n\n The tokens of the resulting model are, in this order:\n [PAD], [UNK], [CLS], [SEP], [MASK]*, ...vocab...\n *=if requested by args.\n\n This function also accepts wordpieces that start with the ## continuation\n marker, but avoiding those makes this function interchangeable with\n _make_sp_model_file(), up to the extra dimension returned by BertTokenizer.\n\n Args:\n vocab: a list of strings with the words or wordpieces to put into the\n model's vocabulary. Do not include special tokens here.\n filename: Optionally, a filename (relative to the temporary directory\n created by this function).\n add_mask_token: an optional bool, whether to include a [MASK] token.\n\n Returns:\n The absolute filename of the created vocab file.\n \"\"\"\n full_vocab = [\"[PAD]\", \"[UNK]\", \"[CLS]\", \"[SEP]\"\n ] + [\"[MASK]\"]*add_mask_token + vocab\n path = os.path.join(\n tempfile.mkdtemp(dir=self.get_temp_dir(), # New subdir each time.\n prefix=_STRING_NOT_TO_LEAK),\n filename)\n with tf.io.gfile.GFile(path, \"w\") as f:\n f.write(\"\\n\".join(full_vocab + [\"\"]))\n return path\n\n def _make_sp_model_file(self, vocab, prefix=\"spm\", add_mask_token=False):\n \"\"\"Creates Sentencepiece word model with given words plus special tokens.\n\n The tokens of the resulting model are, in this order:\n <pad>, <unk>, [CLS], [SEP], [MASK]*, ...vocab..., <s>, </s>\n *=if requested by args.\n\n The words in the input vocab are plain text, without the whitespace marker.\n That makes this function interchangeable with _make_vocab_file().\n\n Args:\n vocab: a list of strings with the words to put into the model's\n vocabulary. 
Do not include special tokens here.\n prefix: an optional string, to change the filename prefix for the model\n (relative to the temporary directory created by this function).\n add_mask_token: an optional bool, whether to include a [MASK] token.\n\n Returns:\n The absolute filename of the created Sentencepiece model file.\n \"\"\"\n model_prefix = os.path.join(\n tempfile.mkdtemp(dir=self.get_temp_dir()), # New subdir each time.\n prefix)\n input_file = model_prefix + \"_train_input.txt\"\n # Create input text for training the sp model from the tokens provided.\n # Repeat tokens, the earlier the more, because they are sorted by frequency.\n input_text = []\n for i, token in enumerate(vocab):\n input_text.append(\" \".join([token] * (len(vocab) - i)))\n with tf.io.gfile.GFile(input_file, \"w\") as f:\n f.write(\"\\n\".join(input_text + [\"\"]))\n control_symbols = \"[CLS],[SEP]\"\n full_vocab_size = len(vocab) + 6 # <pad>, <unk>, [CLS], [SEP], <s>, </s>.\n if add_mask_token:\n control_symbols += \",[MASK]\"\n full_vocab_size += 1\n flags = dict(\n model_prefix=model_prefix,\n model_type=\"word\",\n input=input_file,\n pad_id=0, unk_id=1, control_symbols=control_symbols,\n vocab_size=full_vocab_size,\n bos_id=full_vocab_size-2, eos_id=full_vocab_size-1)\n SentencePieceTrainer.Train(\n \" \".join([\"--{}={}\".format(k, v) for k, v in flags.items()]))\n return model_prefix + \".model\"\n\n def _do_export(self, vocab, do_lower_case, default_seq_length=128,\n tokenize_with_offsets=True, use_sp_model=False,\n experimental_disable_assert=False, add_mask_token=False):\n \"\"\"Runs SavedModel export and returns the export_path.\"\"\"\n export_path = tempfile.mkdtemp(dir=self.get_temp_dir())\n vocab_file = sp_model_file = None\n if use_sp_model:\n sp_model_file = self._make_sp_model_file(vocab,\n add_mask_token=add_mask_token)\n else:\n vocab_file = self._make_vocab_file(vocab, add_mask_token=add_mask_token)\n export_tfhub_lib.export_preprocessing(\n export_path,\n vocab_file=vocab_file,\n sp_model_file=sp_model_file,\n do_lower_case=do_lower_case,\n tokenize_with_offsets=tokenize_with_offsets,\n default_seq_length=default_seq_length,\n experimental_disable_assert=experimental_disable_assert)\n # Invalidate the original filename to verify loading from the SavedModel.\n tf.io.gfile.remove(sp_model_file or vocab_file)\n return export_path\n\n def test_no_leaks(self):\n \"\"\"Tests not leaking the path to the original vocab file.\"\"\"\n path = self._do_export(\n [\"d\", \"ef\", \"abc\", \"xy\"], do_lower_case=True, use_sp_model=False)\n with tf.io.gfile.GFile(os.path.join(path, \"saved_model.pb\"), \"rb\") as f:\n self.assertFalse( # pylint: disable=g-generic-assert\n _STRING_NOT_TO_LEAK.encode(\"ascii\") in f.read())\n\n @parameterized.named_parameters((\"Bert\", False), (\"Sentencepiece\", True))\n def test_exported_callables(self, use_sp_model):\n preprocess = tf.saved_model.load(self._do_export(\n [\"d\", \"ef\", \"abc\", \"xy\"], do_lower_case=True,\n tokenize_with_offsets=not use_sp_model, # TODO(b/181866850): drop this.\n experimental_disable_assert=True, # TODO(b/175369555): drop this.\n use_sp_model=use_sp_model))\n\n def fold_dim(rt):\n \"\"\"Removes the word/subword distinction of BertTokenizer.\"\"\"\n return rt if use_sp_model else rt.merge_dims(1, 2)\n\n # .tokenize()\n inputs = tf.constant([\"abc d ef\", \"ABC D EF d\"])\n token_ids = preprocess.tokenize(inputs)\n self.assertAllEqual(fold_dim(token_ids),\n tf.ragged.constant([[6, 4, 5],\n [6, 4, 5, 4]]))\n\n special_tokens_dict = {\n k: 
v.numpy().item() # Expecting eager Tensor, converting to Python.\n for k, v in preprocess.tokenize.get_special_tokens_dict().items()}\n self.assertDictEqual(special_tokens_dict,\n dict(padding_id=0,\n start_of_sequence_id=2,\n end_of_segment_id=3,\n vocab_size=4+6 if use_sp_model else 4+4))\n\n # .tokenize_with_offsets()\n if use_sp_model:\n # TODO(b/181866850): Enable tokenize_with_offsets when it works and test.\n self.assertFalse(hasattr(preprocess, \"tokenize_with_offsets\"))\n else:\n token_ids, start_offsets, limit_offsets = (\n preprocess.tokenize_with_offsets(inputs))\n self.assertAllEqual(fold_dim(token_ids),\n tf.ragged.constant([[6, 4, 5],\n [6, 4, 5, 4]]))\n self.assertAllEqual(fold_dim(start_offsets),\n tf.ragged.constant([[0, 4, 6],\n [0, 4, 6, 9]]))\n self.assertAllEqual(fold_dim(limit_offsets),\n tf.ragged.constant([[3, 5, 8],\n [3, 5, 8, 10]]))\n self.assertIs(preprocess.tokenize.get_special_tokens_dict,\n preprocess.tokenize_with_offsets.get_special_tokens_dict)\n\n # Root callable.\n bert_inputs = preprocess(inputs)\n self.assertAllEqual(bert_inputs[\"input_word_ids\"].shape.as_list(), [2, 128])\n self.assertAllEqual(bert_inputs[\"input_word_ids\"][:, :10],\n tf.constant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0],\n [2, 6, 4, 5, 4, 3, 0, 0, 0, 0]]))\n self.assertAllEqual(bert_inputs[\"input_mask\"].shape.as_list(), [2, 128])\n self.assertAllEqual(bert_inputs[\"input_mask\"][:, :10],\n tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]))\n self.assertAllEqual(bert_inputs[\"input_type_ids\"].shape.as_list(), [2, 128])\n self.assertAllEqual(bert_inputs[\"input_type_ids\"][:, :10],\n tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))\n\n # .bert_pack_inputs()\n inputs_2 = tf.constant([\"d xy\", \"xy abc\"])\n token_ids_2 = preprocess.tokenize(inputs_2)\n bert_inputs = preprocess.bert_pack_inputs(\n [token_ids, token_ids_2], seq_length=256)\n self.assertAllEqual(bert_inputs[\"input_word_ids\"].shape.as_list(), [2, 256])\n self.assertAllEqual(bert_inputs[\"input_word_ids\"][:, :10],\n tf.constant([[2, 6, 4, 5, 3, 4, 7, 3, 0, 0],\n [2, 6, 4, 5, 4, 3, 7, 6, 3, 0]]))\n self.assertAllEqual(bert_inputs[\"input_mask\"].shape.as_list(), [2, 256])\n self.assertAllEqual(bert_inputs[\"input_mask\"][:, :10],\n tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 0]]))\n self.assertAllEqual(bert_inputs[\"input_type_ids\"].shape.as_list(), [2, 256])\n self.assertAllEqual(bert_inputs[\"input_type_ids\"][:, :10],\n tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 0]]))\n\n # For BertTokenizer only: repeat relevant parts for do_lower_case=False,\n # default_seq_length=10, experimental_disable_assert=False,\n # tokenize_with_offsets=False, and without folding the word/subword dimension.\n def test_cased_length10(self):\n preprocess = tf.saved_model.load(self._do_export(\n [\"d\", \"##ef\", \"abc\", \"ABC\"],\n do_lower_case=False, default_seq_length=10,\n tokenize_with_offsets=False,\n use_sp_model=False,\n experimental_disable_assert=False))\n inputs = tf.constant([\"abc def\", \"ABC DEF\"])\n token_ids = preprocess.tokenize(inputs)\n self.assertAllEqual(token_ids, tf.ragged.constant([[[6], [4, 5]],\n [[7], [1]]]))\n\n self.assertFalse(hasattr(preprocess, \"tokenize_with_offsets\"))\n\n bert_inputs = preprocess(inputs)\n self.assertAllEqual(bert_inputs[\"input_word_ids\"],\n tf.constant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0],\n [2, 7, 1, 3, 0, 0, 0, 0, 0, 0]]))\n 
self.assertAllEqual(bert_inputs[\"input_mask\"],\n tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 0, 0, 0, 0, 0]]))\n self.assertAllEqual(bert_inputs[\"input_type_ids\"],\n tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))\n\n inputs_2 = tf.constant([\"d ABC\", \"ABC abc\"])\n token_ids_2 = preprocess.tokenize(inputs_2)\n bert_inputs = preprocess.bert_pack_inputs([token_ids, token_ids_2])\n # Test default seq_length=10.\n self.assertAllEqual(bert_inputs[\"input_word_ids\"],\n tf.constant([[2, 6, 4, 5, 3, 4, 7, 3, 0, 0],\n [2, 7, 1, 3, 7, 6, 3, 0, 0, 0]]))\n self.assertAllEqual(bert_inputs[\"input_mask\"],\n tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]))\n self.assertAllEqual(bert_inputs[\"input_type_ids\"],\n tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 1, 1, 1, 0, 0, 0]]))\n\n # XLA requires fixed shapes for tensors found in graph mode.\n # Statically known shapes in Python are a particularly firm way to\n # guarantee that, and they are generally more convenient to work with.\n # We test that the exported SavedModel plays well with TF's shape\n # inference when applied to fully or partially known input shapes.\n @parameterized.named_parameters((\"Bert\", False), (\"Sentencepiece\", True))\n def test_shapes(self, use_sp_model):\n preprocess = tf.saved_model.load(self._do_export(\n [\"abc\", \"def\"], do_lower_case=True,\n tokenize_with_offsets=not use_sp_model, # TODO(b/181866850): drop this.\n experimental_disable_assert=True, # TODO(b/175369555): drop this.\n use_sp_model=use_sp_model))\n\n def expected_bert_input_shapes(batch_size, seq_length):\n return dict(input_word_ids=[batch_size, seq_length],\n input_mask=[batch_size, seq_length],\n input_type_ids=[batch_size, seq_length])\n\n for batch_size in [7, None]:\n if use_sp_model:\n token_out_shape = [batch_size, None] # No word/subword distinction.\n else:\n token_out_shape = [batch_size, None, None]\n self.assertEqual(\n _result_shapes_in_tf_function(\n preprocess.tokenize,\n tf.TensorSpec([batch_size], tf.string)),\n token_out_shape,\n \"with batch_size=%s\" % batch_size)\n # TODO(b/181866850): Enable tokenize_with_offsets when it works and test.\n if use_sp_model:\n self.assertFalse(hasattr(preprocess, \"tokenize_with_offsets\"))\n else:\n self.assertEqual(\n _result_shapes_in_tf_function(\n preprocess.tokenize_with_offsets,\n tf.TensorSpec([batch_size], tf.string)),\n [token_out_shape] * 3,\n \"with batch_size=%s\" % batch_size)\n self.assertEqual(\n _result_shapes_in_tf_function(\n preprocess.bert_pack_inputs,\n [tf.RaggedTensorSpec([batch_size, None, None], tf.int32)] * 2,\n seq_length=256), expected_bert_input_shapes(batch_size, 256),\n \"with batch_size=%s\" % batch_size)\n self.assertEqual(\n _result_shapes_in_tf_function(preprocess,\n tf.TensorSpec([batch_size], tf.string)),\n expected_bert_input_shapes(batch_size, 128),\n \"with batch_size=%s\" % batch_size)\n\n @parameterized.named_parameters((\"Bert\", False), (\"Sentencepiece\", True))\n def test_reexport(self, use_sp_model):\n \"\"\"Test that preprocess keeps working after another save/load cycle.\"\"\"\n path1 = self._do_export(\n [\"d\", \"ef\", \"abc\", \"xy\"], do_lower_case=True, default_seq_length=10,\n tokenize_with_offsets=False,\n experimental_disable_assert=True, # TODO(b/175369555): drop this.\n use_sp_model=use_sp_model)\n path2 = path1.rstrip(\"/\") + \".2\"\n model1 = tf.saved_model.load(path1)\n tf.saved_model.save(model1, path2)\n # Delete the first SavedModel 
to test that the sceond one loads by itself.\n # https://github.com/tensorflow/tensorflow/issues/46456 reports such a\n # failure case for BertTokenizer.\n tf.io.gfile.rmtree(path1)\n model2 = tf.saved_model.load(path2)\n\n inputs = tf.constant([\"abc d ef\", \"ABC D EF d\"])\n bert_inputs = model2(inputs)\n self.assertAllEqual(bert_inputs[\"input_word_ids\"],\n tf.constant([[2, 6, 4, 5, 3, 0, 0, 0, 0, 0],\n [2, 6, 4, 5, 4, 3, 0, 0, 0, 0]]))\n self.assertAllEqual(bert_inputs[\"input_mask\"],\n tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]))\n self.assertAllEqual(bert_inputs[\"input_type_ids\"],\n tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))\n\n @parameterized.named_parameters((\"Bert\", True), (\"Albert\", False))\n def test_preprocessing_for_mlm(self, use_bert):\n \"\"\"Combines both SavedModel types and TF.text helpers for MLM.\"\"\"\n # Create the preprocessing SavedModel with a [MASK] token.\n non_special_tokens = [\"hello\", \"world\",\n \"nice\", \"movie\", \"great\", \"actors\",\n \"quick\", \"fox\", \"lazy\", \"dog\"]\n preprocess = tf.saved_model.load(self._do_export(\n non_special_tokens, do_lower_case=True,\n tokenize_with_offsets=use_bert, # TODO(b/181866850): drop this.\n experimental_disable_assert=True, # TODO(b/175369555): drop this.\n add_mask_token=True, use_sp_model=not use_bert))\n vocab_size = len(non_special_tokens) + (5 if use_bert else 7)\n\n # Create the encoder SavedModel with an .mlm subobject.\n hidden_size = 16\n num_hidden_layers = 2\n bert_config, encoder_config = _get_bert_config_or_encoder_config(\n use_bert, hidden_size, num_hidden_layers, vocab_size)\n _, pretrainer = export_tfhub_lib._create_model(\n bert_config=bert_config, encoder_config=encoder_config, with_mlm=True)\n model_checkpoint_dir = os.path.join(self.get_temp_dir(), \"checkpoint\")\n checkpoint = tf.train.Checkpoint(**pretrainer.checkpoint_items)\n checkpoint.save(os.path.join(model_checkpoint_dir, \"test\"))\n model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir)\n vocab_file, sp_model_file = _get_vocab_or_sp_model_dummy( # Not used below.\n self.get_temp_dir(), use_sp_model=not use_bert)\n encoder_export_path = os.path.join(self.get_temp_dir(), \"encoder_export\")\n export_tfhub_lib.export_model(\n export_path=encoder_export_path,\n bert_config=bert_config,\n encoder_config=encoder_config,\n model_checkpoint_path=model_checkpoint_path,\n with_mlm=True,\n vocab_file=vocab_file,\n sp_model_file=sp_model_file,\n do_lower_case=True)\n encoder = tf.saved_model.load(encoder_export_path)\n\n # Get special tokens from the vocab (and vocab size).\n special_tokens_dict = preprocess.tokenize.get_special_tokens_dict()\n self.assertEqual(int(special_tokens_dict[\"vocab_size\"]), vocab_size)\n padding_id = int(special_tokens_dict[\"padding_id\"])\n self.assertEqual(padding_id, 0)\n start_of_sequence_id = int(special_tokens_dict[\"start_of_sequence_id\"])\n self.assertEqual(start_of_sequence_id, 2)\n end_of_segment_id = int(special_tokens_dict[\"end_of_segment_id\"])\n self.assertEqual(end_of_segment_id, 3)\n mask_id = int(special_tokens_dict[\"mask_id\"])\n self.assertEqual(mask_id, 4)\n\n # A batch of 3 segment pairs.\n raw_segments = [tf.constant([\"hello\", \"nice movie\", \"quick fox\"]),\n tf.constant([\"world\", \"great actors\", \"lazy dog\"])]\n batch_size = 3\n\n # Misc hyperparameters.\n seq_length = 10\n max_selections_per_seq = 2\n\n # Tokenize inputs.\n tokenized_segments = [preprocess.tokenize(s) for s 
in raw_segments]\n # Trim inputs to eventually fit seq_lentgh.\n num_special_tokens = len(raw_segments) + 1\n trimmed_segments = text.WaterfallTrimmer(\n seq_length - num_special_tokens).trim(tokenized_segments)\n # Combine input segments into one input sequence.\n input_ids, segment_ids = text.combine_segments(\n trimmed_segments,\n start_of_sequence_id=start_of_sequence_id,\n end_of_segment_id=end_of_segment_id)\n # Apply random masking controlled by policy objects.\n (masked_input_ids, masked_lm_positions,\n masked_ids) = text.mask_language_model(\n input_ids=input_ids,\n item_selector=text.RandomItemSelector(\n max_selections_per_seq,\n selection_rate=0.5, # Adjusted for the short test examples.\n unselectable_ids=[start_of_sequence_id, end_of_segment_id]),\n mask_values_chooser=text.MaskValuesChooser(\n vocab_size=vocab_size, mask_token=mask_id,\n # Always put [MASK] to have a predictable result.\n mask_token_rate=1.0, random_token_rate=0.0))\n # Pad to fixed-length Transformer encoder inputs.\n input_word_ids, _ = text.pad_model_inputs(masked_input_ids,\n seq_length,\n pad_value=padding_id)\n input_type_ids, input_mask = text.pad_model_inputs(segment_ids, seq_length,\n pad_value=0)\n masked_lm_positions, _ = text.pad_model_inputs(masked_lm_positions,\n max_selections_per_seq,\n pad_value=0)\n masked_lm_positions = tf.cast(masked_lm_positions, tf.int32)\n num_predictions = int(tf.shape(masked_lm_positions)[1])\n\n # Test transformer inputs.\n self.assertEqual(num_predictions, max_selections_per_seq)\n expected_word_ids = np.array([\n # [CLS] hello [SEP] world [SEP]\n [2, 5, 3, 6, 3, 0, 0, 0, 0, 0],\n # [CLS] nice movie [SEP] great actors [SEP]\n [2, 7, 8, 3, 9, 10, 3, 0, 0, 0],\n # [CLS] brown fox [SEP] lazy dog [SEP]\n [2, 11, 12, 3, 13, 14, 3, 0, 0, 0]])\n for i in range(batch_size):\n for j in range(num_predictions):\n k = int(masked_lm_positions[i, j])\n if k != 0:\n expected_word_ids[i, k] = 4 # [MASK]\n self.assertAllEqual(input_word_ids, expected_word_ids)\n\n # Call the MLM head of the Transformer encoder.\n mlm_inputs = dict(\n input_word_ids=input_word_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids,\n masked_lm_positions=masked_lm_positions,\n )\n mlm_outputs = encoder.mlm(mlm_inputs)\n self.assertEqual(mlm_outputs[\"pooled_output\"].shape,\n (batch_size, hidden_size))\n self.assertEqual(mlm_outputs[\"sequence_output\"].shape,\n (batch_size, seq_length, hidden_size))\n self.assertEqual(mlm_outputs[\"mlm_logits\"].shape,\n (batch_size, num_predictions, vocab_size))\n self.assertLen(mlm_outputs[\"encoder_outputs\"], num_hidden_layers)\n\n # A real trainer would now compute the loss of mlm_logits\n # trying to predict the masked_ids.\n del masked_ids # Unused.\n\n @parameterized.named_parameters((\"Bert\", False), (\"Sentencepiece\", True))\n def test_special_tokens_in_estimator(self, use_sp_model):\n \"\"\"Tests getting special tokens without an Eager init context.\"\"\"\n preprocess_export_path = self._do_export(\n [\"d\", \"ef\", \"abc\", \"xy\"], do_lower_case=True,\n use_sp_model=use_sp_model, tokenize_with_offsets=False)\n\n def _get_special_tokens_dict(obj):\n \"\"\"Returns special tokens of restored tokenizer as Python values.\"\"\"\n if tf.executing_eagerly():\n special_tokens_numpy = {k: v.numpy()\n for k, v in obj.get_special_tokens_dict()}\n else:\n with tf.Graph().as_default():\n # This code expects `get_special_tokens_dict()` to be a tf.function\n # with no dependencies (bound args) from the context it was loaded in,\n # and boldly assumes that it 
can just be called in a dfferent context.\n special_tokens_tensors = obj.get_special_tokens_dict()\n with tf.compat.v1.Session() as sess:\n special_tokens_numpy = sess.run(special_tokens_tensors)\n return {k: v.item() # Numpy to Python.\n for k, v in special_tokens_numpy.items()}\n\n def input_fn():\n self.assertFalse(tf.executing_eagerly())\n # Build a preprocessing Model.\n sentences = tf.keras.layers.Input(shape=[], dtype=tf.string)\n preprocess = tf.saved_model.load(preprocess_export_path)\n tokenize = hub.KerasLayer(preprocess.tokenize)\n special_tokens_dict = _get_special_tokens_dict(tokenize.resolved_object)\n for k, v in special_tokens_dict.items():\n self.assertIsInstance(v, int, \"Unexpected type for {}\".format(k))\n tokens = tokenize(sentences)\n packed_inputs = layers.BertPackInputs(\n 4, special_tokens_dict=special_tokens_dict)(tokens)\n preprocessing = tf.keras.Model(sentences, packed_inputs)\n # Map the dataset.\n ds = tf.data.Dataset.from_tensors(\n (tf.constant([\"abc\", \"D EF\"]), tf.constant([0, 1])))\n ds = ds.map(lambda features, labels: (preprocessing(features), labels))\n return ds\n\n def model_fn(features, labels, mode):\n del labels # Unused.\n return tf.estimator.EstimatorSpec(mode=mode,\n predictions=features[\"input_word_ids\"])\n\n estimator = tf.estimator.Estimator(model_fn=model_fn)\n outputs = list(estimator.predict(input_fn))\n self.assertAllEqual(outputs, np.array([[2, 6, 3, 0],\n [2, 4, 5, 3]]))\n\n # TODO(b/175369555): Remove that code and its test.\n @parameterized.named_parameters((\"Bert\", False), (\"Sentencepiece\", True))\n def test_check_no_assert(self, use_sp_model):\n \"\"\"Tests the self-check during export without assertions.\"\"\"\n preprocess_export_path = self._do_export(\n [\"d\", \"ef\", \"abc\", \"xy\"], do_lower_case=True,\n use_sp_model=use_sp_model, tokenize_with_offsets=False,\n experimental_disable_assert=False)\n with self.assertRaisesRegex(AssertionError,\n r\"failed to suppress \\d+ Assert ops\"):\n export_tfhub_lib._check_no_assert(preprocess_export_path)\n\n\ndef _result_shapes_in_tf_function(fn, *args, **kwargs):\n \"\"\"Returns shapes (as lists) observed on the result of `fn`.\n\n Args:\n fn: A callable.\n *args: TensorSpecs for Tensor-valued arguments and actual values\n for Python-valued arguments to fn.\n **kwargs: Same for keyword arguments.\n\n Returns:\n The nest of partial tensor shapes (as lists) that is statically known inside\n tf.function(fn)(*args, **kwargs) for the nest of its results.\n \"\"\"\n # Use a captured mutable container for a side outout from the wrapper.\n uninitialized = \"uninitialized!\"\n result_shapes_container = [uninitialized]\n assert result_shapes_container[0] is uninitialized\n\n @tf.function\n def shape_reporting_wrapper(*args, **kwargs):\n result = fn(*args, **kwargs)\n result_shapes_container[0] = tf.nest.map_structure(\n lambda x: x.shape.as_list(), result)\n return result\n\n shape_reporting_wrapper.get_concrete_function(*args, **kwargs)\n assert result_shapes_container[0] is not uninitialized\n return result_shapes_container[0]\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.io.gfile.GFile",
"numpy.ones_like",
"tensorflow.executing_eagerly",
"tensorflow.RaggedTensorSpec",
"tensorflow.ragged.constant",
"tensorflow.Graph",
"tensorflow.constant",
"tensorflow.test.main",
"tensorflow.saved_model.load",
"tensorflow.compat.v1.Session",
"tensorflow.shape",
"numpy.zeros",
"tensorflow.io.gfile.remove",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.cast",
"tensorflow.saved_model.save",
"numpy.std",
"tensorflow.estimator.Estimator",
"numpy.zeros_like",
"tensorflow.io.gfile.rmtree",
"tensorflow.keras.Model",
"tensorflow.train.latest_checkpoint",
"tensorflow.TensorSpec",
"numpy.array",
"tensorflow.train.Checkpoint",
"tensorflow.keras.layers.Input"
]
] |
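The export tests in the row above drive their models through a `tf.train.Checkpoint` save/restore round-trip before exporting. A minimal sketch of that pattern, with a placeholder directory and a placeholder `step` variable that are not part of the original test:

```python
# Minimal sketch of the checkpoint round-trip used by the export tests above.
# `ckpt_dir` and the `step` variable are placeholders, not names from the test.
import os
import tensorflow as tf

ckpt_dir = "/tmp/example_ckpt"
step = tf.Variable(0, name="step")
checkpoint = tf.train.Checkpoint(step=step)
checkpoint.save(os.path.join(ckpt_dir, "test"))      # writes test-1.index / .data files
latest = tf.train.latest_checkpoint(ckpt_dir)        # e.g. ".../test-1"
restored = tf.Variable(-1, name="step")
tf.train.Checkpoint(step=restored).restore(latest)   # `restored` becomes 0
```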
good5dog5/Speaker-Diarization | [
"4cc38f77a2f2c24ce086323aa37098f6cd0f7f10"
] | [
"ghostvlad/generate_embeddings.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import print_function\nimport os\nimport sys\nimport numpy as np\nimport librosa\n\nimport toolkits\nimport random\n\n# ===========================================\n# Parse the argument\n# ===========================================\nimport argparse\nparser = argparse.ArgumentParser()\n# set up training configuration.\nparser.add_argument('--gpu', default='', type=str)\nparser.add_argument('--resume', default=r'pretrained/weights.h5', type=str)\nparser.add_argument('--data_path', default='4persons', type=str)\n# set up network configuration.\nparser.add_argument('--net', default='resnet34s',\n choices=['resnet34s', 'resnet34l'], type=str)\nparser.add_argument('--ghost_cluster', default=2, type=int)\nparser.add_argument('--vlad_cluster', default=8, type=int)\nparser.add_argument('--bottleneck_dim', default=512, type=int)\nparser.add_argument('--aggregation_mode', default='gvlad',\n choices=['avg', 'vlad', 'gvlad'], type=str)\n# set up learning rate, training loss and optimizer.\nparser.add_argument('--loss', default='softmax',\n choices=['softmax', 'amsoftmax'], type=str)\nparser.add_argument('--test_type', default='normal',\n choices=['normal', 'hard', 'extend'], type=str)\n\nglobal args\nargs = parser.parse_args()\n\n\n# calc speaker-embeddings similarity in pretty format output.\ndef similar(matrix):\n ids = matrix.shape[0]\n for i in range(ids):\n for j in range(ids):\n dist = matrix[i, :]*matrix[j, :]\n dist = np.linalg.norm(matrix[i, :] - matrix[j, :])\n print('%.2f ' % dist, end='')\n if((j+1) % 3 == 0 and j != 0):\n print(\"| \", end='')\n if((i+1) % 3 == 0 and i != 0):\n print('\\n')\n print(\"*\"*80, end='')\n print(\"\\n\")\n\n# ===============================================\n# code from Arsha for loading data.\n# ===============================================\n\n\ndef load_wav(vid_path, sr):\n wav, sr_ret = librosa.load(vid_path, sr=sr)\n assert sr_ret == sr\n\n intervals = librosa.effects.split(wav, top_db=20)\n wav_output = []\n for sliced in intervals:\n wav_output.extend(wav[sliced[0]:sliced[1]])\n wav_output = np.array(wav_output)\n return wav_output\n\n\ndef lin_spectogram_from_wav(wav, hop_length, win_length, n_fft=1024):\n linear = librosa.stft(wav, n_fft=n_fft, win_length=win_length,\n hop_length=hop_length) # linear spectrogram\n return linear.T\n\n\ndef load_data(path_spk_tuples, win_length=400, sr=16000, hop_length=160, n_fft=512, min_win_time=240, max_win_time=1600):\n win_time = np.random.randint(min_win_time, max_win_time, 1)[\n 0] # win_length in [240,1600] ms\n win_spec = win_time//(1000//(sr//hop_length)) # win_length in spectrum\n hop_spec = win_spec//2\n\n wavs = np.array([])\n change_points = []\n paths = list(zip(*path_spk_tuples))[0]\n speakers = list(zip(*path_spk_tuples))[1]\n\n for path in paths:\n wav = load_wav(path, sr=sr) # VAD\n wavs = np.concatenate((wavs, wav))\n # change_point in spectrum\n change_points.append(wavs.shape[0]//hop_length)\n\n linear_spect = lin_spectogram_from_wav(wavs, hop_length, win_length, n_fft)\n mag, _ = librosa.magphase(linear_spect) # magnitude\n mag_T = mag.T\n freq, time = mag_T.shape\n spec_mag = mag_T\n\n utterance_specs = []\n utterance_speakers = []\n\n cur_spec = 0\n cur_speaker = speakers[0]\n i = 0\n while(True):\n if(cur_spec+win_spec > time):\n break\n spec_mag = mag_T[:, cur_spec:cur_spec+win_spec]\n\n # cur win_spec span to the next speaker\n if(cur_spec+win_spec//2 > change_points[i]):\n i += 1\n cur_speaker = speakers[i]\n\n # preprocessing, 
subtract mean, divided by time-wise var\n mu = np.mean(spec_mag, 0, keepdims=True)\n std = np.std(spec_mag, 0, keepdims=True)\n spec_mag = (spec_mag - mu) / (std + 1e-5)\n utterance_specs.append(spec_mag)\n utterance_speakers.append(cur_speaker)\n cur_spec += hop_spec\n\n return utterance_specs, utterance_speakers\n\n\ndef prepare_data(SRC_PATH):\n wavDir = os.listdir(SRC_PATH)\n wavDir.sort()\n print(f'wavDir: {wavDir}')\n\n allpath_list = []\n allspk_list = []\n for i, spkDir in enumerate(wavDir): # Each speaker's directory\n spk = spkDir # speaker name\n wavPath = os.path.join(SRC_PATH, spkDir, 'audio')\n for wav in os.listdir(wavPath): # wavfile\n utter_path = os.path.join(wavPath, wav)\n allpath_list.append(utter_path)\n allspk_list.append(i)\n if(i > 100):\n break\n\n path_spk_list = list(zip(allpath_list, allspk_list))\n return path_spk_list\n\n\ndef main():\n\n # gpu configuration\n toolkits.initialize_GPU(args)\n\n import model\n # ==================================\n # Get Train/Val.\n # ==================================\n\n total_list = [os.path.join(args.data_path, file)\n for file in os.listdir(args.data_path)]\n unique_list = np.unique(total_list)\n\n # ==================================\n # Get Model\n # ==================================\n # construct the data generator.\n params = {'dim': (257, None, 1),\n 'nfft': 512,\n 'min_slice': 720,\n 'win_length': 400,\n 'hop_length': 160,\n 'n_classes': 5994,\n 'sampling_rate': 16000,\n 'normalize': True,\n }\n\n network_eval = model.vggvox_resnet2d_icassp(input_dim=params['dim'],\n num_class=params['n_classes'],\n mode='eval', args=args)\n\n # ==> load pre-trained model ???\n if args.resume:\n # ==> get real_model from arguments input,\n # load the model if the imag_model == real_model.\n if os.path.isfile(args.resume):\n network_eval.load_weights(os.path.join(args.resume), by_name=True)\n print('==> successfully loading model {}.'.format(args.resume))\n else:\n raise IOError(\n \"==> no checkpoint found at '{}'\".format(args.resume))\n else:\n raise IOError('==> please type in the model to load')\n\n # The feature extraction process has to be done sample-by-sample,\n # because each sample is of different lengths.\n\n SRC_PATH = r'~/Workspace/SOHO/speaker-diarization/dataset/ST-CMDS-20170001_1-OS'\n path_spk_tuples = prepare_data(SRC_PATH)\n train_sequence = []\n train_cluster_id = []\n\n for epoch in range(7000): # Random choice utterances from whole wavfiles\n # A merged utterance contains [10,20] utterances\n splits_count = np.random.randint(10, 20, 1)\n path_spks = random.sample(path_spk_tuples, splits_count[0])\n utterance_specs, utterance_speakers = load_data(\n path_spks, min_win_time=500, max_win_time=1600)\n feats = []\n for spec in utterance_specs:\n spec = np.expand_dims(np.expand_dims(spec, 0), -1)\n v = network_eval.predict(spec)\n feats += [v]\n\n feats = np.array(feats)[:, 0, :] # [splits, embedding dim]\n train_sequence.append(feats)\n train_cluster_id.append(utterance_speakers)\n print(\"epoch:{}, utterance length: {}, speakers: {}\".format(\n epoch, len(utterance_speakers), len(path_spks)))\n\n np.savez('training_data', train_sequence=train_sequence,\n train_cluster_id=train_cluster_id)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.savez",
"numpy.expand_dims",
"numpy.array",
"numpy.std",
"numpy.concatenate",
"numpy.random.randint",
"numpy.linalg.norm",
"numpy.unique",
"numpy.mean"
]
] |
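`load_data()` in the row above slices the magnitude spectrogram into overlapping windows and normalizes each window frame-by-frame (mean and standard deviation taken over the frequency axis). A minimal numpy-only sketch of that windowing step, assuming `mag_T`, `win_spec`, and `hop_spec` come from the surrounding code:

```python
import numpy as np

def slice_and_normalize(mag_T, win_spec, hop_spec):
    """Cut a (freq, time) spectrogram into hops and normalize each window."""
    windows, cur = [], 0
    while cur + win_spec <= mag_T.shape[1]:
        spec = mag_T[:, cur:cur + win_spec]
        mu = np.mean(spec, 0, keepdims=True)         # per-frame mean over frequency
        std = np.std(spec, 0, keepdims=True)
        windows.append((spec - mu) / (std + 1e-5))   # 1e-5 guards near-silent frames
        cur += hop_spec
    return windows
```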
fzyzcjy/ncnn | [
"42e71609508fde1bd54d9d9de6ca5522ee3bcf37"
] | [
"tools/pnnx/tests/test_nn_BatchNorm2d.py"
] | [
"# Tencent is pleased to support the open source community by making ncnn available.\n#\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n\n self.bn_0 = nn.BatchNorm2d(num_features=32)\n self.bn_1 = nn.BatchNorm2d(num_features=32, eps=1e-1, affine=False)\n self.bn_2 = nn.BatchNorm2d(num_features=11, affine=True)\n\n def forward(self, x, y):\n x = self.bn_0(x)\n x = self.bn_1(x)\n\n y = self.bn_2(y)\n\n return x, y\n\ndef test():\n net = Model()\n net.eval()\n\n torch.manual_seed(0)\n x = torch.rand(1, 32, 12, 64)\n y = torch.rand(1, 11, 1, 1)\n\n a0, a1 = net(x, y)\n\n # export torchscript\n mod = torch.jit.trace(net, (x, y))\n mod.save(\"test_nn_BatchNorm2d.pt\")\n\n # torchscript to pnnx\n import os\n os.system(\"../src/pnnx test_nn_BatchNorm2d.pt inputshape=[1,32,12,64],[1,11,1,1]\")\n\n # pnnx inference\n import test_nn_BatchNorm2d_pnnx\n b0, b1 = test_nn_BatchNorm2d_pnnx.test_inference()\n\n return torch.equal(a0, b0) and torch.equal(a1, b1)\n\nif __name__ == \"__main__\":\n if test():\n exit(0)\n else:\n exit(1)\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.manual_seed",
"torch.rand",
"torch.equal",
"torch.jit.trace"
]
] |
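The pnnx tests in the row above all follow the same pattern: trace the module to TorchScript, save the `.pt` file, and hand it to the pnnx converter. A minimal sketch of the tracing step, with a placeholder module, input shape, and filename:

```python
import torch
import torch.nn as nn

net = nn.BatchNorm2d(num_features=8).eval()   # eval() freezes the running statistics
x = torch.rand(1, 8, 4, 4)
mod = torch.jit.trace(net, x)                 # records the graph for this input shape
mod.save("example_bn.pt")                     # hypothetical output filename
```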
ofgulban/iphigen | [
"47c972a5852677e01ab0b120f69d004abc57e478"
] | [
"iphigen/iphigen_nifti.py"
] | [
"\"\"\"MRI data processing with retinex and balance methods.\"\"\"\n\nfrom __future__ import division\nimport os\nimport numpy as np\nimport nibabel as nb\nfrom iphigen import core, utils\nfrom iphigen.ui import user_interface, display_welcome_message\nimport iphigen.config as cfg\n\n\ndef main():\n \"\"\"Iphigen processes for nifti images.\"\"\"\n user_interface()\n display_welcome_message()\n\n # Load data\n data, affine, dirname, basename, ext = [], [], [], [], []\n nr_fileinputs = len(cfg.filename)\n print('Selected file(s):')\n for i in range(nr_fileinputs):\n nii = nb.load(cfg.filename[i])\n affine.append(nii.affine)\n parses = utils.parse_filepath(cfg.filename[i])\n data.append(np.squeeze(nii.get_data()))\n print(' Name: {}'.format(cfg.filename[i]))\n print(' Dimensions: {}'.format(data[i].shape))\n if cfg.out_dir:\n dirname.append(cfg.out_dir)\n else:\n dirname.append(parses[0])\n basename.append(parses[1])\n ext.append(parses[2])\n\n # Reorganize data\n data = np.asarray(data)\n data = data.transpose([1, 2, 3, 0])\n # Compute intensity\n inten = np.sum(data, axis=-1)\n # Compute barycentic coordinates (equivalent to intensity for 0-simplex)\n bary = data / inten[..., None]\n\n suf = '' # suffix\n # TODO: consider zero_to option for MRI data\n if cfg.intensity_balance:\n raise ValueError('Intensity balance not implemented.')\n # print('Applying intensity balance...')\n # print(' Percentiles: {}'.format(cfg.int_bal_perc))\n # suf = suf + '_IB'\n # inten = utils.truncate_and_scale(\n # inten, pmin=cfg.int_bal_perc[0], pmax=cfg.int_bal_perc[1],\n # zero_to=255*data.shape[-1])\n # data = bary * inten[..., None]\n # # Update barycentic coordinates\n # bary = data / inten[..., None]\n\n if cfg.retinex:\n print('Applying multi-scale retinex with barycenter preservation (MSRBP)...')\n print(' Selected scales: {}'.format(cfg.scales_nifti))\n suf = suf + '_MSRBP' + utils.prepare_scale_suffix(cfg.scales_nifti)\n new_inten = core.multi_scale_retinex(inten, scales=cfg.scales_nifti)\n # Scale back to the approximage original intensity range\n inten = core.scale_approx(new_inten, inten)\n\n if cfg.simplex_color_balance:\n print('Applying simplex color balance...')\n print(' Centering: {}'.format(cfg.simplex_center))\n print(' Standardize: {}'.format(cfg.simplex_standardize))\n suf = suf + '_SimplexCB'\n bary = core.simplex_color_balance(bary)\n\n # Insert back the processed intensity image\n data = bary * inten[..., None]\n\n if cfg.simplest_color_balance:\n print('Applying simplest color balance...')\n print(' Percentiles: {}'.format(cfg.int_bal_perc))\n suf = suf + '_SimplestCB'\n data = core.simplest_color_balance(\n data, pmin=cfg.simplest_perc[0], pmax=cfg.simplest_perc[1])\n\n # Check at least one operation is selected before saving anything\n if sum([cfg.retinex, cfg.intensity_balance, cfg.simplex_color_balance,\n cfg.simplest_color_balance]) > 0:\n print('Saving output(s)...')\n for i in range(nr_fileinputs):\n # Generate output path\n out_basepath = os.path.join(dirname[i],\n '{}{}'.format(basename[i], suf))\n out_path = out_basepath + os.extsep + ext[i]\n # Create nifti image and save\n img = nb.Nifti1Image(data[..., i], affine=affine[i])\n nb.save(img, out_path)\n print(' {} is saved.\\n'.format(out_path))\n else:\n print('No operation selected, not saving anything.')\n print('Finished.')\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.sum",
"numpy.asarray"
]
] |
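The script in the row above splits each multi-channel volume into an intensity image and barycentric (per-channel fraction) coordinates, processes the intensity, and recombines them. A minimal numpy sketch of that decomposition on placeholder data:

```python
import numpy as np

data = np.random.rand(4, 4, 4, 3) + 1e-3      # placeholder volume, channels on the last axis
inten = np.sum(data, axis=-1)                 # intensity = channel sum
bary = data / inten[..., None]                # barycentric coordinates, sum to 1 per voxel
assert np.allclose(bary.sum(axis=-1), 1.0)
recon = bary * inten[..., None]               # exact recombination
assert np.allclose(recon, data)
```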
johnruth96/privacy-justifiable-fairness | [
"3f5ae92d791df1827cbc8720cf5e7aa33ceed7aa"
] | [
"experiments/evaluate.py"
] | [
"import os\n\nimport pandas as pd\n\nfrom experiments.conf import Config\nfrom fairness import measure_fairness\nfrom privacy.models import get_l_distinct, get_k\n\n\ndef evaluate_experiment(conf: Config):\n # Load setup\n setup = conf.get_setup()\n A = setup[\"A\"]\n I = setup[\"I\"]\n O = setup[\"O\"]\n S = setup[\"S\"]\n QI = setup[\"QI\"]\n\n # Evaluation\n for table_dir, result_file in zip(conf.table_dirs_resampling, conf.result_files_resampling):\n if not os.path.exists(table_dir):\n continue\n\n print(f\"INFO: Evaluating {table_dir}\")\n\n df_exp = pd.read_csv(conf.exp_file, header=0, index_col=[0, 1])\n indexes = []\n col_names = []\n rows = []\n\n # Read tables\n for k, l in df_exp.index:\n print(\"Evaluating ({}, {}) ...\".format(k, l))\n table_file = os.path.join(table_dir, \"K{}L{}.csv\".format(k, l))\n df = pd.read_csv(table_file, header=0, index_col=0)\n\n k_df, n_df = get_k(df, QI)\n l_df = get_l_distinct(df, S, QI)\n idx = (k_df, l_df)\n\n if idx in indexes:\n print(f\"WARNING: index ({k_df}, {l_df}) already in {table_file}\")\n\n measurements = measure_fairness(df, A, I, O, S)\n measurements.update(\n n_groups=n_df,\n idx_original=(k, l),\n )\n\n if not col_names:\n col_names = sorted(measurements.keys())\n\n indexes.append(idx)\n rows.append([measurements[measure] for measure in col_names])\n\n results = pd.DataFrame(rows, columns=col_names,\n index=pd.MultiIndex.from_tuples(indexes, names=[\"k\", \"l\"]))\n print(f\"Writing results to {result_file} ...\", flush=True, end=\"\")\n results.to_csv(result_file, index_label=[\"k\", \"l\"], index=True)\n print(\" done\")\n"
] | [
[
"pandas.read_csv",
"pandas.MultiIndex.from_tuples"
]
] |
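`evaluate_experiment()` in the row above collects one row of measurements per anonymized table and indexes the result frame by the (k, l) pair. A minimal pandas sketch with placeholder column names and values:

```python
import pandas as pd

indexes = [(2, 2), (5, 3)]                     # measured (k, l) pairs
rows = [[0.91, 10], [0.84, 7]]                 # placeholder measurements
results = pd.DataFrame(
    rows,
    columns=["fairness", "n_groups"],
    index=pd.MultiIndex.from_tuples(indexes, names=["k", "l"]),
)
results.to_csv("results.csv", index_label=["k", "l"], index=True)
```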
Yidansong/SchNet | [
"49a1e6031f50d79a83ea21148b8e8cbcabdaabb7"
] | [
"src/schnet/nn/utils.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.ops.array_grad import _TileGrad\nfrom tensorflow.python.framework import ops\n\n\ndef shape(x):\n if isinstance(x, tf.Tensor):\n return x.get_shape().as_list()\n return np.shape(x)\n\n\[email protected](\"TileDense\")\ndef tile_grad_dense(op, grad):\n grad = tf.convert_to_tensor(grad)\n return _TileGrad(op, grad)\n"
] | [
[
"tensorflow.python.ops.array_grad._TileGrad",
"numpy.shape",
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.convert_to_tensor"
]
] |
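The file in the row above only registers the custom gradient under the name "TileDense"; activating it is left to the caller. A plausible usage, assumed rather than shown in the repository, would rely on TF1-style `gradient_override_map`:

```python
# Assumed usage sketch: route the gradient of Tile ops through the registered
# "TileDense" gradient. Graph-mode (tf.compat.v1) is required for the override map.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

g = tf.Graph()
with g.as_default():
    x = tf.placeholder(tf.float32, [None, 3])
    with g.gradient_override_map({"Tile": "TileDense"}):
        y = tf.tile(x, [2, 1])                 # backward pass now uses tile_grad_dense
    grads = tf.gradients(tf.reduce_sum(y), x)
```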
Banconxuan/RTS3D | [
"6d2738501eaf90f019eeaa22254cd9756f8d3364"
] | [
"src/lib/models/embedding_space_generater.py"
] | [
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# Modified by Xingyi Zhou\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport torch\ndef generate_gridpoint(dim, pos, ori, calib_l, calib_r, trans_output_l, trans_output_r, opt=None): # dim B,K,3\n '''\n generate grid point coordinates, the image featuremap coordinates corresponding the grid point.\n return:\n image_xy_l: left image featuremap coordinates corresponding the grid point.\n image_xy_r: right image featuremap coordinates corresponding the grid point.\n xyz_norm: the grid point coordinates in the object coordinate system\n xyz: the grid point coordinates in the camera coordinate system\n '''\n\n h = dim[0]\n w = dim[1]\n l = dim[2]\n x_axi = -torch.linspace(-l / 2., l / 2., opt.R_l).cuda()\n y_axi = torch.linspace(0, -h, opt.R_h).cuda()\n z_axi = -torch.linspace(-w / 2., w / 2., opt.R_w).cuda()\n xx, yy, zz = torch.meshgrid(x_axi, y_axi, z_axi)\n xyz = torch.stack([xx, yy, zz], 0).view((3, -1)) # 3,resl***2\n R = ori\n xyz = R.mm(xyz)\n xyz_norm = xyz.clone()\n xyz[0, :] += pos[0]\n xyz[1, :] += pos[1]\n xyz[2, :] += pos[2]\n ones = torch.ones((1, xyz.size(1))).cuda()\n xyz_hom = torch.cat((xyz, ones), dim=0)\n image_xy_hom_l = calib_l.mm(xyz_hom)\n image_xy_hom_l = image_xy_hom_l / image_xy_hom_l[2, :]\n\n image_xy_hom_r = calib_r.mm(xyz_hom)\n image_xy_hom_r = image_xy_hom_r / image_xy_hom_r[2, :]\n image_xy_l = []\n image_xy_r = []\n for py in range(opt.pynum):\n image_xy_l.append(trans_output_l[py].mm(image_xy_hom_l))\n image_xy_r.append(trans_output_r[py].mm(image_xy_hom_r))\n\n image_xy_l = torch.stack(image_xy_l,dim=0)\n image_xy_r = torch.stack(image_xy_r, dim=0)\n return image_xy_l, image_xy_r, xyz_norm, xyz\n\ndef featuremap2gridpoint(batch, phase='train', opt = None):\n '''\n image featuremap to gridpoint\n '''\n outputs_l, outputs_r = batch['left_image_feature'], batch['right_image_feature']\n batch_for_point = {}\n batch_for_point['dim'] = []\n batch_for_point['pos'] = []\n batch_for_point['ori'] = []\n batch_for_point['dim_real'] = []\n batch_for_point['pos_real'] = []\n batch_for_point['ori_real'] = []\n batch_for_point['dim_est'] = []\n batch_for_point['pos_est'] = []\n batch_for_point['ori_est_scalar'] = []\n batch_for_point['reg_mask'] = []\n\n\n B = outputs_l[0].size(0)\n ## *_est represent monocular 3D detector results.\n dim = batch['dim_est']\n pos = batch['pos_est']\n ori = batch['ori_est']\n calib_l = batch['calib_l']\n calib_r = batch['calib_r']\n ## trans_output_* represent the transformation from 3D grid point to image featuremap.\n trans_output_l = batch['trans_output_l']\n trans_output_r = batch['trans_output_r']\n\n pointNet_input_list_r = []\n pointNet_input_list_l = []\n pointNet_input_list_xyz_abs = []\n pointNet_input_consis = []\n reg_mask = batch['reg_mask']\n obj_num=[]\n for b in range(B):\n index_box_l = []\n index_box_r = []\n volume_xyz_list = []\n volume_xyz_abs_list = []\n mask = torch.nonzero(reg_mask[b])\n K = mask.size(0)\n obj_num.append(K)\n for k in range(K):#range(self.opt.max_objs):\n #k_index = mask[k, 0]\n index_l, index_r, xyz, xyz_abs = generate_gridpoint(dim[b, k], pos[b, k],\n ori[b, k], calib_l[b],\n calib_r[b], trans_output_l[b],\n trans_output_r[b], opt)\n 
index_box_l.append(index_l)\n index_box_r.append(index_r)\n volume_xyz_list.append(xyz)\n volume_xyz_abs_list.append(xyz_abs)\n index_box_l = torch.stack(index_box_l, 0).transpose(3,2).unsqueeze(0) # 1,K,3,2,resl***2\n index_box_r = torch.stack(index_box_r, 0).transpose(3,2).unsqueeze(0)\n\n volume_xyz_list = torch.stack(volume_xyz_list, 0) # m(<=K),3,resl***2\n volume_xyz_abs_list = torch.stack(volume_xyz_abs_list, 0)\n volume_from_heatmap_l = []\n volume_from_heatmap_r = []\n for py in range(opt.pynum):\n grid_l = index_box_l[:,:,py,:,:] #1, K,resl***2,2\n grid_r = index_box_r[:,:,py,:,:] #1, K,resl***2,2\n featuremap_l = outputs_l[py]\n featuremap_r = outputs_r[py]\n lx = 2 * (grid_l[:, :, :, 0] / featuremap_l.size(3) - 0.5)\n ly = 2 * (grid_l[:, :, :, 1] / featuremap_l.size(2) - 0.5)\n rx = 2 * (grid_r[:, :, :, 0] / featuremap_r.size(3) - 0.5)\n ry = 2 * (grid_r[:, :, :, 1] / featuremap_r.size(2) - 0.5)\n\n grid_l = torch.stack((lx, ly),dim=3)\n grid_r = torch.stack((rx, ry), dim=3)\n\n volume_from_heatmap_l.append(torch.nn.functional.grid_sample(featuremap_l[b:b + 1], grid_l)) # 1,64,16K,resl***2\n volume_from_heatmap_r.append(torch.nn.functional.grid_sample(featuremap_r[b:b + 1], grid_r)) # 1,64,16K,resl***2\n\n volume_from_heatmap_l = torch.cat(volume_from_heatmap_l,dim=1) # 1,mm,K,resl***2\n volume_from_heatmap_r = torch.cat(volume_from_heatmap_r, dim=1) # 1,mm,K,resl***2\n\n volume_from_heatmap_l = volume_from_heatmap_l[0].transpose(1, 0)\n volume_from_heatmap_r = volume_from_heatmap_r[0].transpose(1, 0)\n\n\n volume_from_heatmap = volume_from_heatmap_l[:,:128,:] - volume_from_heatmap_r[:,:128,:]\n\n BRF=(volume_from_heatmap_l[:,128:256,:] +volume_from_heatmap_r[:,128:256,:])/2\n semantic = (volume_from_heatmap_l[:, 256:, :] + volume_from_heatmap_r[:, 256:, :]) / 2\n volume_from_heatmap=torch.exp(-(volume_from_heatmap**2)*(BRF**2))\n\n volume_depth=torch.norm(volume_xyz_abs_list,p=2,dim=1,keepdim=True)\n volume_from_heatmap = torch.cat([volume_from_heatmap,volume_xyz_list,volume_depth,semantic], dim=1)\n\n if phase == 'train' or phase == 'val':\n batch_for_point['dim'].append(batch['dim'][b])\n batch_for_point['pos'].append(batch['pos'][b])\n batch_for_point['ori'].append(batch['ori'][b])\n batch_for_point['dim_real'].append(batch['dim_real'][b])\n batch_for_point['pos_real'].append(batch['pos_real'][b])\n batch_for_point['ori_real'].append(batch['ori_real'][b])\n batch_for_point['reg_mask'].append(batch['reg_mask'][b])\n batch_for_point['dim_est'].append(batch['dim_est'][b])\n batch_for_point['pos_est'].append(batch['pos_est'][b])\n batch_for_point['ori_est_scalar'].append(batch['ori_est_scalar'][b])\n pointNet_input_list_l.append(volume_from_heatmap_l)\n pointNet_input_list_r.append(volume_from_heatmap_r)\n pointNet_input_list_xyz_abs.append(volume_xyz_abs_list)\n pointNet_input_consis.append(volume_from_heatmap)\n\n pointNet_input_tensor_l = torch.cat(pointNet_input_list_l, dim=0)\n pointNet_input_tensor_r = torch.cat(pointNet_input_list_r, dim=0)\n pointNet_input_tensor_consis = torch.cat(pointNet_input_consis, dim=0)\n pointNet_input_tensor_xyz_abs = torch.cat(pointNet_input_list_xyz_abs, dim=0)\n\n input_model = {}\n input_model['input_feat_l'] = pointNet_input_tensor_l\n input_model['input_feat_r'] = pointNet_input_tensor_r\n input_model['input_feat_xyz_abs'] = pointNet_input_tensor_xyz_abs\n input_model['input_feat_consis'] = pointNet_input_tensor_consis\n if phase == 'train' or phase =='val':\n batch_for_point['dim'] = torch.cat(batch_for_point['dim'], dim=0)\n 
batch_for_point['pos'] = torch.cat(batch_for_point['pos'], dim=0)\n batch_for_point['ori'] = torch.cat(batch_for_point['ori'], dim=0)\n batch_for_point['dim_real'] = torch.cat(batch_for_point['dim_real'], dim=0)\n batch_for_point['pos_real'] = torch.cat(batch_for_point['pos_real'], dim=0)\n batch_for_point['ori_real'] = torch.cat(batch_for_point['ori_real'], dim=0)\n\n batch_for_point['dim_est'] = torch.cat(batch_for_point['dim_est'], dim=0)\n batch_for_point['pos_est'] = torch.cat(batch_for_point['pos_est'], dim=0)\n batch_for_point['ori_est_scalar'] = torch.cat(batch_for_point['ori_est_scalar'], dim=0)\n batch_for_point['reg_mask'] = torch.cat(batch_for_point['reg_mask'], dim=0)\n input_model['input_batch'] = batch_for_point\n #input_model['obj_num']=obj_num\n return input_model\n\n\n"
] | [
[
"torch.stack",
"torch.nonzero",
"torch.linspace",
"torch.exp",
"torch.norm",
"torch.nn.functional.grid_sample",
"torch.meshgrid",
"torch.cat"
]
] |
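`featuremap2gridpoint()` in the row above samples image features at projected 3D grid points by rescaling pixel coordinates into the [-1, 1] range that `torch.nn.functional.grid_sample` expects. A minimal sketch of that normalization on placeholder tensors:

```python
import torch
import torch.nn.functional as F

feat = torch.rand(1, 64, 32, 96)                         # (N, C, H, W) feature map
px = torch.rand(1, 500, 2) * torch.tensor([96.0, 32.0])  # (x, y) pixel coordinates
gx = 2 * (px[..., 0] / feat.size(3) - 0.5)               # map x to [-1, 1]
gy = 2 * (px[..., 1] / feat.size(2) - 0.5)               # map y to [-1, 1]
grid = torch.stack((gx, gy), dim=2).unsqueeze(2)         # (N, 500, 1, 2)
sampled = F.grid_sample(feat, grid)                      # (1, 64, 500, 1)
```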
FelixNeutatz/auto-sklearn | [
"b5d141603332041475ed746aa1640334f5561aea"
] | [
"autosklearn/pipeline/components/data_preprocessing/imputation/categorical_imputation.py"
] | [
"from ConfigSpace.configuration_space import ConfigurationSpace\nfrom autosklearn.pipeline.components.base import AutoSklearnPreprocessingAlgorithm\nfrom autosklearn.pipeline.constants import DENSE, SPARSE, UNSIGNED_DATA, INPUT\nfrom ConfigSpace.hyperparameters import CategoricalHyperparameter, UniformIntegerHyperparameter, UniformFloatHyperparameter\nfrom ConfigSpace.conditions import EqualsCondition\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\n\nclass CategoricalImputation(AutoSklearnPreprocessingAlgorithm):\n \"\"\"\n Substitute missing values by 2\n \"\"\"\n\n def __init__(self, strategy=\"constant\", n_neighbors=5,\n weights='uniform', training_fraction=0.5, random_state=None):\n self.strategy = strategy\n self.n_neighbors = n_neighbors\n self.weights = weights\n self.random_stated = random_state\n self.training_fraction = training_fraction\n\n def fit(self, X, y=None):\n import sklearn.impute\n\n if self.strategy == 'constant':\n self.preprocessor = sklearn.impute.SimpleImputer(strategy='constant', fill_value=2, copy=False)\n elif self.strategy == 'most-frequent':\n self.preprocessor = sklearn.impute.SimpleImputer(strategy='most_frequent', copy=False)\n elif self.strategy == 'knn':\n self.preprocessor = sklearn.impute.KNNImputer(n_neighbors=self.n_neighbors, weights=self.weights, copy=False)\n\n X_new = None\n try:\n min_training_instances = max(\n [self.training_fraction * len(X), 10 * len(np.unique(y)), self.n_neighbors + 1])\n X_new, _, _, _ = train_test_split(X, y, train_size=min_training_instances, random_state=42)\n except:\n X_new = X\n\n self.preprocessor.fit(X_new)\n return self\n\n def transform(self, X):\n if self.preprocessor is None:\n raise NotImplementedError()\n X = self.preprocessor.transform(X).astype(int)\n return X\n\n def fit_transform(self, X, y=None):\n return self.fit(X, y).transform(X)\n\n @staticmethod\n def get_properties(dataset_properties=None):\n return {'shortname': 'CategoricalImputation',\n 'name': 'Categorical Imputation',\n 'handles_missing_values': True,\n 'handles_nominal_values': True,\n 'handles_numerical_features': True,\n 'prefers_data_scaled': False,\n 'prefers_data_normalized': False,\n 'handles_regression': True,\n 'handles_classification': True,\n 'handles_multiclass': True,\n 'handles_multilabel': True,\n 'is_deterministic': True,\n # TODO find out of this is right!\n 'handles_sparse': True,\n 'handles_dense': True,\n 'input': (DENSE, SPARSE, UNSIGNED_DATA),\n 'output': (INPUT,),\n 'preferred_dtype': None}\n\n @staticmethod\n def get_hyperparameter_search_space(dataset_properties=None):\n cs = ConfigurationSpace()\n strategy = CategoricalHyperparameter(\"strategy\", [\"constant\", \"most-frequent\", \"knn\"],\n default_value=\"constant\")\n cs.add_hyperparameter(strategy)\n\n # knn hyperparameters\n n_neighbors = UniformIntegerHyperparameter(name=\"n_neighbors\", lower=2, upper=100, log=True, default_value=5)\n weights = CategoricalHyperparameter(name=\"weights\", choices=[\"uniform\", \"distance\"], default_value=\"uniform\")\n cs.add_hyperparameters([n_neighbors, weights])\n\n n_neighbors_depends_on_knn = EqualsCondition(n_neighbors, strategy, \"knn\")\n weights_depends_on_knn = EqualsCondition(weights, strategy, \"knn\")\n cs.add_conditions([n_neighbors_depends_on_knn, weights_depends_on_knn])\n\n training_fraction = UniformFloatHyperparameter(\"training_fraction\", 0.0001, 1.0, log=True, default_value=0.5)\n cs.add_hyperparameter(training_fraction)\n\n return cs\n"
] | [
[
"numpy.unique",
"sklearn.model_selection.train_test_split"
]
] |
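In its default `constant` branch, the component in the row above fills missing categorical values with the literal 2. A minimal sklearn sketch of that behaviour on a toy matrix:

```python
import numpy as np
import sklearn.impute

X = np.array([[0.0, 1.0], [np.nan, 3.0]])
imputer = sklearn.impute.SimpleImputer(strategy="constant", fill_value=2, copy=False)
print(imputer.fit_transform(X).astype(int))   # [[0 1]
                                              #  [2 3]]
```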
helloworldpark/PyEMD | [
"d28481b3244f317c196dbfe92af7e2d776b64382"
] | [
"PyEMD/EMD_matlab.py"
] | [
"#!/usr/bin/python\r\n# coding: UTF-8\r\n#\r\n# Author: Dawid Laszuk\r\n# Contact: [email protected]\r\n#\r\n# Edited: 11/05/2017\r\n#\r\n# Feel free to contact for any information.\r\n\r\nfrom __future__ import division, print_function\r\n\r\nimport logging\r\nimport numpy as np\r\nimport time\r\n\r\nfrom scipy.interpolate import interp1d\r\nfrom PyEMD.PyEMD.splines import *\r\n\r\nclass EMD:\r\n \"\"\"\r\n Empirical Mode Decomposition\r\n\r\n *Note:*\r\n Default and recommended package for EMD is EMD.py.\r\n This is meant to provide with the same results as MATLAB version of EMD,\r\n which is not necessarily the most efficient or numerically accurate.\r\n\r\n Method of decomposing signal into Intrinsic Mode Functions (IMFs)\r\n based on algorithm presented in Huang et al. [1].\r\n\r\n Algorithm was validated with Rilling et al. [2] Matlab's version from 3.2007.\r\n\r\n [1] N. E. Huang et al., \"The empirical mode decomposition and the\r\n Hilbert spectrum for non-linear and non stationary time series\r\n analysis\", Proc. Royal Soc. London A, Vol. 454, pp. 903-995, 1998\r\n [2] G. Rilling, P. Flandrin and P. Goncalves, \"On Empirical Mode\r\n Decomposition and its algorithms\", IEEE-EURASIP Workshop on\r\n Nonlinear Signal and Image Processing NSIP-03, Grado (I), June 2003\r\n \"\"\"\r\n\r\n logger = logging.getLogger(__name__)\r\n\r\n def __init__(self):\r\n\r\n self.splineKind = 'cubic'\r\n\r\n self.nbsym = 2\r\n self.reduceScale = 1.\r\n self.maxIteration = 500\r\n self.scaleFactor = 100\r\n\r\n self.FIXE = 0\r\n self.FIXE_H = 0\r\n\r\n self.stop1 = 0.05\r\n self.stop2 = 0.5\r\n self.stop3 = 0.05\r\n\r\n self.DTYPE = np.float64\r\n self.MAX_ITERATION = 1000\r\n\r\n self.TIME = False\r\n\r\n def extractMaxMinSpline(self, T, S):\r\n \"\"\"\r\n Input:\r\n -----------------\r\n S - Input signal array. Should be 1D.\r\n T - Time array. 
If none passed numpy arange is created.\r\n\r\n Output:\r\n -----------------\r\n maxSpline - Upper envelope of signal S.\r\n minSpline - Bottom envelope of signal S.\r\n maxExtrema - Position (1st row) and values (2nd row) of maxima.\r\n minExtrema - Position (1st row) and values (2nd row) of minma.\r\n \"\"\"\r\n\r\n # Get indexes of extrema\r\n maxPos, maxVal, minPos, minVal, _ = self.findExtrema(T, S)\r\n\r\n if len(maxPos) + len(minPos) < 3: return [-1]*4\r\n\r\n # Extrapolation of signal (ober boundaries)\r\n maxExtrema, minExtrema = self.preparePoints(S, T, maxPos, maxVal, minPos, minVal)\r\n\r\n _, maxSpline = self.splinePoints(T, maxExtrema, self.splineKind)\r\n _, minSpline = self.splinePoints(T, minExtrema, self.splineKind)\r\n\r\n return maxSpline, minSpline, maxExtrema, minExtrema\r\n\r\n def preparePoints(self, S, T, maxPos, maxVal, minPos, minVal):\r\n \"\"\"\r\n Adds to signal extrema according to mirror technique.\r\n Number of added points depends on nbsym variable.\r\n\r\n Input:\r\n ---------\r\n S: Signal (1D numpy array).\r\n T: Timeline (1D numpy array).\r\n maxPos: sorted time positions of maxima.\r\n maxVal: signal values at maxPos positions.\r\n minPos: sorted time positions of minima.\r\n minVal: signal values at minPos positions.\r\n\r\n Output:\r\n ---------\r\n minExtrema: Position (1st row) and values (2nd row) of minima.\r\n minExtrema: Position (1st row) and values (2nd row) of maxima.\r\n \"\"\"\r\n\r\n # Find indices for time array of extrema\r\n indmin = np.array([np.nonzero(T==t)[0] for t in minPos]).flatten()\r\n indmax = np.array([np.nonzero(T==t)[0] for t in maxPos]).flatten()\r\n\r\n # Local variables\r\n nbsym = self.nbsym\r\n endMin, endMax = len(minPos), len(maxPos)\r\n\r\n ####################################\r\n # Left bound - mirror nbsym points to the left\r\n if indmax[0] < indmin[0]:\r\n if S[0] > S[indmin[0]]:\r\n lmax = indmax[1:min(endMax,nbsym+1)][::-1]\r\n lmin = indmin[0:min(endMin,nbsym+0)][::-1]\r\n lsym = indmax[0]\r\n else:\r\n lmax = indmax[0:min(endMax,nbsym)][::-1]\r\n lmin = np.append(indmin[0:min(endMin,nbsym-1)][::-1],0)\r\n lsym = 0\r\n else:\r\n if S[0] < S[indmax[0]]:\r\n lmax = indmax[0:min(endMax,nbsym+0)][::-1]\r\n lmin = indmin[1:min(endMin,nbsym+1)][::-1]\r\n lsym = indmin[0]\r\n else:\r\n lmax = np.append(indmax[0:min(endMax,nbsym-1)][::-1],0)\r\n lmin = indmin[0:min(endMin,nbsym)][::-1]\r\n lsym = 0\r\n\r\n ####################################\r\n # Right bound - mirror nbsym points to the right\r\n if indmax[-1] < indmin[-1]:\r\n if S[-1] < S[indmax[-1]]:\r\n rmax = indmax[max(endMax-nbsym,0):][::-1]\r\n rmin = indmin[max(endMin-nbsym-1,0):-1][::-1]\r\n rsym = indmin[-1]\r\n else:\r\n rmax = np.append(indmax[max(endMax-nbsym+1,0):], len(S)-1)[::-1]\r\n rmin = indmin[max(endMin-nbsym,0):][::-1]\r\n rsym = len(S)-1\r\n else:\r\n if S[-1] > S[indmin[-1]]:\r\n rmax = indmax[max(endMax-nbsym-1,0):-1][::-1]\r\n rmin = indmin[max(endMin-nbsym,0):][::-1]\r\n rsym = indmax[-1]\r\n else:\r\n rmax = indmax[max(endMax-nbsym,0):][::-1]\r\n rmin = np.append(indmin[max(endMin-nbsym+1,0):], len(S)-1)[::-1]\r\n rsym = len(S)-1\r\n\r\n # In case any array missing\r\n if not lmin.size: lmin = indmin\r\n if not rmin.size: rmin = indmin\r\n if not lmax.size: lmax = indmax\r\n if not rmax.size: rmax = indmax\r\n\r\n # Mirror points\r\n tlmin = 2*T[lsym]-T[lmin]\r\n tlmax = 2*T[lsym]-T[lmax]\r\n trmin = 2*T[rsym]-T[rmin]\r\n trmax = 2*T[rsym]-T[rmax]\r\n\r\n # If mirrored points are not outside passed time range.\r\n if tlmin[0] > T[0] 
or tlmax[0] > T[0]:\r\n if lsym == indmax[0]:\r\n lmax = indmax[0:min(endMax,nbsym)][::-1]\r\n else:\r\n lmin = indmin[0:min(endMin,nbsym)][::-1]\r\n\r\n if lsym == 0:\r\n raise Exception('bug')\r\n\r\n lsym = 0\r\n tlmin = 2*T[lsym]-T[lmin]\r\n tlmax = 2*T[lsym]-T[lmax]\r\n\r\n if trmin[-1] < T[-1] or trmax[-1] < T[-1]:\r\n if rsym == indmax[-1]:\r\n rmax = indmax[max(endMax-nbsym,0):][::-1]\r\n else:\r\n rmin = indmin[max(endMin-nbsym,0):][::-1]\r\n\r\n if rsym == len(S)-1:\r\n raise Exception('bug')\r\n\r\n rsym = len(S)-1\r\n trmin = 2*T[rsym]-T[rmin]\r\n trmax = 2*T[rsym]-T[rmax]\r\n\r\n zlmax = S[lmax]\r\n zlmin = S[lmin]\r\n zrmax = S[rmax]\r\n zrmin = S[rmin]\r\n\r\n tmin = np.append(tlmin, np.append(T[indmin], trmin))\r\n tmax = np.append(tlmax, np.append(T[indmax], trmax))\r\n zmin = np.append(zlmin, np.append(S[indmin], zrmin))\r\n zmax = np.append(zlmax, np.append(S[indmax], zrmax))\r\n\r\n maxExtrema = np.array([tmax, zmax], dtype=self.DTYPE)\r\n minExtrema = np.array([tmin, zmin], dtype=self.DTYPE)\r\n\r\n # Make double sure, that each extremum is significant\r\n maxExtrema = np.delete(maxExtrema, np.where(maxExtrema[0,1:]==maxExtrema[0,:-1]),axis=1)\r\n minExtrema = np.delete(minExtrema, np.where(minExtrema[0,1:]==minExtrema[0,:-1]),axis=1)\r\n\r\n return maxExtrema, minExtrema\r\n\r\n def splinePoints(self, T, extrema, splineKind):\r\n \"\"\"\r\n Constructs spline over given points.\r\n\r\n Input:\r\n ---------\r\n T: Time array.\r\n extrema: Poistion (1st row) and values (2nd row) of points.\r\n splineKind: Type of spline.\r\n\r\n Output:\r\n ---------\r\n T: Poistion array.\r\n spline: Spline over the given points.\r\n \"\"\"\r\n\r\n kind = splineKind.lower()\r\n t = T[np.r_[T>=extrema[0,0]] & np.r_[T<=extrema[0,-1]]]\r\n if t.dtype != self.DTYPE: self.logger.error('t.dtype: '+str(t.dtype))\r\n if extrema.dtype != self.DTYPE: self.logger.error('extrema.dtype: '+str(xtrema.dtype))\r\n\r\n if kind == \"akima\":\r\n return t, akima(extrema[0], extrema[1], t)\r\n\r\n elif kind == 'cubic':\r\n if extrema.shape[1]>3:\r\n return t, interp1d(extrema[0], extrema[1], kind=kind)(t).astype(self.DTYPE)\r\n else:\r\n return self.cubicSpline_3points(T, extrema)\r\n\r\n elif kind in ['slinear', 'quadratic', 'linear']:\r\n return T, interp1d(extrema[0], extrema[1], kind=kind)(t).astype(self.DTYPE)\r\n\r\n else:\r\n raise ValueError(\"No such interpolation method!\")\r\n\r\n def cubicSpline_3points(self, T, extrema):\r\n \"\"\"\r\n Apperently scipy.interpolate.interp1d does not support\r\n cubic spline for less than 4 points.\r\n \"\"\"\r\n\r\n x0, x1, x2 = extrema[0]\r\n y0, y1, y2 = extrema[1]\r\n\r\n x1x0, x2x1 = x1-x0, x2-x1\r\n y1y0, y2y1 = y1-y0, y2-y1\r\n _x1x0, _x2x1 = 1./x1x0, 1./x2x1\r\n\r\n m11, m12, m13= 2*_x1x0, _x1x0, 0\r\n m21, m22, m23 = _x1x0, 2.*(_x1x0+_x2x1), _x2x1\r\n m31, m32, m33 = 0, _x2x1, 2.*_x2x1\r\n\r\n v1 = 3*y1y0*_x1x0*_x1x0\r\n v3 = 3*y2y1*_x2x1*_x2x1\r\n v2 = v1+v3\r\n\r\n M = np.matrix([[m11,m12,m13],[m21,m22,m23],[m31,m32,m33]])\r\n v = np.matrix([v1,v2,v3]).T\r\n k = np.array(np.linalg.inv(M)*v)\r\n\r\n a1 = k[0]*x1x0 - y1y0\r\n b1 =-k[1]*x1x0 + y1y0\r\n a2 = k[1]*x2x1 - y2y1\r\n b2 =-k[2]*x2x1 + y2y1\r\n\r\n t = T[np.r_[T>=x0] & np.r_[T<=x2]]\r\n t1 = (T[np.r_[T>=x0]&np.r_[T< x1]] - x0)/x1x0\r\n t2 = (T[np.r_[T>=x1]&np.r_[T<=x2]] - x1)/x2x1\r\n t11, t22 = 1.-t1, 1.-t2\r\n\r\n q1 = t11*y0 + t1*y1 + t1*t11*(a1*t11 + b1*t1)\r\n q2 = t22*y1 + t2*y2 + t2*t22*(a2*t22 + b2*t2)\r\n q = np.append(q1,q2)\r\n\r\n return t, q.astype(self.DTYPE)\r\n\r\n 
@classmethod\r\n def findExtrema(cls, t, s):\r\n \"\"\"\r\n Finds extrema and zero-crossings.\r\n\r\n Input:\r\n ---------\r\n S: Signal.\r\n T: Time array.\r\n\r\n Output:\r\n ---------\r\n localMaxPos: Time positions of maxima.\r\n localMaxVal: Values of signal at localMaxPos positions.\r\n localMinPos: Time positions of minima.\r\n localMinVal: Values of signal at localMinPos positions.\r\n indzer: Indexes of zero crossings.\r\n \"\"\"\r\n\r\n # Finds indexes of zero-crossings\r\n s1, s2 = s[:-1], s[1:]\r\n indzer = np.nonzero(s1*s2<0)[0]\r\n if np.any(s==0):\r\n iz = np.nonzero(s==0)[0]\r\n indz = []\r\n if np.any(np.diff(iz)==1):\r\n zer = (s==0)\r\n dz = np.diff(np.append(np.append(0, zer), 0))\r\n debz = np.nonzero(dz==1)[0]\r\n finz = np.nonzero(dz==-1)[0]-1\r\n indz = np.round((debz+finz)/2)\r\n else:\r\n indz = iz\r\n\r\n indzer = np.sort(np.append(indzer, indz))\r\n\r\n\r\n # Finds local extrema\r\n d = np.diff(s)\r\n d1, d2 = d[:-1], d[1:]\r\n indmin = np.nonzero(np.r_[d1*d2<0] & np.r_[d1<0])[0]+1\r\n indmax = np.nonzero(np.r_[d1*d2<0] & np.r_[d1>0])[0]+1\r\n\r\n # When two or more points have the same value\r\n if np.any(d==0):\r\n\r\n imax, imin = [], []\r\n\r\n bad = (d==0)\r\n dd = np.diff(np.append(np.append(0, bad), 0))\r\n debs = np.nonzero(dd==1)[0]\r\n fins = np.nonzero(dd==-1)[0]\r\n if debs[0]==1:\r\n if len(debs) > 1:\r\n debs, fins = debs[1:], fins[1:]\r\n else:\r\n debs, fins = [], []\r\n\r\n if len(debs) > 0:\r\n if fins[-1] == len(s)-1:\r\n if len(debs) > 1:\r\n debs, fins = debs[:-1], fins[:-1]\r\n else:\r\n debs, fins = [], []\r\n\r\n lc = len(debs)\r\n if lc > 0:\r\n for k in range(lc):\r\n if d[debs[k]-1] > 0:\r\n if d[fins[k]] < 0:\r\n imax.append(round((fins[k]+debs[k])/2.))\r\n else:\r\n if d[fins[k]] > 0:\r\n imin.append(round((fins[k]+debs[k])/2.))\r\n\r\n if len(imax) > 0:\r\n indmax = indmax.tolist()\r\n for x in imax: indmax.append(int(x))\r\n indmax.sort()\r\n\r\n if len(imin) > 0:\r\n indmin = indmin.tolist()\r\n for x in imin: indmin.append(int(x))\r\n indmin.sort()\r\n\r\n localMaxPos = t[indmax]\r\n localMaxVal = s[indmax]\r\n localMinPos = t[indmin]\r\n localMinVal = s[indmin]\r\n\r\n return localMaxPos, localMaxVal, localMinPos, localMinVal, indzer\r\n\r\n def stop_sifting(self, imf, envMax, envMin, mean, extNo):\r\n \"\"\"\r\n Criterium for stopping sifting process.\r\n Based on conditions presented in [1].\r\n\r\n [1] G. Rilling, P. Flandrin and P. Goncalves\r\n \"On Empirical Mode Decomposition and its\r\n algorithms\", 2003\r\n\r\n Input:\r\n ---------\r\n imf: Current imf.\r\n envMax: Upper envelope of imf.\r\n envMin: Bottom envelope of imf.\r\n mean: Mean of envelopes.\r\n extNo: Number of extrema.\r\n\r\n Output:\r\n ---------\r\n boolean: True if stopping criteria are meet.\r\n \"\"\"\r\n\r\n amp = np.abs(envMax - envMin)/2.\r\n sx = np.abs(mean)/amp\r\n\r\n f1 = np.mean(sx > self.stop1) > self.stop3\r\n f2 = np.any(sx > self.stop2)\r\n f3 = extNo > 2\r\n\r\n if ( not (f1 or f2) ) and f3:\r\n return True\r\n else:\r\n return False\r\n\r\n @staticmethod\r\n def _common_dtype(x, y):\r\n\r\n dtype = np.find_common_type([x.dtype, y.dtype], [])\r\n if x.dtype != dtype: x = x.astype(dtype)\r\n if y.dtype != dtype: y = y.astype(dtype)\r\n\r\n return x, y\r\n\r\n def emd(self, S, T=None, maxImf=None):\r\n \"\"\"\r\n Performs Emerical Mode Decomposition on signal S.\r\n The decomposition is limited to maxImf imf. No limitation as default.\r\n Returns IMF functions in dic format. 
IMF = {0:imf0, 1:imf1...}.\r\n\r\n Input:\r\n ---------\r\n S: Signal.\r\n T: Positions of signal. If none passed numpy arange is created.\r\n maxImf: IMF number to which decomposition should be performed.\r\n As a default, all IMFs are returned.\r\n\r\n Output:\r\n ---------\r\n return IMF, EXT, TIME, ITER, imfNo\r\n IMF: Signal IMFs in dictionary type. IMF = {0:imf0, 1:imf1...}\r\n EXT: Number of extrema for each IMF. IMF = {0:ext0, 1:ext1...}\r\n ITER: Number of iteration for each IMF.\r\n imfNo: Number of IMFs.\r\n \"\"\"\r\n\r\n if T is None: T = np.arange(len(S), dtype=S.dtype)\r\n if maxImf is None: maxImf = -1\r\n\r\n # Make sure same types are dealt\r\n S, T = self._common_dtype(S, T)\r\n self.DTYPE = S.dtype\r\n\r\n Res = S.astype(self.DTYPE)\r\n scale = 1.\r\n Res, scaledS = Res/scale, S/scale\r\n imf = np.zeros(len(S), dtype=self.DTYPE)\r\n imfOld = Res.copy()\r\n\r\n if Res.dtype!=self.DTYPE: self.logger.error('Res.dtype: '+str(Res.dtype))\r\n if scaledS.dtype!=self.DTYPE: self.logger.error('scaledS.dtype: '+str(scaledS.dtype))\r\n if imf.dtype!=self.DTYPE: self.logger.error('imf.dtype: '+str(imf.dtype))\r\n if imfOld.dtype!=self.DTYPE: self.logger.error('imfOld.dtype: '+str(imfOld.dtype))\r\n if T.dtype!=self.DTYPE: self.logger.error('T.dtype: '+str(T.dtype))\r\n\r\n if S.shape != T.shape:\r\n info = \"Time array should be the same size as signal.\"\r\n raise Exception(info)\r\n\r\n # Create arrays\r\n IMF = {} # Dic for imfs signals\r\n EXT = {} # Dic for number of extrema\r\n ITER = {} # Dic for number of iterations\r\n TIME = {} # Dic for time of computation\r\n imfNo = 0\r\n notFinish = True\r\n\r\n while(notFinish):\r\n self.logger.debug('IMF -- '+str(imfNo))\r\n\r\n #~ Res = scaledS - np.sum([IMF[i] for i in range(imfNo)],axis=0)\r\n Res -= imf\r\n imf = Res.copy()\r\n mean = np.zeros(len(S), dtype=self.DTYPE)\r\n\r\n # Counters\r\n n = 0 # All iterations for current imf.\r\n n_h = 0 # counts when |#zero - #ext| <=1\r\n\r\n # Time counter\r\n timeInit = time.time()\r\n if self.TIME:\r\n singleTime = time.time()\r\n\r\n while(n<self.MAX_ITERATION):\r\n n += 1\r\n\r\n if self.TIME:\r\n self.logger.info(\"Execution time: \"+str(time.time() - singleTime))\r\n singleTime = time.time()\r\n ext_res = self.findExtrema(T, imf)\r\n MP, mP = ext_res[0], ext_res[2]\r\n indzer = ext_res[4]\r\n\r\n extNo = len(mP)+len(MP)\r\n nzm = len(indzer)\r\n\r\n if extNo > 2:\r\n\r\n # Plotting. Either into file, or on-screen display.\r\n imfOld = imf.copy()\r\n imf = imf - self.reduceScale*mean\r\n\r\n env_ext = self.extractMaxMinSpline(T, imf)\r\n maxEnv, minEnv = env_ext[0], env_ext[1]\r\n\r\n if isinstance(maxEnv, int):\r\n notFinish = True\r\n break\r\n\r\n mean = 0.5*(maxEnv+minEnv)\r\n\r\n if maxEnv.dtype!=self.DTYPE: self.logger.error('maxEnvimf.dtype: '+str(maxEnv.dtype))\r\n if minEnv.dtype!=self.DTYPE: self.logger.error('minEnvimf.dtype: '+str(minEnvimf.dtype))\r\n if imf.dtype!=self.DTYPE: self.logger.error('imf.dtype: '+str(imf.dtype))\r\n if mean.dtype!=self.DTYPE: self.logger.error('mean.dtype: '+str(mean.dtype))\r\n\r\n # Stop, because of too many iterations\r\n if n > self.maxIteration:\r\n self.logger.info('TOO MANY ITERATIONS! 
BREAK!')\r\n break\r\n\r\n # Fix number of iterations\r\n if self.FIXE:\r\n if n>=self.FIXE+1: break\r\n\r\n # Fix number of iterations after number of zero-crossings\r\n # and extrema differ at most by one.\r\n elif self.FIXE_H:\r\n\r\n ext_res = self.findExtrema(T, imf)\r\n mP, MP, indzer = ext_res[0], ext_res[2], ext_res[4]\r\n extNo = len(mP)+len(MP)\r\n nzm = len(indzer)\r\n\r\n if n == 1: continue\r\n if abs(extNo-nzm)>1: n_h = 0\r\n else: n_h += 1\r\n\r\n # STOP\r\n if n_h >= self.FIXE_H: break\r\n\r\n # Stops after default stopping criteria are meet.\r\n else:\r\n\r\n mP,mV,MP,MV, indzer = self.findExtrema(T, imf)\r\n extNo = len(mP)+len(MP)\r\n nzm = len(indzer)\r\n\r\n f1 = self.stop_sifting(imf, maxEnv, minEnv, mean, extNo)\r\n f2 = abs(extNo - nzm)<2\r\n\r\n # STOP\r\n if f1 and f2: break\r\n\r\n else:\r\n notFinish = False\r\n break\r\n\r\n IMF[imfNo] = imf.copy()\r\n ITER[imfNo] = n\r\n EXT[imfNo] = extNo\r\n TIME[imfNo] = time.time() - timeInit\r\n imfNo += 1\r\n\r\n if imfNo==maxImf-1:\r\n notFinish = False\r\n break\r\n\r\n #~ Saving residuum if meaningful\r\n Res = scaledS - np.sum([IMF[i] for i in range(imfNo)],axis=0)\r\n if np.sum(np.abs(Res)) > 1e-10:\r\n IMF[imfNo] = Res\r\n ITER[imfNo] = 0\r\n EXT[imfNo] = extNo\r\n TIME[imfNo] = 0\r\n imfNo += 1\r\n\r\n for key in list(IMF.keys()):\r\n IMF[key] *= scale\r\n return IMF, EXT, ITER, imfNo\r\n\r\n###################################################\r\n## Beggining of program\r\n\r\nif __name__ == \"__main__\":\r\n\r\n import pylab as plt\r\n\r\n # Logging options\r\n logging.basicConfig(level=logging.DEBUG)\r\n\r\n # EMD options\r\n maxImf = -1\r\n DTYPE = np.float64\r\n\r\n # Signal options\r\n N = 1000\r\n tMin, tMax = 0, 1\r\n T = np.linspace(tMin, tMax, N, dtype=DTYPE)\r\n\r\n S = 6*T +np.cos(8*np.pi**T)+0.5*np.cos(40*np.pi*T)\r\n S = S.astype(DTYPE)\r\n\r\n # Prepare and run EMD\r\n emd = EMD()\r\n emd.FIXE_H = 5\r\n #~ emd.FIXE = 10\r\n emd.nbsym = 2\r\n emd.splineKind = 'cubic'\r\n emd.DTYPE = DTYPE\r\n IMF, EXT, ITER, imfNo = emd.emd(S, T, maxImf)\r\n\r\n # Save results (IMFs) into file\r\n npIMF = np.zeros((imfNo, N), dtype=DTYPE)\r\n for i in range(imfNo): npIMF[i] = IMF[i]\r\n\r\n np.save('imfs', npIMF)\r\n\r\n # Plotting\r\n #~ c = np.floor(np.sqrt(imfNo+3))\r\n #~ r = np.ceil( (imfNo+3)/c)\r\n c = np.floor(np.sqrt(imfNo+1))\r\n r = np.ceil( (imfNo+1)/c)\r\n\r\n plt.ioff()\r\n plt.subplot(r,c,1)\r\n plt.plot(T, S, 'r')\r\n plt.title(\"Original signal\")\r\n plt.xlabel('Time [s]')\r\n plt.ylabel('Amplitude')\r\n\r\n #~ plt.subplot(r,c,2)\r\n #~ plt.plot([EXT[i] for i in range(imfNo)], 'o')\r\n #~ plt.ylim(0, max([EXT[i] for i in range(imfNo)])+1)\r\n #~ plt.title(\"Number of extrema\")\r\n #~\r\n #~ plt.subplot(r,c,3)\r\n #~ plt.plot([ITER[i] for i in range(imfNo)], 'o')\r\n #~ plt.ylim(0, max([ITER[i] for i in range(imfNo)])+1)\r\n #~ plt.title(\"Number of iterations\")\r\n\r\n for num in range(imfNo):\r\n #~ plt.subplot(r,c,num+4)\r\n plt.subplot(r,c,num+2)\r\n plt.plot(T, IMF[num],'g')\r\n plt.xlabel('Time')\r\n plt.ylabel('Amplitude')\r\n\r\n if num == imfNo-1:\r\n plt.title('Residue')\r\n else:\r\n plt.title(\"Imf \"+str(num))\r\n\r\n plt.tight_layout()\r\n plt.show()\r\n"
] | [
[
"numpy.save",
"scipy.interpolate.interp1d",
"numpy.diff",
"numpy.any",
"numpy.append",
"numpy.abs",
"numpy.cos",
"numpy.where",
"numpy.round",
"numpy.nonzero",
"numpy.linspace",
"numpy.mean",
"numpy.ceil",
"numpy.zeros",
"numpy.array",
"numpy.linalg.inv",
"numpy.matrix",
"numpy.sqrt",
"numpy.find_common_type"
]
] |
simpla-fusion/spdb | [
"be6667eb6c7d464f68b0fd51ca2a8f021581eb84"
] | [
"examples/obsolete/putslice_eq.py"
] | [
"# Definition of the class structures in file imas.py\nimport imas\nimport numpy\nimport sys\nimport os\n\n'''\nThis sample program will create a pulse file (shot 13, run 1) and will\nput an example of equilibirium IDS using put_slice methods.\n'''\n\n# This routine reads an array of pfsystems IDSs in the database, filling\n# some fields of the IDSS\n\nTEST_DATABASE_NAME = 'test'\n\n\ndef put_ids():\n \"\"\"Class Itm is the main class for the UAL.\n\n It contains a set of field classes, each corresponding to a IDS\n defined in the UAL The parameters passed to this creator define the\n shot and run number. The second pair of arguments defines the\n reference shot and run and is used when the a new database is\n created, as in this example.\n\n \"\"\"\n\n shot = 13\n time = 1\n interp = 1\n\n imas_obj = imas.ids(13, 1, 13, 1)\n # Create a new instance of database\n imas_obj.create_env(\"fydev\", \"test\", \"3\")\n\n if imas_obj.isConnected():\n print('Creation of data entry OK!')\n else:\n print('Creation of data entry FAILED!')\n sys.exit()\n\n number = 10\n\n # Allocate a first generic vector and its time base\n lentime_1 = 3\n vect1DDouble_1 = numpy.empty([lentime_1])\n time_1 = numpy.empty([lentime_1])\n\n for i in range(lentime_1):\n time_1[i] = i\n vect1DDouble_1[i] = i * 10\n\n print('========================================================')\n print(time_1)\n print(vect1DDouble_1)\n\n # Allocate a second generic vector and its time base\n lentime_2 = 4\n vect1DDouble_2 = numpy.empty([lentime_2])\n time_2 = numpy.empty([lentime_2])\n\n for i in range(lentime_2):\n time_2[i] = i\n vect1DDouble_2[i] = i * 11\n\n print('========================================================')\n print(time_2)\n print(vect1DDouble_2)\n\n vect2DDouble_1 = numpy.zeros([3, 3])\n for i in range(3):\n for j in range(3):\n vect2DDouble_1[i, j] = i * 100 + j\n\n print('========================================================')\n print(vect2DDouble_1)\n # Allocate a second generic vector and its time base\n lentime_2 = 4\n vect1DDouble_2 = numpy.empty([lentime_2])\n time_2 = numpy.empty([lentime_2])\n\n for i in range(lentime_2):\n time_2[i] = i\n vect1DDouble_2[i] = i * 11\n\n print('========================================================')\n print(time_2)\n print(vect1DDouble_2)\n\n vect2DDouble_1 = numpy.zeros([3, 3])\n for i in range(3):\n for j in range(3):\n vect2DDouble_1[i, j] = i * 100 + j\n\n print('========================================================')\n print(vect2DDouble_1)\n\n vect2DDouble_2 = vect2DDouble_1 + 10000\n '''\n print( '========================================================')\n print( vect3DDouble_2)\n '''\n imas_obj.equilibrium.ids_properties.comment = 'This is a test ids'\n\n # A sample int\n\n # Mandatory to define this property\n imas_obj.equilibrium.ids_properties.homogeneous_time = 1\n imas_obj.equilibrium.resize(1)\n imas_obj.equilibrium.time_slice[0].profiles_2d.resize(2)\n imas_obj.equilibrium.time_slice[0].profiles_2d[0].grid_type.name = 'GRID TYPE 1A'\n imas_obj.equilibrium.time_slice[0].profiles_2d[1].grid_type.name = 'GRID TYPE 2B'\n\n imas_obj.equilibrium.time.resize(1)\n imas_obj.equilibrium.time_slice[0].profiles_2d[0].r.resize(3, 3)\n imas_obj.equilibrium.time_slice[0].profiles_2d[1].r.resize(3, 3)\n\n print('Start Put, writing first slice')\n imas_obj.equilibrium.time_slice[0].profiles_2d[0].r[:, 0] = vect2DDouble_1[0, :]\n imas_obj.equilibrium.time_slice[0].profiles_2d[1].r[:, 0] = vect2DDouble_2[0, :]\n imas_obj.equilibrium.time[0] = time_1[0]\n 
imas_obj.equilibrium.put()\n print('Completed Put ')\n\n for i in range(lentime_1):\n print('========================================================')\n print('vect3DDouble_1[i,:,:]')\n print(vect2DDouble_1[i, :])\n print('========================================================')\n\n imas_obj.equilibrium.time_slice[0].profiles_2d[0].r[:, i] = vect2DDouble_1[i, :]\n print('========================================================')\n print('imas_obj.equilibrium.time_slice[0].profiles_2d[0].r')\n print(imas_obj.equilibrium.time_slice[0].profiles_2d[0].r[:, i])\n print('========================================================')\n imas_obj.equilibrium.time_slice[0].profiles_2d[1].r[:, i] = vect2DDouble_2[i, :]\n imas_obj.equilibrium.time[0] = time_1[i]\n print(('Writing slice={0}'.format(i)))\n imas_obj.equilibrium.putSlice()\n\n print('========================================================')\n print(imas_obj.equilibrium.time_slice[0].profiles_2d[0].r)\n\n '''\n print( '========================================================')\n print (imas_obj.equilibrium.time_slice[0].profiles_2d[1].r)\n '''\n imas_obj.close()\n\n\nput_ids()\n"
] | [
[
"numpy.empty",
"numpy.zeros"
]
] |
ChuanTianML/learn_gnmt | [
"19e97e04feaecd7682abaf6247a0f9e3f37f9892"
] | [
"nmt/utils/common_test_utils.py"
] | [
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Common utility functions for tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow.python.ops import lookup_ops\n\nfrom ..utils import iterator_utils\nfrom ..utils import standard_hparams_utils\n\n\ndef create_test_hparams(unit_type=\"lstm\",\n encoder_type=\"uni\",\n num_layers=4,\n attention=\"\",\n attention_architecture=None,\n use_residual=False,\n inference_indices=None,\n num_translations_per_input=1,\n beam_width=0,\n init_op=\"uniform\"):\n \"\"\"Create training and inference test hparams.\"\"\"\n num_residual_layers = 0\n if use_residual:\n # TODO(rzhao): Put num_residual_layers computation logic into\n # `model_utils.py`, so we can also test it here.\n num_residual_layers = 2\n\n standard_hparams = standard_hparams_utils.create_standard_hparams()\n\n # Networks\n standard_hparams.num_units = 5\n standard_hparams.num_encoder_layers = num_layers\n standard_hparams.num_decoder_layers = num_layers\n standard_hparams.dropout = 0.5\n standard_hparams.unit_type = unit_type\n standard_hparams.encoder_type = encoder_type\n standard_hparams.residual = use_residual\n standard_hparams.num_residual_layers = num_residual_layers\n\n # Attention mechanisms\n standard_hparams.attention = attention\n standard_hparams.attention_architecture = attention_architecture\n\n # Train\n standard_hparams.init_op = init_op\n standard_hparams.num_train_steps = 1\n standard_hparams.decay_scheme = \"\"\n\n # Infer\n standard_hparams.tgt_max_len_infer = 100\n standard_hparams.beam_width = beam_width\n standard_hparams.num_translations_per_input = num_translations_per_input\n\n # Misc\n standard_hparams.forget_bias = 0.0\n standard_hparams.random_seed = 3\n\n # Vocab\n standard_hparams.src_vocab_size = 5\n standard_hparams.tgt_vocab_size = 5\n standard_hparams.eos = \"eos\"\n standard_hparams.sos = \"sos\"\n standard_hparams.src_vocab_file = \"\"\n standard_hparams.tgt_vocab_file = \"\"\n standard_hparams.src_embed_file = \"\"\n standard_hparams.tgt_embed_file = \"\"\n\n # For inference.py test\n standard_hparams.subword_option = \"bpe\"\n standard_hparams.src = \"src\"\n standard_hparams.tgt = \"tgt\"\n standard_hparams.src_max_len = 400\n standard_hparams.tgt_eos_id = 0\n standard_hparams.inference_indices = inference_indices\n return standard_hparams\n\n\ndef create_test_iterator(hparams, mode):\n \"\"\"Create test iterator.\"\"\"\n src_vocab_table = lookup_ops.index_table_from_tensor(\n tf.constant([hparams.eos, \"a\", \"b\", \"c\", \"d\"]))\n tgt_vocab_mapping = tf.constant([hparams.sos, hparams.eos, \"a\", \"b\", \"c\"])\n tgt_vocab_table = lookup_ops.index_table_from_tensor(tgt_vocab_mapping)\n if mode == tf.contrib.learn.ModeKeys.INFER:\n reverse_tgt_vocab_table = 
lookup_ops.index_to_string_table_from_tensor(\n tgt_vocab_mapping)\n\n src_dataset = tf.data.Dataset.from_tensor_slices(\n tf.constant([\"a a b b c\", \"a b b\"]))\n\n if mode != tf.contrib.learn.ModeKeys.INFER:\n tgt_dataset = tf.data.Dataset.from_tensor_slices(\n tf.constant([\"a b c b c\", \"a b c b\"]))\n return (\n iterator_utils.get_iterator(\n src_dataset=src_dataset,\n tgt_dataset=tgt_dataset,\n src_vocab_table=src_vocab_table,\n tgt_vocab_table=tgt_vocab_table,\n batch_size=hparams.batch_size,\n sos=hparams.sos,\n eos=hparams.eos,\n random_seed=hparams.random_seed,\n num_buckets=hparams.num_buckets),\n src_vocab_table,\n tgt_vocab_table)\n else:\n return (\n iterator_utils.get_infer_iterator(\n src_dataset=src_dataset,\n src_vocab_table=src_vocab_table,\n eos=hparams.eos,\n batch_size=hparams.batch_size),\n src_vocab_table,\n tgt_vocab_table,\n reverse_tgt_vocab_table)\n"
] | [
[
"tensorflow.python.ops.lookup_ops.index_table_from_tensor",
"tensorflow.constant",
"tensorflow.python.ops.lookup_ops.index_to_string_table_from_tensor"
]
] |
floyebolu/GPy | [
"d493b200642196c6d211ea1bcb052f3fbf396f24"
] | [
"GPy/plotting/gpy_plot/latent_plots.py"
] | [
"#===============================================================================\n# Copyright (c) 2015, Max Zwiessele\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of GPy.plotting.gpy_plot.latent_plots nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#===============================================================================\nimport numpy as np\nfrom . import plotting_library as pl\nfrom .plot_util import get_x_y_var,\\\n update_not_existing_kwargs, \\\n helper_for_plot_data, scatter_label_generator, subsample_X,\\\n find_best_layout_for_subplots\n\ndef _wait_for_updates(view, updates):\n if view is not None:\n try:\n if updates:\n clear = raw_input('yes or enter to deactivate updates - otherwise still do updates - use plots[imshow].deactivate() to clear')\n if clear.lower() in 'yes' or clear == '':\n view.deactivate()\n else:\n view.deactivate()\n except AttributeError:\n # No updateable view:\n pass\n except TypeError:\n # No updateable view:\n pass\n\ndef _new_canvas(self, projection, kwargs, which_indices):\n input_1, input_2, input_3 = sig_dims = self.get_most_significant_input_dimensions(which_indices)\n\n if input_3 is None:\n zlabel = None\n else:\n zlabel = 'latent dimension %i' % input_3\n canvas, kwargs = pl().new_canvas(projection=projection, xlabel='latent dimension %i' % input_1,\n ylabel='latent dimension %i' % input_2,\n zlabel=zlabel, **kwargs)\n return canvas, projection, kwargs, sig_dims\n\ndef _plot_latent_scatter(canvas, X, visible_dims, labels, marker, num_samples, projection='2d', **kwargs):\n from .. 
import Tango\n Tango.reset()\n X, labels = subsample_X(X, labels, num_samples)\n scatters = []\n generate_colors = 'color' not in kwargs\n for x, y, z, this_label, _, m in scatter_label_generator(labels, X, visible_dims, marker):\n update_not_existing_kwargs(kwargs, pl().defaults.latent_scatter)\n if generate_colors:\n kwargs['color'] = Tango.nextMedium()\n if projection == '3d':\n scatters.append(pl().scatter(canvas, x, y, Z=z, marker=m, label=this_label, **kwargs))\n else: scatters.append(pl().scatter(canvas, x, y, marker=m, label=this_label, **kwargs))\n return scatters\n\ndef plot_latent_scatter(self, labels=None,\n which_indices=None,\n legend=True,\n plot_limits=None,\n marker='<>^vsd',\n num_samples=1000,\n projection='2d',\n **kwargs):\n \"\"\"\n Plot a scatter plot of the latent space.\n\n :param array-like labels: a label for each data point (row) of the inputs\n :param (int, int) which_indices: which input dimensions to plot against each other\n :param bool legend: whether to plot the legend on the figure\n :param plot_limits: the plot limits for the plot\n :type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))\n :param str marker: markers to use - cycle if more labels then markers are given\n :param kwargs: the kwargs for the scatter plots\n \"\"\"\n canvas, projection, kwargs, sig_dims = _new_canvas(self, projection, kwargs, which_indices)\n\n X, _, _ = get_x_y_var(self)\n if labels is None:\n labels = np.ones(self.num_data)\n legend = False\n else:\n legend = find_best_layout_for_subplots(len(np.unique(labels)))[1]\n scatters = _plot_latent_scatter(canvas, X, sig_dims, labels, marker, num_samples, projection=projection, **kwargs)\n return pl().add_to_canvas(canvas, dict(scatter=scatters), legend=legend)\n\n\ndef plot_latent_inducing(self,\n which_indices=None,\n legend=False,\n plot_limits=None,\n marker=None,\n projection='2d',\n **kwargs):\n \"\"\"\n Plot a scatter plot of the inducing inputs.\n\n :param [int] which_indices: which input dimensions to plot against each other\n :param bool legend: whether to plot the legend on the figure\n :param plot_limits: the plot limits for the plot\n :type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))\n :param str marker: marker to use [default is custom arrow like]\n :param kwargs: the kwargs for the scatter plots\n :param str projection: for now 2d or 3d projection (other projections can be implemented, see developer documentation)\n \"\"\"\n canvas, projection, kwargs, sig_dims = _new_canvas(self, projection, kwargs, which_indices)\n\n if legend: label = 'inducing'\n else: label = None\n if marker is not None:\n kwargs['marker'] = marker\n update_not_existing_kwargs(kwargs, pl().defaults.inducing_2d) # @UndefinedVariable\n from .data_plots import _plot_inducing\n scatters = _plot_inducing(self, canvas, sig_dims[:2], projection, label, **kwargs)\n return pl().add_to_canvas(canvas, dict(scatter=scatters), legend=legend)\n\n\n\n\n\n\ndef _plot_magnification(self, canvas, which_indices, Xgrid,\n xmin, xmax, resolution, updates,\n mean=True, covariance=True,\n kern=None,\n **imshow_kwargs):\n def plot_function(x):\n Xtest_full = np.zeros((x.shape[0], Xgrid.shape[1]))\n Xtest_full[:, which_indices] = x\n\n mf = self.predict_magnification(Xtest_full, kern=kern, mean=mean, covariance=covariance)\n return mf.reshape(resolution, resolution).T\n imshow_kwargs = update_not_existing_kwargs(imshow_kwargs, pl().defaults.magnification)\n try:\n if updates:\n return pl().imshow_interact(canvas, 
plot_function, (xmin[0], xmax[0], xmin[1], xmax[1]), resolution=resolution, **imshow_kwargs)\n else: raise NotImplementedError\n except NotImplementedError:\n return pl().imshow(canvas, plot_function(Xgrid[:, which_indices]), (xmin[0], xmax[0], xmin[1], xmax[1]), **imshow_kwargs)\n\ndef plot_magnification(self, labels=None, which_indices=None,\n resolution=60, marker='<>^vsd', legend=True,\n plot_limits=None,\n updates=False,\n mean=True, covariance=True,\n kern=None, num_samples=1000,\n scatter_kwargs=None, plot_scatter=True,\n **imshow_kwargs):\n \"\"\"\n Plot the magnification factor of the GP on the inputs. This is the\n density of the GP as a gray scale.\n\n :param array-like labels: a label for each data point (row) of the inputs\n :param (int, int) which_indices: which input dimensions to plot against each other\n :param int resolution: the resolution at which we predict the magnification factor\n :param str marker: markers to use - cycle if more labels then markers are given\n :param bool legend: whether to plot the legend on the figure\n :param plot_limits: the plot limits for the plot\n :type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))\n :param bool updates: if possible, make interactive updates using the specific library you are using\n :param bool mean: use the mean of the Wishart embedding for the magnification factor\n :param bool covariance: use the covariance of the Wishart embedding for the magnification factor\n :param :py:class:`~GPy.kern.Kern` kern: the kernel to use for prediction\n :param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples.\n :param imshow_kwargs: the kwargs for the imshow (magnification factor)\n :param kwargs: the kwargs for the scatter plots\n \"\"\"\n input_1, input_2 = which_indices = self.get_most_significant_input_dimensions(which_indices)[:2]\n X = get_x_y_var(self)[0]\n _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, X, plot_limits, which_indices, None, resolution)\n canvas, imshow_kwargs = pl().new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]),\n xlabel='latent dimension %i' % input_1, ylabel='latent dimension %i' % input_2, **imshow_kwargs)\n plots = {}\n if legend and plot_scatter:\n if (labels is not None):\n legend = find_best_layout_for_subplots(len(np.unique(labels)))[1]\n else:\n labels = np.ones(self.num_data)\n legend = False\n if plot_scatter:\n plots['scatters'] = _plot_latent_scatter(canvas, X, which_indices, labels, marker, num_samples, projection='2d', **scatter_kwargs or {})\n plots['view'] = _plot_magnification(self, canvas, which_indices, Xgrid, xmin, xmax, resolution, updates, mean, covariance, kern, **imshow_kwargs)\n retval = pl().add_to_canvas(canvas, plots,\n legend=legend,\n )\n _wait_for_updates(plots['view'], updates)\n return retval\n\n\n\n\ndef _plot_latent(self, canvas, which_indices, Xgrid,\n xmin, xmax, resolution, updates,\n kern=None,\n **imshow_kwargs):\n def plot_function(x):\n Xtest_full = np.zeros((x.shape[0], Xgrid.shape[1]))\n Xtest_full[:, which_indices] = x\n mf = self.predict(Xtest_full, kern=kern)[1]\n if mf.shape[1]==self.output_dim:\n mf = mf.sum(-1)\n else:\n mf *= self.output_dim\n mf = np.log(mf)\n return mf.reshape(resolution, resolution).T\n\n imshow_kwargs = update_not_existing_kwargs(imshow_kwargs, pl().defaults.latent)\n try:\n if updates:\n return pl().imshow_interact(canvas, plot_function, (xmin[0], xmax[0], xmin[1], 
xmax[1]), resolution=resolution, **imshow_kwargs)\n else: raise NotImplementedError\n except NotImplementedError:\n return pl().imshow(canvas, plot_function(Xgrid[:, which_indices]), (xmin[0], xmax[0], xmin[1], xmax[1]), **imshow_kwargs)\n\ndef plot_latent(self, labels=None, which_indices=None,\n resolution=60, legend=True,\n plot_limits=None,\n updates=False,\n kern=None, marker='<>^vsd',\n num_samples=1000, projection='2d',\n scatter_kwargs=None, **imshow_kwargs):\n \"\"\"\n Plot the latent space of the GP on the inputs. This is the\n density of the GP posterior as a grey scale and the\n scatter plot of the input dimemsions selected by which_indices.\n\n :param array-like labels: a label for each data point (row) of the inputs\n :param (int, int) which_indices: which input dimensions to plot against each other\n :param int resolution: the resolution at which we predict the magnification factor\n :param bool legend: whether to plot the legend on the figure\n :param plot_limits: the plot limits for the plot\n :type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))\n :param bool updates: if possible, make interactive updates using the specific library you are using\n :param :py:class:`~GPy.kern.Kern` kern: the kernel to use for prediction\n :param str marker: markers to use - cycle if more labels then markers are given\n :param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples.\n :param imshow_kwargs: the kwargs for the imshow (magnification factor)\n :param scatter_kwargs: the kwargs for the scatter plots\n \"\"\"\n if projection != '2d':\n raise ValueError('Cannot plot latent in other then 2 dimensions, consider plot_scatter')\n input_1, input_2 = which_indices = self.get_most_significant_input_dimensions(which_indices)[:2]\n X = get_x_y_var(self)[0]\n _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, X, plot_limits, which_indices, None, resolution)\n canvas, imshow_kwargs = pl().new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]),\n xlabel='latent dimension %i' % input_1, ylabel='latent dimension %i' % input_2, **imshow_kwargs)\n if legend:\n if (labels is not None):\n legend = find_best_layout_for_subplots(len(np.unique(labels)))[1]\n else:\n labels = np.ones(self.num_data)\n legend = False\n scatters = _plot_latent_scatter(canvas, X, which_indices, labels, marker, num_samples, projection='2d', **scatter_kwargs or {})\n view = _plot_latent(self, canvas, which_indices, Xgrid, xmin, xmax, resolution, updates, kern, **imshow_kwargs)\n retval = pl().add_to_canvas(canvas, dict(scatter=scatters, imshow=view), legend=legend)\n _wait_for_updates(view, updates)\n return retval\n\ndef _plot_steepest_gradient_map(self, canvas, which_indices, Xgrid,\n xmin, xmax, resolution, output_labels, updates,\n kern=None, annotation_kwargs=None,\n **imshow_kwargs):\n if output_labels is None:\n output_labels = range(self.output_dim)\n def plot_function(x):\n Xgrid[:, which_indices] = x\n dmu_dX = np.sqrt(((self.predictive_gradients(Xgrid, kern=kern)[0])**2).sum(1))\n #dmu_dX = self.predictive_gradients(Xgrid, kern=kern)[0].sum(1)\n argmax = np.argmax(dmu_dX, 1).astype(int)\n return dmu_dX.max(1).reshape(resolution, resolution).T, np.array(output_labels)[argmax].reshape(resolution, resolution).T\n annotation_kwargs = update_not_existing_kwargs(annotation_kwargs or {}, pl().defaults.annotation)\n imshow_kwargs = update_not_existing_kwargs(imshow_kwargs 
or {}, pl().defaults.gradient)\n try:\n if updates:\n return dict(annotation=pl().annotation_heatmap_interact(canvas, plot_function, (xmin[0], xmax[0], xmin[1], xmax[1]), resolution=resolution, imshow_kwargs=imshow_kwargs, **annotation_kwargs))\n else:\n raise NotImplementedError\n except NotImplementedError:\n imshow, annotation = pl().annotation_heatmap(canvas, *plot_function(Xgrid[:, which_indices]), extent=(xmin[0], xmax[0], xmin[1], xmax[1]), imshow_kwargs=imshow_kwargs, **annotation_kwargs)\n return dict(heatmap=imshow, annotation=annotation)\n\ndef plot_steepest_gradient_map(self, output_labels=None, data_labels=None, which_indices=None,\n resolution=15, legend=True,\n plot_limits=None,\n updates=False,\n kern=None, marker='<>^vsd',\n num_samples=1000,\n annotation_kwargs=None, scatter_kwargs=None, **imshow_kwargs):\n\n \"\"\"\n Plot the latent space of the GP on the inputs. This is the\n density of the GP posterior as a grey scale and the\n scatter plot of the input dimemsions selected by which_indices.\n\n :param array-like labels: a label for each data point (row) of the inputs\n :param (int, int) which_indices: which input dimensions to plot against each other\n :param int resolution: the resolution at which we predict the magnification factor\n :param bool legend: whether to plot the legend on the figure, if int plot legend columns on legend\n :param plot_limits: the plot limits for the plot\n :type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))\n :param bool updates: if possible, make interactive updates using the specific library you are using\n :param :py:class:`~GPy.kern.Kern` kern: the kernel to use for prediction\n :param str marker: markers to use - cycle if more labels then markers are given\n :param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples.\n :param imshow_kwargs: the kwargs for the imshow (magnification factor)\n :param annotation_kwargs: the kwargs for the annotation plot\n :param scatter_kwargs: the kwargs for the scatter plots\n \"\"\"\n input_1, input_2 = which_indices = self.get_most_significant_input_dimensions(which_indices)[:2]\n X = get_x_y_var(self)[0]\n _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, X, plot_limits, which_indices, None, resolution)\n canvas, imshow_kwargs = pl().new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]),\n xlabel='latent dimension %i' % input_1, ylabel='latent dimension %i' % input_2, **imshow_kwargs)\n if (data_labels is not None):\n legend = find_best_layout_for_subplots(len(np.unique(data_labels)))[1]\n else:\n data_labels = np.ones(self.num_data)\n legend = False\n plots = dict(scatter=_plot_latent_scatter(canvas, X, which_indices, data_labels, marker, num_samples, **scatter_kwargs or {}))\n plots.update(_plot_steepest_gradient_map(self, canvas, which_indices, Xgrid, xmin, xmax, resolution, output_labels, updates, kern, annotation_kwargs=annotation_kwargs, **imshow_kwargs))\n retval = pl().add_to_canvas(canvas, plots, legend=legend)\n _wait_for_updates(plots['annotation'], updates)\n return retval\n\n\n\n\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.argmax",
"numpy.log",
"numpy.array",
"numpy.unique"
]
] |
swpucwf/Deeplearning | [
"be19885d52b7ce8782949d931a1b2994de36679f"
] | [
"OpenCV/video_flow.py"
] | [
"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture('car.mp4')\n\n# params for ShiTomasi corner detection\nfeature_params = dict(maxCorners=100,\n qualityLevel=0.3,\n minDistance=7,\n blockSize=7)\n\n# Parameters for lucas kanade optical flow\nlk_params = dict(winSize=(15, 15),\n maxLevel=2,\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\n# Create some random colors\ncolor = np.random.randint(0, 255, (100, 3))\n\n# Take first frame and find corners in it\nret, old_frame = cap.read()\nold_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\np0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)\n\n# Create a mask image for drawing purposes\nmask = np.zeros_like(old_frame)\n\nwhile (1):\n ret, frame = cap.read()\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # calculate optical flow\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n\n # Select good points\n good_new = p1[st == 1]\n good_old = p0[st == 1]\n\n # draw the tracks\n for i, (new, old) in enumerate(zip(good_new, good_old)):\n a, b = new.ravel()\n c, d = old.ravel()\n mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)\n frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)\n img = cv2.add(frame, mask)\n\n cv2.imshow('frame', img)\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n\n # Now update the previous frame and previous points\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1, 1, 2)\n\ncv2.destroyAllWindows()\ncap.release()\n"
] | [
[
"numpy.zeros_like",
"numpy.random.randint"
]
] |
vita-epfl/pedestrian-transition-dataset | [
"7e1b723a37289850b5ef8628e6881845a24912f9"
] | [
"src/dataset/loader.py"
] | [
"import os\nimport copy\nimport PIL\nimport torch\nimport torchvision\nimport numpy as np\nimport math\n\nimport logging\nfrom typing import List\n\nLOG = logging.getLogger(__name__)\n\n\ndef define_path(use_jaad=True, use_pie=True, use_titan=True):\n \"\"\"\n Define the correct paths to datasets'annotations and images\n \"\"\"\n all_anns_paths = {'JAAD': {'anns': '../../DATA/annotations/JAAD/JAAD_DATA.pkl', \n 'split': '../../DATA/annotations/JAAD/splits'},\n 'PIE': {'anns': '../../DATA/annotations/PIE/PIE_DATA.pkl'},\n 'TITAN': {'anns': '../../DATA/annotations/TITAN/titan_0_4',\n 'split':'../../DATA/annotations/TITAN/splits' }\n }\n all_image_dir = {'JAAD': '../../DATA/JAAD/images/',\n 'PIE': '../../DATA/PIE/images/',\n 'TITAN': '../../DATA/TITAN/images_anonymized/'\n }\n anns_paths = {}\n image_dir = {}\n if use_jaad:\n anns_paths['JAAD'] = all_anns_paths['JAAD']\n image_dir['JAAD'] = all_image_dir['JAAD']\n if use_pie:\n anns_paths['PIE'] = all_anns_paths['PIE']\n image_dir['PIE'] = all_image_dir['PIE']\n if use_titan:\n anns_paths['TITAN'] = all_anns_paths['TITAN']\n image_dir['TITAN'] = all_image_dir['TITAN']\n\n return anns_paths, image_dir\n \n\nclass ImageList(torch.utils.data.Dataset):\n \"\"\"\n Basic dataloader for images\n \"\"\"\n\n def __init__(self, image_paths, preprocess=None):\n self.image_paths = image_paths\n self.preprocess = preprocess\n\n def __getitem__(self, index):\n image_path = self.image_paths[index]\n with open(image_path, 'rb') as f:\n image = PIL.Image.open(f).convert('RGB')\n if self.preprocess is not None:\n image = self.preprocess(image)\n\n return image\n\n def __len__(self):\n return len(self.image_paths)\n\n\nclass MultiLoader:\n # Class for loading data from mulitple datasets\n last_task_index = None\n \n\n def __init__(self, loaders: List[torch.utils.data.DataLoader], \n weights=None, n_batches=None):\n \n self.loaders = loaders\n self._weights = weights\n\n if self._weights is None:\n self._weights = [1.0 / len(loaders) for _ in range(len(loaders))]\n elif len(self._weights) == len(loaders) - 1:\n self._weights.append(1.0 - sum(self._weights))\n elif len(self._weights) == len(loaders):\n pass\n else:\n raise Exception('invalid dataset weights: {}'.format(self._weights))\n assert all(w > 0.0 for w in self._weights)\n sum_w = sum(self._weights)\n # normalize weights between datasets\n self._weights = [w / sum_w for w in self._weights]\n LOG.info('dataset weights: %s', self._weights)\n # set the total number of batches in one epoch\n self.n_batches = int(min(len(l) / w for l, w in zip(loaders, self._weights)))\n if n_batches is not None:\n self.n_batches = min(self.n_batches, n_batches)\n\n def __iter__(self):\n loader_iters = [iter(l) for l in self.loaders]\n # counter of loaded batches for each dataset\n n_loaded = [0 for _ in self.loaders]\n while True:\n # select loader for one iteration\n loader_index = int(np.argmin([n / w for n, w in zip(n_loaded, self._weights)]))\n next_batch = next(loader_iters[loader_index], None)\n if next_batch is None:\n break\n n_loaded[loader_index] += 1\n MultiLoader.last_task_index = loader_index\n # generator\n yield next_batch\n # termination\n if sum(n_loaded) >= self.n_batches:\n break\n\n def __len__(self):\n return self.n_batches\n \n\nclass FrameDataset(torch.utils.data.Dataset):\n\n def __init__(self, samples, image_dir, preprocess=None):\n self.samples = samples\n self.image_dir = image_dir\n self.preprocess = preprocess\n\n def __getitem__(self, index):\n ids = list(self.samples.keys())\n idx = ids[index]\n 
frame = self.samples[idx]['frame']\n bbox = copy.deepcopy(self.samples[idx]['bbox'])\n source = self.samples[idx][\"source\"]\n anns = {'bbox': bbox, 'source': source}\n TTE = self.samples[idx][\"TTE\"]\n if 'trans_label' in list(self.samples[idx].keys()):\n label = self.samples[idx]['trans_label']\n else:\n label = None\n if 'behavior' in list(self.samples[idx].keys()):\n behavior = self.samples[idx]['behavior']\n else:\n behavior = [-1,-1,-1,-1] # no behavior annotations\n if 'attributes' in list(self.samples[idx].keys()):\n attributes = self.samples[idx]['attributes'] # scene attributes\n else:\n attributes = [-1,-1,-1,-1,-1,-1]\n image_path = None\n # image paths\n if source == \"JAAD\":\n vid = self.samples[idx]['video_number']\n image_path = os.path.join(self.image_dir['JAAD'], vid, '{:05d}.png'.format(frame))\n elif source == \"PIE\":\n vid = self.samples[idx]['video_number']\n sid = self.samples[idx]['set_number']\n image_path = os.path.join(self.image_dir['PIE'], sid, vid, '{:05d}.png'.format(frame))\n elif source == \"TITAN\":\n vid = self.samples[idx]['video_number']\n image_path = os.path.join(self.image_dir['TITAN'], vid, 'images', '{:06}.png'.format(frame))\n\n with open(image_path, 'rb') as f:\n img = PIL.Image.open(f).convert('RGB')\n if self.preprocess is not None:\n img, anns = self.preprocess(img, anns)\n img_tensor = torchvision.transforms.ToTensor()(img)\n if label is not None:\n label = torch.tensor(label)\n label = label.to(torch.float32)\n \n if math.isnan(TTE):\n pass\n else:\n TTE = round(self.samples[idx][\"TTE\"],2)\n TTE = torch.tensor(TTE).to(torch.float32)\n attributes = torch.tensor(attributes).to(torch.float32)\n sample = {'image': img_tensor, 'bbox': anns['bbox'], 'id': idx,\n 'label': label, 'source': source, 'TTE': TTE,\n 'attributes': attributes, 'behavior': behavior\n }\n\n return sample\n\n def __len__(self):\n return len(self.samples.keys())\n\n\nclass SequenceDataset(torch.utils.data.Dataset):\n \"\"\"\n Basic dataloader for loading sequence/history samples\n \"\"\"\n\n def __init__(self, samples, image_dir, preprocess=None):\n \"\"\"\n :params: samples: transition history samples(dict)\n image_dir: root dir for images extracted from video clips\n preprocess: optional preprocessing on image tensors and annotations\n \"\"\"\n self.samples = samples\n self.image_dir = image_dir\n self.preprocess = preprocess\n\n def __getitem__(self, index):\n ids = list(self.samples.keys())\n idx = ids[index]\n frames = self.samples[idx]['frame']\n bbox = copy.deepcopy(self.samples[idx]['bbox'])\n source = self.samples[idx][\"source\"]\n action = self.samples[idx]['action']\n TTE = round(self.samples[idx][\"TTE\"],2)\n if 'trans_label' in list(self.samples[idx].keys()):\n label = self.samples[idx]['trans_label']\n else:\n label = None\n bbox_new= []\n image_path = None\n # image paths\n img_tensors = []\n for i in range(len(frames)):\n anns = {'bbox': bbox[i], 'source': source}\n if source == \"JAAD\":\n vid = self.samples[idx]['video_number']\n image_path = os.path.join(self.image_dir['JAAD'], vid, '{:05d}.png'.format(frames[i]))\n elif source == \"PIE\":\n vid = self.samples[idx]['video_number']\n sid = self.samples[idx]['set_number']\n image_path = os.path.join(self.image_dir['PIE'], sid, vid, '{:05d}.png'.format(frames[i]))\n elif source == \"TITAN\":\n vid = self.samples[idx]['video_number']\n image_path = os.path.join(self.image_dir['TITAN'], vid, 'images', '{:06}.png'.format(frames[i]))\n with open(image_path, 'rb') as f:\n img = 
PIL.Image.open(f).convert('RGB')\n if self.preprocess is not None:\n img, anns = self.preprocess(img, anns)\n img_tensors.append(torchvision.transforms.ToTensor()(img))\n bbox_new.append(anns['bbox'])\n img_tensors = torch.stack(img_tensors)\n if label is not None:\n label = torch.tensor(label)\n label = label.to(torch.float32)\n sample = {'image': img_tensors, 'bbox': bbox_new, 'action': action, 'id': idx, 'label': label, 'source': source, 'TTE': TTE }\n\n return sample\n\n def __len__(self):\n return len(self.samples.keys())\n\n\nclass PaddedSequenceDataset(torch.utils.data.Dataset):\n \"\"\"\n Dataloader for loading sequence/history samples,\n all sequences are padded to unify the length\n \"\"\"\n\n def __init__(self, samples, image_dir, padded_length=10, preprocess=None, hflip_p=0.0):\n \"\"\"\n :params: samples: transition history samples(dict)\n image_dir: root dir for images extracted from video clips\n padded_length: length of each sequence after padded\n preprocess: optional preprocessing on image tensors and annotations\n \"\"\"\n self.samples = samples\n self.image_dir = image_dir\n self.preprocess = preprocess\n self.padded_length = padded_length\n self.hflip_p = hflip_p\n\n def __getitem__(self, index):\n ids = list(self.samples.keys())\n idx = ids[index]\n frames = self.samples[idx]['frame']\n bbox = copy.deepcopy(self.samples[idx]['bbox'])\n source = self.samples[idx][\"source\"]\n action = self.samples[idx]['action']\n TTE = self.samples[idx][\"TTE\"]\n if source == \"PIE\":\n set_number = self.samples[idx]['set_number']\n else:\n set_number = None\n if 'trans_label' in list(self.samples[idx].keys()):\n label = self.samples[idx]['trans_label']\n else:\n label = None\n if 'behavior' in list(self.samples[idx].keys()):\n behavior = self.samples[idx]['behavior']\n else:\n behavior = [-1,-1,-1,-1]\n if 'attributes' in list(self.samples[idx].keys()):\n attributes = self.samples[idx]['attributes']\n else:\n attributes = [-1,-1,-1,-1,-1,-1]\n bbox_new = []\n bbox_ped_new = []\n image_path = None\n # image paths\n img_tensors = []\n hflip = True if float(torch.rand(1).item()) < self.hflip_p else False\n for i in range(len(frames)):\n anns = {'bbox': bbox[i], 'source': source}\n if source == \"JAAD\":\n vid = self.samples[idx]['video_number']\n image_path = os.path.join(self.image_dir['JAAD'], vid, '{:05d}.png'.format(frames[i]))\n elif source == \"PIE\":\n vid = self.samples[idx]['video_number']\n sid = self.samples[idx]['set_number']\n image_path = os.path.join(self.image_dir['PIE'], sid, vid, '{:05d}.png'.format(frames[i]))\n elif source == \"TITAN\":\n vid = self.samples[idx]['video_number']\n image_path = os.path.join(self.image_dir['TITAN'], vid, 'images', '{:06}.png'.format(frames[i]))\n with open(image_path, 'rb') as f:\n img = PIL.Image.open(f).convert('RGB')\n if hflip:\n img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n w, h = img.size\n x_max = w - anns['bbox'][0]\n x_min = w - anns['bbox'][2]\n anns['bbox'][0] = x_min\n anns['bbox'][2] = x_max\n anns['bbox_ped'] = copy.deepcopy(anns['bbox'])\n if self.preprocess is not None:\n img, anns = self.preprocess(img, anns)\n img_tensors.append(torchvision.transforms.ToTensor()(img))\n bbox_new.append(anns['bbox'])\n bbox_ped_new.append(anns['bbox_ped'])\n \n img_tensors = torch.stack(img_tensors)\n imgs_size = img_tensors.size()\n img_tensors_padded = torch.zeros((self.padded_length, imgs_size[1], imgs_size[2], imgs_size[3]))\n img_tensors_padded[:imgs_size[0], :, :, :] = img_tensors\n bbox_new_padded = copy.deepcopy(bbox_new)\n 
bbox_ped_new_padded = copy.deepcopy(bbox_ped_new)\n action_padded = copy.deepcopy(action)\n behavior_padded = copy.deepcopy(behavior)\n for i in range(imgs_size[0],self.padded_length):\n bbox_new_padded.append([0,0,0,0])\n bbox_ped_new_padded.append([0,0,0,0])\n action_padded.append(-1)\n behavior_padded.append([-1,-1,-1,-1])\n # seq_len = torch.squeeze(torch.LongTensor(imgs_size[0]))\n seq_len = imgs_size[0]\n if label is not None:\n label = torch.tensor(label)\n label = label.to(torch.float32)\n TTE_tag = -1\n if math.isnan(TTE):\n pass\n else:\n TTE = round(self.samples[idx][\"TTE\"],2)\n TTE = torch.tensor(TTE).to(torch.float32)\n TTE_tag = torch.tensor(TTE_tag)\n TTE_tag = TTE_tag.to(torch.float32)\n attributes = torch.tensor(attributes).to(torch.float32)\n sample = {'image': img_tensors_padded, 'bbox': bbox_new_padded, 'bbox_ped': bbox_ped_new_padded,\n 'seq_length': seq_len, 'action': action_padded, 'id': idx, 'label': label,\n 'source': source, 'TTE': TTE, \n 'behavior': behavior_padded, 'attributes': attributes}\n\n return sample\n\n def __len__(self):\n return len(self.samples.keys())\n \n\n"
] | [
[
"torch.zeros",
"torch.rand",
"torch.stack",
"torch.tensor"
]
] |
mizolotu/DonkeyCarExperiments | [
"3d6be742915efe51c0f5abda4c69a4349a555373"
] | [
"reinforcement_learning/her/utils.py"
] | [
"from collections import OrderedDict\n\nimport numpy as np\nfrom reinforcement_learning.gym import spaces\n\n# Important: gym mixes up ordered and unordered keys\n# and the Dict space may return a different order of keys that the actual one\nKEY_ORDER = ['observation', 'achieved_goal', 'desired_goal']\n\n\nclass HERGoalEnvWrapper(object):\n \"\"\"\n A wrapper that allow to use dict observation space (coming from GoalEnv) with\n the RL algorithms.\n It assumes that all the spaces of the dict space are of the same type.\n\n :param env: (gym.GoalEnv)\n \"\"\"\n\n def __init__(self, env):\n super(HERGoalEnvWrapper, self).__init__()\n self.env = env\n self.metadata = self.env.metadata\n self.action_space = env.action_space\n self.spaces = list(env.observation_space.spaces.values())\n # Check that all spaces are of the same type\n # (current limitation of the wrapper)\n space_types = [type(env.observation_space.spaces[key]) for key in KEY_ORDER]\n assert len(set(space_types)) == 1, \"The spaces for goal and observation\"\\\n \" must be of the same type\"\n\n if isinstance(self.spaces[0], spaces.Discrete):\n self.obs_dim = 1\n self.goal_dim = 1\n else:\n goal_space_shape = env.observation_space.spaces['achieved_goal'].shape\n self.obs_dim = env.observation_space.spaces['observation'].shape[0]\n self.goal_dim = goal_space_shape[0]\n\n if len(goal_space_shape) == 2:\n assert goal_space_shape[1] == 1, \"Only 1D observation spaces are supported yet\"\n else:\n assert len(goal_space_shape) == 1, \"Only 1D observation spaces are supported yet\"\n\n if isinstance(self.spaces[0], spaces.MultiBinary):\n total_dim = self.obs_dim + 2 * self.goal_dim\n self.observation_space = spaces.MultiBinary(total_dim)\n\n elif isinstance(self.spaces[0], spaces.Box):\n lows = np.concatenate([space.low for space in self.spaces])\n highs = np.concatenate([space.high for space in self.spaces])\n self.observation_space = spaces.Box(lows, highs, dtype=np.float32)\n\n elif isinstance(self.spaces[0], spaces.Discrete):\n dimensions = [env.observation_space.spaces[key].n for key in KEY_ORDER]\n self.observation_space = spaces.MultiDiscrete(dimensions)\n\n else:\n raise NotImplementedError(\"{} space is not supported\".format(type(self.spaces[0])))\n\n def convert_dict_to_obs(self, obs_dict):\n \"\"\"\n :param obs_dict: (dict<np.ndarray>)\n :return: (np.ndarray)\n \"\"\"\n # Note: achieved goal is not removed from the observation\n # this is helpful to have a revertible transformation\n if isinstance(self.observation_space, spaces.MultiDiscrete):\n # Special case for multidiscrete\n return np.concatenate([[int(obs_dict[key])] for key in KEY_ORDER])\n return np.concatenate([obs_dict[key] for key in KEY_ORDER])\n\n def convert_obs_to_dict(self, observations):\n \"\"\"\n Inverse operation of convert_dict_to_obs\n\n :param observations: (np.ndarray)\n :return: (OrderedDict<np.ndarray>)\n \"\"\"\n return OrderedDict([\n ('observation', observations[:self.obs_dim]),\n ('achieved_goal', observations[self.obs_dim:self.obs_dim + self.goal_dim]),\n ('desired_goal', observations[self.obs_dim + self.goal_dim:]),\n ])\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n return self.convert_dict_to_obs(obs), reward, done, info\n\n def seed(self, seed=None):\n return self.env.seed(seed)\n\n def reset(self):\n return self.convert_dict_to_obs(self.env.reset())\n\n def compute_reward(self, achieved_goal, desired_goal, info):\n return self.env.compute_reward(achieved_goal, desired_goal, info)\n\n def render(self, 
mode='human'):\n return self.env.render(mode)\n\n def close(self):\n return self.env.close()\n"
] | [
[
"numpy.concatenate"
]
] |
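The HERGoalEnvWrapper row above flattens a GoalEnv's dict observation into a single vector and back, always in the same key order. Below is a minimal numpy-only sketch of that round-trip; the dimensions (obs_dim=10, goal_dim=3) and the helper names dict_to_obs / obs_to_dict are invented for illustration and do not come from the repository.

```python
from collections import OrderedDict

import numpy as np

KEY_ORDER = ['observation', 'achieved_goal', 'desired_goal']


def dict_to_obs(obs_dict):
    # Concatenate the dict entries in a fixed key order, as the wrapper does.
    return np.concatenate([obs_dict[key] for key in KEY_ORDER])


def obs_to_dict(obs, obs_dim, goal_dim):
    # Inverse operation: slice the flat vector back into its named parts.
    return OrderedDict([
        ('observation', obs[:obs_dim]),
        ('achieved_goal', obs[obs_dim:obs_dim + goal_dim]),
        ('desired_goal', obs[obs_dim + goal_dim:]),
    ])


if __name__ == '__main__':
    # Hypothetical sizes: a 10-dimensional observation and 3-dimensional goals.
    sample = OrderedDict([
        ('observation', np.arange(10, dtype=np.float32)),
        ('achieved_goal', np.zeros(3, dtype=np.float32)),
        ('desired_goal', np.ones(3, dtype=np.float32)),
    ])
    flat = dict_to_obs(sample)                       # shape (16,)
    restored = obs_to_dict(flat, obs_dim=10, goal_dim=3)
    assert all(np.array_equal(sample[k], restored[k]) for k in KEY_ORDER)
```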
JuliaChae/faster-rcnn.pytorch | [
"220005b5dbed1dd7e5abcfb85eee9f976a8a5f58"
] | [
"lib/detection_metric/Evaluator.py"
] | [
"###########################################################################################\n# #\n# Evaluator class: Implements the most popular metrics for object detection #\n# #\n# Developed by: Rafael Padilla ([email protected]) #\n# SMT - Signal Multimedia and Telecommunications Lab #\n# COPPE - Universidade Federal do Rio de Janeiro #\n# Last modification: Oct 9th 2018 #\n###########################################################################################\n\nimport os\nimport sys\nfrom collections import Counter\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom detection_metric.BoundingBox import *\nfrom detection_metric.BoundingBoxes import *\nfrom detection_metric.utils import *\n\n\nclass Evaluator:\n def GetPascalVOCMetrics(self,\n boundingboxes,\n IOUThreshold=0.5,\n method=MethodAveragePrecision.EveryPointInterpolation):\n \"\"\"Get the metrics used by the VOC Pascal 2012 challenge.\n Get\n Args:\n boundingboxes: Object of the class BoundingBoxes representing ground truth and detected\n bounding boxes;\n IOUThreshold: IOU threshold indicating which detections will be considered TP or FP\n (default value = 0.5);\n method (default = EveryPointInterpolation): It can be calculated as the implementation\n in the official PASCAL VOC toolkit (EveryPointInterpolation), or applying the 11-point\n interpolatio as described in the paper \"The PASCAL Visual Object Classes(VOC) Challenge\"\n or EveryPointInterpolation\" (ElevenPointInterpolation);\n Returns:\n A list of dictionaries. Each dictionary contains information and metrics of each class.\n The keys of each dictionary are:\n dict['class']: class representing the current dictionary;\n dict['precision']: array with the precision values;\n dict['recall']: array with the recall values;\n dict['AP']: average precision;\n dict['interpolated precision']: interpolated precision values;\n dict['interpolated recall']: interpolated recall values;\n dict['total positives']: total number of ground truth positives;\n dict['total TP']: total number of True Positive detections;\n dict['total FP']: total number of False Negative detections;\n \"\"\"\n ret = [] # list containing metrics (precision, recall, average precision) of each class\n # List with all ground truths (Ex: [imageName,class,confidence=1, (bb coordinates XYX2Y2)])\n groundTruths = []\n # List with all detections (Ex: [imageName,class,confidence,(bb coordinates XYX2Y2)])\n detections = []\n # Get all classes\n classes = []\n # Loop through all bounding boxes and separate them into GTs and detections\n for bb in boundingboxes.getBoundingBoxes():\n # [imageName, class, confidence, (bb coordinates XYX2Y2)]\n if bb.getBBType() == BBType.GroundTruth:\n groundTruths.append([\n bb.getImageName(),\n bb.getClassId(), 1,\n bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n ])\n else:\n detections.append([\n bb.getImageName(),\n bb.getClassId(),\n bb.getConfidence(),\n bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n ])\n # get class\n if bb.getClassId() not in classes:\n classes.append(bb.getClassId())\n classes = sorted(classes)\n # Precision x Recall is obtained individually by each class\n # Loop through by classes\n for c in classes:\n # Get only detection of class c\n dects = []\n [dects.append(d) for d in detections if d[1] == c]\n # Get only ground truths of class c\n gts = []\n [gts.append(g) for g in groundTruths if g[1] == c]\n npos = len(gts)\n # sort detections by decreasing confidence\n dects = sorted(dects, key=lambda conf: conf[2], reverse=True)\n TP = 
np.zeros(len(dects))\n FP = np.zeros(len(dects))\n # create dictionary with amount of gts for each image\n det = Counter([cc[0] for cc in gts])\n for key, val in det.items():\n det[key] = np.zeros(val)\n # print(\"Evaluating class: %s (%d detections)\" % (str(c), len(dects)))\n # Loop through detections\n for d in range(len(dects)):\n # print('dect %s => %s' % (dects[d][0], dects[d][3],))\n # Find ground truth image\n gt = [gt for gt in gts if gt[0] == dects[d][0]]\n iouMax = sys.float_info.min\n for j in range(len(gt)):\n # print('Ground truth gt => %s' % (gt[j][3],))\n iou = Evaluator.iou(dects[d][3], gt[j][3])\n if iou > iouMax:\n iouMax = iou\n jmax = j\n # Assign detection as true positive/don't care/false positive\n if iouMax >= IOUThreshold:\n if det[dects[d][0]][jmax] == 0:\n TP[d] = 1 # count as true positive\n det[dects[d][0]][jmax] = 1 # flag as already 'seen'\n # print(\"TP\")\n else:\n FP[d] = 1 # count as false positive\n # print(\"FP\")\n # - A detected \"cat\" is overlaped with a GT \"cat\" with IOU >= IOUThreshold.\n else:\n FP[d] = 1 # count as false positive\n # print(\"FP\")\n # compute precision, recall and average precision\n acc_FP = np.cumsum(FP)\n acc_TP = np.cumsum(TP)\n rec = acc_TP / npos\n prec = np.divide(acc_TP, (acc_FP + acc_TP))\n # Depending on the method, call the right implementation\n if method == MethodAveragePrecision.EveryPointInterpolation:\n [ap, mpre, mrec, ii] = Evaluator.CalculateAveragePrecision(rec, prec)\n else:\n [ap, mpre, mrec, _] = Evaluator.ElevenPointInterpolatedAP(rec, prec)\n # add class result in the dictionary to be returned\n r = {\n 'class': c,\n 'precision': prec,\n 'recall': rec,\n 'AP': ap,\n 'interpolated precision': mpre,\n 'interpolated recall': mrec,\n 'total positives': npos,\n 'total TP': np.sum(TP),\n 'total FP': np.sum(FP)\n }\n ret.append(r)\n return ret\n\n def PlotPrecisionRecallCurve(self,\n boundingBoxes,\n IOUThreshold=0.5,\n method=MethodAveragePrecision.EveryPointInterpolation,\n showAP=False,\n showInterpolatedPrecision=False,\n savePath=None,\n showGraphic=True):\n \"\"\"PlotPrecisionRecallCurve\n Plot the Precision x Recall curve for a given class.\n Args:\n boundingBoxes: Object of the class BoundingBoxes representing ground truth and detected\n bounding boxes;\n IOUThreshold (optional): IOU threshold indicating which detections will be considered\n TP or FP (default value = 0.5);\n method (default = EveryPointInterpolation): It can be calculated as the implementation\n in the official PASCAL VOC toolkit (EveryPointInterpolation), or applying the 11-point\n interpolatio as described in the paper \"The PASCAL Visual Object Classes(VOC) Challenge\"\n or EveryPointInterpolation\" (ElevenPointInterpolation).\n showAP (optional): if True, the average precision value will be shown in the title of\n the graph (default = False);\n showInterpolatedPrecision (optional): if True, it will show in the plot the interpolated\n precision (default = False);\n savePath (optional): if informed, the plot will be saved as an image in this path\n (ex: /home/mywork/ap.png) (default = None);\n showGraphic (optional): if True, the plot will be shown (default = True)\n Returns:\n A list of dictionaries. 
Each dictionary contains information and metrics of each class.\n The keys of each dictionary are:\n dict['class']: class representing the current dictionary;\n dict['precision']: array with the precision values;\n dict['recall']: array with the recall values;\n dict['AP']: average precision;\n dict['interpolated precision']: interpolated precision values;\n dict['interpolated recall']: interpolated recall values;\n dict['total positives']: total number of ground truth positives;\n dict['total TP']: total number of True Positive detections;\n dict['total FP']: total number of False Negative detections;\n \"\"\"\n results = self.GetPascalVOCMetrics(boundingBoxes, IOUThreshold, method)\n result = None\n # Each resut represents a class\n for result in results:\n if result is None:\n raise IOError('Error: Class %d could not be found.' % classId)\n\n classId = result['class']\n precision = result['precision']\n recall = result['recall']\n average_precision = result['AP']\n mpre = result['interpolated precision']\n mrec = result['interpolated recall']\n npos = result['total positives']\n total_tp = result['total TP']\n total_fp = result['total FP']\n\n plt.close()\n if showInterpolatedPrecision:\n if method == MethodAveragePrecision.EveryPointInterpolation:\n plt.plot(mrec, mpre, '--r', label='Interpolated precision (every point)')\n elif method == MethodAveragePrecision.ElevenPointInterpolation:\n # Uncomment the line below if you want to plot the area\n # plt.plot(mrec, mpre, 'or', label='11-point interpolated precision')\n # Remove duplicates, getting only the highest precision of each recall value\n nrec = []\n nprec = []\n for idx in range(len(mrec)):\n r = mrec[idx]\n if r not in nrec:\n idxEq = np.argwhere(mrec == r)\n nrec.append(r)\n nprec.append(max([mpre[int(id)] for id in idxEq]))\n plt.plot(nrec, nprec, 'or', label='11-point interpolated precision')\n plt.plot(recall, precision, label='Precision')\n plt.xlabel('recall')\n plt.ylabel('precision')\n if showAP:\n ap_str = \"{0:.2f}%\".format(average_precision * 100)\n # ap_str = \"{0:.4f}%\".format(average_precision * 100)\n plt.title('Precision x Recall curve \\nClass: %s, AP: %s' % (str(classId), ap_str))\n else:\n plt.title('Precision x Recall curve \\nClass: %s' % str(classId))\n plt.legend(shadow=True)\n plt.grid()\n ############################################################\n # Uncomment the following block to create plot with points #\n ############################################################\n # plt.plot(recall, precision, 'bo')\n # labels = ['R', 'Y', 'J', 'A', 'U', 'C', 'M', 'F', 'D', 'B', 'H', 'P', 'E', 'X', 'N', 'T',\n # 'K', 'Q', 'V', 'I', 'L', 'S', 'G', 'O']\n # dicPosition = {}\n # dicPosition['left_zero'] = (-30,0)\n # dicPosition['left_zero_slight'] = (-30,-10)\n # dicPosition['right_zero'] = (30,0)\n # dicPosition['left_up'] = (-30,20)\n # dicPosition['left_down'] = (-30,-25)\n # dicPosition['right_up'] = (20,20)\n # dicPosition['right_down'] = (20,-20)\n # dicPosition['up_zero'] = (0,30)\n # dicPosition['up_right'] = (0,30)\n # dicPosition['left_zero_long'] = (-60,-2)\n # dicPosition['down_zero'] = (-2,-30)\n # vecPositions = [\n # dicPosition['left_down'],\n # dicPosition['left_zero'],\n # dicPosition['right_zero'],\n # dicPosition['right_zero'], #'R', 'Y', 'J', 'A',\n # dicPosition['left_up'],\n # dicPosition['left_up'],\n # dicPosition['right_up'],\n # dicPosition['left_up'], # 'U', 'C', 'M', 'F',\n # dicPosition['left_zero'],\n # dicPosition['right_up'],\n # dicPosition['right_down'],\n # 
dicPosition['down_zero'], #'D', 'B', 'H', 'P'\n # dicPosition['left_up'],\n # dicPosition['up_zero'],\n # dicPosition['right_up'],\n # dicPosition['left_up'], # 'E', 'X', 'N', 'T',\n # dicPosition['left_zero'],\n # dicPosition['right_zero'],\n # dicPosition['left_zero_long'],\n # dicPosition['left_zero_slight'], # 'K', 'Q', 'V', 'I',\n # dicPosition['right_down'],\n # dicPosition['left_down'],\n # dicPosition['right_up'],\n # dicPosition['down_zero']\n # ] # 'L', 'S', 'G', 'O'\n # for idx in range(len(labels)):\n # box = dict(boxstyle='round,pad=.5',facecolor='yellow',alpha=0.5)\n # plt.annotate(labels[idx],\n # xy=(recall[idx],precision[idx]), xycoords='data',\n # xytext=vecPositions[idx], textcoords='offset points',\n # arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3\"),\n # bbox=box)\n if savePath is not None:\n plt.savefig(os.path.join(savePath, classId + '.png'))\n if showGraphic is True:\n plt.show()\n # plt.waitforbuttonpress()\n plt.pause(0.05)\n return results\n\n @staticmethod\n def CalculateAveragePrecision(rec, prec):\n mrec = []\n mrec.append(0)\n [mrec.append(e) for e in rec]\n mrec.append(1)\n mpre = []\n mpre.append(0)\n [mpre.append(e) for e in prec]\n mpre.append(0)\n for i in range(len(mpre) - 1, 0, -1):\n mpre[i - 1] = max(mpre[i - 1], mpre[i])\n ii = []\n for i in range(len(mrec) - 1):\n if mrec[1:][i] != mrec[0:-1][i]:\n ii.append(i + 1)\n ap = 0\n for i in ii:\n ap = ap + np.sum((mrec[i] - mrec[i - 1]) * mpre[i])\n # return [ap, mpre[1:len(mpre)-1], mrec[1:len(mpre)-1], ii]\n return [ap, mpre[0:len(mpre) - 1], mrec[0:len(mpre) - 1], ii]\n\n @staticmethod\n # 11-point interpolated average precision\n def ElevenPointInterpolatedAP(rec, prec):\n # def CalculateAveragePrecision2(rec, prec):\n mrec = []\n # mrec.append(0)\n [mrec.append(e) for e in rec]\n # mrec.append(1)\n mpre = []\n # mpre.append(0)\n [mpre.append(e) for e in prec]\n # mpre.append(0)\n recallValues = np.linspace(0, 1, 11)\n recallValues = list(recallValues[::-1])\n rhoInterp = []\n recallValid = []\n # For each recallValues (0, 0.1, 0.2, ... 
, 1)\n for r in recallValues:\n # Obtain all recall values higher or equal than r\n argGreaterRecalls = np.argwhere(mrec[:] >= r)\n pmax = 0\n # If there are recalls above r\n if argGreaterRecalls.size != 0:\n pmax = max(mpre[argGreaterRecalls.min():])\n recallValid.append(r)\n rhoInterp.append(pmax)\n # By definition AP = sum(max(precision whose recall is above r))/11\n ap = sum(rhoInterp) / 11\n # Generating values for the plot\n rvals = []\n rvals.append(recallValid[0])\n [rvals.append(e) for e in recallValid]\n rvals.append(0)\n pvals = []\n pvals.append(0)\n [pvals.append(e) for e in rhoInterp]\n pvals.append(0)\n # rhoInterp = rhoInterp[::-1]\n cc = []\n for i in range(len(rvals)):\n p = (rvals[i], pvals[i - 1])\n if p not in cc:\n cc.append(p)\n p = (rvals[i], pvals[i])\n if p not in cc:\n cc.append(p)\n recallValues = [i[0] for i in cc]\n rhoInterp = [i[1] for i in cc]\n return [ap, rhoInterp, recallValues, None]\n\n # For each detections, calculate IOU with reference\n @staticmethod\n def _getAllIOUs(reference, detections):\n ret = []\n bbReference = reference.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n # img = np.zeros((200,200,3), np.uint8)\n for d in detections:\n bb = d.getAbsoluteBoundingBox(BBFormat.XYX2Y2)\n iou = Evaluator.iou(bbReference, bb)\n # Show blank image with the bounding boxes\n # img = add_bb_into_image(img, d, color=(255,0,0), thickness=2, label=None)\n # img = add_bb_into_image(img, reference, color=(0,255,0), thickness=2, label=None)\n ret.append((iou, reference, d)) # iou, reference, detection\n # cv2.imshow(\"comparing\",img)\n # cv2.waitKey(0)\n # cv2.destroyWindow(\"comparing\")\n return sorted(ret, key=lambda i: i[0], reverse=True) # sort by iou (from highest to lowest)\n\n @staticmethod\n def iou(boxA, boxB):\n # if boxes dont intersect\n if Evaluator._boxesIntersect(boxA, boxB) is False:\n return 0\n interArea = Evaluator._getIntersectionArea(boxA, boxB)\n union = Evaluator._getUnionAreas(boxA, boxB, interArea=interArea)\n # intersection over union\n iou = interArea / union\n assert iou >= 0\n return iou\n\n # boxA = (Ax1,Ay1,Ax2,Ay2)\n # boxB = (Bx1,By1,Bx2,By2)\n @staticmethod\n def _boxesIntersect(boxA, boxB):\n if boxA[0] > boxB[2]:\n return False # boxA is right of boxB\n if boxB[0] > boxA[2]:\n return False # boxA is left of boxB\n if boxA[3] < boxB[1]:\n return False # boxA is above boxB\n if boxA[1] > boxB[3]:\n return False # boxA is below boxB\n return True\n\n @staticmethod\n def _getIntersectionArea(boxA, boxB):\n xA = max(boxA[0], boxB[0])\n yA = max(boxA[1], boxB[1])\n xB = min(boxA[2], boxB[2])\n yB = min(boxA[3], boxB[3])\n # intersection area\n return (xB - xA + 1) * (yB - yA + 1)\n\n @staticmethod\n def _getUnionAreas(boxA, boxB, interArea=None):\n area_A = Evaluator._getArea(boxA)\n area_B = Evaluator._getArea(boxB)\n if interArea is None:\n interArea = Evaluator._getIntersectionArea(boxA, boxB)\n return float(area_A + area_B - interArea)\n\n @staticmethod\n def _getArea(box):\n return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)\n"
] | [
[
"numpy.sum",
"matplotlib.pyplot.legend",
"numpy.cumsum",
"numpy.divide",
"numpy.argwhere",
"numpy.zeros",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.close",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
]
] |
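The Evaluator.py row above decides true/false positives by intersection-over-union of boxes in XYX2Y2 format, with areas computed under a "+ 1" pixel convention. The sketch below reproduces that IoU calculation as a standalone function; the name iou_xyx2y2 and the example boxes are hypothetical, not taken from the repository.

```python
def iou_xyx2y2(box_a, box_b):
    # Boxes are (x1, y1, x2, y2); areas use the evaluator's "+ 1" pixel convention.
    x_a, y_a = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    x_b, y_b = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    if x_b < x_a or y_b < y_a:
        return 0.0  # the boxes do not intersect
    inter = (x_b - x_a + 1) * (y_b - y_a + 1)
    area_a = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
    area_b = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)
    return inter / float(area_a + area_b - inter)


if __name__ == '__main__':
    # Two 10x10 boxes overlapping in a 5x5 corner: 25 / (100 + 100 - 25) ≈ 0.143
    print(iou_xyx2y2((0, 0, 9, 9), (5, 5, 14, 14)))
```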
mindspore-ai/models | [
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c",
"9127b128e2961fd698977e918861dadfad00a44c"
] | [
"official/nlp/transformer/export.py",
"research/cv/meta-baseline/preprocess.py",
"research/cv/ras/src/resnet50.py"
] | [
"# Copyright 2020-2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" export checkpoint file into models\"\"\"\n\nimport numpy as np\n\nimport mindspore as ms\nfrom mindspore import Tensor\n\nfrom src.transformer_model import TransformerModel\nfrom src.model_utils.config import config\nfrom src.model_utils.moxing_adapter import moxing_wrapper\nfrom src.model_utils.device_adapter import get_device_id\nfrom eval import load_weights\n\n\nconfig.batch_size = config.batch_size_ev\nconfig.hidden_dropout_prob = config.hidden_dropout_prob_ev\nconfig.attention_probs_dropout_prob = config.attention_probs_dropout_prob_ev\n\nms.set_context(mode=ms.GRAPH_MODE, device_target=config.device_target)\nif config.device_target == \"Ascend\":\n ms.set_context(device_id=get_device_id())\n\ndef modelarts_pre_process():\n pass\n\n@moxing_wrapper(pre_process=modelarts_pre_process)\ndef export_transformer():\n \"\"\" export_transformer \"\"\"\n tfm_model = TransformerModel(config=config, is_training=False, use_one_hot_embeddings=False)\n\n parameter_dict = load_weights(config.model_file)\n ms.load_param_into_net(tfm_model, parameter_dict)\n\n source_ids = Tensor(np.ones((config.batch_size, config.seq_length)).astype(np.int32))\n source_mask = Tensor(np.ones((config.batch_size, config.seq_length)).astype(np.int32))\n\n ms.export(tfm_model, source_ids, source_mask, file_name=config.file_name, file_format=config.file_format)\n\nif __name__ == '__main__':\n export_transformer()\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\npreprocess\n\"\"\"\nimport os\nimport argparse\nimport numpy as np\nfrom mindspore import ops, context\nimport mindspore.dataset as ds\nimport src.util as util\nfrom src.data.IterSamplers import CategoriesSampler\nfrom src.data.mini_Imagenet import MiniImageNet\n\n\ndef gen_bin(args):\n \"\"\"\n generate binary files\n \"\"\"\n n_way = 5\n n_query = 15\n n_shots = [args.num_shots]\n root_path = os.path.join(args.root_path, args.dataset)\n testset = MiniImageNet(root_path, 'test')\n\n fs_loaders = []\n for n_shot in n_shots:\n test_sampler = CategoriesSampler(testset.data, testset.label, n_way, n_shot + n_query,\n 200,\n args.ep_per_batch)\n test_loader = ds.GeneratorDataset(test_sampler, ['data'], shuffle=True)\n fs_loaders.append(test_loader)\n\n input_path = os.path.join(args.pre_result_path, \"00_data\")\n label_path = os.path.join(args.pre_result_path, \"label.npy\")\n shape_path = os.path.join(args.pre_result_path, \"shape.npy\")\n if not os.path.exists(input_path):\n os.makedirs(input_path)\n\n label_list = []\n shape_list = []\n for i, n_shot in enumerate(n_shots):\n np.random.seed(0)\n label_shot = []\n for j, data in enumerate(fs_loaders[i].create_dict_iterator()):\n x_shot, x_query = data['data'][:, :, :n_shot], data['data'][:, :, n_shot:]\n img_shape = x_query.shape[-3:]\n x_query = x_query.view(args.ep_per_batch, -1,\n *img_shape) # bs*(way*n_query)*3*84*84\n label = util.make_nk_label(n_way, n_query, args.ep_per_batch) # bs*(way*n_query)\n if j == 0:\n shape_list.append(x_shot.shape)\n shape_list.append(x_query.shape)\n\n img_shape = x_shot.shape[-3:]\n\n x_shot = x_shot.view(-1, *img_shape)\n x_query = x_query.view(-1, *img_shape)\n input0 = ops.Concat(0)([x_shot, x_query])\n file_name = \"nshot_\" + str(i) + \"_\" + str(j) + \".bin\"\n input0.asnumpy().tofile(os.path.join(input_path, file_name))\n label_shot.append(label.asnumpy())\n label_list.append(label_shot)\n\n np.save(label_path, label_list)\n np.save(shape_path, shape_list)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--root_path', default='./dataset/')\n parser.add_argument('--device_target', type=str, default='CPU', choices=['Ascend', 'GPU', 'CPU'])\n parser.add_argument('--dataset', default='mini-imagenet')\n parser.add_argument('--ep_per_batch', type=int, default=4)\n parser.add_argument('--pre_result_path', type=str, default='./preprocess_Result')\n parser.add_argument('--num_shots', type=int, default=1)\n\n args_opt = parser.parse_args()\n context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, save_graphs=False)\n gen_bin(args_opt)\n",
"\"\"\"\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n\n\nimport mindspore as ms\nimport mindspore.nn as nn\nimport numpy as np\n\nclass Basic_Block(nn.Cell):\n \"\"\"\n Components constituting resnet50\n \"\"\"\n expansion = 4\n def __init__(self, in_c, out_c, stride=1, downsample=None):\n super(Basic_Block, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=(1, 1), stride=1)\n self.bn1 = nn.BatchNorm2d(out_c, moving_mean_init=0, moving_var_init=1)\n self.conv2 = nn.Conv2d(in_channels=out_c, out_channels=out_c, kernel_size=(3, 3), stride=stride, \\\n pad_mode='pad', padding=1)\n self.bn2 = nn.BatchNorm2d(out_c)\n self.conv3 = nn.Conv2d(in_channels=out_c, out_channels=out_c*4, kernel_size=(1, 1), stride=1)\n self.bn3 = nn.BatchNorm2d(out_c*4)\n self.relu = nn.ReLU()\n self.down_sample_layer = downsample\n\n def construct(self, x):\n \"\"\"\n\n Args:\n x: tensor\n\n Returns: tensor\n\n \"\"\"\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.down_sample_layer is not None:\n residual = self.down_sample_layer(residual)\n out = out + residual\n out = self.relu(out)\n return out\n\n\nclass ResNet50(nn.Cell):\n \"\"\"\n A BoneBack Net of RAS\n \"\"\"\n def __init__(self):\n super(ResNet50, self).__init__()\n self.in_c = 64\n self.conv1 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=2, pad_mode='pad', padding=3)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU()\n self.pool = nn.MaxPool2d(kernel_size=(3, 3), stride=2, pad_mode='same')\n\n self.layer1 = self._build_layer(Basic_Block, 64, 3, 1)\n self.layer2 = self._build_layer(Basic_Block, 128, 4, 2)\n self.layer3 = self._build_layer(Basic_Block, 256, 6, 2)\n self.layer4 = self._build_layer(Basic_Block, 512, 3, 2)\n\n for _, m in self.cells_and_names():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.set_data(ms.Tensor(np.random.normal(0, np.sqrt(2./n), m.weight.data.shape).astype(np.float32)))\n elif isinstance(m, nn.BatchNorm2d):\n m.gamma.set_data(ms.Tensor(np.ones(m.gamma.data.shape, dtype=np.float32)))\n m.beta.set_data(ms.Tensor(np.zeros(m.beta.data.shape, dtype=np.float32)))\n\n def _build_layer(self, block, out_c, blocks, stride):\n layers = []\n downsample = nn.SequentialCell(nn.Conv2d(self.in_c, out_c*block.expansion, kernel_size=(1, 1), stride=stride),\n nn.BatchNorm2d(out_c*4))\n layers.append(block(self.in_c, out_c, stride=stride, downsample=downsample))\n self.in_c = out_c * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.in_c, out_c))\n return nn.SequentialCell(layers)\n\n\n def construct(self, x):\n \"\"\"\n\n Args:\n x:\n\n Returns:\n 5 outputs\n \"\"\"\n out = self.conv1(x)\n out = self.bn1(out)\n x1 = 
self.relu(out)\n x2 = self.pool(x1)\n\n x2 = self.layer1(x2)\n x3 = self.layer2(x2)\n x4 = self.layer3(x3)\n x5 = self.layer4(x4)\n\n return x1, x2, x3, x4, x5\n"
] | [
[
"numpy.ones"
],
[
"numpy.save",
"numpy.random.seed"
],
[
"numpy.sqrt",
"numpy.ones",
"numpy.zeros"
]
] |
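In the resnet50.py entry above, convolution weights are redrawn from N(0, sqrt(2/n)) with n = kernel_h * kernel_w * out_channels. Below is a numpy-only stand-in for that initialisation (no MindSpore required); the function name he_normal and the seed are assumptions made for the example.

```python
import numpy as np


def he_normal(shape, kernel_h, kernel_w, out_channels, seed=None):
    # std = sqrt(2 / n) with n = kernel_h * kernel_w * out_channels, mirroring
    # the fan-based normal initialisation applied to the Conv2d weights above.
    rng = np.random.default_rng(seed)
    n = kernel_h * kernel_w * out_channels
    return rng.normal(0.0, np.sqrt(2.0 / n), size=shape).astype(np.float32)


if __name__ == '__main__':
    # First conv of the backbone: 64 filters of size 7x7 over 3 input channels.
    w = he_normal((64, 3, 7, 7), kernel_h=7, kernel_w=7, out_channels=64, seed=0)
    print(w.shape, float(w.std()))  # empirical std close to sqrt(2 / 3136) ≈ 0.0253
```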
PPACI/Devoxx19-TensorflowJS | [
"4096c8ea460af8a9f8a36df01e88309568318ab8"
] | [
"python/02_train.py"
] | [
"from PIL import Image\nimport numpy\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.python.keras.applications.mobilenet import MobileNet\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras.layers import Dense, Dropout, BatchNormalization\nfrom tensorflow.python.keras.optimizers import Adam\nfrom tensorflow.python.keras.callbacks import *\n\n# Create image data generator\nimage_generator = ImageDataGenerator(\n validation_split=0.15,\n horizontal_flip=True,\n zoom_range=0.1,\n width_shift_range=0.1,\n height_shift_range=0.1,\n rotation_range=5,\n rescale=1. / 255\n)\ntrain_generator = image_generator.flow_from_directory(\"dataset\", subset=\"training\", target_size=(224, 224),\n batch_size=8)\nvalidation_generator = image_generator.flow_from_directory(\"dataset\", subset=\"validation\", target_size=(224, 224),\n batch_size=8)\n\n# Show an image from train set\nImage.fromarray((next(train_generator)[0][0] * 255).astype(numpy.uint8)).show()\n\n# Create model\nmobile = MobileNet(\n input_shape=(224, 224, 3),\n include_top=False,\n weights='imagenet',\n pooling='avg',\n alpha=0.5\n)\noutput = Dropout(0.4)(mobile.output)\noutput = Dense(8, activation=\"relu\")(output)\noutput = Dense(3, activation=\"sigmoid\")(output)\n\nmodel = Model(inputs=mobile.input, outputs=output)\nmodel.summary()\n\n# Compile model\nmodel.compile(optimizer=Adam(amsgrad=True), loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\ncallbacks = [\n ReduceLROnPlateau(\n patience=3,\n factor=0.2,\n verbose=1,\n min_lr=1e-5\n ),\n ModelCheckpoint(\n filepath=\"croissant.hdf5\",\n verbose=1,\n save_best_only=True\n )\n]\n\n# Train\nmodel.fit_generator(\n generator=train_generator,\n steps_per_epoch=256,\n epochs=50,\n verbose=1,\n validation_data=validation_generator,\n validation_steps=40,\n callbacks=callbacks\n)\n"
] | [
[
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.keras.applications.mobilenet.MobileNet",
"tensorflow.python.keras.models.Model",
"tensorflow.python.keras.optimizers.Adam",
"tensorflow.python.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.python.keras.layers.Dropout"
]
] |
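The 02_train.py row above previews one augmented sample by undoing the generator's rescale=1./255 before handing the array to PIL. A small sketch of that conversion follows, using a random array as a stand-in for next(train_generator)[0]; it assumes Pillow and numpy are installed, and the helper name batch_to_image is invented.

```python
import numpy as np
from PIL import Image


def batch_to_image(batch_images, index=0):
    # The generator rescales pixels to [0, 1]; multiply back by 255 and cast to
    # uint8 before handing the array to PIL, as the training script does.
    arr = (batch_images[index] * 255).astype(np.uint8)
    return Image.fromarray(arr)


if __name__ == '__main__':
    fake_batch = np.random.rand(8, 224, 224, 3)  # stand-in for next(train_generator)[0]
    img = batch_to_image(fake_batch)
    print(img.size)  # (224, 224)
```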
fbrundu/scCODA | [
"5508a0419d4a46e33897a5df69ba6d4e1753fadd"
] | [
"sccoda/model/dirichlet_models.py"
] | [
"\"\"\"\nDirichlet-multinomial models for statistical analysis of compositional changes in single-cell data.\n\nFor further reference, see:\nBüttner, Ostner et al.: scCODA: A Bayesian model for compositional single-cell data analysis\n\n:authors: Johannes Ostner\n\"\"\"\nimport numpy as np\nimport time\nimport warnings\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom sccoda.util import result_classes as res\nfrom typing import Optional, Tuple, Collection, Union, List\n\ntfd = tfp.distributions\ntfb = tfp.bijectors\n\n\nclass CompositionalModel:\n \"\"\"\n Dynamical framework for formulation and inference of Bayesian models for compositional data analysis.\n This framework is used to implement scCODA's model as a subclass of this class.\n Tensorflow probability then allows to run a multitude of inference algorithms on these models\n without the need to specify them every time.\n\n A `CompositionalModel` consists of the following parameters:\n\n - `covariate_matrix`: Numpy array that specifies the independent variables (X). Generated equivalently to the covariate matrix of a linear regression.\n\n - `data_matrix`: Dependent variable (Y). Includes the raw cell type counts for every sample.\n\n - `cell_types`, covariate_names: Names of cell types and covariates\n\n - `formula`: String that represents which covariates to include and how to transform them. Used analogously to the formula in R's lm function\n\n A specific compositional model is then implemented as a child class, with the following additional parameters\n specified in the constructor:\n\n - `target_log_prob_fn`: Log-probability function of the model. For more specific information, please refer to (tensorflow probability's API)[https://www.tensorflow.org/probability/api_docs/python/tfp]\n\n - `param_names`: Names of prior and intermediate parameters that are included in the model output. The order has to be the same as in the states_burnin output of `self.get_y_hat`\n\n - `init_params`: Initial values for the inference method\n\n Methods implemented by this class:\n\n - `sampling`: General MCMC sampling that uses a transition kernel\n\n - `get_chains_after_burnin`: Application of burn-in to MCMC sampling results\n\n - MCMC sampling methods (`sample_hmc`, `sample_hmc_da`, `sample_nuts`)\n\n Methods implemented by a child class:\n\n - `get_y_hat`: Calculation of intermediate parameters for all MCMC chain states and posterior mode of the cell count matrix\n\n \"\"\"\n\n def __init__(\n self,\n covariate_matrix: np.ndarray,\n data_matrix: np.ndarray,\n cell_types: List[str],\n covariate_names: List[str],\n formula: str,\n *args,\n **kwargs\n ):\n \"\"\"\n Generalized Constructor of Bayesian compositional model class.\n\n Parameters\n ----------\n covariate_matrix\n covariate matrix, size NxD\n data_matrix\n cell count matrix, size NxK\n cell_types\n Cell type names\n covariate_names\n Covariate names\n \"\"\"\n\n dtype = tf.float64\n self.x = tf.convert_to_tensor(covariate_matrix, dtype)\n\n # Add pseudocount if zeroes are present.\n if np.count_nonzero(data_matrix) != np.size(data_matrix):\n print(\"Zero counts encountered in data! 
Added a pseudocount of 0.5.\")\n data_matrix += 0.5\n self.y = tf.convert_to_tensor(data_matrix, dtype)\n\n sample_counts = np.sum(data_matrix, axis=1)\n self.n_total = tf.cast(sample_counts, dtype)\n self.cell_types = cell_types\n self.covariate_names = covariate_names\n self.formula = formula\n\n # Get dimensions of data\n self.N, self.D = self.x.shape\n self.K = self.y.shape[1]\n\n # Check input data\n if self.N != self.y.shape[0]:\n raise ValueError(\"Wrong input dimensions X[{},:] != y[{},:]\".format(self.x.shape[0], self.y.shape[0]))\n if self.N != len(self.n_total):\n raise ValueError(\"Wrong input dimensions X[{},:] != n_total[{}]\".format(self.x.shape[0], len(self.n_total)))\n\n def sampling(\n self,\n num_results: int,\n num_burnin: int,\n kernel,\n init_state: dict,\n trace_fn,\n ) -> Tuple[List[any], List[any], float]:\n \"\"\"\n MCMC sampling process (tensorflow 2)\n\n Parameters\n ----------\n num_results\n MCMC chain length (default 20000)\n num_burnin\n Number of burnin iterations (default 5000)\n kernel\n tensorflow MCMC kernel object\n init_state\n Starting parameters\n trace_fn\n tracing function\n\n Returns\n -------\n MCMC chain states and results\n\n states\n States of MCMC chain\n kernel_results\n sampling meta-information\n duration\n Duration of MCMC sampling process\n \"\"\"\n\n # HMC sampling function\n @tf.function\n def sample_mcmc(num_results_, num_burnin_, kernel_, current_state_, trace_fn):\n\n return tfp.mcmc.sample_chain(\n num_results=num_results_,\n num_burnin_steps=num_burnin_,\n kernel=kernel_,\n current_state=current_state_,\n trace_fn=trace_fn\n )\n\n # The actual sampling process\n start = time.time()\n states, kernel_results = sample_mcmc(num_results, num_burnin, kernel, init_state, trace_fn)\n duration = time.time() - start\n print(\"MCMC sampling finished. ({:.3f} sec)\".format(duration))\n\n return states, kernel_results, duration\n\n def get_chains_after_burnin(\n self,\n samples: List[any],\n kernel_results: List[any],\n num_burnin: int,\n is_nuts: bool = False\n ) -> Tuple[List[any], dict, float]:\n \"\"\"\n Application of burn-in after MCMC sampling.\n Cuts the first `num_burnin` samples from all inferred variables and diagnostic statistics.\n\n Parameters\n ----------\n samples\n all kernel states\n kernel_results\n Kernel meta-information. 
The tracked statistics depend on the sampling method.\n num_burnin\n number of burn-in iterations\n is_nuts\n Specifies whether NUTS sampling was used\n\n Returns\n -------\n MCMC chain without burn-in, sampling statistics, acceptance rate\n\n states_burnin\n Kernel states without burn-in samples\n stats\n sampling statistics\n p_accept\n acceptance rate of MCMC process\n \"\"\"\n\n # Samples after burn-in\n states_burnin = []\n stats = {}\n\n # Apply burn-in to MCMC results\n for s in samples:\n states_burnin.append(s[num_burnin:].numpy())\n\n # Apply burn-in to sampling statistics\n for k, v in kernel_results.items():\n stats[k] = v[num_burnin:].numpy()\n\n # Calculation of acceptance rate (different for NUTS sampling)\n if is_nuts:\n p_accept = np.mean(np.exp(kernel_results[\"log_accept_ratio\"].numpy()))\n else:\n acceptances = kernel_results[\"is_accepted\"].numpy()\n\n # Calculate acceptance rate\n p_accept = sum(acceptances) / acceptances.shape[0]\n print('Acceptance rate: %0.1f%%' % (100 * p_accept))\n\n return states_burnin, stats, p_accept\n\n def sample_hmc(\n self,\n num_results: int = int(20e3),\n num_burnin: int = int(5e3),\n num_adapt_steps: Optional[int] = None,\n num_leapfrog_steps: Optional[int] = 10,\n step_size: float = 0.01\n ) -> res.CAResult:\n\n \"\"\"\n Hamiltonian Monte Carlo (HMC) sampling in tensorflow 2.\n\n Tracked diagnostic statistics:\n\n - `target_log_prob`: Value of the model's log-probability\n\n - `diverging`: Marks samples as diverging (NOTE: Handle with care, the spike-and-slab prior of scCODA usually leads to many samples being flagged as diverging)\n\n - `is_accepted`: Whether the proposed sample was accepted in the algorithm's acceptance step\n\n - `step_size`: The step size used by the algorithm in each step\n\n Parameters\n ----------\n num_results\n MCMC chain length (default 20000)\n num_burnin\n Number of burnin iterations (default 5000)\n num_adapt_steps\n Length of step size adaptation procedure\n num_leapfrog_steps\n HMC leapfrog steps (default 10)\n step_size\n Initial step size (default 0.01)\n\n Returns\n -------\n results object\n\n result\n Compositional analysis result\n \"\"\"\n\n # bijectors (not in use atm, therefore identity)\n constraining_bijectors = [tfb.Identity() for x in range(len(self.init_params))]\n\n # HMC transition kernel\n hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=self.target_log_prob_fn,\n step_size=step_size,\n num_leapfrog_steps=num_leapfrog_steps)\n hmc_kernel = tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=hmc_kernel, bijector=constraining_bijectors)\n\n # Set default value for adaptation steps if none given\n if num_adapt_steps is None:\n num_adapt_steps = int(0.8 * num_burnin)\n\n # Add step size adaptation (Andrieu, Thomas - 2008)\n hmc_kernel = tfp.mcmc.SimpleStepSizeAdaptation(\n inner_kernel=hmc_kernel, num_adaptation_steps=num_adapt_steps, target_accept_prob=0.8)\n\n # diagnostics tracing function\n def trace_fn(_, pkr):\n return {\n 'target_log_prob': pkr.inner_results.inner_results.accepted_results.target_log_prob,\n 'diverging': (pkr.inner_results.inner_results.log_accept_ratio < -1000.),\n 'is_accepted': pkr.inner_results.inner_results.is_accepted,\n 'step_size': pkr.inner_results.inner_results.accepted_results.step_size,\n }\n\n # The actual HMC sampling process\n states, kernel_results, duration = self.sampling(num_results, num_burnin,\n hmc_kernel, self.init_params, trace_fn)\n\n # apply burn-in\n states_burnin, sample_stats, acc_rate = 
self.get_chains_after_burnin(states, kernel_results, num_burnin,\n is_nuts=False)\n\n # Calculate posterior predictive\n y_hat = self.get_y_hat(states_burnin, num_results, num_burnin)\n\n params = dict(zip(self.param_names, states_burnin))\n\n # Result object generation setup\n # Get names of cell types that are not the reference\n if self.reference_cell_type is not None:\n cell_types_nb = self.cell_types[:self.reference_cell_type] + self.cell_types[self.reference_cell_type+1:]\n else:\n cell_types_nb = self.cell_types\n\n # Result object generation process. Uses arviz's data structure.\n posterior = {var_name: [var] for var_name, var in params.items() if\n \"prediction\" not in var_name}\n\n if \"prediction\" in self.param_names:\n posterior_predictive = {\"prediction\": [params[\"prediction\"]]}\n else:\n posterior_predictive = {}\n\n observed_data = {\"y\": self.y}\n dims = {\"alpha\": [\"cell_type\"],\n \"mu_b\": [\"1\"],\n \"sigma_b\": [\"1\"],\n \"b_offset\": [\"covariate\", \"cell_type_nb\"],\n \"ind_raw\": [\"covariate\", \"cell_type_nb\"],\n \"ind\": [\"covariate\", \"cell_type_nb\"],\n \"b_raw\": [\"covariate\", \"cell_type_nb\"],\n \"beta\": [\"covariate\", \"cell_type\"],\n \"concentration\": [\"sample\", \"cell_type\"],\n \"prediction\": [\"sample\", \"cell_type\"]\n }\n coords = {\"cell_type\": self.cell_types,\n \"cell_type_nb\": cell_types_nb,\n \"covariate\": self.covariate_names,\n \"sample\": range(self.y.shape[0])\n }\n\n sampling_stats = {\"chain_length\": num_results, \"num_burnin\": num_burnin,\n \"acc_rate\": acc_rate, \"duration\": duration, \"y_hat\": y_hat}\n\n model_specs = {\"reference\": self.reference_cell_type, \"formula\": self.formula}\n\n return res.CAResultConverter(posterior=posterior,\n posterior_predictive=posterior_predictive,\n observed_data=observed_data,\n dims=dims,\n sample_stats=sample_stats,\n coords=coords).to_result_data(sampling_stats=sampling_stats,\n model_specs=model_specs)\n\n def sample_hmc_da(\n self,\n num_results: int = int(20e3),\n num_burnin: int = int(5e3),\n num_adapt_steps: Optional[int] = None,\n num_leapfrog_steps: Optional[int] = 10,\n step_size: float = 0.01\n ) -> res.CAResult:\n \"\"\"\n HMC sampling with dual-averaging step size adaptation (Nesterov, 2009)\n\n Tracked diagnostic statistics:\n\n - `target_log_prob`: Value of the model's log-probability\n\n - `diverging`: Marks samples as diverging (NOTE: Handle with care, the spike-and-slab prior of scCODA usually leads to many samples being flagged as diverging)\n\n - `log_acc_ratio`: log-acceptance ratio\n\n - `is_accepted`: Whether the proposed sample was accepted in the algorithm's acceptance step\n\n - `step_size`: The step size used by the algorithm in each step\n\n Parameters\n ----------\n num_results\n MCMC chain length (default 20000)\n num_burnin\n Number of burnin iterations (default 5000)\n num_adapt_steps\n Length of step size adaptation procedure\n num_leapfrog_steps\n HMC leapfrog steps (default 10)\n step_size\n Initial step size (default 0.01)\n\n Returns\n -------\n result object\n\n result\n Compositional analysis result\n \"\"\"\n\n warnings.warn(\n \"This feature is untested and might yield different results than expected. 
Please use sample_hmc().\",\n category=UserWarning\n )\n\n # bijectors (not in use atm, therefore identity)\n constraining_bijectors = [tfb.Identity() for x in range(len(self.init_params))]\n\n # HMC transition kernel\n hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=self.target_log_prob_fn,\n step_size=step_size,\n num_leapfrog_steps=num_leapfrog_steps)\n hmc_kernel = tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=hmc_kernel, bijector=constraining_bijectors)\n\n # Set default value for adaptation steps if none given\n if num_adapt_steps is None:\n num_adapt_steps = int(0.8 * num_burnin)\n\n # Add step size adaptation\n hmc_kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(\n inner_kernel=hmc_kernel, num_adaptation_steps=num_adapt_steps, target_accept_prob=0.8, decay_rate=0.5)\n\n # tracing function\n def trace_fn(_, pkr):\n return {\n 'target_log_prob': pkr.inner_results.inner_results.accepted_results.target_log_prob,\n 'diverging': (pkr.inner_results.inner_results.log_accept_ratio < -1000.),\n \"log_acc_ratio\": pkr.inner_results.inner_results.log_accept_ratio,\n 'is_accepted': pkr.inner_results.inner_results.is_accepted,\n 'step_size': tf.exp(pkr.log_averaging_step[0]),\n }\n\n # HMC sampling\n states, kernel_results, duration = self.sampling(num_results, num_burnin, hmc_kernel, self.init_params, trace_fn)\n states_burnin, sample_stats, acc_rate = self.get_chains_after_burnin(states, kernel_results, num_burnin,\n is_nuts=False)\n\n y_hat = self.get_y_hat(states_burnin, num_results, num_burnin)\n\n params = dict(zip(self.param_names, states_burnin))\n\n # Specification of cell types that were not used as the reference\n if self.reference_cell_type is not None:\n cell_types_nb = self.cell_types[:self.reference_cell_type] + self.cell_types[self.reference_cell_type+1:]\n else:\n cell_types_nb = self.cell_types\n\n # Result object generation process. 
Uses arviz's data structure.\n posterior = {var_name: [var] for var_name, var in params.items() if\n \"prediction\" not in var_name}\n\n if \"prediction\" in self.param_names:\n posterior_predictive = {\"prediction\": [params[\"prediction\"]]}\n else:\n posterior_predictive = {}\n\n observed_data = {\"y\": self.y}\n dims = {\"alpha\": [\"cell_type\"],\n \"mu_b\": [\"1\"],\n \"sigma_b\": [\"1\"],\n \"b_offset\": [\"covariate\", \"cell_type_nb\"],\n \"ind_raw\": [\"covariate\", \"cell_type_nb\"],\n \"ind\": [\"covariate\", \"cell_type_nb\"],\n \"b_raw\": [\"covariate\", \"cell_type_nb\"],\n \"beta\": [\"covariate\", \"cell_type\"],\n \"concentration\": [\"sample\", \"cell_type\"],\n \"prediction\": [\"sample\", \"cell_type\"]\n }\n coords = {\"cell_type\": self.cell_types,\n \"cell_type_nb\": cell_types_nb,\n \"covariate\": self.covariate_names,\n \"sample\": range(self.y.shape[0])\n }\n\n # build dictionary with sampling statistics\n sampling_stats = {\"chain_length\": num_results, \"num_burnin\": num_burnin,\n \"acc_rate\": acc_rate, \"duration\": duration, \"y_hat\": y_hat}\n\n model_specs = {\"reference\": self.reference_cell_type, \"formula\": self.formula}\n\n return res.CAResultConverter(posterior=posterior,\n posterior_predictive=posterior_predictive,\n observed_data=observed_data,\n dims=dims,\n sample_stats=sample_stats,\n coords=coords).to_result_data(sampling_stats=sampling_stats,\n model_specs=model_specs)\n\n def sample_nuts(\n self,\n num_results: int = int(10e3),\n num_burnin: int = int(5e3),\n num_adapt_steps: Optional[int] = None,\n max_tree_depth: int = 10,\n step_size: float = 0.01\n ) -> res.CAResult:\n \"\"\"\n HMC with No-U-turn (NUTS) sampling.\n This method is untested and might yield different results than expected.\n\n Tracked diagnostic statistics:\n\n - `target_log_prob`: Value of the model's log-probability\n\n - `leapfros_taken`: Number of leapfrog steps taken by hte integrator\n\n - `diverging`: Marks samples as diverging (NOTE: Handle with care, the spike-and-slab prior of scCODA usually leads to many samples being flagged as diverging)\n\n - `energy`: HMC \"Energy\" value for each step\n\n - `log_accept_ratio`: log-acceptance ratio\n\n - `step_size`: The step size used by the algorithm in each step\n\n - `reached_max_depth`: Whether the NUTS algorithm reached the maximum sampling depth in each step\n\n - `is_accepted`: Whether the proposed sample was accepted in the algorithm's acceptance step\n\n Parameters\n ----------\n num_results\n MCMC chain length (default 10000)\n num_burnin\n Number of burnin iterations (default 5000)\n num_adapt_steps\n Length of step size adaptation procedure\n max_tree_depth\n Maximum tree depth (default 10)\n step_size\n Initial step size (default 0.01)\n\n Returns\n -------\n result object\n\n result\n Compositional analysis result\n \"\"\"\n\n warnings.warn(\n \"This feature is untested and might yield different results than expected. 
Please use sample_hmc().\",\n category=UserWarning\n )\n\n # bijectors (not in use atm, therefore identity)\n constraining_bijectors = [tfb.Identity() for x in range(len(self.init_params))]\n\n # NUTS transition kernel\n nuts_kernel = tfp.mcmc.NoUTurnSampler(\n target_log_prob_fn=self.target_log_prob_fn,\n step_size=step_size,\n max_tree_depth=max_tree_depth)\n nuts_kernel = tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=nuts_kernel,\n bijector=constraining_bijectors\n )\n\n # Set default value for adaptation steps\n if num_adapt_steps is None:\n num_adapt_steps = int(0.8 * num_burnin)\n\n # Step size adaptation (Nesterov, 2009)\n nuts_kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(\n inner_kernel=nuts_kernel, num_adaptation_steps=num_adapt_steps, target_accept_prob=0.8,\n step_size_setter_fn=lambda pkr, new_step_size: pkr._replace(\n inner_results=pkr.inner_results._replace(step_size=new_step_size)\n ),\n step_size_getter_fn=lambda pkr: pkr.inner_results.step_size,\n log_accept_prob_getter_fn=lambda pkr: pkr.inner_results.log_accept_ratio,\n )\n\n # trace function\n def trace_fn(_, pkr):\n return {\n \"target_log_prob\": pkr.inner_results.inner_results.target_log_prob,\n \"leapfrogs_taken\": pkr.inner_results.inner_results.leapfrogs_taken,\n \"diverging\": pkr.inner_results.inner_results.has_divergence,\n \"energy\": pkr.inner_results.inner_results.energy,\n \"log_accept_ratio\": pkr.inner_results.inner_results.log_accept_ratio,\n \"step_size\": pkr.inner_results.inner_results.step_size[0],\n \"reach_max_depth\": pkr.inner_results.inner_results.reach_max_depth,\n \"is_accepted\": pkr.inner_results.inner_results.is_accepted,\n }\n\n # HMC sampling\n states, kernel_results, duration = self.sampling(num_results, num_burnin, nuts_kernel, self.init_params, trace_fn)\n states_burnin, sample_stats, acc_rate = self.get_chains_after_burnin(states, kernel_results, num_burnin,\n is_nuts=True)\n\n y_hat = self.get_y_hat(states_burnin, num_results, num_burnin)\n\n params = dict(zip(self.param_names, states_burnin))\n\n # Result object generation process. 
Uses arviz's data structure.\n # Get names of cell types that are not the reference\n if self.reference_cell_type is not None:\n cell_types_nb = self.cell_types[:self.reference_cell_type] + self.cell_types[self.reference_cell_type + 1:]\n else:\n cell_types_nb = self.cell_types\n\n posterior = {var_name: [var] for var_name, var in params.items() if\n \"prediction\" not in var_name}\n\n if \"prediction\" in self.param_names:\n posterior_predictive = {\"prediction\": [params[\"prediction\"]]}\n else:\n posterior_predictive = {}\n\n observed_data = {\"y\": self.y}\n dims = {\"alpha\": [\"cell_type\"],\n \"mu_b\": [\"1\"],\n \"sigma_b\": [\"1\"],\n \"b_offset\": [\"covariate\", \"cell_type_nb\"],\n \"ind_raw\": [\"covariate\", \"cell_type_nb\"],\n \"ind\": [\"covariate\", \"cell_type_nb\"],\n \"b_raw\": [\"covariate\", \"cell_type_nb\"],\n \"beta\": [\"covariate\", \"cell_type\"],\n \"concentration\": [\"sample\", \"cell_type\"],\n \"prediction\": [\"sample\", \"cell_type\"]\n }\n coords = {\"cell_type\": self.cell_types,\n \"cell_type_nb\": cell_types_nb,\n \"covariate\": self.covariate_names,\n \"sample\": range(self.y.shape[0])\n }\n\n sampling_stats = {\"chain_length\": num_results, \"num_burnin\": num_burnin,\n \"acc_rate\": acc_rate, \"duration\": duration, \"y_hat\": y_hat}\n\n model_specs = {\"reference\": self.reference_cell_type, \"formula\": self.formula}\n\n return res.CAResultConverter(posterior=posterior,\n posterior_predictive=posterior_predictive,\n observed_data=observed_data,\n dims=dims,\n sample_stats=sample_stats,\n coords=coords).to_result_data(sampling_stats=sampling_stats,\n model_specs=model_specs)\n\n\nclass ReferenceModel(CompositionalModel):\n \"\"\"\n Statistical model for single-cell differential composition analysis with specification of a reference cell type.\n This is the standard scCODA model and recommenced for all uses.\n\n The hierarchical formulation of the model for one sample is:\n\n .. math::\n y|x &\\\\sim DirMult(a(x), \\\\bar{y}) \\\\\\\\\n \\\\log(a(x)) &= \\\\alpha + x \\\\beta \\\\\\\\\n \\\\alpha_k &\\\\sim N(0, 5) \\\\quad &\\\\forall k \\\\in [K] \\\\\\\\\n \\\\beta_{d, \\\\hat{k}} &= 0 &\\\\forall d \\\\in [D]\\\\\\\\\n \\\\beta_{d, k} &= \\\\tau_{d, k} \\\\tilde{\\\\beta}_{d, k} \\\\quad &\\\\forall d \\\\in [D], k \\\\in \\\\{[K] \\\\smallsetminus \\\\hat{k}\\\\} \\\\\\\\\n \\\\tau_{d, k} &= \\\\frac{\\\\exp(t_{d, k})}{1+ \\\\exp(t_{d, k})} \\\\quad &\\\\forall d \\\\in [D], k \\\\in \\\\{[K] \\\\smallsetminus \\\\hat{k}\\\\} \\\\\\\\\n \\\\frac{t_{d, k}}{50} &\\\\sim N(0, 1) \\\\quad &\\\\forall d \\\\in [D], k \\\\in \\\\{[K] \\\\smallsetminus \\\\hat{k}\\\\} \\\\\\\\\n \\\\tilde{\\\\beta}_{d, k} &= (\\\\tilde{\\\\mu} + \\\\tilde{\\\\sigma}^2) \\\\cdot \\\\tilde{\\\\gamma}_{d, k} \\\\quad &\\\\forall d \\\\in [D], k \\\\in \\\\{[K] \\\\smallsetminus \\\\hat{k}\\\\} \\\\\\\\\n \\\\tilde{\\\\mu} &\\\\sim N(0, 1) \\\\\\\\\n \\\\tilde{\\\\sigma}^2 &\\\\sim HC(0, 1) \\\\\\\\\n \\\\tilde{\\\\gamma}_{d, k} &\\\\sim N(0,1) \\\\quad &\\\\forall d \\\\in [D], k \\\\in \\\\{[K] \\\\smallsetminus \\\\hat{k}\\\\} \\\\\\\\\n\n with y being the cell counts and x the covariates.\n\n For further information, see `scCODA: A Bayesian model for compositional single-cell data analysis`\n (Büttner, Ostner et al., 2020)\n\n \"\"\"\n\n def __init__(\n self,\n reference_cell_type: int,\n *args,\n **kwargs):\n \"\"\"\n Constructor of model class. 
Defines model structure, log-probability function, parameter names,\n and MCMC starting values.\n\n Parameters\n ----------\n reference_cell_type\n Index of reference cell type (column in count data matrix)\n args\n arguments passed to top-level class\n kwargs\n arguments passed to top-level class\n \"\"\"\n\n super(self.__class__, self).__init__(*args, **kwargs)\n\n self.reference_cell_type = reference_cell_type\n dtype = tf.float64\n\n # All parameters that are returned for analysis\n self.param_names = [\"mu_b\", \"sigma_b\", \"b_offset\", \"ind_raw\", \"alpha\",\n \"ind\", \"b_raw\", \"beta\", \"concentration\", \"prediction\"]\n\n alpha_size = [self.K]\n beta_size = [self.D, self.K]\n beta_nobl_size = [self.D, self.K-1]\n\n Root = tfd.JointDistributionCoroutine.Root\n\n def model():\n mu_b = yield Root(tfd.Independent(\n tfd.Normal(loc=tf.zeros(1, dtype=dtype),\n scale=tf.ones(1, dtype=dtype),\n name=\"mu_b\"),\n reinterpreted_batch_ndims=1))\n\n sigma_b = yield Root(tfd.Independent(\n tfd.HalfCauchy(tf.zeros(1, dtype=dtype),\n tf.ones(1, dtype=dtype),\n name=\"sigma_b\"),\n reinterpreted_batch_ndims=1))\n\n b_offset = yield Root(tfd.Independent(\n tfd.Normal(\n loc=tf.zeros(beta_nobl_size, dtype=dtype),\n scale=tf.ones(beta_nobl_size, dtype=dtype),\n name=\"b_offset\"),\n reinterpreted_batch_ndims=2))\n\n # Spike-and-slab\n ind_raw = yield Root(tfd.Independent(\n tfd.Normal(\n loc=tf.zeros(shape=beta_nobl_size, dtype=dtype),\n scale=tf.ones(shape=beta_nobl_size, dtype=dtype),\n name='ind_raw'),\n reinterpreted_batch_ndims=2))\n\n ind_scaled = ind_raw * 50\n ind = tf.exp(ind_scaled) / (1 + tf.exp(ind_scaled))\n\n b_raw = mu_b + sigma_b * b_offset\n\n beta = ind * b_raw\n\n # Include slope 0 for reference cell type\n beta = tf.concat(axis=1, values=[beta[:, :reference_cell_type],\n tf.zeros(shape=[self.D, 1], dtype=dtype),\n beta[:, reference_cell_type:]])\n\n alpha = yield Root(tfd.Independent(\n tfd.Normal(\n loc=tf.zeros(alpha_size, dtype=dtype),\n scale=tf.ones(alpha_size, dtype=dtype) * 5,\n name=\"alpha\"),\n reinterpreted_batch_ndims=1))\n\n concentrations = tf.exp(alpha + tf.matmul(self.x, beta))\n\n # Cell count prediction via DirMult\n predictions = yield Root(tfd.Independent(\n tfd.DirichletMultinomial(\n total_count=tf.cast(self.n_total, dtype),\n concentration=concentrations,\n name=\"predictions\"),\n reinterpreted_batch_ndims=1))\n\n self.model_struct = tfd.JointDistributionCoroutine(model)\n\n # Joint posterior distribution\n self.target_log_prob_fn = lambda *args:\\\n self.model_struct.log_prob(list(args) + [tf.cast(self.y, dtype)])\n\n # MCMC starting values\n self.init_params = [\n tf.zeros(1, name=\"init_mu_b\", dtype=dtype),\n tf.ones(1, name=\"init_sigma_b\", dtype=dtype),\n tf.random.normal(beta_nobl_size, 0, 1, name='init_b_offset', dtype=dtype),\n tf.zeros(beta_nobl_size, name='init_ind_raw', dtype=dtype),\n tf.random.normal(alpha_size, 0, 1, name='init_alpha', dtype=dtype)\n ]\n\n # Calculate predicted cell counts (for analysis purposes)\n def get_y_hat(\n self,\n states_burnin: List[any],\n num_results: int,\n num_burnin: int\n ) -> np.ndarray:\n \"\"\"\n Calculate posterior mode of cell counts (for analysis purposes) and add intermediate parameters\n that are no priors to MCMC results.\n\n Parameters\n ----------\n states_burnin\n MCMC chain without burn-in samples\n num_results\n Chain length (with burn-in)\n num_burnin\n Number of burn-in samples\n\n Returns\n -------\n posterior mode\n\n y_mean\n posterior mode of cell counts\n \"\"\"\n\n chain_size_y = 
[num_results - num_burnin, self.N, self.K]\n chain_size_beta = [num_results - num_burnin, self.D, self.K]\n\n alphas = states_burnin[4]\n alphas_final = alphas.mean(axis=0)\n\n ind_raw = states_burnin[3] * 50\n mu_b = states_burnin[0]\n sigma_b = states_burnin[1]\n b_offset = states_burnin[2]\n\n ind_ = np.exp(ind_raw) / (1 + np.exp(ind_raw))\n\n b_raw_ = mu_b.reshape((num_results - num_burnin, 1, 1)) + np.einsum(\"...jk, ...j->...jk\", b_offset, sigma_b)\n\n beta_temp = np.einsum(\"..., ...\", ind_, b_raw_)\n\n beta_ = np.zeros(chain_size_beta)\n for i in range(num_results - num_burnin):\n beta_[i] = np.concatenate([beta_temp[i, :, :self.reference_cell_type],\n np.zeros(shape=[self.D, 1], dtype=np.float64),\n beta_temp[i, :, self.reference_cell_type:]], axis=1)\n conc_ = np.exp(np.einsum(\"jk, ...kl->...jl\", self.x, beta_)\n + alphas.reshape((num_results - num_burnin, 1, self.K)))\n\n predictions_ = np.zeros(chain_size_y)\n for i in range(num_results - num_burnin):\n pred = tfd.DirichletMultinomial(self.n_total, conc_[i, :, :]).mean().numpy()\n predictions_[i, :, :] = pred\n\n betas_final = beta_.mean(axis=0)\n states_burnin.append(ind_)\n states_burnin.append(b_raw_)\n states_burnin.append(beta_)\n states_burnin.append(conc_)\n states_burnin.append(predictions_)\n\n concentration = np.exp(np.matmul(self.x, betas_final) + alphas_final).astype(np.float64)\n\n y_mean = concentration / np.sum(concentration, axis=1, keepdims=True) * self.n_total.numpy()[:, np.newaxis]\n\n return y_mean\n"
] | [
[
"numpy.sum",
"tensorflow.zeros",
"numpy.matmul",
"numpy.zeros",
"tensorflow.ones",
"numpy.exp",
"numpy.count_nonzero",
"numpy.size",
"tensorflow.cast",
"tensorflow.exp",
"tensorflow.matmul",
"tensorflow.convert_to_tensor",
"tensorflow.random.normal",
"numpy.einsum"
]
] |
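The scCODA model in the row above relaxes its spike-and-slab inclusion indicator as tau = exp(50 t) / (1 + exp(50 t)), i.e. a steep sigmoid of the raw indicator, before multiplying it into the effect sizes. A numpy sketch of just that transform is below; the function name soft_inclusion and the sample values are illustrative only, and the naive exp form is adequate for these small inputs but would overflow for large ones.

```python
import numpy as np


def soft_inclusion(ind_raw, scale=50.0):
    # tau = exp(scale * t) / (1 + exp(scale * t)): a steep sigmoid that pushes
    # the inclusion indicator towards 0 or 1 (continuous spike-and-slab relaxation).
    z = scale * np.asarray(ind_raw, dtype=np.float64)
    return np.exp(z) / (1.0 + np.exp(z))


if __name__ == '__main__':
    print(soft_inclusion([-0.1, -0.01, 0.0, 0.01, 0.1]))
    # approximately [0.0067, 0.3775, 0.5, 0.6225, 0.9933]
```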
ylimit/ModelDiff | [
"f509bd2a1de20138aeb5cf105f99597a279f6f0b"
] | [
"utils.py"
] | [
"import os\nimport os.path as osp\nimport sys\nimport time\nimport argparse\nfrom pdb import set_trace as st\nimport json\nimport functools\n\nimport torch\nimport numpy as np\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom torchvision import transforms\n\n\nclass MovingAverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f', momentum=0.9):\n self.name = name\n self.fmt = fmt\n self.momentum = momentum\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n\n def update(self, val, n=1):\n self.val = val\n self.avg = self.momentum*self.avg + (1-self.momentum)*val\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\", output_dir=None):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n if output_dir is not None:\n self.filepath = osp.join(output_dir, \"progress\")\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n log_str = '\\t'.join(entries)\n print(log_str)\n # if self.filepath is not None:\n # with open(self.filepath, \"a\") as f:\n # f.write(log_str+\"\\n\")\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n \nclass CrossEntropyLabelSmooth(nn.Module):\n def __init__(self, num_classes, epsilon = 0.1):\n super(CrossEntropyLabelSmooth, self).__init__()\n self.num_classes = num_classes\n self.epsilon = epsilon\n self.logsoftmax = nn.LogSoftmax(dim=1)\n\n def forward(self, inputs, targets):\n log_probs = self.logsoftmax(inputs)\n targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)\n targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n loss = (-targets * log_probs).sum(1)\n return loss.mean()\n\n\ndef linear_l2(model, beta_lmda):\n beta_loss = 0\n for m in model.modules():\n if isinstance(m, nn.Linear):\n beta_loss += (m.weight).pow(2).sum()\n beta_loss += (m.bias).pow(2).sum()\n return 0.5*beta_loss*beta_lmda, beta_loss\n\n\ndef l2sp(model, reg):\n reg_loss = 0\n dist = 0\n for m in model.modules():\n if hasattr(m, 'weight') and hasattr(m, 'old_weight'):\n diff = (m.weight - m.old_weight).pow(2).sum()\n dist += diff\n reg_loss += diff \n\n if hasattr(m, 'bias') and hasattr(m, 'old_bias'):\n diff = (m.bias - m.old_bias).pow(2).sum()\n dist += diff\n reg_loss += diff \n\n if dist > 0:\n dist = dist.sqrt()\n \n loss = (reg * reg_loss)\n return loss, dist\n\n\ndef advtest_fast(model, loader, adversary, args):\n advDataset = torch.load(args.adv_data_dir)\n test_loader = torch.utils.data.DataLoader(\n advDataset,\n batch_size=4, shuffle=False,\n num_workers=0, pin_memory=False)\n model.eval()\n\n total_ce = 0\n total = 0\n top1 = 0\n\n total = 0\n top1_clean = 0\n top1_adv = 0\n adv_success = 0\n adv_trial = 0\n for i, (batch, label, adv_batch, adv_label) in enumerate(test_loader):\n batch, label = batch.to('cuda'), label.to('cuda')\n adv_batch = adv_batch.to('cuda')\n\n total += batch.size(0)\n out_clean = model(batch)\n\n # if 'mbnetv2' in args.network:\n # y = torch.zeros(batch.shape[0], model.classifier[1].in_features).cuda()\n # else:\n # y = 
torch.zeros(batch.shape[0], model.fc.in_features).cuda()\n \n # y[:,0] = args.m\n # advbatch = adversary.perturb(batch, y)\n\n out_adv = model(adv_batch)\n\n _, pred_clean = out_clean.max(dim=1)\n _, pred_adv = out_adv.max(dim=1)\n\n clean_correct = pred_clean.eq(label)\n adv_trial += int(clean_correct.sum().item())\n adv_success += int(pred_adv[clean_correct].eq(label[clean_correct]).sum().detach().item())\n top1_clean += int(pred_clean.eq(label).sum().detach().item())\n top1_adv += int(pred_adv.eq(label).sum().detach().item())\n\n # print('{}/{}...'.format(i+1, len(test_loader)))\n print(f\"Finish adv test fast\")\n del test_loader\n del advDataset\n return float(top1_clean)/total*100, float(top1_adv)/total*100, float(adv_trial-adv_success) / adv_trial *100\n\n\ndef lazy_property(func):\n attribute = '_lazy_' + func.__name__\n\n @property\n @functools.wraps(func)\n def wrapper(self):\n if not hasattr(self, attribute):\n setattr(self, attribute, func(self))\n return getattr(self, attribute)\n\n return wrapper\n\n\nclass Utils:\n _instance = None\n\n def __init__(self):\n self.cache = {}\n\n @staticmethod\n def _get_instance():\n if Utils._instance is None:\n Utils._instance = Utils()\n return Utils._instance\n\n @staticmethod\n def show_images(images, labels, title='examples'):\n plt.figure(figsize=(10,10))\n plt.subplots_adjust(hspace=0.2)\n for n in range(25):\n plt.subplot(5,5,n+1)\n img = images[n]\n img = img.numpy().squeeze()\n plt.imshow(img)\n plt.title(f'{labels[n]}')\n plt.axis('off')\n _ = plt.suptitle(title)\n plt.show()\n\n @staticmethod\n def copy_weights(source_model, target_model):\n # print(source_model.summary())\n # print(target_model.summary())\n for i, layer in enumerate(target_model.layers):\n if not layer.get_weights():\n continue\n source_layer = source_model.get_layer(layer.name)\n # print(layer)\n # print(source_layer)\n layer.set_weights(source_layer.get_weights())\n return target_model\n\n @staticmethod\n def normalize(v):\n norm = np.linalg.norm(v)\n if norm == 0:\n return v\n return v / norm\n\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.load",
"torch.zeros_like",
"torch.nn.LogSoftmax",
"numpy.linalg.norm"
]
] |
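The CrossEntropyLabelSmooth and MovingAverageMeter classes in the utils.py entry above are self-contained; a minimal usage sketch follows (the class count, batch size, and random tensors are illustrative and not taken from the repository):

    import torch

    # assumes the two classes above are in scope, e.g. `from utils import CrossEntropyLabelSmooth, MovingAverageMeter`
    criterion = CrossEntropyLabelSmooth(num_classes=10, epsilon=0.1)
    loss_meter = MovingAverageMeter('loss', fmt=':.4f', momentum=0.9)

    logits = torch.randn(8, 10)            # illustrative batch of 8 samples, 10 classes
    targets = torch.randint(0, 10, (8,))   # illustrative integer labels

    loss = criterion(logits, targets)      # scalar label-smoothed cross entropy
    loss_meter.update(loss.item())
    print(loss_meter)                      # prints "loss <val> (<moving avg>)"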
TomLiu59/AI-Final-Project | [
"160cb39f7a6c2d51a5f131c70a2ef4677a6d554e"
] | [
"main.py"
] | [
"import numpy as np\nimport pprint\nimport tensorflow as tf\nimport os\nfrom datetime import datetime\n\nfrom model import AlternatingAttention\nimport data_helper\nimport train\nimport test1\nimport sys\n\nflags = tf.app.flags;\n\nflags.DEFINE_integer(\"embedding_dim\", 384, \"Dimensionality of character embedding (default: 384)\")\nflags.DEFINE_integer(\"encoding_dim\", 128, \"Dimensionality of bidirectional GRU encoding for query / document\")\nflags.DEFINE_integer(\"num_glimpses\", 8, \"Number of glimpse iterations during read (default: 8)\")\nflags.DEFINE_float(\"dropout_keep_prob\", 0.8, \"Dropout keep probability (default: 0.8)\")\nflags.DEFINE_float(\"l2_reg_lambda\", 1e-4, \"L2 regularizaion lambda (default: 0.0001)\")\nflags.DEFINE_float(\"learning_rate\", 1e-3, \"AdamOptimizer learning rate (default: 0.001)\")\nflags.DEFINE_float(\"learning_rate_decay\", 0.8, \"How much learning rate will decay after half epoch of non-decreasing loss (default: 0.8)\")\n\n# Training parameters\nflags.DEFINE_integer(\"batch_size\", 1, \"Batch Size (default: 32)\")\nflags.DEFINE_integer(\"num_epochs\", 12, \"Number of training epochs (default: 12)\")\nflags.DEFINE_integer(\"evaluate_every\", 300, \"Evaluate model on validation set after this many steps (default: 300)\")\n\nflags.DEFINE_boolean(\"trace\", False, \"Trace (load smaller dataset)\")\nflags.DEFINE_string(\"log_dir\", \"logs\", \"Directory for summary logs to be written to default (./logs/)\")\n\nflags.DEFINE_integer(\"checkpoint_every\", 1000, \"Save model after this many steps (default: 1000)\")\nflags.DEFINE_string(\"ckpt_dir\", \"./ckpts/\", \"Directory for checkpoints default (./ckpts/)\")\nflags.DEFINE_string(\"restore_file\", \"model-l3.165_a0.510.ckpt-11000\", \"Checkpoint to load\")\n\nflags.DEFINE_boolean(\"evaluate\", True, \"Whether to run evaluation epoch on a checkpoint. Must have restore_file set.\")\n\ndef main(_):\n FLAGS = tf.app.flags.FLAGS\n pp = pprint.PrettyPrinter()\n# FLAGS._parse_flags()\n FLAGS(sys.argv)\n pp.pprint(FLAGS.__flags)\n\n # Load Data\n X_train, Q_train, Y_train = data_helper.load_data('train')\n X_test, Q_test, Y_test = data_helper.load_data('valid')\n\n vocab_size = np.max(X_train) + 1\n print('[?] Vocabulary Size:', vocab_size)\n\n # Create directories\n if not os.path.exists(FLAGS.ckpt_dir):\n os.makedirs(FLAGS.ckpt_dir)\n\n timestamp = str('\\\\') + datetime.now().strftime('%Y%m%d%H%M%S') \n FLAGS.log_dir = str('log') + str(timestamp)\n print(FLAGS.log_dir)\n if not os.path.exists(FLAGS.log_dir):\n os.makedirs(FLAGS.log_dir)\n\n # Train Model\n with tf.Session(config=tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)) as sess, tf.device('/gpu:0'):\n model = AlternatingAttention(FLAGS.batch_size, vocab_size, FLAGS.encoding_dim, FLAGS.embedding_dim, FLAGS.num_glimpses, session=sess)\n\n if FLAGS.trace: # Trace model for debugging\n train.trace(FLAGS, sess, model, (X_train, Q_train, Y_train))\n return\n\n saver = tf.train.Saver()\n\n if FLAGS.restore_file is not None:\n# saver = tf.train.import_meta_graph('/tmp/model.ckpt.meta')\n saver = tf.train.import_meta_graph(str(\"./ckpts/\")+str(FLAGS.restore_file))\n print('[?] 
Loading variables from checkpoint %s' % FLAGS.restore_file)\n saver.restore(sess, \"./ckpts/model-l3.165_a0.510.ckpt-11000\")\n# saver.restore(sess, FLAGS.restore_file)\n\n # Run evaluation\n if FLAGS.evaluate:\n if not FLAGS.restore_file:\n print('Need to specify a restore_file checkpoint to evaluate')\n else:\n test_data = data_helper.load_data('test')\n word2idx, _, _ = data_helper.build_vocab()\n test1.run(FLAGS, sess, model, test_data, word2idx)\n else:\n train.run(FLAGS, sess, model,\n (X_train, Q_train, Y_train),\n (X_test, Q_test, Y_test),\n saver)\n\nif __name__ == '__main__':\n tf.app.run()\n"
] | [
[
"tensorflow.app.run",
"tensorflow.device",
"numpy.max",
"tensorflow.train.Saver",
"tensorflow.ConfigProto"
]
] |
ShuoZ9379/Integration_SIL_and_MBL | [
"d7df6501a665d65eb791f7fd9b8e85fd660e6320"
] | [
"algos/mbl_copos2_sil/run.py"
] | [
"import multiprocessing\nimport os.path as osp\nimport gym,sys\nfrom collections import defaultdict\nimport tensorflow as tf\nimport numpy as np\nimport pickle\nfrom baselines.common.vec_env import VecFrameStack,VecEnv, VecNormalize\nfrom baselines.run import parse_cmdline_kwargs, build_env, configure_logger, get_default_network, get_env_type\nfrom baselines.common.tf_util import get_session\nfrom baselines import logger\nfrom importlib import import_module\nfrom baselines.common import set_global_seeds\nimport baselines.common.tf_util as U\nfrom baselines.common.policies import build_policy\nfrom baselines.common.input import observation_placeholder\nfrom baselines.common.vec_env.vec_video_recorder import VecVideoRecorder\nfrom baselines.common.cmd_util import common_arg_parser, parse_unknown_args,make_vec_env, make_env\ntry:\n from mpi4py import MPI\nexcept ImportError:\n MPI = None\n\ntry:\n import pybullet_envs\nexcept ImportError:\n pybullet_envs = None\n\ntry:\n import roboschool\nexcept ImportError:\n roboschool = None\n\n_game_envs = defaultdict(set)\nfor env in gym.envs.registry.all():\n # TODO: solve this with regexes\n env_type = env._entry_point.split(':')[0].split('.')[-1]\n if env.id.find('Sparse') > -1:\n _game_envs['sparse_{}'.format(env_type)].add(env.id)\n else:\n _game_envs[env_type].add(env.id)\n\n# reading benchmark names directly from retro requires\n# importing retro here, and for some reason that crashes tensorflow\n# in ubuntu\n_game_envs['retro'] = {\n 'BubbleBobble-Nes',\n 'SuperMarioBros-Nes',\n 'TwinBee3PokoPokoDaimaou-Nes',\n 'SpaceHarrier-Nes',\n 'SonicTheHedgehog-Genesis',\n 'Vectorman-Genesis',\n 'FinalFight-Snes',\n 'SpaceInvaders-Snes',\n}\n\n\ndef train(args, extra_args):\n env_type, env_id = get_env_type(args)\n print('env_type: {}'.format(env_type))\n total_timesteps = int(args.num_timesteps)\n seed = args.seed\n set_global_seeds(seed)\n \n learn = get_learn_function(args.alg)\n alg_kwargs = get_learn_function_defaults(args.alg, env_type)\n alg_kwargs.update(extra_args)\n\n env = build_env(args,normalize_ob=False)\n eval_env = build_env(args,normalize_ob=False, is_eval=True)\n if args.save_video_interval != 0:\n env = VecVideoRecorder(env, osp.join(logger.get_dir(), \"videos\"), record_video_trigger=lambda x: x % args.save_video_interval == 0, video_length=args.save_video_length)\n\n if args.network:\n alg_kwargs['network'] = args.network\n else:\n if alg_kwargs.get('network') is None:\n alg_kwargs['network'] = get_default_network(env_type)\n beta = -1\n if beta < 0:\n #print(alg_kwargs)\n nr_episodes = total_timesteps // alg_kwargs['timesteps_per_batch']\n # Automatically compute beta based on initial entropy and number of iterations\n policy = build_policy(env, alg_kwargs['network'], value_network='copy', normalize_observations=alg_kwargs['normalize_observations'], copos=True)\n ob = observation_placeholder(env.observation_space)\n \n sess = U.single_threaded_session()\n sess.__enter__()\n with tf.variable_scope(\"tmp_pi\"):\n tmp_pi = policy(observ_placeholder=ob)\n sess.run(tf.global_variables_initializer())\n \n tmp_ob = np.zeros((1,) + env.observation_space.shape)\n entropy = sess.run(tmp_pi.pd.entropy(), feed_dict={tmp_pi.X: tmp_ob})\n #beta = 2 * entropy / nr_episodes\n beta = 0\n print(\"Initial entropy: \" + str(entropy) + \", episodes: \" + str(nr_episodes))\n print(\"Constantly set beta: \" + str(beta))\n\n print('Training {} on {}:{} with arguments \\n{}'.format(args.alg, env_type, env_id, alg_kwargs))\n iters = 0\n for model in learn(\n 
env=env,\n env_id=env_id,\n eval_env=eval_env,\n make_eval_env=lambda: build_env(args, normalize_ob=False, is_eval=True),\n seed=seed,\n beta=beta,\n total_timesteps=total_timesteps,\n sil_update=args.sil_update,\n sil_loss=args.sil_loss, \n **alg_kwargs\n ):\n if args.store_ckpt:\n save_path = osp.join(logger.get_dir(), 'model-{}'.format(iters))\n model.save(save_path) \n if isinstance(env, VecNormalize):\n rms_path = osp.join(logger.get_dir(), 'rms-{}'.format(iters))\n with open(rms_path, 'wb') as f:\n rms = (env.ob_rms, env.ret_rms)\n pickle.dump(rms, f)\n logger.log('Save {} model'.format(iters+1))\n iters += 1\n\n return model, env\n\ndef get_alg_module(alg, submodule=None):\n submodule = submodule or alg\n print(submodule)\n try:\n # first try to import the alg module from baselines\n alg_module = import_module('.'.join([submodule]))\n \n except ImportError:\n # then from rl_algs\n alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule]))\n\n return alg_module\n\ndef get_learn_function(alg):\n return get_alg_module(alg).learn\n\n\ndef get_learn_function_defaults(alg, env_type):\n try:\n alg_defaults = get_alg_module(alg, 'defaults')\n kwargs = getattr(alg_defaults, env_type)()\n except (ImportError, AttributeError):\n kwargs = {}\n return kwargs\n\ndef main(args):\n # configure logger, disable logging in child MPI processes (with rank > 0)\n\n arg_parser = common_arg_parser()\n args, unknown_args = arg_parser.parse_known_args()\n extra_args = parse_cmdline_kwargs(unknown_args)\n print(args)\n\n if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:\n #rank = 0\n #logger.configure()\n #logger.configure(dir=extra_args['logdir'])\n rank = 0\n configure_logger(args.log_path)\n else:\n rank = MPI.COMM_WORLD.Get_rank()\n configure_logger(args.log_path, format_strs=[])\n\n model, env = train(args, extra_args)\n\n if args.save_path is not None and rank == 0:\n save_path = osp.expanduser(args.save_path)\n model.save(save_path)\n\n if args.play:\n logger.log(\"Running trained model\")\n obs = env.reset()\n\n state = model.initial_state if hasattr(model, 'initial_state') else None\n dones = np.zeros((1,))\n\n episode_rew = 0\n while True:\n if state is not None:\n actions, _, state, _ = model.step(obs,S=state, M=dones)\n else:\n actions, _, _, _ = model.step(obs)\n\n obs, rew, done, _ = env.step(actions)\n episode_rew += rew[0] if isinstance(env, VecEnv) else rew\n env.render()\n done = done.any() if isinstance(done, np.ndarray) else done\n if done:\n print('episode_rew={}'.format(episode_rew))\n episode_rew = 0\n obs = env.reset()\n env.close()\n return model\n\nif __name__ == '__main__':\n main(sys.argv)\n"
] | [
[
"tensorflow.variable_scope",
"numpy.zeros",
"tensorflow.global_variables_initializer"
]
] |
mrtucar/keras-unet-collection | [
"38ac652f33799502df1933c805c04e366ee05c3d"
] | [
"keras_unet_collection/_model_swin_unet_2d.py"
] | [
"\nfrom __future__ import absolute_import\n\nfrom keras_unet_collection.layer_utils import *\nfrom keras_unet_collection.transformer_layers import patch_extract, patch_embedding, SwinTransformerBlock, patch_merging, patch_expanding\n\nfrom tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras.models import Model\n\ndef swin_transformer_stack(X, stack_num, embed_dim, num_patch, num_heads, window_size, num_mlp, shift_window=True, name=''):\n '''\n Stacked Swin Transformers that share the same token size.\n \n Alternated Window-MSA and Swin-MSA will be configured if `shift_window=True`, Window-MSA only otherwise.\n *Dropout is turned off.\n '''\n # Turn-off dropouts\n mlp_drop_rate = 0 # Droupout after each MLP layer\n attn_drop_rate = 0 # Dropout after Swin-Attention\n proj_drop_rate = 0 # Dropout at the end of each Swin-Attention block, i.e., after linear projections\n drop_path_rate = 0 # Drop-path within skip-connections\n \n qkv_bias = True # Convert embedded patches to query, key, and values with a learnable additive value\n qk_scale = None # None: Re-scale query based on embed dimensions per attention head # Float for user specified scaling factor\n \n if shift_window:\n shift_size = window_size // 2\n else:\n shift_size = 0\n \n for i in range(stack_num):\n \n if i % 2 == 0:\n shift_size_temp = 0\n else:\n shift_size_temp = shift_size\n\n X = SwinTransformerBlock(dim=embed_dim, num_patch=num_patch, num_heads=num_heads, \n window_size=window_size, shift_size=shift_size_temp, num_mlp=num_mlp, qkv_bias=qkv_bias, qk_scale=qk_scale,\n mlp_drop=mlp_drop_rate, attn_drop=attn_drop_rate, proj_drop=proj_drop_rate, drop_path_prob=drop_path_rate, \n name='name{}'.format(i))(X)\n return X\n\n\ndef swin_unet_2d_base(input_tensor, filter_num_begin, depth, stack_num_down, stack_num_up, \n patch_size, num_heads, window_size, num_mlp, shift_window=True, name='swin_unet'):\n '''\n The base of SwinUNET.\n \n ----------\n Cao, H., Wang, Y., Chen, J., Jiang, D., Zhang, X., Tian, Q. and Wang, M., 2021. \n Swin-Unet: Unet-like Pure Transformer for Medical Image Segmentation. arXiv preprint arXiv:2105.05537.\n \n Input\n ----------\n input_tensor: the input tensor of the base, e.g., `keras.layers.Inpyt((None, None, 3))`.\n filter_num_begin: number of channels in the first downsampling block; \n it is also the number of embedded dimensions.\n depth: the depth of Swin-UNET, e.g., depth=4 means three down/upsampling levels and a bottom level.\n stack_num_down: number of convolutional layers per downsampling level/block. 
\n stack_num_up: number of convolutional layers (after concatenation) per upsampling level/block.\n name: prefix of the created keras model and its layers.\n \n ---------- (keywords of Swin-Transformers) ----------\n \n patch_size: The size of extracted patches, \n e.g., patch_size=(2, 2) means 2-by-2 patches\n *Height and width of the patch must be equal.\n \n num_heads: number of attention heads per down/upsampling level,\n e.g., num_heads=[4, 8, 16, 16] means increased attention heads with increasing depth.\n *The length of num_heads must equal to `depth`.\n \n window_size: the size of attention window per down/upsampling level,\n e.g., window_size=[4, 2, 2, 2] means decreased window size with increasing depth.\n \n num_mlp: number of MLP nodes.\n \n shift_window: The indicator of window shifting;\n shift_window=True means applying Swin-MSA for every two Swin-Transformer blocks.\n shift_window=False means MSA with fixed window locations for all blocks.\n\n Output\n ----------\n output tensor.\n \n Note: This function is experimental.\n The activation functions of all Swin-Transformers are fixed to GELU.\n \n '''\n # Compute number be patches to be embeded\n input_size = input_tensor.shape.as_list()[1:]\n num_patch_x = input_size[0]//patch_size[0]\n num_patch_y = input_size[1]//patch_size[1]\n \n # Number of Embedded dimensions\n embed_dim = filter_num_begin\n \n depth_ = depth\n \n X_skip = []\n\n X = input_tensor\n \n # Patch extraction\n X = patch_extract(patch_size)(X)\n\n # Embed patches to tokens\n X = patch_embedding(num_patch_x*num_patch_y, embed_dim)(X)\n \n # The first Swin Transformer stack\n X = swin_transformer_stack(X, stack_num=stack_num_down, \n embed_dim=embed_dim, num_patch=(num_patch_x, num_patch_y), \n num_heads=num_heads[0], window_size=window_size[0], num_mlp=num_mlp, \n shift_window=shift_window, name='{}_swin_down0'.format(name))\n X_skip.append(X)\n \n # Downsampling blocks\n for i in range(depth_-1):\n \n # Patch merging\n X = patch_merging((num_patch_x, num_patch_y), embed_dim=embed_dim, name='down{}'.format(i))(X)\n \n # update token shape info\n embed_dim = embed_dim*2\n num_patch_x = num_patch_x//2\n num_patch_y = num_patch_y//2\n \n # Swin Transformer stacks\n X = swin_transformer_stack(X, stack_num=stack_num_down, \n embed_dim=embed_dim, num_patch=(num_patch_x, num_patch_y), \n num_heads=num_heads[i+1], window_size=window_size[i+1], num_mlp=num_mlp, \n shift_window=shift_window, name='{}_swin_down{}'.format(name, i+1))\n \n # Store tensors for concat\n X_skip.append(X)\n \n # reverse indexing encoded tensors and hyperparams\n X_skip = X_skip[::-1]\n num_heads = num_heads[::-1]\n window_size = window_size[::-1]\n \n # upsampling begins at the deepest available tensor\n X = X_skip[0]\n \n # other tensors are preserved for concatenation\n X_decode = X_skip[1:]\n \n depth_decode = len(X_decode)\n \n for i in range(depth_decode):\n \n # Patch expanding\n X = patch_expanding(num_patch=(num_patch_x, num_patch_y),\n embed_dim=embed_dim, upsample_rate=2, return_vector=True, name='{}_swin_up{}'.format(name, i))(X)\n \n\n # update token shape info\n embed_dim = embed_dim//2\n num_patch_x = num_patch_x*2\n num_patch_y = num_patch_y*2\n \n # Concatenation and linear projection\n X = concatenate([X, X_decode[i]], axis=-1, name='{}_concat_{}'.format(name, i))\n X = Dense(embed_dim, use_bias=False, name='{}_concat_linear_proj_{}'.format(name, i))(X)\n \n # Swin Transformer stacks\n X = swin_transformer_stack(X, stack_num=stack_num_up, \n embed_dim=embed_dim, 
num_patch=(num_patch_x, num_patch_y), \n num_heads=num_heads[i], window_size=window_size[i], num_mlp=num_mlp, \n shift_window=shift_window, name='{}_swin_up{}'.format(name, i))\n \n # The last expanding layer; it produces full-size feature maps based on the patch size\n # !!! <--- \"patch_size[0]\" is used; it assumes patch_size = (size, size)\n X = patch_expanding(num_patch=(num_patch_x, num_patch_y),\n embed_dim=embed_dim, upsample_rate=patch_size[0], return_vector=False)(X)\n \n return X\n\n\ndef swin_unet_2d(input_size, filter_num_begin, n_labels, depth, stack_num_down, stack_num_up, \n patch_size, num_heads, window_size, num_mlp, output_activation='Softmax', shift_window=True, name='swin_unet'):\n '''\n The base of SwinUNET.\n \n ----------\n Cao, H., Wang, Y., Chen, J., Jiang, D., Zhang, X., Tian, Q. and Wang, M., 2021. \n Swin-Unet: Unet-like Pure Transformer for Medical Image Segmentation. arXiv preprint arXiv:2105.05537.\n \n Input\n ----------\n input_size: the size/shape of network input, e.g., `(128, 128, 3)`.\n filter_num_begin: number of channels in the first downsampling block; \n it is also the number of embedded dimensions.\n n_labels: number of output labels.\n depth: the depth of Swin-UNET, e.g., depth=4 means three down/upsampling levels and a bottom level.\n stack_num_down: number of convolutional layers per downsampling level/block. \n stack_num_up: number of convolutional layers (after concatenation) per upsampling level/block.\n name: prefix of the created keras model and its layers.\n \n ---------- (keywords of Swin-Transformers) ----------\n \n patch_size: The size of extracted patches, \n e.g., patch_size=(2, 2) means 2-by-2 patches\n *Height and width of the patch must be equal.\n \n num_heads: number of attention heads per down/upsampling level,\n e.g., num_heads=[4, 8, 16, 16] means increased attention heads with increasing depth.\n *The length of num_heads must equal to `depth`.\n \n window_size: the size of attention window per down/upsampling level,\n e.g., window_size=[4, 2, 2, 2] means decreased window size with increasing depth.\n \n num_mlp: number of MLP nodes.\n \n shift_window: The indicator of window shifting;\n shift_window=True means applying Swin-MSA for every two Swin-Transformer blocks.\n shift_window=False means MSA with fixed window locations for all blocks.\n \n Output\n ----------\n model: a keras model.\n \n Note: This function is experimental.\n The activation functions of all Swin-Transformers are fixed to GELU.\n '''\n IN = Input(input_size)\n \n # base \n X = swin_unet_2d_base(IN, filter_num_begin=filter_num_begin, depth=depth, stack_num_down=stack_num_down, stack_num_up=stack_num_up, \n patch_size=patch_size, num_heads=num_heads, window_size=window_size, num_mlp=num_mlp, shift_window=True, name=name)\n \n # output layer\n OUT = CONV_output(X, n_labels, kernel_size=1, activation=output_activation, name='{}_output'.format(name))\n \n # functional API model\n model = Model(inputs=[IN,], outputs=[OUT,], name='{}_model'.format(name))\n \n return model\n"
] | [
[
"tensorflow.keras.layers.Input"
]
] |
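As a usage illustration for the swin_unet_2d builder in the entry above, the call below follows its docstring (the concrete numbers mirror the docstring examples, e.g. num_heads=[4, 8, 16, 16], window_size=[4, 2, 2, 2], patch_size=(2, 2); the import path and remaining values are assumptions, not repository defaults):

    from keras_unet_collection._model_swin_unet_2d import swin_unet_2d  # assumed import path

    model = swin_unet_2d(input_size=(128, 128, 3),   # H/W chosen so patches and windows divide evenly
                         filter_num_begin=128,       # embedded dimension of the first block (illustrative)
                         n_labels=2,
                         depth=4,                    # three down/up levels plus a bottom level
                         stack_num_down=2,
                         stack_num_up=2,
                         patch_size=(2, 2),
                         num_heads=[4, 8, 16, 16],   # length must equal depth
                         window_size=[4, 2, 2, 2],
                         num_mlp=512,                # illustrative MLP width
                         output_activation='Softmax',
                         shift_window=True,
                         name='swin_unet')
    model.summary()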
warcraft12321/Hyperfoods | [
"b995cd7afe10fcbd338158c80f53ce637bfffc0c",
"b995cd7afe10fcbd338158c80f53ce637bfffc0c"
] | [
"src/torch/nn/grad.py",
"src/torch/utils/data/distributed.py"
] | [
"\"\"\"Gradient interface\"\"\"\n\nimport torch\nfrom .modules.utils import _single, _pair, _triple\n\n\ndef _grad_input_padding(grad_output, input_size, stride, padding, kernel_size):\n input_size = list(input_size)\n k = grad_output.dim() - 2\n\n if len(input_size) == k + 2:\n input_size = input_size[-k:]\n if len(input_size) != k:\n raise ValueError(\"input_size must have {} elements (got {})\"\n .format(k + 2, len(input_size)))\n\n def dim_size(d):\n return ((grad_output.size(d + 2) - 1) * stride[d] - 2 * padding[d] +\n kernel_size[d])\n\n min_sizes = [dim_size(d) for d in range(k)]\n max_sizes = [min_sizes[d] + stride[d] - 1 for d in range(k)]\n for size, min_size, max_size in zip(input_size, min_sizes, max_sizes):\n if size < min_size or size > max_size:\n raise ValueError(\n (\"requested an input grad size of {}, but valid sizes range \"\n \"from {} to {} (for a grad_output of {})\").format(\n input_size, min_sizes, max_sizes,\n grad_output.size()[2:]))\n\n return tuple(input_size[d] - min_sizes[d] for d in range(k))\n\n\ndef conv1d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):\n r\"\"\"\n Computes the gradient of conv1d with respect to the input of the convolution.\n This is same as the 1D transposed convolution operator under the hood but requires\n the shape of the gradient w.r.t. input to be specified explicitly.\n\n Args:\n input_size : Shape of the input gradient tensor\n weight: weight tensor (out_channels x in_channels/groups x kW)\n grad_output : output gradient tensor (minibatch x out_channels x oW)\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n bias: optional bias tensor (out_channels). Default: None\n\n Examples::\n\n >>> input = torch.randn(1,1,3, requires_grad=True)\n >>> weight = torch.randn(1,1,1, requires_grad=True)\n >>> output = F.conv1d(input, weight)\n >>> grad_output = torch.randn(output.shape)\n >>> grad_input = torch.autograd.grad(output, input, grad_output)\n >>> F.grad.conv1d_input(input.shape, weight, grad_output)\n\n \"\"\"\n stride = _single(stride)\n padding = _single(padding)\n dilation = _single(dilation)\n kernel_size = [weight.shape[2]]\n\n if input_size is None:\n raise ValueError(\"grad.conv1d_input requires specifying an input_size\")\n\n grad_input_padding = _grad_input_padding(grad_output, input_size, stride,\n padding, kernel_size)\n\n return torch.conv_transpose1d(\n grad_output, weight, bias, stride, padding, grad_input_padding, groups,\n dilation)\n\n\ndef conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):\n r\"\"\"\n Computes the gradient of conv1d with respect to the weight of the convolution.\n\n Args:\n input: input tensor of shape (minibatch x in_channels x iW)\n weight_size : Shape of the weight gradient tensor\n grad_output : output gradient tensor (minibatch x out_channels x oW)\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. 
Default: 1\n bias: optional bias tensor (out_channels). Default: None\n\n Examples::\n\n >>> input = torch.randn(1,1,3, requires_grad=True)\n >>> weight = torch.randn(1,1,1, requires_grad=True)\n >>> output = F.conv1d(input, weight)\n >>> grad_output = torch.randn(output.shape)\n >>> grad_weight = torch.autograd.grad(output, filter, grad_output)\n >>> F.grad.conv1d_weight(input, weight.shape, grad_output)\n\n \"\"\"\n stride = _single(stride)\n padding = _single(padding)\n dilation = _single(dilation)\n in_channels = input.shape[1]\n out_channels = grad_output.shape[1]\n min_batch = input.shape[0]\n\n grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1)\n grad_output = grad_output.contiguous().view(\n grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2])\n\n input = input.contiguous().view(1, input.shape[0] * input.shape[1],\n input.shape[2])\n\n grad_weight = torch.conv1d(input, grad_output, bias, dilation, padding,\n stride, in_channels * min_batch)\n\n grad_weight = grad_weight.contiguous().view(\n min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2])\n\n return grad_weight.sum(dim=0).view(\n in_channels // groups, out_channels, grad_weight.shape[2]).transpose(\n 0, 1).narrow(2, 0, weight_size[2])\n\n\ndef conv2d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):\n r\"\"\"\n Computes the gradient of conv2d with respect to the input of the convolution.\n This is same as the 2D transposed convolution operator under the hood but requires\n the shape of the gradient w.r.t. input to be specified explicitly.\n\n Args:\n input_size : Shape of the input gradient tensor\n weight: weight tensor (out_channels x in_channels/groups x kH x kW)\n grad_output : output gradient tensor (minibatch x out_channels x oH x oW)\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n bias: optional bias tensor (out_channels). Default: None\n\n Examples::\n\n >>> input = torch.randn(1,1,3,3, requires_grad=True)\n >>> weight = torch.randn(1,1,1,2, requires_grad=True)\n >>> output = F.conv2d(input, weight)\n >>> grad_output = torch.randn(output.shape)\n >>> grad_input = torch.autograd.grad(output, input, grad_output)\n >>> F.grad.conv2d_input(input.shape, weight, grad_output)\n\n \"\"\"\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n kernel_size = (weight.shape[2], weight.shape[3])\n\n if input_size is None:\n raise ValueError(\"grad.conv2d_input requires specifying an input_size\")\n\n grad_input_padding = _grad_input_padding(grad_output, input_size, stride,\n padding, kernel_size)\n\n return torch.conv_transpose2d(\n grad_output, weight, bias, stride, padding, grad_input_padding, groups,\n dilation)\n\n\ndef conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):\n r\"\"\"\n Computes the gradient of conv2d with respect to the weight of the convolution.\n\n Args:\n input: input tensor of shape (minibatch x in_channels x iH x iW)\n weight_size : Shape of the weight gradient tensor\n grad_output : output gradient tensor (minibatch x out_channels x oH x oW)\n stride (int or tuple, optional): Stride of the convolution. 
Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n bias: optional bias tensor (out_channels). Default: None\n\n Examples::\n\n >>> input = torch.randn(1,1,3,3, requires_grad=True)\n >>> weight = torch.randn(1,1,1,2, requires_grad=True)\n >>> output = F.conv2d(input, weight)\n >>> grad_output = torch.randn(output.shape)\n >>> grad_weight = torch.autograd.grad(output, filter, grad_output)\n >>> F.grad.conv2d_weight(input, weight.shape, grad_output)\n\n \"\"\"\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n in_channels = input.shape[1]\n out_channels = grad_output.shape[1]\n min_batch = input.shape[0]\n\n grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1,\n 1)\n grad_output = grad_output.contiguous().view(\n grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],\n grad_output.shape[3])\n\n input = input.contiguous().view(1, input.shape[0] * input.shape[1],\n input.shape[2], input.shape[3])\n\n grad_weight = torch.conv2d(input, grad_output, bias, dilation, padding,\n stride, in_channels * min_batch)\n\n grad_weight = grad_weight.contiguous().view(\n min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],\n grad_weight.shape[3])\n\n return grad_weight.sum(dim=0).view(\n in_channels // groups, out_channels,\n grad_weight.shape[2], grad_weight.shape[3]).transpose(0, 1).narrow(\n 2, 0, weight_size[2]).narrow(3, 0, weight_size[3])\n\n\ndef conv3d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):\n r\"\"\"\n Computes the gradient of conv3d with respect to the input of the convolution.\n This is same as the 3D transposed convolution operator under the hood but requires\n the shape of the gradient w.r.t. input to be specified explicitly.\n\n Args:\n input_size : Shape of the input gradient tensor\n weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW)\n grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n bias: optional bias tensor (out_channels). 
Default: None\n\n Examples::\n\n >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)\n >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)\n >>> output = F.conv3d(input, weight)\n >>> grad_output = torch.randn(output.shape)\n >>> grad_input = torch.autograd.grad(output, input, grad_output)\n >>> F.grad.conv3d_input(input.shape, weight, grad_output)\n\n \"\"\"\n stride = _triple(stride)\n padding = _triple(padding)\n dilation = _triple(dilation)\n kernel_size = (weight.shape[2], weight.shape[3], weight.shape[4])\n\n if input_size is None:\n raise ValueError(\"grad.conv3d_input requires specifying an input_size\")\n\n grad_input_padding = _grad_input_padding(grad_output, input_size, stride,\n padding, kernel_size)\n\n return torch.conv_transpose3d(\n grad_output, weight, bias, stride, padding, grad_input_padding, groups,\n dilation)\n\n\ndef conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1, bias=None):\n r\"\"\"\n Computes the gradient of conv3d with respect to the weight of the convolution.\n\n Args:\n input: input tensor of shape (minibatch x in_channels x iT x iH x iW)\n weight_size : Shape of the weight gradient tensor\n grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n bias: optional bias tensor (out_channels). Default: None\n\n Examples::\n\n >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)\n >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)\n >>> output = F.conv3d(input, weight)\n >>> grad_output = torch.randn(output.shape)\n >>> grad_weight = torch.autograd.grad(output, weight, grad_output)\n >>> F.grad.conv3d_weight(input, weight.shape, grad_output)\n\n \"\"\"\n stride = _triple(stride)\n padding = _triple(padding)\n dilation = _triple(dilation)\n in_channels = input.shape[1]\n out_channels = grad_output.shape[1]\n min_batch = input.shape[0]\n\n grad_output = grad_output.repeat(1, in_channels // groups, 1, 1, 1)\n grad_output = grad_output.contiguous().view(\n grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],\n grad_output.shape[3], grad_output.shape[4])\n\n input = input.contiguous().view(1, input.shape[0] * input.shape[1],\n input.shape[2], input.shape[3],\n input.shape[4])\n\n grad_weight = torch.conv3d(input, grad_output, bias, dilation, padding,\n stride, in_channels * min_batch)\n\n grad_weight = grad_weight.contiguous().view(\n min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],\n grad_weight.shape[3], grad_weight.shape[4])\n\n return grad_weight.sum(dim=0).view(\n in_channels // groups, out_channels, grad_weight.shape[2],\n grad_weight.shape[3], grad_weight.shape[4]).transpose(0, 1).narrow(\n 2, 0, weight_size[2]).narrow(3, 0, weight_size[3]).narrow(\n 4, 0, weight_size[4])\n",
"import math\nimport torch\nfrom . import Sampler\nfrom torch.distributed import get_world_size, get_rank\n\n\nclass DistributedSampler(Sampler):\n \"\"\"Sampler that restricts data loading to a subset of the dataset.\n\n It is especially useful in conjunction with\n :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each\n process can pass a DistributedSampler instance as a DataLoader sampler,\n and load a subset of the original dataset that is exclusive to it.\n\n .. note::\n Dataset is assumed to be of constant size.\n\n Arguments:\n dataset: Dataset used for sampling.\n num_replicas (optional): Number of processes participating in\n distributed training.\n rank (optional): Rank of the current process within num_replicas.\n \"\"\"\n\n def __init__(self, dataset, num_replicas=None, rank=None):\n if num_replicas is None:\n num_replicas = get_world_size()\n if rank is None:\n rank = get_rank()\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = list(torch.randperm(len(self.dataset), generator=g))\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n offset = self.num_samples * self.rank\n indices = indices[offset:offset + self.num_samples]\n assert len(indices) == self.num_samples\n\n return iter(indices)\n\n def __len__(self):\n return self.num_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n"
] | [
[
"torch.conv_transpose2d",
"torch.conv1d",
"torch.conv_transpose3d",
"torch.conv2d",
"torch.conv3d",
"torch.conv_transpose1d"
],
[
"torch.distributed.get_rank",
"torch.Generator",
"torch.distributed.get_world_size"
]
] |
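The DistributedSampler in the second file above is intended to be handed to a DataLoader, with set_epoch called once per epoch so every process draws the same permutation; a minimal sketch (the dataset, epoch count, and initialized process group are assumed to exist elsewhere) could be:

    import torch

    # `train_dataset` and torch.distributed initialization are assumed to exist already
    sampler = DistributedSampler(train_dataset)        # num_replicas/rank default to get_world_size()/get_rank()
    loader = torch.utils.data.DataLoader(train_dataset,
                                         batch_size=32,
                                         sampler=sampler)  # do not also pass shuffle=True

    for epoch in range(10):                # illustrative epoch count
        sampler.set_epoch(epoch)           # reseeds the torch.Generator used in __iter__
        for batch in loader:
            ...                            # forward / backward / optimizer step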
vonkaenelerik/self-supervised-poisson-gaussian | [
"7ebb4527fa79ace7d5de8c28fb484ef1a5cd1c96"
] | [
"test_mydat.py"
] | [
"import numpy as np\nfrom skimage.metrics import peak_signal_noise_ratio\nfrom nets import *\nfrom scipy.optimize import minimize\n\nimport os\nfrom os import listdir\nfrom os.path import join\nfrom imageio import imread, imwrite\nimport glob\nfrom tqdm import trange\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--path',required=True,help='path to dataset root')\nparser.add_argument('--dataset',required=True,help='dataset name e.g. Confocal_MICE')\nparser.add_argument('--mode',default='uncalib',help='noise model: mse, uncalib, gaussian, poisson, poissongaussian')\nparser.add_argument('--reg',type=float,default=0.1,help='regularization weight on prior std. dev.')\n\nargs = parser.parse_args()\n\n\"\"\" Re-create the model and load the weights \"\"\"\n\n# model = gaussian_blindspot_network((512, 512, 1),'uncalib')\n# model = gaussian_blindspot_network((args.crop, args.crop, 1),args.mode,args.reg)\n\nif args.mode == 'uncalib' or args.mode == 'mse':\n model = gaussian_blindspot_network((1024, 1024, 1), args.mode)\n weights_path = 'weights/weights.%s.%s.latest.hdf5'%(args.dataset,args.mode)\nelse:\n model = gaussian_blindspot_network((1024, 1024, 1), args.mode, args.reg)\n weights_path = 'weights/weights.%s.%s.%0.3f.latest.hdf5'%(args.dataset,args.mode,args.reg)\n\nmodel.load_weights(weights_path)\n\n\"\"\" Load test images \"\"\"\n\ntest_images = []\n\ndef load_images(noise):\n basepath = args.path + '/' + args.dataset + '/' + noise\n images = []\n for path in sorted(glob.glob(basepath + '/mydata_test3/*.tif')):\n images.append(imread(path))\n return np.stack(images,axis=0)[:,:,:,None]/255.\n\nX = load_images('raw')\n#Y = load_images('gt')\n#gt = np.squeeze(Y)*255\n\n\"\"\" Denoise test images \"\"\"\ndef poisson_gaussian_loss(x,y,a,b):\n var = np.maximum(1e-4,a*x+b)\n loss = (y-x)**2 / var + np.log(var)\n return np.mean(loss)\noptfun = lambda p, x, y : poisson_gaussian_loss(x,y,p[0],p[1])\n\ndef denoise_uncalib(y,loc,std,a,b):\n total_var = std**2\n noise_var = np.maximum(1e-3,a*loc+b)\n noise_std = noise_var**0.5\n prior_var = np.maximum(1e-4,total_var-noise_var)\n prior_std = prior_var**0.5\n return np.squeeze(gaussian_posterior_mean(y,loc,prior_std,noise_std))\n\nif args.mode == 'mse' or args.mode == 'uncalib':\n experiment_name = '%s.%s'%(args.dataset,args.mode)\nelse:\n experiment_name = '%s.%s.%0.3f'%(args.dataset,args.mode,args.reg)\nos.makedirs(\"results/%s\"%experiment_name,exist_ok=True)\nresults_path = 'results/%s.tab'%experiment_name\nwith open(results_path,'w') as f:\n f.write('inputPSNR\\tdenoisedPSNR\\n')\n for index,im in enumerate(X):\n pred = model.predict(im.reshape(1,1024,1024,1))\n \n if args.mode == 'uncalib':\n # select only pixels above bottom 2% and below top 3% of noisy image\n good = np.logical_and(im >= np.quantile(im,0.02), im <= np.quantile(im,0.97))[None,:,:,:]\n pseudo_clean = pred[0][good]\n noisy = im[np.squeeze(good, axis=0)]\n\n # estimate noise level\n res = minimize(optfun, (0.01,0), (np.squeeze(pseudo_clean),np.squeeze(noisy)), method='Nelder-Mead')\n print('bootstrap poisson-gaussian fit: a = %f, b=%f, loss=%f'%(res.x[0],res.x[1],res.fun))\n a,b = res.x\n \n # run denoising\n denoised = denoise_uncalib(im[None,:,:,:],pred[0],pred[1],a,b)\n else:\n denoised = pred[0]\n \n # scale and clip to 8-bit\n denoised = np.clip(np.squeeze(denoised*255),0,255)\n \n # write out image\n imwrite('results/%s/%02d.png'%(experiment_name,index),denoised.astype('uint8'))\n\n #noisy = np.squeeze(im)*255\n #psnr_noisy = 
peak_signal_noise_ratio(gt, noisy, data_range = 255)\n #psnr_denoised = peak_signal_noise_ratio(gt, denoised, data_range = 255)\n\n #print(psnr_noisy,psnr_denoised)\n #f.write('%.15f\\t%.15f\\n'%(psnr_noisy,psnr_denoised))\n\n\"\"\" Print averages \"\"\"\n#results = np.loadtxt(results_path,delimiter='\\t',skiprows=1)\n#print('averages:')\n#print(np.mean(results,axis=0))\n\n"
] | [
[
"numpy.squeeze",
"numpy.quantile",
"numpy.stack",
"numpy.log",
"numpy.maximum",
"numpy.mean"
]
] |
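The bootstrap noise estimation in the script above boils down to a two-parameter Nelder-Mead fit of a Poisson-Gaussian negative log-likelihood; isolated with synthetic arrays standing in for the pseudo-clean (x) and noisy (y) pixels, the same fit looks like this (the synthetic noise parameters 0.01 and 0.001 are illustrative):

    import numpy as np
    from scipy.optimize import minimize

    def poisson_gaussian_loss(x, y, a, b):
        var = np.maximum(1e-4, a * x + b)
        return np.mean((y - x) ** 2 / var + np.log(var))

    rng = np.random.default_rng(0)
    x = rng.uniform(0.05, 0.95, size=10_000)                # stand-in for pseudo-clean pixels
    y = x + rng.normal(0.0, np.sqrt(0.01 * x + 0.001))      # stand-in for noisy pixels

    res = minimize(lambda p: poisson_gaussian_loss(x, y, p[0], p[1]),
                   x0=(0.01, 0.0), method='Nelder-Mead')
    a, b = res.x   # recovered gain (a) and Gaussian variance offset (b)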
ryanjmccall/sb_ml_eng_capstone | [
"dfa87dcbd741c6f502b6cd0eb8f31203568c09a2"
] | [
"modules/module_5_3_2/data/population.py"
] | [
"#!/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nimport os\n\ndata_file = os.path.join(os.path.dirname(__file__),'Top5000population.csv')\n\ndata = pd.read_csv(data_file, header=None, thousands=',',sep=',',\n names=['city','state','pop'],\n encoding='iso-8859-1')\n\ndata['city'] = data['city'].str.strip()\ncities = [{'city':line[0],'state':line[1], 'pop':line[2]} for line in data.values]\n"
] | [
[
"pandas.read_csv"
]
] |
kovenock/FATES_Parameter_Selection | [
"eb38cc96b3cb6c02ae71426b6351e60b16ed8a56"
] | [
"psfxns/annmeans.py"
] | [
"import netCDF4 as nc4\nimport numpy as np\n\n\ndef annual_mean_model(filepath, var, varfiletype, nyrs, conv_factor):\n \"\"\"Calculate time series of model annual means for one variable.\n \n :param filepath (str): the file path and name for the data file\n :param var (str): the name of the variable to call from data file\n :param varfiletype (int): the model file type, where:\n 0 - contains monthly averages for the entire ecosystem; and\n 1 - contains annual mean values by tree size\n :param nyrs (int): the number of years to use in analysis\n :param conv_factor (int, float): the conversion factor for\n the variable given by var\n :return: a 2-D array containing the annual mean time series\n indexed by (parameter_set, nyrs)\n :rtype: numpy.ndarray\n \"\"\"\n \n # If model output is stored as monthly ecosystem average,\n # calculate annual means. \n if varfiletype == 0:\n \n # Load monthly time series\n if var != 'FLH':\n mthts_temp = nc4.Dataset(filepath).variables[var][:, :, 0]\n elif var == 'FLH':\n mthts_temp = (nc4.Dataset(filepath).variables['FCTR'][:, :, 0] \n + nc4.Dataset(filepath).variables['FGEV'][:, :, 0] \n + nc4.Dataset(filepath).variables['FCEV'][:, :, 0])\n \n # Calculate annual means for nyrs and convert units\n annmeants = (np.nanmean(np.reshape(\n (mthts_temp[:, int(-1*nyrs*12):]),\n (mthts_temp.shape[0], -1, 12)), \n axis=2)) * conv_factor\n \n mthts_temp = None\n \n # If model output is stored as annual means by tree size,\n # sum across tree sizes.\n elif varfiletype == 1:\n \n # Calculate annual means for nyrs and convert units\n annmeants = np.squeeze(np.nansum((\n nc4.Dataset(filepath).variables[var + '_SCLS'][:, int(-1*nyrs):, :]),\n axis=2)) * conv_factor\n \n return annmeants\n\n\ndef annual_mean_fluxobs(mthts, startmth):\n \"\"\"Calculate annual mean time series from monthly fluxtower estimates.\n \n :param mthts (numpy.ndarray): a 2-D array of fluxtower \n observations with shape (years, months)\n :param startmth (int): the number of the start month\n for this annual mean time series calculation\n (e.g., 7 = start with July, 9 = start with Sept)\n :return: a vector containing the annual mean time series\n :rtype: numpy.ndarray\n \"\"\"\n \n # Specify number of months to discard\n mthts_dif = np.reshape(mthts, (1, -1))[:, startmth-1 : startmth-1-12]\n \n # Calculate annual mean time series\n annmeants = np.nanmean(np.reshape(\n mthts_dif, (int(mthts_dif.shape[1] / 12), 12)), axis=1)\n \n return annmeants\n"
] | [
[
"numpy.reshape"
]
] |
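Both helpers in psfxns/annmeans.py above return annual-mean time series; a hedged usage sketch follows (the NetCDF path, variable name, and conversion factor passed to annual_mean_model are placeholders; only the call signatures come from the docstrings above):

    import numpy as np

    # Model history file with monthly ecosystem averages (varfiletype=0);
    # returns an array indexed by (parameter_set, year).
    annmeants_model = annual_mean_model(filepath='model_history.nc',   # placeholder path
                                        var='GPP',                     # placeholder variable name
                                        varfiletype=0,
                                        nyrs=10,
                                        conv_factor=1.0)               # placeholder unit conversion

    # Flux-tower observations shaped (years, months); annual means starting in July.
    annmeants_obs = annual_mean_fluxobs(mthts=np.random.rand(12, 12), startmth=7)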
DFNaiff/BVBQ | [
"48f0eb624483f67b748d791efc0c06ddfb6e0646"
] | [
"bvbq/interface.py"
] | [
"# -*- coding: utf-8 -*-\n# pylint: disable=E1101\n\"\"\"\n Deprecated. Use named_interface.BVBQMixMVN.\n Won't be documented due to this\n\"\"\"\nimport torch\n\nfrom . import utils\nfrom . import bvbq\nfrom . import distributions\nfrom . import gp\nfrom . import acquisition\nfrom . import metrics\n\n\nclass BVBQMixMVN(object):\n def __init__(self, eval_function, ndim):\n self.set_eval_function(eval_function)\n self.ndim = ndim\n self.logprobgp = None\n self.mixmeans = None\n self.mixvars = None\n self.mixweights = None\n self.nmixtures = 0\n\n def initialize_data(self, xdata, ydata, kind='smatern52',\n noise=0.0, mean=-30.0, empirical_params=False,\n **kwargs):\n # TODO : Assertions, customizations and new policies\n logprobgp = gp.SimpleGP(self.ndim, kind=kind,\n noise=noise, zeromax=True)\n logprobgp.mean = mean\n logprobgp.fix_mean()\n logprobgp.fix_noise()\n logprobgp.set_data(xdata, ydata, empirical_params=empirical_params)\n self.logprobgp = logprobgp\n\n def initialize_components(self, init_policy='manual', **kwargs):\n # TODO : Assertions, customization and new policies\n assert init_policy in ['manual', 'manual_mix']\n if init_policy == 'manual':\n mean = kwargs.get('mean')\n var = kwargs.get('var')\n mixmeans = torch.atleast_2d(utils.tensor_convert(mean))\n mixvars = torch.atleast_2d(utils.tensor_convert(var))\n mixweights = torch.ones(1)\n nmixtures = 1\n elif init_policy == 'manual_mix':\n nmixtures = mixmeans.shape[0]\n self.mixmeans = mixmeans\n self.mixvars = mixvars\n self.mixweights = mixweights\n self.nmixtures = nmixtures\n\n def update_distribution(self):\n #TODO : Customization\n mean, var = bvbq.propose_component_mvn_mixmvn_relbo(\n self.logprobgp,\n self.mixmeans,\n self.mixvars,\n self.mixweights)\n mixmeans, mixvars, mixweights = bvbq.update_distribution_mvn_mixmvn(\n self.logprobgp,\n mean, var,\n self.mixmeans,\n self.mixvars,\n self.mixweights)\n self.mixmeans = mixmeans\n self.mixvars = mixvars\n self.mixweights = mixweights\n\n def update_evaluations(self, name='PP'):\n x0 = self.distribution.sample(1)[0, :]\n x = acquisition.acquire_next_point_mixmvn(x0,\n self.logprobgp,\n self.distribution,\n name=name)\n y = self.evaluate_single(x)\n\n # FIXME: Fix this function\n# self.logprobgp.update(x,y)\n # FIXME : Substitute below lines for actual (fixed) efficient update above\n X = torch.vstack([self.eval_points, x])\n y = torch.vstack([self.eval_values, y])\n self.logprobgp.set_data(X, y)\n\n def evaluate_single(self, x):\n return torch.squeeze(self.eval_function(x))\n\n def fit_all_parameters(self):\n #TODO : Customization\n mixmeans, mixvars, mixweights = bvbq.fit_mixmvn_elbo(\n self.logprobgp, self.mixmeans, self.mixvars, self.mixweights)\n self.mixmeans = mixmeans\n self.mixvars = mixvars\n self.mixweights = mixweights\n \n def fit_all_weights(self):\n #TODO : Customization\n mixmeans, mixvars, mixweights = bvbq.reweight_mixmvn_elbo(\n self.logprobgp, self.mixmeans, self.mixvars, self.mixweights)\n self.mixmeans = mixmeans\n self.mixvars = mixvars\n self.mixweights = mixweights\n\n def set_eval_function(self, eval_function):\n self._eval_function = eval_function\n self.eval_function = utils.numpy_to_torch_wrapper(eval_function)\n\n def elbo_metric(self, nsamples=1000):\n return metrics.bq_mixmvn_elbo_with_var(self.logprobgp,\n self.mixmeans,\n self.mixvars,\n self.mixweights,\n nsamples=nsamples)\n\n def optimize_gp_params(self, *args, **kwargs):\n baseopt = kwargs.get('baseopt', 'QN')\n kwargs.pop('baseopt', None)\n assert baseopt in ['QN', 'SGD']\n if 
baseopt == 'QN':\n return self.optimize_gp_params_qn(*args, **kwargs)\n elif baseopt == 'SGD':\n return self.optimize_gp_params_sgd(*args, **kwargs)\n\n def suggest_initialization_points(self, n):\n raise NotImplementedError\n #return xdata\n\n @property\n def distribution(self):\n return distributions.MixtureDiagonalNormalDistribution(\n self.mixmeans, self.mixvars, self.mixweights)\n\n # XXX: This actually performs computation\n @property\n def optimize_gp_params_qn(self):\n return self.logprobgp.optimize_params_qn\n\n @property\n def optimize_gp_params_sgd(self):\n return self.logprobgp.optimize_params_sgd\n\n @property\n def eval_points(self):\n return self.logprobgp.X\n\n @property\n def eval_values(self):\n return self.logprobgp.y\n"
] | [
[
"torch.ones",
"torch.vstack"
]
] |
YoNyeoSeok/refinenet-pytorch | [
"34dfa49a141630247aef1d5d2424c823ecba46c7"
] | [
"train/training.py"
] | [
"import sys\nsys.path.append('/home/user/research/refinenet-pytorch')\nimport os\nimport numpy as np\nimport tqdm\nimport argparse\nimport math\nimport random\nfrom PIL import Image\n\nimport torch\nimport torch.nn as nn\nimport datasets as ds\nfrom torchvision import transforms as trf\nfrom models.refinenet_resnet import refinenet_resnet101\nfrom utils.metrics import runningScore\nfrom vision.transforms import RandomHorizontalFlip, RandomResizedCrop\n\nimport wandb\n\n\ndef arg_parser(parser=argparse.ArgumentParser()):\n parser.add_argument('--input-scale-factor', type=float, default=1.)\n parser.add_argument('--freeze-batch-norm', action='store_true')\n\n parser.add_argument('--clear-foggy-beta', type=str, default='clear', choices=['clear', 'beta_0.02', 'beta_0.01', 'beta_0.005'])\n parser.add_argument('--total-epoch', type=int, default=12)\n parser.add_argument('--batch-size', type=int, default=1)\n parser.add_argument('--valid-batch-size', type=int, default=1)\n parser.add_argument('--optimizer', type=str, default='SGD')\n parser.add_argument('--optimizer-lr', type=float, default=5e-5)\n\n parser.add_argument('--data-aug-hflip', action='store_true')\n parser.add_argument('--data-aug-hflip-p', type=float, default=0.5)\n parser.add_argument('--data-aug-crop', action='store_true')\n parser.add_argument('--data-aug-crop-size', type=int, nargs=2, default=[512, 512])\n parser.add_argument('--data-aug-crop-scale', type=float, nargs=2, default=[0.7, 1.3])\n parser.add_argument('--data-aug-crop-ratio', type=float, nargs=2, default=[1, 1])\n\n # parser.add_argument('--gpu', type=int, default=0)\n parser.add_argument('--use-wandb', action='store_true')\n return parser\n\ndef load_train_valid_loader(args):\n cityscape_dataset_dir = '/home/user/data/Cityscapes'\n classes = ds.Cityscapes.classes\n id2label = {cls.id:cls for cls in classes}\n\n def semantic2sparse(semantic):\n sparse = np.vectorize(lambda x: id2label[x].train_id)(np.array(semantic))\n # pylint: disable=E1101\n sparse = torch.from_numpy(sparse)\n # pylint: enable=E1101\n return sparse\n\n hflip = [RandomHorizontalFlip(args.data_aug_hflip_p)] if args.data_aug_hflip else []\n resized_crop = ([\n RandomResizedCrop(args.data_aug_crop_size, args.data_aug_crop_scale, args.data_aug_crop_ratio)] \n if args.data_aug_crop else [])\n\n aug_transform = trf.Compose(hflip + resized_crop)\n\n tensor_transform = trf.Compose([\n trf.ToTensor(),\n trf.Lambda(lambda x: x*255-128),\n ])\n semantic_transform = trf.Compose([\n trf.Lambda(semantic2sparse),\n ])\n image_transform = trf.Compose([\n trf.Lambda(np.array)\n ])\n\n aug_tensor_transform = trf.Compose(\n aug_transform.transforms + tensor_transform.transforms)\n aug_semantic_transform = trf.Compose(\n aug_transform.transforms + semantic_transform.transforms)\n aug_image_transform = trf.Compose(\n aug_transform.transforms + image_transform.transforms)\n\n if 'clear' == args.clear_foggy_beta:\n image_modes = ['clear', 'gtFine'] \n image_types = [['_leftImg8bit.png'], ['semantic', 'color']]\n else:\n image_modes = ['foggyDBF', 'gtFine'] \n image_types = [[args.clear_foggy_beta], ['semantic', 'color']]\n train_image_transforms = [aug_tensor_transform, [aug_semantic_transform, aug_image_transform]]\n valid_image_transforms = [tensor_transform, [semantic_transform, image_transform]]\n \n train_ds = ds.RefinedFoggyCityscapes(\n cityscape_dataset_dir,\n split='train',\n image_modes=image_modes, \n image_types=image_types,\n image_transforms=train_image_transforms,\n 
refined_filenames='foggy_trainval_refined_filenames.txt')\n train_ds.share_transform = aug_transform\n train_ds.update_share_transform = lambda : [transform.update() for transform in aug_transform.transforms]\n\n valid_ds = ds.RefinedFoggyCityscapes(\n cityscape_dataset_dir,\n split='val',\n image_modes=image_modes, \n image_types=image_types,\n image_transforms=valid_image_transforms,\n refined_filenames='foggy_trainval_refined_filenames.txt')\n\n train_dl = torch.utils.data.DataLoader(train_ds, batch_size=args.batch_size, shuffle=True)\n valid_dl = torch.utils.data.DataLoader(valid_ds, batch_size=args.valid_batch_size, shuffle=False)\n\n return train_dl, valid_dl\n\ndef load_model(args):\n model_pretrained_dir = '/home/user/research/refinenet-pytorch/pretrained/Cityscapes'\n model = refinenet_resnet101(model_pretrained_dir)\n return model\n\n# train_dl, valid_dl = load_train_valid_loader(args)\nclass InputOutputInterpolate(torch.nn.Module):\n def __init__(self, model, scale_factor):\n super(InputOutputInterpolate, self).__init__()\n self.model = model\n self.scale_factor = scale_factor\n\n def forward(self, x):\n size = x.shape[-2:]\n x = torch.nn.functional.interpolate(x, scale_factor=self.scale_factor, mode='bilinear', align_corners=False)\n out = self.model(x)\n return torch.nn.functional.interpolate(out, size=size, mode='bilinear', align_corners=False)\n\ndef load_training_model(args):\n training_model = InputOutputInterpolate(load_model(args), args.input_scale_factor)\n training_model.model.resnet.to(1)\n training_model.model.refinenets.to(0)\n training_model.model.clf.to(0)\n return training_model\n\nclass ModelCriteria(torch.nn.Module):\n def __init__(self, model, criteria):\n super(ModelCriteria, self).__init__()\n self.model = model\n self.criteria = criteria\n \n def forward(self, input, target):\n output = self.model(input)\n return self.criteria(output, target.to(output.device))\n\n def state_dict(self):\n return self.model.state_dict()\n\nclass ModelOptimizer(torch.nn.Module):\n def __init__(self, model_criteria, optimizer):\n super(ModelOptimizer, self).__init__()\n self.model_criteria = model_criteria\n self.optimizer = optimizer\n\n def step(self, input, target):\n self.optimizer.zero_grad()\n loss = self.model_criteria(input, target)\n loss.backward()\n self.optimizer.step()\n\n return loss\n\ndef load_model_criteria_optimizer(args):\n model = load_training_model(args)\n\n CELoss = torch.nn.CrossEntropyLoss(ignore_index=255)\n # L1Loss = torch.nn.L1Loss()\n # L2Loss = torch.nn.MSELoss()\n model_criteria = ModelCriteria(model, CELoss)\n \n optimizer = torch.optim.__dict__[args.optimizer](\n model.parameters(),\n **{k.lstrip('optimizer_'): v for k, v in vars(args).items() if 'optimizer_' in k})\n # optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)\n model_optimizer = ModelOptimizer(model_criteria, optimizer)\n\n return model, model_criteria, model_optimizer\n\nclass WandbLog():\n def __init__(self, use_wandb):\n self.use_wandb = use_wandb\n self.train_batch_step = 0\n self.valid_epoch_step = 0\n def train_batch_log(self, train_batch_loss):\n self.train_batch_step += 1\n if self.use_wandb:\n wandb.log({'Train_Batch_Loss': train_batch_loss}, step=self.train_batch_step)\n def valid_epoch_log(self, valid_epoch_loss):\n if self.use_wandb:\n wandb.log({'Valid_Epoch_Loss': valid_epoch_loss}, step=self.train_batch_step)\n self.valid_epoch_step += 1\n\ndef train_model(model_optimizer, train_dl, wandb_log, args):\n train_loss = 0\n device = 
next(model_optimizer.parameters()).device\n model_optimizer.train()\n if args.freeze_batch_norm:\n for module in model_optimizer.modules():\n if isinstance(module, nn.modules.batchnorm._BatchNorm):\n module.eval()\n \n pbar = tqdm.tqdm(enumerate(train_dl), total=len(train_dl))\n for _, ((b_clear_beta, ), (b_sparse, b_color)) in pbar:\n loss = model_optimizer.step(b_clear_beta.to(device), b_sparse)\n train_loss += loss\n\n pbar.set_description(\"Train Batch {:3d}\".format(wandb_log.train_batch_step))\n pbar.set_postfix_str(\"Batch Loss={:.4f}\".format(loss.detach().cpu().numpy()))\n wandb_log.train_batch_log(loss.detach().cpu().numpy())\n\n train_dl.dataset.update_share_transform()\n train_loss /= len(train_dl)\n pbar.write(\"Train Epoch Loss={:.4f}\".format(train_loss.detach().cpu().numpy()))\n return train_loss\n\ndef eval_model(model_criteria, valid_dl, wandb_log, args):\n eval_loss = 0\n device = next(model_criteria.parameters()).device\n model_criteria.eval()\n \n pbar = tqdm.tqdm(enumerate(valid_dl), total=len(valid_dl))\n with torch.no_grad():\n for _, ((b_clear_beta, ), (b_sparse, _)) in pbar:\n loss = model_criteria(b_clear_beta.to(device), b_sparse)\n eval_loss += loss\n\n pbar.set_description(\"Valid Epoch {:3d}\".format(wandb_log.valid_epoch_step))\n eval_loss /= len(valid_dl)\n pbar.write(\"Valid Epoch Loss={:.4f}\".format(eval_loss.cpu().numpy()))\n if wandb_log.use_wandb:\n state_dict_name = 'state_dict.{:02d}.pth'.format(wandb_log.valid_epoch_step)\n torch.save(model_criteria.state_dict(), os.path.join(wandb.run.dir, state_dict_name))\n wandb.save(state_dict_name)\n wandb_log.valid_epoch_log(eval_loss.cpu().numpy())\n return eval_loss\n\ndef main(parser, name, load_train_valid_loader, load_model_criteria_optimizer, train_model, eval_model):\n args = parser.parse_args()\n print(args)\n if args.use_wandb:\n wandb.init(project='refinenet-pytorch', name=name, config=args, dir='/home/user/research/refinenet-pytorch/train')\n\n train_dl, valid_dl = load_train_valid_loader(args)\n # train_dl.dataset.indices = train_dl.dataset.indices[:10]\n # valid_dl.dataset.indices = valid_dl.dataset.indices[:10]\n print('dataset loaded')\n model, model_criteria, model_optimizer = load_model_criteria_optimizer(args)\n print('model loaded')\n wandb_log = WandbLog(args.use_wandb)\n\n eval_model(model_criteria, valid_dl, wandb_log, args)\n for epoch in range(args.total_epoch):\n train_model(model_optimizer, train_dl, wandb_log, args)\n eval_model(model_criteria, valid_dl, wandb_log, args)\n\nif __name__ == '__main__':\n main(\n parser=arg_parser(),\n name='training',\n load_train_valid_loader=load_train_valid_loader,\n load_model_criteria_optimizer=load_model_criteria_optimizer,\n train_model=train_model,\n eval_model=eval_model)"
] | [
[
"torch.utils.data.DataLoader",
"numpy.vectorize",
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.from_numpy",
"numpy.array",
"torch.nn.functional.interpolate"
]
] |
alexanderkell/temporal_granularity | [
"f29b294beb360d8d66c6fedf78bbf9ae84055b24"
] | [
"test/test_metrics/test_multi_year_metrics.py"
] | [
"from pathlib import Path\nimport pandas as pd\nfrom src.metrics.multi_year_metrics import MultiYearMetrics\nimport pytest\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\nproject_dir = Path(\"__file__\").resolve().parents[1]\n\n\[email protected]\ndef define_multi_year_metrics():\n pv_original = pd.read_csv('{}/temporal_granularity/data/processed/resources/pv_processed.csv'.format(project_dir))\n pv_representative = pd.DataFrame({\"index_for_year\": list(range(8760)), \"capacity_factor\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] * 365})\n wind_representative = pd.DataFrame({\"index_for_year\": list(range(8760)), \"capacity_factor\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] * 365})\n load_representative = pd.DataFrame({\"index_for_year\": list(range(8760)), \"capacity_factor\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] * 365})\n wind_original = pd.read_csv('{}/temporal_granularity/data/processed/resources/onshore_processed.csv'.format(project_dir))\n # load_original = pd.read_csv('{}/temporal_granularity/data/processed/demand/load_processed_normalised.csv'.format(project_dir))\n load_original = pd.read_csv('{}/temporal_granularity/data/processed/demand/load_NG/load_processed_normalised.csv'.format(project_dir))\n\n original_data = []\n for dat in [pv_original, wind_original, load_original]:\n dat.datetime = pd.to_datetime(dat.datetime)\n dat['year'] = dat.datetime.dt.year\n original_data.append(dat)\n\n multi_year_metrics_calc = MultiYearMetrics(original_data[0], pv_representative, original_data[1], wind_representative, original_data[2], load_representative)\n yield multi_year_metrics_calc\n\n\nclass Test_MultiYearMetrics:\n\n def test_group_list_dataframes(self, define_multi_year_metrics):\n\n grouped_dfs = define_multi_year_metrics._group_list_dataframes()\n assert len(grouped_dfs) == 3\n assert list(grouped_dfs[0].groups.keys()) == list(range(1980, 2017))\n assert list(grouped_dfs[1].groups.keys()) == list(range(1980, 2017))\n assert list(grouped_dfs[2].groups.keys()) == list(range(2005, 2019))\n\n def test_get_multi_year_metrics(self, define_multi_year_metrics):\n\n result_errors = define_multi_year_metrics.get_multi_year_metrics(\"dc\")\n\n def test_get_multi_year_average_metrics(self, define_multi_year_metrics):\n mean_errors = define_multi_year_metrics.get_multi_year_average_metrics(\"dc\")\n logger.debug(mean_errors)\n"
] | [
[
"pandas.to_datetime"
]
] |
Frognar/Super-Resolution | [
"406b909d71e156aa11ee589698744e3ad9abfee7"
] | [
"nn/block/upsample_blocks.py"
] | [
"import torch.nn as nn\nfrom torch.nn.functional import interpolate\n\n\nclass PixelShuffleUpscaleBlock(nn.Module):\n def __init__(self, in_channels=64, kernel_size=3, upscale_factor=2):\n super().__init__()\n\n self.block = nn.Sequential(\n nn.Conv2d(in_channels=in_channels,\n out_channels=in_channels * (upscale_factor ** 2),\n kernel_size=kernel_size, padding=kernel_size // 2),\n nn.PixelShuffle(upscale_factor=upscale_factor),\n nn.PReLU()\n )\n\n def forward(self, input_data):\n return self.block(input_data)\n\n\nclass UpscaleBlock(nn.Module):\n def __init__(self, channels=64, kernel_size=3, upscale_factor=2):\n super().__init__()\n self.scale_factor = upscale_factor\n self.block = nn.Sequential(\n nn.Conv2d(in_channels=channels, out_channels=channels,\n kernel_size=kernel_size, padding=kernel_size // 2),\n nn.LeakyReLU(negative_slope=0.2, inplace=True)\n )\n\n def forward(self, input_data):\n return self.block(self.upscale(input_data))\n\n def upscale(self, data):\n return interpolate(data, scale_factor=self.scale_factor, mode='nearest')\n"
] | [
[
"torch.nn.PixelShuffle",
"torch.nn.PReLU",
"torch.nn.Conv2d",
"torch.nn.functional.interpolate",
"torch.nn.LeakyReLU"
]
] |
TannerGilbert/Machine-Learning-Explained | [
"5309f44a38ce862f3f177e8d5de2e60eea44637b"
] | [
"Optimizers/adam/code/adam.py"
] | [
"# based on https://ruder.io/optimizing-gradient-descent/#adam\n# and https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/optimizers.py#L106\n\nimport numpy as np\n\n\nclass Adam:\n \"\"\"Adam - Adaptive Moment Estimation\n Parameters:\n -----------\n learning_rate: float = 0.001\n The step length used when following the negative gradient.\n beta_1: float = 0.9\n The exponential decay rate for the 1st moment estimates.\n beta_2: float = 0.999\n The exponential decay rate for the 2nd moment estimates.\n epsilon: float = 1e-07\n A small floating point value to avoid zero denominator.\n \"\"\"\n def __init__(self, learning_rate: float = 0.001, beta_1: float = 0.9, beta_2: float = 0.999, epsilon: float = 1e-7) -> None:\n self.learning_rate = learning_rate\n self.epsilon = epsilon\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n\n self.t = 0\n self.m = None # Decaying averages of past gradients\n self.v = None # Decaying averages of past squared gradients\n\n def update(self, w: np.ndarray, grad_wrt_w: np.ndarray) -> np.ndarray:\n self.t += 1\n if self.m is None:\n self.m = np.zeros(np.shape(grad_wrt_w))\n self.v = np.zeros(np.shape(grad_wrt_w))\n\n self.m = self.beta_1 * self.m + (1 - self.beta_1) * grad_wrt_w\n self.v = self.beta_2 * self.v + (1 - self.beta_2) * np.power(grad_wrt_w, 2)\n\n m_hat = self.m / (1 - self.beta_1**self.t)\n v_hat = self.v / (1 - self.beta_2**self.t)\n\n w_update = self.learning_rate * m_hat / (np.sqrt(v_hat) + self.epsilon)\n\n return w - w_update\n"
] | [
[
"numpy.sqrt",
"numpy.power",
"numpy.shape"
]
] |
mustelideos/td-opswtw-competition-rl | [
"afbd6603b74f09c133d5d68e587fc93387ca93ba"
] | [
"models/neural_net.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torch.utils.checkpoint import checkpoint\n\nimport math\nimport numpy as np\n\n# ------------------------------------------------------------------------------\n# Transformer model from: https://github.com/JayParks/transformer\n# and https://github.com/jadore801120/attention-is-all-you-need-pytorch\n\n\nclass ScaledDotProductAttention(nn.Module):\n def __init__(self, d_k):\n super(ScaledDotProductAttention, self).__init__()\n self.scale_factor = np.sqrt(d_k)\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, q, k, v, attn_mask=None):\n # q: [b_size x len_q x d_k]\n # k: [b_size x len_k x d_k]\n # v: [b_size x len_v x d_v] note: (len_k == len_v)\n attn = torch.bmm(q, k.transpose(1, 2)) / self.scale_factor # attn: [b_size x len_q x len_k]\n if attn_mask is not None:\n # assert attn_mask.size() == attn.size()\n attn.data.masked_fill_(attn_mask==0, -1e32)\n\n attn = self.softmax(attn )\n outputs = torch.bmm(attn, v) # outputs: [b_size x len_q x d_v]\n return outputs, attn\n\n\nclass _MultiHeadAttention(nn.Module):\n def __init__(self, d_model, n_heads):\n super(_MultiHeadAttention, self).__init__()\n\n self.d_k = d_model // n_heads\n self.d_v = d_model // n_heads\n self.d_model = d_model\n self.n_heads = n_heads\n\n self.w_q = nn.Parameter(torch.FloatTensor(n_heads, d_model, self.d_k))\n self.w_k = nn.Parameter(torch.FloatTensor(n_heads, d_model, self.d_k))\n self.w_v = nn.Parameter(torch.FloatTensor(n_heads, d_model, self.d_v))\n\n self.attention = ScaledDotProductAttention(self.d_k)\n\n def forward(self, q, k, v, attn_mask=None, use_adj_mask=False):\n (d_k, d_v, d_model, n_heads) = (self.d_k, self.d_v, self.d_model, self.n_heads)\n b_size = k.size(0)\n\n q_s = q.repeat(n_heads, 1, 1).view(n_heads, -1, d_model) # [n_heads x b_size * len_q x d_model]\n k_s = k.repeat(n_heads, 1, 1).view(n_heads, -1, d_model) # [n_heads x b_size * len_k x d_model]\n v_s = v.repeat(n_heads, 1, 1).view(n_heads, -1, d_model) # [n_heads x b_size * len_v x d_model]\n\n q_s = torch.bmm(q_s, self.w_q).view(b_size * n_heads, -1, d_k) # [b_size * n_heads x len_q x d_k]\n k_s = torch.bmm(k_s, self.w_k).view(b_size * n_heads, -1, d_k) # [b_size * n_heads x len_k x d_k]\n v_s = torch.bmm(v_s, self.w_v).view(b_size * n_heads, -1, d_v) # [b_size * n_heads x len_v x d_v]\n\n if attn_mask is not None:\n if use_adj_mask:\n outputs, attn = self.attention(q_s, k_s, v_s, attn_mask=attn_mask.repeat(n_heads, 1, 1))\n else:\n outputs, attn = self.attention(q_s, k_s, v_s, attn_mask=attn_mask.unsqueeze(1).repeat(n_heads, 1, 1))\n else:\n outputs, attn = self.attention(q_s, k_s, v_s, attn_mask=None)\n\n return torch.split(outputs, b_size, dim=0), attn\n\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, d_model, n_heads):\n super(MultiHeadAttention, self).__init__()\n\n self.d_k = d_model // n_heads\n self.attention = _MultiHeadAttention(d_model, n_heads)\n self.proj = nn.Linear(n_heads * self.d_k, d_model)\n self.layer_norm = nn.LayerNorm(d_model)\n\n def forward(self, q, k, v, attn_mask = None, use_adj_mask = False):\n # q: [b_size x len_q x d_model]\n # k: [b_size x len_k x d_model]\n # v: [b_size x len_v x d_model] note (len_k == len_v)\n\n # outputs: a list of tensors of shape [b_size x len_q x d_v] (length: n_heads)\n outputs, attn = self.attention(q, k, v, attn_mask=attn_mask, use_adj_mask=use_adj_mask)\n # concatenate 'n_heads' multi-head attentions\n outputs = torch.cat(outputs, dim=-1)\n # project back to 
residual size, result_size = [b_size x len_q x d_model]\n outputs = self.proj(outputs)\n\n return outputs \n\n\n#----------- Pointer models common blocks ---------------------\n\nclass Attention(nn.Module):\n # Bahdanau Attention (sum)\n def __init__(self, hidden_size, is_glimpse=False, C=10):\n \n super(Attention, self).__init__()\n \n self.C = C # tanh exploration\n self.W1 = nn.Linear(hidden_size, hidden_size, bias=False)\n self.W2 = nn.Linear(hidden_size, hidden_size)\n self.V = nn.Parameter(torch.zeros((hidden_size, 1), requires_grad=True))\n self.is_glimpse = is_glimpse\n\n def forward(self, h0, enc_outputs, mask):\n \n w1e = self.W1(enc_outputs)\n w2h = self.W2(h0).unsqueeze(1)\n u = torch.tanh(w1e + w2h)\n a = u.matmul(self.V)\n\n if self.is_glimpse:\n att = F.softmax(a, dim=1).transpose(1, 2)\n out = torch.bmm(att, enc_outputs).squeeze(1)\n return out\n else:\n a = self.C*torch.tanh(a).squeeze(2)\n policy = F.softmax(a + mask.float().log(), dim=1)\n return policy\n\n\nclass Decoder(nn.Module):\n def __init__(self, hidden_size, has_glimpse = False):\n super(Decoder, self).__init__()\n\n self.has_glimpse = has_glimpse\n self.first_h_0 = nn.Parameter(torch.FloatTensor(1, hidden_size), requires_grad=True)\n self.first_h_0.data.uniform_(-(1. / math.sqrt(hidden_size)), 1. / math.sqrt(hidden_size))\n\n self.c0 = nn.Parameter(torch.FloatTensor( 1, hidden_size),requires_grad=True)\n self.c0.data.uniform_(-(1. / math.sqrt(hidden_size)), 1. / math.sqrt(hidden_size))\n\n self.hidden_0 = (self.first_h_0, self.c0)\n\n self.lstm = nn.LSTMCell(hidden_size, hidden_size)\n\n self.pointer = Attention(hidden_size)\n if self.has_glimpse:\n self.glimpse = Attention(hidden_size, is_glimpse=True)\n\n def forward(self, input, hidden, enc_outputs, mask):\n hidden = self.lstm(input, hidden)\n\n if self.has_glimpse:\n glimpse_h0 = self.glimpse(hidden[0], enc_outputs, mask)\n policy = self.pointer(glimpse_h0, enc_outputs, mask)\n else:\n policy = self.pointer(hidden[0], enc_outputs, mask)\n return policy, hidden\n\n\nclass EncoderLayer(nn.Module):\n def __init__(self, d_model, d_ff, n_heads, dropout, pre_lnorm=False):\n super(EncoderLayer, self).__init__()\n\n self.pre_lnorm = pre_lnorm\n self.self_attn = MultiHeadAttention(d_model, n_heads)\n self.w_1 = nn.Linear(d_model, d_ff)\n self.w_2 = nn.Linear(d_ff, d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.dropout3 = nn.Dropout(dropout)\n self.layer_norm1 = nn.LayerNorm(d_model)\n self.layer_norm2 = nn.LayerNorm(d_model)\n self.relu = nn.ReLU()\n\n def forward(self, src, rec_src, self_attn_mask, use_adj_mask = False):\n\n if self.pre_lnorm:\n src = self.layer_norm1(src)\n src2 = self.self_attn(src, rec_src, src, attn_mask=self_attn_mask, use_adj_mask = use_adj_mask)\n src = src + self.dropout1(src2)\n src = self.layer_norm2(src)\n src2 = self.w_2(self.dropout2(self.relu(self.w_1(src))))\n src = src + self.dropout3(src2)\n else:\n src2 = self.self_attn(src, rec_src, src, attn_mask=self_attn_mask, use_adj_mask = use_adj_mask)\n src = src + self.dropout1(src2)\n src = self.layer_norm1(src)\n src2 = self.w_2(self.dropout2(self.relu(self.w_1(src))))\n src = src + self.dropout3(src2)\n src = self.layer_norm2(src)\n return src\n\n\nclass Encoder(nn.Module):\n def __init__(self, features_dim, dfeatures_dim, hidden_size, args):\n super(Encoder, self).__init__()\n\n n_heads = args.n_heads # number of heads\n d_ff = args.ff_dim # feed_forward_hidden\n n_layers = args.n_layers # number of Layers\n dropout = args.dropout\n 
self.pre_lnorm = args.pre_lnorm\n self.L1 = nn.Linear(features_dim, hidden_size//2) # for static features\n self.L2 = nn.Linear(dfeatures_dim, hidden_size//2) # for dynamic features\n\n self.layers = nn.ModuleList([EncoderLayer(hidden_size, d_ff, n_heads, dropout, pre_lnorm=self.pre_lnorm) for _ in range(n_layers)])\n self.last_norm = nn.LayerNorm(hidden_size)\n self.use_adj_mask = args.use_lookahead\n\n def forward(self, emb_inp, rec_inp, mask, dummy_arg):\n for layer in self.layers:\n emb_inp = layer(emb_inp, rec_inp, mask, self.use_adj_mask)\n\n if self.pre_lnorm:\n emb_inp = self.last_norm(emb_inp)\n return emb_inp\n\n\nclass Agent(nn.Module):\n\n def __init__(self, features_dim, dfeatures_dim, hidden_dim, args, has_glimpse = False):\n super(Agent, self).__init__()\n\n self.features_dim = features_dim\n self.dfeatures_dim = dfeatures_dim\n self.use_checkpoint = args.use_checkpoint\n self.hidden_dim = hidden_dim\n self.decoder = Decoder(hidden_dim, has_glimpse)\n self.encoder = Encoder(features_dim, dfeatures_dim, hidden_dim, args)\n # see https://discuss.pytorch.org/t/checkpoint-with-no-grad-requiring-inputs-problem/19117/11\n self.dummy_tensor = torch.ones(1, dtype=torch.float32, requires_grad=True)\n\n self._initialize_parameters()\n\n def _initialize_parameters(self):\n for name, param in self.named_parameters():\n if len(param.shape) > 1:\n nn.init.xavier_uniform_(param)\n\n def _load_model_weights(self, path_string, device):\n self.load_state_dict(torch.load(path_string, map_location=device))\n\n\n def forward(self, enc_inputs, enc_hidden, adj_mask, dec_input, dec_hidden, mask, first_step=False):\n policy, dec_hidden, enc_outputs = self._one_step(enc_inputs, enc_hidden, adj_mask, dec_input, dec_hidden, mask, first_step)\n return policy, dec_hidden, enc_outputs\n\n def _one_step(self, enc_inputs, enc_hidden, adj_mask, dec_input, dec_hidden, mask, first_step):\n if self.use_checkpoint:\n enc_outputs = checkpoint(self.encoder, enc_inputs, enc_hidden, adj_mask, self.dummy_tensor)\n else:\n enc_outputs = self.encoder(enc_inputs, enc_hidden, adj_mask, self.dummy_tensor)\n\n if first_step:\n return None, None, enc_outputs\n else:\n policy, dec_hidden = self.decoder(dec_input, dec_hidden, enc_outputs, mask)\n return policy, dec_hidden, enc_outputs\n\n def sta_emb(self, sta_inp):\n return torch.tanh(self.encoder.L1(sta_inp))\n\n def dyn_emb(self, dyn_inp):\n return torch.tanh(self.encoder.L2(dyn_inp))\n"
] | [
[
"torch.ones",
"torch.FloatTensor",
"torch.nn.Linear",
"torch.load",
"torch.nn.Dropout",
"torch.split",
"torch.utils.checkpoint.checkpoint",
"torch.nn.init.xavier_uniform_",
"torch.nn.Softmax",
"torch.nn.functional.softmax",
"torch.zeros",
"torch.nn.LSTMCell",
"torch.nn.ReLU",
"torch.nn.LayerNorm",
"torch.tanh",
"numpy.sqrt",
"torch.bmm",
"torch.cat"
]
] |
michelebucelli/cardioemulator | [
"0ce8d5fce017a7251865ab01fdf3d0653490b60f"
] | [
"example/circulation_closed_loop.py"
] | [
"import numpy as np\nimport pandas as pd\nimport json\nimport csv\nimport time\nfrom scipy.integrate import RK45, solve_ivp\n\nclass circulation_closed_loop:\n \"\"\"\n Closed loop circulation model.\n\n References\n ----------\n F. Regazzoni, M. Salvador, P. C. Africa, M. Fedele, L. Dede', A. Quarteroni,\n \"A cardiac electromechanics model coupled with a lumped parameters model for\n closed-loop blood circulation. Part I: model derivation\", arXiv (2020)\n https://arxiv.org/abs/2011.15040\n\n \"\"\"\n\n def __init__(self, options = dict()):\n\n if isinstance(options, str):\n with open(options, mode='r', newline='') as inputfile:\n options = json.loads(inputfile.read())\n\n ############ Heartbeat\n self.BPM = float(options.get('BPM', 72)) # [1 / min]\n self.THB = 60. / self.BPM # [s], Heartbeat period\n\n ############ Chambers\n # LA\n options_curr = options.get('LA', dict())\n EA_LA = float(options_curr.get('EA', 0.07)) # [mmHg / ml]\n EB_LA = float(options_curr.get('EB', 0.09)) # [mmHg / ml]\n TC_LA = float(options_curr.get('TC', 0.17)) * self.THB # [s]\n TR_LA = float(options_curr.get('TR', 0.17)) * self.THB # [s]\n tC_LA = float(options_curr.get('tC', 0.80)) * self.THB # [s]\n self.V0_LA = float(options_curr.get('V0', 4.0)) # [ml]\n self.E_LA = self.time_varying_elastance(EA_LA, EB_LA, tC_LA, TC_LA, TR_LA)\n\n # LV\n options_curr = options.get('LV', dict())\n EA_LV = float(options_curr.get('EA', 2.75)) # [mmHg / ml]\n EB_LV = float(options_curr.get('EB', 0.08)) # [mmHg / ml]\n TC_LV = float(options_curr.get('TC', 0.34)) * self.THB # [s]\n TR_LV = float(options_curr.get('TR', 0.17)) * self.THB # [s]\n tC_LV = float(options_curr.get('tC', 0.00)) * self.THB # [s]\n self.V0_LV = float(options_curr.get('V0', 5.0)) # [ml]\n self.E_LV = self.time_varying_elastance(EA_LV, EB_LV, tC_LV, TC_LV, TR_LV)\n\n # RA\n options_curr = options.get('RA', dict())\n EA_RA = float(options_curr.get('EA', 0.06)) # [mmHg / ml]\n EB_RA = float(options_curr.get('EB', 0.07)) # [mmHg / ml]\n TC_RA = float(options_curr.get('TC', 0.17)) * self.THB # [s]\n TR_RA = float(options_curr.get('TR', 0.17)) * self.THB # [s]\n tC_RA = float(options_curr.get('tC', 0.80)) * self.THB # [s]\n self.V0_RA = float(options_curr.get('V0', 4.0)) # [ml]\n self.E_RA = self.time_varying_elastance(EA_RA, EB_RA, tC_RA, TC_RA, TR_RA)\n\n # RV\n options_curr = options.get('RV', dict())\n EA_RV = float(options_curr.get('EA', 0.55)) # [mmHg / ml]\n EB_RV = float(options_curr.get('EB', 0.05)) # [mmHg / ml]\n TC_RV = float(options_curr.get('TC', 0.34)) * self.THB # [s]\n TR_RV = float(options_curr.get('TR', 0.17)) * self.THB # [s]\n tC_RV = float(options_curr.get('tC', 0.00)) * self.THB # [s]\n self.V0_RV = float(options_curr.get('V0', 10.0)) # [ml]\n self.E_RV = self.time_varying_elastance(EA_RV, EB_RV, tC_RV, TC_RV, TR_RV)\n\n ############ Valves\n heavisideMY = lambda x: np.arctan( np.pi / 2 * x * 200 ) * 1 / np.pi + 0.5\n options_curr = options.get('valves', dict())\n Rmin = float(options_curr.get('Rmin', 0.0075)) # [mmHg s / ml]\n Rmax = float(options_curr.get('Rmax', 75006.2)) # [mmHg s / ml]\n self.R_MV = lambda w, v: 10.**( np.log10( Rmin ) + ( np.log10( Rmax ) - np.log10( Rmin ) ) * heavisideMY( v - w ) )\n self.R_AV = lambda w, v: 10.**( np.log10( Rmin ) + ( np.log10( Rmax ) - np.log10( Rmin ) ) * heavisideMY( v - w ) )\n self.R_TV = lambda w, v: 10.**( np.log10( Rmin ) + ( np.log10( Rmax ) - np.log10( Rmin ) ) * heavisideMY( v - w ) )\n self.R_PV = lambda w, v: 10.**( np.log10( Rmin ) + ( np.log10( Rmax ) - np.log10( Rmin ) ) * 
heavisideMY( v - w ) )\n\n ############ Systemic circulation\n options_curr = options.get('SYS', dict())\n self.R_AR_SYS = float(options_curr.get('R_AR' , 0.8 )) # [mmHg s /ml]\n self.C_AR_SYS = float(options_curr.get('C_AR' , 1.2 )) # [ml / mmHg]\n self.R_VEN_SYS = float(options_curr.get('R_VEN', 0.26)) # [mmHg s /ml]\n self.C_VEN_SYS = float(options_curr.get('C_VEN', 60. )) # [ml / mmHg]\n self.L_AR_SYS = float(options_curr.get('L_AR' , 5e-3)) # [mmHg s^2 / ml]\n self.L_VEN_SYS = float(options_curr.get('L_VEN', 5e-4)) # [mmHg s^2 / ml]\n\n ############ Pulmonary circulation\n options_curr = options.get('PUL', dict())\n self.R_AR_PUL = float(options_curr.get('R_AR' , 0.1625)) # [mmHg s /ml]\n self.C_AR_PUL = float(options_curr.get('C_AR' , 10. )) # [ml / mmHg]\n self.R_VEN_PUL = float(options_curr.get('R_VEN', 0.1625)) # [mmHg s /ml]\n self.C_VEN_PUL = float(options_curr.get('C_VEN', 16. )) # [ml / mmHg]\n self.L_AR_PUL = float(options_curr.get('L_AR' , 5e-4 )) # [mmHg s^2 / ml]\n self.L_VEN_PUL = float(options_curr.get('L_VEN', 5e-4 )) # [mmHg s^2 / ml]\n\n ############ PV relationships\n self.p_LA_func = lambda V, t: self.E_LA(t) * ( V - self.V0_LA )\n self.p_LV_func = lambda V, t: self.E_LV(t) * ( V - self.V0_LV )\n self.p_RA_func = lambda V, t: self.E_RA(t) * ( V - self.V0_RA )\n self.p_RV_func = lambda V, t: self.E_RV(t) * ( V - self.V0_RV )\n\n def flux_through_valve(self, p1, p2, R):\n return ( p1 - p2 ) / R( p1, p2 )\n\n def time_varying_elastance(self, EA, EB, time_C, duration_C, duration_R):\n time_R = time_C + duration_C\n e = lambda t: 0.5 * ( 1 - np.cos( np.pi / duration_C * ( np.mod( t - time_C, self.THB ) ) ) ) * ( 0 <= np.mod( t - time_C, self.THB ) ) * ( np.mod( t - time_C, self.THB ) < duration_C ) + \\\n 0.5 * ( 1 + np.cos( np.pi / duration_R * ( np.mod( t - time_R, self.THB ) ) ) ) * ( 0 <= np.mod( t - time_R, self.THB ) ) * ( np.mod( t - time_R, self.THB ) < duration_R )\n return lambda t: EA * np.clip(e(t), 0.0, 1.0) + EB\n\n def initialize(self, initial_state = dict()):\n\n if isinstance(initial_state, str):\n with open(initial_state, mode='r', newline='') as inputfile:\n initial_state = json.loads(inputfile.read())\n\n self.V_LA = float(initial_state.get('V_LA' , 65.)) # [ml]\n self.V_LV = float(initial_state.get('V_LV' , 120.)) # [ml]\n self.V_RA = float(initial_state.get('V_RA' , 65.)) # [ml]\n self.V_RV = float(initial_state.get('V_RV' , 145.)) # [ml]\n\n self.p_AR_SYS = float(initial_state.get('p_AR_SYS' , 80.)) # [mmHg]\n self.p_VEN_SYS = float(initial_state.get('p_VEN_SYS', 30.)) # [mmHg]\n self.p_AR_PUL = float(initial_state.get('p_AR_PUL' , 35.)) # [mmHg]\n self.p_VEN_PUL = float(initial_state.get('p_VEN_PUL', 24.)) # [mmHg]\n\n self.Q_AR_SYS = float(initial_state.get('Q_AR_SYS' , 0.)) # [ml/s]\n self.Q_VEN_SYS = float(initial_state.get('Q_VEN_SYS', 0.)) # [ml/s]\n self.Q_AR_PUL = float(initial_state.get('Q_AR_PUL' , 0.)) # [ml/s]\n self.Q_VEN_PUL = float(initial_state.get('Q_VEN_PUL', 0.)) # [ml/s]\n\n self.update_static_variables(0.)\n\n def update_static_variables(self, t):\n self.p_LA = self.p_LA_func(self.V_LA, t)\n self.p_LV = self.p_LV_func(self.V_LV, t)\n self.p_RA = self.p_RA_func(self.V_RA, t)\n self.p_RV = self.p_RV_func(self.V_RV, t)\n\n self.Q_MV = self.flux_through_valve( self.p_LA, self.p_LV , self.R_MV )\n self.Q_AV = self.flux_through_valve( self.p_LV, self.p_AR_SYS, self.R_AV )\n self.Q_TV = self.flux_through_valve( self.p_RA, self.p_RV , self.R_TV )\n self.Q_PV = self.flux_through_valve( self.p_RV, self.p_AR_PUL, self.R_PV )\n\n def 
solve_step_FE(self, t, dt):\n self.update_static_variables(t)\n\n self.V_LA += dt * ( self.Q_VEN_PUL - self.Q_MV )\n self.V_LV += dt * ( self.Q_MV - self.Q_AV )\n self.V_RA += dt * ( self.Q_VEN_SYS - self.Q_TV )\n self.V_RV += dt * ( self.Q_TV - self.Q_PV )\n self.p_AR_SYS += dt * ( self.Q_AV - self.Q_AR_SYS ) / self.C_AR_SYS\n self.p_VEN_SYS += dt * ( self.Q_AR_SYS - self.Q_VEN_SYS ) / self.C_VEN_SYS\n self.p_AR_PUL += dt * ( self.Q_PV - self.Q_AR_PUL ) / self.C_AR_PUL\n self.p_VEN_PUL += dt * ( self.Q_AR_PUL - self.Q_VEN_PUL ) / self.C_VEN_PUL\n self.Q_AR_SYS += -dt * ( self.R_AR_SYS * self.Q_AR_SYS + self.p_VEN_SYS - self.p_AR_SYS ) / self.L_AR_SYS\n self.Q_VEN_SYS += -dt * ( self.R_VEN_SYS * self.Q_VEN_SYS + self.p_RA - self.p_VEN_SYS ) / self.L_VEN_SYS\n self.Q_AR_PUL += -dt * ( self.R_AR_PUL * self.Q_AR_PUL + self.p_VEN_PUL - self.p_AR_PUL ) / self.L_AR_PUL\n self.Q_VEN_PUL += -dt * ( self.R_VEN_PUL * self.Q_VEN_PUL + self.p_LA - self.p_VEN_PUL ) / self.L_VEN_PUL\n\n def solve(self, T = None, num_cycles = None,\n initial_state = None,\n dt = 1e-3,\n dt_eval = None):\n\n print('Circulation model - running simulation...')\n if (T is None and num_cycles is None) or (T is not None and num_cycles is not None):\n raise Exception('Exactly one among T and num_cycles should be not None.')\n\n if num_cycles is not None:\n T = self.THB * num_cycles\n if dt_eval is None:\n output_every_n_steps = 1\n else:\n output_every_n_steps = np.round(dt_eval / dt)\n times = np.arange(0, T, dt)\n\n self.initialize(initial_state = initial_state)\n self.initialize_output()\n self.dump_output(0.0)\n\n time_start = time.time()\n\n for iT in range(1, times.shape[0]):\n self.solve_step_FE(times[iT], dt)\n if iT % output_every_n_steps == 0:\n self.dump_output(times[iT])\n\n duration = time.time() - time_start\n\n print('Circulation model - elapsed time %1.4f s' % duration)\n return pd.DataFrame(self.results)\n\n def initialize_output(self):\n self.results = dict()\n self.results['time'] = list()\n self.results['VLA'] = list()\n self.results['VLV'] = list()\n self.results['VRA'] = list()\n self.results['VRV'] = list()\n self.results['pARSYS'] = list()\n self.results['pVENSYS'] = list()\n self.results['pARPUL'] = list()\n self.results['pVENPUL'] = list()\n self.results['QARSYS'] = list()\n self.results['QVENSYS'] = list()\n self.results['QARPUL'] = list()\n self.results['QVENPUL'] = list()\n self.results['pLA'] = list()\n self.results['pLV'] = list()\n self.results['pRA'] = list()\n self.results['pRV'] = list()\n self.results['ELA'] = list()\n self.results['ELV'] = list()\n self.results['ERA'] = list()\n self.results['ERV'] = list()\n self.results['QMV'] = list()\n self.results['QAV'] = list()\n self.results['QTV'] = list()\n self.results['QPV'] = list()\n\n def dump_output(self, t):\n self.results['time' ].append(t)\n self.results['VLA' ].append(self.V_LA)\n self.results['VLV' ].append(self.V_LV)\n self.results['VRA' ].append(self.V_RA)\n self.results['VRV' ].append(self.V_RV)\n self.results['pARSYS' ].append(self.p_AR_SYS)\n self.results['pVENSYS'].append(self.p_VEN_SYS)\n self.results['pARPUL' ].append(self.p_AR_PUL)\n self.results['pVENPUL'].append(self.p_VEN_PUL)\n self.results['QARSYS' ].append(self.Q_AR_SYS)\n self.results['QVENSYS'].append(self.Q_VEN_SYS)\n self.results['QARPUL' ].append(self.Q_AR_PUL)\n self.results['QVENPUL'].append(self.Q_VEN_PUL)\n self.results['pLA' ].append(self.p_LA)\n self.results['pLV' ].append(self.p_LV)\n self.results['pRA' ].append(self.p_RA)\n self.results['pRV' 
].append(self.p_RV)\n self.results['ELA' ].append(self.E_LA(t))\n self.results['ELV' ].append(self.E_LV(t))\n self.results['ERA' ].append(self.E_RA(t))\n self.results['ERV' ].append(self.E_RV(t))\n self.results['QMV' ].append(self.Q_MV)\n self.results['QAV' ].append(self.Q_AV)\n self.results['QTV' ].append(self.Q_TV)\n self.results['QPV' ].append(self.Q_PV)\n\n def save_state(self, filename):\n\n with open(filename, mode='w', newline='') as outfile:\n state = dict()\n state['V_LA'] = float(self.V_LA)\n state['V_LV'] = float(self.V_LV)\n state['V_RA'] = float(self.V_RA)\n state['V_RV'] = float(self.V_RV)\n state['p_LA'] = float(self.p_LA)\n state['p_LV'] = float(self.p_LV)\n state['p_RA'] = float(self.p_RA)\n state['p_RV'] = float(self.p_RV)\n state['p_AR_SYS'] = float(self.p_AR_SYS)\n state['p_VEN_SYS'] = float(self.p_VEN_SYS)\n state['p_AR_PUL'] = float(self.p_AR_PUL)\n state['p_VEN_PUL'] = float(self.p_VEN_PUL)\n state['Q_AR_SYS'] = float(self.Q_AR_SYS)\n state['Q_VEN_SYS'] = float(self.Q_VEN_SYS)\n state['Q_AR_PUL'] = float(self.Q_AR_PUL)\n state['Q_VEN_PUL'] = float(self.Q_VEN_PUL)\n json.dump(state, outfile, indent=2)\n\n def print_info(self):\n\n print('V_LA = %4.2f mL' % self.V_LA)\n print('V_LV = %4.2f mL' % self.V_LV)\n print('V_RA = %4.2f mL' % self.V_RA)\n print('V_RV = %4.2f mL' % self.V_RV)\n print('V_AR_SYS = %4.2f mL' % (self.C_AR_SYS * self.p_AR_SYS ))\n print('V_VEN_SYS = %4.2f mL' % (self.C_VEN_SYS * self.p_VEN_SYS))\n print('V_AR_PUL = %4.2f mL' % (self.C_AR_PUL * self.p_AR_PUL ))\n print('V_VEN_PUL = %4.2f mL' % (self.C_VEN_PUL * self.p_VEN_PUL))\n\n V_tot_heart = self.V_LA + self.V_LV + self.V_RA + self.V_RV\n V_tot_SYS = self.C_AR_SYS * self.p_AR_SYS \\\n + self.C_VEN_SYS * self.p_VEN_SYS\n V_tot_PUL = self.C_AR_PUL * self.p_AR_PUL \\\n + self.C_VEN_PUL * self.p_VEN_PUL\n V_tot = V_tot_heart + V_tot_SYS + V_tot_PUL\n print('======================')\n print('V (heart) = %4.2f mL' % V_tot_heart)\n print('V (SYS) = %4.2f mL' % V_tot_SYS)\n print('V (PUL) = %4.2f mL' % V_tot_PUL)\n print('======================')\n print('V = %4.2f mL' % V_tot)"
] | [
[
"numpy.arctan",
"pandas.DataFrame",
"numpy.mod",
"numpy.arange",
"numpy.log10",
"numpy.round"
]
] |
kim95175/detr | [
"342947185153e1f599b47da423a0c49329bbe055"
] | [
"main.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport argparse\nimport datetime\nimport json\nimport random\nimport time\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, DistributedSampler\n\nimport datasets\nimport util.misc as utils\nfrom datasets import build_dataset, get_coco_api_from_dataset\nfrom engine import evaluate, train_one_epoch\nfrom models import build_model\nfrom detr_dataset import *\n\n\ndef get_args_parser():\n parser = argparse.ArgumentParser('Set transformer detector', add_help=False)\n parser.add_argument('--lr', default=1e-4, type=float)\n parser.add_argument('--lr_backbone', default=1e-5, type=float)\n parser.add_argument('--batch_size', default=32, type=int)\n parser.add_argument('--weight_decay', default=1e-4, type=float)\n parser.add_argument('--epochs', default=200, type=int)\n parser.add_argument('--lr_drop', default=200, type=int)\n parser.add_argument('--clip_max_norm', default=0.1, type=float,\n help='gradient clipping max norm')\n\n # Model parameters\n parser.add_argument('--frozen_weights', type=str, default=None,\n help=\"Path to the pretrained model. If set, only the mask head will be trained\")\n # * Backbone\n parser.add_argument('--backbone', default='resnet50', type=str,\n help=\"Name of the convolutional backbone to use\")\n parser.add_argument('--dilation', action='store_true',\n help=\"If true, we replace stride with dilation in the last convolutional block (DC5)\")\n parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),\n help=\"Type of positional embedding to use on top of the image features\")\n\n # * Transformer\n parser.add_argument('--enc_layers', default=6, type=int,\n help=\"Number of encoding layers in the transformer\")\n parser.add_argument('--dec_layers', default=6, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--dim_feedforward', default=2048, type=int,\n help=\"Intermediate size of the feedforward layers in the transformer blocks\")\n parser.add_argument('--hidden_dim', default=256, type=int,\n help=\"Size of the embeddings (dimension of the transformer)\")\n parser.add_argument('--dropout', default=0.1, type=float,\n help=\"Dropout applied in the transformer\")\n parser.add_argument('--nheads', default=8, type=int,\n help=\"Number of attention heads inside the transformer's attentions\")\n parser.add_argument('--num_queries', default=20, type=int,\n help=\"Number of query slots\")\n parser.add_argument('--pre_norm', action='store_true')\n\n # * Segmentation\n parser.add_argument('--masks', action='store_true',\n help=\"Train segmentation head if the flag is provided\")\n\n # Loss\n parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',\n help=\"Disables auxiliary decoding losses (loss at each layer)\")\n # * Matcher\n parser.add_argument('--set_cost_class', default=1, type=float,\n help=\"Class coefficient in the matching cost\")\n parser.add_argument('--set_cost_bbox', default=5, type=float,\n help=\"L1 box coefficient in the matching cost\")\n parser.add_argument('--set_cost_giou', default=2, type=float,\n help=\"giou box coefficient in the matching cost\")\n # * Loss coefficients\n parser.add_argument('--mask_loss_coef', default=1, type=float)\n parser.add_argument('--dice_loss_coef', default=1, type=float)\n parser.add_argument('--bbox_loss_coef', default=5, type=float)\n parser.add_argument('--giou_loss_coef', default=2, type=float)\n 
parser.add_argument('--eos_coef', default=0.1, type=float,\n help=\"Relative classification weight of the no-object class\")\n\n # dataset parameters\n parser.add_argument('--dataset_file', default='coco')\n parser.add_argument('--coco_path', type=str)\n parser.add_argument('--coco_panoptic_path', type=str)\n parser.add_argument('--remove_difficult', action='store_true')\n\n parser.add_argument('--output_dir', default='./weights',\n help='path where to save, empty for no saving')\n parser.add_argument('--device', default='cuda',\n help='device to use for training / testing')\n parser.add_argument('--seed', default=42, type=int)\n parser.add_argument('--resume', default='', help='resume from checkpoint')\n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='start epoch')\n parser.add_argument('--eval', action='store_true')\n parser.add_argument('--num_workers', default=2, type=int)\n\n # distributed training parameters\n parser.add_argument('--world_size', default=1, type=int,\n help='number of distributed processes')\n parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')\n \n parser.add_argument('--cutoff', type=int, default=320, #448, # 284, 246\n help='cut off the front of the input data, --> length = 2048 - cutoff')\n parser.add_argument('--vis', action='store_true', default=False,\n help='visualize the image for debugging')\n parser.add_argument('--val_vis', action='store_true', default=False,\n help='visualize the image for debugging')\n #parser.add_argument('--gpu-num', type=int, default=0,\n # help = 'gpu number if you use a single gpu, set this device number')\n return parser\n\n\ndef main(args):\n utils.init_distributed_mode(args)\n print(\"git:\\n {}\\n\".format(utils.get_sha()))\n\n if args.frozen_weights is not None:\n assert args.masks, \"Frozen training is meant for segmentation only\"\n print(args)\n\n device = torch.device(args.device)\n\n # fix the seed for reproducibility\n seed = args.seed + utils.get_rank()\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n model, criterion, postprocessors = build_model(args)\n model.to(device)\n \n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n model_without_ddp = model.module\n n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print('number of params:', n_parameters)\n\n param_dicts = [\n {\"params\": [p for n, p in model_without_ddp.named_parameters() if \"backbone\" not in n and p.requires_grad]},\n {\n \"params\": [p for n, p in model_without_ddp.named_parameters() if \"backbone\" in n and p.requires_grad],\n \"lr\": args.lr_backbone,\n },\n ]\n optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,\n weight_decay=args.weight_decay)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)\n\n # UWB Dataset\n dataset_train = DetrDataset(mode='train', args=args)\n dataset_val = DetrDataset(mode='test', args=args)\n\n if args.distributed:\n sampler_train = DistributedSampler(dataset_train)\n sampler_val = DistributedSampler(dataset_val)#, shuffle=False)\n else:\n sampler_train = torch.utils.data.RandomSampler(dataset_train)\n sampler_val = torch.utils.data.SequentialSampler(dataset_val)\n\n batch_sampler_train = torch.utils.data.BatchSampler(\n sampler_train, args.batch_size, drop_last=True)\n\n data_loader_train = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True, collate_fn=detection_collate, 
num_workers=args.num_workers, pin_memory=True)\n data_loader_val = DataLoader(dataset_val, batch_size=args.batch_size, shuffle=True, collate_fn=detection_collate_var, num_workers=args.num_workers, pin_memory=True)\n\n '''\n if args.dataset_file == \"coco_panoptic\":\n # We also evaluate AP during panoptic training, on original coco DS\n coco_val = datasets.coco.build(\"val\", args)\n base_ds = get_coco_api_from_dataset(coco_val)\n else:\n base_ds = get_coco_api_from_dataset(dataset_val)\n '''\n #exit(-1)\n base_ds = None\n\n if args.frozen_weights is not None:\n checkpoint = torch.load(args.frozen_weights, map_location='cpu')\n model_without_ddp.detr.load_state_dict(checkpoint['model'])\n\n output_dir = Path(args.output_dir)\n if args.resume:\n if args.resume.startswith('https'):\n checkpoint = torch.hub.load_state_dict_from_url(\n args.resume, map_location='cpu', check_hash=True)\n else:\n checkpoint = torch.load(args.resume, map_location='cpu')\n model_without_ddp.load_state_dict(checkpoint['model'])\n if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:\n optimizer.load_state_dict(checkpoint['optimizer'])\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n args.start_epoch = checkpoint['epoch'] + 1\n\n if args.eval:\n #test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,\n test_stats = evaluate(model, criterion, postprocessors,\n data_loader_val, base_ds, device, args.output_dir, val_vis=args.val_vis)\n #print(test_stats)\n #if args.output_dir:\n # utils.save_on_master(coco_evaluator.coco_eval[\"bbox\"].eval, output_dir / \"eval.pth\")\n return\n\n print(\"Start training\")\n start_time = time.time()\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n sampler_train.set_epoch(epoch)\n train_stats = train_one_epoch(\n model, criterion, data_loader_train, optimizer, device, epoch,\n args.clip_max_norm)\n lr_scheduler.step()\n if args.output_dir:\n checkpoint_paths = [output_dir / 'checkpoint.pth']\n # extra checkpoint before LR drop and every 10 epochs\n if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 10 == 0:\n checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')\n for checkpoint_path in checkpoint_paths:\n utils.save_on_master({\n 'model': model_without_ddp.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'epoch': epoch,\n 'args': args,\n }, checkpoint_path)\n coco_evaluator = None\n\n #test_stats, coco_evaluator = evaluate(\n test_stats = evaluate(\n model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir, val_vis=args.val_vis\n )\n\n log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},\n **{f'test_{k}': v for k, v in test_stats.items()},\n 'epoch': epoch,\n 'n_parameters': n_parameters}\n\n if args.output_dir and utils.is_main_process():\n with (output_dir / \"log.txt\").open(\"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n\n # for evaluation logs\n '''\n if coco_evaluator is not None:\n (output_dir / 'eval').mkdir(exist_ok=True)\n if \"bbox\" in coco_evaluator.coco_eval:\n filenames = ['latest.pth']\n if epoch % 50 == 0:\n filenames.append(f'{epoch:03}.pth')\n for name in filenames:\n torch.save(coco_evaluator.coco_eval[\"bbox\"].eval,\n output_dir / \"eval\" / name)\n '''\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('Training time {}'.format(total_time_str))\n\n\nif __name__ == 
'__main__':\n parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])\n args = parser.parse_args()\n if args.output_dir:\n Path(args.output_dir).mkdir(parents=True, exist_ok=True)\n main(args)\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.hub.load_state_dict_from_url",
"torch.load",
"torch.utils.data.BatchSampler",
"torch.manual_seed",
"torch.utils.data.DistributedSampler",
"numpy.random.seed",
"torch.utils.data.SequentialSampler",
"torch.nn.parallel.DistributedDataParallel",
"torch.optim.AdamW",
"torch.optim.lr_scheduler.StepLR",
"torch.utils.data.RandomSampler",
"torch.device"
]
] |
oguzhangur96/automl-benchmark | [
"785b4d762164dd251b7c5e63131579113c2dc2c2"
] | [
"autogluon/taxi_trip_duration.py"
] | [
"# %% [markdown]\n# This is a simple notebook for Autogluon AutoMl prediction.\n# MLflow used as tracking tool since experiments take long time complete\n# and it is hard to manage too many experiments.\n#%%\n# Importing necessary libraries\nimport os\nimport re\nimport random\nimport string\nimport math\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_log_error\nfrom autogluon import TabularPrediction as task\nimport mlflow\nfrom sklearn.metrics import mean_squared_error\nfrom autogluon.utils.tabular.metrics import make_scorer\n\n# %%\n# Initialize mlflow experiment\nmlflow.set_tracking_uri(f'..{os.sep}mlruns')\nexperiment_name = 'automl-benchmark'\ntry:\n experiment = mlflow.create_experiment(experiment_name)\nexcept:\n experiment = mlflow.get_experiment_by_name(experiment_name)\nmlflow.set_experiment(experiment_name)\n\n# Reading seeds\nseed_path = f'..{os.sep}data{os.sep}seeds.txt'\nseeds = []\nwith open(seed_path,mode ='r') as file:\n for seed in file:\n seed.strip(r'/n')\n seeds.append(int(seed))\n\ndataset_name = 'taxi_trip_duration'\ndata = pd.read_pickle(f'..{os.sep}data{os.sep}{dataset_name}{os.sep}{dataset_name}.pkl')\n# Renaming all the characters except for regex experresion\n# For some reason lightgbm gives error with some column names\ndata = data.rename(columns = lambda x:re.sub('[^A-Za-z0-9_]+', '', x))\n#%%\nrun_time_secs = 600\ntarget_column = 'trip_duration'\n# Pickling other models require >1GB amount of space\n# Used hyperparameters option to discard other models\nhyper_parameters = {'NN':{},'GBM':{},'CAT':{},'LR':{} }\n# Since root_mean_squared_log_error does not exist in autogluon\n# it is defined with autogluon.utils.tabular.metrics.make_scorer\ndef RMSLE(y_true, y_pred):\n y_pred[y_pred < 0] = 0\n error = mean_squared_log_error(y_true, y_pred) \n return np.sqrt(error)\n\ncustom_metric = make_scorer('root_mean_squared_log_error',\n RMSLE,\n optimum=0,\n greater_is_better=False)\nfor seed in seeds:\n with mlflow.start_run(run_name='autogluon'):\n # Create output directory for auto gluon\n models_dir = 'AutogluonModels'\n random_dir = ''.join(random.choices(string.ascii_uppercase +\n string.digits, k = 12))\n output_dir = f'{models_dir}{os.sep}{random_dir}'\n os.mkdir(output_dir)\n # Split data into two parts (train, valid)\n train, valid = train_test_split(data, random_state = seed)\n predictor = task.fit(train_data=train, \n label=target_column,\n problem_type = 'regression',\n eval_metric = custom_metric, \n stopping_metric=custom_metric,\n hyperparameters= hyper_parameters,\n stack_ensemble_levels=2, \n time_limits = run_time_secs,\n cache_data=False, \n verbosity = 2,\n output_directory=output_dir)\n test_data = valid\n y_test = test_data[target_column] # values to predict\n test_data_nolab = test_data.drop(labels=[target_column],axis=1) # delete label column to prove we're not cheating\n # AutoGluon will gauge predictive performance using \n # evaluation metric: roc_auc this metric expects predicted probabilities \n # rather than predicted class labels, so you'll need to use predict_proba() \n # instead of predict()\n y_pred = predictor.predict_proba(test_data_nolab)\n score = RMSLE(y_test,y_pred)\n mlflow.log_metric('RMSLE', score)\n mlflow.log_param('seed', seed)\n mlflow.log_param('run_time', run_time_secs)\n mlflow.log_param('dataset_name', dataset_name)\n mlflow.log_param('model_name',predictor.leaderboard().iloc[0,0])\n mlflow.log_artifact(output_dir)"
] | [
[
"numpy.sqrt",
"pandas.read_pickle",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.mean_squared_log_error"
]
] |
lebrice/continuum | [
"7fa9048361b5821b61fa8ec1ac535c2438329626"
] | [
"continuum/task_set.py"
] | [
"from typing import Tuple, Union\n\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils.data import Dataset as TorchDataset\nfrom torchvision import transforms\n\nfrom continuum.viz import plot\n\n\nclass TaskSet(TorchDataset):\n \"\"\"A task dataset returned by the CLLoader.\n\n :param x: The data, either image-arrays or paths to images saved on disk.\n :param y: The targets, not one-hot encoded.\n :param t: The task id of each sample.\n :param trsf: The transformations to apply on the images.\n :param data_type: Type of the data, either \"image_path\", \"image_array\", or \"text\".\n \"\"\"\n\n def __init__(\n self,\n x: np.ndarray,\n y: np.ndarray,\n t: np.ndarray,\n trsf: transforms.Compose,\n data_type: str = \"image_array\"\n ):\n self.x, self.y, self.t = x, y, t\n self.trsf = trsf\n self.data_type = data_type\n\n @property\n def nb_classes(self):\n \"\"\"The number of classes contained in the current task.\"\"\"\n return len(np.unique(self.y))\n\n def add_memory(\n self, x_memory: np.ndarray, y_memory: np.ndarray, t_memory: Union[None, np.ndarray] = None\n ):\n \"\"\"Add memory for rehearsal.\n\n :param x_memory: Sampled data chosen for rehearsal.\n :param y_memory: The associated targets of `x_memory`.\n :param t_memory: The associated task ids. If not provided, they will be\n defaulted to -1.\n \"\"\"\n self.x = np.concatenate((self.x, x_memory))\n self.y = np.concatenate((self.y, y_memory))\n if t_memory is not None:\n self.t = np.concatenate((self.t, t_memory))\n else:\n self.t = np.concatenate((self.t, -1 * np.ones(len(x_memory))))\n\n def plot(\n self,\n path: Union[str, None] = None,\n title: str = \"\",\n nb_per_class: int = 5,\n shape=None\n ) -> None:\n \"\"\"Plot samples of the current task, useful to check if everything is ok.\n\n :param path: If not None, save on disk at this path.\n :param title: The title of the figure.\n :param nb_per_class: Amount to sample per class.\n :param shape: Shape to resize the image before plotting.\n \"\"\"\n plot(self, title=title, path=path, nb_per_class=nb_per_class, shape=shape)\n\n def __len__(self) -> int:\n \"\"\"The amount of images in the current task.\"\"\"\n return self.x.shape[0]\n\n def get_sample(self, index: int) -> np.ndarray:\n \"\"\"Returns a Pillow image corresponding to the given `index`.\n\n :param index: Index to query the image.\n :return: A Pillow image.\n \"\"\"\n x = self.x[index]\n\n if self.data_type == \"image_path\":\n x = Image.open(x).convert(\"RGB\")\n elif self.data_type == \"image_array\":\n x = Image.fromarray(x.astype(\"uint8\"))\n elif self.data_type == \"text\":\n pass\n\n return x\n\n def __getitem__(self, index: int) -> Tuple[np.ndarray, int, int]:\n \"\"\"Method used by PyTorch's DataLoaders to query a sample and its target.\"\"\"\n img = self.get_sample(index)\n y = self.y[index]\n t = self.t[index]\n\n if self.trsf is not None:\n img = self.trsf(img)\n\n return img, y, t\n\n\ndef split_train_val(dataset: TaskSet, val_split: float = 0.1) -> Tuple[TaskSet, TaskSet]:\n \"\"\"Split train dataset into two datasets, one for training and one for validation.\n\n :param dataset: A torch dataset, with .x and .y attributes.\n :param val_split: Percentage to allocate for validation, between [0, 1[.\n :return: A tuple a dataset, respectively for train and validation.\n \"\"\"\n random_state = np.random.RandomState(seed=1)\n\n indexes = np.arange(len(dataset.x))\n random_state.shuffle(indexes)\n\n train_indexes = indexes[int(val_split * len(indexes)):]\n val_indexes = indexes[:int(val_split * 
len(indexes))]\n\n x, y, t = dataset.x, dataset.y, dataset.t\n train_dataset = TaskSet(\n x[train_indexes], y[train_indexes], t[train_indexes], dataset.trsf, dataset.data_type\n )\n val_dataset = TaskSet(\n x[val_indexes], y[val_indexes], t[val_indexes], dataset.trsf, dataset.data_type\n )\n\n return train_dataset, val_dataset\n"
] | [
[
"numpy.random.RandomState",
"numpy.concatenate",
"numpy.unique"
]
] |
FerdinandEiteneuer/ReinforcementLearning | [
"15c75d7f984bd0a8a25b9df822113d8837aa4a93"
] | [
"utils/memory.py"
] | [
"\"\"\"\nMemory\n\"\"\"\nimport numpy as np\nimport os\n\nfrom utils import export\n\n\n@export\nclass NumpyArrayMemory:\n \"\"\"\n Datastructure for all the experiences (states, actions, rewards, next_states)\n the agent saw.\n \"\"\"\n def __init__(self, size, input_shape, nb_actions, data_dir):\n\n self.data_dir = data_dir\n if data_dir is not None:\n self.memory_path = os.path.join(data_dir, 'memory.npy')\n\n self.size = size\n self.input_shape = input_shape\n self.nb_actions = nb_actions\n\n shape_mem = size, input_shape + nb_actions\n self.memory = np.zeros(shape_mem)\n\n self.add_index = 0\n\n def add(self, states, qvalues):\n\n idx = self.add_index % self.size\n data = list(states) + list(qvalues)\n\n self.memory[idx] = data\n self.add_index += 1\n\n def save(self, path=None):\n\n if path is None:\n path = self.memory_path\n\n np.save(file=path, arr=self.memory)\n\n def load(self, path=None):\n\n if path is None:\n path = self.memory_path\n\n try:\n self.memory = np.load(path)\n except FileNotFoundError as e:\n print(f'Memory could not be loaded: {e}')\n\n def ready(self):\n \"\"\"\n Does the memory still need to be filled up? Can Training begin?\n Not very reliable implementation, but it will do.\n \"\"\"\n assert self.memory is not None\n return np.any(self.memory[-1] != 0)\n\n def complete_training_data(self):\n \"\"\"\n Prepares the data in a format used for keras.\n \"\"\"\n # one Q_memory row consists of [state, Qvalues(dim=nb_actions)]\n states = self.memory[:, :-self.nb_actions]\n targets = self.memory[:, -self.nb_actions:]\n\n return states, targets\n\n def get_batch(self, batch_size):\n raise NotImplementedError\n \"\"\"\n deprecated\n memory = self.Q_memory[:self.episodes]\n batch_size = max(1, min(self.batch_size, self.e))\n\n if self.episodes > self.size_Q_memory: # memory is filled\n indices = np.random.choice(range(self.size_Q_memory), self.batch_size)\n x_train = self.Q_memory[:, :-1][indices]\n y_train = self.Q_memory[:, -1][indices]\n return x_train, y_train, True\n\n else: # memory is too small\n return None, None, False\n \"\"\"\n"
] | [
[
"numpy.save",
"numpy.any",
"numpy.load",
"numpy.zeros"
]
] |
KEVINYZY/python-tutorial | [
"d0f7348e1da4ff954e3add66e1aae55d599283ee"
] | [
"17tensorflow/mnist/__init__.py"
] | [
"# -*- coding: utf-8 -*-\n# Author: XuMing <[email protected]>\n# Data: 17/10/10\n# Brief: \nimport tensorflow as tf\nimport numpy as np\n\n# 使用 NumPy 生成假数据(phony data), 总共 100 个点.\nx_data = np.float32(np.random.rand(2, 100)) # 随机输入\ny_data = np.dot([0.100, 0.200], x_data) + 0.300\n\n# 构造一个线性模型\n#\nb = tf.Variable(tf.zeros([1]))\nW = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))\ny = tf.matmul(W, x_data) + b\n\n# 最小化方差\nloss = tf.reduce_mean(tf.square(y - y_data))\noptimizer = tf.train.GradientDescentOptimizer(0.5)\ntrain = optimizer.minimize(loss)\n\n# 初始化变量\ninit = tf.global_variables_initializer()\n\n# 启动图 (graph)\nsess = tf.Session()\nsess.run(init)\n\n# 拟合平面\nfor step in range(0, 201):\n sess.run(train)\n if step % 20 == 0:\n print(step, sess.run(W), sess.run(b))\n\n# 得到最佳拟合结果 W: [[0.100 0.200]], b: [0.300]"
] | [
[
"tensorflow.zeros",
"tensorflow.global_variables_initializer",
"tensorflow.matmul",
"tensorflow.random_uniform",
"numpy.random.rand",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.Session",
"tensorflow.square",
"numpy.dot"
]
] |
joybhallaa/pandas | [
"1779155552631a30d4bb176dec70b8cc477defd7"
] | [
"pandas/core/internals/concat.py"
] | [
"from __future__ import annotations\n\nfrom collections import defaultdict\nimport copy\nimport itertools\nfrom typing import TYPE_CHECKING, Dict, List, Sequence, cast\n\nimport numpy as np\n\nfrom pandas._libs import internals as libinternals\nfrom pandas._typing import ArrayLike, DtypeObj, Manager, Shape\nfrom pandas.util._decorators import cache_readonly\n\nfrom pandas.core.dtypes.cast import ensure_dtype_can_hold_na, find_common_type\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_extension_array_dtype,\n is_float_dtype,\n is_numeric_dtype,\n is_sparse,\n is_timedelta64_dtype,\n)\nfrom pandas.core.dtypes.concat import concat_compat\nfrom pandas.core.dtypes.missing import isna_all\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import DatetimeArray, ExtensionArray\nfrom pandas.core.internals.array_manager import ArrayManager\nfrom pandas.core.internals.blocks import make_block\nfrom pandas.core.internals.managers import BlockManager\n\nif TYPE_CHECKING:\n from pandas import Index\n from pandas.core.arrays.sparse.dtype import SparseDtype\n\n\ndef concatenate_block_managers(\n mgrs_indexers, axes: List[Index], concat_axis: int, copy: bool\n) -> Manager:\n \"\"\"\n Concatenate block managers into one.\n\n Parameters\n ----------\n mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples\n axes : list of Index\n concat_axis : int\n copy : bool\n\n Returns\n -------\n BlockManager\n \"\"\"\n if isinstance(mgrs_indexers[0][0], ArrayManager):\n\n if concat_axis == 1:\n # TODO for now only fastpath without indexers\n mgrs = [t[0] for t in mgrs_indexers]\n arrays = [\n concat_compat([mgrs[i].arrays[j] for i in range(len(mgrs))], axis=0)\n for j in range(len(mgrs[0].arrays))\n ]\n return ArrayManager(arrays, [axes[1], axes[0]])\n elif concat_axis == 0:\n mgrs = [t[0] for t in mgrs_indexers]\n arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))\n return ArrayManager(arrays, [axes[1], axes[0]])\n\n concat_plans = [\n _get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers\n ]\n concat_plan = _combine_concat_plans(concat_plans, concat_axis)\n blocks = []\n\n for placement, join_units in concat_plan:\n\n if len(join_units) == 1 and not join_units[0].indexers:\n b = join_units[0].block\n values = b.values\n if copy:\n values = values.copy()\n else:\n values = values.view()\n b = b.make_block_same_class(values, placement=placement)\n elif _is_uniform_join_units(join_units):\n blk = join_units[0].block\n vals = [ju.block.values for ju in join_units]\n\n if not blk.is_extension:\n # _is_uniform_join_units ensures a single dtype, so\n # we can use np.concatenate, which is more performant\n # than concat_compat\n values = np.concatenate(vals, axis=blk.ndim - 1)\n else:\n # TODO(EA2D): special-casing not needed with 2D EAs\n values = concat_compat(vals)\n if not isinstance(values, ExtensionArray):\n values = values.reshape(1, len(values))\n\n if blk.values.dtype == values.dtype:\n # Fast-path\n b = blk.make_block_same_class(values, placement=placement)\n else:\n b = make_block(values, placement=placement, ndim=blk.ndim)\n else:\n b = make_block(\n _concatenate_join_units(join_units, concat_axis, copy=copy),\n placement=placement,\n ndim=len(axes),\n )\n blocks.append(b)\n\n return BlockManager(blocks, axes)\n\n\ndef _get_mgr_concatenation_plan(mgr: BlockManager, indexers: Dict[int, np.ndarray]):\n \"\"\"\n Construct concatenation plan for given block manager and 
indexers.\n\n Parameters\n ----------\n mgr : BlockManager\n indexers : dict of {axis: indexer}\n\n Returns\n -------\n plan : list of (BlockPlacement, JoinUnit) tuples\n\n \"\"\"\n # Calculate post-reindex shape , save for item axis which will be separate\n # for each block anyway.\n mgr_shape_list = list(mgr.shape)\n for ax, indexer in indexers.items():\n mgr_shape_list[ax] = len(indexer)\n mgr_shape = tuple(mgr_shape_list)\n\n if 0 in indexers:\n ax0_indexer = indexers.pop(0)\n blknos = algos.take_nd(mgr.blknos, ax0_indexer, fill_value=-1)\n blklocs = algos.take_nd(mgr.blklocs, ax0_indexer, fill_value=-1)\n else:\n\n if mgr.is_single_block:\n blk = mgr.blocks[0]\n return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]\n\n ax0_indexer = None\n blknos = mgr.blknos\n blklocs = mgr.blklocs\n\n plan = []\n for blkno, placements in libinternals.get_blkno_placements(blknos, group=False):\n\n assert placements.is_slice_like\n\n join_unit_indexers = indexers.copy()\n\n shape_list = list(mgr_shape)\n shape_list[0] = len(placements)\n shape = tuple(shape_list)\n\n if blkno == -1:\n unit = JoinUnit(None, shape)\n else:\n blk = mgr.blocks[blkno]\n ax0_blk_indexer = blklocs[placements.indexer]\n\n unit_no_ax0_reindexing = (\n len(placements) == len(blk.mgr_locs)\n and\n # Fastpath detection of join unit not\n # needing to reindex its block: no ax0\n # reindexing took place and block\n # placement was sequential before.\n (\n (\n ax0_indexer is None\n and blk.mgr_locs.is_slice_like\n and blk.mgr_locs.as_slice.step == 1\n )\n or\n # Slow-ish detection: all indexer locs\n # are sequential (and length match is\n # checked above).\n (np.diff(ax0_blk_indexer) == 1).all()\n )\n )\n\n # Omit indexer if no item reindexing is required.\n if unit_no_ax0_reindexing:\n join_unit_indexers.pop(0, None)\n else:\n join_unit_indexers[0] = ax0_blk_indexer\n\n unit = JoinUnit(blk, shape, join_unit_indexers)\n\n plan.append((placements, unit))\n\n return plan\n\n\nclass JoinUnit:\n def __init__(self, block, shape: Shape, indexers=None):\n # Passing shape explicitly is required for cases when block is None.\n if indexers is None:\n indexers = {}\n self.block = block\n self.indexers = indexers\n self.shape = shape\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({repr(self.block)}, {self.indexers})\"\n\n @cache_readonly\n def needs_filling(self) -> bool:\n for indexer in self.indexers.values():\n # FIXME: cache results of indexer == -1 checks.\n if (indexer == -1).any():\n return True\n\n return False\n\n @cache_readonly\n def dtype(self):\n blk = self.block\n if blk is None:\n raise AssertionError(\"Block is None, no dtype\")\n\n if not self.needs_filling:\n return blk.dtype\n return ensure_dtype_can_hold_na(blk.dtype)\n\n @cache_readonly\n def is_na(self) -> bool:\n if self.block is None:\n return True\n\n if not self.block._can_hold_na:\n return False\n\n # Usually it's enough to check but a small fraction of values to see if\n # a block is NOT null, chunks should help in such cases. 
1000 value\n # was chosen rather arbitrarily.\n values = self.block.values\n if is_sparse(self.block.values.dtype):\n return False\n elif self.block.is_extension:\n # TODO(EA2D): no need for special case with 2D EAs\n values_flat = values\n else:\n values_flat = values.ravel(order=\"K\")\n\n return isna_all(values_flat)\n\n def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:\n if upcasted_na is None:\n # No upcasting is necessary\n fill_value = self.block.fill_value\n values = self.block.get_values()\n else:\n fill_value = upcasted_na\n\n if self.is_na:\n blk_dtype = getattr(self.block, \"dtype\", None)\n\n if blk_dtype == np.dtype(object):\n # we want to avoid filling with np.nan if we are\n # using None; we already know that we are all\n # nulls\n values = self.block.values.ravel(order=\"K\")\n if len(values) and values[0] is None:\n fill_value = None\n\n if is_datetime64tz_dtype(blk_dtype) or is_datetime64tz_dtype(\n empty_dtype\n ):\n if self.block is None:\n # TODO(EA2D): special case unneeded with 2D EAs\n i8values = np.full(self.shape[1], fill_value.value)\n return DatetimeArray(i8values, dtype=empty_dtype)\n elif is_categorical_dtype(blk_dtype):\n pass\n elif is_extension_array_dtype(blk_dtype):\n pass\n elif is_extension_array_dtype(empty_dtype):\n missing_arr = empty_dtype.construct_array_type()._from_sequence(\n [], dtype=empty_dtype\n )\n ncols, nrows = self.shape\n assert ncols == 1, ncols\n empty_arr = -1 * np.ones((nrows,), dtype=np.intp)\n return missing_arr.take(\n empty_arr, allow_fill=True, fill_value=fill_value\n )\n else:\n missing_arr = np.empty(self.shape, dtype=empty_dtype)\n missing_arr.fill(fill_value)\n return missing_arr\n\n if (not self.indexers) and (not self.block._can_consolidate):\n # preserve these for validation in concat_compat\n return self.block.values\n\n if self.block.is_bool and not self.block.is_categorical:\n # External code requested filling/upcasting, bool values must\n # be upcasted to object to avoid being upcasted to numeric.\n values = self.block.astype(np.object_).values\n elif self.block.is_extension:\n values = self.block.values\n else:\n # No dtype upcasting is done here, it will be performed during\n # concatenation itself.\n values = self.block.values\n\n if not self.indexers:\n # If there's no indexing to be done, we want to signal outside\n # code that this array must be copied explicitly. 
This is done\n # by returning a view and checking `retval.base`.\n values = values.view()\n\n else:\n for ax, indexer in self.indexers.items():\n values = algos.take_nd(values, indexer, axis=ax)\n\n return values\n\n\ndef _concatenate_join_units(\n join_units: List[JoinUnit], concat_axis: int, copy: bool\n) -> ArrayLike:\n \"\"\"\n Concatenate values from several join units along selected axis.\n \"\"\"\n if concat_axis == 0 and len(join_units) > 1:\n # Concatenating join units along ax0 is handled in _merge_blocks.\n raise AssertionError(\"Concatenating join units along axis0\")\n\n empty_dtype = _get_empty_dtype(join_units)\n\n has_none_blocks = any(unit.block is None for unit in join_units)\n upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks)\n\n to_concat = [\n ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na)\n for ju in join_units\n ]\n\n if len(to_concat) == 1:\n # Only one block, nothing to concatenate.\n concat_values = to_concat[0]\n if copy:\n if isinstance(concat_values, np.ndarray):\n # non-reindexed (=not yet copied) arrays are made into a view\n # in JoinUnit.get_reindexed_values\n if concat_values.base is not None:\n concat_values = concat_values.copy()\n else:\n concat_values = concat_values.copy()\n elif any(isinstance(t, ExtensionArray) for t in to_concat):\n # concatting with at least one EA means we are concatting a single column\n # the non-EA values are 2D arrays with shape (1, n)\n to_concat = [t if isinstance(t, ExtensionArray) else t[0, :] for t in to_concat]\n concat_values = concat_compat(to_concat, axis=0)\n if not isinstance(concat_values, ExtensionArray) or (\n isinstance(concat_values, DatetimeArray) and concat_values.tz is None\n ):\n # if the result of concat is not an EA but an ndarray, reshape to\n # 2D to put it a non-EA Block\n # special case DatetimeArray, which *is* an EA, but is put in a\n # consolidated 2D block\n concat_values = np.atleast_2d(concat_values)\n else:\n concat_values = concat_compat(to_concat, axis=concat_axis)\n\n return concat_values\n\n\ndef _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool):\n \"\"\"\n Find the NA value to go with this dtype.\n \"\"\"\n if is_extension_array_dtype(dtype):\n return dtype.na_value\n elif dtype.kind in [\"m\", \"M\"]:\n return dtype.type(\"NaT\")\n elif dtype.kind in [\"f\", \"c\"]:\n return dtype.type(\"NaN\")\n elif dtype.kind == \"b\":\n return None\n elif dtype.kind in [\"i\", \"u\"]:\n if not has_none_blocks:\n return None\n return np.nan\n elif dtype.kind == \"O\":\n return np.nan\n raise NotImplementedError\n\n\ndef _get_empty_dtype(join_units: Sequence[JoinUnit]) -> DtypeObj:\n \"\"\"\n Return dtype and N/A values to use when concatenating specified units.\n\n Returned N/A value may be None which means there was no casting involved.\n\n Returns\n -------\n dtype\n \"\"\"\n if len(join_units) == 1:\n blk = join_units[0].block\n if blk is None:\n return np.dtype(np.float64)\n\n if _is_uniform_reindex(join_units):\n # FIXME: integrate property\n empty_dtype = join_units[0].block.dtype\n return empty_dtype\n\n has_none_blocks = any(unit.block is None for unit in join_units)\n dtypes = [None if unit.block is None else unit.dtype for unit in join_units]\n\n filtered_dtypes = [\n unit.dtype for unit in join_units if unit.block is not None and not unit.is_na\n ]\n if not len(filtered_dtypes):\n filtered_dtypes = [unit.dtype for unit in join_units if unit.block is not None]\n dtype_alt = find_common_type(filtered_dtypes)\n\n upcast_classes = 
_get_upcast_classes(join_units, dtypes)\n\n if is_extension_array_dtype(dtype_alt):\n return dtype_alt\n elif dtype_alt == object:\n return dtype_alt\n\n # TODO: de-duplicate with maybe_promote?\n # create the result\n if \"extension\" in upcast_classes:\n return np.dtype(\"object\")\n elif \"bool\" in upcast_classes:\n if has_none_blocks:\n return np.dtype(np.object_)\n else:\n return np.dtype(np.bool_)\n elif \"datetimetz\" in upcast_classes:\n # GH-25014. We use NaT instead of iNaT, since this eventually\n # ends up in DatetimeArray.take, which does not allow iNaT.\n dtype = upcast_classes[\"datetimetz\"]\n return dtype[0]\n elif \"datetime\" in upcast_classes:\n return np.dtype(\"M8[ns]\")\n elif \"timedelta\" in upcast_classes:\n return np.dtype(\"m8[ns]\")\n else:\n try:\n common_dtype = np.find_common_type(upcast_classes, [])\n except TypeError:\n # At least one is an ExtensionArray\n return np.dtype(np.object_)\n else:\n if is_float_dtype(common_dtype):\n return common_dtype\n elif is_numeric_dtype(common_dtype):\n if has_none_blocks:\n return np.dtype(np.float64)\n else:\n return common_dtype\n\n msg = \"invalid dtype determination in get_concat_dtype\"\n raise AssertionError(msg)\n\n\ndef _get_upcast_classes(\n join_units: Sequence[JoinUnit],\n dtypes: Sequence[DtypeObj],\n) -> Dict[str, List[DtypeObj]]:\n \"\"\"Create mapping between upcast class names and lists of dtypes.\"\"\"\n upcast_classes: Dict[str, List[DtypeObj]] = defaultdict(list)\n null_upcast_classes: Dict[str, List[DtypeObj]] = defaultdict(list)\n for dtype, unit in zip(dtypes, join_units):\n if dtype is None:\n continue\n\n upcast_cls = _select_upcast_cls_from_dtype(dtype)\n # Null blocks should not influence upcast class selection, unless there\n # are only null blocks, when same upcasting rules must be applied to\n # null upcast classes.\n if unit.is_na:\n null_upcast_classes[upcast_cls].append(dtype)\n else:\n upcast_classes[upcast_cls].append(dtype)\n\n if not upcast_classes:\n upcast_classes = null_upcast_classes\n\n return upcast_classes\n\n\ndef _select_upcast_cls_from_dtype(dtype: DtypeObj) -> str:\n \"\"\"Select upcast class name based on dtype.\"\"\"\n if is_categorical_dtype(dtype):\n return \"extension\"\n elif is_datetime64tz_dtype(dtype):\n return \"datetimetz\"\n elif is_extension_array_dtype(dtype):\n return \"extension\"\n elif issubclass(dtype.type, np.bool_):\n return \"bool\"\n elif issubclass(dtype.type, np.object_):\n return \"object\"\n elif is_datetime64_dtype(dtype):\n return \"datetime\"\n elif is_timedelta64_dtype(dtype):\n return \"timedelta\"\n elif is_sparse(dtype):\n dtype = cast(\"SparseDtype\", dtype)\n return dtype.subtype.name\n elif is_float_dtype(dtype) or is_numeric_dtype(dtype):\n return dtype.name\n else:\n return \"float\"\n\n\ndef _is_uniform_join_units(join_units: List[JoinUnit]) -> bool:\n \"\"\"\n Check if the join units consist of blocks of uniform type that can\n be concatenated using Block.concat_same_type instead of the generic\n _concatenate_join_units (which uses `concat_compat`).\n\n \"\"\"\n # TODO: require dtype match in addition to same type? e.g. 
DatetimeTZBlock\n # cannot necessarily join\n return (\n # all blocks need to have the same type\n all(type(ju.block) is type(join_units[0].block) for ju in join_units) # noqa\n and\n # no blocks that would get missing values (can lead to type upcasts)\n # unless we're an extension dtype.\n all(not ju.is_na or ju.block.is_extension for ju in join_units)\n and\n # no blocks with indexers (as then the dimensions do not fit)\n all(not ju.indexers for ju in join_units)\n and\n # only use this path when there is something to concatenate\n len(join_units) > 1\n )\n\n\ndef _is_uniform_reindex(join_units) -> bool:\n return (\n # TODO: should this be ju.block._can_hold_na?\n all(ju.block and ju.block.is_extension for ju in join_units)\n and len({ju.block.dtype.name for ju in join_units}) == 1\n )\n\n\ndef _trim_join_unit(join_unit: JoinUnit, length: int) -> JoinUnit:\n \"\"\"\n Reduce join_unit's shape along item axis to length.\n\n Extra items that didn't fit are returned as a separate block.\n \"\"\"\n if 0 not in join_unit.indexers:\n extra_indexers = join_unit.indexers\n\n if join_unit.block is None:\n extra_block = None\n else:\n extra_block = join_unit.block.getitem_block(slice(length, None))\n join_unit.block = join_unit.block.getitem_block(slice(length))\n else:\n extra_block = join_unit.block\n\n extra_indexers = copy.copy(join_unit.indexers)\n extra_indexers[0] = extra_indexers[0][length:]\n join_unit.indexers[0] = join_unit.indexers[0][:length]\n\n extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]\n join_unit.shape = (length,) + join_unit.shape[1:]\n\n return JoinUnit(block=extra_block, indexers=extra_indexers, shape=extra_shape)\n\n\ndef _combine_concat_plans(plans, concat_axis: int):\n \"\"\"\n Combine multiple concatenation plans into one.\n\n existing_plan is updated in-place.\n \"\"\"\n if len(plans) == 1:\n for p in plans[0]:\n yield p[0], [p[1]]\n\n elif concat_axis == 0:\n offset = 0\n for plan in plans:\n last_plc = None\n\n for plc, unit in plan:\n yield plc.add(offset), [unit]\n last_plc = plc\n\n if last_plc is not None:\n offset += last_plc.as_slice.stop\n\n else:\n num_ended = [0]\n\n def _next_or_none(seq):\n retval = next(seq, None)\n if retval is None:\n num_ended[0] += 1\n return retval\n\n plans = list(map(iter, plans))\n next_items = list(map(_next_or_none, plans))\n\n while num_ended[0] != len(next_items):\n if num_ended[0] > 0:\n raise ValueError(\"Plan shapes are not aligned\")\n\n placements, units = zip(*next_items)\n\n lengths = list(map(len, placements))\n min_len, max_len = min(lengths), max(lengths)\n\n if min_len == max_len:\n yield placements[0], units\n next_items[:] = map(_next_or_none, plans)\n else:\n yielded_placement = None\n yielded_units = [None] * len(next_items)\n for i, (plc, unit) in enumerate(next_items):\n yielded_units[i] = unit\n if len(plc) > min_len:\n # _trim_join_unit updates unit in place, so only\n # placement needs to be sliced to skip min_len.\n next_items[i] = (plc[min_len:], _trim_join_unit(unit, min_len))\n else:\n yielded_placement = plc\n next_items[i] = _next_or_none(plans[i])\n\n yield yielded_placement, yielded_units\n"
] | [
[
"pandas.core.dtypes.cast.ensure_dtype_can_hold_na",
"numpy.ones",
"numpy.find_common_type",
"numpy.diff",
"numpy.dtype",
"pandas.core.arrays.DatetimeArray",
"pandas.core.internals.managers.BlockManager",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.internals.array_manager.ArrayManager",
"pandas._libs.internals.get_blkno_placements",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.core.internals.blocks.make_block",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.dtypes.concat.concat_compat",
"pandas.core.dtypes.common.is_datetime64_dtype",
"numpy.atleast_2d",
"pandas.core.algorithms.take_nd",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.dtypes.cast.find_common_type",
"pandas.core.dtypes.common.is_extension_array_dtype",
"numpy.empty",
"pandas.core.dtypes.missing.isna_all",
"pandas.core.dtypes.common.is_sparse",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"numpy.concatenate",
"numpy.full"
]
] |
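The record above captures pandas' internal block-concatenation helpers (`concatenate_block_managers`, `JoinUnit`, `_get_empty_dtype`), which decide the common dtype and NA fill value when blocks from several managers are joined. As a hedged, user-level sketch of that dtype resolution, the snippet below uses only the public `pd.concat` API; the frames and column names are illustrative and are not taken from the record itself.

```python
import pandas as pd

# Concatenating frames with disjoint columns introduces missing values, so the
# int64 column "a" is upcast to a dtype that can hold NaN (float64): this is the
# user-visible outcome of the empty-dtype / NA-value logic in the record above.
left = pd.DataFrame({"a": [1, 2]})        # int64 column
right = pd.DataFrame({"b": [0.5, 1.5]})   # float64 column

out = pd.concat([left, right], ignore_index=True)
print(out.dtypes)  # a: float64, b: float64
print(out)         # rows from each frame carry NaN in the other frame's column
```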
ankitshaw/DenGa | [
"92dfb0f3760c30dd9a32d650da92d5c3276099d1"
] | [
"denga/genda.py"
] | [
"\nimport denga.augment as au\nimport pandas as pd\n\nclass Genda():\n\n\tdef __init__(self,filepath): \n\t\tself.filepath = filepath\n\t\tself.dataset = None\n\t\ttry:\n\t\t\tself.dataset = pd.read_csv(self.filepath, header= None, error_bad_lines=False)\n\t\texcept:\n\t\t\traise Exception(\"ERROR: File Missing\") \n\t\tself.data = None\n\n\tdef generate(self):\n\t\tself.data = au.nlp(self.dataset)\n\n\tdef save(self,filename=\"genda.txt\"):\n\t\tif(self.data is None):\n\t\t\traise Exception(\"ERROR: New Dataset not yet generated.\")\n\n\t\tif not \".\" in filename:\n\t\t\traise Exception(\"ERROR: extension missing from file name.\")\n\t\telif filename.endswith(\".csv\"):\n\t\t\tdf = pd.DataFrame(self.data, columns=[\"New Sentences\"])\n\t\t\tdf.to_csv(filename, index=False)\n\t\telif filename.endswith(\".txt\"):\n\t\t\twith open(filename, \"w\") as output:\n\t\t\t\tfor line in self.data:\n\t\t\t\t\toutput.write(str(line)+\"\\n\")\n\t\telse:\n\t\t\traise Exception(\"ERROR: file type not supported use .txt or .csv file name.\") \t"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
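The `Genda.save` method in the record above dispatches on the output file extension, writing a CSV through a `pandas.DataFrame` or a plain-text file line by line. The following is a minimal standalone sketch of that same pattern, assuming a list of generated sentences; the `save_generated` helper and the sample data are illustrative simplifications, not the DenGa implementation itself.

```python
import pandas as pd

def save_generated(data, filename="genda.txt"):
    """Write generated sentences to .csv (via pandas) or .txt (one per line)."""
    if "." not in filename:
        raise ValueError("extension missing from file name")
    if filename.endswith(".csv"):
        pd.DataFrame(data, columns=["New Sentences"]).to_csv(filename, index=False)
    elif filename.endswith(".txt"):
        with open(filename, "w") as output:
            for line in data:
                output.write(str(line) + "\n")
    else:
        raise ValueError("file type not supported; use a .txt or .csv file name")

sentences = ["first generated sentence", "second generated sentence"]  # illustrative
save_generated(sentences, "genda.csv")
```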
ABernard27/PROJET-groupe-3 | [
"a9ab9d80c10724ded9e20751fda018a7ed05589b"
] | [
"Coberny/graph_min_cost/best_price_path.py"
] | [
"import pandas as pd\nimport networkx as nx\nfrom networkx.algorithms import dijkstra_path\nimport itertools\nimport time\nimport datetime as dt\n# import matplotlib.pyplot as plt\n\n\n# Retourne la liste de toutes les villes du dataframe\ndef GetListOfcolnames(data):\n listofColnames = list(data.columns)[1:]\n return listofColnames\n\n# Retourne la liste de tous les sommets des chemins possibles entre\n# la ville de départ (entrance) et la ville d'arrivée (outlet)\ndef GetListOfVertexPath(data, entrance, outlet):\n listofColnames = GetListOfcolnames(data)\n outlet_index = listofColnames.index(outlet)\n listOfVertexPath = listofColnames[0:outlet_index+1]\n return listOfVertexPath\n\n# Retourne la liste de toutes les sorties possibles entre\n# la ville de départ (entrance) et la ville d'arrivée (outlet)\ndef GetListOfPossibleExit(data, entrance, outlet):\n listOfVertexPath = GetListOfVertexPath(data, entrance, outlet)\n listOfExit = [e for e in listOfVertexPath if (e != entrance and e != outlet)]\n return listOfExit\n\n# Retourne la contrainte K qui correspond au nombre total de sorties\n# qu'on peut emprunter entre la ville de départ (entrance) et \n# la ville d'arrivée (outlet)\ndef GetKMaxConstraint(data, entrance, outlet):\n k = len(GetListOfPossibleExit(data, entrance, outlet))\n return k\n\n# Retourne la liste de tous les chemins qu'on peut emprunter en fonction\n# du nombre de sorties (nbr_exit) utilisé entre la ville de départ\n# et celle d'arrivée\ndef GetListOfPath(data, entrance, outlet, nbr_exit):\n listOfExit = GetListOfPossibleExit(data, entrance, outlet)\n listOfPath = list(itertools.combinations(listOfExit, nbr_exit))\n for i in range(len(listOfPath)):\n listOfPath[i] = (entrance,) + listOfPath[i] + (outlet,)\n return listOfPath\n\n# Création du graph représentant tous les itinéraires qu'on peut \n# emprunter (entre la ville de départ et celle d'arrivée)\n# en fonction du nombre de sorties (nbr_exit) utilisé \ndef CreateGraphOfPath(data, entrance, outlet, nbr_exit):\n G_nbr_exit = nx.DiGraph()\n cities = data.columns[0]\n if nbr_exit == 0:\n G_nbr_exit.add_nodes_from([entrance, outlet])\n row_index = int(data[data[cities] == entrance].index[0])\n col_index = data.columns.get_loc(outlet)\n G_nbr_exit.add_weighted_edges_from(\n [(entrance, outlet, data.iloc[row_index,col_index])]\n )\n else:\n listOfVertexPath = GetListOfVertexPath(data, entrance, outlet)\n G_nbr_exit.add_nodes_from(listOfVertexPath)\n listOfEdges = []\n listOfPath = GetListOfPath(data, entrance, outlet, nbr_exit)\n for tup in listOfPath:\n for elt in range(len(tup) - 1):\n row_index = int(data[data[cities] == tup[elt]].index[0])\n col_index = data.columns.get_loc(tup[elt+1])\n listOfEdges.append(\n (tup[elt], tup[elt + 1], data.iloc[row_index,col_index])\n )\n G_nbr_exit.add_weighted_edges_from(listOfEdges)\n return G_nbr_exit\n\n# Retourne la liste des noeuds du plus court chemin trouvé dans le graph G\ndef FindShortestPath(G, entrance, outlet):\n listOfSPNodes = dijkstra_path(G, entrance, outlet)\n return listOfSPNodes\n\n# Retourne la somme des poids du plus court chemin\ndef ShortestPathWeight(G, entrance, outlet):\n listOfSPNodes = FindShortestPath(G, entrance, outlet)\n w = 0\n for elt in range(len(listOfSPNodes)-1):\n d = G.get_edge_data(listOfSPNodes[elt], listOfSPNodes[elt+1])\n w += d['weight']\n return w\n\n# Cette fonction calcule la somme totale des poids du chemin\ndef weight(data, n_uplet):\n cities = data.columns[0]\n listOfWeights = []\n for i in range(len(n_uplet) - 1):\n row_index = 
int(data[data[cities] == n_uplet[i]].index[0])\n col_index = data.columns.get_loc(n_uplet[i+1])\n w = data.iloc[row_index, col_index]\n listOfWeights.append(w)\n return sum(listOfWeights)\n\n# Cette fonction retourne le chemin optimal dans un n-uplet \ndef FindBestPathForPriceV2(data, listOfTuple):\n tupWeights = []\n for tup in listOfTuple:\n tp_w = weight(data, tup)\n tupWeights.append(tp_w)\n min_w = min(tupWeights)\n min_w_index = tupWeights.index(min_w)\n bestPathForPrice = listOfTuple[min_w_index]\n return bestPathForPrice\n\n# Retourne le couple composé du chemin optimal et du prix final (minimal)\n# que l'on va payer en empruntant ce cheminpour aller de la ville de \n# départ à celle d'arrivée\n\ndef FindBestPathForPrice(data, entrance, outlet, k):\n \"\"\"Affiche la liste composé des sommets du chemin optimal (le chemin qui revient le moins cher entre la ville de départ et celle d'arrivée) et le prix total (minimal) que l'utilisateur va payer en empruntant ce chemin.\n \n :param dataframe data: Le dataframe donnant le prix du trajet direct entre 2 villes. Chaque case du dataframe correspond au prix que l'on va payer entre la ville associée à l'indice de la ligne et la ville associée à l'indice de la colonne dans le dataframe.\n\n .. warning::\n\n Attention! le dataframe doit avoir un format adéquat pour que l'algorithme fonctionne, une rubrique détaillant le format attendu pour le dataframe est disponible\n\n :param str entrance: La ville de départ\n :param str outlet: La ville de sortie\n :param int k: Contrainte du nombre de sorties maximales imposées par l'utilisateur\n \n :returns: Un couple. Le 1er element du couple est la liste des sommets du chemin optimal. Le 2ème élément du couple est le prix total (minimal) que l'utilisateur va payer en empruntant ce chemin.\n \n .. code:: \n \n Coberny.FindBestPathForPrice(data, entrance, outlet, k)\n \n \"\"\"\n if k > GetKMaxConstraint(data, entrance, outlet):\n ans = 'La contrainte k est supérieure au nombre maximal de sorties possibles'\n return ans\n else:\n listOfSP = []\n listOfSPWeight = []\n for i in range(k+1):\n G = CreateGraphOfPath(data, entrance, outlet, i)\n listOfSP.append(FindShortestPath(G, entrance, outlet))\n listOfSPWeight.append(ShortestPathWeight(G, entrance, outlet))\n best_price = min(listOfSPWeight)\n best_price_index = listOfSPWeight.index(best_price)\n bestPathForPrice = listOfSP[best_price_index]\n if len(bestPathForPrice) <= 2+k:\n return (bestPathForPrice, best_price)\n else:\n listOfTuple = GetListOfPath(data, entrance, outlet, k)\n bestTupleForPrice = FindBestPathForPriceV2(data, listOfTuple)\n best_price = weight(data, bestTupleForPrice)\n bestPathForPrice = list(bestTupleForPrice)\n return (bestPathForPrice, best_price)\n\n# Retourne le graph du chemin optimal ie le chemin qui revient le moins cher\n# entre la ville de départ et celle d'arrivée\n# Les sorties intermédiraires sont coloriées en orange\n# La ville de départ et d'arrivée sont coloriées en bleu\ndef CreateGraphOfBestPathForPrice(data, entrance, outlet, k):\n \"\"\"Trace le graph du chemin optimal ie le chemin qui revient le moins cher entre la ville de départ et celle d'arrivée.\n\n :param dataframe data: Le dataframe donnant le prix du trajet direct entre 2 villes. Chaque case du dataframe correspond au prix que l'on va payer entre la ville associée à l'indice de la ligne et la ville associée à l'indice de la colonne dans le dataframe.\n\n .. warning::\n\n Attention! 
le dataframe doit avoir un format adéquat pour que l'algorithme fonctionne, une rubrique détaillant le format attendu pour le dataframe est disponible.\n\n :param str entrance: La ville de départ\n :param str outlet: La ville de sortie\n :param int k: Contrainte du nombre de sorties maximales imposées par l'utilisateur\n\n :returns: Le graphe du chemin optimal entre la ville de départ et celle d'arrivée\n \n .. code:: \n \n Coberny.CreateGraphOfBestPathForPrice(data, entrance, outlet, k)\n \n \"\"\"\n \n if k > GetKMaxConstraint(data, entrance, outlet):\n ans = 'La contrainte k est supérieure au nombre maximal de sorties possibles'\n return ans\n else:\n cities = data.columns[0]\n listOfNodesColors = []\n listOfEdges = []\n # d_edges_labels = {}\n G_bestPath = nx.DiGraph()\n couple = FindBestPathForPrice(data, entrance, outlet, k)\n bestPathForPrice = couple[0]\n for node in bestPathForPrice:\n if (node != entrance) and (node != outlet):\n listOfNodesColors.append('tab:orange')\n else:\n listOfNodesColors.append('tab:blue')\n G_bestPath.add_nodes_from(bestPathForPrice)\n for vx in range(len(bestPathForPrice)-1):\n row_index = int(data[data[cities] == bestPathForPrice[vx]].index[0])\n col_index = data.columns.get_loc(bestPathForPrice[vx+1])\n listOfEdges.append(\n (bestPathForPrice[vx], bestPathForPrice[vx+1], data.iloc[row_index,col_index])\n )\n # d_edges_labels[(str(bestPathForPrice[vx]), str(bestPathForPrice[vx+1]))] = str(\n # data.iloc[row_index,col_index]\n # )\n G_bestPath.add_weighted_edges_from(listOfEdges)\n return nx.draw(G_bestPath, node_color = listOfNodesColors, with_labels = True)\n # plt.show()\n # nx.draw_networkx_edge_labels(G_bestPath, nx.spring_layout(G_bestPath, seed=3113794652),\n # edge_labels = d_edges_labels)\n\n\nif __name__ == '__main__':\n df_price = pd.read_csv('prix.csv')\n df_price = df_price.fillna(0)\n startTime = time.time()\n print('Couple meilleur chemin et prix: ', FindBestPathForPrice(df_price,\n 'Sete', 'Montgiscard', 5))\n CreateGraphOfBestPathForPrice(df_price, 'Sete', 'Montgiscard', 5)\n runTime = time.time() - startTime\n roundRunTime = str(dt.timedelta(seconds=runTime))\n print(\"Le temps d'execution du programme vaut: \", runTime, ' secondes.\\n cad '\n , roundRunTime, \" dans le format heures minutes secondes\")\n \n"
] | [
[
"pandas.read_csv"
]
] |
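The Coberny record models motorway toll prices as a weighted `networkx.DiGraph` and finds the cheapest entrance-to-exit route with `dijkstra_path`, then sums the edge weights for the total price. Below is a hedged toy version of that core step; the intermediate exits and prices are invented for illustration (only "Sete" and "Montgiscard" appear in the record's own example).

```python
import networkx as nx
from networkx.algorithms import dijkstra_path

# Toy price graph: edge weights are toll prices between exits (values invented).
G = nx.DiGraph()
G.add_weighted_edges_from([
    ("Sete", "Agde", 2.1),
    ("Agde", "Beziers", 1.8),
    ("Sete", "Beziers", 4.5),
    ("Beziers", "Montgiscard", 12.0),
])

path = dijkstra_path(G, "Sete", "Montgiscard")  # cheapest path by total weight
cost = sum(G[u][v]["weight"] for u, v in zip(path, path[1:]))  # total price paid
print(path, cost)  # routes via Agde, since 2.1 + 1.8 is cheaper than 4.5 direct
```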
sdementen/pandas | [
"e23e6f164209167c0fba0d32c862c5e75e6d4a8a"
] | [
"pandas/io/pytables.py"
] | [
"\"\"\"\nHigh level interface to PyTables for reading and writing pandas data structures\nto disk\n\"\"\"\n\n# pylint: disable-msg=E1101,W0613,W0603\nfrom datetime import datetime, date\nimport time\nimport re\nimport copy\nimport itertools\nimport warnings\nimport os\n\nfrom pandas.types.common import (is_list_like,\n is_categorical_dtype,\n is_timedelta64_dtype,\n is_datetime64tz_dtype,\n is_datetime64_dtype,\n _ensure_object,\n _ensure_int64,\n _ensure_platform_int)\nfrom pandas.types.missing import array_equivalent\n\nimport numpy as np\n\nimport pandas as pd\nfrom pandas import (Series, DataFrame, Panel, Panel4D, Index,\n MultiIndex, Int64Index, isnull)\nfrom pandas.core import config\nfrom pandas.io.common import _stringify_path\nfrom pandas.sparse.api import SparseSeries, SparseDataFrame\nfrom pandas.sparse.array import BlockIndex, IntIndex\nfrom pandas.tseries.api import PeriodIndex, DatetimeIndex\nfrom pandas.tseries.tdi import TimedeltaIndex\nfrom pandas.core.base import StringMixin\nfrom pandas.formats.printing import adjoin, pprint_thing\nfrom pandas.core.common import _asarray_tuplesafe, PerformanceWarning\nfrom pandas.core.algorithms import match, unique\nfrom pandas.core.categorical import Categorical\nfrom pandas.core.internals import (BlockManager, make_block,\n _block2d_to_blocknd,\n _factor_indexer, _block_shape)\nfrom pandas.core.index import _ensure_index\nfrom pandas.tools.merge import concat\nfrom pandas import compat\nfrom pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter\nfrom pandas.core.config import get_option\nfrom pandas.computation.pytables import Expr, maybe_expression\n\nimport pandas.lib as lib\nimport pandas.algos as algos\nimport pandas.tslib as tslib\n\nfrom distutils.version import LooseVersion\n\n# versioning attribute\n_version = '0.15.2'\n\n# encoding\n# PY3 encoding if we don't specify\n_default_encoding = 'UTF-8'\n\n\ndef _ensure_decoded(s):\n \"\"\" if we have bytes, decode them to unicode \"\"\"\n if isinstance(s, np.bytes_):\n s = s.decode('UTF-8')\n return s\n\n\ndef _ensure_encoding(encoding):\n # set the encoding if we need\n if encoding is None:\n if PY3:\n encoding = _default_encoding\n return encoding\n\nTerm = Expr\n\n\ndef _ensure_term(where, scope_level):\n \"\"\"\n ensure that the where is a Term or a list of Term\n this makes sure that we are capturing the scope of variables\n that are passed\n create the terms here with a frame_level=2 (we are 2 levels down)\n \"\"\"\n\n # only consider list/tuple here as an ndarray is automaticaly a coordinate\n # list\n level = scope_level + 1\n if isinstance(where, (list, tuple)):\n wlist = []\n for w in filter(lambda x: x is not None, where):\n if not maybe_expression(w):\n wlist.append(w)\n else:\n wlist.append(Term(w, scope_level=level))\n where = wlist\n elif maybe_expression(where):\n where = Term(where, scope_level=level)\n return where\n\n\nclass PossibleDataLossError(Exception):\n pass\n\n\nclass ClosedFileError(Exception):\n pass\n\n\nclass IncompatibilityWarning(Warning):\n pass\n\nincompatibility_doc = \"\"\"\nwhere criteria is being ignored as this version [%s] is too old (or\nnot-defined), read the file in and write it out to a new file to upgrade (with\nthe copy_to method)\n\"\"\"\n\n\nclass AttributeConflictWarning(Warning):\n pass\n\nattribute_conflict_doc = \"\"\"\nthe [%s] attribute of the existing index is [%s] which conflicts with the new\n[%s], resetting the attribute to None\n\"\"\"\n\n\nclass DuplicateWarning(Warning):\n pass\n\nduplicate_doc = 
\"\"\"\nduplicate entries in table, taking most recently appended\n\"\"\"\n\nperformance_doc = \"\"\"\nyour performance may suffer as PyTables will pickle object types that it cannot\nmap directly to c-types [inferred_type->%s,key->%s] [items->%s]\n\"\"\"\n\n# formats\n_FORMAT_MAP = {\n u('f'): 'fixed',\n u('fixed'): 'fixed',\n u('t'): 'table',\n u('table'): 'table',\n}\n\nformat_deprecate_doc = \"\"\"\nthe table keyword has been deprecated\nuse the format='fixed(f)|table(t)' keyword instead\n fixed(f) : specifies the Fixed format\n and is the default for put operations\n table(t) : specifies the Table format\n and is the default for append operations\n\"\"\"\n\n# map object types\n_TYPE_MAP = {\n\n Series: u('series'),\n SparseSeries: u('sparse_series'),\n pd.TimeSeries: u('series'),\n DataFrame: u('frame'),\n SparseDataFrame: u('sparse_frame'),\n Panel: u('wide'),\n Panel4D: u('ndim'),\n}\n\n# storer class map\n_STORER_MAP = {\n u('TimeSeries'): 'LegacySeriesFixed',\n u('Series'): 'LegacySeriesFixed',\n u('DataFrame'): 'LegacyFrameFixed',\n u('DataMatrix'): 'LegacyFrameFixed',\n u('series'): 'SeriesFixed',\n u('sparse_series'): 'SparseSeriesFixed',\n u('frame'): 'FrameFixed',\n u('sparse_frame'): 'SparseFrameFixed',\n u('wide'): 'PanelFixed',\n}\n\n# table class map\n_TABLE_MAP = {\n u('generic_table'): 'GenericTable',\n u('appendable_series'): 'AppendableSeriesTable',\n u('appendable_multiseries'): 'AppendableMultiSeriesTable',\n u('appendable_frame'): 'AppendableFrameTable',\n u('appendable_multiframe'): 'AppendableMultiFrameTable',\n u('appendable_panel'): 'AppendablePanelTable',\n u('appendable_ndim'): 'AppendableNDimTable',\n u('worm'): 'WORMTable',\n u('legacy_frame'): 'LegacyFrameTable',\n u('legacy_panel'): 'LegacyPanelTable',\n}\n\n# axes map\n_AXES_MAP = {\n DataFrame: [0],\n Panel: [1, 2],\n Panel4D: [1, 2, 3],\n}\n\n# register our configuration options\ndropna_doc = \"\"\"\n: boolean\n drop ALL nan rows when appending to a table\n\"\"\"\nformat_doc = \"\"\"\n: format\n default format writing format, if None, then\n put will default to 'fixed' and append will default to 'table'\n\"\"\"\n\nwith config.config_prefix('io.hdf'):\n config.register_option('dropna_table', False, dropna_doc,\n validator=config.is_bool)\n config.register_option(\n 'default_format', None, format_doc,\n validator=config.is_one_of_factory(['fixed', 'table', None])\n )\n\n# oh the troubles to reduce import time\n_table_mod = None\n_table_file_open_policy_is_strict = False\n\n\ndef _tables():\n global _table_mod\n global _table_file_open_policy_is_strict\n if _table_mod is None:\n import tables\n _table_mod = tables\n\n # version requirements\n if LooseVersion(tables.__version__) < '3.0.0':\n raise ImportError(\"PyTables version >= 3.0.0 is required\")\n\n # set the file open policy\n # return the file open policy; this changes as of pytables 3.1\n # depending on the HDF5 version\n try:\n _table_file_open_policy_is_strict = (\n tables.file._FILE_OPEN_POLICY == 'strict')\n except:\n pass\n\n return _table_mod\n\n# interface to/from ###\n\n\ndef to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,\n append=None, **kwargs):\n \"\"\" store this object, close it if we opened it \"\"\"\n\n if append:\n f = lambda store: store.append(key, value, **kwargs)\n else:\n f = lambda store: store.put(key, value, **kwargs)\n\n path_or_buf = _stringify_path(path_or_buf)\n if isinstance(path_or_buf, string_types):\n with HDFStore(path_or_buf, mode=mode, complevel=complevel,\n complib=complib) as store:\n 
f(store)\n else:\n f(path_or_buf)\n\n\ndef read_hdf(path_or_buf, key=None, **kwargs):\n \"\"\" read from the store, close it if we opened it\n\n Retrieve pandas object stored in file, optionally based on where\n criteria\n\n Parameters\n ----------\n path_or_buf : path (string), buffer, or path object (pathlib.Path or\n py._path.local.LocalPath) to read from\n\n .. versionadded:: 0.19.0 support for pathlib, py.path.\n\n key : group identifier in the store. Can be omitted if the HDF file\n contains a single pandas object.\n where : list of Term (or convertable) objects, optional\n start : optional, integer (defaults to None), row number to start\n selection\n stop : optional, integer (defaults to None), row number to stop\n selection\n columns : optional, a list of columns that if not None, will limit the\n return columns\n iterator : optional, boolean, return an iterator, default False\n chunksize : optional, nrows to include in iteration, return an iterator\n\n Returns\n -------\n The selected object\n\n \"\"\"\n\n if kwargs.get('mode', 'a') not in ['r', 'r+', 'a']:\n raise ValueError('mode {0} is not allowed while performing a read. '\n 'Allowed modes are r, r+ and a.'\n .format(kwargs.get('mode')))\n # grab the scope\n if 'where' in kwargs:\n kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1)\n\n path_or_buf = _stringify_path(path_or_buf)\n if isinstance(path_or_buf, string_types):\n\n try:\n exists = os.path.exists(path_or_buf)\n\n # if filepath is too long\n except (TypeError, ValueError):\n exists = False\n\n if not exists:\n raise IOError('File %s does not exist' % path_or_buf)\n\n # can't auto open/close if we are using an iterator\n # so delegate to the iterator\n store = HDFStore(path_or_buf, **kwargs)\n auto_close = True\n\n elif isinstance(path_or_buf, HDFStore):\n if not path_or_buf.is_open:\n raise IOError('The HDFStore must be open for reading.')\n\n store = path_or_buf\n auto_close = False\n\n else:\n raise NotImplementedError('Support for generic buffers has not been '\n 'implemented.')\n\n try:\n if key is None:\n groups = store.groups()\n if len(groups) == 0:\n raise ValueError('No dataset in HDF5 file.')\n candidate_only_group = groups[0]\n\n # For the HDF file to have only one dataset, all other groups\n # should then be metadata groups for that candidate group. 
(This\n # assumes that the groups() method enumerates parent groups\n # before their children.)\n for group_to_check in groups[1:]:\n if not _is_metadata_of(group_to_check, candidate_only_group):\n raise ValueError('key must be provided when HDF5 file '\n 'contains multiple datasets.')\n key = candidate_only_group._v_pathname\n return store.select(key, auto_close=auto_close, **kwargs)\n except:\n # if there is an error, close the store\n try:\n store.close()\n except:\n pass\n\n raise\n\n\ndef _is_metadata_of(group, parent_group):\n \"\"\"Check if a given group is a metadata group for a given parent_group.\"\"\"\n if group._v_depth <= parent_group._v_depth:\n return False\n\n current = group\n while current._v_depth > 1:\n parent = current._v_parent\n if parent == parent_group and current._v_name == 'meta':\n return True\n current = current._v_parent\n return False\n\n\nclass HDFStore(StringMixin):\n\n \"\"\"\n dict-like IO interface for storing pandas objects in PyTables\n either Fixed or Table format.\n\n Parameters\n ----------\n path : string\n File path to HDF5 file\n mode : {'a', 'w', 'r', 'r+'}, default 'a'\n\n ``'r'``\n Read-only; no data can be modified.\n ``'w'``\n Write; a new file is created (an existing file with the same\n name would be deleted).\n ``'a'``\n Append; an existing file is opened for reading and writing,\n and if the file does not exist it is created.\n ``'r+'``\n It is similar to ``'a'``, but the file must already exist.\n complevel : int, 1-9, default 0\n If a complib is specified compression will be applied\n where possible\n complib : {'zlib', 'bzip2', 'lzo', 'blosc', None}, default None\n If complevel is > 0 apply compression to objects written\n in the store wherever possible\n fletcher32 : bool, default False\n If applying compression use the fletcher32 checksum\n\n Examples\n --------\n >>> from pandas import DataFrame\n >>> from numpy.random import randn\n >>> bar = DataFrame(randn(10, 4))\n >>> store = HDFStore('test.h5')\n >>> store['foo'] = bar # write to HDF5\n >>> bar = store['foo'] # retrieve\n >>> store.close()\n \"\"\"\n\n def __init__(self, path, mode=None, complevel=None, complib=None,\n fletcher32=False, **kwargs):\n try:\n import tables # noqa\n except ImportError as ex: # pragma: no cover\n raise ImportError('HDFStore requires PyTables, \"{ex}\" problem '\n 'importing'.format(ex=str(ex)))\n\n if complib not in (None, 'blosc', 'bzip2', 'lzo', 'zlib'):\n raise ValueError(\"complib only supports 'blosc', 'bzip2', lzo' \"\n \"or 'zlib' compression.\")\n\n self._path = path\n if mode is None:\n mode = 'a'\n self._mode = mode\n self._handle = None\n self._complevel = complevel\n self._complib = complib\n self._fletcher32 = fletcher32\n self._filters = None\n self.open(mode=mode, **kwargs)\n\n @property\n def root(self):\n \"\"\" return the root node \"\"\"\n self._check_if_open()\n return self._handle.root\n\n @property\n def filename(self):\n return self._path\n\n def __getitem__(self, key):\n return self.get(key)\n\n def __setitem__(self, key, value):\n self.put(key, value)\n\n def __delitem__(self, key):\n return self.remove(key)\n\n def __getattr__(self, name):\n \"\"\" allow attribute access to get stores \"\"\"\n self._check_if_open()\n try:\n return self.get(name)\n except:\n pass\n raise AttributeError(\"'%s' object has no attribute '%s'\" %\n (type(self).__name__, name))\n\n def __contains__(self, key):\n \"\"\" check for existance of this key\n can match the exact pathname or the pathnm w/o the leading '/'\n \"\"\"\n node = 
self.get_node(key)\n if node is not None:\n name = node._v_pathname\n if name == key or name[1:] == key:\n return True\n return False\n\n def __len__(self):\n return len(self.groups())\n\n def __unicode__(self):\n output = '%s\\nFile path: %s\\n' % (type(self), pprint_thing(self._path))\n if self.is_open:\n lkeys = sorted(list(self.keys()))\n if len(lkeys):\n keys = []\n values = []\n\n for k in lkeys:\n try:\n s = self.get_storer(k)\n if s is not None:\n keys.append(pprint_thing(s.pathname or k))\n values.append(\n pprint_thing(s or 'invalid_HDFStore node'))\n except Exception as detail:\n keys.append(k)\n values.append(\"[invalid_HDFStore node: %s]\"\n % pprint_thing(detail))\n\n output += adjoin(12, keys, values)\n else:\n output += 'Empty'\n else:\n output += \"File is CLOSED\"\n\n return output\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n def keys(self):\n \"\"\"\n Return a (potentially unordered) list of the keys corresponding to the\n objects stored in the HDFStore. These are ABSOLUTE path-names (e.g.\n have the leading '/'\n \"\"\"\n return [n._v_pathname for n in self.groups()]\n\n def __iter__(self):\n return iter(self.keys())\n\n def items(self):\n \"\"\"\n iterate on key->group\n \"\"\"\n for g in self.groups():\n yield g._v_pathname, g\n\n iteritems = items\n\n def open(self, mode='a', **kwargs):\n \"\"\"\n Open the file in the specified mode\n\n Parameters\n ----------\n mode : {'a', 'w', 'r', 'r+'}, default 'a'\n See HDFStore docstring or tables.open_file for info about modes\n \"\"\"\n tables = _tables()\n\n if self._mode != mode:\n\n # if we are changing a write mode to read, ok\n if self._mode in ['a', 'w'] and mode in ['r', 'r+']:\n pass\n elif mode in ['w']:\n\n # this would truncate, raise here\n if self.is_open:\n raise PossibleDataLossError(\n \"Re-opening the file [{0}] with mode [{1}] \"\n \"will delete the current file!\"\n .format(self._path, self._mode)\n )\n\n self._mode = mode\n\n # close and reopen the handle\n if self.is_open:\n self.close()\n\n if self._complib is not None:\n if self._complevel is None:\n self._complevel = 9\n self._filters = _tables().Filters(self._complevel,\n self._complib,\n fletcher32=self._fletcher32)\n\n try:\n self._handle = tables.open_file(self._path, self._mode, **kwargs)\n except (IOError) as e: # pragma: no cover\n if 'can not be written' in str(e):\n print('Opening %s in read-only mode' % self._path)\n self._handle = tables.open_file(self._path, 'r', **kwargs)\n else:\n raise\n\n except (ValueError) as e:\n\n # trap PyTables >= 3.1 FILE_OPEN_POLICY exception\n # to provide an updated message\n if 'FILE_OPEN_POLICY' in str(e):\n e = ValueError(\n \"PyTables [{version}] no longer supports opening multiple \"\n \"files\\n\"\n \"even in read-only mode on this HDF5 version \"\n \"[{hdf_version}]. 
You can accept this\\n\"\n \"and not open the same file multiple times at once,\\n\"\n \"upgrade the HDF5 version, or downgrade to PyTables 3.0.0 \"\n \"which allows\\n\"\n \"files to be opened multiple times at once\\n\"\n .format(version=tables.__version__,\n hdf_version=tables.get_hdf5_version()))\n\n raise e\n\n except (Exception) as e:\n\n # trying to read from a non-existant file causes an error which\n # is not part of IOError, make it one\n if self._mode == 'r' and 'Unable to open/create file' in str(e):\n raise IOError(str(e))\n raise\n\n def close(self):\n \"\"\"\n Close the PyTables file handle\n \"\"\"\n if self._handle is not None:\n self._handle.close()\n self._handle = None\n\n @property\n def is_open(self):\n \"\"\"\n return a boolean indicating whether the file is open\n \"\"\"\n if self._handle is None:\n return False\n return bool(self._handle.isopen)\n\n def flush(self, fsync=False):\n \"\"\"\n Force all buffered modifications to be written to disk.\n\n Parameters\n ----------\n fsync : bool (default False)\n call ``os.fsync()`` on the file handle to force writing to disk.\n\n Notes\n -----\n Without ``fsync=True``, flushing may not guarantee that the OS writes\n to disk. With fsync, the operation will block until the OS claims the\n file has been written; however, other caching layers may still\n interfere.\n \"\"\"\n if self._handle is not None:\n self._handle.flush()\n if fsync:\n try:\n os.fsync(self._handle.fileno())\n except:\n pass\n\n def get(self, key):\n \"\"\"\n Retrieve pandas object stored in file\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n obj : type of object stored in file\n \"\"\"\n group = self.get_node(key)\n if group is None:\n raise KeyError('No object named %s in the file' % key)\n return self._read_group(group)\n\n def select(self, key, where=None, start=None, stop=None, columns=None,\n iterator=False, chunksize=None, auto_close=False, **kwargs):\n \"\"\"\n Retrieve pandas object stored in file, optionally based on where\n criteria\n\n Parameters\n ----------\n key : object\n where : list of Term (or convertable) objects, optional\n start : integer (defaults to None), row number to start selection\n stop : integer (defaults to None), row number to stop selection\n columns : a list of columns that if not None, will limit the return\n columns\n iterator : boolean, return an iterator, default False\n chunksize : nrows to include in iteration, return an iterator\n auto_close : boolean, should automatically close the store when\n finished, default is False\n\n Returns\n -------\n The selected object\n\n \"\"\"\n group = self.get_node(key)\n if group is None:\n raise KeyError('No object named %s in the file' % key)\n\n # create the storer and axes\n where = _ensure_term(where, scope_level=1)\n s = self._create_storer(group)\n s.infer_axes()\n\n # function to call on iteration\n def func(_start, _stop, _where):\n return s.read(start=_start, stop=_stop,\n where=_where,\n columns=columns, **kwargs)\n\n # create the iterator\n it = TableIterator(self, s, func, where=where, nrows=s.nrows,\n start=start, stop=stop, iterator=iterator,\n chunksize=chunksize, auto_close=auto_close)\n\n return it.get_result()\n\n def select_as_coordinates(\n self, key, where=None, start=None, stop=None, **kwargs):\n \"\"\"\n return the selection as an Index\n\n Parameters\n ----------\n key : object\n where : list of Term (or convertable) objects, optional\n start : integer (defaults to None), row number to start selection\n stop : integer (defaults to 
None), row number to stop selection\n \"\"\"\n where = _ensure_term(where, scope_level=1)\n return self.get_storer(key).read_coordinates(where=where, start=start,\n stop=stop, **kwargs)\n\n def select_column(self, key, column, **kwargs):\n \"\"\"\n return a single column from the table. This is generally only useful to\n select an indexable\n\n Parameters\n ----------\n key : object\n column: the column of interest\n\n Exceptions\n ----------\n raises KeyError if the column is not found (or key is not a valid\n store)\n raises ValueError if the column can not be extracted individually (it\n is part of a data block)\n\n \"\"\"\n return self.get_storer(key).read_column(column=column, **kwargs)\n\n def select_as_multiple(self, keys, where=None, selector=None, columns=None,\n start=None, stop=None, iterator=False,\n chunksize=None, auto_close=False, **kwargs):\n \"\"\" Retrieve pandas objects from multiple tables\n\n Parameters\n ----------\n keys : a list of the tables\n selector : the table to apply the where criteria (defaults to keys[0]\n if not supplied)\n columns : the columns I want back\n start : integer (defaults to None), row number to start selection\n stop : integer (defaults to None), row number to stop selection\n iterator : boolean, return an iterator, default False\n chunksize : nrows to include in iteration, return an iterator\n\n Exceptions\n ----------\n raises KeyError if keys or selector is not found or keys is empty\n raises TypeError if keys is not a list or tuple\n raises ValueError if the tables are not ALL THE SAME DIMENSIONS\n \"\"\"\n\n # default to single select\n where = _ensure_term(where, scope_level=1)\n if isinstance(keys, (list, tuple)) and len(keys) == 1:\n keys = keys[0]\n if isinstance(keys, string_types):\n return self.select(key=keys, where=where, columns=columns,\n start=start, stop=stop, iterator=iterator,\n chunksize=chunksize, **kwargs)\n\n if not isinstance(keys, (list, tuple)):\n raise TypeError(\"keys must be a list/tuple\")\n\n if not len(keys):\n raise ValueError(\"keys must have a non-zero length\")\n\n if selector is None:\n selector = keys[0]\n\n # collect the tables\n tbls = [self.get_storer(k) for k in keys]\n s = self.get_storer(selector)\n\n # validate rows\n nrows = None\n for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):\n if t is None:\n raise KeyError(\"Invalid table [%s]\" % k)\n if not t.is_table:\n raise TypeError(\n \"object [%s] is not a table, and cannot be used in all \"\n \"select as multiple\" % t.pathname\n )\n\n if nrows is None:\n nrows = t.nrows\n elif t.nrows != nrows:\n raise ValueError(\n \"all tables must have exactly the same nrows!\")\n\n # axis is the concentation axes\n axis = list(set([t.non_index_axes[0][0] for t in tbls]))[0]\n\n def func(_start, _stop, _where):\n\n # retrieve the objs, _where is always passed as a set of\n # coordinates here\n objs = [t.read(where=_where, columns=columns, **kwargs)\n for t in tbls]\n\n # concat and return\n return concat(objs, axis=axis,\n verify_integrity=False).consolidate()\n\n # create the iterator\n it = TableIterator(self, s, func, where=where, nrows=nrows,\n start=start, stop=stop, iterator=iterator,\n chunksize=chunksize, auto_close=auto_close)\n\n return it.get_result(coordinates=True)\n\n def put(self, key, value, format=None, append=False, **kwargs):\n \"\"\"\n Store object in HDFStore\n\n Parameters\n ----------\n key : object\n value : {Series, DataFrame, Panel}\n format : 'fixed(f)|table(t)', default is 'fixed'\n fixed(f) : Fixed format\n Fast 
writing/reading. Not-appendable, nor searchable\n table(t) : Table format\n Write as a PyTables Table structure which may perform\n worse but allow more flexible operations like searching\n / selecting subsets of the data\n append : boolean, default False\n This will force Table format, append the input data to the\n existing.\n data_columns : list of columns to create as data columns, or True to\n use all columns. See\n `here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__ # noqa\n encoding : default None, provide an encoding for strings\n dropna : boolean, default False, do not write an ALL nan row to\n the store settable by the option 'io.hdf.dropna_table'\n \"\"\"\n if format is None:\n format = get_option(\"io.hdf.default_format\") or 'fixed'\n kwargs = self._validate_format(format, kwargs)\n self._write_to_group(key, value, append=append, **kwargs)\n\n def remove(self, key, where=None, start=None, stop=None):\n \"\"\"\n Remove pandas object partially by specifying the where condition\n\n Parameters\n ----------\n key : string\n Node to remove or delete rows from\n where : list of Term (or convertable) objects, optional\n start : integer (defaults to None), row number to start selection\n stop : integer (defaults to None), row number to stop selection\n\n Returns\n -------\n number of rows removed (or None if not a Table)\n\n Exceptions\n ----------\n raises KeyError if key is not a valid store\n\n \"\"\"\n where = _ensure_term(where, scope_level=1)\n try:\n s = self.get_storer(key)\n except:\n\n if where is not None:\n raise ValueError(\n \"trying to remove a node with a non-None where clause!\")\n\n # we are actually trying to remove a node (with children)\n s = self.get_node(key)\n if s is not None:\n s._f_remove(recursive=True)\n return None\n\n if s is None:\n raise KeyError('No object named %s in the file' % key)\n\n # remove the node\n if where is None and start is None and stop is None:\n s.group._f_remove(recursive=True)\n\n # delete from the table\n else:\n if not s.is_table:\n raise ValueError(\n 'can only remove with where on objects written as tables')\n return s.delete(where=where, start=start, stop=stop)\n\n def append(self, key, value, format=None, append=True, columns=None,\n dropna=None, **kwargs):\n \"\"\"\n Append to Table in file. Node must already exist and be Table\n format.\n\n Parameters\n ----------\n key : object\n value : {Series, DataFrame, Panel, Panel4D}\n format: 'table' is the default\n table(t) : table format\n Write as a PyTables Table structure which may perform\n worse but allow more flexible operations like searching\n / selecting subsets of the data\n append : boolean, default True, append the input data to the\n existing\n data_columns : list of columns, or True, default None\n List of columns to create as indexed data columns for on-disk\n queries, or True to use all columns. By default only the axes\n of the object are indexed. 
See `here\n <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__.\n min_itemsize : dict of columns that specify minimum string sizes\n nan_rep : string to use as string nan represenation\n chunksize : size to chunk the writing\n expectedrows : expected TOTAL row size of this table\n encoding : default None, provide an encoding for strings\n dropna : boolean, default False, do not write an ALL nan row to\n the store settable by the option 'io.hdf.dropna_table'\n\n Notes\n -----\n Does *not* check if data being appended overlaps with existing\n data in the table, so be careful\n \"\"\"\n if columns is not None:\n raise TypeError(\"columns is not a supported keyword in append, \"\n \"try data_columns\")\n\n if dropna is None:\n dropna = get_option(\"io.hdf.dropna_table\")\n if format is None:\n format = get_option(\"io.hdf.default_format\") or 'table'\n kwargs = self._validate_format(format, kwargs)\n self._write_to_group(key, value, append=append, dropna=dropna,\n **kwargs)\n\n def append_to_multiple(self, d, value, selector, data_columns=None,\n axes=None, dropna=False, **kwargs):\n \"\"\"\n Append to multiple tables\n\n Parameters\n ----------\n d : a dict of table_name to table_columns, None is acceptable as the\n values of one node (this will get all the remaining columns)\n value : a pandas object\n selector : a string that designates the indexable table; all of its\n columns will be designed as data_columns, unless data_columns is\n passed, in which case these are used\n data_columns : list of columns to create as data columns, or True to\n use all columns\n dropna : if evaluates to True, drop rows from all tables if any single\n row in each table has all NaN. Default False.\n\n Notes\n -----\n axes parameter is currently not accepted\n\n \"\"\"\n if axes is not None:\n raise TypeError(\"axes is currently not accepted as a parameter to\"\n \" append_to_multiple; you can create the \"\n \"tables independently instead\")\n\n if not isinstance(d, dict):\n raise ValueError(\n \"append_to_multiple must have a dictionary specified as the \"\n \"way to split the value\"\n )\n\n if selector not in d:\n raise ValueError(\n \"append_to_multiple requires a selector that is in passed dict\"\n )\n\n # figure out the splitting axis (the non_index_axis)\n axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]\n\n # figure out how to split the value\n remain_key = None\n remain_values = []\n for k, v in d.items():\n if v is None:\n if remain_key is not None:\n raise ValueError(\n \"append_to_multiple can only have one value in d that \"\n \"is None\"\n )\n remain_key = k\n else:\n remain_values.extend(v)\n if remain_key is not None:\n ordered = value.axes[axis]\n ordd = ordered.difference(Index(remain_values))\n ordd = sorted(ordered.get_indexer(ordd))\n d[remain_key] = ordered.take(ordd)\n\n # data_columns\n if data_columns is None:\n data_columns = d[selector]\n\n # ensure rows are synchronized across the tables\n if dropna:\n idxs = (value[cols].dropna(how='all').index for cols in d.values())\n valid_index = next(idxs)\n for index in idxs:\n valid_index = valid_index.intersection(index)\n value = value.ix[valid_index]\n\n # append\n for k, v in d.items():\n dc = data_columns if k == selector else None\n\n # compute the val\n val = value.reindex_axis(v, axis=axis)\n\n self.append(k, val, data_columns=dc, **kwargs)\n\n def create_table_index(self, key, **kwargs):\n \"\"\" Create a pytables index on the table\n Parameters\n ----------\n key : object (the node 
to index)\n\n Exceptions\n ----------\n raises if the node is not a table\n\n \"\"\"\n\n # version requirements\n _tables()\n s = self.get_storer(key)\n if s is None:\n return\n\n if not s.is_table:\n raise TypeError(\n \"cannot create table index on a Fixed format store\")\n s.create_index(**kwargs)\n\n def groups(self):\n \"\"\"return a list of all the top-level nodes (that are not themselves a\n pandas storage object)\n \"\"\"\n _tables()\n self._check_if_open()\n return [\n g for g in self._handle.walk_nodes()\n if (getattr(g._v_attrs, 'pandas_type', None) or\n getattr(g, 'table', None) or\n (isinstance(g, _table_mod.table.Table) and\n g._v_name != u('table')))\n ]\n\n def get_node(self, key):\n \"\"\" return the node with the key or None if it does not exist \"\"\"\n self._check_if_open()\n try:\n if not key.startswith('/'):\n key = '/' + key\n return self._handle.get_node(self.root, key)\n except:\n return None\n\n def get_storer(self, key):\n \"\"\" return the storer object for a key, raise if not in the file \"\"\"\n group = self.get_node(key)\n if group is None:\n return None\n s = self._create_storer(group)\n s.infer_axes()\n return s\n\n def copy(self, file, mode='w', propindexes=True, keys=None, complib=None,\n complevel=None, fletcher32=False, overwrite=True):\n \"\"\" copy the existing store to a new file, upgrading in place\n\n Parameters\n ----------\n propindexes: restore indexes in copied file (defaults to True)\n keys : list of keys to include in the copy (defaults to all)\n overwrite : overwrite (remove and replace) existing nodes in the\n new store (default is True)\n mode, complib, complevel, fletcher32 same as in HDFStore.__init__\n\n Returns\n -------\n open file handle of the new store\n\n \"\"\"\n new_store = HDFStore(\n file,\n mode=mode,\n complib=complib,\n complevel=complevel,\n fletcher32=fletcher32)\n if keys is None:\n keys = list(self.keys())\n if not isinstance(keys, (tuple, list)):\n keys = [keys]\n for k in keys:\n s = self.get_storer(k)\n if s is not None:\n\n if k in new_store:\n if overwrite:\n new_store.remove(k)\n\n data = self.select(k)\n if s.is_table:\n\n index = False\n if propindexes:\n index = [a.name for a in s.axes if a.is_indexed]\n new_store.append(\n k, data, index=index,\n data_columns=getattr(s, 'data_columns', None),\n encoding=s.encoding\n )\n else:\n new_store.put(k, data, encoding=s.encoding)\n\n return new_store\n\n # private methods ######\n def _check_if_open(self):\n if not self.is_open:\n raise ClosedFileError(\"{0} file is not open!\".format(self._path))\n\n def _validate_format(self, format, kwargs):\n \"\"\" validate / deprecate formats; return the new kwargs \"\"\"\n kwargs = kwargs.copy()\n\n # validate\n try:\n kwargs['format'] = _FORMAT_MAP[format.lower()]\n except:\n raise TypeError(\"invalid HDFStore format specified [{0}]\"\n .format(format))\n\n return kwargs\n\n def _create_storer(self, group, format=None, value=None, append=False,\n **kwargs):\n \"\"\" return a suitable class to operate \"\"\"\n\n def error(t):\n raise TypeError(\n \"cannot properly create the storer for: [%s] [group->%s,\"\n \"value->%s,format->%s,append->%s,kwargs->%s]\"\n % (t, group, type(value), format, append, kwargs)\n )\n\n pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None))\n tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None))\n\n # infer the pt from the passed value\n if pt is None:\n if value is None:\n\n _tables()\n if (getattr(group, 'table', None) or\n isinstance(group, _table_mod.table.Table)):\n pt = 
u('frame_table')\n tt = u('generic_table')\n else:\n raise TypeError(\n \"cannot create a storer if the object is not existing \"\n \"nor a value are passed\")\n else:\n\n try:\n pt = _TYPE_MAP[type(value)]\n except:\n error('_TYPE_MAP')\n\n # we are actually a table\n if format == 'table':\n pt += u('_table')\n\n # a storer node\n if u('table') not in pt:\n try:\n return globals()[_STORER_MAP[pt]](self, group, **kwargs)\n except:\n error('_STORER_MAP')\n\n # existing node (and must be a table)\n if tt is None:\n\n # if we are a writer, determin the tt\n if value is not None:\n\n if pt == u('series_table'):\n index = getattr(value, 'index', None)\n if index is not None:\n if index.nlevels == 1:\n tt = u('appendable_series')\n elif index.nlevels > 1:\n tt = u('appendable_multiseries')\n elif pt == u('frame_table'):\n index = getattr(value, 'index', None)\n if index is not None:\n if index.nlevels == 1:\n tt = u('appendable_frame')\n elif index.nlevels > 1:\n tt = u('appendable_multiframe')\n elif pt == u('wide_table'):\n tt = u('appendable_panel')\n elif pt == u('ndim_table'):\n tt = u('appendable_ndim')\n\n else:\n\n # distiguish between a frame/table\n tt = u('legacy_panel')\n try:\n fields = group.table._v_attrs.fields\n if len(fields) == 1 and fields[0] == u('value'):\n tt = u('legacy_frame')\n except:\n pass\n\n try:\n return globals()[_TABLE_MAP[tt]](self, group, **kwargs)\n except:\n error('_TABLE_MAP')\n\n def _write_to_group(self, key, value, format, index=True, append=False,\n complib=None, encoding=None, **kwargs):\n group = self.get_node(key)\n\n # remove the node if we are not appending\n if group is not None and not append:\n self._handle.remove_node(group, recursive=True)\n group = None\n\n # we don't want to store a table node at all if are object is 0-len\n # as there are not dtypes\n if getattr(value, 'empty', None) and (format == 'table' or append):\n return\n\n if group is None:\n paths = key.split('/')\n\n # recursively create the groups\n path = '/'\n for p in paths:\n if not len(p):\n continue\n new_path = path\n if not path.endswith('/'):\n new_path += '/'\n new_path += p\n group = self.get_node(new_path)\n if group is None:\n group = self._handle.create_group(path, p)\n path = new_path\n\n s = self._create_storer(group, format, value, append=append,\n encoding=encoding, **kwargs)\n if append:\n # raise if we are trying to append to a Fixed format,\n # or a table that exists (and we are putting)\n if (not s.is_table or\n (s.is_table and format == 'fixed' and s.is_exists)):\n raise ValueError('Can only append to Tables')\n if not s.is_exists:\n s.set_object_info()\n else:\n s.set_object_info()\n\n if not s.is_table and complib:\n raise ValueError(\n 'Compression not supported on Fixed format stores'\n )\n\n # write the object\n s.write(obj=value, append=append, complib=complib, **kwargs)\n\n if s.is_table and index:\n s.create_index(columns=index)\n\n def _read_group(self, group, **kwargs):\n s = self._create_storer(group)\n s.infer_axes()\n return s.read(**kwargs)\n\n\ndef get_store(path, **kwargs):\n \"\"\" Backwards compatible alias for ``HDFStore``\n \"\"\"\n return HDFStore(path, **kwargs)\n\n\nclass TableIterator(object):\n\n \"\"\" define the iteration interface on a table\n\n Parameters\n ----------\n\n store : the reference store\n s : the refered storer\n func : the function to execute the query\n where : the where of the query\n nrows : the rows to iterate on\n start : the passed start value (default is None)\n stop : the passed stop value (default is 
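# Illustrative sketch of how the storer chosen by _create_storer differs by
# format; the file name is hypothetical.  'fixed' stores are written once
# and read back whole, while 'table' stores support append and queries.
import pandas as pd

df = pd.DataFrame({'a': range(5)})

with pd.HDFStore('formats.h5', mode='w') as store:
    store.put('fixed_frame', df)                  # Fixed format (default for put)
    store.put('table_frame', df, format='table')  # Table format

    store.append('table_frame', df)               # tables can be appended to
    try:
        store.append('fixed_frame', df)           # "Can only append to Tables"
    except ValueError as err:
        print(err)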
None)\n iterator : boolean, whether to use the default iterator\n chunksize : the passed chunking value (default is 50000)\n auto_close : boolean, automatically close the store at the end of\n iteration, default is False\n kwargs : the passed kwargs\n \"\"\"\n\n def __init__(self, store, s, func, where, nrows, start=None, stop=None,\n iterator=False, chunksize=None, auto_close=False):\n self.store = store\n self.s = s\n self.func = func\n self.where = where\n\n # set start/stop if they are not set if we are a table\n if self.s.is_table:\n if nrows is None:\n nrows = 0\n if start is None:\n start = 0\n if stop is None:\n stop = nrows\n stop = min(nrows, stop)\n\n self.nrows = nrows\n self.start = start\n self.stop = stop\n\n self.coordinates = None\n if iterator or chunksize is not None:\n if chunksize is None:\n chunksize = 100000\n self.chunksize = int(chunksize)\n else:\n self.chunksize = None\n\n self.auto_close = auto_close\n\n def __iter__(self):\n\n # iterate\n current = self.start\n while current < self.stop:\n\n stop = min(current + self.chunksize, self.stop)\n value = self.func(None, None, self.coordinates[current:stop])\n current = stop\n if value is None or not len(value):\n continue\n\n yield value\n\n self.close()\n\n def close(self):\n if self.auto_close:\n self.store.close()\n\n def get_result(self, coordinates=False):\n\n # return the actual iterator\n if self.chunksize is not None:\n if not self.s.is_table:\n raise TypeError(\n \"can only use an iterator or chunksize on a table\")\n\n self.coordinates = self.s.read_coordinates(where=self.where)\n\n return self\n\n # if specified read via coordinates (necessary for multiple selections\n if coordinates:\n where = self.s.read_coordinates(where=self.where)\n else:\n where = self.where\n\n # directly return the result\n results = self.func(self.start, self.stop, where)\n self.close()\n return results\n\n\nclass IndexCol(StringMixin):\n\n \"\"\" an index column description class\n\n Parameters\n ----------\n\n axis : axis which I reference\n values : the ndarray like converted values\n kind : a string description of this type\n typ : the pytables type\n pos : the position in the pytables\n\n \"\"\"\n is_an_indexable = True\n is_data_indexable = True\n _info_fields = ['freq', 'tz', 'index_name']\n\n def __init__(self, values=None, kind=None, typ=None, cname=None,\n itemsize=None, name=None, axis=None, kind_attr=None,\n pos=None, freq=None, tz=None, index_name=None, **kwargs):\n self.values = values\n self.kind = kind\n self.typ = typ\n self.itemsize = itemsize\n self.name = name\n self.cname = cname\n self.kind_attr = kind_attr\n self.axis = axis\n self.pos = pos\n self.freq = freq\n self.tz = tz\n self.index_name = index_name\n self.table = None\n self.meta = None\n self.metadata = None\n\n if name is not None:\n self.set_name(name, kind_attr)\n if pos is not None:\n self.set_pos(pos)\n\n def set_name(self, name, kind_attr=None):\n \"\"\" set the name of this indexer \"\"\"\n self.name = name\n self.kind_attr = kind_attr or \"%s_kind\" % name\n if self.cname is None:\n self.cname = name\n\n return self\n\n def set_axis(self, axis):\n \"\"\" set the axis over which I index \"\"\"\n self.axis = axis\n\n return self\n\n def set_pos(self, pos):\n \"\"\" set the position of this column in the Table \"\"\"\n self.pos = pos\n if pos is not None and self.typ is not None:\n self.typ._v_pos = pos\n return self\n\n def set_table(self, table):\n self.table = table\n return self\n\n def __unicode__(self):\n temp = tuple(\n 
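# Illustrative sketch: TableIterator is driven from the public API via
# iterator=True or chunksize= on select(); the file name is hypothetical.
import numpy as np
import pandas as pd

df = pd.DataFrame({'v': np.random.randn(1000)})
df.to_hdf('chunks.h5', key='data', format='table', mode='w')

with pd.HDFStore('chunks.h5') as store:
    total = 0
    for chunk in store.select('data', chunksize=250):
        total += len(chunk)
    print(total)  # 1000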
map(pprint_thing,\n (self.name,\n self.cname,\n self.axis,\n self.pos,\n self.kind)))\n return \"name->%s,cname->%s,axis->%s,pos->%s,kind->%s\" % temp\n\n def __eq__(self, other):\n \"\"\" compare 2 col items \"\"\"\n return all([getattr(self, a, None) == getattr(other, a, None)\n for a in ['name', 'cname', 'axis', 'pos']])\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n @property\n def is_indexed(self):\n \"\"\" return whether I am an indexed column \"\"\"\n try:\n return getattr(self.table.cols, self.cname).is_indexed\n except:\n False\n\n def copy(self):\n new_self = copy.copy(self)\n return new_self\n\n def infer(self, handler):\n \"\"\"infer this column from the table: create and return a new object\"\"\"\n table = handler.table\n new_self = self.copy()\n new_self.set_table(table)\n new_self.get_attr()\n new_self.read_metadata(handler)\n return new_self\n\n def convert(self, values, nan_rep, encoding):\n \"\"\" set the values from this selection: take = take ownership \"\"\"\n\n # values is a recarray\n if values.dtype.fields is not None:\n values = values[self.cname]\n\n values = _maybe_convert(values, self.kind, encoding)\n\n kwargs = dict()\n if self.freq is not None:\n kwargs['freq'] = _ensure_decoded(self.freq)\n if self.index_name is not None:\n kwargs['name'] = _ensure_decoded(self.index_name)\n try:\n self.values = Index(values, **kwargs)\n except:\n\n # if the output freq is different that what we recorded,\n # it should be None (see also 'doc example part 2')\n if 'freq' in kwargs:\n kwargs['freq'] = None\n self.values = Index(values, **kwargs)\n\n self.values = _set_tz(self.values, self.tz)\n\n return self\n\n def take_data(self):\n \"\"\" return the values & release the memory \"\"\"\n self.values, values = None, self.values\n return values\n\n @property\n def attrs(self):\n return self.table._v_attrs\n\n @property\n def description(self):\n return self.table.description\n\n @property\n def col(self):\n \"\"\" return my current col description \"\"\"\n return getattr(self.description, self.cname, None)\n\n @property\n def cvalues(self):\n \"\"\" return my cython values \"\"\"\n return self.values\n\n def __iter__(self):\n return iter(self.values)\n\n def maybe_set_size(self, min_itemsize=None, **kwargs):\n \"\"\" maybe set a string col itemsize:\n min_itemsize can be an interger or a dict with this columns name\n with an integer size \"\"\"\n if _ensure_decoded(self.kind) == u('string'):\n\n if isinstance(min_itemsize, dict):\n min_itemsize = min_itemsize.get(self.name)\n\n if min_itemsize is not None and self.typ.itemsize < min_itemsize:\n self.typ = _tables(\n ).StringCol(itemsize=min_itemsize, pos=self.pos)\n\n def validate(self, handler, append, **kwargs):\n self.validate_names()\n\n def validate_names(self):\n pass\n\n def validate_and_set(self, handler, append, **kwargs):\n self.set_table(handler.table)\n self.validate_col()\n self.validate_attr(append)\n self.validate_metadata(handler)\n self.write_metadata(handler)\n self.set_attr()\n\n def validate_col(self, itemsize=None):\n \"\"\" validate this column: return the compared against itemsize \"\"\"\n\n # validate this column for string truncation (or reset to the max size)\n if _ensure_decoded(self.kind) == u('string'):\n c = self.col\n if c is not None:\n if itemsize is None:\n itemsize = self.itemsize\n if c.itemsize < itemsize:\n raise ValueError(\n \"Trying to store a string with len [%s] in [%s] \"\n \"column but\\nthis column has a limit of [%s]!\\n\"\n \"Consider using min_itemsize to preset 
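# Illustrative sketch: string columns get a fixed itemsize from the first
# chunk written, so later appends with longer strings hit the truncation
# error raised above unless min_itemsize pre-sizes the column.  The file
# and key names are hypothetical.
import pandas as pd

short = pd.DataFrame({'s': ['ab', 'cd']})
longer = pd.DataFrame({'s': ['a much longer string']})

with pd.HDFStore('strings.h5', mode='w') as store:
    store.append('t', short, min_itemsize={'s': 50})  # reserve 50 bytes
    store.append('t', longer)                         # fits, no error
    print(store.get_storer('t').table)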
the sizes on \"\n \"these columns\" % (itemsize, self.cname, c.itemsize))\n return c.itemsize\n\n return None\n\n def validate_attr(self, append):\n # check for backwards incompatibility\n if append:\n existing_kind = getattr(self.attrs, self.kind_attr, None)\n if existing_kind is not None and existing_kind != self.kind:\n raise TypeError(\"incompatible kind in col [%s - %s]\" %\n (existing_kind, self.kind))\n\n def update_info(self, info):\n \"\"\" set/update the info for this indexable with the key/value\n if there is a conflict raise/warn as needed \"\"\"\n\n for key in self._info_fields:\n\n value = getattr(self, key, None)\n idx = _get_info(info, self.name)\n\n existing_value = idx.get(key)\n if key in idx and value is not None and existing_value != value:\n\n # frequency/name just warn\n if key in ['freq', 'index_name']:\n ws = attribute_conflict_doc % (key, existing_value, value)\n warnings.warn(ws, AttributeConflictWarning, stacklevel=6)\n\n # reset\n idx[key] = None\n setattr(self, key, None)\n\n else:\n raise ValueError(\n \"invalid info for [%s] for [%s], existing_value [%s] \"\n \"conflicts with new value [%s]\"\n % (self.name, key, existing_value, value))\n else:\n if value is not None or existing_value is not None:\n idx[key] = value\n\n return self\n\n def set_info(self, info):\n \"\"\" set my state from the passed info \"\"\"\n idx = info.get(self.name)\n if idx is not None:\n self.__dict__.update(idx)\n\n def get_attr(self):\n \"\"\" set the kind for this colummn \"\"\"\n self.kind = getattr(self.attrs, self.kind_attr, None)\n\n def set_attr(self):\n \"\"\" set the kind for this colummn \"\"\"\n setattr(self.attrs, self.kind_attr, self.kind)\n\n def read_metadata(self, handler):\n \"\"\" retrieve the metadata for this columns \"\"\"\n self.metadata = handler.read_metadata(self.cname)\n\n def validate_metadata(self, handler):\n \"\"\" validate that kind=category does not change the categories \"\"\"\n if self.meta == 'category':\n new_metadata = self.metadata\n cur_metadata = handler.read_metadata(self.cname)\n if new_metadata is not None and cur_metadata is not None \\\n and not array_equivalent(new_metadata, cur_metadata):\n raise ValueError(\"cannot append a categorical with \"\n \"different categories to the existing\")\n\n def write_metadata(self, handler):\n \"\"\" set the meta data \"\"\"\n if self.metadata is not None:\n handler.write_metadata(self.cname, self.metadata)\n\n\nclass GenericIndexCol(IndexCol):\n\n \"\"\" an index which is not represented in the data of the table \"\"\"\n\n @property\n def is_indexed(self):\n return False\n\n def convert(self, values, nan_rep, encoding):\n \"\"\" set the values from this selection: take = take ownership \"\"\"\n\n self.values = Int64Index(np.arange(self.table.nrows))\n return self\n\n def get_attr(self):\n pass\n\n def set_attr(self):\n pass\n\n\nclass DataCol(IndexCol):\n\n \"\"\" a data holding column, by definition this is not indexable\n\n Parameters\n ----------\n\n data : the actual data\n cname : the column name in the table to hold the data (typically\n values)\n meta : a string description of the metadata\n metadata : the actual metadata\n \"\"\"\n is_an_indexable = False\n is_data_indexable = False\n _info_fields = ['tz', 'ordered']\n\n @classmethod\n def create_for_block(\n cls, i=None, name=None, cname=None, version=None, **kwargs):\n \"\"\" return a new datacol with the block i \"\"\"\n\n if cname is None:\n cname = name or 'values_block_%d' % i\n if name is None:\n name = cname\n\n # prior to 0.10.1, we 
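# Illustrative sketch: under the metadata validation above, a categorical
# column can only be appended with the same set of categories; the file
# and key names are hypothetical.
import pandas as pd

a = pd.DataFrame({'c': pd.Categorical(['x', 'y'], categories=['x', 'y'])})
b = pd.DataFrame({'c': pd.Categorical(['z'], categories=['z'])})

with pd.HDFStore('cats.h5', mode='w') as store:
    store.append('t', a, format='table')
    try:
        store.append('t', b, format='table')  # different categories
    except ValueError as err:
        print(err)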
named values blocks like: values_block_0 an the\n # name values_0\n try:\n if version[0] == 0 and version[1] <= 10 and version[2] == 0:\n m = re.search(\"values_block_(\\d+)\", name)\n if m:\n name = \"values_%s\" % m.groups()[0]\n except:\n pass\n\n return cls(name=name, cname=cname, **kwargs)\n\n def __init__(self, values=None, kind=None, typ=None,\n cname=None, data=None, meta=None, metadata=None,\n block=None, **kwargs):\n super(DataCol, self).__init__(values=values, kind=kind, typ=typ,\n cname=cname, **kwargs)\n self.dtype = None\n self.dtype_attr = u(\"%s_dtype\" % self.name)\n self.meta = meta\n self.meta_attr = u(\"%s_meta\" % self.name)\n self.set_data(data)\n self.set_metadata(metadata)\n\n def __unicode__(self):\n temp = tuple(\n map(pprint_thing,\n (self.name,\n self.cname,\n self.dtype,\n self.kind,\n self.shape)))\n return \"name->%s,cname->%s,dtype->%s,kind->%s,shape->%s\" % temp\n\n def __eq__(self, other):\n \"\"\" compare 2 col items \"\"\"\n return all([getattr(self, a, None) == getattr(other, a, None)\n for a in ['name', 'cname', 'dtype', 'pos']])\n\n def set_data(self, data, dtype=None):\n self.data = data\n if data is not None:\n if dtype is not None:\n self.dtype = dtype\n self.set_kind()\n elif self.dtype is None:\n self.dtype = data.dtype.name\n self.set_kind()\n\n def take_data(self):\n \"\"\" return the data & release the memory \"\"\"\n self.data, data = None, self.data\n return data\n\n def set_metadata(self, metadata):\n \"\"\" record the metadata \"\"\"\n if metadata is not None:\n metadata = np.array(metadata, copy=False).ravel()\n self.metadata = metadata\n\n def set_kind(self):\n # set my kind if we can\n\n if self.dtype is not None:\n dtype = _ensure_decoded(self.dtype)\n\n if dtype.startswith(u('string')) or dtype.startswith(u('bytes')):\n self.kind = 'string'\n elif dtype.startswith(u('float')):\n self.kind = 'float'\n elif dtype.startswith(u('complex')):\n self.kind = 'complex'\n elif dtype.startswith(u('int')) or dtype.startswith(u('uint')):\n self.kind = 'integer'\n elif dtype.startswith(u('date')):\n self.kind = 'datetime'\n elif dtype.startswith(u('timedelta')):\n self.kind = 'timedelta'\n elif dtype.startswith(u('bool')):\n self.kind = 'bool'\n else:\n raise AssertionError(\n \"cannot interpret dtype of [%s] in [%s]\" % (dtype, self))\n\n # set my typ if we need\n if self.typ is None:\n self.typ = getattr(self.description, self.cname, None)\n\n def set_atom(self, block, block_items, existing_col, min_itemsize,\n nan_rep, info, encoding=None, **kwargs):\n \"\"\" create and setup my atom from the block b \"\"\"\n\n self.values = list(block_items)\n\n # short-cut certain block types\n if block.is_categorical:\n return self.set_atom_categorical(block, items=block_items,\n info=info)\n elif block.is_datetimetz:\n return self.set_atom_datetime64tz(block, info=info)\n elif block.is_datetime:\n return self.set_atom_datetime64(block)\n elif block.is_timedelta:\n return self.set_atom_timedelta64(block)\n elif block.is_complex:\n return self.set_atom_complex(block)\n\n dtype = block.dtype.name\n inferred_type = lib.infer_dtype(block.values)\n\n if inferred_type == 'date':\n raise TypeError(\n \"[date] is not implemented as a table column\")\n elif inferred_type == 'datetime':\n # after 8260\n # this only would be hit for a mutli-timezone dtype\n # which is an error\n\n raise TypeError(\n \"too many timezones in this block, create separate \"\n \"data columns\"\n )\n elif inferred_type == 'unicode':\n raise TypeError(\n \"[unicode] is not implemented as a 
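# Illustrative sketch: set_kind above maps numpy dtype names onto coarse
# column kinds, which determine the PyTables column classes visible in the
# stored table description.  The file and key names are hypothetical.
import numpy as np
import pandas as pd

df = pd.DataFrame({'i': np.arange(3, dtype='int64'),
                   'f': np.linspace(0.0, 1.0, 3),
                   'flag': [True, False, True]})

with pd.HDFStore('kinds.h5', mode='w') as store:
    store.append('t', df)
    print(store.get_storer('t').table.description)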
table column\")\n\n # this is basically a catchall; if say a datetime64 has nans then will\n # end up here ###\n elif inferred_type == 'string' or dtype == 'object':\n self.set_atom_string(\n block, block_items,\n existing_col,\n min_itemsize,\n nan_rep,\n encoding)\n\n # set as a data block\n else:\n self.set_atom_data(block)\n\n def get_atom_string(self, block, itemsize):\n return _tables().StringCol(itemsize=itemsize, shape=block.shape[0])\n\n def set_atom_string(self, block, block_items, existing_col, min_itemsize,\n nan_rep, encoding):\n # fill nan items with myself, don't disturb the blocks by\n # trying to downcast\n block = block.fillna(nan_rep, downcast=False)\n if isinstance(block, list):\n block = block[0]\n data = block.values\n\n # see if we have a valid string type\n inferred_type = lib.infer_dtype(data.ravel())\n if inferred_type != 'string':\n\n # we cannot serialize this data, so report an exception on a column\n # by column basis\n for i, item in enumerate(block_items):\n\n col = block.iget(i)\n inferred_type = lib.infer_dtype(col.ravel())\n if inferred_type != 'string':\n raise TypeError(\n \"Cannot serialize the column [%s] because\\n\"\n \"its data contents are [%s] object dtype\"\n % (item, inferred_type)\n )\n\n # itemsize is the maximum length of a string (along any dimension)\n data_converted = _convert_string_array(data, encoding)\n itemsize = data_converted.itemsize\n\n # specified min_itemsize?\n if isinstance(min_itemsize, dict):\n min_itemsize = int(min_itemsize.get(\n self.name) or min_itemsize.get('values') or 0)\n itemsize = max(min_itemsize or 0, itemsize)\n\n # check for column in the values conflicts\n if existing_col is not None:\n eci = existing_col.validate_col(itemsize)\n if eci > itemsize:\n itemsize = eci\n\n self.itemsize = itemsize\n self.kind = 'string'\n self.typ = self.get_atom_string(block, itemsize)\n self.set_data(data_converted.astype('|S%d' % itemsize, copy=False))\n\n def get_atom_coltype(self, kind=None):\n \"\"\" return the PyTables column class for this column \"\"\"\n if kind is None:\n kind = self.kind\n if self.kind.startswith('uint'):\n col_name = \"UInt%sCol\" % kind[4:]\n else:\n col_name = \"%sCol\" % kind.capitalize()\n\n return getattr(_tables(), col_name)\n\n def get_atom_data(self, block, kind=None):\n return self.get_atom_coltype(kind=kind)(shape=block.shape[0])\n\n def set_atom_complex(self, block):\n self.kind = block.dtype.name\n itemsize = int(self.kind.split('complex')[-1]) // 8\n self.typ = _tables().ComplexCol(\n itemsize=itemsize, shape=block.shape[0])\n self.set_data(block.values.astype(self.typ.type, copy=False))\n\n def set_atom_data(self, block):\n self.kind = block.dtype.name\n self.typ = self.get_atom_data(block)\n self.set_data(block.values.astype(self.typ.type, copy=False))\n\n def set_atom_categorical(self, block, items, info=None, values=None):\n # currently only supports a 1-D categorical\n # in a 1-D block\n\n values = block.values\n codes = values.codes\n self.kind = 'integer'\n self.dtype = codes.dtype.name\n if values.ndim > 1:\n raise NotImplementedError(\"only support 1-d categoricals\")\n if len(items) > 1:\n raise NotImplementedError(\"only support single block categoricals\")\n\n # write the codes; must be in a block shape\n self.ordered = values.ordered\n self.typ = self.get_atom_data(block, kind=codes.dtype.name)\n self.set_data(_block_shape(codes))\n\n # write the categories\n self.meta = 'category'\n self.set_metadata(block.values.categories)\n\n # update the info\n 
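# Illustrative sketch: an object-dtype column must infer cleanly as strings;
# mixed Python objects trigger the per-column "Cannot serialize the column"
# TypeError raised above.  The file and key names are hypothetical.
import pandas as pd

ok = pd.DataFrame({'s': ['a', 'b']})        # object dtype, all strings
bad = pd.DataFrame({'s': ['a', 1, 2.5]})    # object dtype, mixed types

with pd.HDFStore('objects.h5', mode='w') as store:
    store.append('good', ok)      # stored as a string column
    try:
        store.append('bad', bad)
    except TypeError as err:
        print(err)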
self.update_info(info)\n\n def get_atom_datetime64(self, block):\n return _tables().Int64Col(shape=block.shape[0])\n\n def set_atom_datetime64(self, block, values=None):\n self.kind = 'datetime64'\n self.typ = self.get_atom_datetime64(block)\n if values is None:\n values = block.values.view('i8')\n self.set_data(values, 'datetime64')\n\n def set_atom_datetime64tz(self, block, info, values=None):\n\n if values is None:\n values = block.values\n\n # convert this column to i8 in UTC, and save the tz\n values = values.asi8.reshape(block.shape)\n\n # store a converted timezone\n self.tz = _get_tz(block.values.tz)\n self.update_info(info)\n\n self.kind = 'datetime64'\n self.typ = self.get_atom_datetime64(block)\n self.set_data(values, 'datetime64')\n\n def get_atom_timedelta64(self, block):\n return _tables().Int64Col(shape=block.shape[0])\n\n def set_atom_timedelta64(self, block, values=None):\n self.kind = 'timedelta64'\n self.typ = self.get_atom_timedelta64(block)\n if values is None:\n values = block.values.view('i8')\n self.set_data(values, 'timedelta64')\n\n @property\n def shape(self):\n return getattr(self.data, 'shape', None)\n\n @property\n def cvalues(self):\n \"\"\" return my cython values \"\"\"\n return self.data\n\n def validate_attr(self, append):\n \"\"\"validate that we have the same order as the existing & same dtype\"\"\"\n if append:\n existing_fields = getattr(self.attrs, self.kind_attr, None)\n if (existing_fields is not None and\n existing_fields != list(self.values)):\n raise ValueError(\"appended items do not match existing items\"\n \" in table!\")\n\n existing_dtype = getattr(self.attrs, self.dtype_attr, None)\n if (existing_dtype is not None and\n existing_dtype != self.dtype):\n raise ValueError(\"appended items dtype do not match existing \"\n \"items dtype in table!\")\n\n def convert(self, values, nan_rep, encoding):\n \"\"\"set the data from this selection (and convert to the correct dtype\n if we can)\n \"\"\"\n\n # values is a recarray\n if values.dtype.fields is not None:\n values = values[self.cname]\n\n self.set_data(values)\n\n # use the meta if needed\n meta = _ensure_decoded(self.meta)\n\n # convert to the correct dtype\n if self.dtype is not None:\n dtype = _ensure_decoded(self.dtype)\n\n # reverse converts\n if dtype == u('datetime64'):\n\n # recreate with tz if indicated\n self.data = _set_tz(self.data, self.tz, coerce=True)\n\n elif dtype == u('timedelta64'):\n self.data = np.asarray(self.data, dtype='m8[ns]')\n elif dtype == u('date'):\n try:\n self.data = np.asarray(\n [date.fromordinal(v) for v in self.data], dtype=object)\n except ValueError:\n self.data = np.asarray(\n [date.fromtimestamp(v) for v in self.data],\n dtype=object)\n elif dtype == u('datetime'):\n self.data = np.asarray(\n [datetime.fromtimestamp(v) for v in self.data],\n dtype=object)\n\n elif meta == u('category'):\n\n # we have a categorical\n categories = self.metadata\n self.data = Categorical.from_codes(self.data.ravel(),\n categories=categories,\n ordered=self.ordered)\n\n else:\n\n try:\n self.data = self.data.astype(dtype, copy=False)\n except:\n self.data = self.data.astype('O', copy=False)\n\n # convert nans / decode\n if _ensure_decoded(self.kind) == u('string'):\n self.data = _unconvert_string_array(\n self.data, nan_rep=nan_rep, encoding=encoding)\n\n return self\n\n def get_attr(self):\n \"\"\" get the data for this colummn \"\"\"\n self.values = getattr(self.attrs, self.kind_attr, None)\n self.dtype = getattr(self.attrs, self.dtype_attr, None)\n self.meta = 
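# Illustrative sketch: tz-aware datetime columns are written as UTC int64
# values plus a stored tz attribute (set_atom_datetime64tz above) and the
# zone is re-attached on read.  The file and key names are hypothetical.
import pandas as pd

idx = pd.date_range('2016-01-01', periods=3, freq='H', tz='US/Eastern')
df = pd.DataFrame({'ts': idx, 'v': range(3)})

with pd.HDFStore('tz.h5', mode='w') as store:
    store.append('t', df, format='table')
    out = store.select('t')
    print(out['ts'].dtype)  # datetime64[ns, US/Eastern]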
getattr(self.attrs, self.meta_attr, None)\n self.set_kind()\n\n def set_attr(self):\n \"\"\" set the data for this colummn \"\"\"\n setattr(self.attrs, self.kind_attr, self.values)\n setattr(self.attrs, self.meta_attr, self.meta)\n if self.dtype is not None:\n setattr(self.attrs, self.dtype_attr, self.dtype)\n\n\nclass DataIndexableCol(DataCol):\n\n \"\"\" represent a data column that can be indexed \"\"\"\n is_data_indexable = True\n\n def validate_names(self):\n if not Index(self.values).is_object():\n raise ValueError(\"cannot have non-object label DataIndexableCol\")\n\n def get_atom_string(self, block, itemsize):\n return _tables().StringCol(itemsize=itemsize)\n\n def get_atom_data(self, block, kind=None):\n return self.get_atom_coltype(kind=kind)()\n\n def get_atom_datetime64(self, block):\n return _tables().Int64Col()\n\n def get_atom_timedelta64(self, block):\n return _tables().Int64Col()\n\n\nclass GenericDataIndexableCol(DataIndexableCol):\n\n \"\"\" represent a generic pytables data column \"\"\"\n\n def get_attr(self):\n pass\n\n\nclass Fixed(StringMixin):\n\n \"\"\" represent an object in my store\n facilitate read/write of various types of objects\n this is an abstract base class\n\n Parameters\n ----------\n\n parent : my parent HDFStore\n group : the group node where the table resides\n \"\"\"\n pandas_kind = None\n obj_type = None\n ndim = None\n is_table = False\n\n def __init__(self, parent, group, encoding=None, **kwargs):\n self.parent = parent\n self.group = group\n self.encoding = _ensure_encoding(encoding)\n self.set_version()\n\n @property\n def is_old_version(self):\n return (self.version[0] <= 0 and self.version[1] <= 10 and\n self.version[2] < 1)\n\n def set_version(self):\n \"\"\" compute and set our version \"\"\"\n version = _ensure_decoded(\n getattr(self.group._v_attrs, 'pandas_version', None))\n try:\n self.version = tuple([int(x) for x in version.split('.')])\n if len(self.version) == 2:\n self.version = self.version + (0,)\n except:\n self.version = (0, 0, 0)\n\n @property\n def pandas_type(self):\n return _ensure_decoded(getattr(self.group._v_attrs,\n 'pandas_type', None))\n\n @property\n def format_type(self):\n return 'fixed'\n\n def __unicode__(self):\n \"\"\" return a pretty representation of myself \"\"\"\n self.infer_axes()\n s = self.shape\n if s is not None:\n if isinstance(s, (list, tuple)):\n s = \"[%s]\" % ','.join([pprint_thing(x) for x in s])\n return \"%-12.12s (shape->%s)\" % (self.pandas_type, s)\n return self.pandas_type\n\n def set_object_info(self):\n \"\"\" set my pandas type & version \"\"\"\n self.attrs.pandas_type = str(self.pandas_kind)\n self.attrs.pandas_version = str(_version)\n self.set_version()\n\n def copy(self):\n new_self = copy.copy(self)\n return new_self\n\n @property\n def storage_obj_type(self):\n return self.obj_type\n\n @property\n def shape(self):\n return self.nrows\n\n @property\n def pathname(self):\n return self.group._v_pathname\n\n @property\n def _handle(self):\n return self.parent._handle\n\n @property\n def _filters(self):\n return self.parent._filters\n\n @property\n def _complevel(self):\n return self.parent._complevel\n\n @property\n def _fletcher32(self):\n return self.parent._fletcher32\n\n @property\n def _complib(self):\n return self.parent._complib\n\n @property\n def attrs(self):\n return self.group._v_attrs\n\n def set_attrs(self):\n \"\"\" set our object attributes \"\"\"\n pass\n\n def get_attrs(self):\n \"\"\" get our object attributes \"\"\"\n pass\n\n @property\n def storable(self):\n 
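# Illustrative sketch: every stored key is backed by a Fixed or Table storer
# object; in this version of pandas, printing the store (or a storer from
# get_storer) uses the __unicode__ implementations defined here.  The file
# and key names are hypothetical.
import pandas as pd

df = pd.DataFrame({'a': range(3)})

with pd.HDFStore('repr.h5', mode='w') as store:
    store.put('fixed_key', df)
    store.put('table_key', df, format='table')
    print(store)                           # lists both storers
    print(store.get_storer('table_key'))   # frame_table representation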
\"\"\" return my storable \"\"\"\n return self.group\n\n @property\n def is_exists(self):\n return False\n\n @property\n def nrows(self):\n return getattr(self.storable, 'nrows', None)\n\n def validate(self, other):\n \"\"\" validate against an existing storable \"\"\"\n if other is None:\n return\n return True\n\n def validate_version(self, where=None):\n \"\"\" are we trying to operate on an old version? \"\"\"\n return True\n\n def infer_axes(self):\n \"\"\" infer the axes of my storer\n return a boolean indicating if we have a valid storer or not \"\"\"\n\n s = self.storable\n if s is None:\n return False\n self.get_attrs()\n return True\n\n def read(self, **kwargs):\n raise NotImplementedError(\n \"cannot read on an abstract storer: subclasses should implement\")\n\n def write(self, **kwargs):\n raise NotImplementedError(\n \"cannot write on an abstract storer: sublcasses should implement\")\n\n def delete(self, where=None, start=None, stop=None, **kwargs):\n \"\"\"\n support fully deleting the node in its entirety (only) - where\n specification must be None\n \"\"\"\n if where is None and start is None and stop is None:\n self._handle.remove_node(self.group, recursive=True)\n return None\n\n raise TypeError(\"cannot delete on an abstract storer\")\n\n\nclass GenericFixed(Fixed):\n\n \"\"\" a generified fixed version \"\"\"\n _index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'}\n _reverse_index_map = dict([(v, k)\n for k, v in compat.iteritems(_index_type_map)])\n attributes = []\n\n # indexer helpders\n def _class_to_alias(self, cls):\n return self._index_type_map.get(cls, '')\n\n def _alias_to_class(self, alias):\n if isinstance(alias, type): # pragma: no cover\n # compat: for a short period of time master stored types\n return alias\n return self._reverse_index_map.get(alias, Index)\n\n def _get_index_factory(self, klass):\n if klass == DatetimeIndex:\n def f(values, freq=None, tz=None):\n return DatetimeIndex._simple_new(values, None, freq=freq,\n tz=tz)\n return f\n elif klass == PeriodIndex:\n def f(values, freq=None, tz=None):\n return PeriodIndex._simple_new(values, None, freq=freq)\n return f\n\n return klass\n\n def validate_read(self, kwargs):\n \"\"\"\n remove table keywords from kwargs and return\n raise if any keywords are passed which are not-None\n \"\"\"\n kwargs = copy.copy(kwargs)\n\n columns = kwargs.pop('columns', None)\n if columns is not None:\n raise TypeError(\"cannot pass a column specification when reading \"\n \"a Fixed format store. this store must be \"\n \"selected in its entirety\")\n where = kwargs.pop('where', None)\n if where is not None:\n raise TypeError(\"cannot pass a where specification when reading \"\n \"from a Fixed format store. 
this store must be \"\n \"selected in its entirety\")\n return kwargs\n\n @property\n def is_exists(self):\n return True\n\n def set_attrs(self):\n \"\"\" set our object attributes \"\"\"\n self.attrs.encoding = self.encoding\n\n def get_attrs(self):\n \"\"\" retrieve our attributes \"\"\"\n self.encoding = _ensure_encoding(getattr(self.attrs, 'encoding', None))\n for n in self.attributes:\n setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))\n\n def write(self, obj, **kwargs):\n self.set_attrs()\n\n def read_array(self, key, start=None, stop=None):\n \"\"\" read an array for the specified node (off of group \"\"\"\n import tables\n node = getattr(self.group, key)\n data = node[start:stop]\n attrs = node._v_attrs\n\n transposed = getattr(attrs, 'transposed', False)\n\n if isinstance(node, tables.VLArray):\n ret = data[0]\n else:\n dtype = getattr(attrs, 'value_type', None)\n shape = getattr(attrs, 'shape', None)\n\n if shape is not None:\n # length 0 axis\n ret = np.empty(shape, dtype=dtype)\n else:\n ret = data\n\n if dtype == u('datetime64'):\n\n # reconstruct a timezone if indicated\n ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)\n\n elif dtype == u('timedelta64'):\n ret = np.asarray(ret, dtype='m8[ns]')\n\n if transposed:\n return ret.T\n else:\n return ret\n\n def read_index(self, key, **kwargs):\n variety = _ensure_decoded(getattr(self.attrs, '%s_variety' % key))\n\n if variety == u('multi'):\n return self.read_multi_index(key, **kwargs)\n elif variety == u('block'):\n return self.read_block_index(key, **kwargs)\n elif variety == u('sparseint'):\n return self.read_sparse_intindex(key, **kwargs)\n elif variety == u('regular'):\n _, index = self.read_index_node(getattr(self.group, key), **kwargs)\n return index\n else: # pragma: no cover\n raise TypeError('unrecognized index variety: %s' % variety)\n\n def write_index(self, key, index):\n if isinstance(index, MultiIndex):\n setattr(self.attrs, '%s_variety' % key, 'multi')\n self.write_multi_index(key, index)\n elif isinstance(index, BlockIndex):\n setattr(self.attrs, '%s_variety' % key, 'block')\n self.write_block_index(key, index)\n elif isinstance(index, IntIndex):\n setattr(self.attrs, '%s_variety' % key, 'sparseint')\n self.write_sparse_intindex(key, index)\n else:\n setattr(self.attrs, '%s_variety' % key, 'regular')\n converted = _convert_index(index, self.encoding,\n self.format_type).set_name('index')\n\n self.write_array(key, converted.values)\n\n node = getattr(self.group, key)\n node._v_attrs.kind = converted.kind\n node._v_attrs.name = index.name\n\n if isinstance(index, (DatetimeIndex, PeriodIndex)):\n node._v_attrs.index_class = self._class_to_alias(type(index))\n\n if hasattr(index, 'freq'):\n node._v_attrs.freq = index.freq\n\n if hasattr(index, 'tz') and index.tz is not None:\n node._v_attrs.tz = _get_tz(index.tz)\n\n def write_block_index(self, key, index):\n self.write_array('%s_blocs' % key, index.blocs)\n self.write_array('%s_blengths' % key, index.blengths)\n setattr(self.attrs, '%s_length' % key, index.length)\n\n def read_block_index(self, key, **kwargs):\n length = getattr(self.attrs, '%s_length' % key)\n blocs = self.read_array('%s_blocs' % key, **kwargs)\n blengths = self.read_array('%s_blengths' % key, **kwargs)\n return BlockIndex(length, blocs, blengths)\n\n def write_sparse_intindex(self, key, index):\n self.write_array('%s_indices' % key, index.indices)\n setattr(self.attrs, '%s_length' % key, index.length)\n\n def read_sparse_intindex(self, key, **kwargs):\n length = 
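# Illustrative sketch: validate_read above rejects where= and columns= for
# Fixed format stores, which can only be read back in their entirety.  The
# file and key names are hypothetical.
import pandas as pd

df = pd.DataFrame({'a': range(5), 'b': range(5)})

with pd.HDFStore('fixed_only.h5', mode='w') as store:
    store.put('f', df)                    # fixed format
    whole = store.select('f')             # fine: reads the whole frame
    try:
        store.select('f', where='a > 2')  # raises TypeError
    except TypeError as err:
        print(err)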
getattr(self.attrs, '%s_length' % key)\n indices = self.read_array('%s_indices' % key, **kwargs)\n return IntIndex(length, indices)\n\n def write_multi_index(self, key, index):\n setattr(self.attrs, '%s_nlevels' % key, index.nlevels)\n\n for i, (lev, lab, name) in enumerate(zip(index.levels,\n index.labels,\n index.names)):\n # write the level\n level_key = '%s_level%d' % (key, i)\n conv_level = _convert_index(lev, self.encoding,\n self.format_type).set_name(level_key)\n self.write_array(level_key, conv_level.values)\n node = getattr(self.group, level_key)\n node._v_attrs.kind = conv_level.kind\n node._v_attrs.name = name\n\n # write the name\n setattr(node._v_attrs, '%s_name%d' % (key, i), name)\n\n # write the labels\n label_key = '%s_label%d' % (key, i)\n self.write_array(label_key, lab)\n\n def read_multi_index(self, key, **kwargs):\n nlevels = getattr(self.attrs, '%s_nlevels' % key)\n\n levels = []\n labels = []\n names = []\n for i in range(nlevels):\n level_key = '%s_level%d' % (key, i)\n name, lev = self.read_index_node(getattr(self.group, level_key),\n **kwargs)\n levels.append(lev)\n names.append(name)\n\n label_key = '%s_label%d' % (key, i)\n lab = self.read_array(label_key, **kwargs)\n labels.append(lab)\n\n return MultiIndex(levels=levels, labels=labels, names=names,\n verify_integrity=True)\n\n def read_index_node(self, node, start=None, stop=None):\n data = node[start:stop]\n # If the index was an empty array write_array_empty() will\n # have written a sentinel. Here we relace it with the original.\n if ('shape' in node._v_attrs and\n self._is_empty_array(getattr(node._v_attrs, 'shape'))):\n data = np.empty(getattr(node._v_attrs, 'shape'),\n dtype=getattr(node._v_attrs, 'value_type'))\n kind = _ensure_decoded(node._v_attrs.kind)\n name = None\n\n if 'name' in node._v_attrs:\n name = node._v_attrs.name\n\n index_class = self._alias_to_class(getattr(node._v_attrs,\n 'index_class', ''))\n factory = self._get_index_factory(index_class)\n\n kwargs = {}\n if u('freq') in node._v_attrs:\n kwargs['freq'] = node._v_attrs['freq']\n\n if u('tz') in node._v_attrs:\n kwargs['tz'] = node._v_attrs['tz']\n\n if kind in (u('date'), u('datetime')):\n index = factory(_unconvert_index(data, kind,\n encoding=self.encoding),\n dtype=object, **kwargs)\n else:\n index = factory(_unconvert_index(data, kind,\n encoding=self.encoding), **kwargs)\n\n index.name = name\n\n return name, index\n\n def write_array_empty(self, key, value):\n \"\"\" write a 0-len array \"\"\"\n\n # ugly hack for length 0 axes\n arr = np.empty((1,) * value.ndim)\n self._handle.create_array(self.group, key, arr)\n getattr(self.group, key)._v_attrs.value_type = str(value.dtype)\n getattr(self.group, key)._v_attrs.shape = value.shape\n\n def _is_empty_array(self, shape):\n \"\"\"Returns true if any axis is zero length.\"\"\"\n return any(x == 0 for x in shape)\n\n def write_array(self, key, value, items=None):\n if key in self.group:\n self._handle.remove_node(self.group, key)\n\n # Transform needed to interface with pytables row/col notation\n empty_array = self._is_empty_array(value.shape)\n transposed = False\n\n if is_categorical_dtype(value):\n raise NotImplementedError('Cannot store a category dtype in '\n 'a HDF5 dataset that uses format='\n '\"fixed\". 
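# Illustrative sketch: write_index above records the index class plus freq
# and tz as node attributes, and a MultiIndex is persisted level-by-level by
# write_multi_index, so both round-trip on read.  The file and key names are
# hypothetical.
import numpy as np
import pandas as pd

dated = pd.DataFrame({'v': range(4)},
                     index=pd.date_range('2016-01-01', periods=4, freq='D'))
mi = pd.MultiIndex.from_product([['a', 'b'], [1, 2]], names=['letter', 'num'])
levelled = pd.DataFrame({'v': np.arange(4)}, index=mi)

with pd.HDFStore('indexes.h5', mode='w') as store:
    store.put('dated', dated)
    store.put('levelled', levelled)
    print(store.select('dated').index.freq)       # frequency is preserved
    print(store.select('levelled').index.names)   # ['letter', 'num']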
Use format=\"table\".')\n\n if not empty_array:\n value = value.T\n transposed = True\n\n if self._filters is not None:\n atom = None\n try:\n # get the atom for this datatype\n atom = _tables().Atom.from_dtype(value.dtype)\n except ValueError:\n pass\n\n if atom is not None:\n # create an empty chunked array and fill it from value\n if not empty_array:\n ca = self._handle.create_carray(self.group, key, atom,\n value.shape,\n filters=self._filters)\n ca[:] = value\n getattr(self.group, key)._v_attrs.transposed = transposed\n\n else:\n self.write_array_empty(key, value)\n\n return\n\n if value.dtype.type == np.object_:\n\n # infer the type, warn if we have a non-string type here (for\n # performance)\n inferred_type = lib.infer_dtype(value.ravel())\n if empty_array:\n pass\n elif inferred_type == 'string':\n pass\n else:\n try:\n items = list(items)\n except:\n pass\n ws = performance_doc % (inferred_type, key, items)\n warnings.warn(ws, PerformanceWarning, stacklevel=7)\n\n vlarr = self._handle.create_vlarray(self.group, key,\n _tables().ObjectAtom())\n vlarr.append(value)\n else:\n if empty_array:\n self.write_array_empty(key, value)\n else:\n if is_datetime64_dtype(value.dtype):\n self._handle.create_array(\n self.group, key, value.view('i8'))\n getattr(\n self.group, key)._v_attrs.value_type = 'datetime64'\n elif is_datetime64tz_dtype(value.dtype):\n # store as UTC\n # with a zone\n self._handle.create_array(self.group, key,\n value.asi8)\n\n node = getattr(self.group, key)\n node._v_attrs.tz = _get_tz(value.tz)\n node._v_attrs.value_type = 'datetime64'\n elif is_timedelta64_dtype(value.dtype):\n self._handle.create_array(\n self.group, key, value.view('i8'))\n getattr(\n self.group, key)._v_attrs.value_type = 'timedelta64'\n else:\n self._handle.create_array(self.group, key, value)\n\n getattr(self.group, key)._v_attrs.transposed = transposed\n\n\nclass LegacyFixed(GenericFixed):\n\n def read_index_legacy(self, key, start=None, stop=None):\n node = getattr(self.group, key)\n data = node[start:stop]\n kind = node._v_attrs.kind\n return _unconvert_index_legacy(data, kind, encoding=self.encoding)\n\n\nclass LegacySeriesFixed(LegacyFixed):\n\n def read(self, **kwargs):\n kwargs = self.validate_read(kwargs)\n index = self.read_index_legacy('index')\n values = self.read_array('values')\n return Series(values, index=index)\n\n\nclass LegacyFrameFixed(LegacyFixed):\n\n def read(self, **kwargs):\n kwargs = self.validate_read(kwargs)\n index = self.read_index_legacy('index')\n columns = self.read_index_legacy('columns')\n values = self.read_array('values')\n return DataFrame(values, index=index, columns=columns)\n\n\nclass SeriesFixed(GenericFixed):\n pandas_kind = u('series')\n attributes = ['name']\n\n @property\n def shape(self):\n try:\n return len(getattr(self.group, 'values')),\n except:\n return None\n\n def read(self, **kwargs):\n kwargs = self.validate_read(kwargs)\n index = self.read_index('index', **kwargs)\n values = self.read_array('values', **kwargs)\n return Series(values, index=index, name=self.name)\n\n def write(self, obj, **kwargs):\n super(SeriesFixed, self).write(obj, **kwargs)\n self.write_index('index', obj.index)\n self.write_array('values', obj.values)\n self.attrs.name = obj.name\n\n\nclass SparseFixed(GenericFixed):\n\n def validate_read(self, kwargs):\n \"\"\"\n we don't support start, stop kwds in Sparse\n \"\"\"\n kwargs = super(SparseFixed, self).validate_read(kwargs)\n if 'start' in kwargs or 'stop' in kwargs:\n raise NotImplementedError(\"start and/or stop are 
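# Illustrative sketch: when the store is opened with a compression filter,
# write_array above writes numeric blocks as chunked CArrays using that
# filter; object-dtype blocks fall back to an ObjectAtom VLArray and emit a
# PerformanceWarning.  The file and key names are hypothetical.
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(1000, 4), columns=list('abcd'))

store = pd.HDFStore('compressed.h5', mode='w', complevel=9, complib='blosc')
store.put('dense', df)   # compressed, fixed format
store.close()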
not supported \"\n \"in fixed Sparse reading\")\n return kwargs\n\n\nclass SparseSeriesFixed(SparseFixed):\n pandas_kind = u('sparse_series')\n attributes = ['name', 'fill_value', 'kind']\n\n def read(self, **kwargs):\n kwargs = self.validate_read(kwargs)\n index = self.read_index('index')\n sp_values = self.read_array('sp_values')\n sp_index = self.read_index('sp_index')\n return SparseSeries(sp_values, index=index, sparse_index=sp_index,\n kind=self.kind or u('block'),\n fill_value=self.fill_value,\n name=self.name)\n\n def write(self, obj, **kwargs):\n super(SparseSeriesFixed, self).write(obj, **kwargs)\n self.write_index('index', obj.index)\n self.write_index('sp_index', obj.sp_index)\n self.write_array('sp_values', obj.sp_values)\n self.attrs.name = obj.name\n self.attrs.fill_value = obj.fill_value\n self.attrs.kind = obj.kind\n\n\nclass SparseFrameFixed(SparseFixed):\n pandas_kind = u('sparse_frame')\n attributes = ['default_kind', 'default_fill_value']\n\n def read(self, **kwargs):\n kwargs = self.validate_read(kwargs)\n columns = self.read_index('columns')\n sdict = {}\n for c in columns:\n key = 'sparse_series_%s' % c\n s = SparseSeriesFixed(self.parent, getattr(self.group, key))\n s.infer_axes()\n sdict[c] = s.read()\n return SparseDataFrame(sdict, columns=columns,\n default_kind=self.default_kind,\n default_fill_value=self.default_fill_value)\n\n def write(self, obj, **kwargs):\n \"\"\" write it as a collection of individual sparse series \"\"\"\n super(SparseFrameFixed, self).write(obj, **kwargs)\n for name, ss in compat.iteritems(obj):\n key = 'sparse_series_%s' % name\n if key not in self.group._v_children:\n node = self._handle.create_group(self.group, key)\n else:\n node = getattr(self.group, key)\n s = SparseSeriesFixed(self.parent, node)\n s.write(ss)\n self.attrs.default_fill_value = obj.default_fill_value\n self.attrs.default_kind = obj.default_kind\n self.write_index('columns', obj.columns)\n\n\nclass BlockManagerFixed(GenericFixed):\n attributes = ['ndim', 'nblocks']\n is_shape_reversed = False\n\n @property\n def shape(self):\n try:\n ndim = self.ndim\n\n # items\n items = 0\n for i in range(self.nblocks):\n node = getattr(self.group, 'block%d_items' % i)\n shape = getattr(node, 'shape', None)\n if shape is not None:\n items += shape[0]\n\n # data shape\n node = getattr(self.group, 'block0_values')\n shape = getattr(node, 'shape', None)\n if shape is not None:\n shape = list(shape[0:(ndim - 1)])\n else:\n shape = []\n\n shape.append(items)\n\n # hacky - this works for frames, but is reversed for panels\n if self.is_shape_reversed:\n shape = shape[::-1]\n\n return shape\n except:\n return None\n\n def read(self, start=None, stop=None, **kwargs):\n # start, stop applied to rows, so 0th axis only\n\n kwargs = self.validate_read(kwargs)\n select_axis = self.obj_type()._get_block_manager_axis(0)\n\n axes = []\n for i in range(self.ndim):\n\n _start, _stop = (start, stop) if i == select_axis else (None, None)\n ax = self.read_index('axis%d' % i, start=_start, stop=_stop)\n axes.append(ax)\n\n items = axes[0]\n blocks = []\n for i in range(self.nblocks):\n\n blk_items = self.read_index('block%d_items' % i)\n values = self.read_array('block%d_values' % i,\n start=_start, stop=_stop)\n blk = make_block(values,\n placement=items.get_indexer(blk_items))\n blocks.append(blk)\n\n return self.obj_type(BlockManager(blocks, axes))\n\n def write(self, obj, **kwargs):\n super(BlockManagerFixed, self).write(obj, **kwargs)\n data = obj._data\n if not data.is_consolidated():\n data = 
data.consolidate()\n\n self.attrs.ndim = data.ndim\n for i, ax in enumerate(data.axes):\n if i == 0:\n if not ax.is_unique:\n raise ValueError(\n \"Columns index has to be unique for fixed format\")\n self.write_index('axis%d' % i, ax)\n\n # Supporting mixed-type DataFrame objects...nontrivial\n self.attrs.nblocks = len(data.blocks)\n for i, blk in enumerate(data.blocks):\n # I have no idea why, but writing values before items fixed #2299\n blk_items = data.items.take(blk.mgr_locs)\n self.write_array('block%d_values' % i, blk.values, items=blk_items)\n self.write_index('block%d_items' % i, blk_items)\n\n\nclass FrameFixed(BlockManagerFixed):\n pandas_kind = u('frame')\n obj_type = DataFrame\n\n\nclass PanelFixed(BlockManagerFixed):\n pandas_kind = u('wide')\n obj_type = Panel\n is_shape_reversed = True\n\n def write(self, obj, **kwargs):\n obj._consolidate_inplace()\n return super(PanelFixed, self).write(obj, **kwargs)\n\n\nclass Table(Fixed):\n\n \"\"\" represent a table:\n facilitate read/write of various types of tables\n\n Attrs in Table Node\n -------------------\n These are attributes that are store in the main table node, they are\n necessary to recreate these tables when read back in.\n\n index_axes : a list of tuples of the (original indexing axis and\n index column)\n non_index_axes: a list of tuples of the (original index axis and\n columns on a non-indexing axis)\n values_axes : a list of the columns which comprise the data of this\n table\n data_columns : a list of the columns that we are allowing indexing\n (these become single columns in values_axes), or True to force all\n columns\n nan_rep : the string to use for nan representations for string\n objects\n levels : the names of levels\n metadata : the names of the metadata columns\n\n \"\"\"\n pandas_kind = u('wide_table')\n table_type = None\n levels = 1\n is_table = True\n is_shape_reversed = False\n\n def __init__(self, *args, **kwargs):\n super(Table, self).__init__(*args, **kwargs)\n self.index_axes = []\n self.non_index_axes = []\n self.values_axes = []\n self.data_columns = []\n self.metadata = []\n self.info = dict()\n self.nan_rep = None\n self.selection = None\n\n @property\n def table_type_short(self):\n return self.table_type.split('_')[0]\n\n @property\n def format_type(self):\n return 'table'\n\n def __unicode__(self):\n \"\"\" return a pretty representatgion of myself \"\"\"\n self.infer_axes()\n dc = \",dc->[%s]\" % ','.join(\n self.data_columns) if len(self.data_columns) else ''\n\n ver = ''\n if self.is_old_version:\n ver = \"[%s]\" % '.'.join([str(x) for x in self.version])\n\n return \"%-12.12s%s (typ->%s,nrows->%s,ncols->%s,indexers->[%s]%s)\" % (\n self.pandas_type, ver, self.table_type_short, self.nrows,\n self.ncols, ','.join([a.name for a in self.index_axes]), dc\n )\n\n def __getitem__(self, c):\n \"\"\" return the axis for c \"\"\"\n for a in self.axes:\n if c == a.name:\n return a\n return None\n\n def validate(self, other):\n \"\"\" validate against an existing table \"\"\"\n if other is None:\n return\n\n if other.table_type != self.table_type:\n raise TypeError(\"incompatible table_type with existing [%s - %s]\" %\n (other.table_type, self.table_type))\n\n for c in ['index_axes', 'non_index_axes', 'values_axes']:\n sv = getattr(self, c, None)\n ov = getattr(other, c, None)\n if sv != ov:\n\n # show the error for the specific axes\n for i, sax in enumerate(sv):\n oax = ov[i]\n if sax != oax:\n raise ValueError(\n \"invalid combinate of [%s] on appending data [%s] \"\n \"vs current table [%s]\" 
% (c, sax, oax))\n\n # should never get here\n raise Exception(\n \"invalid combinate of [%s] on appending data [%s] vs \"\n \"current table [%s]\" % (c, sv, ov))\n\n @property\n def is_multi_index(self):\n \"\"\"the levels attribute is 1 or a list in the case of a multi-index\"\"\"\n return isinstance(self.levels, list)\n\n def validate_metadata(self, existing):\n \"\"\" create / validate metadata \"\"\"\n self.metadata = [\n c.name for c in self.values_axes if c.metadata is not None]\n\n def validate_multiindex(self, obj):\n \"\"\"validate that we can store the multi-index; reset and return the\n new object\n \"\"\"\n levels = [l if l is not None else \"level_{0}\".format(i)\n for i, l in enumerate(obj.index.names)]\n try:\n return obj.reset_index(), levels\n except ValueError:\n raise ValueError(\"duplicate names/columns in the multi-index when \"\n \"storing as a table\")\n\n @property\n def nrows_expected(self):\n \"\"\" based on our axes, compute the expected nrows \"\"\"\n return np.prod([i.cvalues.shape[0] for i in self.index_axes])\n\n @property\n def is_exists(self):\n \"\"\" has this table been created \"\"\"\n return u('table') in self.group\n\n @property\n def storable(self):\n return getattr(self.group, 'table', None)\n\n @property\n def table(self):\n \"\"\" return the table group (this is my storable) \"\"\"\n return self.storable\n\n @property\n def dtype(self):\n return self.table.dtype\n\n @property\n def description(self):\n return self.table.description\n\n @property\n def axes(self):\n return itertools.chain(self.index_axes, self.values_axes)\n\n @property\n def ncols(self):\n \"\"\" the number of total columns in the values axes \"\"\"\n return sum([len(a.values) for a in self.values_axes])\n\n @property\n def is_transposed(self):\n return False\n\n @property\n def data_orientation(self):\n \"\"\"return a tuple of my permutated axes, non_indexable at the front\"\"\"\n return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes],\n [int(a.axis) for a in self.index_axes]))\n\n def queryables(self):\n \"\"\" return a dict of the kinds allowable columns for this object \"\"\"\n\n # compute the values_axes queryables\n return dict(\n [(a.cname, a) for a in self.index_axes] +\n [(self.storage_obj_type._AXIS_NAMES[axis], None)\n for axis, values in self.non_index_axes] +\n [(v.cname, v) for v in self.values_axes\n if v.name in set(self.data_columns)]\n )\n\n def index_cols(self):\n \"\"\" return a list of my index cols \"\"\"\n return [(i.axis, i.cname) for i in self.index_axes]\n\n def values_cols(self):\n \"\"\" return a list of my values cols \"\"\"\n return [i.cname for i in self.values_axes]\n\n def _get_metadata_path(self, key):\n \"\"\" return the metadata pathname for this key \"\"\"\n return \"{group}/meta/{key}/meta\".format(group=self.group._v_pathname,\n key=key)\n\n def write_metadata(self, key, values):\n \"\"\"\n write out a meta data array to the key as a fixed-format Series\n\n Parameters\n ----------\n key : string\n values : ndarray\n\n \"\"\"\n values = Series(values)\n self.parent.put(self._get_metadata_path(key), values, format='table',\n encoding=self.encoding, nan_rep=self.nan_rep)\n\n def read_metadata(self, key):\n \"\"\" return the meta data array for this key \"\"\"\n if getattr(getattr(self.group, 'meta', None), key, None) is not None:\n return self.parent.select(self._get_metadata_path(key))\n return None\n\n def set_info(self):\n \"\"\" update our table index info \"\"\"\n self.attrs.info = self.info\n\n def set_attrs(self):\n \"\"\" 
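# Illustrative sketch: queryables() above is what limits where= clauses to
# the index axes plus the declared data_columns; other value columns cannot
# be referenced in a query.  The file and key names are hypothetical.
import pandas as pd

df = pd.DataFrame({'a': range(6), 'b': list('xxyyzz'), 'c': range(6)})

with pd.HDFStore('query.h5', mode='w') as store:
    store.append('t', df, data_columns=['b'])
    print(store.select('t', where='b == "y"'))   # ok: data column
    print(store.select('t', where='index < 3'))  # ok: index axis
    # where='c > 2' would raise, because 'c' was not made a data column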
set our table type & indexables \"\"\"\n self.attrs.table_type = str(self.table_type)\n self.attrs.index_cols = self.index_cols()\n self.attrs.values_cols = self.values_cols()\n self.attrs.non_index_axes = self.non_index_axes\n self.attrs.data_columns = self.data_columns\n self.attrs.nan_rep = self.nan_rep\n self.attrs.encoding = self.encoding\n self.attrs.levels = self.levels\n self.attrs.metadata = self.metadata\n self.set_info()\n\n def get_attrs(self):\n \"\"\" retrieve our attributes \"\"\"\n self.non_index_axes = getattr(\n self.attrs, 'non_index_axes', None) or []\n self.data_columns = getattr(\n self.attrs, 'data_columns', None) or []\n self.info = getattr(\n self.attrs, 'info', None) or dict()\n self.nan_rep = getattr(self.attrs, 'nan_rep', None)\n self.encoding = _ensure_encoding(\n getattr(self.attrs, 'encoding', None))\n self.levels = getattr(\n self.attrs, 'levels', None) or []\n self.index_axes = [\n a.infer(self) for a in self.indexables if a.is_an_indexable\n ]\n self.values_axes = [\n a.infer(self) for a in self.indexables if not a.is_an_indexable\n ]\n self.metadata = getattr(\n self.attrs, 'metadata', None) or []\n\n def validate_version(self, where=None):\n \"\"\" are we trying to operate on an old version? \"\"\"\n if where is not None:\n if (self.version[0] <= 0 and self.version[1] <= 10 and\n self.version[2] < 1):\n ws = incompatibility_doc % '.'.join(\n [str(x) for x in self.version])\n warnings.warn(ws, IncompatibilityWarning)\n\n def validate_min_itemsize(self, min_itemsize):\n \"\"\"validate the min_itemisze doesn't contain items that are not in the\n axes this needs data_columns to be defined\n \"\"\"\n if min_itemsize is None:\n return\n if not isinstance(min_itemsize, dict):\n return\n\n q = self.queryables()\n for k, v in min_itemsize.items():\n\n # ok, apply generally\n if k == 'values':\n continue\n if k not in q:\n raise ValueError(\n \"min_itemsize has the key [%s] which is not an axis or \"\n \"data_column\" % k)\n\n @property\n def indexables(self):\n \"\"\" create/cache the indexables if they don't exist \"\"\"\n if self._indexables is None:\n\n self._indexables = []\n\n # index columns\n self._indexables.extend([\n IndexCol(name=name, axis=axis, pos=i)\n for i, (axis, name) in enumerate(self.attrs.index_cols)\n ])\n\n # values columns\n dc = set(self.data_columns)\n base_pos = len(self._indexables)\n\n def f(i, c):\n klass = DataCol\n if c in dc:\n klass = DataIndexableCol\n return klass.create_for_block(i=i, name=c, pos=base_pos + i,\n version=self.version)\n\n self._indexables.extend(\n [f(i, c) for i, c in enumerate(self.attrs.values_cols)])\n\n return self._indexables\n\n def create_index(self, columns=None, optlevel=None, kind=None):\n \"\"\"\n Create a pytables index on the specified columns\n note: cannot index Time64Col() or ComplexCol currently;\n PyTables must be >= 3.0\n\n Parameters\n ----------\n columns : False (don't create an index), True (create all columns\n index), None or list_like (the indexers to index)\n optlevel: optimization level (defaults to 6)\n kind : kind of index (defaults to 'medium')\n\n Exceptions\n ----------\n raises if the node is not a table\n\n \"\"\"\n\n if not self.infer_axes():\n return\n if columns is False:\n return\n\n # index all indexables and data_columns\n if columns is None or columns is True:\n columns = [a.cname for a in self.axes if a.is_data_indexable]\n if not isinstance(columns, (tuple, list)):\n columns = [columns]\n\n kw = dict()\n if optlevel is not None:\n kw['optlevel'] = optlevel\n if kind 
is not None:\n kw['kind'] = kind\n\n table = self.table\n for c in columns:\n v = getattr(table.cols, c, None)\n if v is not None:\n\n # remove the index if the kind/optlevel have changed\n if v.is_indexed:\n index = v.index\n cur_optlevel = index.optlevel\n cur_kind = index.kind\n\n if kind is not None and cur_kind != kind:\n v.remove_index()\n else:\n kw['kind'] = cur_kind\n\n if optlevel is not None and cur_optlevel != optlevel:\n v.remove_index()\n else:\n kw['optlevel'] = cur_optlevel\n\n # create the index\n if not v.is_indexed:\n if v.type.startswith('complex'):\n raise TypeError(\n 'Columns containing complex values can be stored '\n 'but cannot'\n ' be indexed when using table format. Either use '\n 'fixed format, set index=False, or do not include '\n 'the columns containing complex values to '\n 'data_columns when initializing the table.')\n v.create_index(**kw)\n\n def read_axes(self, where, **kwargs):\n \"\"\"create and return the axes sniffed from the table: return boolean\n for success\n \"\"\"\n\n # validate the version\n self.validate_version(where)\n\n # infer the data kind\n if not self.infer_axes():\n return False\n\n # create the selection\n self.selection = Selection(self, where=where, **kwargs)\n values = self.selection.select()\n\n # convert the data\n for a in self.axes:\n a.set_info(self.info)\n a.convert(values, nan_rep=self.nan_rep, encoding=self.encoding)\n\n return True\n\n def get_object(self, obj):\n \"\"\" return the data for this obj \"\"\"\n return obj\n\n def validate_data_columns(self, data_columns, min_itemsize):\n \"\"\"take the input data_columns and min_itemize and create a data\n columns spec\n \"\"\"\n\n if not len(self.non_index_axes):\n return []\n\n axis, axis_labels = self.non_index_axes[0]\n info = self.info.get(axis, dict())\n if info.get('type') == 'MultiIndex' and data_columns:\n raise ValueError(\"cannot use a multi-index on axis [{0}] with \"\n \"data_columns {1}\".format(axis, data_columns))\n\n # evaluate the passed data_columns, True == use all columns\n # take only valide axis labels\n if data_columns is True:\n data_columns = axis_labels\n elif data_columns is None:\n data_columns = []\n\n # if min_itemsize is a dict, add the keys (exclude 'values')\n if isinstance(min_itemsize, dict):\n\n existing_data_columns = set(data_columns)\n data_columns.extend([\n k for k in min_itemsize.keys()\n if k != 'values' and k not in existing_data_columns\n ])\n\n # return valid columns in the order of our axis\n return [c for c in data_columns if c in axis_labels]\n\n def create_axes(self, axes, obj, validate=True, nan_rep=None,\n data_columns=None, min_itemsize=None, **kwargs):\n \"\"\" create and return the axes\n leagcy tables create an indexable column, indexable index,\n non-indexable fields\n\n Parameters:\n -----------\n axes: a list of the axes in order to create (names or numbers of\n the axes)\n obj : the object to create axes on\n validate: validate the obj against an existing object already\n written\n min_itemsize: a dict of the min size for a column in bytes\n nan_rep : a values to use for string column nan_rep\n encoding : the encoding for string values\n data_columns : a list of columns that we want to create separate to\n allow indexing (or True will force all columns)\n\n \"\"\"\n\n # set the default axes if needed\n if axes is None:\n try:\n axes = _AXES_MAP[type(obj)]\n except:\n raise TypeError(\"cannot properly create the storer for: \"\n \"[group->%s,value->%s]\"\n % (self.group._v_name, type(obj)))\n\n # map axes to 
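# Illustrative sketch: data_columns=True is expanded by validate_data_columns
# above so that every column becomes its own queryable data column instead
# of being packed into shared values blocks.  The file and key names are
# hypothetical.
import pandas as pd

df = pd.DataFrame({'a': range(4), 'b': ['w', 'x', 'y', 'z']})

with pd.HDFStore('all_dc.h5', mode='w') as store:
    store.append('t', df, data_columns=True)
    print(store.select('t', where='a > 1 & b != "z"'))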
numbers\n axes = [obj._get_axis_number(a) for a in axes]\n\n # do we have an existing table (if so, use its axes & data_columns)\n if self.infer_axes():\n existing_table = self.copy()\n existing_table.infer_axes()\n axes = [a.axis for a in existing_table.index_axes]\n data_columns = existing_table.data_columns\n nan_rep = existing_table.nan_rep\n self.encoding = existing_table.encoding\n self.info = copy.copy(existing_table.info)\n else:\n existing_table = None\n\n # currently support on ndim-1 axes\n if len(axes) != self.ndim - 1:\n raise ValueError(\n \"currently only support ndim-1 indexers in an AppendableTable\")\n\n # create according to the new data\n self.non_index_axes = []\n self.data_columns = []\n\n # nan_representation\n if nan_rep is None:\n nan_rep = 'nan'\n\n self.nan_rep = nan_rep\n\n # create axes to index and non_index\n index_axes_map = dict()\n for i, a in enumerate(obj.axes):\n\n if i in axes:\n name = obj._AXIS_NAMES[i]\n index_axes_map[i] = _convert_index(\n a, self.encoding, self.format_type\n ).set_name(name).set_axis(i)\n else:\n\n # we might be able to change the axes on the appending data if\n # necessary\n append_axis = list(a)\n if existing_table is not None:\n indexer = len(self.non_index_axes)\n exist_axis = existing_table.non_index_axes[indexer][1]\n if append_axis != exist_axis:\n\n # ahah! -> reindex\n if sorted(append_axis) == sorted(exist_axis):\n append_axis = exist_axis\n\n # the non_index_axes info\n info = _get_info(self.info, i)\n info['names'] = list(a.names)\n info['type'] = a.__class__.__name__\n\n self.non_index_axes.append((i, append_axis))\n\n # set axis positions (based on the axes)\n self.index_axes = [\n index_axes_map[a].set_pos(j).update_info(self.info)\n for j, a in enumerate(axes)\n ]\n j = len(self.index_axes)\n\n # check for column conflicts\n if validate:\n for a in self.axes:\n a.maybe_set_size(min_itemsize=min_itemsize)\n\n # reindex by our non_index_axes & compute data_columns\n for a in self.non_index_axes:\n obj = _reindex_axis(obj, a[0], a[1])\n\n def get_blk_items(mgr, blocks):\n return [mgr.items.take(blk.mgr_locs) for blk in blocks]\n\n # figure out data_columns and get out blocks\n block_obj = self.get_object(obj).consolidate()\n blocks = block_obj._data.blocks\n blk_items = get_blk_items(block_obj._data, blocks)\n if len(self.non_index_axes):\n axis, axis_labels = self.non_index_axes[0]\n data_columns = self.validate_data_columns(\n data_columns, min_itemsize)\n if len(data_columns):\n mgr = block_obj.reindex_axis(\n Index(axis_labels).difference(Index(data_columns)),\n axis=axis\n )._data\n\n blocks = list(mgr.blocks)\n blk_items = get_blk_items(mgr, blocks)\n for c in data_columns:\n mgr = block_obj.reindex_axis([c], axis=axis)._data\n blocks.extend(mgr.blocks)\n blk_items.extend(get_blk_items(mgr, mgr.blocks))\n\n # reorder the blocks in the same order as the existing_table if we can\n if existing_table is not None:\n by_items = dict([(tuple(b_items.tolist()), (b, b_items))\n for b, b_items in zip(blocks, blk_items)])\n new_blocks = []\n new_blk_items = []\n for ea in existing_table.values_axes:\n items = tuple(ea.values)\n try:\n b, b_items = by_items.pop(items)\n new_blocks.append(b)\n new_blk_items.append(b_items)\n except:\n raise ValueError(\n \"cannot match existing table structure for [%s] on \"\n \"appending data\" % ','.join(pprint_thing(item) for\n item in items))\n blocks = new_blocks\n blk_items = new_blk_items\n\n # add my values\n self.values_axes = []\n for i, (b, b_items) in enumerate(zip(blocks, 
blk_items)):\n\n # shape of the data column are the indexable axes\n klass = DataCol\n name = None\n\n # we have a data_column\n if (data_columns and len(b_items) == 1 and\n b_items[0] in data_columns):\n klass = DataIndexableCol\n name = b_items[0]\n self.data_columns.append(name)\n\n # make sure that we match up the existing columns\n # if we have an existing table\n if existing_table is not None and validate:\n try:\n existing_col = existing_table.values_axes[i]\n except:\n raise ValueError(\"Incompatible appended table [%s] with \"\n \"existing table [%s]\"\n % (blocks, existing_table.values_axes))\n else:\n existing_col = None\n\n try:\n col = klass.create_for_block(\n i=i, name=name, version=self.version)\n col.set_atom(block=b, block_items=b_items,\n existing_col=existing_col,\n min_itemsize=min_itemsize,\n nan_rep=nan_rep,\n encoding=self.encoding,\n info=self.info,\n **kwargs)\n col.set_pos(j)\n\n self.values_axes.append(col)\n except (NotImplementedError, ValueError, TypeError) as e:\n raise e\n except Exception as detail:\n raise Exception(\n \"cannot find the correct atom type -> \"\n \"[dtype->%s,items->%s] %s\"\n % (b.dtype.name, b_items, str(detail))\n )\n j += 1\n\n # validate our min_itemsize\n self.validate_min_itemsize(min_itemsize)\n\n # validate our metadata\n self.validate_metadata(existing_table)\n\n # validate the axes if we have an existing table\n if validate:\n self.validate(existing_table)\n\n def process_axes(self, obj, columns=None):\n \"\"\" process axes filters \"\"\"\n\n # make a copy to avoid side effects\n if columns is not None:\n columns = list(columns)\n\n # make sure to include levels if we have them\n if columns is not None and self.is_multi_index:\n for n in self.levels:\n if n not in columns:\n columns.insert(0, n)\n\n # reorder by any non_index_axes & limit to the select columns\n for axis, labels in self.non_index_axes:\n obj = _reindex_axis(obj, axis, labels, columns)\n\n # apply the selection filters (but keep in the same order)\n if self.selection.filter is not None:\n for field, op, filt in self.selection.filter.format():\n\n def process_filter(field, filt):\n\n for axis_name in obj._AXIS_NAMES.values():\n axis_number = obj._get_axis_number(axis_name)\n axis_values = obj._get_axis(axis_name)\n\n # see if the field is the name of an axis\n if field == axis_name:\n\n # if we have a multi-index, then need to include\n # the levels\n if self.is_multi_index:\n filt = filt.union(Index(self.levels))\n\n takers = op(axis_values, filt)\n return obj.ix._getitem_axis(takers,\n axis=axis_number)\n\n # this might be the name of a file IN an axis\n elif field in axis_values:\n\n # we need to filter on this dimension\n values = _ensure_index(getattr(obj, field).values)\n filt = _ensure_index(filt)\n\n # hack until we support reversed dim flags\n if isinstance(obj, DataFrame):\n axis_number = 1 - axis_number\n takers = op(values, filt)\n return obj.ix._getitem_axis(takers,\n axis=axis_number)\n\n raise ValueError(\n \"cannot find the field [%s] for filtering!\" % field)\n\n obj = process_filter(field, filt)\n\n return obj\n\n def create_description(self, complib=None, complevel=None,\n fletcher32=False, expectedrows=None):\n \"\"\" create the description of the table from the axes & values \"\"\"\n\n # provided expected rows if its passed\n if expectedrows is None:\n expectedrows = max(self.nrows_expected, 10000)\n\n d = dict(name='table', expectedrows=expectedrows)\n\n # description from the axes & values\n d['description'] = dict([(a.cname, a.typ) for a in 
self.axes])\n\n if complib:\n if complevel is None:\n complevel = self._complevel or 9\n filters = _tables().Filters(\n complevel=complevel, complib=complib,\n fletcher32=fletcher32 or self._fletcher32)\n d['filters'] = filters\n elif self._filters is not None:\n d['filters'] = self._filters\n\n return d\n\n def read_coordinates(self, where=None, start=None, stop=None, **kwargs):\n \"\"\"select coordinates (row numbers) from a table; return the\n coordinates object\n \"\"\"\n\n # validate the version\n self.validate_version(where)\n\n # infer the data kind\n if not self.infer_axes():\n return False\n\n # create the selection\n self.selection = Selection(\n self, where=where, start=start, stop=stop, **kwargs)\n coords = self.selection.select_coords()\n if self.selection.filter is not None:\n for field, op, filt in self.selection.filter.format():\n data = self.read_column(\n field, start=coords.min(), stop=coords.max() + 1)\n coords = coords[\n op(data.iloc[coords - coords.min()], filt).values]\n\n return Index(coords)\n\n def read_column(self, column, where=None, start=None, stop=None, **kwargs):\n \"\"\"return a single column from the table, generally only indexables\n are interesting\n \"\"\"\n\n # validate the version\n self.validate_version()\n\n # infer the data kind\n if not self.infer_axes():\n return False\n\n if where is not None:\n raise TypeError(\"read_column does not currently accept a where \"\n \"clause\")\n\n # find the axes\n for a in self.axes:\n if column == a.name:\n\n if not a.is_data_indexable:\n raise ValueError(\n \"column [%s] can not be extracted individually; it is \"\n \"not data indexable\" % column)\n\n # column must be an indexable or a data column\n c = getattr(self.table.cols, column)\n a.set_info(self.info)\n return Series(_set_tz(a.convert(c[start:stop],\n nan_rep=self.nan_rep,\n encoding=self.encoding\n ).take_data(),\n a.tz, True), name=column)\n\n raise KeyError(\"column [%s] not found in the table\" % column)\n\n\nclass WORMTable(Table):\n\n \"\"\" a write-once read-many table: this format DOES NOT ALLOW appending to a\n table. writing is a one-time operation the data are stored in a format\n that allows for searching the data on disk\n \"\"\"\n table_type = u('worm')\n\n def read(self, **kwargs):\n \"\"\" read the indicies and the indexing array, calculate offset rows and\n return \"\"\"\n raise NotImplementedError(\"WORMTable needs to implement read\")\n\n def write(self, **kwargs):\n \"\"\" write in a format that we can search later on (but cannot append\n to): write out the indicies and the values using _write_array\n (e.g. 
a CArray) create an indexing table so that we can search\n \"\"\"\n raise NotImplementedError(\"WORKTable needs to implement write\")\n\n\nclass LegacyTable(Table):\n\n \"\"\" an appendable table: allow append/query/delete operations to a\n (possibily) already existing appendable table this table ALLOWS\n append (but doesn't require them), and stores the data in a format\n that can be easily searched\n\n \"\"\"\n _indexables = [\n IndexCol(name='index', axis=1, pos=0),\n IndexCol(name='column', axis=2, pos=1, index_kind='columns_kind'),\n DataCol(name='fields', cname='values', kind_attr='fields', pos=2)\n ]\n table_type = u('legacy')\n ndim = 3\n\n def write(self, **kwargs):\n raise TypeError(\"write operations are not allowed on legacy tables!\")\n\n def read(self, where=None, columns=None, **kwargs):\n \"\"\"we have n indexable columns, with an arbitrary number of data\n axes\n \"\"\"\n\n if not self.read_axes(where=where, **kwargs):\n return None\n\n factors = [Categorical.from_array(\n a.values, ordered=True) for a in self.index_axes]\n levels = [f.categories for f in factors]\n N = [len(f.categories) for f in factors]\n labels = [f.codes for f in factors]\n\n # compute the key\n key = _factor_indexer(N[1:], labels)\n\n objs = []\n if len(unique(key)) == len(key):\n\n sorter, _ = algos.groupsort_indexer(\n _ensure_int64(key), np.prod(N))\n sorter = _ensure_platform_int(sorter)\n\n # create the objs\n for c in self.values_axes:\n\n # the data need to be sorted\n sorted_values = c.take_data().take(sorter, axis=0)\n if sorted_values.ndim == 1:\n sorted_values = sorted_values.reshape(\n (sorted_values.shape[0], 1))\n\n take_labels = [l.take(sorter) for l in labels]\n items = Index(c.values)\n block = _block2d_to_blocknd(\n values=sorted_values, placement=np.arange(len(items)),\n shape=tuple(N), labels=take_labels, ref_items=items)\n\n # create the object\n mgr = BlockManager([block], [items] + levels)\n obj = self.obj_type(mgr)\n\n # permute if needed\n if self.is_transposed:\n obj = obj.transpose(\n *tuple(Series(self.data_orientation).argsort()))\n\n objs.append(obj)\n\n else:\n warnings.warn(duplicate_doc, DuplicateWarning, stacklevel=5)\n\n # reconstruct\n long_index = MultiIndex.from_arrays(\n [i.values for i in self.index_axes])\n\n for c in self.values_axes:\n lp = DataFrame(c.data, index=long_index, columns=c.values)\n\n # need a better algorithm\n tuple_index = long_index._tuple_index\n\n unique_tuples = lib.fast_unique(tuple_index.values)\n unique_tuples = _asarray_tuplesafe(unique_tuples)\n\n indexer = match(unique_tuples, tuple_index)\n indexer = _ensure_platform_int(indexer)\n\n new_index = long_index.take(indexer)\n new_values = lp.values.take(indexer, axis=0)\n\n lp = DataFrame(new_values, index=new_index, columns=lp.columns)\n objs.append(lp.to_panel())\n\n # create the composite object\n if len(objs) == 1:\n wp = objs[0]\n else:\n wp = concat(objs, axis=0, verify_integrity=False).consolidate()\n\n # apply the selection filters & axis orderings\n wp = self.process_axes(wp, columns=columns)\n\n return wp\n\n\nclass LegacyFrameTable(LegacyTable):\n\n \"\"\" support the legacy frame table \"\"\"\n pandas_kind = u('frame_table')\n table_type = u('legacy_frame')\n obj_type = Panel\n\n def read(self, *args, **kwargs):\n return super(LegacyFrameTable, self).read(*args, **kwargs)['value']\n\n\nclass LegacyPanelTable(LegacyTable):\n\n \"\"\" support the legacy panel table \"\"\"\n table_type = u('legacy_panel')\n obj_type = Panel\n\n\nclass AppendableTable(LegacyTable):\n\n \"\"\" 
suppor the new appendable table formats \"\"\"\n _indexables = None\n table_type = u('appendable')\n\n def write(self, obj, axes=None, append=False, complib=None,\n complevel=None, fletcher32=None, min_itemsize=None,\n chunksize=None, expectedrows=None, dropna=False, **kwargs):\n\n if not append and self.is_exists:\n self._handle.remove_node(self.group, 'table')\n\n # create the axes\n self.create_axes(axes=axes, obj=obj, validate=append,\n min_itemsize=min_itemsize,\n **kwargs)\n\n for a in self.axes:\n a.validate(self, append)\n\n if not self.is_exists:\n\n # create the table\n options = self.create_description(complib=complib,\n complevel=complevel,\n fletcher32=fletcher32,\n expectedrows=expectedrows)\n\n # set the table attributes\n self.set_attrs()\n\n # create the table\n self._handle.create_table(self.group, **options)\n else:\n pass\n # table = self.table\n\n # update my info\n self.set_info()\n\n # validate the axes and set the kinds\n for a in self.axes:\n a.validate_and_set(self, append)\n\n # add the rows\n self.write_data(chunksize, dropna=dropna)\n\n def write_data(self, chunksize, dropna=False):\n \"\"\" we form the data into a 2-d including indexes,values,mask\n write chunk-by-chunk \"\"\"\n\n names = self.dtype.names\n nrows = self.nrows_expected\n\n # if dropna==True, then drop ALL nan rows\n masks = []\n if dropna:\n\n for a in self.values_axes:\n\n # figure the mask: only do if we can successfully process this\n # column, otherwise ignore the mask\n mask = isnull(a.data).all(axis=0)\n if isinstance(mask, np.ndarray):\n masks.append(mask.astype('u1', copy=False))\n\n # consolidate masks\n if len(masks):\n mask = masks[0]\n for m in masks[1:]:\n mask = mask & m\n mask = mask.ravel()\n else:\n mask = None\n\n # broadcast the indexes if needed\n indexes = [a.cvalues for a in self.index_axes]\n nindexes = len(indexes)\n bindexes = []\n for i, idx in enumerate(indexes):\n\n # broadcast to all other indexes except myself\n if i > 0 and i < nindexes:\n repeater = np.prod(\n [indexes[bi].shape[0] for bi in range(0, i)])\n idx = np.tile(idx, repeater)\n\n if i < nindexes - 1:\n repeater = np.prod([indexes[bi].shape[0]\n for bi in range(i + 1, nindexes)])\n idx = np.repeat(idx, repeater)\n\n bindexes.append(idx)\n\n # transpose the values so first dimension is last\n # reshape the values if needed\n values = [a.take_data() for a in self.values_axes]\n values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1))\n for v in values]\n bvalues = []\n for i, v in enumerate(values):\n new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape\n bvalues.append(values[i].reshape(new_shape))\n\n # write the chunks\n if chunksize is None:\n chunksize = 100000\n\n rows = np.empty(min(chunksize, nrows), dtype=self.dtype)\n chunks = int(nrows / chunksize) + 1\n for i in range(chunks):\n start_i = i * chunksize\n end_i = min((i + 1) * chunksize, nrows)\n if start_i >= end_i:\n break\n\n self.write_data_chunk(\n rows,\n indexes=[a[start_i:end_i] for a in bindexes],\n mask=mask[start_i:end_i] if mask is not None else None,\n values=[v[start_i:end_i] for v in bvalues])\n\n def write_data_chunk(self, rows, indexes, mask, values):\n \"\"\"\n Parameters\n ----------\n rows : an empty memory space where we are putting the chunk\n indexes : an array of the indexes\n mask : an array of the masks\n values : an array of the values\n \"\"\"\n\n # 0 len\n for v in values:\n if not np.prod(v.shape):\n return\n\n try:\n nrows = indexes[0].shape[0]\n if nrows != len(rows):\n rows = np.empty(nrows, 
dtype=self.dtype)\n names = self.dtype.names\n nindexes = len(indexes)\n\n # indexes\n for i, idx in enumerate(indexes):\n rows[names[i]] = idx\n\n # values\n for i, v in enumerate(values):\n rows[names[i + nindexes]] = v\n\n # mask\n if mask is not None:\n m = ~mask.ravel().astype(bool, copy=False)\n if not m.all():\n rows = rows[m]\n\n except Exception as detail:\n raise Exception(\"cannot create row-data -> %s\" % detail)\n\n try:\n if len(rows):\n self.table.append(rows)\n self.table.flush()\n except Exception as detail:\n raise TypeError(\"tables cannot write this data -> %s\" % detail)\n\n def delete(self, where=None, start=None, stop=None, **kwargs):\n\n # delete all rows (and return the nrows)\n if where is None or not len(where):\n if start is None and stop is None:\n nrows = self.nrows\n self._handle.remove_node(self.group, recursive=True)\n else:\n # pytables<3.0 would remove a single row with stop=None\n if stop is None:\n stop = self.nrows\n nrows = self.table.remove_rows(start=start, stop=stop)\n self.table.flush()\n return nrows\n\n # infer the data kind\n if not self.infer_axes():\n return None\n\n # create the selection\n table = self.table\n self.selection = Selection(\n self, where, start=start, stop=stop, **kwargs)\n values = self.selection.select_coords()\n\n # delete the rows in reverse order\n l = Series(values).sort_values()\n ln = len(l)\n\n if ln:\n\n # construct groups of consecutive rows\n diff = l.diff()\n groups = list(diff[diff > 1].index)\n\n # 1 group\n if not len(groups):\n groups = [0]\n\n # final element\n if groups[-1] != ln:\n groups.append(ln)\n\n # initial element\n if groups[0] != 0:\n groups.insert(0, 0)\n\n # we must remove in reverse order!\n pg = groups.pop()\n for g in reversed(groups):\n rows = l.take(lrange(g, pg))\n table.remove_rows(start=rows[rows.index[0]\n ], stop=rows[rows.index[-1]] + 1)\n pg = g\n\n self.table.flush()\n\n # return the number of rows removed\n return ln\n\n\nclass AppendableFrameTable(AppendableTable):\n\n \"\"\" suppor the new appendable table formats \"\"\"\n pandas_kind = u('frame_table')\n table_type = u('appendable_frame')\n ndim = 2\n obj_type = DataFrame\n\n @property\n def is_transposed(self):\n return self.index_axes[0].axis == 1\n\n def get_object(self, obj):\n \"\"\" these are written transposed \"\"\"\n if self.is_transposed:\n obj = obj.T\n return obj\n\n def read(self, where=None, columns=None, **kwargs):\n\n if not self.read_axes(where=where, **kwargs):\n return None\n\n info = (self.info.get(self.non_index_axes[0][0], dict())\n if len(self.non_index_axes) else dict())\n index = self.index_axes[0].values\n frames = []\n for a in self.values_axes:\n\n # we could have a multi-index constructor here\n # _ensure_index doesn't recognized our list-of-tuples here\n if info.get('type') == 'MultiIndex':\n cols = MultiIndex.from_tuples(a.values)\n else:\n cols = Index(a.values)\n names = info.get('names')\n if names is not None:\n cols.set_names(names, inplace=True)\n\n if self.is_transposed:\n values = a.cvalues\n index_ = cols\n cols_ = Index(index, name=getattr(index, 'name', None))\n else:\n values = a.cvalues.T\n index_ = Index(index, name=getattr(index, 'name', None))\n cols_ = cols\n\n # if we have a DataIndexableCol, its shape will only be 1 dim\n if values.ndim == 1 and isinstance(values, np.ndarray):\n values = values.reshape((1, values.shape[0]))\n\n block = make_block(values, placement=np.arange(len(cols_)))\n mgr = BlockManager([block], [cols_, index_])\n frames.append(DataFrame(mgr))\n\n if 
len(frames) == 1:\n df = frames[0]\n else:\n df = concat(frames, axis=1)\n\n # apply the selection filters & axis orderings\n df = self.process_axes(df, columns=columns)\n\n return df\n\n\nclass AppendableSeriesTable(AppendableFrameTable):\n \"\"\" support the new appendable table formats \"\"\"\n pandas_kind = u('series_table')\n table_type = u('appendable_series')\n ndim = 2\n obj_type = Series\n storage_obj_type = DataFrame\n\n @property\n def is_transposed(self):\n return False\n\n def get_object(self, obj):\n return obj\n\n def write(self, obj, data_columns=None, **kwargs):\n \"\"\" we are going to write this as a frame table \"\"\"\n if not isinstance(obj, DataFrame):\n name = obj.name or 'values'\n obj = DataFrame({name: obj}, index=obj.index)\n obj.columns = [name]\n return super(AppendableSeriesTable, self).write(\n obj=obj, data_columns=obj.columns, **kwargs)\n\n def read(self, columns=None, **kwargs):\n\n is_multi_index = self.is_multi_index\n if columns is not None and is_multi_index:\n for n in self.levels:\n if n not in columns:\n columns.insert(0, n)\n s = super(AppendableSeriesTable, self).read(columns=columns, **kwargs)\n if is_multi_index:\n s.set_index(self.levels, inplace=True)\n\n s = s.iloc[:, 0]\n\n # remove the default name\n if s.name == 'values':\n s.name = None\n return s\n\n\nclass AppendableMultiSeriesTable(AppendableSeriesTable):\n \"\"\" support the new appendable table formats \"\"\"\n pandas_kind = u('series_table')\n table_type = u('appendable_multiseries')\n\n def write(self, obj, **kwargs):\n \"\"\" we are going to write this as a frame table \"\"\"\n name = obj.name or 'values'\n obj, self.levels = self.validate_multiindex(obj)\n cols = list(self.levels)\n cols.append(name)\n obj.columns = cols\n return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs)\n\n\nclass GenericTable(AppendableFrameTable):\n \"\"\" a table that read/writes the generic pytables table format \"\"\"\n pandas_kind = u('frame_table')\n table_type = u('generic_table')\n ndim = 2\n obj_type = DataFrame\n\n @property\n def pandas_type(self):\n return self.pandas_kind\n\n @property\n def storable(self):\n return getattr(self.group, 'table', None) or self.group\n\n def get_attrs(self):\n \"\"\" retrieve our attributes \"\"\"\n self.non_index_axes = []\n self.nan_rep = None\n self.levels = []\n\n self.index_axes = [a.infer(self)\n for a in self.indexables if a.is_an_indexable]\n self.values_axes = [a.infer(self)\n for a in self.indexables if not a.is_an_indexable]\n self.data_columns = [a.name for a in self.values_axes]\n\n @property\n def indexables(self):\n \"\"\" create the indexables from the table description \"\"\"\n if self._indexables is None:\n\n d = self.description\n\n # the index columns is just a simple index\n self._indexables = [GenericIndexCol(name='index', axis=0)]\n\n for i, n in enumerate(d._v_names):\n\n dc = GenericDataIndexableCol(\n name=n, pos=i, values=[n], version=self.version)\n self._indexables.append(dc)\n\n return self._indexables\n\n def write(self, **kwargs):\n raise NotImplementedError(\"cannot write on an generic table\")\n\n\nclass AppendableMultiFrameTable(AppendableFrameTable):\n\n \"\"\" a frame with a multi-index \"\"\"\n table_type = u('appendable_multiframe')\n obj_type = DataFrame\n ndim = 2\n _re_levels = re.compile(\"^level_\\d+$\")\n\n @property\n def table_type_short(self):\n return u('appendable_multi')\n\n def write(self, obj, data_columns=None, **kwargs):\n if data_columns is None:\n data_columns = []\n elif data_columns is 
True:\n data_columns = obj.columns[:]\n obj, self.levels = self.validate_multiindex(obj)\n for n in self.levels:\n if n not in data_columns:\n data_columns.insert(0, n)\n return super(AppendableMultiFrameTable, self).write(\n obj=obj, data_columns=data_columns, **kwargs)\n\n def read(self, **kwargs):\n\n df = super(AppendableMultiFrameTable, self).read(**kwargs)\n df = df.set_index(self.levels)\n\n # remove names for 'level_%d'\n df.index = df.index.set_names([\n None if self._re_levels.search(l) else l for l in df.index.names\n ])\n\n return df\n\n\nclass AppendablePanelTable(AppendableTable):\n\n \"\"\" suppor the new appendable table formats \"\"\"\n table_type = u('appendable_panel')\n ndim = 3\n obj_type = Panel\n\n def get_object(self, obj):\n \"\"\" these are written transposed \"\"\"\n if self.is_transposed:\n obj = obj.transpose(*self.data_orientation)\n return obj\n\n @property\n def is_transposed(self):\n return self.data_orientation != tuple(range(self.ndim))\n\n\nclass AppendableNDimTable(AppendablePanelTable):\n\n \"\"\" suppor the new appendable table formats \"\"\"\n table_type = u('appendable_ndim')\n ndim = 4\n obj_type = Panel4D\n\n\ndef _reindex_axis(obj, axis, labels, other=None):\n ax = obj._get_axis(axis)\n labels = _ensure_index(labels)\n\n # try not to reindex even if other is provided\n # if it equals our current index\n if other is not None:\n other = _ensure_index(other)\n if (other is None or labels.equals(other)) and labels.equals(ax):\n return obj\n\n labels = _ensure_index(labels.unique())\n if other is not None:\n labels = labels & _ensure_index(other.unique())\n if not labels.equals(ax):\n slicer = [slice(None, None)] * obj.ndim\n slicer[axis] = labels\n obj = obj.loc[tuple(slicer)]\n return obj\n\n\ndef _get_info(info, name):\n \"\"\" get/create the info for this name \"\"\"\n try:\n idx = info[name]\n except:\n idx = info[name] = dict()\n return idx\n\n# tz to/from coercion\n\n\ndef _get_tz(tz):\n \"\"\" for a tz-aware type, return an encoded zone \"\"\"\n zone = tslib.get_timezone(tz)\n if zone is None:\n zone = tslib.tot_seconds(tz.utcoffset())\n return zone\n\n\ndef _set_tz(values, tz, preserve_UTC=False, coerce=False):\n \"\"\"\n coerce the values to a DatetimeIndex if tz is set\n preserve the input shape if possible\n\n Parameters\n ----------\n values : ndarray\n tz : string/pickled tz object\n preserve_UTC : boolean,\n preserve the UTC of the result\n coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray\n \"\"\"\n if tz is not None:\n name = getattr(values, 'name', None)\n values = values.ravel()\n tz = tslib.get_timezone(_ensure_decoded(tz))\n values = DatetimeIndex(values, name=name)\n if values.tz is None:\n values = values.tz_localize('UTC').tz_convert(tz)\n if preserve_UTC:\n if tz == 'UTC':\n values = list(values)\n elif coerce:\n values = np.asarray(values, dtype='M8[ns]')\n\n return values\n\n\ndef _convert_index(index, encoding=None, format_type=None):\n index_name = getattr(index, 'name', None)\n\n if isinstance(index, DatetimeIndex):\n converted = index.asi8\n return IndexCol(converted, 'datetime64', _tables().Int64Col(),\n freq=getattr(index, 'freq', None),\n tz=getattr(index, 'tz', None),\n index_name=index_name)\n elif isinstance(index, TimedeltaIndex):\n converted = index.asi8\n return IndexCol(converted, 'timedelta64', _tables().Int64Col(),\n freq=getattr(index, 'freq', None),\n index_name=index_name)\n elif isinstance(index, (Int64Index, PeriodIndex)):\n atom = _tables().Int64Col()\n # avoid to store ndarray of 
Period objects\n return IndexCol(index._values, 'integer', atom,\n freq=getattr(index, 'freq', None),\n index_name=index_name)\n\n if isinstance(index, MultiIndex):\n raise TypeError('MultiIndex not supported here!')\n\n inferred_type = lib.infer_dtype(index)\n\n values = np.asarray(index)\n\n if inferred_type == 'datetime64':\n converted = values.view('i8')\n return IndexCol(converted, 'datetime64', _tables().Int64Col(),\n freq=getattr(index, 'freq', None),\n tz=getattr(index, 'tz', None),\n index_name=index_name)\n elif inferred_type == 'timedelta64':\n converted = values.view('i8')\n return IndexCol(converted, 'timedelta64', _tables().Int64Col(),\n freq=getattr(index, 'freq', None),\n index_name=index_name)\n elif inferred_type == 'datetime':\n converted = np.asarray([(time.mktime(v.timetuple()) +\n v.microsecond / 1E6) for v in values],\n dtype=np.float64)\n return IndexCol(converted, 'datetime', _tables().Time64Col(),\n index_name=index_name)\n elif inferred_type == 'date':\n converted = np.asarray([v.toordinal() for v in values],\n dtype=np.int32)\n return IndexCol(converted, 'date', _tables().Time32Col(),\n index_name=index_name)\n elif inferred_type == 'string':\n # atom = _tables().ObjectAtom()\n # return np.asarray(values, dtype='O'), 'object', atom\n\n converted = _convert_string_array(values, encoding)\n itemsize = converted.dtype.itemsize\n return IndexCol(\n converted, 'string', _tables().StringCol(itemsize),\n itemsize=itemsize, index_name=index_name\n )\n elif inferred_type == 'unicode':\n if format_type == 'fixed':\n atom = _tables().ObjectAtom()\n return IndexCol(np.asarray(values, dtype='O'), 'object', atom,\n index_name=index_name)\n raise TypeError(\n \"[unicode] is not supported as a in index type for [{0}] formats\"\n .format(format_type)\n )\n\n elif inferred_type == 'integer':\n # take a guess for now, hope the values fit\n atom = _tables().Int64Col()\n return IndexCol(np.asarray(values, dtype=np.int64), 'integer', atom,\n index_name=index_name)\n elif inferred_type == 'floating':\n atom = _tables().Float64Col()\n return IndexCol(np.asarray(values, dtype=np.float64), 'float', atom,\n index_name=index_name)\n else: # pragma: no cover\n atom = _tables().ObjectAtom()\n return IndexCol(np.asarray(values, dtype='O'), 'object', atom,\n index_name=index_name)\n\n\ndef _unconvert_index(data, kind, encoding=None):\n kind = _ensure_decoded(kind)\n if kind == u('datetime64'):\n index = DatetimeIndex(data)\n elif kind == u('timedelta64'):\n index = TimedeltaIndex(data)\n elif kind == u('datetime'):\n index = np.asarray([datetime.fromtimestamp(v) for v in data],\n dtype=object)\n elif kind == u('date'):\n try:\n index = np.asarray(\n [date.fromordinal(v) for v in data], dtype=object)\n except (ValueError):\n index = np.asarray(\n [date.fromtimestamp(v) for v in data], dtype=object)\n elif kind in (u('integer'), u('float')):\n index = np.asarray(data)\n elif kind in (u('string')):\n index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)\n elif kind == u('object'):\n index = np.asarray(data[0])\n else: # pragma: no cover\n raise ValueError('unrecognized index type %s' % kind)\n return index\n\n\ndef _unconvert_index_legacy(data, kind, legacy=False, encoding=None):\n kind = _ensure_decoded(kind)\n if kind == u('datetime'):\n index = lib.time64_to_datetime(data)\n elif kind in (u('integer')):\n index = np.asarray(data, dtype=object)\n elif kind in (u('string')):\n index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)\n else: # pragma: no cover\n 
raise ValueError('unrecognized index type %s' % kind)\n return index\n\n\ndef _convert_string_array(data, encoding, itemsize=None):\n \"\"\"\n we take a string-like that is object dtype and coerce to a fixed size\n string type\n\n Parameters\n ----------\n data : a numpy array of object dtype\n encoding : None or string-encoding\n itemsize : integer, optional, defaults to the max length of the strings\n\n Returns\n -------\n data in a fixed-length string dtype, encoded to bytes if needed\n \"\"\"\n\n # encode if needed\n if encoding is not None and len(data):\n data = Series(data.ravel()).str.encode(\n encoding).values.reshape(data.shape)\n\n # create the sized dtype\n if itemsize is None:\n itemsize = lib.max_len_string_array(_ensure_object(data.ravel()))\n\n data = np.asarray(data, dtype=\"S%d\" % itemsize)\n return data\n\n\ndef _unconvert_string_array(data, nan_rep=None, encoding=None):\n \"\"\"\n inverse of _convert_string_array\n\n Parameters\n ----------\n data : fixed length string dtyped array\n nan_rep : the storage repr of NaN, optional\n encoding : the encoding of the data, optional\n\n Returns\n -------\n an object array of the decoded data\n\n \"\"\"\n shape = data.shape\n data = np.asarray(data.ravel(), dtype=object)\n\n # guard against a None encoding in PY3 (because of a legacy\n # where the passed encoding is actually None)\n encoding = _ensure_encoding(encoding)\n if encoding is not None and len(data):\n\n itemsize = lib.max_len_string_array(_ensure_object(data))\n if compat.PY3:\n dtype = \"U{0}\".format(itemsize)\n else:\n dtype = \"S{0}\".format(itemsize)\n\n if isinstance(data[0], compat.binary_type):\n data = Series(data).str.decode(encoding).values\n else:\n data = data.astype(dtype, copy=False).astype(object, copy=False)\n\n if nan_rep is None:\n nan_rep = 'nan'\n\n data = lib.string_array_replace_from_nan_rep(data, nan_rep)\n return data.reshape(shape)\n\n\ndef _maybe_convert(values, val_kind, encoding):\n if _need_convert(val_kind):\n conv = _get_converter(val_kind, encoding)\n # conv = np.frompyfunc(conv, 1, 1)\n values = conv(values)\n return values\n\n\ndef _get_converter(kind, encoding):\n kind = _ensure_decoded(kind)\n if kind == 'datetime64':\n return lambda x: np.asarray(x, dtype='M8[ns]')\n elif kind == 'datetime':\n return lib.convert_timestamps\n elif kind == 'string':\n return lambda x: _unconvert_string_array(x, encoding=encoding)\n else: # pragma: no cover\n raise ValueError('invalid kind %s' % kind)\n\n\ndef _need_convert(kind):\n kind = _ensure_decoded(kind)\n if kind in (u('datetime'), u('datetime64'), u('string')):\n return True\n return False\n\n\nclass Selection(object):\n\n \"\"\"\n Carries out a selection operation on a tables.Table object.\n\n Parameters\n ----------\n table : a Table object\n where : list of Terms (or convertable to)\n start, stop: indicies to start and/or stop selection\n\n \"\"\"\n\n def __init__(self, table, where=None, start=None, stop=None, **kwargs):\n self.table = table\n self.where = where\n self.start = start\n self.stop = stop\n self.condition = None\n self.filter = None\n self.terms = None\n self.coordinates = None\n\n if is_list_like(where):\n\n # see if we have a passed coordinate like\n try:\n inferred = lib.infer_dtype(where)\n if inferred == 'integer' or inferred == 'boolean':\n where = np.asarray(where)\n if where.dtype == np.bool_:\n start, stop = self.start, self.stop\n if start is None:\n start = 0\n if stop is None:\n stop = self.table.nrows\n self.coordinates = np.arange(start, stop)[where]\n elif 
issubclass(where.dtype.type, np.integer):\n if ((self.start is not None and\n (where < self.start).any()) or\n (self.stop is not None and\n (where >= self.stop).any())):\n raise ValueError(\n \"where must have index locations >= start and \"\n \"< stop\"\n )\n self.coordinates = where\n\n except:\n pass\n\n if self.coordinates is None:\n\n self.terms = self.generate(where)\n\n # create the numexpr & the filter\n if self.terms is not None:\n self.condition, self.filter = self.terms.evaluate()\n\n def generate(self, where):\n \"\"\" where can be a : dict,list,tuple,string \"\"\"\n if where is None:\n return None\n\n q = self.table.queryables()\n try:\n return Expr(where, queryables=q, encoding=self.table.encoding)\n except NameError:\n # raise a nice message, suggesting that the user should use\n # data_columns\n raise ValueError(\n \"The passed where expression: {0}\\n\"\n \" contains an invalid variable reference\\n\"\n \" all of the variable refrences must be a \"\n \"reference to\\n\"\n \" an axis (e.g. 'index' or 'columns'), or a \"\n \"data_column\\n\"\n \" The currently defined references are: {1}\\n\"\n .format(where, ','.join(q.keys()))\n )\n\n def select(self):\n \"\"\"\n generate the selection\n \"\"\"\n if self.condition is not None:\n return self.table.table.read_where(self.condition.format(),\n start=self.start,\n stop=self.stop)\n elif self.coordinates is not None:\n return self.table.table.read_coordinates(self.coordinates)\n return self.table.table.read(start=self.start, stop=self.stop)\n\n def select_coords(self):\n \"\"\"\n generate the selection\n \"\"\"\n start, stop = self.start, self.stop\n nrows = self.table.nrows\n if start is None:\n start = 0\n elif start < 0:\n start += nrows\n if self.stop is None:\n stop = nrows\n elif stop < 0:\n stop += nrows\n\n if self.condition is not None:\n return self.table.table.get_where_list(self.condition.format(),\n start=start, stop=stop,\n sort=True)\n elif self.coordinates is not None:\n return self.coordinates\n\n return np.arange(start, stop)\n\n# utilities ###\n\n\ndef timeit(key, df, fn=None, remove=True, **kwargs):\n if fn is None:\n fn = 'timeit.h5'\n store = HDFStore(fn, mode='w')\n store.append(key, df, **kwargs)\n store.close()\n\n if remove:\n os.remove(fn)\n"
] | [
[
"pandas.types.common.is_categorical_dtype",
"pandas.types.common._ensure_int64",
"pandas.Series",
"pandas.compat.iteritems",
"pandas.formats.printing.adjoin",
"pandas.core.algorithms.unique",
"pandas.compat.u_safe",
"numpy.asarray",
"pandas.tslib.get_timezone",
"pandas.io.common._stringify_path",
"pandas.core.categorical.Categorical.from_array",
"pandas.computation.pytables.Expr",
"pandas.core.common._asarray_tuplesafe",
"pandas.core.config.config_prefix",
"pandas.types.common._ensure_object",
"pandas.core.config.get_option",
"pandas.sparse.array.BlockIndex",
"pandas.types.common.is_datetime64tz_dtype",
"pandas.types.common.is_list_like",
"pandas.lib.time64_to_datetime",
"pandas.tseries.api.DatetimeIndex",
"pandas.lib.infer_dtype",
"pandas.tseries.api.PeriodIndex._simple_new",
"pandas.computation.pytables.maybe_expression",
"pandas.lib.string_array_replace_from_nan_rep",
"pandas.isnull",
"pandas.types.missing.array_equivalent",
"pandas.core.index._ensure_index",
"numpy.tile",
"pandas.tseries.api.DatetimeIndex._simple_new",
"pandas.types.common.is_timedelta64_dtype",
"pandas.tools.merge.concat",
"pandas.compat.filter",
"pandas.types.common.is_datetime64_dtype",
"numpy.repeat",
"numpy.arange",
"pandas.MultiIndex.from_tuples",
"pandas.formats.printing.pprint_thing",
"numpy.prod",
"pandas.types.common._ensure_platform_int",
"pandas.Index",
"pandas.core.config.is_one_of_factory",
"pandas.lib.fast_unique",
"numpy.array",
"pandas.core.algorithms.match",
"pandas.core.config.register_option",
"numpy.empty",
"pandas.MultiIndex.from_arrays",
"pandas.sparse.array.IntIndex",
"pandas.core.internals._factor_indexer",
"pandas.DataFrame",
"pandas.compat.lrange",
"pandas.sparse.api.SparseDataFrame",
"pandas.compat.range",
"pandas.core.internals._block_shape",
"pandas.core.internals.BlockManager",
"pandas.tseries.tdi.TimedeltaIndex",
"pandas.MultiIndex"
]
] |
BioinfoTongLI/deepBlink | [
"aa819b71f380507f9fcfa0664ab0f5a8eca4b209"
] | [
"tests/test_augment.py"
] | [
"\"\"\"Unittests for the deepblink.augment module.\"\"\"\n# pylint: disable=missing-function-docstring\n\nfrom hypothesis import given\nfrom hypothesis.extra.numpy import arrays\nimport numpy as np\nimport pytest\n\nfrom deepblink.augment import augment_batch_baseline\nfrom deepblink.augment import flip\nfrom deepblink.augment import gaussian_noise\nfrom deepblink.augment import illuminate\nfrom deepblink.augment import rotate\nfrom deepblink.augment import translate\n\n\n@given(arrays(np.float32, (3, 5, 5)))\ndef test_augment_batch_baseline(arr):\n imgs, masks = augment_batch_baseline(arr, arr)\n assert imgs.shape == masks.shape == arr.shape\n\n with pytest.warns(UserWarning):\n misshaped_arr = np.zeros((10, 5, 5))\n augment_batch_baseline(misshaped_arr, misshaped_arr)\n\n\n@given(arrays(np.int8, (5, 5)))\ndef test_flip(matrix):\n img, mask = flip(matrix, matrix)\n assert np.sum(np.sum(img)) == np.sum(np.sum(matrix))\n assert mask.shape == matrix.shape\n\n\n@given(arrays(np.int8, (5, 5)))\ndef test_illuminate(matrix):\n img, mask = illuminate(matrix, matrix)\n assert img.shape == matrix.shape\n assert mask.shape == matrix.shape\n\n\n@given(arrays(np.int8, (5, 5)))\ndef test_gaussian_noise(matrix):\n img, mask = gaussian_noise(matrix, matrix)\n assert img.shape == matrix.shape\n assert mask.shape == matrix.shape\n\n\n@given(arrays(np.int8, (5, 5)))\ndef test_rotate(matrix):\n img, mask = rotate(matrix, matrix)\n assert np.sum(np.sum(img)) == np.sum(np.sum(matrix))\n assert mask.shape == matrix.shape\n\n\n@given(arrays(np.int8, (5, 5)))\ndef test_translate(matrix):\n img, mask = translate(matrix, matrix)\n assert np.sum(np.sum(img)) == np.sum(np.sum(matrix))\n assert mask.shape == matrix.shape\n"
] | [
[
"numpy.sum",
"numpy.zeros"
]
] |
ethz-asl/data-driven-dynamics | [
"decf4bec19c9fc4a1789f5eb4d6e6003774c75d6"
] | [
"Tools/parametric_model/src/models/multirotor_model.py"
] | [
"\"\"\"\n *\n * Copyright (c) 2021 Manuel Yves Galliker\n * 2021 Autonomous Systems Lab ETH Zurich\n * All rights reserved.\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in\n * the documentation and/or other materials provided with the\n * distribution.\n * 3. Neither the name Data Driven Dynamics nor the names of its contributors may be\n * used to endorse or promote products derived from this software\n * without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS\n * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED\n * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\n\nThe model in this file estimates a simple force motor model for a multirotor.\n\nModel Parameters:\nu : normalized actuator output scaled between 0 and 1\nangular_vel_const : angular velocity constant\nangular_vel_offset : angular velocity offset\nmot_const : motor constant\nm : mass of UAV\naccel_const : combined acceleration constant k_2/m\n\nModel:\nangular_vel [rad/s] = angular_vel_const*u + angular_vel_offset\nF_thrust = - mot_const * angular_vel^2\nF_thrust_tot = - mot_const * \\\n (angular_vel_1^2 + angular_vel_2^2 + angular_vel_3^2 + angular_vel_4^2)\n\nNote that the forces are calculated in the NED body frame and are therefore negative.\n\"\"\"\n\n__author__ = \"Manuel Yves Galliker\"\n__maintainer__ = \"Manuel Yves Galliker\"\n__license__ = \"BSD 3\"\n\nfrom sklearn.linear_model import LinearRegression\nfrom .dynamics_model import DynamicsModel\nfrom .rotor_models import RotorModel\nfrom .aerodynamic_models import FuselageDragModel\nfrom .model_config import ModelConfig\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\n\n\nclass MultiRotorModel(DynamicsModel):\n def __init__(self, config_file, model_name=\"multirotor_model\"):\n self.config = ModelConfig(config_file)\n super(MultiRotorModel, self).__init__(\n config_dict=self.config.dynamics_model_config)\n self.mass = self.config.model_config[\"mass\"]\n self.moment_of_inertia = np.diag([self.config.model_config[\"moment_of_inertia\"][\"Ixx\"],\n self.config.model_config[\"moment_of_inertia\"][\"Iyy\"], self.config.model_config[\"moment_of_inertia\"][\"Izz\"]])\n\n self.rotor_config_dict = self.config.model_config[\"actuators\"][\"rotors\"]\n\n self.model_name = model_name\n\n def prepare_force_regression_matrices(self):\n\n accel_mat = self.data_df[[\n \"acc_b_x\", \"acc_b_y\", \"acc_b_z\"]].to_numpy()\n force_mat = accel_mat * self.mass\n #self.y_forces = 
(force_mat).flatten()\n self.data_df[[\"measured_force_x\", \"measured_force_y\",\n \"measured_force_z\"]] = force_mat\n\n airspeed_mat = self.data_df[[\"V_air_body_x\",\n \"V_air_body_y\", \"V_air_body_z\"]].to_numpy()\n aero_model = FuselageDragModel()\n X_aero, coef_dict_aero, col_names_aero = aero_model.compute_fuselage_features(\n airspeed_mat)\n self.data_df[col_names_aero] = X_aero\n self.coef_dict.update(coef_dict_aero)\n self.y_dict.update({\"lin\":{\"x\":\"measured_force_x\",\"y\":\"measured_force_y\",\"z\":\"measured_force_z\"}})\n\n def prepare_moment_regression_matrices(self):\n moment_mat = np.matmul(self.data_df[[\n \"ang_acc_b_x\", \"ang_acc_b_y\", \"ang_acc_b_z\"]].to_numpy(), self.moment_of_inertia)\n #self.y_moments = (moment_mat).flatten()\n self.data_df[[\"measured_moment_x\", \"measured_moment_y\",\n \"measured_moment_z\"]] = moment_mat\n \n self.y_dict.update({\"rot\":{\"x\":\"measured_moment_x\",\"y\":\"measured_moment_y\",\"z\":\"measured_moment_z\"}})\n"
] | [
[
"numpy.diag"
]
] |
rhoposit/tacotron2 | [
"2dad8df5ea50459789e16d9effb83fc2a25e42ed"
] | [
"tacotron/models.py"
] | [
"# ==============================================================================\n# Copyright (c) 2018, Yamagishi Laboratory, National Institute of Informatics\n# Author: Yusuke Yasuda ([email protected])\n# All rights reserved.\n# ==============================================================================\n\"\"\" Models. \"\"\"\n\nimport tensorflow as tf\nfrom tacotron.modules import Embedding\nfrom tacotron.tacotron_v1 import EncoderV1, DecoderV1\nfrom tacotron.hooks import MetricsSaver, PostNetMetricsSaver\nfrom util.audio import Audio\n\n\nclass SingleSpeakerTacotronV1Model(tf.estimator.Estimator):\n\n def __init__(self, params, model_dir=None, config=None, warm_start_from=None):\n def model_fn(features, labels, mode, params):\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n is_validation = mode == tf.estimator.ModeKeys.EVAL\n is_prediction = mode == tf.estimator.ModeKeys.PREDICT\n\n embedding = Embedding(params.num_symbols, embedding_dim=params.embedding_dim)\n\n encoder = EncoderV1(is_training,\n cbhg_out_units=params.cbhg_out_units,\n conv_channels=params.conv_channels,\n max_filter_width=params.max_filter_width,\n projection1_out_channels=params.projection1_out_channels,\n projection2_out_channels=params.projection2_out_channels,\n num_highway=params.num_highway,\n prenet_out_units=params.encoder_prenet_out_units,\n drop_rate=params.encoder_prenet_drop_rate)\n\n decoder = DecoderV1(prenet_out_units=params.decoder_prenet_out_units,\n drop_rate=params.decoder_prenet_drop_rate,\n attention_out_units=params.attention_out_units,\n decoder_out_units=params.decoder_out_units,\n num_codes=params.num_codes,\n outputs_per_step=params.outputs_per_step,\n max_iters=params.max_iters,\n n_feed_frame=params.n_feed_frame)\n\n target = labels.codes if (is_training or is_validation) else None\n\n embedding_output = embedding(features.source)\n encoder_output = encoder(embedding_output)\n codes_output, stop_token, decoder_state = decoder(encoder_output,\n is_training=is_training,\n is_validation=is_validation,\n memory_sequence_length=features.source_length,target=target)\n alignment = tf.transpose(decoder_state[0].alignment_history.stack(), [1, 2, 0])\n\n global_step = tf.train.get_global_step()\n\n if mode is not tf.estimator.ModeKeys.PREDICT:\n codes_loss = self.codes_loss(code_output, labels.codes,\n labels.codes_loss_mask)\n done_loss = self.binary_loss(stop_token, labels.done, labels.binary_loss_mask)\n loss = code_loss + done_loss\n\n if is_training:\n lr = self.learning_rate_decay(\n params.initial_learning_rate, global_step) if params.decay_learning_rate else tf.convert_to_tensor(\n params.initial_learning_rate)\n optimizer = tf.train.AdamOptimizer(learning_rate=lr, beta1=params.adam_beta1,\n beta2=params.adam_beta2, epsilon=params.adam_eps)\n\n gradients, variables = zip(*optimizer.compute_gradients(loss))\n clipped_gradients, _ = tf.clip_by_global_norm(gradients, 1.0)\n self.add_training_stats(loss, codes_loss, done_loss, lr)\n # Add dependency on UPDATE_OPS; otherwise batchnorm won't work correctly. 
See:\n # https://github.com/tensorflow/tensorflow/issues/1122\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n train_op = optimizer.apply_gradients(zip(clipped_gradients, variables), global_step=global_step)\n summary_writer = tf.summary.FileWriter(model_dir)\n alignment_saver = MetricsSaver([alignment],\n global_step,\n codes_output,\n labels.codes,\n labels.target_length,\n features.id,\n features.text,\n params.alignment_save_steps,\n mode, summary_writer,\n params.save_training_time_metrics,\n params.keep_eval_results_max_epoch)\n hooks = [alignment_saver]\n if params.record_profile:\n profileHook = tf.train.ProfilerHook(save_steps=params.profile_steps, output_dir=model_dir,\n show_dataflow=True, show_memory=True)\n hooks.append(profileHook)\n return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op,\n training_hooks=hooks)\n\n if is_validation:\n # validation with teacher forcing\n codes_output_with_teacher, stop_token_with_teacher, _ = decoder(encoder_output,\n is_training=is_training,\n is_validation=is_validation,\n memory_sequence_length=features.source_length,\n target=target,\n teacher_forcing=True)\n codes_loss_with_teacher = self.spec_loss(codes_output_with_teacher, labels.codes, labels.codes_loss_mask)\n done_loss_with_teacher = self.binary_loss(stop_token_with_teacher, labels.done, labels.binary_loss_mask)\n loss_with_teacher = codes_loss_with_teacher + done_loss_with_teacher\n eval_metric_ops = self.get_validation_metrics(codes_loss, done_loss, loss_with_teacher, codes_loss_with_teacher, done_loss_with_teacher)\n\n summary_writer = tf.summary.FileWriter(model_dir)\n alignment_saver = MetricsSaver([alignment],\n global_step,\n codes_output,\n labels.codes,\n labels.target_length,\n features.id,\n features.text,\n 1,\n mode, summary_writer,\n params.save_training_time_metrics,\n params.keep_eval_results_max_epoch)\n return tf.estimator.EstimatorSpec(mode, loss=loss,\n evaluation_hooks=[alignment_saver],\n eval_metric_ops=eval_metric_ops)\n\n if is_prediction:\n return tf.estimator.EstimatorSpec(mode, predictions={\n \"id\": features.id,\n \"codes\": codes_output,\n \"alignment\": alignment,\n \"source\": features.source,\n \"text\": features.text,\n })\n\n super(SingleSpeakerTacotronV1Model, self).__init__(\n model_fn=model_fn, model_dir=model_dir, config=config,\n params=params, warm_start_from=warm_start_from)\n\n @staticmethod\n def codes_loss(y_hat, y, mask, n_priority_freq=None, priority_w=0):\n l1_loss = tf.abs(y_hat - y)\n\n # Priority L1 loss\n if n_priority_freq is not None and priority_w > 0:\n priority_loss = tf.abs(y_hat[:, :, :n_priority_freq] - y[:, :, :n_priority_freq])\n l1_loss = (1 - priority_w) * l1_loss + priority_w * priority_loss\n\n return tf.losses.compute_weighted_loss(l1_loss, weights=tf.expand_dims(mask, axis=2))\n\n @staticmethod\n def binary_loss(done_hat, done, mask):\n return tf.losses.sigmoid_cross_entropy(done, tf.squeeze(done_hat, axis=-1), weights=mask)\n\n @staticmethod\n def learning_rate_decay(init_rate, global_step):\n warmup_steps = 4000.0\n step = tf.to_float(global_step + 1)\n return init_rate * warmup_steps ** 0.5 * tf.minimum(step * warmup_steps ** -1.5, step ** -0.5)\n\n @staticmethod\n def add_training_stats(loss, codes_loss, done_loss, learning_rate):\n if loss is not None:\n tf.summary.scalar(\"loss_with_teacher\", loss)\n if codes_loss is not None:\n tf.summary.scalar(\"codes_loss\", codes_loss)\n tf.summary.scalar(\"codes_loss_with_teacher\", codes_loss)\n if done_loss is not None:\n 
tf.summary.scalar(\"done_loss\", done_loss)\n tf.summary.scalar(\"done_loss_with_teacher\", done_loss)\n tf.summary.scalar(\"learning_rate\", learning_rate)\n return tf.summary.merge_all()\n\n @staticmethod\n def get_validation_metrics(codes_loss, done_loss, loss_with_teacher, codes_loss_with_teacher, done_loss_with_teacher):\n metrics = {}\n if codes_loss is not None:\n metrics[\"codes_loss\"] = tf.metrics.mean(codes_loss)\n if done_loss is not None:\n metrics[\"done_loss\"] = tf.metrics.mean(done_loss)\n if loss_with_teacher is not None:\n metrics[\"loss_with_teacher\"] = tf.metrics.mean(loss_with_teacher)\n if codes_loss_with_teacher is not None:\n metrics[\"codes_loss_with_teacher\"] = tf.metrics.mean(codes_loss_with_teacher)\n if done_loss_with_teacher is not None:\n metrics[\"done_loss_with_teacher\"] = tf.metrics.mean(done_loss_with_teacher)\n return metrics\n\n"
] | [
[
"tensorflow.metrics.mean",
"tensorflow.summary.scalar",
"tensorflow.minimum",
"tensorflow.summary.merge_all",
"tensorflow.clip_by_global_norm",
"tensorflow.get_collection",
"tensorflow.to_float",
"tensorflow.train.AdamOptimizer",
"tensorflow.expand_dims",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.squeeze",
"tensorflow.abs",
"tensorflow.convert_to_tensor",
"tensorflow.train.get_global_step",
"tensorflow.summary.FileWriter",
"tensorflow.train.ProfilerHook"
]
] |
ChristopherChudzicki/mitx-grading-library | [
"1d9a7107f26b5e0ebe24deb552cf943779693e18"
] | [
"mitxgraders/helpers/calc/mathfuncs.py"
] | [
"\"\"\"\nmathfuncs.py\n\nContains mathematical functions for use in interpreting formulas.\n\nContains some helper functions used in grading formulae:\n* within_tolerance\n\nDefines:\n* DEFAULT_FUNCTIONS\n* DEFAULT_VARIABLES\n* DEFAULT_SUFFIXES\n* METRIC_SUFFIXES\n\"\"\"\nfrom __future__ import print_function, division, absolute_import, unicode_literals\n\nimport six\nimport numpy as np\nimport scipy.special as special\nfrom mitxgraders.helpers.calc.specify_domain import SpecifyDomain\nfrom mitxgraders.helpers.calc.exceptions import FunctionEvalError\nfrom mitxgraders.helpers.calc.math_array import MathArray\n\n# Normal Trig\ndef sec(arg):\n \"\"\"Secant\"\"\"\n return 1 / np.cos(arg)\n\ndef csc(arg):\n \"\"\"Cosecant\"\"\"\n return 1 / np.sin(arg)\n\ndef cot(arg):\n \"\"\"Cotangent\"\"\"\n return 1 / np.tan(arg)\n\n# Inverse Trig\n# http://en.wikipedia.org/wiki/Inverse_trigonometric_functions#Relationships_among_the_inverse_trigonometric_functions\ndef arcsec(val):\n \"\"\"Inverse secant\"\"\"\n return np.arccos(1. / val)\n\ndef arccsc(val):\n \"\"\"Inverse cosecant\"\"\"\n return np.arcsin(1. / val)\n\ndef arccot(val):\n \"\"\"Inverse cotangent\"\"\"\n if np.real(val) < 0:\n return -np.pi / 2 - np.arctan(val)\n else:\n return np.pi / 2 - np.arctan(val)\n\n# Hyperbolic Trig\ndef sech(arg):\n \"\"\"Hyperbolic secant\"\"\"\n return 1 / np.cosh(arg)\n\ndef csch(arg):\n \"\"\"Hyperbolic cosecant\"\"\"\n return 1 / np.sinh(arg)\n\ndef coth(arg):\n \"\"\"Hyperbolic cotangent\"\"\"\n return 1 / np.tanh(arg)\n\n# And their inverses\ndef arcsech(val):\n \"\"\"Inverse hyperbolic secant\"\"\"\n return np.arccosh(1. / val)\n\ndef arccsch(val):\n \"\"\"Inverse hyperbolic cosecant\"\"\"\n return np.arcsinh(1. / val)\n\ndef arccoth(val):\n \"\"\"Inverse hyperbolic cotangent\"\"\"\n return np.arctanh(1. / val)\n\n# NOTE: tests are in a separate file, NOT doctests.\n# see https://bugs.python.org/issue6835\[email protected]_decorator((1,), (1,))\ndef arctan2(x, y):\n \"\"\"\n Returns the an angle in range (-pi, pi] whose tangent is y/x, taking into\n account the quadrant that (x, y) is in.\n \"\"\"\n if x == 0 and y == 0:\n raise FunctionEvalError(\"arctan2(0, 0) is undefined\")\n\n return np.arctan2(y, x)\n\n# NOTE: tests are in a separate file, NOT doctests.\n# see https://bugs.python.org/issue6835\[email protected]_decorator((1,), (1,))\ndef kronecker(x, y):\n \"\"\"\n Returns 1 if x==y, and 0 otherwise.\n Note that this should really only be used for integer expressions.\n \"\"\"\n if x == y:\n return 1\n return 0\n\ndef content_if_0d_array(obj):\n \"\"\"\n If obj is a 0d numpy array, return its contents. Otherwise, return item.\n\n Usage:\n ======\n\n >>> content_if_0d_array(5) == 5\n True\n >>> content_if_0d_array(np.array(5)) == 5\n True\n >>> content_if_0d_array(np.array([1, 2, 3]))\n array([1, 2, 3])\n \"\"\"\n return obj.item() if isinstance(obj, np.ndarray) and obj.ndim == 0 else obj\n\ndef real(z):\n \"\"\"\n Returns the real part of z.\n >>> real(2+3j)\n 2.0\n\n If the input is a number, a number is returned:\n >>> isinstance(real(2+3j), float)\n True\n\n Can be used with arrays, too: # doctest: +NORMALIZE_WHITESPACE\n >>> real(np.array([1+10j, 2+20j, 3+30j]))\n array([ 1., 2., 3.])\n \"\"\"\n # np.real seems to return 0d arrays for numerical inputs. 
For example,\n # np.real(2+3j) is a 0d array.\n return content_if_0d_array(np.real(z))\n\ndef imag(z):\n \"\"\"\n Returns the imaginary part of z.\n >>> imag(2+3j)\n 3.0\n\n If the input is a number, a number is returned:\n >>> isinstance(imag(2+3j), float)\n True\n\n Can be used with arrays, too:\n >>> imag(np.array([1+10j, 2+20j, 3+30j]))\n array([ 10., 20., 30.])\n \"\"\"\n return content_if_0d_array(np.imag(z))\n\ndef factorial(z):\n \"\"\"\n Factorial function over complex numbers, using the gamma function.\n Note that math.factorial will return long ints, which are problematic when running\n into overflow issues. The gamma function just returns inf.\n\n Usage\n =====\n\n Non-negative integer input returns floats:\n >>> factorial(4)\n 24.0\n\n Floats and complex numbers use scipy's gamma function:\n >>> import math\n >>> factorial(0.5) # doctest: +ELLIPSIS\n 0.8862269...\n >>> math.sqrt(math.pi)/2 # doctest: +ELLIPSIS\n 0.8862269...\n >>> factorial(3.2+4.1j) # doctest: +ELLIPSIS\n (1.0703272...-0.3028032...j)\n >>> factorial(2.2+4.1j)*(3.2+4.1j) # doctest: +ELLIPSIS\n (1.0703272...-0.3028032...j)\n\n Works with numpy arrays:\n >>> np.array_equal(\n ... factorial(np.array([1, 2, 3, 4])),\n ... np.array([1, 2, 6, 24])\n ... )\n True\n\n Really big numbers return inf:\n >>> factorial(500) == float('inf')\n True\n >>> factorial(500.5) == float('inf')\n True\n\n Throws errors at poles:\n >>> try: # doctest: +ELLIPSIS\n ... factorial(-2)\n ... except FunctionEvalError as error:\n ... print(error)\n Error evaluating factorial() or fact() in input...\n \"\"\"\n\n try:\n is_integer = isinstance(z, int) or z.is_integer()\n except AttributeError:\n is_integer = False\n\n if is_integer and z < 0:\n msg = (\"Error evaluating factorial() or fact() in input. These \"\n \"functions cannot be used at negative integer values.\")\n raise FunctionEvalError(msg)\n\n value = special.gamma(z+1)\n # value is a numpy array; If it's 0d, we can just get its item:\n try:\n return value.item()\n except ValueError:\n return value\n\[email protected]_decorator((3,), (3,))\ndef cross(a, b):\n return MathArray([\n a[1]*b[2] - b[1]*a[2],\n a[2]*b[0] - b[2]*a[0],\n a[0]*b[1] - b[0]*a[1]\n ])\n\n# Variables available by default\nDEFAULT_VARIABLES = {\n 'i': np.complex(0, 1),\n 'j': np.complex(0, 1),\n 'e': np.e,\n 'pi': np.pi\n}\n\n# These act element-wise on numpy arrays\nELEMENTWISE_FUNCTIONS = {\n 'sin': np.sin,\n 'cos': np.cos,\n 'tan': np.tan,\n 'sec': sec,\n 'csc': csc,\n 'cot': cot,\n # We use scimath variants which give complex results when needed. 
For example:\n # np.sqrt(-4+0j) = 2j\n # np.sqrt(-4) = nan, but\n # np.lib.scimath.sqrt(-4) = 2j\n 'sqrt': np.lib.scimath.sqrt,\n 'log10': np.lib.scimath.log10,\n 'log2': np.lib.scimath.log2,\n 'ln': np.lib.scimath.log,\n 'exp': np.exp,\n 'arccos': np.lib.scimath.arccos,\n 'arcsin': np.lib.scimath.arcsin,\n 'arctan': np.arctan,\n 'arcsec': arcsec,\n 'arccsc': arccsc,\n 'arccot': arccot,\n 'abs': np.abs,\n 'fact': factorial,\n 'factorial': factorial,\n 'sinh': np.sinh,\n 'cosh': np.cosh,\n 'tanh': np.tanh,\n 'sech': sech,\n 'csch': csch,\n 'coth': coth,\n 'arcsinh': np.arcsinh,\n 'arccosh': np.arccosh,\n 'arctanh': np.lib.scimath.arctanh,\n 'arcsech': arcsech,\n 'arccsch': arccsch,\n 'arccoth': arccoth,\n 'floor': np.floor,\n 'ceil': np.ceil\n}\n\ndef has_one_scalar_input(display_name):\n return SpecifyDomain.make_decorator((1,), display_name=display_name)\n\ndef has_at_least_2_scalar_inputs(display_name):\n return SpecifyDomain.make_decorator((1,), display_name=display_name, min_length=2)\n\nSCALAR_FUNCTIONS = {key: has_one_scalar_input(key)(ELEMENTWISE_FUNCTIONS[key])\n for key in ELEMENTWISE_FUNCTIONS}\n\nSCALAR_FUNCTIONS['arctan2'] = arctan2\nSCALAR_FUNCTIONS['kronecker'] = kronecker\n\nMULTI_SCALAR_FUNCTIONS = {\n 'min': has_at_least_2_scalar_inputs('min')(min),\n 'max': has_at_least_2_scalar_inputs('max')(max)\n}\n\nARRAY_FUNCTIONS = {\n 're': real,\n 'im': imag,\n 'conj': np.conj\n}\n\ndef has_one_square_input(display_name):\n return SpecifyDomain.make_decorator('square', display_name=display_name)\n\ndef array_abs(obj):\n \"\"\"\n Takes absolute value of numbers or vectors and suggests norm(...) instead\n for matrix/tensors.\n\n NOTE: The decision to limit abs(...) to scalars and vectors was motivated\n by pedagogy not software.\n \"\"\"\n if isinstance(obj, MathArray) and obj.ndim > 1:\n msg = (\"The abs(...) function expects a scalar or vector. To take the \"\n \"norm of a {}, try norm(...) 
instead.\".format(\n MathArray.get_shape_name(obj.ndim)))\n raise FunctionEvalError(msg)\n return np.linalg.norm(obj)\n\nARRAY_ONLY_FUNCTIONS = {\n 'norm': np.linalg.norm,\n 'abs': array_abs,\n 'trans': np.transpose,\n 'det': has_one_square_input('det')(np.linalg.det),\n 'trace': has_one_square_input('trace')(np.trace),\n 'ctrans': lambda x: np.conj(np.transpose(x)),\n 'adj': lambda x: np.conj(np.transpose(x)),\n 'cross': cross\n}\n\ndef merge_dicts(*source_dicts):\n \"\"\"Create a new dictionary and merge sources into it.\"\"\"\n target = {}\n for source in source_dicts:\n target.update(source)\n return target\n\nDEFAULT_FUNCTIONS = merge_dicts(SCALAR_FUNCTIONS, MULTI_SCALAR_FUNCTIONS, ARRAY_FUNCTIONS)\n\nDEFAULT_SUFFIXES = {\n '%': 0.01\n}\n\nMETRIC_SUFFIXES = {\n 'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,\n 'm': 1e-3, 'u': 1e-6, 'n': 1e-9, 'p': 1e-12\n}\n\npauli = {\n 'sigma_x': MathArray([\n [0, 1],\n [1, 0]\n ]),\n 'sigma_y': MathArray([\n [0, -1j],\n [1j, 0]\n ]),\n 'sigma_z': MathArray([\n [1, 0],\n [0, -1]\n ])\n}\n\ncartesian_xyz = {\n 'hatx': MathArray([1, 0, 0]),\n 'haty': MathArray([0, 1, 0]),\n 'hatz': MathArray([0, 0, 1])\n}\n\ncartesian_ijk = {\n 'hati': MathArray([1, 0, 0]),\n 'hatj': MathArray([0, 1, 0]),\n 'hatk': MathArray([0, 0, 1])\n}\n\ndef percentage_as_number(percent_str):\n \"\"\"\n Convert a percentage string to a number.\n\n Args:\n percent_str: A percent string, for example '5%' or '1.2%'\n\n Usage\n =====\n >>> percentage_as_number('8%')\n 0.08\n >>> percentage_as_number('250%')\n 2.5\n >>> percentage_as_number('-10%')\n -0.1\n \"\"\"\n return float(percent_str.strip()[:-1]) * 0.01\n\ndef within_tolerance(x, y, tolerance):\n \"\"\"\n Check that |x-y| <= tolerance with appropriate norm.\n\n Args:\n x: number or array (np array_like)\n y: number or array (np array_like)\n tolerance: Number or PercentageString\n\n NOTE: Calculates x - y; may raise an error for incompatible shapes.\n\n Usage\n =====\n\n The tolerance can be a number:\n >>> within_tolerance(10, 9.01, 1)\n True\n >>> within_tolerance(10, 9.01, 0.5)\n False\n\n If tolerance is a percentage, it is a percent of (the norm of) x:\n >>> within_tolerance(10, 9.01, '10%')\n True\n >>> within_tolerance(9.01, 10, '10%')\n False\n\n Works for vectors and matrices:\n >>> A = np.array([[1,2],[-3,1]])\n >>> B = np.array([[1.1, 2], [-2.8, 1]])\n >>> diff = round(np.linalg.norm(A-B), 6)\n >>> diff\n 0.223607\n >>> within_tolerance(A, B, 0.25)\n True\n \"\"\"\n # When used within graders, tolerance has already been\n # validated as a Number or PercentageString\n if isinstance(tolerance, six.text_type):\n tolerance = np.linalg.norm(x) * percentage_as_number(tolerance)\n\n difference = x - y\n\n return np.linalg.norm(difference) <= tolerance\n\ndef is_nearly_zero(x, tolerance, reference=None):\n \"\"\"\n Check that x is within tolerance of zero. 
If tolerance is provided as a\n percentage, a reference value is requied.\n\n Args:\n x: number or array (np array_like)\n reference: None number or array (np array_like), only used when\n tolerance is provided as a percentage\n tolerance: Number or PercentageString\n\n Usage\n =====\n >>> is_nearly_zero(0.4, 0.5)\n True\n >>> is_nearly_zero(0.4, 0.3)\n False\n >>> is_nearly_zero(0.4, '5%', reference=10)\n True\n >>> is_nearly_zero(0.4, '3%', reference=10)\n False\n\n Works for arrays, too:\n >>> x = np.array([[1, 1], [0, -1]])\n >>> np.linalg.norm(x) # doctest: +ELLIPSIS\n 1.732050...\n >>> is_nearly_zero(x, '18%', reference=10)\n True\n >>> is_nearly_zero(x, '17%', reference=10)\n False\n\n A ValueError is raised when percentage tolerance is used without reference:\n >>> try:\n ... is_nearly_zero(0.4, '3%')\n ... except ValueError as error:\n ... print(error)\n When tolerance is a percentage, reference must not be None.\n \"\"\"\n # When used within graders, tolerance has already been\n # validated as a Number or PercentageString\n if isinstance(tolerance, six.text_type):\n if reference is None:\n raise ValueError('When tolerance is a percentage, reference must '\n 'not be None.')\n tolerance = np.linalg.norm(reference) * percentage_as_number(tolerance)\n\n return np.linalg.norm(x) <= tolerance\n"
] | [
[
"numpy.arctanh",
"scipy.special.gamma",
"numpy.tanh",
"numpy.transpose",
"numpy.arccosh",
"numpy.arcsinh",
"numpy.arccos",
"numpy.cos",
"numpy.complex",
"numpy.sinh",
"numpy.tan",
"numpy.linalg.norm",
"numpy.arctan2",
"numpy.cosh",
"numpy.arcsin",
"numpy.arctan",
"numpy.sin",
"numpy.real",
"numpy.imag"
]
] |
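The `within_tolerance` helper in the mathfuncs.py row above treats a string tolerance such as `'10%'` as a fraction of the norm of the first argument, and anything else as an absolute bound on the norm of the difference. A minimal standalone sketch of that rule, using only numpy; the function name is invented here and this is not the mitxgraders implementation itself:

```python
# Sketch of the tolerance rule described in mathfuncs.py above (illustration only).
import numpy as np

def within_tolerance_sketch(x, y, tolerance):
    if isinstance(tolerance, str):                      # e.g. '10%'
        tolerance = np.linalg.norm(x) * float(tolerance.strip()[:-1]) * 0.01
    return np.linalg.norm(np.asarray(x) - np.asarray(y)) <= tolerance

print(within_tolerance_sketch(10, 9.01, 1))       # True
print(within_tolerance_sketch(10, 9.01, '10%'))   # True  (tolerance becomes 1.0)
print(within_tolerance_sketch(9.01, 10, '10%'))   # False (tolerance becomes 0.901)
```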
Moetaz-M-Mokhtar/ITIintake40_FaceRecognition | [
"570ceb5d1353efa8b8754243ee8d5db36a951998"
] | [
"detection/docker/model_handler_cpu.py"
] | [
"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n# http://www.apache.org/licenses/LICENSE-2.0\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n\"\"\"\nModelHandler defines a base model handler.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport numpy as np\nimport cv2\nimport logging\nimport time\nimport base64\n\n\nfrom mms.utils.mxnet import image, ndarray\n\nsys.path.append('/root')\nfrom insightface.RetinaFace.retinaface import RetinaFace\n\ndef decode_img(img_str):\n # img_bytes = bytes(img_str, 'utf-8')\n img_buff = base64.b64decode(img_str)\n img_jpg = np.frombuffer(img_buff, dtype=np.uint8)\n img = cv2.imdecode(img_jpg, cv2.IMREAD_COLOR)\n return img\n \nclass ModelHandler(object):\n \"\"\"\n A base Model handler implementation.\n \"\"\"\n\n def __init__(self):\t\t\t\t\t\t \t \n detection_model = 'retinaface-R50/R50' # Name of the detetion model for example 'R50' for LResNet50E\n det_epoch = 0 # Detection model epoch number\n self._batch_size = 1\n self.det_threshold = 0.8\n self.image_size = 160 \t# check recognition model input layer before changing this value\n self.margin = 20 \t# Number of margin pixels to crop faces function\n self.gpuid = -1\t\t\t\t\t\t \t # use CPU\n det_model = '/root/models/detection/' + detection_model \t\t # path to the detection model\n self._detector = RetinaFace(det_model, det_epoch, self.gpuid, 'net3')\n\n def initialize(self, context):\n \"\"\"\n Initialize model. 
This will be called during model loading time\n :param context: Initial context contains model server system properties.\n :return:\n \"\"\"\n self._context = context\n self.initialized = True\n\n def preprocess(self, data):\n \"\"\"\n Transform raw input into model input data.\n :param batch: list of raw requests, should match batch size\n :return: list of preprocessed model input data\n \"\"\"\n assert self._batch_size == len(data), \"Invalid input batch size: {}\".format(len(batch))\n img_list = []\n for idx, img in enumerate(data):\n # We are assuming input shape is NCHW\n # [h, w] = [1024, 1024]\n img_arr = decode_img(img['body'])\n # img_arr = mx.nd.array(img_arr)\n # img_arr = image.resize(img_arr, w, h)\n # img_arr = image.transform_shape(img_arr)\n img_list.append(img_arr)\n return img_list\n\n def inference(self, model_input):\n \"\"\"\n Internal inference methods\n :param model_input: transformed model input data\n :return: list of inference output in NDArray\n \"\"\"\n inference_output = []\n for frame in model_input:\n assert frame.ndim != 2 or frame.ndim != 3, \"expected input image dimension to be 2 or 3 but got data with {}\".format(frame.ndim)\n if frame.ndim == 2:\n frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)\n im_shape = frame.shape\n scales = [1024, 1920]\n target_size = scales[0]\n max_size = scales[1]\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n im_scale = float(target_size) / float(im_size_min)\n if np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) / float(im_size_max)\n scales = [im_scale]\n flip = False\n faces_bb, landmarks = self._detector.detect(frame, threshold=self.det_threshold, scales=scales, do_flip=flip)\n inference_output.append([faces_bb.tolist(), landmarks.tolist()])\n \n print('inference output: ', inference_output)\n return inference_output\n\n def postprocess(self, inference_output):\n \"\"\"\n Return predict result in batch.\n :param inference_output: list of inference output\n :return: list of predict results\n \"\"\"\n # faces_bb = [output[0] for output in inference_output]\n # landmarks = [output[1] for output in inference_output]\n return inference_output\n \n def handle(self, data, context):\n \"\"\"\n Custom service entry point function.\n :param data: list of objects, raw input from request\n :param context: model server context\n :return: list of outputs to be send back to client\n \"\"\"\n try:\n preprocess_start = time.time()\n data = self.preprocess(data)\n inference_start = time.time()\n data = self.inference(data)\n postprocess_start = time.time()\n data = self.postprocess(data)\n end_time = time.time()\n\n metrics = context.metrics\n metrics.add_time(\"PreprocessTime\", round((inference_start - preprocess_start) * 1000, 2))\n metrics.add_time(\"InferenceTime\", round((postprocess_start - inference_start) * 1000, 2))\n metrics.add_time(\"PostprocessTime\", round((end_time - postprocess_start) * 1000, 2))\n\n return data\n\n except Exception as e:\n logging.error(e, exc_info=True)\n request_processor = context.request_processor\n request_processor.report_status(500, \"Unknown inference error\")\n return [str(e)] * self._batch_size\n \n"
] | [
[
"numpy.round",
"numpy.max",
"numpy.min",
"numpy.frombuffer"
]
] |
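The `inference` method in the model handler above rescales each frame so that its short side reaches a target size, unless that would push the long side past a maximum. A small sketch of just that scaling rule, with an invented helper name, the same 1024/1920 defaults, and only numpy assumed:

```python
# Scale so the short side becomes target_size, capped so the long side never
# exceeds max_size (mirrors the scale computation in ModelHandler.inference()).
import numpy as np

def detection_scale(im_shape, target_size=1024, max_size=1920):
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    return im_scale

print(detection_scale((540, 1920, 3)))   # capped by the long side: 1920/1920 = 1.0
print(detection_scale((1080, 1080, 3)))  # short-side rule: 1024/1080, about 0.948
```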
zxc1342802/leijmtrader | [
"f24d5593d8708e48f2a9180d9469a6c2af93a08d"
] | [
"examples/strategies/king_keltner_strategy.py"
] | [
"from jiamtrader.app.cta_strategy import (\n CtaTemplate,\n StopOrder,\n TickData,\n BarData,\n TradeData,\n OrderData,\n BarGenerator,\n ArrayManager,\n)\n\nimport pandas_ta as ta\nimport pandas as pd\n\nclass KingKeltnerStrategy(CtaTemplate):\n \"\"\"\"\"\"\n\n author = \"用Python的交易员\"\n\n kk_length = 11\n kk_dev = 1.6\n trailing_percent = 0.8\n fixed_size = 1\n\n kk_up = 0\n kk_down = 0\n intra_trade_high = 0\n intra_trade_low = 0\n\n long_vt_orderids = []\n short_vt_orderids = []\n vt_orderids = []\n\n parameters = [\"kk_length\", \"kk_dev\", \"trailing_percent\", \"fixed_size\"]\n variables = [\"kk_up\", \"kk_down\"]\n\n def __init__(self, cta_engine, strategy_name, vt_symbol, setting):\n \"\"\"\"\"\"\n super().__init__(cta_engine, strategy_name, vt_symbol, setting)\n\n self.bg = BarGenerator(self.on_bar, 5, self.on_5min_bar)\n self.am = ArrayManager()\n\n def on_init(self):\n \"\"\"\n Callback when strategy is inited.\n \"\"\"\n self.write_log(\"策略初始化\")\n self.load_bar(10)\n\n def on_start(self):\n \"\"\"\n Callback when strategy is started.\n \"\"\"\n self.write_log(\"策略启动\")\n\n def on_stop(self):\n \"\"\"\n Callback when strategy is stopped.\n \"\"\"\n self.write_log(\"策略停止\")\n\n def on_tick(self, tick: TickData):\n \"\"\"\n Callback of new tick data update.\n \"\"\"\n self.bg.update_tick(tick)\n\n def on_bar(self, bar: BarData):\n \"\"\"\n Callback of new bar data update.\n \"\"\"\n self.bg.update_bar(bar)\n\n def on_5min_bar(self, bar: BarData):\n \"\"\"\"\"\"\n for orderid in self.vt_orderids:\n self.cancel_order(orderid)\n self.vt_orderids.clear()\n\n am = self.am\n am.update_bar(bar)\n if not am.inited:\n return\n\n high = pd.Series(am.high_array)\n low = pd.Series(am.low_array)\n close = pd.Series(am.close_array)\n\n range_ = ta.true_range(high, low, close)\n\n basis = ta.sma(close, self.kk_length)\n band = ta.sma(range_, self.kk_length)\n up = basis + self.kk_dev * band\n down = basis - self.kk_dev * band\n\n self.kk_up, self.kk_down = up.iloc[-1], down.iloc[-1]\n\n if self.pos == 0:\n self.intra_trade_high = bar.high_price\n self.intra_trade_low = bar.low_price\n self.send_oco_order(self.kk_up, self.kk_down, self.fixed_size)\n\n elif self.pos > 0:\n self.intra_trade_high = max(self.intra_trade_high, bar.high_price)\n self.intra_trade_low = bar.low_price\n\n vt_orderids = self.sell(self.intra_trade_high * (1 - self.trailing_percent / 100),\n abs(self.pos), True)\n self.vt_orderids.extend(vt_orderids)\n\n elif self.pos < 0:\n self.intra_trade_high = bar.high_price\n self.intra_trade_low = min(self.intra_trade_low, bar.low_price)\n\n vt_orderids = self.cover(self.intra_trade_low * (1 + self.trailing_percent / 100),\n abs(self.pos), True)\n self.vt_orderids.extend(vt_orderids)\n\n self.put_event()\n\n def on_order(self, order: OrderData):\n \"\"\"\n Callback of new order data update.\n \"\"\"\n pass\n\n def on_trade(self, trade: TradeData):\n \"\"\"\n Callback of new trade data update.\n \"\"\"\n if self.pos != 0:\n if self.pos > 0:\n for short_orderid in self.short_vt_orderids:\n self.cancel_order(short_orderid)\n\n elif self.pos < 0:\n for buy_orderid in self.long_vt_orderids:\n self.cancel_order(buy_orderid)\n\n for orderid in (self.long_vt_orderids + self.short_vt_orderids):\n if orderid in self.vt_orderids:\n self.vt_orderids.remove(orderid)\n\n self.put_event()\n\n def send_oco_order(self, buy_price, short_price, volume):\n \"\"\"\"\"\"\n self.long_vt_orderids = self.buy(buy_price, volume, True)\n self.short_vt_orderids = self.short(short_price, volume, True)\n\n 
self.vt_orderids.extend(self.long_vt_orderids)\n self.vt_orderids.extend(self.short_vt_orderids)\n\n def on_stop_order(self, stop_order: StopOrder):\n \"\"\"\n Callback of stop order update.\n \"\"\"\n pass\n"
] | [
[
"pandas.Series"
]
] |
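The `on_5min_bar` callback above builds its channel from pandas_ta: basis and band are simple moving averages of close and true range, and the bounds are basis plus or minus kk_dev times band. A rough recomputation with plain pandas and made-up bars; true range is re-derived by hand here, the strategy itself relies on pandas_ta, and the window is shortened to fit the toy data:

```python
# Illustrative recomputation of the channel bounds used in on_5min_bar().
import pandas as pd

high  = pd.Series([10.2, 10.4, 10.3, 10.6, 10.8])
low   = pd.Series([ 9.8, 10.0, 10.1, 10.2, 10.5])
close = pd.Series([10.0, 10.3, 10.2, 10.5, 10.7])

prev_close = close.shift(1)
true_range = pd.concat([high - low,
                        (high - prev_close).abs(),
                        (low - prev_close).abs()], axis=1).max(axis=1)

kk_length, kk_dev = 3, 1.6          # the strategy defaults to kk_length = 11
basis = close.rolling(kk_length).mean()
band  = true_range.rolling(kk_length).mean()
kk_up   = (basis + kk_dev * band).iloc[-1]
kk_down = (basis - kk_dev * band).iloc[-1]
print(round(kk_up, 3), round(kk_down, 3))
```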
abhishekmaha23/synthetic_data_generation_attempt | [
"99ee858cdf405641fd0e2797bfc14c1a736547eb"
] | [
"util/utils.py"
] | [
"import matplotlib.pyplot as plt\nfrom datetime import datetime\nimport numpy as np\nimport torch\nimport os\nimport time\nfrom scipy.ndimage.filters import gaussian_filter1d\nfrom itertools import repeat\nimport copy\nimport gym\n# import torch.multiprocessing as multiprocessing\nimport multiprocessing\nimport pickle\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom collections import defaultdict, Counter\n\n\ndef plot_fig(x, y, std=None, title=None, draw_grid=True,\n xlabel=None, ylabel=None, add_legend=False,\n label=None, display_fig=True,\n save_fig=False, save_name=None, xlim=[None, None], ylim=[None, None], img_size=(10, 6), update_fig=None, smooth_fill=False, smooth_fill_sigma=None):\n # plt.ion()\n plt.figure(figsize=img_size)\n assert type(x) == list, 'X is not a list'\n # assert type(y) == list, 'Y is not a list'\n if update_fig is None:\n fig, = plt.plot(x, y, label=label)\n axes = plt.gca()\n axes.set_autoscale_on(True) # enable autoscale\n axes.autoscale_view(True, True, True)\n axes.set_xlim(xlim)\n axes.set_ylim(ylim)\n if std is not None:\n if type(std) == list:\n lower_list = [y[i] - std[i] for i in range(len(x))]\n upper_list = [y[i] + std[i] for i in range(len(x))]\n else:\n lower_list = [i - std for i in y]\n upper_list = [i + std for i in y]\n if smooth_fill is True:\n if smooth_fill_sigma is None:\n smooth_fill_sigma = (len(x) // 1000) + 1\n # smooth upper and lower parts of the filling\n lower_list = gaussian_filter1d(lower_list, sigma=smooth_fill_sigma)\n upper_list = gaussian_filter1d(upper_list, sigma=smooth_fill_sigma)\n plt.fill_between(x, lower_list, upper_list, color='b', alpha=.1)\n if add_legend:\n plt.legend(fontsize=22)\n plt.xticks(size=22)\n plt.yticks(size=22)\n if draw_grid:\n plt.grid()\n if xlabel is not None:\n plt.xlabel(xlabel, fontsize=22)\n if ylabel is not None:\n plt.ylabel(None, fontsize=22)\n if title is not None:\n plt.title(title, fontsize=22)\n\n if save_fig:\n if save_name is None:\n save_name = 'plot_-'+xlabel+' vs. 
' + ylabel + str(datetime.now()) + \".pdf\"\n plt.savefig(save_name)\n if display_fig:\n plt.show()\n return fig, axes\n\n\ndef test_agent_performance(agent, env, device, num_tests=10, agent_id=999, mode='supervised'):\n agent.eval()\n rewards_so_far = []\n time_steps_so_far = []\n agent_type = agent.action_space_type\n actions_dict = defaultdict(int)\n # print(agent_id, 'starting testing of agent', agent.dim)\n for test in range(num_tests):\n done = False\n observation = env.reset()\n i = 0\n time_step = 0\n while not done:\n action = agent.get_action(agent(observation), context='test')\n\n if agent_type == 'discrete':\n actions_dict[action] += 1\n elif agent_type == 'continuous':\n action = action.reshape(agent.dim[-1],)\n observation, reward, done, info = env.step(action)\n # if mode == 'ga':\n # if i < -200:\n # done = True\n i += reward\n time_step += 1\n rewards_so_far.append(i)\n time_steps_so_far.append(time_step)\n if mode == 'ga':\n return np.mean(rewards_so_far), np.std(rewards_so_far), np.mean(time_steps_so_far)\n else:\n return np.mean(rewards_so_far), np.std(rewards_so_far), np.mean(time_steps_so_far), actions_dict\n\n\ndef test_generator_performance(random_actor_sampler, generator, actual_test_env, config, generator_input_sampler, multi=True, mode='normal'):\n multi_performances = []\n trained_agents = []\n if mode == 'retest':\n outer_test_loops = config.retest_generator_testing_loops\n inner_test_loops = config.retest_actor_testing_loops\n else:\n outer_test_loops = config.generator_testing_loops\n inner_test_loops = config.actor_testing_loops\n count = Counter(defaultdict(int))\n time_steps = []\n for i in range(outer_test_loops):\n # new_actor = get_random_agent(config.state_dim, config.action_dim, config.env_config.action_space_type, batch_norm=config.batch_norm)\n new_actor = random_actor_sampler.sample()\n new_actor_opt = torch.optim.SGD(new_actor.parameters(), lr=config.actor_init_learning_rate)\n for inner_loop_num in range(config.inner_loop_iterations):\n new_actor_opt.zero_grad()\n actor_criterion = torch.nn.MSELoss(reduction='sum')\n # softmax_actor_predicted_actions = new_actor(generator(get_generator_input()))\n generator_input, actor_target_output = generator_input_sampler.sample()\n softmax_actor_predicted_actions = new_actor(generator(generator_input), source='generator')\n new_actor_loss = actor_criterion(softmax_actor_predicted_actions, actor_target_output)\n new_actor_loss.backward()\n new_actor_opt.step()\n trained_agents.append(new_actor)\n if multi is False:\n performances_mean = []\n performances_std = []\n for agent in trained_agents:\n performance = test_agent_performance(agent, actual_test_env, config.dev, num_tests=inner_test_loops, mode='ga')\n performances_mean.append(performance[0])\n performances_std.append(performance[1])\n time_steps.append(performance[2])\n # count += Counter(performance[3])\n else:\n pool = multiprocessing.Pool(5)\n envs_list = []\n for i in range(len(trained_agents)):\n envs_list.append(copy.deepcopy(actual_test_env))\n ids = [i for i in range(len(trained_agents))]\n multi_performances = pool.starmap(test_agent_performance, zip(trained_agents, envs_list, repeat(config.dev), repeat(inner_test_loops), ids, repeat('ga')))\n performances_mean, performances_std, time_steps = zip(*multi_performances)\n pool.close()\n\n return np.mean(performances_mean), np.mean(performances_std), np.mean(time_steps), sorted(dict(count).items())\n\n\ndef check_convergence_of_generator(config, random_actor_sampler, current_generator_performance, 
generator, test_env, generator_input_sampler):\n def reached_threshold(generator_performance):\n return generator_performance[0] >= config.env_config.reward_threshold and generator_performance[1] <= config.env_config.reward_std_threshold\n\n if reached_threshold(current_generator_performance):\n print('Crossed threshold once, testing again.')\n final_test_performance_mean = [current_generator_performance[0]]\n final_test_performance_std = [current_generator_performance[1]]\n for i in range(1):\n extra_generator_performance = test_generator_performance(random_actor_sampler, generator, test_env, config,\n generator_input_sampler, multi=config.multi,\n mode='retest')\n final_test_performance_mean.append(extra_generator_performance[0])\n final_test_performance_std.append(extra_generator_performance[1])\n final_test_performance = (np.mean(final_test_performance_mean), np.mean(final_test_performance_std))\n if reached_threshold(final_test_performance):\n config.ended_early = True\n config.converged_performance_mean = final_test_performance[0]\n config.converged_performance_std = final_test_performance[1]\n return config.ended_early\n\n\ndef check_convergence_of_actor(config, actor, current_actor_performance, test_env):\n def reached_threshold(actor_performance):\n return actor_performance[0] >= config.env_config.reward_threshold and actor_performance[1] <= config.env_config.reward_std_threshold\n\n if reached_threshold(current_actor_performance):\n print('Crossed threshold once, testing again.')\n final_test_performance_mean = [current_actor_performance[0]]\n final_test_performance_std = [current_actor_performance[1]]\n for i in range(1):\n extra_actor_performance = test_agent_performance(actor, test_env, config.dev)\n final_test_performance_mean.append(extra_actor_performance[0])\n final_test_performance_std.append(extra_actor_performance[1])\n final_test_performance = (np.mean(final_test_performance_mean), np.mean(final_test_performance_std))\n if reached_threshold(final_test_performance):\n config.ended_early = True\n config.converged_performance_mean = final_test_performance[0]\n config.converged_performance_std = final_test_performance[1]\n return config.ended_early\n\n\ndef generate_backprop_plots(config, logs, show_plots=True):\n\n generator_losses_smoothed = gaussian_filter1d(logs.meta_losses, sigma=config.plot_smoothing_sigma)\n plot_fig([i for i in range(len(generator_losses_smoothed))],\n generator_losses_smoothed,\n title=\"Meta Losses\", draw_grid=True, xlabel=\"steps\", ylabel=\"generator loss\", add_legend=True,\n label=\"generator loss\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Generator_losses.pdf'))\n\n critic_losses_smoothed = gaussian_filter1d(logs.critic_losses, sigma=config.plot_smoothing_sigma)\n plot_fig([i for i in range(len(critic_losses_smoothed))],\n critic_losses_smoothed,\n title=\"Critic Losses\", draw_grid=True, xlabel=\"steps\", ylabel=\"critic loss\", add_legend=True,\n label=\"critic loss\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Critic_losses.pdf'))\n\n actor_performances_mean_plot_smoothed = gaussian_filter1d(logs.new_actor_performances_mean, sigma=config.plot_smoothing_sigma)\n plot_fig([i * config.plot_save_iterations for i in range(len(actor_performances_mean_plot_smoothed))],\n actor_performances_mean_plot_smoothed, std=logs.new_actor_performances_std,\n title=\"New actor performance\", draw_grid=True, xlabel=\"steps\", ylabel=\"a2c actor perf\", add_legend=True,\n 
label=\"New actor performance\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Actor_perf_smoothed.pdf'),\n ylim=[config.env_config.plot_performance_low, config.env_config.plot_performance_high],\n smooth_fill=False, smooth_fill_sigma=config.plot_smoothing_sigma)\n plot_fig([i * config.plot_save_iterations for i in range(len(logs.new_actor_performances_mean))],\n logs.new_actor_performances_mean,\n std=logs.new_actor_performances_std, title=str(config.algorithm)+\" Actor Performance\", draw_grid=True, xlabel=\"steps\",\n ylabel=\"reward\", add_legend=True, label=\"reward\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Actor_Perf.pdf'), ylim=[config.env_config.plot_performance_low, config.env_config.plot_performance_high])\n\n\ndef generate_rl_plots(config, logs, show_plots=True):\n actor_losses_smoothed = gaussian_filter1d(logs.actor_losses, sigma=config.plot_smoothing_sigma)\n plot_fig([i for i in range(len(actor_losses_smoothed))],\n actor_losses_smoothed,\n title=\"Actor Losses\", draw_grid=True, xlabel=\"steps\", ylabel=\"actor loss\", add_legend=True,\n label=\"actor loss\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Actor_losses.pdf'))\n\n critic_losses_smoothed = gaussian_filter1d(logs.critic_losses, sigma=config.plot_smoothing_sigma)\n plot_fig([i for i in range(len(critic_losses_smoothed))],\n critic_losses_smoothed,\n title=\"Critic Losses\", draw_grid=True, xlabel=\"steps\", ylabel=\"critic loss\", add_legend=True,\n label=\"critic loss\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Critic_losses.pdf'))\n\n actor_performances_mean_plot_smoothed = gaussian_filter1d(logs.actor_performances_mean, sigma=config.plot_smoothing_sigma)\n plot_fig([i * config.plot_save_iterations for i in range(len(actor_performances_mean_plot_smoothed))],\n actor_performances_mean_plot_smoothed, std=logs.actor_performances_std,\n title=\"Actor performance\", draw_grid=True, xlabel=\"steps\", ylabel=\"reward\", add_legend=True,\n label=\"reward\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Actor_perf_smoothed.pdf'),\n ylim=[config.env_config.plot_performance_low, config.env_config.plot_performance_high],\n smooth_fill=False, smooth_fill_sigma=config.plot_smoothing_sigma)\n plot_fig([i * config.plot_save_iterations for i in range(len(logs.actor_performances_mean))],\n logs.actor_performances_mean,\n std=logs.actor_performances_std, title=\"Actor Performance\", draw_grid=True, xlabel=\"steps\",\n ylabel=\"reward\", add_legend=True, label=\"reward\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'Actor_Perf.pdf'), ylim=[config.env_config.plot_performance_low, config.env_config.plot_performance_high])\n\n\ndef generate_ga_plots(config, logs, show_plots=True):\n generator_performances_mean_smoothed = gaussian_filter1d(logs.generator_performance_mean,\n sigma=config.plot_smoothing_sigma)\n plot_fig([i * config.plot_save_iterations for i in range(len(generator_performances_mean_smoothed))],\n generator_performances_mean_smoothed, std=logs.generator_performance_std,\n title=\"Generator-actor performance\", draw_grid=True, xlabel=\"steps\", ylabel=\"actor perf\", add_legend=True,\n label=\"reward\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'GA-Generator-actor_perf_smoothed.pdf'), ylim=[config.env_config.plot_performance_low, 
config.env_config.plot_performance_high], smooth_fill=True,\n smooth_fill_sigma=config.plot_smoothing_sigma)\n plot_fig([i * config.plot_save_iterations for i in range(len(logs.generator_performance_mean))],\n logs.generator_performance_mean,\n std=logs.generator_performance_std, title=\"Generator-actor performance\", draw_grid=True, xlabel=\"steps\",\n ylabel=\"Actor perf\", add_legend=True, label=\"reward\", display_fig=show_plots, save_fig=True,\n save_name=os.path.join(config.log_path, 'GA-Generator-actor_perf.pdf'), ylim=[config.env_config.plot_performance_low, config.env_config.plot_performance_high])\n\n\ndef generate_all_logs(config, log):\n time_taken = time.time() - config.run_id\n config_file_name = os.path.join(config.log_path, 'config.log')\n with open(config_file_name, 'a+') as f:\n f.write('time_taken--' + str(time_taken) + '\\n')\n variables = vars(config)\n for item in variables:\n f.write(str(item) + '--' + str(variables[item]))\n f.write('\\n')\n log_file_name = os.path.join(config.log_path, 'data.log')\n with open(log_file_name, 'a+') as f:\n variables = vars(log)\n for item in variables:\n f.write(str(item) + '--' + str(variables[item]))\n f.write('\\n')\n\n\ndef save_meta_models(generator, critic, save_path):\n torch.save(generator.state_dict(), os.path.join(save_path, 'generator.pt'))\n torch.save(critic.state_dict(), os.path.join(save_path, 'critic.pt'))\n\n\ndef save_rl_models(actor, critic, save_path):\n torch.save(actor.state_dict(), os.path.join(save_path, 'actor.pt'))\n torch.save(critic.state_dict(), os.path.join(save_path, 'critic.pt'))\n\n\ndef save_object(obj, filename):\n with open(filename, 'wb') as output: # Overwrites any existing file.\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)\n\n\ndef generate_discrete_one_hot_output(action_space_size, num_generator_samples):\n # Creating expected output for the generator\n # num_generator_samples must be divisible by action_space_size\n with torch.no_grad():\n indices = list(np.linspace(0, num_generator_samples, num=action_space_size, endpoint=False, dtype=np.int8))\n inclusive_indices = list(np.linspace(0, num_generator_samples, num=action_space_size+1, dtype=np.int8))\n generator_one_hot_expected_actions = torch.zeros((num_generator_samples, action_space_size))\n for idx, num in enumerate(indices):\n generator_one_hot_expected_actions[num:inclusive_indices[idx+1], idx] += 1\n return generator_one_hot_expected_actions"
] | [
[
"torch.no_grad",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"scipy.ndimage.filters.gaussian_filter1d",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.use",
"numpy.linspace",
"numpy.mean",
"numpy.std",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.legend",
"torch.nn.MSELoss",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.show",
"torch.zeros",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xlabel"
]
] |
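`generate_discrete_one_hot_output` at the end of the utils row above builds a block of one-hot target rows split evenly across the action space. A compact numpy sketch of the same layout; the original returns a torch tensor and derives the block boundaries with np.linspace, while this simplified version assumes the sample count is divisible by the action count:

```python
# One-hot target block split evenly over the action space (numpy illustration).
import numpy as np

def one_hot_targets(action_space_size, num_samples):
    assert num_samples % action_space_size == 0
    targets = np.zeros((num_samples, action_space_size))
    block = num_samples // action_space_size
    for idx in range(action_space_size):
        targets[idx * block:(idx + 1) * block, idx] = 1
    return targets

print(one_hot_targets(3, 6))
# [[1. 0. 0.]
#  [1. 0. 0.]
#  [0. 1. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]
#  [0. 0. 1.]]
```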
ngbsLab/Korean-Speech-Recognition | [
"3867bf7d23222da6812c9b98a93d3c6f7b3c80fc"
] | [
"package/loss.py"
] | [
"import torch\nimport torch.nn as nn\n\nclass LabelSmoothingLoss(nn.Module):\n \"\"\"\n Provides Label-Smoothing loss.\n\n Args:\n class_num (int): the number of classfication\n ignore_index (int): Indexes that are ignored when calculating loss\n smoothing (float): ratio of smoothing (confidence = 1.0 - smoothing)\n dim (int): dimention of calculation loss\n logit (torch.Tensor): probability distribution value from model and it has a logarithm shape\n target (torch.Tensor): ground-thruth encoded to integers which directly point a word in label\n\n Returns: label_smoothed\n - **label_smoothed** (float): sum of loss\n\n Reference:\n https://github.com/pytorch/pytorch/issues/7455\n \"\"\"\n def __init__(self, class_num, ignore_index, smoothing=0.1, dim=-1):\n super(LabelSmoothingLoss, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.class_num = class_num\n self.dim = dim\n self.ignore_index = ignore_index\n\n def forward(self, logit, target):\n with torch.no_grad():\n label_smoothed = torch.zeros_like(logit)\n label_smoothed.fill_(self.smoothing / (self.class_num - 1))\n label_smoothed.scatter_(1, target.data.unsqueeze(1), self.confidence)\n label_smoothed[target == self.ignore_index, :] = 0\n\n return torch.sum(-label_smoothed * logit)"
] | [
[
"torch.sum",
"torch.zeros_like",
"torch.no_grad"
]
] |
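The `forward` method of `LabelSmoothingLoss` above never uses the raw integer targets directly; it first builds a smoothed target distribution. A short torch snippet showing what that distribution looks like for a toy batch, with the class count, smoothing value and targets made up for the example:

```python
# Each row puts `confidence` on the true class, spreads `smoothing` evenly over the
# remaining classes, and zeroes rows whose target equals ignore_index.
import torch

class_num, smoothing, ignore_index = 4, 0.1, 0
confidence = 1.0 - smoothing
target = torch.tensor([2, 1, 0])          # third row hits the ignored index

label_smoothed = torch.zeros(3, class_num)
label_smoothed.fill_(smoothing / (class_num - 1))
label_smoothed.scatter_(1, target.unsqueeze(1), confidence)
label_smoothed[target == ignore_index, :] = 0
print(label_smoothed)
# tensor([[0.0333, 0.0333, 0.9000, 0.0333],
#         [0.0333, 0.9000, 0.0333, 0.0333],
#         [0.0000, 0.0000, 0.0000, 0.0000]])
```

Dividing by `class_num - 1` before the scatter is what keeps each non-ignored row summing to one once the true class is overwritten with `confidence`.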
bhevencious/EvalNE | [
"a62bd11901ea891535f6cb2a05e7abb65b1f3e6f"
] | [
"evalne/evaluation/pipeline.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Mara Alexandru Cristian\n# Contact: [email protected]\n# Date: 18/12/2018\n\n# The manager module contains functions and classes for reading, parsing and using a configuration file to\n# run a complete evaluation of network embedding methods.\n\nfrom __future__ import division\n\nimport os\n\nfrom evalne.utils import util\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import GridSearchCV\n\n\nclass EvalSetup(object):\n r\"\"\"\n This class is a wrapper that parses the config file and provides the options as properties of the class.\n Also performs basic input checks.\n\n Parameters\n ----------\n configpath : basestring\n The path of the configuration file.\n \"\"\"\n\n def __init__(self, configpath):\n # Import config parser\n try:\n from ConfigParser import ConfigParser\n except ImportError:\n from configparser import ConfigParser\n\n # Read the configuration file\n config = ConfigParser()\n config.read(configpath)\n self._config = config\n\n self._check_inpaths()\n self._check_methods('opne')\n self._check_methods('other')\n self._checkparams()\n self._check_edges()\n self._check_task()\n\n def _check_task(self):\n task = self.__getattribute__('task')\n if task not in ['lp', 'nc', 'nr']:\n raise ValueError('Incorrect value for `TASK`. Options are: `lp`, `nc` or `nr`.')\n if self.__getattribute__('task') == 'lp' and self.__getattribute__('lp_num_edge_splits') is None:\n raise ValueError('Parameter `LP_NUM_EDGE_SPLITS` needs to be defined.')\n if self.__getattribute__('task') == 'nr' and self.__getattribute__('nr_edge_samp_frac') is None:\n raise ValueError('Parameter `NR_EDGE_SAMP_FRAC` needs to be defined.')\n if self.__getattribute__('task') == 'nc':\n if self.__getattribute__('nc_num_node_splits') is None or self.__getattribute__('nc_node_fracs') is None:\n raise ValueError('Parameters `NC_NUM_NODE_SPLITS` and `NC_NODE_FRACS` need to be defined.')\n if all(x == 'ne' for x in self.__getattribute__('embtype_other')):\n pass\n else:\n raise ValueError('TASK = `nc` is currently only supported for node embedding methods.')\n\n def _check_edges(self):\n if self.__getattribute__('traintest_frac') is None or self.__getattribute__('trainvalid_frac') is None:\n raise ValueError('Train/test and train/validation fractions are required!')\n if self.__getattribute__('traintest_frac') == 0.0:\n raise ValueError('The train/test fraction, `TRAINTEST_FRAC`, can not be 0!')\n if self.__getattribute__('trainvalid_frac') == 0.0:\n raise ValueError('The train/valid fraction, `TRAINVALID_FRAC`, can not be 0!')\n if self.__getattribute__('fe_ratio') == 0.0:\n raise ValueError('The ratio of false edges, `FE_RATIO`, can not be 0!')\n\n def _check_inpaths(self):\n numnws = len(self.__getattribute__('names'))\n if self.__getattribute__('task') == 'nc' and self.__getattribute__('labelpaths') is None:\n raise ValueError('LABELPATHS for each network are required for node classification!')\n for k in self._config.options('NETWORKS'):\n if self.__getattribute__('task') == 'nc':\n if k != 'directed' and len(self.__getattribute__(k)) != numnws:\n raise ValueError('Parameter `{}` in `NETWORKS` section does not have the required num. 
entries ({})'\n .format(k, self.__getattribute__(k)))\n else:\n if k != 'directed' and k != 'labelpaths' and len(self.__getattribute__(k)) != numnws:\n raise ValueError('Parameter `{}` in `NETWORKS` section does not have the required num. entries ({})'\n .format(k, self.__getattribute__(k)))\n # Check if the input file exist\n for path in self.__getattribute__('inpaths'):\n if not os.path.exists(path):\n raise ValueError('Input network path {} does not exist'.format(path))\n\n def _check_methods(self, library):\n names = self.__getattribute__('names_' + library)\n methods = self.__getattribute__('methods_' + library)\n if names is not None and methods is not None and len(names) != len(methods):\n raise ValueError('Mismatch in the number of `NAMES` and `METHODS` to run in section `{} METHODS`'\n .format(library))\n\n def _checkparams(self):\n # Check if the maximize attribute is a correct one\n if self.__getattribute__('task') == 'nc':\n if self.__getattribute__('maximize') not in ['f1_micro', 'f1_macro', 'f1_weighted']:\n raise ValueError('The selected metric in `REPORT.MAXIMIZE` does not exist!')\n # Check if the scores attribute is a correct one\n if self.__getattribute__('scores') not in ['', 'f1_micro', 'f1_macro', 'f1_weighted', 'all']:\n raise ValueError('The selected metric in `REPORT.SCORES` does not exist!')\n else:\n if self.__getattribute__('maximize') not in ['auroc', 'f_score', 'precision', 'recall',\n 'accuracy', 'fallout', 'miss']:\n raise ValueError('The selected metric in `REPORT.MAXIMIZE` does not exist!')\n # Check if the scores attribute is a correct one\n if self.__getattribute__('scores') not in ['', 'auroc', 'f_score', 'precision', 'recall', 'accuracy',\n 'fallout', 'miss', 'all']:\n raise ValueError('The selected metric in `REPORT.SCORES` does not exist!')\n # Check if the curves attribute is a correct one\n if self.__getattribute__('curves') not in ['', 'roc', 'pr', 'all']:\n raise ValueError('The value of `REPORT.CURVES` is incorrect!')\n\n def getlist(self, section, option, dtype):\n r\"\"\"\n Returns option as a list of specified type, split by any kind of white space.\n\n Parameters\n ----------\n section : basestring\n The config file section name.\n option : basestring\n The config file option name.\n dtype : primitive type\n The type to which the output should be cast.\n\n Returns\n -------\n list : list\n A list of elements cast to the specified primitive type.\n \"\"\"\n res = self._config.get(section, option).split()\n if len(res) == 0 or res[0] == '' or res[0] == 'None':\n return None\n else:\n return list(map(dtype, res))\n\n def getboollist(self, section, option):\n r\"\"\"\n Returns option as a list of booleans split by any kind of white space.\n Elements such as 'True', 'true', '1', 'yes', 'on' are considered True.\n Elements such as 'False', 'false', '0', 'no', 'off' are considered False.\n\n Parameters\n ----------\n section : basestring\n The config file section name.\n option : basestring\n The config file option name.\n\n Returns\n -------\n list : list\n A list of booleans.\n \"\"\"\n res = self._config.get(section, option).split()\n if len(res) == 0 or res[0] == '' or res[0] == 'None':\n return None\n else:\n r = list()\n for elem in res:\n if elem in ['True', 'true', '1', 'yes', 'on']:\n r.append(True)\n elif elem in ['False', 'false', '0', 'no', 'off']:\n r.append(False)\n return r\n\n def getlinelist(self, section, option):\n r\"\"\"\n Returns option as a list of string, split specifically by a newline.\n\n Parameters\n ----------\n section 
: basestring\n The config file section name.\n option : basestring\n The config file option name.\n\n Returns\n -------\n list : list\n A list of strings.\n \"\"\"\n res = self._config.get(section, option).split('\\n')\n if len(res) == 0 or res[0] == '' or res[0] == 'None':\n return None\n else:\n return list(res)\n\n def getseplist(self, section, option):\n r\"\"\"\n Processes an options which contains a list of separators.\n Transforms \\s, \\t and \\n to white space, tab and new line respectively\n\n Parameters\n ----------\n section : basestring\n The config file section name.\n option : basestring\n The config file option name.\n\n Returns\n -------\n list : list\n A list of strings.\n \"\"\"\n separators = self.getlist(section, option, str)\n res = list()\n for sep in separators:\n s = sep.strip('\\'')\n if s == '\\\\t':\n s = '\\t'\n elif s == '\\\\s':\n s = ' '\n elif s == '\\\\n':\n s = '\\n'\n res.append(s)\n return list(res)\n\n def gettuneparams(self, library):\n r\"\"\"\n Processes the tune parameters option. Generates a list of Nones the size of the number of methods.\n The list is filled in order with each line found in the TUNE_PARAMS option.\n\n Parameters\n ----------\n library : basestring\n This parameter indicates if the TUNE_PARAMETERS option processed if from OPNE METHODS of OTHER METHODS.\n\n Returns\n -------\n tune_params : list\n A list of string containing the parameters that need to be tuned.\n \"\"\"\n methods = self.__getattribute__('methods_' + library)\n if library == 'opne':\n tune_params = self.getlinelist('OPENNE METHODS', 'tune_params_opne')\n elif library == 'other':\n tune_params = self.getlinelist('OTHER METHODS', 'tune_params_other')\n else:\n raise ValueError('Attribute name {}, does not exist'.format(library))\n if tune_params is None:\n tune_params = list()\n for i in range(len(methods) - len(tune_params)):\n tune_params.append(None)\n return tune_params\n\n @property\n def task(self):\n return self._config.get('GENERAL', 'task')\n\n @property\n def lp_num_edge_splits(self):\n return self._config.getint('GENERAL', 'lp_num_edge_splits')\n\n @property\n def nc_num_node_splits(self):\n return self._config.getint('GENERAL', 'nc_num_node_splits')\n\n @property\n def nc_node_fracs(self):\n return self.getlist('GENERAL', 'nc_node_fracs', float)\n\n @property\n def nr_edge_samp_frac(self):\n aux = self._config.getfloat('GENERAL', 'nr_edge_samp_frac')\n if aux > 1.0:\n return aux/100\n else:\n return aux\n\n @property\n def edge_embedding_methods(self):\n return self.getlist('GENERAL', 'edge_embedding_methods', str)\n\n @property\n def lp_model(self):\n model = self._config.get('GENERAL', 'lp_model')\n if model == 'LogisticRegression':\n return LogisticRegression(solver='liblinear')\n elif model == 'LogisticRegressionCV':\n return LogisticRegressionCV(Cs=10, cv=5, penalty='l2', scoring='roc_auc', solver='lbfgs', max_iter=100)\n elif model == 'DecisionTreeClassifier':\n return DecisionTreeClassifier()\n elif model == 'SVM':\n parameters = {'C': [0.1, 1, 10, 100, 1000]}\n return GridSearchCV(LinearSVC(), parameters, cv=5)\n else:\n return util.auto_import(model)\n\n @property\n def embed_dim(self):\n return self._config.getint('GENERAL', 'embed_dim')\n\n @property\n def timeout(self):\n res = self._config.get('GENERAL', 'timeout')\n if res == '' or res == 'None' or res == 'NONE':\n return None\n else:\n return int(res)\n\n @property\n def verbose(self):\n return self._config.getboolean('GENERAL', 'verbose')\n\n @property\n def seed(self):\n val = 
self._config.get('GENERAL', 'seed')\n if val == '' or val == 'None':\n return None\n else:\n return int(val)\n\n @property\n def names(self):\n return self.getlist('NETWORKS', 'names', str)\n\n @property\n def inpaths(self):\n return self.getlinelist('NETWORKS', 'inpaths')\n\n @property\n def directed(self):\n return self._config.getboolean('NETWORKS', 'directed')\n\n @property\n def separators(self):\n return self.getseplist('NETWORKS', 'separators')\n\n @property\n def comments(self):\n return self.getseplist('NETWORKS', 'comments')\n\n @property\n def labelpaths(self):\n return self.getlinelist('NETWORKS', 'labelpaths')\n\n @property\n def relabel(self):\n return self._config.getboolean('PREPROCESSING', 'relabel')\n\n @property\n def del_selfloops(self):\n return self._config.getboolean('PREPROCESSING', 'del_selfloops')\n\n @property\n def save_prep_nw(self):\n return self._config.getboolean('PREPROCESSING', 'save_prep_nw')\n\n @property\n def write_stats(self):\n return self._config.getboolean('PREPROCESSING', 'write_stats')\n\n @property\n def delimiter(self):\n return self._config.get('PREPROCESSING', 'delimiter').strip('\\'')\n\n @property\n def traintest_frac(self):\n return self._config.getfloat('EDGESPLIT', 'traintest_frac')\n\n @property\n def trainvalid_frac(self):\n return self._config.getfloat('EDGESPLIT', 'trainvalid_frac')\n\n @property\n def split_alg(self):\n return self._config.get('EDGESPLIT', 'split_alg')\n\n @property\n def owa(self):\n return self._config.getboolean('EDGESPLIT', 'owa')\n\n @property\n def fe_ratio(self):\n return self._config.getfloat('EDGESPLIT', 'fe_ratio')\n\n @property\n def lp_baselines(self):\n return self.getlinelist('BASELINES', 'lp_baselines')\n\n @property\n def neighbourhood(self):\n return self.getlist('BASELINES', 'neighbourhood', str)\n\n @property\n def names_opne(self):\n return self.getlist('OPENNE METHODS', 'names_opne', str)\n\n @property\n def methods_opne(self):\n return self.getlinelist('OPENNE METHODS', 'methods_opne')\n\n @property\n def tune_params_opne(self):\n return self.gettuneparams('opne')\n\n @property\n def names_other(self):\n return self.getlist('OTHER METHODS', 'names_other', str)\n\n @property\n def embtype_other(self):\n return self.getlist('OTHER METHODS', 'embtype_other', str)\n\n @property\n def write_weights_other(self):\n return self.getboollist('OTHER METHODS', 'write_weights_other')\n\n @property\n def write_dir_other(self):\n return self.getboollist('OTHER METHODS', 'write_dir_other')\n\n @property\n def methods_other(self):\n return self.getlinelist('OTHER METHODS', 'methods_other')\n\n @property\n def tune_params_other(self):\n return self.gettuneparams('other')\n\n @property\n def output_format_other(self):\n return self.getlinelist('OTHER METHODS', 'output_format_other')\n\n @property\n def input_delim_other(self):\n return self.getseplist('OTHER METHODS', 'input_delim_other')\n\n @property\n def output_delim_other(self):\n return self.getseplist('OTHER METHODS', 'output_delim_other')\n\n @property\n def maximize(self):\n return self._config.get('REPORT', 'maximize')\n\n @property\n def scores(self):\n return self._config.get('REPORT', 'scores')\n\n @property\n def curves(self):\n return self._config.get('REPORT', 'curves')\n\n @property\n def precatk_vals(self):\n return self.getlist('REPORT', 'precatk_vals', int)\n"
] | [
[
"sklearn.tree.DecisionTreeClassifier",
"sklearn.linear_model.LogisticRegressionCV",
"sklearn.svm.LinearSVC",
"sklearn.linear_model.LogisticRegression"
]
] |
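`EvalSetup` above distinguishes whitespace-separated options (`getlist`) from newline-separated ones (`getlinelist`, used for values such as paths that may contain spaces). A self-contained configparser sketch of those two conventions; the option names echo the properties above, but the snippet stands on its own and is not tied to EvalNE's full config schema:

```python
# Two parsing conventions: split on whitespace and cast, or split on newlines.
from configparser import ConfigParser

config = ConfigParser()
config.read_string("""
[GENERAL]
nc_node_fracs = 0.1 0.5 0.9

[NETWORKS]
inpaths =
    data/net one.edgelist
    data/net_two.edgelist
""")

fracs = list(map(float, config.get('GENERAL', 'nc_node_fracs').split()))
paths = [p for p in config.get('NETWORKS', 'inpaths').split('\n') if p]
print(fracs)   # [0.1, 0.5, 0.9]
print(paths)   # ['data/net one.edgelist', 'data/net_two.edgelist']
```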
GiovanniCalore/BigGAN-Tensorflow-master | [
"1fcf72fc8b9cbfdd047b9641f656afcfd0972604"
] | [
"metrics/perceptual_path_length.py"
] | [
"# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.\n#\n# This work is made available under the Nvidia Source Code License-NC.\n# To view a copy of this license, visit\n# https://nvlabs.github.io/stylegan2/license.html\n\n\"\"\"Perceptual Path Length (PPL).\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nimport dnnlib.tflib as tflib\n\nfrom metrics import metric_base\nimport misc\nimport sys\n\n#----------------------------------------------------------------------------\n\n# Normalize batch of vectors.\ndef normalize(v):\n return v / tf.sqrt(tf.reduce_sum(tf.square(v), axis=-1, keepdims=True))\n\n# Spherical interpolation of a batch of vectors.\ndef slerp(a, b, t):\n a = normalize(a)\n b = normalize(b)\n d = tf.reduce_sum(a * b, axis=-1, keepdims=True)\n p = t * tf.math.acos(d)\n c = normalize(b - d * a)\n d = a * tf.math.cos(p) + c * tf.math.sin(p)\n return normalize(d)\n\n#----------------------------------------------------------------------------\n\nclass PPL(metric_base.MetricBase):\n def __init__(self, num_samples, epsilon, space, sampling, crop, minibatch_per_gpu, Gs_overrides, **kwargs):\n assert space in ['z', 'w']\n assert sampling in ['full', 'end']\n super().__init__(**kwargs)\n self.num_samples = num_samples\n self.epsilon = epsilon\n self.space = space\n self.sampling = sampling\n self.crop = crop\n self.minibatch_per_gpu = minibatch_per_gpu\n self.Gs_overrides = Gs_overrides\n\n def _evaluate(self, sess, fake_images_random_normal, interp_images, Gs_kwargs, num_gpus):\n #Gs_kwargs = dict(Gs_kwargs)\n #Gs_kwargs.update(self.Gs_overrides)\n minibatch_size = num_gpus * self.minibatch_per_gpu\n\n # Construct TensorFlow graph.\n distance_expr = []\n for gpu_idx in range(num_gpus):\n with tf.device('/gpu:%d' % gpu_idx):\n #Gs_clone = Gs.clone()\n #noise_vars = [var for name, var in Gs_clone.components.synthesis.vars.items() if name.startswith('noise')]\n\n # Generate random latents and interpolation t-values.\n #lat_t01 = tf.random_normal(shape=[self.minibatch_per_gpu * 2, 128])\n #lerp_t = tf.random_uniform([self.minibatch_per_gpu], 0.0, 1.0 if self.sampling == 'full' else 0.0)\n #print(lat_t01)\n #print(lerp_t)\n #labels = tf.reshape(tf.tile(self._get_random_labels_tf(self.minibatch_per_gpu), [1, 2]), [self.minibatch_per_gpu * 2, -1])\n\n '''\n # Interpolate in W or Z.\n if self.space == 'w':\n print('ERROR')\n sys.exit()\n dlat_t01 = Gs_clone.components.mapping.get_output_for(lat_t01, labels, **Gs_kwargs)\n dlat_t01 = tf.cast(dlat_t01, tf.float32)\n dlat_t0, dlat_t1 = dlat_t01[0::2], dlat_t01[1::2]\n dlat_e0 = tflib.lerp(dlat_t0, dlat_t1, lerp_t[:, np.newaxis, np.newaxis])\n dlat_e1 = tflib.lerp(dlat_t0, dlat_t1, lerp_t[:, np.newaxis, np.newaxis] + self.epsilon)\n dlat_e01 = tf.reshape(tf.stack([dlat_e0, dlat_e1], axis=1), dlat_t01.shape)\n else: # space == 'z'\n lat_t0, lat_t1 = lat_t01[0::2], lat_t01[1::2]\n print(lat_t0)\n print(lat_t1)\n lat_e0 = slerp(lat_t0, lat_t1, lerp_t[:, np.newaxis])\n lat_e1 = slerp(lat_t0, lat_t1, lerp_t[:, np.newaxis] + self.epsilon)\n print(lat_e0)\n print(lat_e1)\n\n lat_e01 = tf.reshape(tf.stack([lat_e0, lat_e1], axis=1), lat_t01.shape)\n print(lat_e01)\n\n #dlat_e01 = Gs_clone.components.mapping.get_output_for(lat_e01, labels, **Gs_kwargs)\n dlat_e01 = Gs(lat_e01)\n sys.exit()\n '''\n\n # Synthesize images.\n #with tf.control_dependencies([var.initializer for var in noise_vars]): # use same noise inputs for the entire minibatch\n #images = Gs_clone.components.synthesis.get_output_for(dlat_e01, randomize_noise=False, 
**Gs_kwargs)\n images = tf.cast(tf.transpose(interp_images, perm=[0, 3, 1, 2]), tf.float32)\n\n '''\n # Crop only the face region.\n if self.crop:\n c = int(images.shape[2] // 8)\n images = images[:, :, c*3 : c*7, c*2 : c*6]\n\n # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.\n factor = images.shape[2] // 256\n if factor > 1:\n images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor])\n images = tf.reduce_mean(images, axis=[3,5])\n\n '''\n # Scale dynamic range from [-1,1] to [0,255] for VGG.\n images = (images + 1) * (255 / 2)\n\n # Evaluate perceptual distance.\n img_e0, img_e1 = images[0::2], images[1::2]\n distance_measure = misc.load_pkl('http://d36zk2xti64re0.cloudfront.net/stylegan1/networks/metrics/vgg16_zhang_perceptual.pkl')\n distance_expr.append(distance_measure.get_output_for(img_e0, img_e1) * (1 / self.epsilon**2))\n\n # Sampling loop.\n all_distances = []\n for begin in range(0, self.num_samples, 8):\n print(\"PPL: \" + str(begin), end=\"\\r\")\n self._report_progress(begin, self.num_samples)\n all_distances += sess.run(distance_expr)\n all_distances = np.concatenate(all_distances, axis=0)\n\n # Reject outliers.\n lo = np.percentile(all_distances, 1, interpolation='lower')\n hi = np.percentile(all_distances, 99, interpolation='higher')\n filtered_distances = np.extract(np.logical_and(lo <= all_distances, all_distances <= hi), all_distances)\n self._report_result(np.mean(filtered_distances))\n\n#----------------------------------------------------------------------------\n"
] | [
[
"tensorflow.math.acos",
"numpy.logical_and",
"tensorflow.device",
"tensorflow.math.cos",
"tensorflow.math.sin",
"tensorflow.square",
"numpy.concatenate",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"numpy.percentile",
"numpy.mean"
]
] |
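The PPL metric above samples pairs of latents and walks between them with `slerp`, spherical interpolation on the unit sphere. A numpy transcription of that helper, written only to illustrate the math; the file itself defines it with TensorFlow ops:

```python
# Spherical interpolation between two latent vectors, as used for z-space sampling.
import numpy as np

def normalize(v):
    return v / np.sqrt(np.sum(np.square(v), axis=-1, keepdims=True))

def slerp(a, b, t):
    a, b = normalize(a), normalize(b)
    d = np.sum(a * b, axis=-1, keepdims=True)   # cosine of the angle between a and b
    p = t * np.arccos(d)                        # angle swept at interpolation step t
    c = normalize(b - d * a)                    # component of b orthogonal to a
    return normalize(a * np.cos(p) + c * np.sin(p))

a = np.array([[1.0, 0.0]])
b = np.array([[0.0, 1.0]])
print(slerp(a, b, 0.5))   # roughly [[0.7071, 0.7071]]: halfway along the quarter circle
```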
rupei/probability | [
"4aa1ee652853a19c4e80d39216c3fa535ed3e589"
] | [
"tensorflow_probability/python/internal/backend/numpy/numpy_array.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Numpy implementations of TensorFlow general top-level functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n# Dependency imports\nimport numpy as np\nimport numpy as onp # pylint: disable=reimported\n\nfrom tensorflow_probability.python.internal.backend.numpy import _utils as utils\nfrom tensorflow_probability.python.internal.backend.numpy import ops\nfrom tensorflow_probability.python.internal.backend.numpy.linalg_impl import einsum\nfrom tensorflow_probability.python.internal.backend.numpy.linalg_impl import norm\nfrom tensorflow_probability.python.internal.backend.numpy.linalg_impl import tensordot\n\n\n__all__ = [\n 'concat',\n 'einsum',\n 'expand_dims',\n 'fill',\n 'gather',\n 'gather_nd',\n 'linspace',\n 'meshgrid',\n 'norm',\n 'one_hot',\n 'ones',\n 'ones_like',\n 'pad',\n 'range',\n 'rank',\n 'reshape',\n 'reverse',\n 'repeat',\n 'roll',\n 'searchsorted',\n 'shape',\n 'size',\n 'slice',\n 'split',\n 'squeeze',\n 'stack',\n 'tensordot',\n 'tile',\n 'transpose',\n 'unstack',\n 'where',\n 'zeros',\n 'zeros_like',\n # 'boolean_mask',\n # 'foldl',\n # 'foldr',\n]\n\n\nJAX_MODE = False\n\n\nif JAX_MODE:\n import jax # pylint: disable=g-import-not-at-top\n\n\ndef _astuple(x):\n try:\n return tuple(x)\n except TypeError:\n return x\n\n\ndef _gather( # pylint: disable=unused-argument\n params,\n indices,\n validate_indices=None,\n axis=None,\n batch_dims=0,\n name=None):\n \"\"\"gather.\"\"\"\n indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)\n if validate_indices is not None:\n raise NotImplementedError(\n 'Argument `validate_indices != None` is currently unimplemented.')\n if batch_dims < 0:\n raise NotImplementedError('Negative `batch_dims` is currently unsupported.')\n if axis is None:\n axis = batch_dims\n if axis < 0:\n axis = axis + len(params.shape)\n # NOTE: For only the numpy backend, this function could create a single result\n # ndarray and use in-place updates. 
For the Jax backend, this function\n # vmaps `np.take`.\n if JAX_MODE:\n take = lambda params, indices: np.take(params, indices, # pylint: disable=g-long-lambda\n axis=axis - batch_dims)\n take = functools.reduce(\n lambda g, f: f(g), [jax.vmap] * int(batch_dims),\n take\n )\n return take(params, indices)\n params = ops.convert_to_tensor(params)\n res = np.array([\n np.take(params[i], indices[i], axis=axis - batch_dims)\n for i in np.ndindex(*params.shape[:batch_dims])\n ])\n return np.reshape(\n res,\n params.shape[:axis] + indices.shape[batch_dims:] + params.shape[axis+1:])\n\n\ndef _args_to_matching_arrays(args_list, dtype_hint=None):\n \"\"\"Converts a list to array using the first element for dtype.\n\n This method is used to match the behavior of `tf.concat`.\n\n Args:\n args_list: A list or tuple of arguments.\n dtype_hint: An optional hint used when converting the args to tensors.\n Returns:\n A list of tensors.\n \"\"\"\n dtype = None\n for arg in args_list:\n if ops.is_tensor(arg):\n dtype = arg.dtype\n break\n if dtype is None:\n ret = []\n for arg in args_list:\n ret.append(ops.convert_to_tensor(arg, dtype, dtype_hint=dtype_hint))\n if dtype is None:\n dtype = ret[-1].dtype\n else:\n ret = [ops.convert_to_tensor(arg, dtype) for arg in args_list]\n return ret\n\n\ndef _concat(values, axis, name='concat'):\n del name\n if axis is None:\n raise ValueError('None values for `axis` argument not supported.')\n if not isinstance(values, (list, tuple)):\n values = [values]\n if len(values) == 1:\n return values[0]\n values = _args_to_matching_arrays(values)\n return np.concatenate(values, axis=axis)\n\n\ndef _gather_nd_single(params, indices):\n idx = tuple(np.moveaxis(indices, -1, 0))\n return params[idx]\n\n\ndef _gather_nd( # pylint: disable=unused-argument\n params,\n indices,\n batch_dims=0,\n name=None):\n \"\"\"gather_nd.\"\"\"\n indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)\n if batch_dims < 0:\n raise NotImplementedError('Negative `batch_dims` is currently unsupported.')\n if not JAX_MODE and batch_dims > 0:\n raise NotImplementedError(\n '`batch_dims > 0` currently unsupported in NumPy backend.')\n gather_nd_ = _gather_nd_single\n if JAX_MODE:\n gather_nd_ = functools.reduce(\n lambda g, f: f(g), [jax.vmap] * int(batch_dims),\n gather_nd_\n )\n return gather_nd_(params, indices)\n\n\ndef _linspace(start, stop, num, name=None, axis=0): # pylint: disable=unused-argument\n \"\"\"Match TF behavior with np.linspace.\"\"\"\n start = ops.convert_to_tensor(start)\n # Match TF weirdness arising from truediv(int32, int32) = float64\n if np.issubdtype(start.dtype, np.integer):\n start = start.astype(np.float64)\n stop = ops.convert_to_tensor(stop, dtype=start.dtype)\n num = ops.convert_to_tensor(num, dtype_hint=np.int32)\n if not np.issubdtype(num.dtype, np.integer):\n raise TypeError('`num` must be an integer but got {}'.format(num.dtype))\n num = num.astype(np.int32)\n return np.linspace(start, stop, num, axis=axis).astype(start.dtype)\n\n\ndef _one_hot( # pylint: disable=unused-argument\n indices,\n depth,\n on_value=None,\n off_value=None,\n axis=None,\n dtype=None,\n name=None):\n \"\"\"One hot.\"\"\"\n if on_value is None:\n on_value = 1\n if off_value is None:\n off_value = 0\n if dtype is None:\n dtype = utils.common_dtype([on_value, off_value], np.float32)\n indices = np.array(indices)\n depth = np.array(depth)\n pred = abs(np.arange(depth, dtype=indices.dtype) -\n indices[..., np.newaxis]) > 0\n y_out = np.where(pred, np.array(off_value, dtype), 
np.array(on_value, dtype))\n if axis is not None:\n y_out = np.moveaxis(y_out, -1, axis)\n return y_out\n\n\ndef _ones_like(input, dtype=None, name=None): # pylint: disable=redefined-builtin,unused-argument\n return np.ones_like(input, dtype=utils.numpy_dtype(dtype))\n\n\n# TODO(b/136555907): Add unit-test.\ndef _pad( # pylint: disable=unused-argument\n tensor,\n paddings,\n mode='CONSTANT',\n constant_values=0,\n name=None):\n tensor = ops.convert_to_tensor(tensor)\n constant_values = ops.convert_to_tensor(constant_values)\n return np.pad(\n tensor, paddings,\n mode=mode.lower(),\n constant_values=constant_values)\n\n\ndef _range(start, limit=None, delta=1, dtype=None, name='range'): # pylint: disable=unused-argument\n \"\"\"Emulates tf.range.\"\"\"\n # Emulating dtype inference logic from tf.range\n dtype = utils.numpy_dtype(dtype)\n infer_dtype = lambda t: ops.convert_to_tensor(t, dtype=dtype).dtype\n # We must keep start, limit, and delta static np.array since they determine\n # the size of the result array, which JAX requires to be static.\n start = onp.array(start, dtype=infer_dtype(start))\n limit = None if limit is None else onp.array(limit, dtype=infer_dtype(limit))\n delta = onp.array(delta, dtype=infer_dtype(delta))\n if dtype is None:\n dtype_hierarchy = [np.int32, np.int64, np.float32, np.float64]\n inferred_dtype = max([arg.dtype for arg in [start, limit, delta]\n if arg is not None],\n key=dtype_hierarchy.index)\n else:\n inferred_dtype = dtype\n return np.arange(start, limit, delta).astype(inferred_dtype)\n\n\ndef _reverse(tensor, axis, name=None): # pylint: disable=unused-argument\n if np.array(axis).ndim == 0:\n return np.flip(tensor, axis)\n for ax in axis:\n tensor = np.flip(tensor, ax)\n return tensor\n\n\nif JAX_MODE:\n _searchsorted_vmap_sides = {\n side: jax.vmap(functools.partial(jax.numpy.searchsorted, side=side))\n for side in ('left', 'right')\n }\n\n\ndef _searchsorted( # pylint: disable=unused-argument\n sorted_sequence,\n values,\n side='left',\n out_type=np.int32,\n name=None):\n \"\"\"Find indices for insertion for list to remain sorted.\"\"\"\n if JAX_MODE:\n try:\n func = _searchsorted_vmap_sides[side]\n except KeyError:\n raise ValueError(\"'%s' is an invalid value for keyword 'side'\" % side)\n sorted_sequence_2d = np.reshape(sorted_sequence,\n (-1, sorted_sequence.shape[-1]))\n values_2d = np.reshape(values, (-1, values.shape[-1]))\n if sorted_sequence_2d.shape[0] != values_2d.shape[0]:\n raise ValueError('Leading dim_size of both tensors must match.')\n return np.reshape(func(sorted_sequence_2d, values_2d).astype(out_type),\n values.shape)\n # We don't use np.searchsorted in the numpy backend because it doesn't support\n # batching.\n sorted_sequence = sorted_sequence[..., np.newaxis, :]\n values = values[..., :, np.newaxis]\n if side == 'left':\n is_in_right_location = sorted_sequence < values\n elif side == 'right':\n is_in_right_location = sorted_sequence <= values\n return np.sum(is_in_right_location, axis=-1).astype(out_type)\n\n\ndef _shape(input, out_type=np.int32, name=None): # pylint: disable=redefined-builtin,unused-argument\n return ops.convert_to_tensor(ops.convert_to_tensor(input).shape).astype(\n out_type)\n\n\ndef _size(input, out_type=np.int32, name=None): # pylint: disable=redefined-builtin, unused-argument\n return np.asarray(np.prod(ops.convert_to_tensor(input).shape), dtype=out_type)\n\n\nbuiltin_slice = slice # pylint: disable=invalid-name\n\n\ndef _slice(input_, begin, size, name=None): # pylint: 
disable=unused-argument,redefined-outer-name\n slices = tuple(\n builtin_slice(b, b + s if s != -1 else None) for b, s in zip(begin, size))\n return input_[slices]\n\n\ndef _split(value, num_or_size_splits, axis=0, num=None, name='split'): # pylint: disable=unused-argument\n \"\"\"Map tf.split -> np.split.\"\"\"\n indices_or_sections = onp.array(num_or_size_splits)\n if indices_or_sections.ndim == 1:\n if any(idx == -1 for idx in indices_or_sections):\n # Numpy parameterizes by split indices and returns nsplits+1 arrays.\n total_splits = sum(idx for idx in indices_or_sections if idx != -1)\n remainder = int(max(0, np.array(value).shape[axis] - total_splits))\n indices_or_sections = [\n idx if idx != -1 else remainder for idx in indices_or_sections\n ]\n indices_or_sections = onp.cumsum(onp.array(indices_or_sections))[:-1]\n return np.split(value, indices_or_sections, axis)\n\n\ndef _transpose(a, perm=None, conjugate=False, name='transpose'): # pylint: disable=unused-argument\n x = np.transpose(a, perm)\n return np.conjugate(x) if conjugate else x\n\n\ndef _zeros_like(input, dtype=None, name=None): # pylint: disable=redefined-builtin,unused-argument\n return np.zeros_like(input, dtype=utils.numpy_dtype(dtype))\n\n\n# --- Begin Public Functions --------------------------------------------------\n\n\nconcat = utils.copy_docstring(\n 'tf.concat',\n _concat)\n\n\nexpand_dims = utils.copy_docstring(\n 'tf.expand_dims',\n lambda input, axis, name=None: np.expand_dims(input, axis))\n\nfill = utils.copy_docstring(\n 'tf.fill',\n lambda dims, value, name=None: np.full(dims, value))\n\ngather = utils.copy_docstring(\n 'tf.gather',\n _gather)\n\ngather_nd = utils.copy_docstring(\n 'tf.gather_nd',\n _gather_nd)\n\nreverse = utils.copy_docstring('tf.reverse', _reverse)\n\nlinspace = utils.copy_docstring(\n 'tf.linspace',\n _linspace)\n\nmeshgrid = utils.copy_docstring(\n 'tf.meshgrid',\n np.meshgrid)\n\nnorm = utils.copy_docstring(\n 'tf.norm',\n norm)\n\none_hot = utils.copy_docstring(\n 'tf.one_hot',\n _one_hot)\n\nones = utils.copy_docstring(\n 'tf.ones',\n lambda shape, dtype=np.float32, name=None: np.ones( # pylint: disable=g-long-lambda\n shape, utils.numpy_dtype(dtype)))\n\nones_like = utils.copy_docstring(\n 'tf.ones_like',\n _ones_like)\n\npad = utils.copy_docstring(\n 'tf.pad',\n _pad)\n\nrange = utils.copy_docstring( # pylint: disable=redefined-builtin\n 'tf.range',\n _range)\n\nrank = utils.copy_docstring(\n 'tf.rank',\n lambda input, name=None: np.int32(np.array(input).ndim)) # pylint: disable=redefined-builtin,g-long-lambda\n\nrepeat = utils.copy_docstring(\n 'tf.repeat',\n lambda input, repeats, axis=None, name=None: np.repeat( # pylint: disable=g-long-lambda\n input, repeats, axis=axis))\n\nreshape = utils.copy_docstring(\n 'tf.reshape',\n lambda tensor, shape, name=None: np.reshape( # pylint: disable=g-long-lambda\n ops.convert_to_tensor(tensor), shape))\n\nroll = utils.copy_docstring(\n 'tf.roll',\n lambda input, shift, axis: np.roll(input, shift, axis)) # pylint: disable=unnecessary-lambda\n\nsearchsorted = utils.copy_docstring(\n 'tf.searchsorted',\n _searchsorted)\n\nshape = utils.copy_docstring(\n 'tf.shape',\n _shape)\n\nsize = utils.copy_docstring(\n 'tf.size',\n _size)\n\nslice = utils.copy_docstring( # pylint: disable=redefined-builtin\n 'tf.slice', _slice)\n\nsplit = utils.copy_docstring('tf.split', _split)\n\nsqueeze = utils.copy_docstring(\n 'tf.squeeze',\n lambda input, axis=None, name=None: np.squeeze(input, _astuple(axis)))\n\nstack = utils.copy_docstring(\n 'tf.stack', 
lambda values, axis=0, name='stack': np.moveaxis( # pylint: disable=g-long-lambda\n ops.convert_to_tensor(values), 0, axis))\n\ntile = utils.copy_docstring(\n 'tf.tile',\n lambda input, multiples, name=None: np.tile(np.array(input), multiples))\n\ntranspose = utils.copy_docstring(\n 'tf.transpose',\n _transpose)\n\nunstack = utils.copy_docstring(\n 'tf.unstack',\n lambda value, num=None, axis=0, name='unstack': list( # pylint: disable=g-long-lambda\n np.squeeze(x, axis=axis) for x in\n np.split(value, value.shape[axis] if num is None else num, axis)))\n\nwhere = utils.copy_docstring(\n 'tf.where',\n lambda condition, x=None, y=None, name=None: np.where(condition, x, y))\n\nzeros = utils.copy_docstring(\n 'tf.zeros',\n lambda shape, dtype=np.float32, name=None: np.zeros( # pylint: disable=g-long-lambda\n shape, utils.numpy_dtype(dtype)))\n\nzeros_like = utils.copy_docstring(\n 'tf.zeros_like',\n _zeros_like)\n"
] | [
[
"numpy.sum",
"numpy.take",
"numpy.issubdtype",
"numpy.moveaxis",
"numpy.ndindex",
"numpy.transpose",
"numpy.reshape",
"numpy.expand_dims",
"numpy.where",
"numpy.linspace",
"numpy.repeat",
"numpy.arange",
"numpy.roll",
"numpy.squeeze",
"numpy.conjugate",
"numpy.flip",
"numpy.array",
"numpy.concatenate",
"numpy.full",
"numpy.split"
]
] |
foobug/suzieq | [
"c5927616a0e1a1fd9283f2a3eeb120d24ff0f2b5"
] | [
"suzieq/poller/services/evpnVni.py"
] | [
"import re\nimport numpy as np\n\nfrom suzieq.poller.services.service import Service\nfrom suzieq.utils import (convert_rangestring_to_list,\n convert_macaddr_format_to_colon)\n\n\nclass EvpnVniService(Service):\n \"\"\"evpnVni service. Different class because output needs to be munged\"\"\"\n\n def clean_json_input(self, data):\n \"\"\"FRR JSON data needs some work\"\"\"\n\n devtype = data.get(\"devtype\", None)\n if any(x == devtype for x in [\"cumulus\", \"sonic\", \"linux\"]):\n data['data'] = '[' + re.sub(r'}\\n\\n{\\n', r'},\\n\\n{\\n',\n data['data']) + ']'\n return data['data']\n\n def _clean_eos_data(self, processed_data, raw_data):\n new_entries = []\n\n if not processed_data:\n return processed_data\n\n for entry in processed_data:\n vni2vrfmap = {}\n for vrf in entry['_vrf2VniMap']:\n vni2vrfmap[entry['_vrf2VniMap'][vrf]] = vrf\n\n vtepMap = entry.get('_vlan2VtepMap', {})\n replType = entry.get('replicationType')\n if replType == 'headendVcs':\n replType = 'ingressBGP'\n elif entry.get('mcastGroup', '') != \"0.0.0.0\":\n replType = \"multicast\"\n else:\n replType = ''\n for vlan in entry['_vlan2VniMap']:\n new_entry = {}\n vni = entry['_vlan2VniMap'][vlan].get('vni', 0)\n new_entry['vni'] = vni\n new_entry['vrf'] = vni2vrfmap.get(vni, '')\n new_entry['state'] = entry['state']\n new_entry['ifname'] = entry['ifname']\n new_entry['vlan'] = vlan\n new_entry['priVtepIp'] = entry['priVtepIp']\n vteplist = vtepMap.get(vlan, {})\n vteplist = (vteplist.get('remoteVtepAddr', []) +\n vteplist.get('remoteVtepAddr6', []))\n new_entry['remoteVtepList'] = vteplist\n new_entry['replicationType'] = replType\n new_entry['mcastGroup'] = entry['mcastGroup']\n if new_entry['vrf']:\n new_entry['type'] = 'L3'\n else:\n new_entry['type'] = 'L2'\n new_entry['ifname'] = entry.get('ifname', '')\n\n new_entries.append(new_entry)\n\n processed_data = new_entries\n return processed_data\n\n def _clean_cumulus_data(self, processed_data, raw_data):\n \"\"\"Clean out null entries among other cleanup\"\"\"\n\n del_indices = []\n for i, entry in enumerate(processed_data):\n if entry['vni'] is None:\n del_indices.append(i)\n if entry['mcastGroup'] and entry['mcastGroup'] != \"0.0.0.0\":\n entry['replicationType'] = 'multicast'\n elif entry['type'] != 'L3':\n entry['replicationType'] = 'ingressBGP'\n entry['mcastGroup'] = \"0.0.0.0\"\n else:\n entry['replicationType'] = ''\n entry['mcastGroup'] = \"0.0.0.0\"\n entry['remoteVtepList'] = None\n\n entry['state'] = entry.get('state', 'up').lower()\n entry['l2VniList'] = set(entry['l2VniList'])\n processed_data = np.delete(processed_data, del_indices).tolist()\n\n return processed_data\n\n def _clean_nxos_data(self, processed_data, raw_data):\n \"\"\"Merge peer records with VNI records to yield VNI-based records\"\"\"\n\n vni_dict = {}\n drop_indices = []\n\n for i, entry in enumerate(processed_data):\n if not entry['vni']:\n drop_indices.append(i)\n continue\n\n if entry['_entryType'] == 'VNI':\n type, vrf = entry['type'].split()\n if type == 'L3':\n entry['vrf'] = vrf[1:-1] # strip off '[' and ']'\n entry['type'] = type\n if 'sviState' in entry:\n entry['state'] = entry['sviState'].split()[0].lower()\n if re.search(r'[0-9.]+', entry.get('replicationType', '')):\n entry['mcastGroup'] = entry['replicationType']\n entry['replicationType'] = 'multicast'\n elif entry['type'] != 'L3':\n entry['replicationType'] = 'ingressBGP'\n entry['mcastGroup'] = \"0.0.0.0\"\n else:\n entry['replicationType'] = ''\n entry['mcastGroup'] = \"0.0.0.0\"\n\n # we'll fill this with the peers 
entries\n entry['remoteVtepList'] = []\n entry['state'] = entry['state'].lower()\n entry['vlan'] = int(entry['vlan'])\n vni_dict[entry['vni']] = entry\n\n elif entry['_entryType'] == 'peers':\n vni_list = convert_rangestring_to_list(\n entry.get('_vniList', ''))\n for vni in vni_list:\n vni_entry = vni_dict.get(vni, None)\n if vni_entry:\n vni_entry['remoteVtepList'].append(entry['vni'])\n drop_indices.append(i)\n\n elif entry['_entryType'] == 'iface':\n if entry.get('encapType', '') != \"VXLAN\":\n continue\n\n for vni in vni_dict:\n if vni_dict[vni]['ifname'] != entry['ifname']:\n continue\n vni_dict[vni]['priVtepIp'] = entry.get('priVtepIp', '')\n secIP = entry.get('secVtepIp', '')\n if secIP == '0.0.0.0':\n secIP = ''\n vni_dict[vni]['secVtepIp'] = secIP\n vni_dict[vni]['routerMac'] = convert_macaddr_format_to_colon(\n entry.get('routerMac', '00:00:00:00:00:00'))\n\n drop_indices.append(i)\n\n processed_data = np.delete(processed_data, drop_indices).tolist()\n\n return processed_data\n\n def _clean_junos_data(self, processed_data, raw_data):\n\n newntries = {}\n\n for entry in processed_data:\n if entry['_entryType'] == 'instance':\n if entry['_vniList'] is None:\n continue\n for i, vni in enumerate(entry['_vniList']):\n irb_iflist = entry.get('_irbIfList', [])\n vrflist = entry.get('_vrfList', [])\n vlan = entry['_vlanList'][i]\n irbif = f'irb.{vlan}'\n try:\n index = irb_iflist.index(irbif)\n vrf = vrflist[index]\n except ValueError:\n vrf = ''\n except IndexError:\n vrf = ''\n\n if vni not in newntries:\n vni_entry = {\n 'vni': int(vni),\n 'remoteVtepList': [],\n 'type': 'L2',\n 'state': 'up',\n 'vlan': int(vlan),\n 'numRemoteVteps': 0,\n 'numMacs': 0,\n 'numArpNd': 0,\n 'vrf': vrf,\n 'os': 'junos'\n }\n newntries[vni] = vni_entry\n continue\n elif entry['_entryType'] == 'l3':\n vni = int(entry.get('vni', '0'))\n priVtepIp = entry.get('priVtepIp', '')\n\n if not priVtepIp and not vni:\n continue\n\n vni_entry = {\n 'vni': vni,\n 'remoteVtepList': [],\n 'priVtepIp': priVtepIp,\n 'type': 'L3',\n 'state': 'up',\n 'numRemoteVteps': 0,\n 'routerMac': entry['routerMac'],\n 'numMacs': 0,\n 'numArpNd': 0,\n 'mcastGroup': '0.0.0.0',\n 'vrf': entry['vrf'],\n 'os': 'junos'\n }\n # Add the primary VTEP IP into the L2 entries as well\n for l2vni in newntries:\n newntries[l2vni]['priVtepIp'] = priVtepIp\n\n newntries[vni] = vni_entry\n continue\n elif entry['_entryType'] == 'remote':\n priVtepIp = entry.get('priVtepIp', '[{\"data\": \"\"}]')[0]['data']\n for i, vni in enumerate(entry.get('_vniList', [])):\n vni_entry = newntries.get(vni, {})\n if not vni_entry:\n vni_entry = {\n 'vni': int(vni),\n 'remoteVtepList': [],\n 'priVtepIp': priVtepIp,\n 'type': 'L2',\n 'state': 'up',\n 'numRemoteVteps': len(entry['_floodVtepList']),\n 'numMacs': 0,\n 'numArpNd': 0,\n 'os': 'junos'\n }\n newntries[vni] = vni_entry\n\n vni_entry['priVtepIp'] = priVtepIp\n if entry['replicationType'][i] == '0.0.0.0':\n vni_entry['replicationType'] = 'ingressBGP'\n vni_entry['mcastGroup'] = \"0.0.0.0\"\n else:\n vni_entry['replicationType'] = 'multicast'\n vni_entry['mcastGroup'] = entry['replicationType'][i]\n\n vni_entry['remoteVtepList'].append(\n entry.get('_floodVtepList', ''))\n\n processed_data = list(newntries.values())\n return processed_data\n"
] | [
[
"numpy.delete"
]
] |
PriyankaH21/astropy | [
"159fb9637ce4acdc60329d20517ed3dc7ba79581"
] | [
"astropy/nddata/tests/test_utils.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom ...tests.helper import assert_quantity_allclose\nfrom ..utils import (extract_array, add_array, subpixel_indices,\n block_reduce, block_replicate,\n overlap_slices, NoOverlapError, PartialOverlapError,\n Cutout2D)\nfrom ...wcs import WCS, Sip\nfrom ...wcs.utils import proj_plane_pixel_area\nfrom ...coordinates import SkyCoord\nfrom ... import units as u\n\ntry:\n import skimage # pylint: disable=W0611\n HAS_SKIMAGE = True\nexcept ImportError:\n HAS_SKIMAGE = False\n\n\ntest_positions = [(10.52, 3.12), (5.62, 12.97), (31.33, 31.77),\n (0.46, 0.94), (20.45, 12.12), (42.24, 24.42)]\n\ntest_position_indices = [(0, 3), (0, 2), (4, 1),\n (4, 2), (4, 3), (3, 4)]\n\ntest_slices = [slice(10.52, 3.12), slice(5.62, 12.97),\n slice(31.33, 31.77), slice(0.46, 0.94),\n slice(20.45, 12.12), slice(42.24, 24.42)]\n\nsubsampling = 5\n\ntest_pos_bad = [(-1, -4), (-1, 0), (6, 2), (6, 6)]\n\n\ndef test_slices_different_dim():\n '''Overlap from arrays with different number of dim is undefined.'''\n with pytest.raises(ValueError) as e:\n overlap_slices((4, 5, 6), (1, 2), (0, 0))\n assert \"the same number of dimensions\" in str(e.value)\n\n\ndef test_slices_pos_different_dim():\n '''Position must have same dim as arrays.'''\n with pytest.raises(ValueError) as e:\n overlap_slices((4, 5), (1, 2), (0, 0, 3))\n assert \"the same number of dimensions\" in str(e.value)\n\n\[email protected]('pos', test_pos_bad)\ndef test_slices_no_overlap(pos):\n '''If there is no overlap between arrays, an error should be raised.'''\n with pytest.raises(NoOverlapError):\n overlap_slices((5, 5), (2, 2), pos)\n\n\ndef test_slices_partial_overlap():\n '''Compute a slice for partially overlapping arrays.'''\n temp = overlap_slices((5,), (3,), (0,))\n assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))\n\n temp = overlap_slices((5,), (3,), (0,), mode='partial')\n assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))\n\n for pos in [0, 4]:\n with pytest.raises(PartialOverlapError) as e:\n temp = overlap_slices((5,), (3,), (pos,), mode='strict')\n assert 'Arrays overlap only partially.' in str(e.value)\n\n\ndef test_slices_overlap_wrong_mode():\n '''Call overlap_slices with non-existing mode.'''\n with pytest.raises(ValueError) as e:\n overlap_slices((5,), (3,), (0,), mode='full')\n assert \"Mode can be only\" in str(e.value)\n\n\ndef test_extract_array_wrong_mode():\n '''Call extract_array with non-existing mode.'''\n with pytest.raises(ValueError) as e:\n extract_array(np.arange(4), (2, ), (0, ), mode='full')\n assert \"Valid modes are 'partial', 'trim', and 'strict'.\" == str(e.value)\n\n\ndef test_extract_array_1d_even():\n '''Extract 1 d arrays.\n\n All dimensions are treated the same, so we can test in 1 dim.\n '''\n assert np.all(extract_array(np.arange(4), (2, ), (0, ), fill_value=-99) == np.array([-99, 0]))\n for i in [1, 2, 3]:\n assert np.all(extract_array(np.arange(4), (2, ), (i, )) == np.array([i - 1, i]))\n assert np.all(extract_array(np.arange(4.), (2, ), (4, ), fill_value=np.inf) == np.array([3, np.inf]))\n\n\ndef test_extract_array_1d_odd():\n '''Extract 1 d arrays.\n\n All dimensions are treated the same, so we can test in 1 dim.\n The first few lines test the most error-prone part: Extraction of an\n array on the boundaries.\n Additional tests (e.g. 
dtype of return array) are done for the last\n case only.\n '''\n assert np.all(extract_array(np.arange(4), (3,), (-1, ), fill_value=-99) == np.array([-99, -99, 0]))\n assert np.all(extract_array(np.arange(4), (3,), (0, ), fill_value=-99) == np.array([-99, 0, 1]))\n for i in [1, 2]:\n assert np.all(extract_array(np.arange(4), (3,), (i, )) == np.array([i-1, i, i+1]))\n assert np.all(extract_array(np.arange(4), (3,), (3, ), fill_value=-99) == np.array([2, 3, -99]))\n arrayin = np.arange(4.)\n extracted = extract_array(arrayin, (3,), (4, ))\n assert extracted[0] == 3\n assert np.isnan(extracted[1]) # since I cannot use `==` to test for nan\n assert extracted.dtype == arrayin.dtype\n\n\ndef test_extract_array_1d():\n \"\"\"In 1d, shape can be int instead of tuple\"\"\"\n assert np.all(extract_array(np.arange(4), 3, (-1, ), fill_value=-99) == np.array([-99, -99, 0]))\n assert np.all(extract_array(np.arange(4), 3, -1, fill_value=-99) == np.array([-99, -99, 0]))\n\n\ndef test_extract_Array_float():\n \"\"\"integer is at bin center\"\"\"\n for a in np.arange(2.51, 3.49, 0.1):\n assert np.all(extract_array(np.arange(5), 3, a) == np.array([2, 3, 4]))\n\n\ndef test_extract_array_1d_trim():\n '''Extract 1 d arrays.\n\n All dimensions are treated the same, so we can test in 1 dim.\n '''\n assert np.all(extract_array(np.arange(4), (2, ), (0, ), mode='trim') == np.array([0]))\n for i in [1, 2, 3]:\n assert np.all(extract_array(np.arange(4), (2, ), (i, ), mode='trim') == np.array([i - 1, i]))\n assert np.all(extract_array(np.arange(4.), (2, ), (4, ), mode='trim') == np.array([3]))\n\n\[email protected]('mode', ['partial', 'trim', 'strict'])\ndef test_extract_array_easy(mode):\n \"\"\"\n Test extract_array utility function.\n\n Test by extracting an array of ones out of an array of zeros.\n \"\"\"\n large_test_array = np.zeros((11, 11))\n small_test_array = np.ones((5, 5))\n large_test_array[3:8, 3:8] = small_test_array\n extracted_array = extract_array(large_test_array, (5, 5), (5, 5), mode=mode)\n assert np.all(extracted_array == small_test_array)\n\n\ndef test_extract_array_return_pos():\n '''Check that the return position is calculated correctly.\n\n The result will differ by mode. 
All test here are done in 1d because it's\n easier to construct correct test cases.\n '''\n large_test_array = np.arange(5)\n for i in np.arange(-1, 6):\n extracted, new_pos = extract_array(large_test_array, 3, i,\n mode='partial', return_position=True)\n assert new_pos == (1, )\n # Now check an array with an even number\n for i, expected in zip([1.49, 1.51, 3], [1.49, 0.51, 1]):\n extracted, new_pos = extract_array(large_test_array, (2,), (i,),\n mode='strict', return_position=True)\n assert new_pos == (expected, )\n # For mode='trim' the answer actually depends\n for i, expected in zip(np.arange(-1, 6), (-1, 0, 1, 1, 1, 1, 1)):\n extracted, new_pos = extract_array(large_test_array, (3,), (i,),\n mode='trim', return_position=True)\n assert new_pos == (expected, )\n\n\ndef test_add_array_odd_shape():\n \"\"\"\n Test add_array utility function.\n\n Test by adding an array of ones out of an array of zeros.\n \"\"\"\n large_test_array = np.zeros((11, 11))\n small_test_array = np.ones((5, 5))\n large_test_array_ref = large_test_array.copy()\n large_test_array_ref[3:8, 3:8] += small_test_array\n\n added_array = add_array(large_test_array, small_test_array, (5, 5))\n assert np.all(added_array == large_test_array_ref)\n\n\ndef test_add_array_even_shape():\n \"\"\"\n Test add_array_2D utility function.\n\n Test by adding an array of ones out of an array of zeros.\n \"\"\"\n large_test_array = np.zeros((11, 11))\n small_test_array = np.ones((4, 4))\n large_test_array_ref = large_test_array.copy()\n large_test_array_ref[0:2, 0:2] += small_test_array[2:4, 2:4]\n\n added_array = add_array(large_test_array, small_test_array, (0, 0))\n assert np.all(added_array == large_test_array_ref)\n\n\[email protected](('position', 'subpixel_index'),\n zip(test_positions, test_position_indices))\ndef test_subpixel_indices(position, subpixel_index):\n \"\"\"\n Test subpixel_indices utility function.\n\n Test by asserting that the function returns correct results for\n given test values.\n \"\"\"\n assert np.all(subpixel_indices(position, subsampling) == subpixel_index)\n\n\[email protected]('not HAS_SKIMAGE')\nclass TestBlockReduce:\n def test_1d(self):\n \"\"\"Test 1D array.\"\"\"\n data = np.arange(4)\n expected = np.array([1, 5])\n result = block_reduce(data, 2)\n assert np.all(result == expected)\n\n def test_1d_mean(self):\n \"\"\"Test 1D array with func=np.mean.\"\"\"\n data = np.arange(4)\n block_size = 2.\n expected = block_reduce(data, block_size, func=np.sum) / block_size\n result_mean = block_reduce(data, block_size, func=np.mean)\n assert np.all(result_mean == expected)\n\n def test_2d(self):\n \"\"\"Test 2D array.\"\"\"\n data = np.arange(4).reshape(2, 2)\n expected = np.array([[6]])\n result = block_reduce(data, 2)\n assert np.all(result == expected)\n\n def test_2d_mean(self):\n \"\"\"Test 2D array with func=np.mean.\"\"\"\n data = np.arange(4).reshape(2, 2)\n block_size = 2.\n expected = (block_reduce(data, block_size, func=np.sum) /\n block_size**2)\n result = block_reduce(data, block_size, func=np.mean)\n assert np.all(result == expected)\n\n def test_2d_trim(self):\n \"\"\"\n Test trimming of 2D array when size is not perfectly divisible\n by block_size.\n \"\"\"\n\n data1 = np.arange(15).reshape(5, 3)\n result1 = block_reduce(data1, 2)\n data2 = data1[0:4, 0:2]\n result2 = block_reduce(data2, 2)\n assert np.all(result1 == result2)\n\n def test_block_size_broadcasting(self):\n \"\"\"Test scalar block_size broadcasting.\"\"\"\n data = np.arange(16).reshape(4, 4)\n result1 = block_reduce(data, 2)\n 
result2 = block_reduce(data, (2, 2))\n assert np.all(result1 == result2)\n\n def test_block_size_len(self):\n \"\"\"Test block_size length.\"\"\"\n data = np.ones((2, 2))\n with pytest.raises(ValueError):\n block_reduce(data, (2, 2, 2))\n\n\[email protected]('not HAS_SKIMAGE')\nclass TestBlockReplicate:\n def test_1d(self):\n \"\"\"Test 1D array.\"\"\"\n data = np.arange(2)\n expected = np.array([0, 0, 0.5, 0.5])\n result = block_replicate(data, 2)\n assert np.all(result == expected)\n\n def test_1d_conserve_sum(self):\n \"\"\"Test 1D array with conserve_sum=False.\"\"\"\n data = np.arange(2)\n block_size = 2.\n expected = block_replicate(data, block_size) * block_size\n result = block_replicate(data, block_size, conserve_sum=False)\n assert np.all(result == expected)\n\n def test_2d(self):\n \"\"\"Test 2D array.\"\"\"\n data = np.arange(2).reshape(2, 1)\n expected = np.array([[0, 0], [0, 0], [0.25, 0.25], [0.25, 0.25]])\n result = block_replicate(data, 2)\n assert np.all(result == expected)\n\n def test_2d_conserve_sum(self):\n \"\"\"Test 2D array with conserve_sum=False.\"\"\"\n data = np.arange(6).reshape(2, 3)\n block_size = 2.\n expected = block_replicate(data, block_size) * block_size**2\n result = block_replicate(data, block_size, conserve_sum=False)\n assert np.all(result == expected)\n\n def test_block_size_broadcasting(self):\n \"\"\"Test scalar block_size broadcasting.\"\"\"\n data = np.arange(4).reshape(2, 2)\n result1 = block_replicate(data, 2)\n result2 = block_replicate(data, (2, 2))\n assert np.all(result1 == result2)\n\n def test_block_size_len(self):\n \"\"\"Test block_size length.\"\"\"\n data = np.arange(5)\n with pytest.raises(ValueError):\n block_replicate(data, (2, 2))\n\n\nclass TestCutout2D:\n def setup_class(self):\n self.data = np.arange(20.).reshape(5, 4)\n self.position = SkyCoord('13h11m29.96s -01d19m18.7s', frame='icrs')\n wcs = WCS(naxis=2)\n rho = np.pi / 3.\n scale = 0.05 / 3600.\n wcs.wcs.cd = [[scale*np.cos(rho), -scale*np.sin(rho)],\n [scale*np.sin(rho), scale*np.cos(rho)]]\n wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']\n wcs.wcs.crval = [self.position.ra.to_value(u.deg),\n self.position.dec.to_value(u.deg)]\n wcs.wcs.crpix = [3, 3]\n self.wcs = wcs\n\n # add SIP\n sipwcs = wcs.deepcopy()\n sipwcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']\n a = np.array(\n [[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],\n [0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],\n [-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],\n [-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],\n [ -2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]\n )\n b = np.array(\n [[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],\n [0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],\n [6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],\n [3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],\n [-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]\n )\n sipwcs.sip = Sip(a, b, None, None, wcs.wcs.crpix)\n sipwcs.wcs.set()\n self.sipwcs = sipwcs\n\n def test_cutout(self):\n sizes = [3, 3*u.pixel, (3, 3), (3*u.pixel, 3*u.pix), (3., 3*u.pixel),\n (2.9, 3.3)]\n for size in sizes:\n position = (2.1, 1.9)\n c = Cutout2D(self.data, position, size)\n assert c.data.shape == (3, 3)\n assert c.data[1, 1] == 10\n assert c.origin_original == (1, 1)\n assert c.origin_cutout == (0, 0)\n assert c.input_position_original == position\n assert_allclose(c.input_position_cutout, (1.1, 0.9))\n assert c.position_original == (2., 2.)\n assert c.position_cutout == (1., 1.)\n assert c.center_original == (2., 
2.)\n assert c.center_cutout == (1., 1.)\n assert c.bbox_original == ((1, 3), (1, 3))\n assert c.bbox_cutout == ((0, 2), (0, 2))\n assert c.slices_original == (slice(1, 4), slice(1, 4))\n assert c.slices_cutout == (slice(0, 3), slice(0, 3))\n\n def test_size_length(self):\n with pytest.raises(ValueError):\n Cutout2D(self.data, (2, 2), (1, 1, 1))\n\n def test_size_units(self):\n for size in [3 * u.cm, (3, 3 * u.K)]:\n with pytest.raises(ValueError):\n Cutout2D(self.data, (2, 2), size)\n\n def test_size_pixel(self):\n \"\"\"\n Check size in derived pixel units.\n \"\"\"\n size = 0.3*u.arcsec / (0.1*u.arcsec/u.pixel)\n c = Cutout2D(self.data, (2, 2), size)\n assert c.data.shape == (3, 3)\n assert c.data[0, 0] == 5\n assert c.slices_original == (slice(1, 4), slice(1, 4))\n assert c.slices_cutout == (slice(0, 3), slice(0, 3))\n\n def test_size_angle(self):\n c = Cutout2D(self.data, (2, 2), (0.1*u.arcsec), wcs=self.wcs)\n assert c.data.shape == (2, 2)\n assert c.data[0, 0] == 5\n assert c.slices_original == (slice(1, 3), slice(1, 3))\n assert c.slices_cutout == (slice(0, 2), slice(0, 2))\n\n def test_size_angle_without_wcs(self):\n with pytest.raises(ValueError):\n Cutout2D(self.data, (2, 2), (3, 3 * u.arcsec))\n\n def test_cutout_trim_overlap(self):\n c = Cutout2D(self.data, (0, 0), (3, 3), mode='trim')\n assert c.data.shape == (2, 2)\n assert c.data[0, 0] == 0\n assert c.slices_original == (slice(0, 2), slice(0, 2))\n assert c.slices_cutout == (slice(0, 2), slice(0, 2))\n\n def test_cutout_partial_overlap(self):\n c = Cutout2D(self.data, (0, 0), (3, 3), mode='partial')\n assert c.data.shape == (3, 3)\n assert c.data[1, 1] == 0\n assert c.slices_original == (slice(0, 2), slice(0, 2))\n assert c.slices_cutout == (slice(1, 3), slice(1, 3))\n\n def test_cutout_partial_overlap_fill_value(self):\n fill_value = -99\n c = Cutout2D(self.data, (0, 0), (3, 3), mode='partial',\n fill_value=fill_value)\n assert c.data.shape == (3, 3)\n assert c.data[1, 1] == 0\n assert c.data[0, 0] == fill_value\n\n def test_copy(self):\n data = np.copy(self.data)\n c = Cutout2D(data, (2, 3), (3, 3))\n xy = (0, 0)\n value = 100.\n c.data[xy] = value\n xy_orig = c.to_original_position(xy)\n yx = xy_orig[::-1]\n assert data[yx] == value\n\n data = np.copy(self.data)\n c2 = Cutout2D(self.data, (2, 3), (3, 3), copy=True)\n c2.data[xy] = value\n assert data[yx] != value\n\n def test_to_from_large(self):\n position = (2, 2)\n c = Cutout2D(self.data, position, (3, 3))\n xy = (0, 0)\n result = c.to_cutout_position(c.to_original_position(xy))\n assert_allclose(result, xy)\n\n def test_skycoord_without_wcs(self):\n with pytest.raises(ValueError):\n Cutout2D(self.data, self.position, (3, 3))\n\n def test_skycoord(self):\n c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs)\n skycoord_original = self.position.from_pixel(c.center_original[1],\n c.center_original[0],\n self.wcs)\n skycoord_cutout = self.position.from_pixel(c.center_cutout[1],\n c.center_cutout[0], c.wcs)\n assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)\n assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)\n\n def test_skycoord_partial(self):\n c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs,\n mode='partial')\n skycoord_original = self.position.from_pixel(c.center_original[1],\n c.center_original[0],\n self.wcs)\n skycoord_cutout = self.position.from_pixel(c.center_cutout[1],\n c.center_cutout[0], c.wcs)\n assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)\n 
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)\n\n def test_naxis_update(self):\n xsize = 2\n ysize = 3\n c = Cutout2D(self.data, self.position, (ysize, xsize), wcs=self.wcs)\n assert c.wcs._naxis[0] == xsize\n assert c.wcs._naxis[1] == ysize\n\n def test_crpix_maps_to_crval(self):\n w = Cutout2D(self.data, (0, 0), (3, 3), wcs=self.sipwcs,\n mode='partial').wcs\n pscale = np.sqrt(proj_plane_pixel_area(w))\n assert_allclose(\n w.wcs_pix2world(*w.wcs.crpix, 1), w.wcs.crval,\n rtol=0.0, atol=1e-6 * pscale\n )\n assert_allclose(\n w.all_pix2world(*w.wcs.crpix, 1), w.wcs.crval,\n rtol=0.0, atol=1e-6 * pscale\n )\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.cos",
"numpy.copy",
"numpy.arange",
"numpy.all",
"numpy.isnan",
"numpy.array",
"numpy.sin",
"numpy.testing.assert_allclose"
]
] |
mateussangalli/SE2DINNets | [
"c4d9b6d2577a5044c243d0eb80ebe5879a7673c9"
] | [
"train_SE2DINNet.py"
] | [
"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.keras import layers, regularizers\nfrom tensorflow.keras.utils import to_categorical\nimport matplotlib.pyplot as plt\nimport os\nimport argparse\n\nfrom SE2DIN import *\n\n\nfrom load_data import load_data\n\n\nmnist_rot_dir = 'mnist_rot'\nmnist_12k_dir = 'mnist12k'\nmodel_dir = 'models'\n\n\n\n\nparser = argparse.ArgumentParser(description='trains a SE2DINNet with the specified parameters(some parts of the architecture are fixed)')\nparser.add_argument('-o', '--order', type=int, default=2, help='order of the differential invariants')\nparser.add_argument('-d', '--dropout', type=int, default=30, help='dropout rate in percentage between 1 x 1 convolutions')\nparser.add_argument('-w', '--weight_decay', type=float, default=1e-4, help='weight decay')\nparser.add_argument('-f', '--data_dir', type=str, default='./', help='directory containing both MNIST-Rot and MNIST12K dataset in separate folders')\nparser.add_argument('--train_on_mnist12k', action='store_true', help='whether to train on MNIST12K or MNIST-Rot(False)')\nparser.add_argument('--lr', type=float, default=1e-2, help='initial learning rate')\nparser.add_argument('--batch_size', type=int, default=256, help='batch size')\nparser.add_argument('--epochs', type=int, default=2000, help='maximum number of epochs')\nparser.add_argument('--n_filters', type=int, default=20, help='number of filters in the middle layers')\n\nargs = parser.parse_args()\n\nweight_decay = args.weight_decay\ndropout = args.dropout / 100\nn_filters = args.n_filters\nlr = args.lr\nbatch_size = args.batch_size\nepochs = args.epochs\norder = args.order\ndata_dir = args.data_dir\n\nif args.train_on_mnist12k:\n _, _, (x_test, y_test) = load_data(os.path.join(data_dir, mnist_rot_dir))\n (x_train, y_train), (x_val, y_val), _ = load_data(os.path.join(data_dir, mnist_12k_dir))\nelse:\n (x_train, y_train), (x_val, y_val), (x_test, y_test) = load_data(os.path.join(data_dir, mnist_rot_dir))\n\ndef se2din_block(n_in, n_out, sigma, width, order=2, dropout=0, weight_decay=0):\n block = tf.keras.models.Sequential()\n block.add(layers.Input((None, None, n_in)))\n block.add(SE2DIN(sigma, width, order=order))\n block.add(layers.BatchNormalization(beta_regularizer=regularizers.l2(weight_decay),\n gamma_regularizer=regularizers.l2(weight_decay)))\n\n block.add(layers.Conv2D(n_out,1,\n kernel_regularizer=regularizers.l2(weight_decay),\n bias_regularizer=regularizers.l2(weight_decay)))\n block.add(layers.BatchNormalization(beta_regularizer=regularizers.l2(weight_decay),\n gamma_regularizer=regularizers.l2(weight_decay)))\n block.add(layers.ReLU())\n if dropout > 0:\n block.add(layers.Dropout(dropout))\n\n block.add(layers.Conv2D(n_out,1,\n kernel_regularizer=regularizers.l2(weight_decay),\n bias_regularizer=regularizers.l2(weight_decay)))\n block.add(layers.BatchNormalization(beta_regularizer=regularizers.l2(weight_decay),\n gamma_regularizer=regularizers.l2(weight_decay)))\n \n #block.add(layers.ReLU())\n return block\n\ndef get_model(n_filters, weight_decay, dropout, lr, order=2):\n input_layer = layers.Input((None,None,1))\n \n \n x = se2din_block(1,n_filters,1.,4,2,dropout,weight_decay)(input_layer)\n features0 = tf.keras.models.Model(input_layer, x)\n x += se2din_block(n_filters,n_filters,1.,4,order,dropout,weight_decay)(x)\n x += se2din_block(n_filters,n_filters,2.,8,order,dropout,weight_decay)(x)\n x += se2din_block(n_filters,n_filters,2.,8,order,dropout,weight_decay)(x)\n x += 
se2din_block(n_filters,n_filters,2.,8,order,dropout,weight_decay)(x)\n \n x = se2din_block(n_filters,10,2.,8,2,0,weight_decay)(x)\n \n features1 = tf.keras.models.Model(input_layer, x)\n \n x = layers.GlobalMaxPooling2D()(x)\n \n \n model = tf.keras.models.Model(input_layer, x)\n model.summary()\n model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.Adam(lr), metrics=['accuracy'])\n return model\n\n \n\n\nmodel = get_model(n_filters, weight_decay, dropout, lr, order)\n\n# reduces learning rate when validation loss stagnates\ncb_lr = tf.keras.callbacks.ReduceLROnPlateau(\n monitor='val_loss', factor=0.1, patience=100, verbose=0,\n mode='auto', min_delta=0.0001, min_lr=1e-5\n )\n\n# stops training if validation loss remains unchanged for too long\ncb_es = tf.keras.callbacks.EarlyStopping(\n monitor='val_loss', min_delta=0, patience=300, verbose=0,\n mode='auto', restore_best_weights=True\n )\n \nmodel.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=epochs, batch_size=batch_size, callbacks=[cb_lr, cb_es], verbose=2)\nmodel.evaluate(x_test, y_test)\n\n\nmodel.save(os.path.join(model_dir, f'SE2DINNetOrd{order}'))\n"
] | [
[
"tensorflow.keras.models.Sequential",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.layers.GlobalMaxPooling2D",
"tensorflow.keras.models.Model",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"tensorflow.keras.layers.Input"
]
] |
Ayon134/code_for_Kids | [
"d90698bb38efe5e26c31f02bd129bfdadea158e2"
] | [
"lst.py"
] | [
"import cv2\nimport io\nfrom PIL import Image, ImageEnhance\nimport pytesseract\nfrom wand.image import Image as wi\nimport re\nimport pandas as pd\nfrom PyPDF2 import PdfFileWriter, PdfFileReader\nfrom pdf2image import convert_from_path\n\n\nclaim = '15232353'\nfile = \"a.pdf\"\npages_to_keep = [0]\ninfile = PdfFileReader(file, 'rb')\noutput = PdfFileWriter()\n\nfor i in pages_to_keep:\n p = infile.getPage(i)\n output.addPage(p)\n\n\nwith open('A1.pdf', 'wb') as f:\n output.write(f)\n\n\npages = convert_from_path('A1.pdf', 500)\n\nfor page in pages:\n page.save('A1.jpg', 'JPEG')\n\n#img = Image.open('B1.jpg').convert('LA')\n#img.save('B1.png')\n\n\n\n# Grayscale, Gaussian blur, Otsu's threshold\nimage = cv2.imread('A1.jpg')\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nblur = cv2.GaussianBlur(gray, (3,3), 0)\nthresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n\n# Morph open to remove noise and invert image\nkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))\nopening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)\ninvert = 255 - opening\n\n\n#read the image\n#im = Image.open(\"B1.jpg\")\n\n#enhancer = ImageEnhance.Contrast(im)\n#img = enhancer.enhance(2.70)\n\n#read the image\n#im = Image.open(\"B1.png\")\n\n#image brightness enhancer\n#enhancer = ImageEnhance.Contrast(im)\n\n#factor = 2 #increase contrast\n#im_output = enhancer.enhance(factor)\n#im_output.save('BF.png')\n\n\ntext = pytesseract.image_to_string(invert, lang='eng', config='--psm 6')\n#text = pytesseract.image_to_string(Image.open('BF.png'))\nprint(text)\n\nx1 = text.find(\"Bill No\")\nprint(x1)\ny1 = text.find(\"Regn No\")\nprint(y1)\nz1 = text[x1:y1]\nprint(z1)\nz1 = z1.split()\nprint(z1)\nbillnum = z1[2]\nbillnum = billnum.split('-')\nif billnum[0] == '1' or billnum[0] == 'l':\n print(\"change hobe\")\n \n billnum[0] = 'I'\n billno = '-'\n billno = billno.join(billnum)\n print(billno)\nelse:\n print(\"ager\")\n billno = '-'\n billno = billno.join(billnum)\n print(billno)\n\n\nx1 = text.find(\"Bed No\")\nprint(x1)\ny1 = text.find(\"Discharge\")\nprint(y1)\nz1 = text[x1:y1]\nprint(z1)\nz1 = z1.split()\nprint(z1)\nroomno=z1[2]\nroomno = roomno.split('/')\nprint(roomno)\nroomno = roomno[0]\nprint(roomno)\n\nx1 = text.find(\"Discharge Date\")\nprint(x1)\ny1 = text.find(\"Service Group\")\nprint(y1)\nz1 = text[x1:y1]\nprint(z1)\nz1 = z1.split()\nprint(z1)\nddate = z1[2]\nprint(ddate)\ndtime = z1[3]\ndtime = dtime.split(':')\ndhr = int(dtime[0])\ndmin = int(dtime[1])\nprint(dhr)\nprint(dmin)\n\nx1 = text.find(\"Consultant:\")\nprint(x1)\ny1 = text.find(\"Adm. 
Date:\")\nprint(y1)\nz1 = text[x1:y1]\nprint(z1)\nz1 = z1.split()\nprint(z1)\nlength = len(z1)\nx = []\nfname = z1[1]\nsname = z1[2]\nif fname == 'OR.':\n fname = 'DR.'\nx.append(fname)\nx.append(sname)\nprint(x)\n\n\ndoc = ' '\ndoc = doc.join(x)\nprint(doc)\n\nx2 = text.find(\"Net Bill Amount :\")\nprint(x2)\ny2 = text.find(\"Net Corporate Payable :\")\nprint(y2)\nz2 = text[x2:y2]\nprint(z2)\nz2 = z2.split()\nprint(z2)\nnetbill = z2[4]\nprint(netbill)\n\nx2 = text.find(\"Discharge Type:\")\nprint(x2)\ny2 = text.find(\"Service Group\")\nprint(y2)\nz2 = text[x2:y2]\nprint(z2)\nz2 = z2.split()\nprint(z2)\nif z2[3] == 'but' or z2[3] == 'Admitted':\n dtype = 'Normal Discharge'\n \nelif z2[3] == 'against' or z2[3] == 'on':\n dtype = 'LAMA'\n\nelif z2[3] == 'Dead':\n dtype = 'NA'\n\nelif z2[3] == 'Birth' or z2[3] == 'Death' or z2[3] == 'Infant':\n dtype = 'Death Discharge'\n\nelse:\n \n dtype = z2[2]\nprint(dtype)\n\n\nx2 = text.find(\"Bill Date:\")\nprint(x2)\ny2 = text.find(\"Consultant:\")\nprint(y2)\nz2 = text[x2:y2]\nprint(z2)\nz2 = z2.split()\nprint(z2)\nbilldate = z2[2]\nprint(billdate)\nbilldate=billdate.split('-')\nprint(billdate)\nbilldin = int(billdate[0])\nbillmn = int(billdate[1])\nbillyr = int(billdate[2])\nprint(billdin)\nprint(billmn)\nprint(billyr)\n\n\ndtype = 'Stable'\nfun = pd.DataFrame(\n [[claim, ddate, dhr, dmin, dtype, roomno, doc, billno, billdin, billmn, billyr, netbill]],\n columns=['Claim_Id', 'Discharge_Date', 'Discharge_Hour', 'Discharge_Minute',\n 'Discharge_Type', 'Room_No', 'Consultant_Name', 'Bill_No', 'Bill_Day', 'Bill_Month', 'Bill_Year',\n 'Net_Bill_Amount'])\n\nfun.to_csv('reader.csv', index=False, header=False, mode='a')\n"
] | [
[
"pandas.DataFrame"
]
] |
giangtranml/framgia-training | [
"c7fb343bd43b1bceb241b447ff956febb99c94a8"
] | [
"decision_tree/decision_tree.py"
] | [
"\"\"\"\nAuthor: Giang Tran.\n\"\"\"\n\nimport numpy as np\nfrom math import log2\n\n\nclass NodeDT:\n \"\"\"\n Class Node represents in Decision Tree\n \"\"\"\n\n def __init__(self, X, y, feature_name):\n self.feature_name = feature_name\n self.X = X\n self.y = y\n self.is_leaf = False\n self.label = None\n self.used = []\n\n def entropy(self):\n \"\"\"\n Compute entropy at a given node.\n E(X) = - sum_v(p(X_v) * log_2(p(X_v))) with X_v is a subset of X = (X_1, X_2, ..., X_n)\n :return: entropy coefficient.\n \"\"\"\n n = len(self.y)\n sum_ = 0\n for i in np.unique(self.y):\n v = len(self.y[self.y == i])\n sum_ += -((v/n) * log2(v/n))\n return sum_\n\n def classification_error(self):\n pass\n\n\nclass DecisionTree:\n \"\"\"\n Metrics: either entropy/information gain or classification error.\n \"\"\"\n\n _metrics = {'ce': '_classification_error', 'ig': '_information_gain'}\n\n def __init__(self, max_depth=None, criterion='ig'):\n \"\"\"\n :param max_depth: define what depth of the tree should be.\n :param criterion: either 'ce' or 'ig'.\n \"\"\"\n self.max_depth = max_depth\n self.criterion = criterion\n if self.criterion not in self._metrics.keys():\n self.criterion = 'ig'\n self.num_class = 0\n self.tree = None\n self.thresholds = {}\n\n def _is_numerical(self, feature):\n return len(np.unique(feature)) >= 100\n\n def _find_threshold(self, feature, y_train, num_class):\n \"\"\"\n The main point is find a good threshold that is the optimal split label.\n A good threshold is the threshold that minimize mis-classification error.\n\n The algorithm:\n - If there are `n` data points in feature data => there are `n-1` available thresholds.\n - For each available threshold, split feature data to 2 partitions.\n - For each partition, we check and compute mis-classification error for each label.\n\n :param feature: numerical value of `feature`.\n :param y_train: label.\n :param num_class: number of class\n :return: categorical value of `feature`.\n \"\"\"\n assert len(num_class) == 2, \"This function only assumes work with binary classification.\"\n best_threshold = 0.0\n max_exact_classification = 0.0\n is_positive_negative = False\n sorted_feature = sorted(np.unique(feature))\n for i in range(len(sorted_feature)-1):\n # assume the value less than threshold is negative (0), greater than threshold is positive (1)\n threshold = (sorted_feature[i] + sorted_feature[i+1]) / 2\n left_partition = y_train[feature < threshold]\n right_partition = y_train[feature > threshold]\n negative_positive = ((len(left_partition[left_partition == 0]) + len(right_partition[right_partition == 1]))\n / len(feature))\n # assume the value less than threshold is positive (1), greater than threshold is negative. 
(0)\n positive_negative = ((len(left_partition[left_partition == 1]) + len(right_partition[right_partition == 0]))\n / len(feature))\n # make decision here\n is_positive_negative = positive_negative > negative_positive\n choose = positive_negative if is_positive_negative else negative_positive\n if max_exact_classification < choose:\n max_exact_classification = choose\n best_threshold = threshold\n return best_threshold, is_positive_negative\n\n def _entropy(self, feature, node):\n \"\"\"\n Compute entropy each partition of specific feature in a given node.\n :param feature: specific feature in dataset of `node`.\n :param node: a node we're checking on.\n :return: an entropy scalar that measure the uncertainty of a feature in data.\n \"\"\"\n entropy = 0\n categories = np.unique(feature)\n num_point = len(feature)\n for category in categories:\n # for each category in that feature\n num_category = len(feature[feature == category])\n for c in self.num_class:\n # count the number of each class\n num_category_class = len(feature[np.logical_and(feature == category, node.y == c)])\n if num_category_class == 0:\n continue\n # compute entropy/information gain or classification error\n entropy += num_category / num_point * (\n -num_category_class / num_category * log2(num_category_class / num_category))\n return entropy\n\n def _information_gain(self, feature, node):\n \"\"\"\n Compute information gain between a node with that feature.\n :param feature:\n :param node:\n :return: information gain coefficient.\n \"\"\"\n return node.entropy() - self._entropy(feature, node)\n\n def _classification_error(self, feature, node):\n pass\n\n def _stop(self, node):\n \"\"\"\n Stop condition:\n - Reach max depth or already reach all features.\n - If entropy of that node is 0\n :return: True if the node meets stop condition. False otherwise.\n \"\"\"\n return len(node.used) == node.X.shape[1] or len(node.used) == self.max_depth or node.entropy() == 0\n\n def _build_dt(self, root, column_name):\n \"\"\"\n Algorithm:\n - Start from the root. 
Find the best feature that has optimum entropy/information gain or classification error.\n - From that best feature, loop through all categories to build subtree.\n ...\n - If entropy/classification erorr is 0, or reach all features then that node is leaf or reach the max depth,\n then stop and move to other subtrees\n :param root: root node at current level\n :return:\n \"\"\"\n N, D = root.X.shape\n best_coef = 0.0\n best_feature = 0\n for d in range(D):\n if column_name[d] in root.used:\n continue\n feature = root.X[:, d]\n coef = getattr(self, self._metrics[self.criterion])(feature, root)\n if best_coef < coef:\n best_coef = coef\n best_feature = d\n # after choose the best feature to split.\n # loop through all its categories to build subtree\n feature = root.X[:, best_feature]\n categories = np.unique(feature)\n for category in categories:\n node = NodeDT(root.X[feature == category], root.y[feature == category], column_name[best_feature])\n node.used = root.used + [column_name[best_feature]]\n setattr(root, 'feature_' + str(category), node)\n setattr(root, 'feature_split', best_feature)\n if not self._stop(node):\n self._build_dt(node, column_name)\n else:\n node.is_leaf = True\n node.label = 1 if len(node.y[node.y == 1]) >= len(node.y[node.y == 0]) else 0\n\n def _train(self, X_train, y_train, column_name):\n self.tree = NodeDT(X_train, y_train, 'root')\n self._build_dt(self.tree, column_name)\n\n def train(self, X_train, y_train, column_name):\n self.num_class = np.unique(y_train)\n _, D = X_train.shape\n for d in range(D):\n feature = X_train[:, d]\n if self._is_numerical(feature):\n threshold, is_positive_negative = self._find_threshold(feature, y_train, self.num_class)\n feature[feature < threshold] = int(is_positive_negative)\n feature[feature > threshold] = int(not is_positive_negative)\n X_train[:, d] = feature\n self.thresholds[d] = (threshold, is_positive_negative)\n self._train(X_train, y_train, column_name)\n\n def _predict(self, X_new, node):\n if not node.is_leaf:\n node = getattr(node, 'feature_' + str(X_new[node.feature_split]))\n return self._predict(X_new, node)\n return node.label\n\n def predict(self, X_new):\n # First convert numerical feature to categorical feature.\n for key, (threshold, is_positive_negative) in self.thresholds.items():\n X_new[key] = int(is_positive_negative) if X_new[key] < threshold else int(not is_positive_negative)\n tree = self.tree\n label = self._predict(X_new, tree)\n return label\n\n def representation(self):\n print(self.tree)\n \n\nif __name__ == '__main__':\n import pandas as pd\n from sklearn.tree import DecisionTreeClassifier\n\n df = pd.read_csv('data/titanic_train.csv')\n X = df.loc[:, :].drop(['Survived', 'PassengerId'], axis=1).values\n y = df.loc[:, 'Survived'].values\n\n dt = DecisionTree(criterion='ig', max_depth=5)\n dt.train(X, y, df.columns.drop(['Survived', 'PassengerId']))\n\n df_test = pd.read_csv('data/titanic_test.csv')\n X_test = df_test.loc[:, :].drop(['Survived', 'PassengerId'], axis=1).values\n y_test = df_test.loc[:, 'Survived'].values\n predicts = []\n for x in X_test:\n predicts.append(dt.predict(x))\n predicts = np.asarray(predicts)\n print(\"Accuracy:\", len(predicts[predicts == y_test])/len(predicts))\n\n dt_sk = DecisionTreeClassifier(max_depth=5)\n X[X[:, 7] == 'male', 7] = 1\n X[X[:, 7] == 'female', 7] = 0\n\n X_test[X_test[:, 7] == 'male', 7] = 1\n X_test[X_test[:, 7] == 'female', 7] = 0\n dt_sk.fit(X, y)\n y_pred = dt_sk.predict(X_test)\n print(\"Accuracy of Sk-learn:\", len(y_pred[y_pred == y_test]) 
/ len(y_pred))\n\n\n\n"
] | [
[
"pandas.read_csv",
"sklearn.tree.DecisionTreeClassifier",
"numpy.logical_and",
"numpy.asarray",
"numpy.unique"
]
] |
mnemocron/TelegramChatStats | [
"10b9ebb97bfb28f835fd05050f03dcb10525f7a3"
] | [
"telegram-statistics.py"
] | [
"#! /usr/bin/python3\n\n#_*_ coding: utf-8 _*_\n\n'''\n@file \t\ttelegram-statistics.py\n@author \tSimon Burkhardt - github.com/mnemocron\n@date \t\t2018.10.01\n\nPost about this code:\nhttps://www.reddit.com/r/LongDistance/comments/9mgcol/oc_chat_statistics_from_telegram_using_python/\n\nInspiration:\nhttps://www.reddit.com/r/LongDistance/comments/9jud8j/analysis_of_texts_from_a_long_distance/\n'''\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport optparse\nimport re\nimport json\nimport codecs\nimport numpy as np # install with pip3\nimport pandas as pd # install with pip3\nimport bokeh # install with pip3\nfrom pprint import pprint\nfrom collections import Counter\nfrom datetime import datetime\nfrom datetime import timedelta\n\nfrom _message_numerics import _message_numerics\nfrom _message_graphs import _message_graphs\n\nparser = optparse.OptionParser('telegram-stats')\nparser.add_option('-i', '--input-file', \tdest='indir', \ttype='string', \thelp='chat history file')\nparser.add_option('-n', '--name', \t\t\tdest='name', \ttype='string', \thelp='name of the person')\nparser.add_option('-c', '--id', \t\t\tdest='id', \t\ttype='string', \thelp='chat id of the person')\nparser.add_option('-d', '--date-max', \t\tdest='date', \ttype='string', \thelp='only count messages after date [YYYY-MM-DD]')\nparser.add_option('-w', '--word-list', \t\tdest='words', \ttype='string', \thelp='count occurrences of words -w \"John;Vacation\"')\n(opts, args) = parser.parse_args()\n\n# Writes a dict in json format to a file\ndef dump_to_json_file(filename, data):\n\twith open(filename, 'w', encoding='utf-8') as fh:\n\t\tjson.dump(data, fh, indent=4, sort_keys=True)\n\n# writes data utf-8 encoded to a file\n# important for emojis\ndef dump_to_unicode_file(filename, data):\n\tfh = codecs.open(filename, 'w', 'utf-8')\n\tfh.write(data)\n\tfh.close()\n\n# writes a dict to a csv format\n\ndef dump_dict_to_csv_file(filename, dict):\n\t(pd.DataFrame.from_dict(data=dict, orient='index')\n\t\t.to_csv(filename, header=False, sep=';'))\n\ndef load_file_to_raw(path):\n\ttry:\n\t\twith open(path, encoding='utf-8-sig') as fh:\n\t\t\tdata = json.load(fh)\n\t\treturn data\n\texcept IOError:\n\t\tprint('Error: could not open the file')\n\t\texit(-1)\n\ndef select_chat_from_name(data, name):\n\ttry:\n\t\tfound = False\n\t\tfor chat in data['chats']['list']:\n\t\t\tif('name' in chat):\n\t\t\t\tif(name == chat['name']):\n\t\t\t\t\tif(found == True):\n\t\t\t\t\t\tprint('Error: The name \"' + str(name) + '\" is ambiguous. 
Use the chat ID instead.')\n\t\t\t\t\t\tprint('Use <telegram-stats -i [result.json]> to list the available chats.')\n\t\t\t\t\t\texit(-1)\n\t\t\t\t\tfound = True\n\t\t\t\t\tdata = chat\n\t\tif(found == False):\n\t\t\tprint('Error: invalid chat name: ' + name)\n\t\t\texit(-1)\n\t\treturn data\n\texcept KeyError:\n\t\tprint('Error: wrong file format (name not found)')\n\ndef select_chat_from_id(data, id):\n\tid = str(id)\n\ttry:\n\t\tfound = False\n\t\tfor chat in data['chats']['list']:\n\t\t\tif('id' in chat):\n\t\t\t\tif(id == str(chat['id'])):\n\t\t\t\t\tfound = True\n\t\t\t\t\tdata = chat\n\t\tif(found == False):\n\t\t\tprint('Error: invalid chat ID: ' + str(id))\n\t\t\texit(-1)\n\t\treturn data\n\texcept KeyError:\n\t\tprint('Error: wrong file format (keys not found)')\n\ndef calculate_metrics(chat_data, date_filter):\n\tmetrics = _message_numerics(chat_data, date_filter)\n\tdump_to_json_file('raw_metrics.json', metrics)\n\tustr = u'' + metrics['A']['name'] + '\\n'\n\tfor e in metrics['A']['emojilist']:\n\t\tustr += str(e[0]) + u' : ' + str(e[1]) + u'\\n'\n\tustr += metrics['B']['name'] + '\\n'\n\tfor e in metrics['B']['emojilist']:\n\t\tustr += str(e[0]) + u' : ' + str(e[1]) + u'\\n'\n\tdump_to_unicode_file('emojis.txt', ustr)\n\ndef calculate_graphs(chat_data, date_filter, wordlist):\n\treturn _message_graphs(chat_data, date_filter, wordlist)\n\n# https://stackoverflow.com/questions/16870663/how-do-i-validate-a-date-string-format-in-python\ndef validate_date(date_text):\n\ttry:\n\t\tdatetime.strptime(date_text, '%Y-%m-%d')\n\texcept ValueError:\n\t\tprint('Incorrect date format, should be YYYY-MM-DD')\n\t\texit(-1)\n\ndef print_available_names(raw_data):\n\tprint('')\n\tprint('available chat names:')\n\tfor chat in raw_data['chats']['list']:\n\t\tif ('name' in chat):\n\t\t\tname = chat['name']\n\t\t\tif(len(name) > 13):\n\t\t\t\tname = name[:11] + '...'\n\t\t\tif(len(name) < 7):\n\t\t\t\tname = name + '\\t'\n\t\t\tprint(name + ' \\t' + str(chat['id']) + ' \\t(' + chat['type'] + ')')\n\n### MAIN\ndef main():\n\tif (opts.indir is None):\n\t\tparser.print_help() \n\t\texit(0)\n\n\tdate_filter = '1970-01-01'\n\tif ( opts.date is not None):\n\t\tvalidate_date(opts.date)\n\t\tdate_filter = opts.date\n\n\tprint('importing raw data...')\n\traw_data = load_file_to_raw(opts.indir)\n\n\tif('chats' in raw_data):\n\t\tprint('input data is full chat export')\n\t\tif (opts.id is None and opts.name is None):\n\t\t\tprint('Error: argument <name> not specified.')\n\t\t\tprint('I do now know which chat to analyze.')\n\t\t\tprint('Available chats are:')\n\t\t\tprint_available_names(raw_data)\n\t\t\texit(0)\n\t\tif (opts.id is not None):\n\t\t\tchat_data = select_chat_from_id(raw_data, opts.id)\n\t\telif (opts.name is not None):\n\t\t\tchat_data = select_chat_from_name(raw_data, opts.name)\n\telse:\n\t\tprint('input data is a single chat export')\n\t\tchat_data = raw_data\n\n\twordlist = ''\n\tif(opts.words is not None):\n\t\twordlist = opts.words.lower().split(';')\n\t\n\tprint('calculating metrics...')\n\tcalculate_metrics(chat_data, date_filter)\n\tprint('generating graphs...')\n\traw = calculate_graphs(chat_data, date_filter, wordlist)\n\tdump_dict_to_csv_file('raw_weekdays_person_' + raw['A']['name'] + '.csv', raw['A']['hourofday'])\n\tdump_dict_to_csv_file('raw_weekdays_person_' + raw['B']['name'] + '.csv', raw['B']['hourofday'])\n\tdump_dict_to_csv_file('raw_months_person_' + raw['A']['name'] + '.csv', raw['A']['months'])\n\tdump_dict_to_csv_file('raw_months_person_' + raw['B']['name'] + '.csv', 
raw['B']['months'])\n\tdump_dict_to_csv_file('raw_months_chars_person_' + raw['A']['name'] + '.csv', raw['A']['months_chars'])\n\tdump_dict_to_csv_file('raw_months_chars_person_' + raw['B']['name'] + '.csv', raw['B']['months_chars'])\n\tdump_dict_to_csv_file('raw_monthly_pictures_person_' + raw['A']['name'] + '.csv', raw['A']['monthly_pictures'])\n\tdump_dict_to_csv_file('raw_monthly_pictures_person_' + raw['B']['name'] + '.csv', raw['B']['monthly_pictures'])\n\tdump_dict_to_csv_file('raw_monthly_calls_person_' + raw['A']['name'] + '.csv', raw['A']['monthly_calls'])\n\tdump_dict_to_csv_file('raw_monthly_calls_person_' + raw['B']['name'] + '.csv', raw['B']['monthly_calls'])\n\tdump_dict_to_csv_file('raw_monthly_call_duration_person_' + raw['A']['name'] + '.csv', raw['A']['monthly_call_duration'])\n\tdump_dict_to_csv_file('raw_monthly_call_duration_person_' + raw['B']['name'] + '.csv', raw['B']['monthly_call_duration'])\n\tdump_dict_to_csv_file('raw_monthly_time_to_reply_person_' + raw['A']['name'] + '.csv', raw['A']['monthly_time_to_reply'])\n\tdump_dict_to_csv_file('raw_monthly_time_to_reply_person_' + raw['B']['name'] + '.csv', raw['B']['monthly_time_to_reply'])\n\tprint('done')\n\nif __name__ == '__main__':\n\ttry:\n\t\tmain()\n\texcept KeyboardInterrupt as e:\n\t\tprint('Aborted by KeyboardInterrupt')\n\t\texit(0)\n"
] | [
[
"pandas.DataFrame.from_dict"
]
] |
bentzinir/ray | [
"39b84166f88e271b279bd0b3ce56f81d24a1852c"
] | [
"rllib/agents/sac/sac_ensemble_tf_model_unstack.py"
] | [
"from gym.spaces import MultiDiscrete\nimport numpy as np\n\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\nfrom ray.rllib.utils.framework import try_import_tf\n\ntf = try_import_tf()\n\n\nclass SACEnsembleTFModel(TFModelV2):\n \"\"\"Extension of standard TFModel for SAC.\n\n Data flow:\n obs -> forward() -> model_out\n model_out -> get_policy_output() -> pi(s)\n model_out, actions -> get_q_values() -> Q(s, a)\n model_out, actions -> get_twin_q_values() -> Q_twin(s, a)\n\n Note that this class by itself is not a valid model unless you\n implement forward() in a subclass.\"\"\"\n\n def __init__(self,\n obs_space,\n action_space,\n num_outputs,\n model_config,\n name,\n actor_hidden_activation=\"relu\",\n actor_hiddens=(256, 256),\n critic_hidden_activation=\"relu\",\n critic_hiddens=(256, 256),\n twin_q=False,\n initial_alpha=1.0,\n target_entropy=None,\n ensemble_size=1,\n shared_actor_body=False,\n shared_critic_body=False,\n constant_alpha=False,\n shared_entropy=False,\n ):\n \"\"\"Initialize variables of this model.\n\n Extra model kwargs:\n actor_hidden_activation (str): activation for actor network\n actor_hiddens (list): hidden layers sizes for actor network\n critic_hidden_activation (str): activation for critic network\n critic_hiddens (list): hidden layers sizes for critic network\n twin_q (bool): build twin Q networks.\n initial_alpha (float): The initial value for the to-be-optimized\n alpha parameter (default: 1.0).\n\n Note that the core layers for forward() are not defined here, this\n only defines the layers for the output heads. Those layers for\n forward() should be defined in subclasses of SACModel.\n \"\"\"\n super(SACEnsembleTFModel, self).__init__(obs_space, action_space, num_outputs,\n model_config, name)\n if isinstance(action_space, MultiDiscrete):\n ensemble_action_dims = action_space.nvec\n assert all(x == ensemble_action_dims[0] for x in ensemble_action_dims)\n self.action_dim = ensemble_action_dims[0]\n self.discrete = True\n action_outs = q_outs = self.action_dim\n else:\n self.action_dim = np.product(action_space.shape[1:])\n self.discrete = False\n action_outs = 2 * self.action_dim\n q_outs = 1\n\n self.model_out = tf.keras.layers.Input(\n shape=(self.num_outputs,), name=\"model_out\")\n\n self.twin_q = twin_q\n self.ensemble_size = ensemble_size\n self.shared_actor_body = shared_actor_body\n self.constant_alpha = constant_alpha\n self.action_model = [None for _ in range(ensemble_size)]\n self.shift_and_log_scale_diag = [None for _ in range(ensemble_size)]\n\n if self.shared_actor_body:\n print(f\"=============SHARED ACTOR BODY=============\")\n x = None\n for i, hidden in enumerate(actor_hiddens):\n if x is None:\n x = self.model_out\n x = tf.keras.layers.Dense(\n units=hidden,\n activation=getattr(tf.nn, actor_hidden_activation, None),\n name=\"action_{}\".format(i + 1))(x)\n\n for eidx in range(ensemble_size):\n a_out = tf.keras.layers.Dense(units=action_outs, activation=None, name=\"action_out_{}\".format(eidx))(x)\n self.action_model[eidx] = tf.keras.Model(self.model_out, a_out)\n\n self.shift_and_log_scale_diag[eidx] = self.action_model[eidx](self.model_out)\n\n self.register_variables(self.action_model[eidx].variables)\n else:\n for eidx in range(ensemble_size):\n self.action_model[eidx] = tf.keras.Sequential([\n tf.keras.layers.Dense(\n units=hidden,\n activation=getattr(tf.nn, actor_hidden_activation, None),\n name=\"action_{}_{}\".format(eidx, i + 1))\n for i, hidden in enumerate(actor_hiddens)\n ] + [\n tf.keras.layers.Dense(\n 
units=action_outs, activation=None, name=\"action_out_{}\".format(eidx))\n ])\n self.shift_and_log_scale_diag[eidx] = self.action_model[eidx](self.model_out)\n\n self.register_variables(self.action_model[eidx].variables)\n\n self.actions_input = None\n if not self.discrete:\n self.actions_input = tf.keras.layers.Input(\n shape=(self.action_dim, ), name=\"actions\")\n\n def build_q_net(name, observations, actions, eidx):\n # For continuous actions: Feed obs and actions (concatenated)\n # through the NN. For discrete actions, only obs.\n q_net = tf.keras.Sequential(([\n tf.keras.layers.Concatenate(axis=1),\n ] if not self.discrete else []) + [\n tf.keras.layers.Dense(\n units=units,\n activation=getattr(tf.nn, critic_hidden_activation, None),\n name=\"{}_hidden_{}_{}\".format(name, i, eidx))\n for i, units in enumerate(critic_hiddens)\n ] + [\n tf.keras.layers.Dense(\n units=q_outs, activation=None, name=\"{}_out\".format(name))\n ])\n\n # TODO(hartikainen): Remove the unnecessary Model calls here\n if self.discrete:\n q_net = tf.keras.Model(observations, q_net(observations))\n else:\n q_net = tf.keras.Model([observations, actions],\n q_net([observations, actions]))\n return q_net\n\n self.q_net = [None for _ in range(ensemble_size)]\n self.twin_q_net = [None for _ in range(ensemble_size)]\n\n for eidx in range(ensemble_size):\n self.q_net[eidx] = build_q_net(\"q\", self.model_out, self.actions_input, eidx)\n self.register_variables(self.q_net[eidx].variables)\n\n if twin_q:\n self.twin_q_net[eidx] = build_q_net(\"twin_q\", self.model_out,\n self.actions_input, eidx)\n self.register_variables(self.twin_q_net[eidx].variables)\n\n # Auto-calculate the target entropy.\n if target_entropy is None or target_entropy == \"auto\":\n # See hyperparams in [2] (README.md).\n if self.discrete:\n target_entropy = 0.98 * np.array(\n -np.log(1.0 / self.action_dim), dtype=np.float32)\n # TODO: find the correct entropy value for the ensemble\n # See [1] (README.md).\n else:\n # TODO: find the correct entropy value for the ensemble\n target_entropy = -np.prod(action_space.shape[1:])\n self.target_entropy = target_entropy\n\n # TODO: find correct alpha value\n if constant_alpha:\n initial_alpha = 0.1\n print(\"=================CONSTANT ALPHA====================\")\n\n print(f\"target ent: {self.target_entropy}, initial alpha: {initial_alpha}, shared ent: {shared_entropy}\")\n\n if shared_entropy:\n self.log_alpha = tf.Variable(\n np.log(initial_alpha), dtype=tf.float32, name=\"log_alpha\")\n else:\n log_alpha_vec = [np.log(initial_alpha) for _ in range(ensemble_size)]\n log_alpha_vec = np.expand_dims(log_alpha_vec, axis=1)\n self.log_alpha = tf.Variable(log_alpha_vec, dtype=tf.float32, name=\"log_alpha\")\n self.alpha = tf.exp(self.log_alpha)\n if not constant_alpha:\n self.register_variables([self.log_alpha])\n\n def get_q_values(self, model_out, actions=None, midx=None):\n \"\"\"Return the Q estimates for the most recent forward pass.\n\n This implements Q(s, a).\n\n Arguments:\n model_out (Tensor): obs embeddings from the model layers, of shape\n [BATCH_SIZE, num_outputs].\n actions (Optional[Tensor]): Actions to return the Q-values for.\n Shape: [BATCH_SIZE, action_dim]. 
If None (discrete action\n case), return Q-values for all actions.\n\n Returns:\n tensor of shape [BATCH_SIZE].\n \"\"\"\n # TODO: consider remove casting after debug\n model_out = tf.cast(model_out, tf.float32)\n if actions is not None:\n actions = tf.unstack(actions, axis=1)\n q_value_list = [qnet([model_out, act]) for qnet, act in zip(self.q_net, actions)]\n else:\n q_value_list = [qnet(model_out) for qnet in self.q_net]\n\n if midx is not None:\n return q_value_list[midx]\n else:\n return tf.stack(q_value_list, axis=1)\n\n def get_twin_q_values(self, model_out, actions=None, midx=None):\n \"\"\"Same as get_q_values but using the twin Q net.\n\n This implements the twin Q(s, a).\n\n Arguments:\n model_out (Tensor): obs embeddings from the model layers, of shape\n [BATCH_SIZE, num_outputs].\n actions (Optional[Tensor]): Actions to return the Q-values for.\n Shape: [BATCH_SIZE, action_dim]. If None (discrete action\n case), return Q-values for all actions.\n\n Returns:\n tensor of shape [BATCH_SIZE].\n \"\"\"\n # TODO: consider remove casting after debug\n model_out = tf.cast(model_out, tf.float32)\n\n if actions is not None:\n actions = tf.unstack(actions, axis=1)\n twin_q_value_list = [twin_qnet([model_out, act]) for twin_qnet, act in zip(self.twin_q_net, actions)]\n else:\n twin_q_value_list = [twin_qnet(model_out) for twin_qnet in self.twin_q_net]\n if midx is not None:\n return twin_q_value_list[midx]\n else:\n return tf.stack(twin_q_value_list, axis=1)\n\n def get_policy_output(self, model_out, midx=None):\n \"\"\"Return the action output for the most recent forward pass.\n\n This outputs the support for pi(s). For continuous action spaces, this\n is the action directly. For discrete, is is the mean / std dev.\n\n Arguments:\n model_out (Tensor): obs embeddings from the model layers, of shape\n [BATCH_SIZE, num_outputs].\n\n Returns:\n tensor of shape [BATCH_SIZE, action_out_size]\n \"\"\"\n if midx is not None:\n return self.action_model[midx](model_out)\n else:\n policy_output_list = [self.action_model[eidx](model_out) for eidx in range(self.ensemble_size)]\n return tf.stack(policy_output_list, axis=1)\n\n def policy_variables(self, midx=None):\n \"\"\"Return the list of variables for the policy net.\"\"\"\n\n if midx is not None:\n return self.action_model[midx].variables\n else:\n vars = []\n for eidx in range(self.ensemble_size):\n vars += self.action_model[eidx].variables\n return vars\n\n def q_variables(self, midx=None):\n \"\"\"Return the list of variables for Q / twin Q nets.\"\"\"\n\n if midx is not None:\n return self.q_net[midx].variables + (self.twin_q_net[midx].variables if self.twin_q_net else [])\n else:\n vars = []\n # We assume that the list is ordered as [Q vars, twin Q vars]\n # 1. First list Q variables\n for eidx in range(self.ensemble_size):\n vars += self.q_net[eidx].variables\n # 2. Second list twin Q variables\n if self.twin_q:\n for eidx in range(self.ensemble_size):\n vars += self.twin_q_net[eidx].variables\n return vars\n"
] | [
[
"numpy.log",
"numpy.expand_dims",
"numpy.product",
"numpy.prod"
]
] |
TamasFlorin/YOLO3-4-Py | [
"d7cc4d67c7eb9168a30ce9716ed64024fc1e1f8f"
] | [
"setup.py"
] | [
"import tempfile\nfrom distutils.command.build import build\nfrom distutils.command.clean import clean\nimport sys\nimport numpy as np # TODO: Need a mechanism to ensure numpy is already installed\nimport shutil\n\n# Compile using .cpp files if cython is not present\ntry:\n from Cython.Distutils import build_ext\nexcept ImportError:\n from distutils.command.build_ext import build_ext\n use_cython = False\nelse:\n use_cython = True\n\nfrom setuptools import setup, Extension\nfrom util import build_darknet, clean_darknet, get_cflags, get_libs, find_site_packages, get_readme, find_dist_packages\nimport logging\nimport os\n\nlogging.basicConfig(level=logging.INFO)\n\n# Default configuration\nUSE_GPU = False\nUSE_CV = False\n\nif \"GPU\" in os.environ:\n if \"DARKNET_HOME\" in os.environ:\n logging.warning(\"GPU environment variable is skipped since DARKNET_HOME is specified\")\n if int(os.environ[\"GPU\"]) == 1:\n USE_GPU = True\n else:\n USE_GPU = False\n else:\n if int(os.environ[\"GPU\"]) == 1:\n logging.info(\"Darknet will be compiled with GPU support\")\n USE_GPU = True\n else:\n logging.info(\"Darknet will be compiled without GPU support\")\n USE_GPU = False\n\n\nif \"OPENCV\" in os.environ and int(os.environ[\"OPENCV\"]) == 0:\n logging.info(\"Compiling wrapper without OpenCV\")\n USE_CV = False\nelif \"OPENCV\" in os.environ and int(os.environ[\"OPENCV\"]) == 1:\n logging.info(\"Compiling wrapper with OpenCV\")\n USE_CV = True\n\nif USE_CV & (get_libs(\"opencv\") == '' or get_cflags(\"opencv\") == ''):\n logging.warning(\"OpenCV is not configured. Compiling wrapper without OpenCV!\")\n USE_CV = False\n\n\nif USE_GPU:\n if USE_CV:\n build_branch_name = \"master\"\n else:\n build_branch_name = \"master\"\nelse:\n build_branch_name = \"master\"\n if \"DARKNET_HOME\" not in os.environ:\n if USE_CV:\n logging.warning(\"Non GPU darknet branch is used. Compiling wrapper without OpenCV!\")\n USE_CV = False # OpenCV requires yolo34py-intergration branch which has OpenCV enabled\n\nif \"DARKNET_HOME\" not in os.environ:\n logging.info(\"Selected Darknet Branch: \" + build_branch_name+ \" from Darknet Fork 'https://github.com/madhawav/darknet/'\")\n\n\ntemp_dir = os.path.join(tempfile.gettempdir(), \"darknet\") # Temp directory to build darknet\n\n# Check whether user has specified DARKNET_HOME directory. 
If so, we would use the darknet installation at this location.\nif not \"DARKNET_HOME\" in os.environ:\n darknet_dir = os.path.join(temp_dir, \"darknet-\" + build_branch_name)\nelse:\n logging.info(\"DARKNET_HOME is set: \" + os.environ[\"DARKNET_HOME\"])\n darknet_dir = os.environ[\"DARKNET_HOME\"]\n\ninclude_paths = [np.get_include(), os.path.join(darknet_dir,\"include\"), os.path.join(darknet_dir,\"src\")]\nlibraries = [\"darknet\",\"m\", \"pthread\"]\nlibrary_paths = [\".\", \"./__libdarknet\"]\n\nextra_compiler_flags = [ get_cflags(\"python3\")]\nextra_linker_flags = [get_libs(\"python3\")]\n\ncython_compile_directives = {}\nmacros = []\n\nif USE_GPU:\n if \"CUDA_HOME\" in os.environ:\n include_paths.append(os.path.join(os.environ[\"CUDA_HOME\"],\"include\"))\n else:\n raise Exception(\"Environment variable CUDA_HOME not set\")\n cython_compile_directives[\"USE_GPU\"] = 1\n macros.append((\"USE_GPU\", 1))\nelse:\n cython_compile_directives[\"USE_GPU\"] = 0\n macros.append((\"USE_GPU\", 0))\n\nif USE_CV:\n extra_compiler_flags.append(get_cflags(\"opencv\"))\n extra_linker_flags.append(get_libs(\"opencv\"))\n cython_compile_directives[\"USE_CV\"] = 1\n macros.append((\"USE_CV\", 1))\nelse:\n cython_compile_directives[\"USE_CV\"] = 0\n macros.append((\"USE_CV\", 0))\n\n\n# Add linker flag to search in site_packages/__libdarknet. libdarknet.so is located at this location.\nfor site_package in find_site_packages():\n extra_linker_flags.append(\"-Wl,-rpath,\" + os.path.join(site_package,\"__libdarknet\"))\n\nfor dist_package in find_dist_packages():\n extra_linker_flags.append(\"-Wl,-rpath,\" + os.path.join(dist_package,\"__libdarknet\"))\n\nif \"--inplace\" in sys.argv:\n extra_linker_flags.append(\"-Wl,-rpath,.\") # Added to make test code work\n\nif use_cython:\n pydarknet_extension = Extension(\"pydarknet\", [\"pydarknet.pyx\", \"pydarknet.pxd\", \"bridge.cpp\"], include_dirs=include_paths, language=\"c++\",\n libraries=libraries, library_dirs=library_paths, extra_link_args=extra_linker_flags,\n extra_compile_args=extra_compiler_flags, define_macros = macros)\n\n # Pass macros to Cython\n pydarknet_extension.cython_compile_time_env = cython_compile_directives\nelse:\n pydarknet_extension = Extension(\"pydarknet\", [\"pydarknet.cpp\", \"bridge.cpp\"],\n include_dirs=include_paths, language=\"c++\",\n libraries=libraries, library_dirs=library_paths, extra_link_args=extra_linker_flags,\n extra_compile_args=extra_compiler_flags, define_macros=macros)\n\n # NOTE: It is assumed that pydarknet.cpp is already generated using pydarknet.py. 
It is also assumed that USE_CV\n # flag is unchanged between cythonize and current compilation.\n\next_modules=[\n pydarknet_extension\n]\n\ndarknet_setup_done = False\n\ndef setup_darknet():\n '''\n Configures darknet on which the wrapper works\n :return:\n '''\n global darknet_setup_done\n if darknet_setup_done:\n return\n\n target_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"__libdarknet\", \"libdarknet.so\")\n\n if \"--inplace\" in sys.argv:\n logging.info(\"For inplace compilations, target location is set to root\")\n target_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"libdarknet.so\")\n\n if \"DARKNET_HOME\" not in os.environ:\n # If user has not specified DARKNET_HOME, we will download and build darknet.\n build_darknet(temp_dir, build_branch_name, target_location)\n else:\n logging.info(\"Copying libdarknet.so from \" + os.environ[\"DARKNET_HOME\"])\n # If user has set DARKNET_HOME, it is assumed that he has built darknet. We will copy libdarknet.so from users location to site-pacakges/__libdarknet\n shutil.copyfile(os.path.join(os.environ[\"DARKNET_HOME\"], \"libdarknet.so\"),\n target_location)\n\n darknet_setup_done = True\n\nclass CustomBuild(build):\n def run(self):\n # This is triggered when src distribution is made. Not triggered for build_ext.\n setup_darknet()\n build.run(self)\n\nclass CustomBuildExt(build_ext):\n def run(self):\n setup_darknet()\n build_ext.run(self)\n\n if not \"DARKNET_HOME\" in os.environ:\n clean_darknet(temp_dir)\n\nclass CustomClean(clean):\n def run(self):\n if os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)),\"__libdarknet\",\"libdarknet.so\")):\n logging.info(\"removing __libdarknet/libdarknet.so\")\n os.remove(os.path.join(os.path.dirname(__file__),\"__libdarknet\",\"libdarknet.so\"))\n\n if os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"libdarknet.so\")):\n logging.info(\"removing libdarknet.so\")\n os.remove(os.path.join(os.path.dirname(os.path.abspath(__file__)),\"libdarknet.so\"))\n\n if os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)),\"pydarknet.cpp\")):\n logging.info(\"removing pydarknet.cpp\")\n os.remove(os.path.join(os.path.dirname(os.path.abspath(__file__)),\"pydarknet.cpp\"))\n\n for f in os.listdir(os.path.dirname(os.path.abspath(__file__))):\n if f.startswith(\"pydarknet.\") and f.endswith(\".so\"):\n logging.info(\"removing \" + f)\n os.remove(os.path.join(os.path.dirname(os.path.abspath(__file__)),f))\n\n clean.run(self)\n\n\nif USE_GPU:\n name = \"yolo34py-gpu\"\nelse:\n name = \"yolo34py\"\n\ncmd_class = {'clean': CustomClean, \"build\": CustomBuild, \"build_ext\": CustomBuildExt}\n\n\nsetup(\n name = name,\n description=\"Python wrapper on YOLO 3.0 implementation by 'pjreddie': (https://pjreddie.com/yolo)\",\n long_description=get_readme(),\n long_description_content_type=\"text/markdown\",\n cmdclass= cmd_class,\n version='0.1.rc13',\n ext_modules = ext_modules,\n platforms=[\"linux-x86_64\"],\n setup_requires=[\n 'cython>=0.27',\n 'requests',\n 'numpy'\n ],\n install_requires=[\n 'cython>=0.27',\n 'requests',\n 'numpy'\n ],\n python_requires='>=3.5',\n author='Madhawa Vidanapathirana',\n author_email='[email protected]',\n url=\"https://github.com/madhawav/YOLO3-4-Py\",\n package_dir={\"__libdarknet\": \"__libdarknet\"},\n packages=[\"__libdarknet\"],\n include_package_data=True,\n license=\"YOLO34Py wrapper is under Apache 2.0. 
Darknet is Public Domain.\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Text Processing :: Linguistic',\n 'Operating System :: POSIX :: Linux',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence'\n ],\n keywords=\"yolo darknet object detection vision\",\n\n)\n"
] | [
[
"numpy.get_include"
]
] |
sheepolata/GraphEngine | [
"853447e42dcd09154cdc5ac0b8e00c493445a389"
] | [
"ggraph.py"
] | [
"# import the pygame module, so you can use it\nimport pygame\nfrom pygame.locals import *\nimport warnings\nimport random\nimport numpy as np\nimport math\nfrom scipy.spatial import Delaunay\n\nimport delaunaytriangulation as dt\nimport graphmodel as gm\nimport drawer\nimport utils\n\nclass gNode(gm.Node):\n\n def draw(self, surface, color):\n self.drawNode(surface, color)\n self.drawEdges(surface, color)\n\n def drawNode(self, surface, color, outline_color=(255, 255, 255), outline_width=2):\n try:\n self.info[\"pos\"]\n except KeyError:\n warnings.warn(\"{} does not possess a position information (info[\\\"pos\\\"])\".format(self.id))\n return\n\n try:\n radius = self.info[\"radius\"]\n except KeyError:\n radius = 8\n\n pygame.draw.circle(surface, color, self.info[\"pos\"], radius)\n pygame.draw.circle(surface, outline_color, self.info[\"pos\"], radius+outline_width, width=outline_width)\n\n def drawEdges(self, surface, color, width=1):\n try:\n self.info[\"pos\"]\n except KeyError:\n warnings.warn(\"{} does not possess a position information (info[\\\"pos\\\"])\".format(self.id), stacklevel=2)\n return\n\n try:\n radius = self.info[\"radius\"]\n except KeyError:\n radius = 8\n\n for e in self.edges:\n if e.end == self:\n pygame.draw.circle(surface, color, (self.info[\"pos\"][0], self.info[\"pos\"][1]-radius), radius*1.2, width=width)\n else:\n try:\n if self.parent.oriented:\n v = (e.end.info[\"pos\"][0] - self.info[\"pos\"][0], e.end.info[\"pos\"][1] - self.info[\"pos\"][1])\n mag_v = np.linalg.norm(np.array(v))\n u = (v[0] / mag_v, v[1] / mag_v)\n ep = (e.end.info[\"pos\"][0] - radius*1.5*u[0], e.end.info[\"pos\"][1]- radius*1.5*u[1])\n drawer.arrow(surface, color, self.info[\"pos\"], ep)\n else:\n pygame.draw.line(surface, color, self.info[\"pos\"], e.end.info[\"pos\"], width=width)\n except KeyError:\n warnings.warn(\"{} does not possess a position information (info[\\\"pos\\\"])\".format(e.end.id), stacklevel=2)\n\n def move(self, direction, speed, limits=(1280, 720), collision=True):\n try:\n self.info[\"pos\"]\n except KeyError:\n self.info[\"pos\"] = [0, 0]\n\n new_pos = [0, 0]\n new_pos[0] = (self.info[\"pos\"][0] + math.cos(direction) * speed) % limits[0]\n new_pos[1] = (self.info[\"pos\"][1] + math.sin(direction) * speed) % limits[1]\n\n if collision:\n for other in self.parent.nodes:\n if not self.equal(other):\n _count = 0\n while self.collide(other):\n new_pos[0] = (self.info[\"pos\"][0] + math.cos(utils.angle_from_points(self.info[\"pos\"], other.info[\"pos\"])) * 1) % limits[0]\n new_pos[1] = (self.info[\"pos\"][1] + math.sin(utils.angle_from_points(self.info[\"pos\"], other.info[\"pos\"])) * 1) % limits[1]\n _count += 1\n if _count > 50:\n break\n\n self.info[\"pos\"][0] = new_pos[0]\n self.info[\"pos\"][1] = new_pos[1]\n\n\n\n def collide(self, other):\n # print(\"{} <= {}\".format(utils.distance2p(self.info[\"pos\"], other.info[\"pos\"]), max(self.info[\"radius\"], other.info[\"radius\"])))\n return utils.distance2p(self.info[\"pos\"], other.info[\"pos\"]) <= max(self.info[\"radius\"], other.info[\"radius\"])*2\n\n def collide_point(self, point):\n return utils.distance2p(self.info[\"pos\"], point) <= self.info[\"radius\"]\n\n def applyForces(self, speed=1, spring_rest_distance=75, collision=True):\n edges_force_vectors = []\n del_force_vectors = []\n\n attraction_factor = 0.8\n repulsion_factor = 1.4\n\n _t = 0.1\n t_up = 1.00 + _t\n t_down = 1.00 - _t\n\n for e in self.edges:\n if self.equal(e.end):\n continue\n try:\n dist = utils.distance2p(e.end.info[\"pos\"], 
self.info[\"pos\"])\n if dist >= spring_rest_distance*t_up:\n f = {\"force\":[e.end.info[\"pos\"][0] - self.info[\"pos\"][0], e.end.info[\"pos\"][1] - self.info[\"pos\"][1]]}\n f[\"f_dist\"] = utils.normalise(dist, mini=spring_rest_distance, maxi=spring_rest_distance*3) * attraction_factor\n edges_force_vectors.append(f)\n elif dist <= spring_rest_distance*t_down:\n # f = {\"force\":[self.info[\"pos\"][0] - e.end.info[\"pos\"][0], self.info[\"pos\"][1] - e.end.info[\"pos\"][1]]} # OLD WAY\n opposite_angle = (utils.angle_from_points(self.info[\"pos\"], e.end.info[\"pos\"]) + math.pi) % (2*math.pi)\n op_e = [self.info[\"pos\"][0] + dist*math.cos(opposite_angle), self.info[\"pos\"][1] + dist*math.sin(opposite_angle)]\n f = {\"force\":[self.info[\"pos\"][0] - op_e[0], self.info[\"pos\"][1] - op_e[1]]}\n f[\"f_dist\"] = utils.normalise(dist, mini=0, maxi=spring_rest_distance) * repulsion_factor\n edges_force_vectors.append(f)\n except KeyError:\n warnings.warn(\"{} or {} does not possess a position information (info[\\\"pos\\\"])\".format(self.id, e.end.id), stacklevel=2)\n\n # for other in self.parent.nodes:\n # if self.equal(other):\n # continue\n # if gm.Edge(self, other) in self.edges:\n # continue\n # try:\n # dist = utils.distance2p(other.info[\"pos\"], self.info[\"pos\"])\n # # if dist >= spring_rest_distance*t_up and dist <= spring_rest_distance*3:\n # # f = {\"force\":[other.info[\"pos\"][0] - self.info[\"pos\"][0], other.info[\"pos\"][1] - self.info[\"pos\"][1]]}\n # # f[\"f_dist\"] = utils.normalise(dist, mini=spring_rest_distance*1.5, maxi=spring_rest_distance*3) * attraction_factor \n # # edges_force_vectors.append(f)\n # if dist < spring_rest_distance*t_down:\n # opposite_angle = (utils.angle_from_points(self.info[\"pos\"], other.info[\"pos\"]) + math.pi) % (2*math.pi)\n # op_e = [self.info[\"pos\"][0] + dist*math.cos(opposite_angle), self.info[\"pos\"][1] + dist*math.sin(opposite_angle)]\n # f = {\"force\":[self.info[\"pos\"][0] - op_e[0], self.info[\"pos\"][1] - op_e[1]]}\n # f[\"f_dist\"] = utils.normalise(dist, mini=0, maxi=spring_rest_distance) * repulsion_factor * 1.2\n # edges_force_vectors.append(f)\n # except KeyError:\n # warnings.warn(\"{} or {} does not possess a position information (info[\\\"pos\\\"])\".format(self.id, other.id), stacklevel=2)\n\n for n_id in self.parent.triangulation.get_neighbours_of(self.id):\n neigh = self.parent.getNodeByID(n_id)\n\n if self.equal(neigh):\n continue\n if gm.Edge(self, neigh) in self.edges:\n continue\n\n dist = utils.distance2p(neigh.info[\"pos\"], self.info[\"pos\"])\n\n if dist >= spring_rest_distance*t_up and dist <= spring_rest_distance*3:\n f = {\"force\":[neigh.info[\"pos\"][0] - self.info[\"pos\"][0], neigh.info[\"pos\"][1] - self.info[\"pos\"][1]]}\n f[\"f_dist\"] = utils.normalise(dist, mini=spring_rest_distance*1.5, maxi=spring_rest_distance*3) * attraction_factor * 0.6\n del_force_vectors.append(f)\n if dist < spring_rest_distance*t_down:\n opposite_angle = (utils.angle_from_points(self.info[\"pos\"], neigh.info[\"pos\"]) + math.pi) % (2*math.pi)\n op_e = [self.info[\"pos\"][0] + dist*math.cos(opposite_angle), self.info[\"pos\"][1] + dist*math.sin(opposite_angle)]\n f = {\"force\":[self.info[\"pos\"][0] - op_e[0], self.info[\"pos\"][1] - op_e[1]]}\n f[\"f_dist\"] = utils.normalise(dist, mini=0, maxi=spring_rest_distance) * repulsion_factor * 1.4\n del_force_vectors.append(f)\n\n\n final_force = [0, 0]\n\n if edges_force_vectors != []:\n for f in edges_force_vectors:\n final_force[0] += f[\"force\"][0] * 
f[\"f_dist\"]\n final_force[1] += f[\"force\"][1] * f[\"f_dist\"]\n\n final_force[0] /= len(edges_force_vectors)\n final_force[1] /= len(edges_force_vectors)\n elif del_force_vectors != []:\n for f in del_force_vectors:\n final_force[0] += f[\"force\"][0] * f[\"f_dist\"]\n final_force[1] += f[\"force\"][1] * f[\"f_dist\"]\n\n final_force[0] /= len(del_force_vectors)\n final_force[1] /= len(del_force_vectors)\n\n final_force_mag = np.linalg.norm(np.array(final_force))\n\n spd_factor = 1\n if final_force_mag > spring_rest_distance:\n spd_factor = utils.normalise(final_force_mag, mini=spring_rest_distance*t_up, maxi=spring_rest_distance*3)\n else:\n spd_factor = 1 - utils.normalise(final_force_mag, mini=0, maxi=spring_rest_distance*t_down)\n\n if spd_factor > -0.005 and spd_factor < 0.005:\n spd_factor = 0\n # else:\n # utils.clamp(spd_factor, -1, 1)\n\n # if final_force_mag < 0.5:\n # spd_factor = 0\n\n self.move(math.atan2(final_force[1], final_force[0]), speed*spd_factor, collision=collision)\n\nclass gGraph(gm.Graph):\n\n def __init__(self, node_type=None, oriented=True):\n super().__init__(node_type=node_type, oriented=oriented)\n self.delaunay_points = None\n self.delaunay = None\n self._draw_delaunay = True\n\n def setDelaunay(self, dcl=-1):\n dict_pos = {}\n for n in self.nodes:\n dict_pos[n.id] = n.info[\"pos\"]\n\n self.triangulation = dt.Delaunay_Triangulation(dict_pos)\n self.triangulation.delaunay_cut_links = dcl\n self.triangulation.update()\n\n def computeDelaunay(self):\n # self.delaunay_points = np.array([p.info[\"pos\"] for p in self.nodes])\n # self.delaunay = Delaunay(self.delaunay_points)\n dict_pos = {}\n for n in self.nodes:\n dict_pos[n.id] = n.info[\"pos\"]\n\n self.triangulation.update(new_positions=dict_pos)\n\n def drawDelaunay(self, surface, color):\n for n in self.nodes:\n dneigh = self.triangulation.get_neighbours_of(n.id)\n for dn in dneigh:\n pygame.draw.line(surface, color, n.info[\"pos\"], self.getNodeByID(dn).info[\"pos\"])\n\n def draw(self, surface):\n for n in self.nodes:\n try:\n color = n.info[\"color\"]\n except KeyError:\n color = (255, 255, 255)\n n.drawEdges(surface, color)\n for n in self.nodes:\n try:\n color = n.info[\"color\"]\n except KeyError:\n color = (255, 255, 255)\n try:\n out_color = n.info[\"outline_color\"]\n n.drawNode(surface, color, outline_color=out_color)\n except KeyError:\n n.drawNode(surface, color)\n \n if self._draw_delaunay:\n self.drawDelaunay(surface, (0, 0, 255))"
] | [
[
"numpy.array"
]
] |
inesnolas/Rank-based-loss_ICASSP22 | [
"3ebe7345dc26b8fa74543725a51b43b7170c58cc"
] | [
"run_example.py"
] | [
"import models.SingleLayer_net as single_layer\nimport loss_functions.rank_based_loss as rbl\n# import wandb\nimport torch\nimport utils.data_functions as df\nimport os\nimport json\nimport pandas as pd\nimport csv\n\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '3'\n# wandb.init(project='example')\n\nexp_name = 'example'\n# wandb.run.name = exp_name\nstandardized_data = True\nsave_training_embeddings_to_plot = True\nshuffle = False \ndrop_last = False \n\nexperiments_folder =\"./example_data\"\n\ninitial_embeddings_path = os.path.join(experiments_folder, 'Normalized_VGGish_embeddings_based_on_Training_Set')\ntrain_initial_embeddings_path = os.path.join(initial_embeddings_path, 'train')\nval_initial_embeddings_path = os.path.join(initial_embeddings_path, 'val')\ntest_initial_embeddings_path = os.path.join(initial_embeddings_path, 'test')\n\nresults_folder = os.path.join(experiments_folder, \"results_\"+exp_name)\ncheckpoints_folder = os.path.join(results_folder, \"checkpoints\")\nif not os.path.exists(checkpoints_folder):\n os.makedirs(checkpoints_folder)\n\nif save_training_embeddings_to_plot:\n if not os.path.exists(os.path.join(checkpoints_folder, \"Embeddings_plot\")):\n os.mkdir(os.path.join(checkpoints_folder, \"Embeddings_plot\"))\n \ntrain_df = pd.read_csv(os.path.join(experiments_folder, 'train.csv'), dtype = str)\nval_df = pd.read_csv(os.path.join(experiments_folder, 'val.csv'), dtype = str)\ntest_df = pd.read_csv(os.path.join(experiments_folder, 'test.csv'), dtype = str)\n\nconfigs = {\"EMBEDDINGS_SIZE\" : 128,\n\"output_EMBEDDINGS_SIZE\" :3, \n\"EARLY_STOPPING_PTC\" : 20,\n\"LR\" : 1e-5,\n\"BATCH_SIZE\" : 12,\n\"n_epochs\" : 100, \n}\nparams = {'batch_size': configs[\"BATCH_SIZE\"],'shuffle': shuffle, 'drop_last': drop_last}\n\ntraining_set = df.RankBasedLossHierarchicalLabelsEmbeddings(train_df, train_initial_embeddings_path, target_labels='hierarchical_labels')#,'species','taxon'])\ntraining_generator = torch.utils.data.DataLoader(training_set, **params)\nlen_train = len(training_set)\n\n\nvalidation_set = df.RankBasedLossHierarchicalLabelsEmbeddings(val_df , val_initial_embeddings_path, target_labels='hierarchical_labels')#,'species','taxon'])\nparams_val = {'batch_size': configs[\"BATCH_SIZE\"],'shuffle': False, 'drop_last': False}\nvalidation_generator = torch.utils.data.DataLoader(validation_set, **params_val)\nlen_val = len(validation_set)\n\nmodel =single_layer.SingleLayerHypersphereConstraint(configs)\n\n# wandb.watch(model)\n# wandb.config = configs\n# wandb.config[\"architecture\"] = \"LinLayer_cosinedist\"\n# wandb.config[\"dataset\"] = \"TuT\"\nwith open(os.path.join(results_folder, 'configs_dict'), \"w\") as c:\n json.dump(configs, c)\n\ncheckpoint_name = rbl.train_RbL(model, training_generator, validation_generator,\n checkpoints_folder, configs['EARLY_STOPPING_PTC'], save_training_embeddings_to_plot, \n configs['n_epochs'], configs, distance='cosine',\n number_of_ranks = 4)\n\n\n\nprint( \"\\nFinished training, will now use the checkpoint to generate embeddings for the test set:\")\n# Predict with checkpoint:\n\n# if save_embeddings_to_plot:\nif not os.path.exists(os.path.join(results_folder, \"test_Embeddings_plot\")):\n os.mkdir(os.path.join(results_folder, \"test_Embeddings_plot\"))\n\ntest_set = df.RankBasedLossHierarchicalLabelsEmbeddings(test_df, test_initial_embeddings_path, target_labels = 'hierarchical_labels')\ntest_generator = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False)\nlen_test = len(test_set)\n\n# load the checkpoint, configs and 
model\nwith open(os.path.join(results_folder, \"configs_dict\") )as c:\n configs = json.load(c)\n\nmodel=single_layer.SingleLayerHypersphereConstraint(configs)\nmodel.load_state_dict(torch.load(checkpoint_name)[\"net_dict\"])\n\nsil_id, sil_species =rbl.predict(model, test_generator, configs, results_folder)\nprint(\"sil_fine level\", sil_id)\nprint('sil_coarse level', sil_species)\nwith open(os.path.join(results_folder, 'silhouettes_on_test_set.csv'), 'w') as fout:\n writer = csv.writer(fout)\n writer.writerow(['sil_fine_level', str(sil_id)])\n writer.writerow(['sil_coarse_level', str(sil_species)])"
] | [
[
"torch.utils.data.DataLoader",
"torch.load"
]
] |